[Python-modules-commits] [elasticsearch-curator] 01/11: Import elasticsearch-curator_4.2.4.orig.tar.gz

Apollon Oikonomopoulos apoikos at moszumanska.debian.org
Fri Dec 16 13:07:14 UTC 2016


This is an automated email from the git hooks/post-receive script.

apoikos pushed a commit to branch master
in repository elasticsearch-curator.

commit 1ea533b9c772000e427d8b03a2b425489821d25a
Author: Apollon Oikonomopoulos <apoikos at debian.org>
Date:   Fri Dec 16 11:08:24 2016 +0200

    Import elasticsearch-curator_4.2.4.orig.tar.gz
---
 .gitignore                                         |   3 +
 .travis.yml                                        |   4 +-
 README.rst                                         |  89 ++-
 Vagrant/centos/6/Vagrantfile                       |  18 +
 Vagrant/centos/7/Vagrantfile                       |  18 +
 Vagrant/ubuntu/14.04/Vagrantfile                   |  19 +
 curator/_version.py                                |   2 +-
 curator/actions.py                                 |  87 +++
 curator/cli.py                                     |  50 +-
 curator/config_utils.py                            |  49 ++
 curator/curator.py                                 |   5 -
 curator/curator_cli.py                             |   5 +
 curator/defaults/settings.py                       |   8 +-
 curator/es_repo_mgr.py                             |   4 -
 curator/indexlist.py                               |   2 +-
 curator/repomgrcli.py                              |  73 +--
 curator/singletons.py                              | 695 +++++++++++++++++++++
 curator/snapshotlist.py                            |   1 +
 curator/utils.py                                   |  50 +-
 curator/validators/actions.py                      |  11 +-
 curator/validators/config_file.py                  |  34 +-
 curator/validators/filter_elements.py              |  36 +-
 curator/validators/filters.py                      |  30 +-
 curator/validators/filtertypes.py                  |  15 +-
 curator/validators/options.py                      |  76 ++-
 docs/Changelog.rst                                 | 172 +++++
 docs/actionclasses.rst                             |   6 +
 docs/asciidoc/actions.asciidoc                     |  59 +-
 docs/asciidoc/command-line.asciidoc                | 180 +++++-
 docs/asciidoc/configuration.asciidoc               |  24 +-
 docs/asciidoc/examples.asciidoc                    |  90 ++-
 docs/asciidoc/faq.asciidoc                         |  70 +++
 docs/asciidoc/getting_started.asciidoc             |  22 -
 docs/asciidoc/index.asciidoc                       |   6 +-
 docs/asciidoc/installation.asciidoc                | 385 ++++++++----
 docs/asciidoc/options.asciidoc                     |  43 +-
 docs/asciidoc/security.asciidoc                    | 104 +++
 docs/asciidoc/upgrading.asciidoc                   |  29 -
 requirements.txt                                   |   5 +-
 run_curator.py                                     |  33 +-
 run_es_repo_mgr.py                                 |  33 +-
 run_singleton.py                                   |  37 ++
 setup.py                                           |  49 +-
 test/integration/__init__.py                       |   4 +-
 .../{test_forcemerge.py => test_clusterrouting.py} |  34 +-
 test/integration/test_es_repo_mgr.py               | 139 ++---
 test/integration/test_forcemerge.py                |   8 +-
 test/integration/testvars.py                       |  24 +
 test/unit/test_action_alias.py                     |  12 +
 test/unit/test_action_allocation.py                |   9 +
 test/unit/test_action_close.py                     |   6 +
 test/unit/test_action_clusterrouting.py            |  76 +++
 test/unit/test_action_delete_indices.py            |   7 +
 test/unit/test_action_forcemerge.py                |   7 +
 test/unit/test_action_open.py                      |   4 +
 test/unit/test_action_replicas.py                  |   6 +
 test/unit/test_action_restore.py                   |   3 +
 test/unit/test_action_snapshot.py                  |  13 +
 test/unit/test_class_index_list.py                 |  90 +++
 test/unit/test_utils.py                            |  13 +
 travis-run.sh                                      |   6 +-
 unix_packages/build_official_package.sh            | 157 +++++
 unix_packages/build_package_from_source.sh         | 155 +++++
 unix_packages/cx_freeze-5.0.dev.tar.gz             | Bin 0 -> 82899 bytes
 unix_packages/cx_freeze.setup.py.patch             |  34 +
 65 files changed, 2975 insertions(+), 563 deletions(-)

diff --git a/.gitignore b/.gitignore
index 3fec439..8f2f6d1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,3 +17,6 @@ index.html
 docs/asciidoc/html_docs
 wheelhouse
 Elastic.ico
+Vagrant/centos/6/.vagrant
+Vagrant/centos/7/.vagrant
+Vagrant/ubuntu/14.04/.vagrant
diff --git a/.travis.yml b/.travis.yml
index 850dc52..c22ffc1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,8 +10,8 @@ env:
   - ES_VERSION=2.1.1
   - ES_VERSION=2.2.2
   - ES_VERSION=2.3.5
-  - ES_VERSION=2.4.0
-  - ES_VERSION=5.0.0-alpha5
+  - ES_VERSION=2.4.2
+  - ES_VERSION=5.0.0
 
 os: linux
 
diff --git a/README.rst b/README.rst
index e1baf0b..0595d50 100644
--- a/README.rst
+++ b/README.rst
@@ -10,15 +10,15 @@ Like a museum curator manages the exhibits and collections on display,
 Elasticsearch Curator helps you curate, or manage your indices.
 
 Compatibility Matrix
-=======
+====================
 
-+--------+----------+----------+----------+
-|Version | ES 1.x   | ES 2.x   | ES 5.x   |
-+========+==========+==========+==========+
-|    3   |    yes   |     yes  |     no   |
-+--------+----------+----------+----------+
-|    4   |    no    |     yes  |     yes  |
-+--------+----------+----------+----------+
++--------+----------+------------+----------+------------+----------+
+|Version | ES 1.x   | AWS ES 1.x | ES 2.x   | AWS ES 2.x | ES 5.x   |
++========+==========+============+==========+============+==========+
+|    3   |    yes   |     yes*   |   yes    |     yes*   |   no     |
++--------+----------+------------+----------+------------+----------+
+|    4   |    no    |     no     |   yes    |     no     |   yes    |
++--------+----------+------------+----------+------------+----------+
 
 It is important to note that Curator 4 will not work with indices created in
 versions of Elasticsearch older than 1.4 (if they have been subsequently
@@ -33,11 +33,17 @@ following:
     "creation_date"! This implies that the index predates Elasticsearch v1.4.
     For safety, this index will be removed from the actionable list.
 
-It is also important to note that Curator 4 requires access to the 
-``/_cluster/state/metadata`` endpoint.  Forks of Elasticsearch which do not 
-support this endpoint (such as AWS ES, see #717) *will not* be able to use 
+It is also important to note that Curator 4 requires access to the
+``/_cluster/state/metadata`` endpoint.  Forks of Elasticsearch which do not
+support this endpoint (such as AWS ES, see #717) *will not* be able to use
 Curator version 4.
 
+\* It appears that AWS ES `does not allow access to the snapshot status endpoint`_ 
+for either 1.x or 2.x versions.  This prevents Curator 3 from being used to 
+make snapshots.
+
+.. _does not allow access to the snapshot status endpoint: https://github.com/elastic/curator/issues/796
+
 Build Status
 ------------
 
@@ -50,6 +56,10 @@ Build Status
 +--------+----------+
 | 4.0    | |4_0|    |
 +--------+----------+
+| 4.1    | |4_1|    |
++--------+----------+
+| 4.2    | |4_2|    |
++--------+----------+
 
 PyPI: |pypi_pkg|
 
@@ -59,13 +69,17 @@ PyPI: |pypi_pkg|
     :target: https://travis-ci.org/elastic/curator
 .. |4_0| image:: https://travis-ci.org/elastic/curator.svg?branch=4.0
     :target: https://travis-ci.org/elastic/curator
+.. |4_1| image:: https://travis-ci.org/elastic/curator.svg?branch=4.1
+    :target: https://travis-ci.org/elastic/curator
+.. |4_2| image:: https://travis-ci.org/elastic/curator.svg?branch=4.2
+    :target: https://travis-ci.org/elastic/curator
 .. |pypi_pkg| image:: https://badge.fury.io/py/elasticsearch-curator.svg
     :target: https://badge.fury.io/py/elasticsearch-curator
 
 `Curator API Documentation`_
 ----------------------------
 
-Version 4.0 of Curator ships with both an API and a wrapper script (which is
+Version 4 of Curator ships with both an API and a wrapper script (which is
 actually defined as an entry point).  The API allows you to write your own
 scripts to accomplish similar goals, or even new and different things with the
 `Curator API`_, and the `Elasticsearch Python API`_.
@@ -143,10 +157,59 @@ integration tests against it. This will delete all the data stored there! You
 can use the env variable ``TEST_ES_SERVER`` to point to a different instance
 (for example, 'otherhost:9203').
 
+Binary Executables
+------------------
+
+The combination of `setuptools <https://github.com/pypa/setuptools>`_ and
+`cx_Freeze <http://cx-freeze.sourceforge.net>`_ allows for Curator to be
+compiled into binary packages.  These consist of a binary file placed in a
+directory which contains all the libraries required to run it.
+
+In order to make a binary package you must manually install the ``cx_freeze``
+python module.  You can do this via ``pip``, or ``python setup.py install``,
+or by package, if such exists for your platform.  In order to make it compile on
+recent Debian/Ubuntu platforms, a patch had to be applied to the ``setup.py``
+file in the extracted folder.  This patch file is in the ``unix_packages``
+directory in this repository.
+
+With ``cx_freeze`` installed, building a binary package is as simple as running
+``python setup.py build_exe``.  In Linux distributions, the results will be in
+the ``build`` directory, in a subdirectory labelled
+``exe.linux-x86_64-${PYVER}``, where `${PYVER}` is the current major/minor
+version of Python, e.g. ``2.7``.  This directory can be renamed as desired.
+
+Other entry-points that are defined in the ``setup.py`` file, such as
+``es_repo_mgr``, will also appear in this directory.
+
+The process is identical for building the binary package for Windows.  It must
+be run from a Windows machine with all dependencies installed.  Executables in
+Windows will have the ``.exe`` suffix attached.  The directory in ``build`` will
+be named ``exe.win-amd64-${PYVER}``, where `${PYVER}` is the current major/minor
+version of Python, e.g. ``2.7``.  This directory can be renamed as desired.
+
+In Windows, cx_Freeze also allows for building rudimentary MSI installers.  This
+can be done by invoking ``python setup.py bdist_msi``.  The MSI fill will be in
+the ``dist`` directory, and will be named
+``elasticsearch-curator-#.#.#-amd64.msi``, where the major, minor, and patch
+version numbers are substituted accordingly.  One drawback to this rudimentary
+MSI is that it does not allow updates to be installed on top of the existing
+installation.  You must uninstall the old version before installing the newer
+one.
+
+The ``unix_packages`` directory contains the ``build_packages.sh`` script used
+to generate the packages for the Curator YUM and APT repositories.  The
+``Vagrant`` directory has the Vagrantfiles used in conjunction with the
+``build_packages.sh`` script.  If you wish to use this method on your own, you
+must ensure that the shared folders exist.  ``/curator_packages`` is where the
+packages will be placed after building.  ``/curator_source`` is the path to the
+Curator source code, so that the ``build_packages.sh`` script can be called from
+there.  The ``build_packages.sh`` script does `not` use the local source code,
+but rather pulls the version specified as an argument directly from GitHub.
+
 Versioning
 ----------
 
-Version 4.0 of Curator is the current ``master`` branch.  It supports
+Version 4 of Curator is the current ``master`` branch.  It supports
 Elasticsearch versions 2.0 through 5.0.  This is the first release of Curator
 that is not fully reverse compatible.
 
diff --git a/Vagrant/centos/6/Vagrantfile b/Vagrant/centos/6/Vagrantfile
new file mode 100644
index 0000000..3e23f38
--- /dev/null
+++ b/Vagrant/centos/6/Vagrantfile
@@ -0,0 +1,18 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+  config.vm.box = "elastic/centos-6-x86_64"
+
+  config.vm.provision "shell", inline: <<-SHELL
+    sudo yum -y groupinstall "Development Tools"
+    sudo yum -y install python-devel zlib-devel bzip2-devel sqlite sqlite-devel openssl-devel
+  SHELL
+
+  config.vm.synced_folder "/curator_packages", "/curator_packages", create: true, owner: "vagrant", group: "vagrant"
+  config.vm.synced_folder "/curator_source", "/curator_source", create: true, owner: "vagrant", group: "vagrant"
+
+  config.vm.provider "virtualbox" do |v|
+    v.customize ["modifyvm", :id, "--nictype1", "virtio"]
+  end
+end
diff --git a/Vagrant/centos/7/Vagrantfile b/Vagrant/centos/7/Vagrantfile
new file mode 100644
index 0000000..4914880
--- /dev/null
+++ b/Vagrant/centos/7/Vagrantfile
@@ -0,0 +1,18 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+  config.vm.box = "elastic/centos-7-x86_64"
+
+  config.vm.provision "shell", inline: <<-SHELL
+    sudo yum -y groupinstall "Development Tools"
+    sudo yum -y install python-devel zlib-devel bzip2-devel sqlite sqlite-devel openssl-devel
+  SHELL
+
+  config.vm.synced_folder "/curator_packages", "/curator_packages", create: true, owner: "vagrant", group: "vagrant"
+  config.vm.synced_folder "/curator_source", "/curator_source", create: true, owner: "vagrant", group: "vagrant"
+
+  config.vm.provider "virtualbox" do |v|
+    v.customize ["modifyvm", :id, "--nictype1", "virtio"]
+  end
+end
diff --git a/Vagrant/ubuntu/14.04/Vagrantfile b/Vagrant/ubuntu/14.04/Vagrantfile
new file mode 100644
index 0000000..7809d08
--- /dev/null
+++ b/Vagrant/ubuntu/14.04/Vagrantfile
@@ -0,0 +1,19 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+  config.vm.box = "ubuntu/trusty64"
+
+  config.vm.provision "shell", inline: <<-SHELL
+    sudo apt-get -y autoremove
+    sudo apt-get update
+    sudo apt-get install -y libxml2-dev zlib1g-dev pkg-config python-dev make build-essential libssl-dev libbz2-dev libsqlite3-dev
+  SHELL
+
+  config.vm.synced_folder "/curator_packages", "/curator_packages", create: true, owner: "vagrant", group: "vagrant"
+  config.vm.synced_folder "/curator_source", "/curator_source", create: true, owner: "vagrant", group: "vagrant"
+
+  config.vm.provider "virtualbox" do |v|
+    v.customize ["modifyvm", :id, "--nictype1", "virtio"]
+  end
+end
diff --git a/curator/_version.py b/curator/_version.py
index fa721b4..4b08b59 100644
--- a/curator/_version.py
+++ b/curator/_version.py
@@ -1 +1 @@
-__version__ = '4.1.0'
+__version__ = '4.2.4'
diff --git a/curator/actions.py b/curator/actions.py
index bd20529..0ce6c4f 100644
--- a/curator/actions.py
+++ b/curator/actions.py
@@ -259,6 +259,93 @@ class Close(object):
         except Exception as e:
             report_failure(e)
 
+class ClusterRouting(object):
+    def __init__(
+        self, client, routing_type=None, setting=None, value=None,
+        wait_for_completion=False, timeout=30,
+    ):
+        """
+        For now, the cluster routing settings are hardcoded to be ``transient``
+
+        :arg client: An :class:`elasticsearch.Elasticsearch` client object
+        :arg routing_type: Type of routing to apply. Either `allocation` or
+            `rebalance`
+        :arg setting: Currently, the only acceptable value for `setting` is
+            ``enable``. This is here in case that changes.
+        :arg value: Used only if `setting` is `enable`. Semi-dependent on
+            `routing_type`. Acceptable values for `allocation` and `rebalance`
+            are ``all``, ``primaries``, and ``none`` (string, not `NoneType`).
+            If `routing_type` is `allocation`, this can also be
+            ``new_primaries``, and if `rebalance`, it can be ``replicas``.
+        :arg wait_for_completion: Wait (or not) for the operation
+            to complete before returning.  (default: `False`)
+        :type wait_for_completion: bool
+        :arg timeout: Number of seconds to `wait_for_completion`
+        """
+        verify_client_object(client)
+        #: Instance variable.
+        #: An :class:`elasticsearch.Elasticsearch` client object
+        self.client  = client
+        self.loggit  = logging.getLogger('curator.actions.cluster_routing')
+        #: Instance variable.
+        #: Internal reference to `wait_for_completion`
+        self.wfc     = wait_for_completion
+        #: Instance variable.
+        #: How long in seconds to `wait_for_completion` before returning with an
+        #: exception
+        self.timeout = '{0}s'.format(timeout)
+
+        if setting != 'enable':
+            raise ValueError(
+                'Invalid value for "setting": {0}.'.format(setting)
+            )
+        if routing_type == 'allocation':
+            if value not in ['all', 'primaries', 'new_primaries', 'none']:
+                raise ValueError(
+                    'Invalid "value": {0} with "routing_type":'
+                    '{1}.'.format(value, routing_type)
+                )
+        elif routing_type == 'rebalance':
+            if value not in ['all', 'primaries', 'replicas', 'none']:
+                raise ValueError(
+                    'Invalid "value": {0} with "routing_type":'
+                    '{1}.'.format(value, routing_type)
+                )
+        else:
+            raise ValueError(
+                'Invalid value for "routing_type": {0}.'.format(routing_type)
+            )
+        bkey = 'cluster.routing.{0}.{1}'.format(routing_type,setting)
+        self.body = { 'transient' : { bkey : value } }
+
+    def do_dry_run(self):
+        """
+        Log what the output would be, but take no action.
+        """
+        logger.info('DRY-RUN MODE.  No changes will be made.')
+        self.loggit.info(
+            'DRY-RUN: Update cluster routing settings with arguments: '
+            '{0}'.format(self.body)
+        )
+
+    def do_action(self):
+        """
+        Change cluster routing settings with the settings in `body`.
+        """
+        self.loggit.info('Updating cluster settings: {0}'.format(self.body))
+        try:
+            self.client.cluster.put_settings(body=self.body)
+            if self.wfc:
+                logger.debug(
+                    'Waiting for shards to complete routing and/or rebalancing'
+                )
+                self.client.cluster.health(
+                    level='indices', wait_for_relocating_shards=0,
+                    timeout=self.timeout,
+                )
+        except Exception as e:
+            report_failure(e)
+
 class CreateIndex(object):
     def __init__(self, client, name, extra_settings={}):
         """
diff --git a/curator/cli.py b/curator/cli.py
index d2d95a3..3dcefb3 100644
--- a/curator/cli.py
+++ b/curator/cli.py
@@ -4,28 +4,20 @@ import logging
 import click
 from voluptuous import Schema
 from .defaults import settings
-from .validators import SchemaCheck, config_file
+from .validators import SchemaCheck
+from .config_utils import process_config
 from .exceptions import *
 from .utils import *
 from .indexlist import IndexList
 from .snapshotlist import SnapshotList
 from .actions import *
 from ._version import __version__
-from .logtools import LogInfo, Whitelist, Blacklist
-
-try:
-    from logging import NullHandler
-except ImportError:
-    from logging import Handler
-
-    class NullHandler(Handler):
-        def emit(self, record):
-            pass
 
 CLASS_MAP = {
     'alias' :  Alias,
     'allocation' : Allocation,
     'close' : Close,
+    'cluster_routing' : ClusterRouting,
     'create_index' : CreateIndex,
     'delete_indices' : DeleteIndices,
     'delete_snapshots' : DeleteSnapshots,
@@ -68,9 +60,6 @@ def process_action(client, config, **kwargs):
     ### Update the defaults with whatever came with opts, minus any Nones
     mykwargs.update(prune_nones(opts))
     logger.debug('Action kwargs: {0}'.format(mykwargs))
-    # This is no longer necessary with the config schema validator
-    # # Verify the args we're going to pass match the action
-    # verify_args(action, mykwargs)
 
     ### Set up the action ###
     if action == 'alias':
@@ -88,7 +77,7 @@ def process_action(client, config, **kwargs):
             removes = IndexList(client)
             removes.iterate_filters(config['remove'])
             action_obj.remove(removes)
-    elif action == 'create_index':
+    elif action in [ 'cluster_routing', 'create_index' ]:
         action_obj = action_class(client, **mykwargs)
     elif action == 'delete_snapshots' or action == 'restore':
         logger.debug('Running "{0}"'.format(action))
@@ -123,35 +112,8 @@ def cli(config, dry_run, action_file):
 
     See http://elastic.co/guide/en/elasticsearch/client/curator/current
     """
-    # Get config from yaml file
-    yaml_config  = get_yaml(config)
-    # if the file is empty, which is still valid yaml, set as an empty dict
-    yaml_config = {} if not yaml_config else prune_nones(yaml_config)
-    # Voluptuous can't verify the schema of a dict if it doesn't have keys,
-    # so make sure the keys are at least there and are dict()
-    for k in ['client', 'logging']:
-        if k not in yaml_config:
-            yaml_config[k] = {}
-        else:
-            yaml_config[k] = prune_nones(yaml_config[k])
-    config_dict = SchemaCheck(yaml_config, config_file.client(),
-        'Client Configuration', 'full configuration dictionary').result()
-    # Set up logging
-    log_opts = config_dict['logging']
-    loginfo = LogInfo(log_opts)
-    logging.root.addHandler(loginfo.handler)
-    logging.root.setLevel(loginfo.numeric_log_level)
-    logger = logging.getLogger('curator.cli')
-    # Set up NullHandler() to handle nested elasticsearch.trace Logger
-    # instance in elasticsearch python client
-    logging.getLogger('elasticsearch.trace').addHandler(NullHandler())
-    if log_opts['blacklist']:
-        for bl_entry in ensure_list(log_opts['blacklist']):
-            for handler in logging.root.handlers:
-                handler.addFilter(Blacklist(bl_entry))
-
-    client_args = config_dict['client']
-    test_client_options(client_args)
+    client_args = process_config(config)
+    logger = logging.getLogger(__name__)
     logger.debug('Client and logging options validated.')
 
     # Extract this and save it for later, in case there's no timeout_override.
diff --git a/curator/config_utils.py b/curator/config_utils.py
new file mode 100644
index 0000000..29ac435
--- /dev/null
+++ b/curator/config_utils.py
@@ -0,0 +1,49 @@
+from voluptuous import Schema
+# from .defaults import settings
+from .validators import SchemaCheck, config_file
+from .utils import *
+from .logtools import LogInfo, Whitelist, Blacklist
+
+def test_config(config):
+    # Get config from yaml file
+    yaml_config  = get_yaml(config)
+    # if the file is empty, which is still valid yaml, set as an empty dict
+    yaml_config = {} if not yaml_config else prune_nones(yaml_config)
+    # Voluptuous can't verify the schema of a dict if it doesn't have keys,
+    # so make sure the keys are at least there and are dict()
+    for k in ['client', 'logging']:
+        if k not in yaml_config:
+            yaml_config[k] = {}
+        else:
+            yaml_config[k] = prune_nones(yaml_config[k])
+    return SchemaCheck(yaml_config, config_file.client(),
+        'Client Configuration', 'full configuration dictionary').result()
+
+def set_logging(log_opts):
+    try:
+        from logging import NullHandler
+    except ImportError:
+        from logging import Handler
+
+        class NullHandler(Handler):
+            def emit(self, record):
+                pass
+
+    # Set up logging
+    loginfo = LogInfo(log_opts)
+    logging.root.addHandler(loginfo.handler)
+    logging.root.setLevel(loginfo.numeric_log_level)
+    logger = logging.getLogger('curator.cli')
+    # Set up NullHandler() to handle nested elasticsearch.trace Logger
+    # instance in elasticsearch python client
+    logging.getLogger('elasticsearch.trace').addHandler(NullHandler())
+    if log_opts['blacklist']:
+        for bl_entry in ensure_list(log_opts['blacklist']):
+            for handler in logging.root.handlers:
+                handler.addFilter(Blacklist(bl_entry))
+
+def process_config(yaml_file):
+    config = test_config(yaml_file)
+    set_logging(config['logging'])
+    test_client_options(config['client'])
+    return config['client']
diff --git a/curator/curator.py b/curator/curator.py
deleted file mode 100755
index 18b0d93..0000000
--- a/curator/curator.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import click
-from .cli import cli
-
-def main():
-    cli()
diff --git a/curator/curator_cli.py b/curator/curator_cli.py
new file mode 100755
index 0000000..f0d703f
--- /dev/null
+++ b/curator/curator_cli.py
@@ -0,0 +1,5 @@
+import click
+from .singletons import cli
+
+def main():
+    cli(obj={})
diff --git a/curator/defaults/settings.py b/curator/defaults/settings.py
index cc53d13..a28b8c4 100644
--- a/curator/defaults/settings.py
+++ b/curator/defaults/settings.py
@@ -3,7 +3,7 @@ from voluptuous import *
 
 # Elasticsearch versions supported
 def version_max():
-    return (5, 1, 0)
+    return (5, 1, 99)
 def version_min():
     return (2, 0, 0)
 
@@ -35,6 +35,10 @@ def date_regex():
     }
 
 # Actions
+
+def cluster_actions():
+    return [ 'cluster_routing' ]
+
 def index_actions():
     return [
         'alias',
@@ -52,7 +56,7 @@ def snapshot_actions():
     return [ 'delete_snapshots', 'restore' ]
 
 def all_actions():
-    return sorted(index_actions() + snapshot_actions())
+    return sorted(cluster_actions() + index_actions() + snapshot_actions())
 
 def index_filtertypes():
     return [
diff --git a/curator/es_repo_mgr.py b/curator/es_repo_mgr.py
deleted file mode 100755
index 39be573..0000000
--- a/curator/es_repo_mgr.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from . import repomgrcli
-
-def main():
-    repomgrcli.repo_mgr_cli()
diff --git a/curator/indexlist.py b/curator/indexlist.py
index e47a37f..284188b 100644
--- a/curator/indexlist.py
+++ b/curator/indexlist.py
@@ -64,7 +64,7 @@ class IndexList(object):
         """
         self.loggit.debug('Getting all indices')
         self.all_indices = get_indices(self.client)
-        self.indices = self.all_indices
+        self.indices = self.all_indices[:]
         self.empty_list_check()
         for index in self.indices:
             self.__build_index_info(index)
diff --git a/curator/repomgrcli.py b/curator/repomgrcli.py
index 5ced77e..33c73b2 100644
--- a/curator/repomgrcli.py
+++ b/curator/repomgrcli.py
@@ -5,21 +5,12 @@ import sys
 import logging
 from .defaults import settings
 from .exceptions import *
+from .config_utils import process_config
 from .utils import *
 from ._version import __version__
-from .logtools import LogInfo
 
 logger = logging.getLogger('curator.repomgrcli')
 
-try:
-    from logging import NullHandler
-except ImportError:
-    from logging import Handler
-
-    class NullHandler(Handler):
-        def emit(self, record):
-            pass
-
 def delete_callback(ctx, param, value):
     if not value:
         ctx.abort()
@@ -31,8 +22,15 @@ def show_repos(client):
 
 @click.command(short_help='Filesystem Repository')
 @click.option('--repository', required=True, type=str, help='Repository name')
-@click.option('--location', required=True, type=str,
-            help='Shared file-system location. Must match remote path, & be accessible to all master & data nodes')
+@click.option(
+    '--location',
+    required=True,
+    type=str,
+    help=(
+        'Shared file-system location. '
+        'Must match remote path, & be accessible to all master & data nodes'
+    )
+)
 @click.option('--compression', type=bool, default=True, show_default=True,
             help='Enable/Disable metadata compression.')
 @click.option('--chunk_size', type=str,
@@ -50,7 +48,8 @@ def fs(
     """
     Create a filesystem repository.
     """
-    client = get_client(**ctx.parent.parent.params)
+    logger = logging.getLogger('curator.repomgrcli.fs')
+    client = get_client(**ctx.obj['client_args'])
     try:
         create_repository(client, repo_type='fs', **ctx.params)
     except FailedExecution as e:
@@ -85,7 +84,8 @@ def s3(
     """
     Create an S3 repository.
     """
-    client = get_client(**ctx.parent.parent.params)
+    logger = logging.getLogger('curator.repomgrcli.s3')
+    client = get_client(**ctx.obj['client_args'])
     try:
         create_repository(client, repo_type='s3', **ctx.params)
     except FailedExecution as e:
@@ -95,41 +95,19 @@ def s3(
 
 @click.group()
 @click.option(
-    '--host', help='Elasticsearch host.', default='127.0.0.1')
-@click.option(
-    '--url_prefix', help='Elasticsearch http url prefix.',default='')
-@click.option('--port', help='Elasticsearch port.', default=9200, type=int)
-@click.option('--use_ssl', help='Connect to Elasticsearch through SSL.', is_flag=True)
-@click.option('--certificate', help='Path to certificate to use for SSL validation. (OPTIONAL)', type=str, default=None)
-@click.option('--client-cert', help='Path to file containing SSL certificate for client auth. (OPTIONAL)', type=str, default=None)
-@click.option('--client-key', help='Path to file containing SSL key for client auth. (OPTIONAL)', type=str, default=None)
-@click.option('--ssl-no-validate', help='Do not validate server\'s SSL certificate', is_flag=True)
-@click.option('--http_auth', help='Use Basic Authentication ex: user:pass', default='')
-@click.option('--timeout', help='Connection timeout in seconds.', default=30, type=int)
-@click.option('--master-only', is_flag=True, help='Only operate on elected master node.')
-@click.option('--debug', is_flag=True, help='Debug mode')
-@click.option('--loglevel', help='Log level', default='INFO')
-@click.option('--logfile', help='log file', default=None)
-@click.option('--logformat', help='Log output format [default|logstash].', default='default')
-@click.version_option(version=__version__)
+    '--config',
+    help="Path to configuration file. Default: ~/.curator/curator.yml",
+    type=click.Path(exists=True), default=settings.config_file()
+)
 @click.pass_context
-def repo_mgr_cli(
-        ctx, host, url_prefix, port, use_ssl, certificate, client_cert,
-        client_key, ssl_no_validate, http_auth, timeout, master_only, debug,
-        loglevel, logfile, logformat):
+def repo_mgr_cli(ctx, config):
     """
     Repository manager for Elasticsearch Curator.
     """
-    # Set up logging
-    if debug:
-        loglevel = 'DEBUG'
-    log_opts = {'loglevel':loglevel, 'logfile':logfile, 'logformat':logformat}
-    loginfo = LogInfo(log_opts)
-    logging.root.addHandler(loginfo.handler)
-    logging.root.setLevel(loginfo.numeric_log_level)
-    # Setting up NullHandler to handle nested elasticsearch.trace Logger
-    # instance in elasticsearch python client
-    logging.getLogger('elasticsearch.trace').addHandler(NullHandler())
+    ctx.obj = {}
+    ctx.obj['client_args'] = process_config(config)
+    logger = logging.getLogger(__name__)
+    logger.debug('Client and logging options validated.')
 
 @repo_mgr_cli.group('create')
 @click.pass_context
@@ -144,7 +122,7 @@ def show(ctx):
     """
     Show all repositories
     """
-    client = get_client(**ctx.parent.params)
+    client = get_client(**ctx.obj['client_args'])
     show_repos(client)
 
 @repo_mgr_cli.command('delete')
@@ -155,11 +133,10 @@ def show(ctx):
 @click.pass_context
 def _delete(ctx, repository):
     """Delete an Elasticsearch repository"""
-    client = get_client(**ctx.parent.params)
+    client = get_client(**ctx.obj['client_args'])
     try:
         logger.info('Deleting repository {0}...'.format(repository))
         client.snapshot.delete_repository(repository=repository)
-        # sys.exit(0)
     except elasticsearch.NotFoundError:
         logger.error(
             'Unable to delete repository: {0}  Not Found.'.format(repository))
diff --git a/curator/singletons.py b/curator/singletons.py
new file mode 100644
index 0000000..18e84f1
--- /dev/null
+++ b/curator/singletons.py
@@ -0,0 +1,695 @@
+import os, sys
+import yaml, json
+import logging
+import click
+from voluptuous import Schema
+from .defaults import settings
+from .validators import SchemaCheck, config_file, options
+from .config_utils import test_config, set_logging
+from .exceptions import *
+from .utils import *
+from .indexlist import IndexList
+from .snapshotlist import SnapshotList
+from .actions import *
+from ._version import __version__
+
+CLASS_MAP = {
+    'alias' :  Alias,
+    'allocation' : Allocation,
+    'close' : Close,
+    'cluster_routing' : ClusterRouting,
+    'create_index' : CreateIndex,
+    'delete_indices' : DeleteIndices,
+    'delete_snapshots' : DeleteSnapshots,
+    'forcemerge' : ForceMerge,
+    'open' : Open,
+    'replicas' : Replicas,
+    'restore' : Restore,
+    'snapshot' : Snapshot,
+}
+
+EXCLUDED_OPTIONS = [
+    'ignore_empty_list', 'timeout_override',
+    'continue_if_exception', 'disable_action'
+]
+
+def validate_filter_json(ctx, param, value):
+    try:
+        filter_list = ensure_list(json.loads(value))
+        return filter_list
+    except ValueError:
+        raise click.BadParameter('Invalid JSON: {0}'.format(value))
+
+def false_to_none(ctx, param, value):
+    try:
+        if value:
+            return True
+        else:
+            return None
+    except ValueError:
+        raise click.BadParameter('Invalid value: {0}'.format(value))
+
+def filter_schema_check(action, filter_dict):
+    valid_filters = SchemaCheck(
+        filter_dict,
+        Schema(filters.Filters(action, location='singleton')),
+        'filters',
+        '{0} singleton action "filters"'.format(action)
+    ).result()
+    return validate_filters(action, valid_filters)
+
+def _actionator(action, action_obj, dry_run=True):
+    logger = logging.getLogger(__name__)
+    logger.debug('Doing the singleton "{0}" action here.'.format(action))
+    try:
+        if dry_run:
+            action_obj.do_dry_run()
+        else:
+            action_obj.do_action()
+    except Exception as e:
+        if str(type(e)) == "<class 'curator.exceptions.NoIndices'>" or \
+            str(type(e)) == "<class 'curator.exceptions.NoSnapshots'>":
+            logger.error(
+                'Unable to complete action "{0}".  No actionable items '
+                'in list: {1}'.format(action, type(e))
+            )
+        else:
+            logger.error(
+                'Failed to complete action: {0}.  {1}: '
+                '{2}'.format(action, type(e), e)
+            )
+        sys.exit(1)
+    logger.info('Singleton "{0}" action completed.'.format(action))
+
+def _check_empty_list(list_object, ignore=False):
+    logger = logging.getLogger(__name__)
+    logger.debug('Testing for empty list object')
+    try:
+        list_object.empty_list_check()
+    except (NoIndices, NoSnapshots) as e:
+        if str(type(e)) == "<class 'curator.exceptions.NoIndices'>":
+            otype = 'index'
+        else:
+            otype = 'snapshot'
+        if ignore:
+            logger.info(
+                'Singleton action not performed: empty {0} list'.format(otype)
+            )
+            sys.exit(0)
+        else:
+            logger.error(
+                'Singleton action failed due to empty {0} list'.format(otype)
+            )
+            sys.exit(1)
+
+
+def _prune_excluded(option_dict):
+    for k in list(option_dict.keys()):
+        if k in EXCLUDED_OPTIONS:
+            del option_dict[k]
+    return option_dict
+
+def option_schema_check(action, option_dict):
+    clean_options = SchemaCheck(
+        prune_nones(option_dict),
+        options.get_schema(action),
+        'options',
+        '{0} singleton action "options"'.format(action)
+    ).result()
+    return _prune_excluded(clean_options)
+
+def config_override(ctx, config_dict):
+    if config_dict == None:
+        config_dict = {}
+    for k in ['client', 'logging']:
+        if not k in config_dict:
+            config_dict[k] = {}
+    for k in list(ctx.params.keys()):
+        if k in ['dry_run', 'config']:
+            pass
+        elif k == 'host':
+            if 'host' in ctx.params and ctx.params['host'] is not None:
+                config_dict['client']['hosts'] = ctx.params[k]
+        elif k in ['loglevel', 'logfile', 'logformat']:
+            if k in ctx.params and ctx.params[k] is not None:
+                config_dict['logging'][k] = ctx.params[k]
+        else:
+            if k in ctx.params and ctx.params[k] is not None:
+                config_dict['client'][k] = ctx.params[k]
+    # After override, prune the nones
+    for k in ['client', 'logging']:
+        config_dict[k] = prune_nones(config_dict[k])
+    return SchemaCheck(config_dict, config_file.client(),
+        'Client Configuration', 'full configuration dictionary').result()
+
+@click.command(name='allocation')
+@click.option(
+    '--key', type=str, required=True, help='Node identification tag'
+)
+@click.option(
+    '--value', type=str, required=True, help='Value associated with --key'
+)
+@click.option(
+    '--allocation_type', type=str,
+    help='Must be one of: require, include, or exclude'
+)
+@click.option(
+    '--wait_for_completion', is_flag=True, help='Wait for operation to complete'
+)
+@click.option(
+    '--ignore_empty_list', is_flag=True,
+    help='Do not raise exception if there are no actionable indices'
+)
+@click.option(
+    '--filter_list', callback=validate_filter_json,
+    help='JSON string representing an array of filters.', required=True
+)
+@click.pass_context
+def allocation_singleton(
+    ctx, key, value, allocation_type, wait_for_completion, ignore_empty_list,
+    filter_list):
+    """
+    Shard Routing Allocation
+    """
+    action = 'allocation'
+    action_class = CLASS_MAP[action]
+    c_args = ctx.obj['config']['client']
+    client = get_client(**c_args)
+    logger = logging.getLogger(__name__)
+    raw_options = {
+        'key': key,
+        'value': value,
+        'allocation_type': allocation_type,
+        'wait_for_completion': wait_for_completion,
+    }
+    logger.debug('Validating provided options: {0}'.format(raw_options))
+    mykwargs = option_schema_check(action, raw_options)
+    mykwargs.update(
+        { 'timeout': c_args['timeout'] if c_args['timeout'] else 30 }
+    )
+    logger.debug('Validating provided filters: {0}'.format(filter_list))
+    clean_filters = {
+        'filters': filter_schema_check(action, filter_list)
+    }
+    ilo = IndexList(client)
+    ilo.iterate_filters(clean_filters)
+    _check_empty_list(ilo, ignore_empty_list)
+    action_obj = action_class(ilo, **mykwargs)
+    ### Do the action
+    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
+
+
+@click.command(name='close')
+@click.option(
+    '--delete_aliases', is_flag=True,
+    help='Delete all aliases from indices to be closed'
+)
+@click.option(
+    '--ignore_empty_list', is_flag=True,
+    help='Do not raise exception if there are no actionable indices'
+)
+@click.option(
+    '--filter_list', callback=validate_filter_json,
+    help='JSON string representing an array of filters.', required=True
... 5052 lines suppressed ...

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/elasticsearch-curator.git



More information about the Python-modules-commits mailing list