[Python-modules-commits] [dask.distributed] 01/12: New upstream version 1.18.0+ds.1

Diane Trout diane@moszumanska.debian.org
Tue Aug 1 23:18:33 UTC 2017


This is an automated email from the git hooks/post-receive script.

diane pushed a commit to branch master
in repository dask.distributed.

commit e344b306f8f49aea633c6ba160c7e2bf8bc81379
Author: Diane Trout <diane@ghic.org>
Date:   Fri Jul 21 15:50:46 2017 -0700

    New upstream version 1.18.0+ds.1
---
 .gitignore                                         |    5 +
 .travis.yml                                        |   81 +-
 MANIFEST.in                                        |    2 +
 appveyor.yml                                       |    3 -
 continuous_integration/setup_conda_environment.cmd |   28 +-
 continuous_integration/travis/install.sh           |   83 ++
 continuous_integration/travis/run_tests.sh         |   35 +
 distributed/__init__.py                            |   14 +-
 distributed/asyncio.py                             |  169 +++
 distributed/batched.py                             |    5 +
 distributed/bokeh/application.py                   |  119 --
 distributed/bokeh/background/server_lifecycle.py   |    4 +-
 distributed/bokeh/components.py                    |    8 +-
 distributed/bokeh/core.py                          |   32 +-
 distributed/bokeh/scheduler.py                     |  737 +++++++++-
 distributed/bokeh/status/__init__.py               |    0
 distributed/bokeh/status/main.py                   |   40 -
 distributed/bokeh/status/templates/index.html      |   74 -
 distributed/bokeh/task_stream.py                   |  113 ++
 distributed/bokeh/tasks/__init__.py                |    0
 distributed/bokeh/tasks/main.py                    |   20 -
 distributed/bokeh/template.html                    |  115 ++
 distributed/bokeh/tests/test_application.py        |   49 -
 distributed/bokeh/tests/test_scheduler_bokeh.py    |  189 ++-
 distributed/bokeh/tests/test_task_stream.py        |   41 +
 distributed/bokeh/tests/test_worker_bokeh.py       |   13 +-
 distributed/bokeh/worker.py                        |   96 +-
 distributed/bokeh/workers/__init__.py              |    0
 distributed/bokeh/workers/main.py                  |   30 -
 distributed/bokeh/workers/templates/index.html     |   74 -
 distributed/cfexecutor.py                          |  168 +++
 distributed/channels.py                            |  300 ----
 distributed/cli/dask_scheduler.py                  |  107 +-
 distributed/cli/dask_worker.py                     |  171 ++-
 distributed/cli/tests/test_cli_utils.py            |   10 +-
 distributed/cli/tests/test_dask_scheduler.py       |  123 +-
 distributed/cli/tests/test_dask_worker.py          |    6 +-
 distributed/cli/tests/test_tls_cli.py              |   60 +
 distributed/cli/utils.py                           |    2 +-
 distributed/client.py                              | 1108 ++++++++++-----
 distributed/collections.py                         |  271 ----
 distributed/comm/__init__.py                       |   18 -
 distributed/comm/addressing.py                     |    4 +-
 distributed/comm/core.py                           |   37 +-
 distributed/comm/inproc.py                         |   29 +-
 distributed/comm/registry.py                       |    4 +-
 distributed/comm/tcp.py                            |  238 +++-
 distributed/comm/tests/test_comms.py               |  346 +++--
 distributed/comm/utils.py                          |   66 +-
 distributed/comm/zmq.py                            |  301 ----
 distributed/comm/zmqimpl.py                        |  239 ----
 distributed/compatibility.py                       |    7 +-
 distributed/config.py                              |  143 +-
 distributed/config.yaml                            |   19 +
 distributed/core.py                                |  108 +-
 distributed/deploy/adaptive.py                     |    2 +-
 distributed/deploy/local.py                        |  137 +-
 distributed/deploy/ssh.py                          |   87 +-
 distributed/deploy/tests/test_adaptive.py          |    5 +-
 distributed/deploy/tests/test_local.py             |  115 +-
 distributed/diagnostics/plugin.py                  |   13 +
 distributed/diagnostics/progress.py                |   77 +-
 distributed/diagnostics/progress_stream.py         |    6 +-
 distributed/diagnostics/progressbar.py             |    4 +-
 distributed/diagnostics/tests/test_plugin.py       |   44 +-
 distributed/diagnostics/tests/test_progress.py     |   37 +-
 .../diagnostics/tests/test_progress_stream.py      |    8 +-
 distributed/hdfs.py                                |   36 +-
 distributed/http/core.py                           |    2 +-
 distributed/joblib.py                              |  108 +-
 distributed/nanny.py                               |  626 +++++----
 distributed/node.py                                |   37 +
 distributed/preloading.py                          |   51 +
 distributed/process.py                             |  273 ++++
 distributed/protocol/__init__.py                   |    9 +-
 distributed/protocol/compression.py                |   74 +-
 distributed/protocol/core.py                       |   21 +-
 distributed/protocol/keras.py                      |   10 +-
 distributed/protocol/numpy.py                      |   81 +-
 distributed/protocol/serialize.py                  |   47 +-
 distributed/protocol/sparse.py                     |   31 +
 distributed/protocol/tests/test_h5py.py            |    4 +-
 distributed/protocol/tests/test_netcdf4.py         |    2 +-
 distributed/protocol/tests/test_numpy.py           |   74 +-
 distributed/protocol/tests/test_protocol.py        |   60 +-
 distributed/protocol/tests/test_protocol_utils.py  |   11 +-
 distributed/protocol/tests/test_serialize.py       |   29 +-
 distributed/protocol/tests/test_sparse.py          |   38 +
 distributed/protocol/utils.py                      |   69 +-
 distributed/queues.py                              |  249 ++++
 distributed/recreate_exceptions.py                 |    2 +-
 distributed/scheduler.py                           |  384 ++++--
 distributed/security.py                            |  146 ++
 distributed/sizeof.py                              |   24 +-
 distributed/stealing.py                            |    5 -
 distributed/submit.py                              |   14 +-
 distributed/system_monitor.py                      |   12 +
 distributed/tests/make_tls_certs.py                |  167 +++
 distributed/tests/py3_test_asyncio.py              |  336 +++++
 distributed/tests/py3_test_client.py               |   39 +-
 distributed/tests/py3_test_utils_tst.py            |   10 +
 distributed/tests/test_as_completed.py             |   39 +-
 distributed/tests/test_asyncio.py                  |    5 +
 distributed/tests/test_asyncprocess.py             |  288 ++++
 distributed/tests/test_channels.py                 |  204 ---
 distributed/tests/test_client.py                   | 1433 +++++++++++++-------
 distributed/tests/test_client_executor.py          |  240 ++++
 distributed/tests/test_collections.py              |  211 +--
 distributed/tests/test_compatibility.py            |    5 +-
 distributed/tests/test_config.py                   |  271 ++++
 distributed/tests/test_core.py                     |  130 +-
 distributed/tests/test_hdfs.py                     |   41 +-
 distributed/tests/test_joblib.py                   |   91 +-
 distributed/tests/test_nanny.py                    |  107 +-
 distributed/tests/test_preload.py                  |   65 +
 distributed/tests/test_publish.py                  |   60 +-
 distributed/tests/test_queues.py                   |  229 ++++
 distributed/tests/test_resources.py                |    4 +-
 distributed/tests/test_scheduler.py                |   92 +-
 distributed/tests/test_security.py                 |  368 +++++
 distributed/tests/test_sizeof.py                   |    7 +
 distributed/tests/test_steal.py                    |   37 +-
 distributed/tests/test_stress.py                   |   15 +-
 distributed/tests/test_system_monitor.py           |   35 +
 distributed/tests/test_threadpoolexecutor.py       |    2 -
 distributed/tests/test_tls_functional.py           |  171 +++
 distributed/tests/test_utils.py                    |  126 +-
 distributed/tests/test_utils_comm.py               |    3 +-
 distributed/tests/test_utils_test.py               |   37 +-
 distributed/tests/test_variable.py                 |  207 +++
 distributed/tests/test_worker.py                   |  259 +++-
 distributed/tests/test_worker_client.py            |  108 +-
 distributed/tests/test_worker_failure.py           |   78 +-
 distributed/tests/tls-ca-cert.pem                  |   78 ++
 distributed/tests/tls-ca-key.pem                   |   28 +
 distributed/tests/tls-cert-chain.pem               |  135 ++
 distributed/tests/tls-cert.pem                     |   57 +
 distributed/tests/tls-key-cert.pem                 |   73 +
 distributed/tests/tls-key.pem                      |   16 +
 distributed/tests/tls-self-signed-cert.pem         |   14 +
 distributed/tests/tls-self-signed-key.pem          |   16 +
 distributed/threadpoolexecutor.py                  |   24 +-
 distributed/utils.py                               |  235 +++-
 distributed/utils_comm.py                          |   37 +-
 distributed/utils_test.py                          |  287 +++-
 distributed/variable.py                            |  207 +++
 distributed/versions.py                            |    6 +-
 distributed/worker.py                              |  447 ++++--
 distributed/worker_client.py                       |  166 +--
 docs/source/adaptive.rst                           |    5 +-
 docs/source/api.rst                                |   79 +-
 docs/source/asynchronous.rst                       |  147 ++
 docs/source/changelog.rst                          |   81 ++
 docs/source/channels.rst                           |  149 --
 docs/source/client.rst                             |   22 +-
 docs/source/communications.rst                     |    8 +-
 docs/source/conf.py                                |    8 +
 docs/source/configuration.rst                      |  176 +++
 docs/source/develop.rst                            |    3 +-
 docs/source/foundations.rst                        |    4 +-
 docs/source/index.rst                              |   22 +-
 docs/source/joblib.rst                             |   14 +
 docs/source/quickstart.rst                         |   14 +-
 docs/source/related-work.rst                       |   33 +-
 docs/source/scheduling-policies.rst                |   27 +
 docs/source/setup.rst                              |   61 +-
 docs/source/tls.rst                                |   98 ++
 requirements.txt                                   |    8 +-
 setup.py                                           |   18 +-
 169 files changed, 12574 insertions(+), 4877 deletions(-)

diff --git a/.gitignore b/.gitignore
index 81cc238..7e11023 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,8 @@
 *.pyc
 *.py~
+build/
+*.egg-info
 docs/build
+continuous_integration/hdfs-initialized
+.cache
+.#*
diff --git a/.travis.yml b/.travis.yml
index a5d7c4c..18e51c7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,20 +1,27 @@
-language: python
+language: generic
 sudo: false
 dist: trusty
 services:
   - docker
 
-matrix:
-  fast_finish: true
-
 env:
   matrix:
-    - PYTHON=2.7 PACKAGES="blosc futures faulthandler"
-    - PYTHON=3.4 COVERAGE=true DASK_EXPERIMENTAL_ZMQ=1
-    - PYTHON=3.5 CRICK=true
-    - PYTHON=3.6 PACKAGES=blosc
+    - PYTHON=2.7 PACKAGES="python-blosc futures faulthandler"
+    - PYTHON=3.5 COVERAGE=true PACKAGES=python-blosc CRICK=true
+    - PYTHON=3.6
     - HDFS=true PYTHON=2.7
-    - HDFS=true PYTHON=3.5 PACKAGES=blosc
+    - HDFS=true PYTHON=3.5 PACKAGES=python-blosc
+
+matrix:
+  fast_finish: true
+  include:
+  - os: osx
+    env: PYTHON=3.6 RUNSLOW=false
+  # Together with fast_finish, allow build to be marked successful before the OS X job finishes
+  allow_failures:
+  - os: osx
+    # This needs to be the exact same line as above
+    env: PYTHON=3.6 RUNSLOW=false
 
 addons:
   hosts:
@@ -33,62 +40,14 @@ before_install:
     fi;
 
 install:
-  # Note we disable progress bars to make Travis log loading much faster
-
-  # Install conda
-  - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
-  - bash miniconda.sh -b -p $HOME/miniconda
-  - export PATH="$HOME/miniconda/bin:$PATH"
-  - conda config --set always_yes yes --set changeps1 no
-  - conda update -q conda
-
-  # Install dependencies
-  - conda create -q -n test-environment python=$PYTHON
-  - source activate test-environment
-  - conda install -q pytest pytest-timeout pytest-faulthandler coverage tornado toolz dill dask ipywidgets psutil bokeh requests joblib mock ipykernel jupyter_client h5py netcdf4 lz4 paramiko tblib click flake8 $PACKAGES -c conda-forge
-  - |
-    if [[ $HDFS == true ]]; then
-        conda install -q libxml2 krb5 boost
-        conda install -q -c conda-forge libhdfs3 libgsasl libntlm
-        pip install -q git+https://github.com/dask/hdfs3 --upgrade
-    fi;
-  - pip install -q git+https://github.com/dask/dask.git --upgrade
-  - pip install -q git+https://github.com/joblib/joblib.git --upgrade
-  - pip install -q git+https://github.com/dask/s3fs.git --upgrade
-  - pip install -q git+https://github.com/dask/zict.git --upgrade
-  - pip install -q sortedcollections msgpack-python
-  - pip install -q keras --upgrade --no-deps
-  - |
-    if [[ $CRICK == true ]]; then
-        conda install -q cython
-        pip install git+https://github.com/jcrist/crick.git
-    fi;
-
-  # Install distributed
-  - pip install --no-deps -e .
+  - source continuous_integration/travis/install.sh
 
 script:
-    - export PYTEST_OPTIONS="--verbose -r s --timeout-method=thread --timeout=300 --runslow --durations=20"
-    - |
-      if [[ $HDFS == true ]]; then
-        py.test distributed/tests/test_hdfs.py $PYTEST_OPTIONS
-        if [ $? -ne 0 ]; then
-            # Diagnose test error
-            echo "--"
-            echo "-- HDFS namenode log follows"
-            echo "--"
-            docker exec -it $(docker ps -q) bash -c "tail -n50 /usr/local/hadoop/logs/hadoop-root-namenode-hdfs-container.log"
-            (exit 1)
-        fi
-      elif [[ $COVERAGE == true ]]; then
-        coverage run $(which py.test) distributed -m "not avoid_travis" $PYTEST_OPTIONS;
-      else
-        py.test -m "not avoid_travis" distributed $PYTEST_OPTIONS;
-      fi;
-    - flake8 distributed/*.py distributed/{bokeh,protocol,deploy}/*.py # no tests yet
+  - source continuous_integration/travis/run_tests.sh
+  - flake8 distributed/*.py distributed/{bokeh,comm,deploy,protocol}/*.py # no tests yet
 
 after_success:
-    - if [[ $COVERAGE == true ]]; then coverage report; pip install -q coveralls ; coveralls ; fi
+  - if [[ $COVERAGE == true ]]; then coverage report; pip install -q coveralls ; coveralls ; fi
 
 notifications:
   email: false
diff --git a/MANIFEST.in b/MANIFEST.in
index 3338289..14e1cc8 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,7 @@
 recursive-include distributed *.py
 recursive-include distributed *.js
 recursive-include distributed *.coffee
+recursive-include distributed *.html
 recursive-include docs *.rst
 
 include setup.py
@@ -10,6 +11,7 @@ include MANIFEST.in
 include requirements.txt
 include distributed/config.yaml
 include distributed/tests/mytestegg-1.0.0-py3.4.egg
+include distributed/tests/*.pem
 
 prune docs/_build
 include versioneer.py
diff --git a/appveyor.yml b/appveyor.yml
index 76a5295..496640b 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -22,9 +22,6 @@ init:
   - set PATH=%MINICONDA%;%MINICONDA%/Scripts;%MINICONDA%/Library/bin;%PATH%
 
 install:
-  # Update to a known good conda
-  # (to workaround http://help.appveyor.com/discussions/problems/4910)
-  - conda install -q -y conda=4.2.9
   - continuous_integration\\setup_conda_environment.cmd
 
 build_script:
diff --git a/continuous_integration/setup_conda_environment.cmd b/continuous_integration/setup_conda_environment.cmd
index e9820f3..9a2c2c3 100644
--- a/continuous_integration/setup_conda_environment.cmd
+++ b/continuous_integration/setup_conda_environment.cmd
@@ -8,6 +8,8 @@ set PIP_INSTALL=pip install -q
 
 @rem Deactivate any environment
 call deactivate
+@rem Update conda
+%CONDA% update -q -y conda
 @rem Display root environment (for debugging)
 %CONDA% list
 @rem Clean up any left-over from a previous build
@@ -15,7 +17,29 @@ call deactivate
 
 @rem Create test environment
 @rem (note: no cytoolz as it seems to prevent faulthandler tracebacks on crash)
-%CONDA% create -n %CONDA_ENV% -q -y python=%PYTHON% pytest toolz dill futures dask ipywidgets psutil bokeh requests joblib mock ipykernel jupyter_client tblib msgpack-python cloudpickle click zict lz4 -c conda-forge
+%CONDA% create -n %CONDA_ENV% -q -y ^
+    bokeh ^
+    click ^
+    cloudpickle ^
+    dask ^
+    dill ^
+    futures ^
+    lz4 ^
+    ipykernel ^
+    ipywidgets ^
+    joblib ^
+    jupyter_client ^
+    mock ^
+    msgpack-python ^
+    psutil ^
+    pytest ^
+    python=%PYTHON% ^
+    requests ^
+    toolz ^
+    tblib ^
+    tornado=4.5 ^
+    zict ^
+    -c conda-forge
 
 call activate %CONDA_ENV%
 
@@ -29,5 +53,7 @@ call activate %CONDA_ENV%
 @rem Display final environment (for reproducing)
 %CONDA% list
 %CONDA% list --explicit
+where python
+where pip
 pip list
 python -m site
diff --git a/continuous_integration/travis/install.sh b/continuous_integration/travis/install.sh
new file mode 100644
index 0000000..9e91013
--- /dev/null
+++ b/continuous_integration/travis/install.sh
@@ -0,0 +1,83 @@
+#
+# This file should be source'd, so as to update the caller's environment
+# (such as the PATH variable)
+#
+
+# Note we disable progress bars to make Travis log loading much faster
+
+# Install conda
+case "$(uname -s)" in
+    'Darwin')
+        MINICONDA_FILENAME="Miniconda3-latest-MacOSX-x86_64.sh"
+        ;;
+    'Linux')
+        MINICONDA_FILENAME="Miniconda3-latest-Linux-x86_64.sh"
+        ;;
+    *)  ;;
+esac
+
+wget https://repo.continuum.io/miniconda/$MINICONDA_FILENAME -O miniconda.sh
+bash miniconda.sh -b -p $HOME/miniconda
+export PATH="$HOME/miniconda/bin:$PATH"
+conda config --set always_yes yes --set changeps1 no
+conda update -q conda
+
+# Create conda environment
+conda create -q -n test-environment python=$PYTHON
+source activate test-environment
+
+# Install dependencies
+# (Tornado pinned to 4.5 until we fix our compatibility with Tornado 5.0)
+conda install -q -c conda-forge \
+    bokeh \
+    click \
+    coverage \
+    dask \
+    dill \
+    flake8 \
+    h5py \
+    ipykernel \
+    ipywidgets \
+    joblib \
+    jupyter_client \
+    lz4 \
+    mock \
+    netcdf4 \
+    paramiko \
+    psutil \
+    pytest \
+    pytest-faulthandler \
+    pytest-timeout \
+    requests \
+    tblib \
+    toolz \
+    tornado=4.5 \
+    $PACKAGES
+
+if [[ $HDFS == true ]]; then
+    conda install -q libxml2 krb5 boost
+    conda install -q -c conda-forge libhdfs3 libgsasl libntlm
+    pip install -q git+https://github.com/dask/hdfs3 --upgrade
+fi;
+
+pip install -q git+https://github.com/dask/dask.git --upgrade
+pip install -q git+https://github.com/joblib/joblib.git --upgrade
+pip install -q git+https://github.com/dask/s3fs.git --upgrade
+pip install -q git+https://github.com/dask/zict.git --upgrade
+pip install -q sortedcollections msgpack-python
+pip install -q keras --upgrade --no-deps
+
+if [[ $CRICK == true ]]; then
+    conda install -q cython
+    pip install -q git+https://github.com/jcrist/crick.git
+fi;
+
+# Install distributed
+pip install --no-deps -e .
+
+# For debugging
+echo -e "--\n--Conda Environment\n--"
+conda list
+
+echo -e "--\n--Pip Environment\n--"
+pip list --format=columns
diff --git a/continuous_integration/travis/run_tests.sh b/continuous_integration/travis/run_tests.sh
new file mode 100644
index 0000000..8bacb6a
--- /dev/null
+++ b/continuous_integration/travis/run_tests.sh
@@ -0,0 +1,35 @@
+export PYTEST_OPTIONS="--verbose -r s --timeout-method=thread --timeout=300 --durations=20"
+if [[ $RUNSLOW != false ]]; then
+    export PYTEST_OPTIONS="$PYTEST_OPTIONS --runslow"
+fi
+
+# On OS X builders, the default open files limit is too small (256)
+if [[ $TRAVIS_OS_NAME == osx ]]; then
+    ulimit -n 8192
+fi
+
+echo "--"
+echo "-- Soft limits"
+echo "--"
+ulimit -a -S
+
+echo "--"
+echo "-- Hard limits"
+echo "--"
+ulimit -a -H
+
+if [[ $HDFS == true ]]; then
+    py.test distributed/tests/test_hdfs.py $PYTEST_OPTIONS
+    if [ $? -ne 0 ]; then
+        # Diagnose test error
+        echo "--"
+        echo "-- HDFS namenode log follows"
+        echo "--"
+        docker exec -it $(docker ps -q) bash -c "tail -n50 /usr/local/hadoop/logs/hadoop-root-namenode-hdfs-container.log"
+        (exit 1)
+    fi
+elif [[ $COVERAGE == true ]]; then
+    coverage run $(which py.test) distributed -m "not avoid_travis" $PYTEST_OPTIONS;
+else
+    py.test -m "not avoid_travis" distributed $PYTEST_OPTIONS;
+fi;
diff --git a/distributed/__init__.py b/distributed/__init__.py
index 49bd8fa..52cb8ab 100644
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -4,19 +4,17 @@ from .config import config
 from .core import connect, rpc
 from .deploy import LocalCluster
 from .diagnostics import progress
-from .client import (Client, Executor, CompatibleExecutor, wait, as_completed,
-        default_client)
+from .client import (Client, Executor, CompatibleExecutor,
+                     wait, as_completed, default_client, fire_and_forget,
+                     Future)
 from .nanny import Nanny
+from .queues import Queue
 from .scheduler import Scheduler
 from .utils import sync
-from .worker import Worker
+from .variable import Variable
+from .worker import Worker, get_worker, get_client, secede
 from .worker_client import local_client, worker_client
 
-try:
-    from .collections import futures_to_collection
-except:
-    pass
-
 from ._version import get_versions
 versions = get_versions()
 __version__ = versions['version']
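
A usage sketch of the new top-level exports above (not part of the commit; assumes a default LocalCluster can start, and 'work'/'answer' are arbitrary names):

    from distributed import Client, Queue, Variable

    client = Client()        # starts a LocalCluster by default
    q = Queue('work')        # named queue shared across the cluster
    q.put(1)
    assert q.get() == 1

    v = Variable('answer')   # named, mutable, cluster-wide value
    v.set(42)
    assert v.get() == 42
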
diff --git a/distributed/asyncio.py b/distributed/asyncio.py
new file mode 100644
index 0000000..47595c6
--- /dev/null
+++ b/distributed/asyncio.py
@@ -0,0 +1,169 @@
+"""Experimental interface for asyncio, may disappear without warning"""
+
+# flake8: noqa
+
+import asyncio
+from functools import wraps
+
+from toolz import merge
+
+from tornado.platform.asyncio import BaseAsyncIOLoop
+from tornado.platform.asyncio import to_asyncio_future
+
+from . import client
+from .client import Client, Future
+from .utils import ignoring
+
+
+def to_asyncio(fn, **default_kwargs):
+    """Converts Tornado gen.coroutines and futures to asyncio ones"""
+    @wraps(fn)
+    def convert(*args, **kwargs):
+        if default_kwargs:
+            kwargs = merge(default_kwargs, kwargs)
+        return to_asyncio_future(fn(*args, **kwargs))
+    return convert
+
+
+class AioFuture(Future):
+    """Provides awaitable syntax for a distributed Future"""
+
+    def __await__(self):
+        return self.result().__await__()
+
+    result = to_asyncio(Future._result)
+    exception = to_asyncio(Future._exception)
+    traceback = to_asyncio(Future._traceback)
+
+
+class AioClient(Client):
+    """ Connect to and drive computation on a distributed Dask cluster
+
+    This class provides an asyncio compatible async/await interface for
+    dask.distributed.
+
+    The Client connects users to a dask.distributed compute cluster. It
+    provides an asynchronous user interface around functions and futures.
+    This class resembles executors in ``concurrent.futures`` but also
+    allows ``Future`` objects within ``submit/map`` calls.
+
+    AioClient is an **experimental** interface for distributed and may
+    disappear without warning!
+
+    Parameters
+    ----------
+    address: string, or Cluster
+        This can be the address of a ``Scheduler`` server like a string
+        ``'127.0.0.1:8786'`` or a cluster object like ``LocalCluster()``
+
+    Examples
+    --------
+    Provide cluster's scheduler address on initialization::
+
+        client = AioClient('127.0.0.1:8786')
+
+    Start the client::
+
+        async def start_the_client():
+            client = await AioClient()
+
+            # Use the client....
+
+            await client.shutdown()
+
+    An ``async with`` statement is a more convenient way to start and shut down
+    the client::
+
+        async def start_the_client():
+            async with AioClient() as client:
+                # Use the client within this block.
+                pass
+
+    Use the ``submit`` method to send individual computations to the cluster,
+    and await the returned future to retrieve the result::
+
+        async def add_two_numbers():
+            async with AioClient() as client:
+                a = client.submit(add, 1, 2)
+                result = await a
+
+    Continue using submit or map on results to build up larger computations,
+    and gather results with the ``gather`` method::
+
+        async def gather_some_results():
+            async with AioClient() as client:
+                a = client.submit(add, 1, 2)
+                b = client.submit(add, 10, 20)
+                c = client.submit(add, a, b)
+                result = await client.gather([c])
+
+    See Also
+    --------
+    distributed.client.Client: Blocking Client
+    distributed.scheduler.Scheduler: Internal scheduler
+    """
+    _Future = AioFuture
+
+    def __init__(self, *args, **kwargs):
+        if kwargs.get('set_as_default'):
+            raise Exception("AioClient instance can't be set as default")
+
+        loop = asyncio.get_event_loop()
+        ioloop = BaseAsyncIOLoop(loop)
+        super().__init__(*args, loop=ioloop, set_as_default=False, asynchronous=True, **kwargs)
+
+    async def __aenter__(self):
+        await to_asyncio_future(self._started)
+        return self
+
+    async def __aexit__(self, type, value, traceback):
+        await self.shutdown()
+
+    def __await__(self):
+        return to_asyncio_future(self._started).__await__()
+
+    async def shutdown(self, fast=False):
+        if self.status == 'closed':
+            return
+
+        try:
+            future = self._shutdown(fast=fast)
+            await to_asyncio_future(future)
+
+            with ignoring(AttributeError):
+                future = self.cluster._close()
+                await to_asyncio_future(future)
+                self.cluster.status = 'closed'
+        finally:
+            BaseAsyncIOLoop.clear_current()
+
+    def __del__(self):
+        # Override Client.__del__ to avoid running self.shutdown()
+        assert self.status != 'running'
+
+    gather = to_asyncio(Client._gather)
+    scatter = to_asyncio(Client._scatter)
+    cancel = to_asyncio(Client._cancel)
+    publish_dataset = to_asyncio(Client._publish_dataset)
+    get_dataset = to_asyncio(Client._get_dataset)
+    run_on_scheduler = to_asyncio(Client._run_on_scheduler)
+    run = to_asyncio(Client._run)
+    run_coroutine = to_asyncio(Client._run_coroutine)
+    get = to_asyncio(Client.get, sync=False)
+    upload_environment = to_asyncio(Client._upload_environment)
+    restart = to_asyncio(Client._restart)
+    upload_file = to_asyncio(Client._upload_file)
+    upload_large_file = to_asyncio(Client._upload_large_file)
+    rebalance = to_asyncio(Client._rebalance)
+    replicate = to_asyncio(Client._replicate)
+    start_ipython_workers = to_asyncio(Client._start_ipython_workers)
+
+    def __enter__(self):
+        raise RuntimeError("Use AioClient in an 'async with' block, not 'with'")
+
+
+class as_completed(client.as_completed):
+    __anext__ = to_asyncio(client.as_completed.__anext__)
+
+
+wait = to_asyncio(client._wait)
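
Pulled together, the experimental interface above supports a sketch like the following (illustrative only, not part of the commit; the add function is hypothetical and a scheduler is assumed at the default local address):

    import asyncio
    from distributed.asyncio import AioClient

    def add(x, y):                # hypothetical task function
        return x + y

    async def main():
        # 'async with' awaits startup on entry and shutdown() on exit
        async with AioClient('127.0.0.1:8786') as client:
            future = client.submit(add, 1, 2)   # returns an AioFuture
            print(await future)                 # awaitable via __await__

    asyncio.get_event_loop().run_until_complete(main())
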
diff --git a/distributed/batched.py b/distributed/batched.py
index 21aeab0..6fa464e 100644
--- a/distributed/batched.py
+++ b/distributed/batched.py
@@ -1,5 +1,6 @@
 from __future__ import print_function, division, absolute_import
 
+from collections import deque
 import logging
 
 from tornado import gen, locks
@@ -50,6 +51,7 @@ class BatchedSend(object):
         self.batch_count = 0
         self.byte_count = 0
         self.next_deadline = None
+        self.recent_message_log = deque(maxlen=100)
 
     def start(self, comm):
         self.comm = comm
@@ -78,6 +80,7 @@ class BatchedSend(object):
             self.batch_count += 1
             self.next_deadline = self.loop.time() + self.interval
             try:
+                self.recent_message_log.append(payload)
                 nbytes = yield self.comm.write(payload)
                 self.byte_count += nbytes
             except CommClosedError as e:
@@ -123,6 +126,8 @@ class BatchedSend(object):
     def abort(self):
         if self.comm is None:
             return
+        self.please_stop = True
+        self.buffer = []
         self.waker.set()
         if not self.comm.closed():
             self.comm.abort()
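
The new recent_message_log above uses a bounded deque as a ring buffer; a quick illustration of that idiom (not part of the commit):

    from collections import deque

    log = deque(maxlen=100)       # oldest entries are discarded silently
    for i in range(150):
        log.append(i)
    assert len(log) == 100
    assert log[0] == 50           # only the 100 most recent items remain
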
diff --git a/distributed/bokeh/application.py b/distributed/bokeh/application.py
deleted file mode 100644
index 29bf614..0000000
--- a/distributed/bokeh/application.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from __future__ import print_function, division, absolute_import
-
-import atexit
-import logging
-import os
-import socket
-import sys
-
-import bokeh
-import distributed.bokeh
-from toolz import get_in, concat
-
-from ..utils import ignoring
-from ..compatibility import logging_names
-from ..config import config
-
-
-dirname = os.path.dirname(distributed.__file__)
-paths = [os.path.join(dirname, 'bokeh', name)
-         for name in ['background', 'status', 'tasks', 'workers',
-             'memory-usage.py', 'task-stream.py', 'task-progress.py',
-             'resource-profiles.py', 'worker-table.py', 'processing-stacks.py']]
-
-logger = logging.getLogger(__name__)
-
-dask_dir = os.path.join(os.path.expanduser('~'), '.dask')
-if not os.path.exists(dask_dir):
-    os.mkdir(dask_dir)
-
-logging_level = get_in(['logging', 'bokeh'], config, 'critical')
-logging_level = logging_names[logging_level.upper()]
-
-
-class BokehWebInterface(object):
-
-    process = None
-
-    def __init__(self, host='127.0.0.1', http_port=9786, bokeh_port=8787,
-                 scheduler_address='tcp://127.0.0.1:8786',
-                 bokeh_whitelist=[], log_level=logging_level,
-                 show=False, prefix=None, use_xheaders=False, quiet=True):
-        self.port = bokeh_port
-        ip = socket.gethostbyname(host)
-
-        hosts = ['localhost',
-                 '127.0.0.1',
-                 ip,
-                 host]
-
-        with ignoring(Exception):
-            hosts.append(socket.gethostbyname(ip))
-        with ignoring(Exception):
-            hosts.append(socket.gethostbyname(socket.gethostname()))
-
-        hosts = ['%s:%d' % (h, bokeh_port) for h in hosts]
-
-        hosts.append("*")
-
-        hosts.extend(map(str, bokeh_whitelist))
-
-        args = ([sys.executable, '-m', 'bokeh', 'serve'] + paths +
-                ['--check-unused-sessions=50',
-                 '--unused-session-lifetime=1',
-                 '--allow-websocket-origin=*',
-                 '--port', str(bokeh_port)])
-        if bokeh.__version__ <= '0.12.4':
-            args += sum([['--host', h] for h in hosts], [])
-
-        if prefix:
-            args.extend(['--prefix', prefix])
-
-        if show:
-            args.append('--show')
-
-        if use_xheaders:
-            args.append('--use-xheaders')
-
-        if log_level in ('debug', 'info', 'warning', 'error', 'critical'):
-            args.extend(['--log-level', log_level])
-
-        bokeh_options = {'host': host,
-                         'http-port': http_port,
-                         'scheduler-address': scheduler_address,
-                         'bokeh-port': bokeh_port}
-
-        args.extend(['--args'] + list(map(str, concat(bokeh_options.items()))))
-
-        import subprocess
-        process = subprocess.Popen(args)
-        self.process = process
-
-        @atexit.register
-        def cleanup_process():
-            try:
-                process.terminate()
-            except OSError:
-                pass
-
-        if not quiet:
-            logger.info("Web UI: http://%s:%d/status/"
-                         % (ip, bokeh_port))
-
-    def close(self, join=True, timeout=None):
-        if self.process is not None and self.process.poll() is None:
-            self.process.terminate()
-
-    def __del__(self):
-        self.close()
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *args):
-        self.close()
-
-
-def bokeh_main(args):
-    from bokeh.command.bootstrap import main
-    main(args)
diff --git a/distributed/bokeh/background/server_lifecycle.py b/distributed/bokeh/background/server_lifecycle.py
index 2e2dfa3..6f6c1a5 100644
--- a/distributed/bokeh/background/server_lifecycle.py
+++ b/distributed/bokeh/background/server_lifecycle.py
@@ -41,7 +41,7 @@ def http_get(route):
         logger.info("Can not connect to %s", url, exc_info=True)
         return
     except HTTPError:
-        logger.warn("http route %s failed", route)
+        logger.warning("http route %s failed", route)
         return
     msg = json.loads(response.body.decode())
     messages[route]['deque'].append(msg)
@@ -58,7 +58,7 @@ def workers():
         response = yield client.fetch(
                 'http://%(host)s:%(http-port)d/workers.json' % options)
     except HTTPError:
-        logger.warn("workers http route failed")
+        logger.warning("workers http route failed")
         return
     msg = json.loads(response.body.decode())
     if msg:
diff --git a/distributed/bokeh/components.py b/distributed/bokeh/components.py
index 4361d72..4c1203b 100644
--- a/distributed/bokeh/components.py
+++ b/distributed/bokeh/components.py
@@ -2,6 +2,7 @@ from __future__ import print_function, division, absolute_import
 
 from bisect import bisect
 from operator import add
+from time import time
 
 from bokeh.layouts import row, column
 from bokeh.models import (
@@ -60,11 +61,12 @@ class TaskStream(DashboardComponent):
         self.last = 0
 
         self.source = ColumnDataSource(data=dict(
-            start=[], duration=[], key=[], name=[], color=[],
-            worker=[], y=[], worker_thread=[], alpha=[])
+            start=[time() - clear_interval], duration=[0.1], key=['start'],
+            name=['start'], color=['white'],
+            worker=['foo'], y=[0], worker_thread=[1], alpha=[0.0])
         )
 
-        x_range = DataRange1d()
+        x_range = DataRange1d(range_padding=0)
         y_range = DataRange1d(range_padding=0)
 
         self.root = Plot(
diff --git a/distributed/bokeh/core.py b/distributed/bokeh/core.py
index 4f85520..2d0180a 100644
--- a/distributed/bokeh/core.py
+++ b/distributed/bokeh/core.py
@@ -5,6 +5,8 @@ from bokeh.server.server import Server
 
 
 class BokehServer(object):
+    server_kwargs = {}
+
     def listen(self, addr):
         if self.server:
             return
@@ -20,11 +22,13 @@ class BokehServer(object):
                 else:
                     kwargs = {}
 
+                kwargs.update(self.server_kwargs)
+
                 self.server = Server(self.apps, io_loop=self.loop,
                                      port=port, address=ip,
                                      check_unused_sessions_milliseconds=500,
-                                     allow_websocket_origin=["*"], **kwargs
-                                     )
+                                     allow_websocket_origin=["*"],
+                                     **kwargs)
                 if bokeh.__version__ <= '0.12.3':
                     self.server.start(start_loop=False)
                 else:
@@ -49,29 +53,9 @@ class BokehServer(object):
         if self.server._tornado._ping_job is not None:
             self.server._tornado._ping_job.stop()
 
-        # self.server.stop()
         # https://github.com/bokeh/bokeh/issues/5494
-
-
-def format_bytes(n):
-    """ Format bytes as text
-
-    >>> format_bytes(1)
-    '1 B'
-    >>> format_bytes(1234)
-    '1.23 kB'
-    >>> format_bytes(12345678)
-    '12.35 MB'
-    >>> format_bytes(1234567890)
-    '1.23 GB'
-    """
-    if n > 1e9:
-        return '%0.2f GB' % (n / 1e9)
-    if n > 1e6:
-        return '%0.2f MB' % (n / 1e6)
-    if n > 1e3:
-        return '%0.2f kB' % (n / 1000)
-    return '%d B' % n
+        if bokeh.__version__ >= '0.12.4':
+            self.server.stop()
 
 
 def format_time(n):
diff --git a/distributed/bokeh/scheduler.py b/distributed/bokeh/scheduler.py
index 2b9351e..3ea7e65 100644
--- a/distributed/bokeh/scheduler.py
+++ b/distributed/bokeh/scheduler.py
@@ -2,34 +2,90 @@ from __future__ import print_function, division, absolute_import
 
 from functools import partial
 import logging
+import math
... 25151 lines suppressed ...

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/dask.distributed.git


