[Python-modules-commits] [dask.distributed] 01/01: Import dask.distributed_1.16.1+ds.1.orig.tar.gz

Diane Trout diane at moszumanska.debian.org
Wed Apr 26 06:37:56 UTC 2017


This is an automated email from the git hooks/post-receive script.

diane pushed a commit to branch upstream
in repository dask.distributed.

commit 82546bc2913e75e77ae83ca55b1c63491a8be600
Author: Diane Trout <diane at ghic.org>
Date:   Mon Apr 24 11:18:06 2017 -0700

    Import dask.distributed_1.16.1+ds.1.orig.tar.gz
---
 .travis.yml                                        |   98 +-
 MANIFEST.in                                        |    3 +
 appveyor.yml                                       |   40 +
 continuous_integration/Dockerfile                  |   60 +-
 continuous_integration/README.md                   |    2 +-
 continuous_integration/build.cmd                   |    6 +
 .../docker-files/cdh5-install.sh                   |   17 -
 continuous_integration/docker-files/cloudera.pref  |    3 -
 continuous_integration/docker-files/core-site.xml  |    7 +
 continuous_integration/docker-files/hdfs-site.xml  |   51 +
 .../docker-files/libhdfs-build.sh                  |    8 -
 continuous_integration/docker-files/start.sh       |   24 +-
 continuous_integration/run-hdfs.sh                 |   33 +
 continuous_integration/run_tests.cmd               |    9 +
 continuous_integration/run_with_env.cmd            |   90 +
 continuous_integration/setup_conda_environment.cmd |   33 +
 dev-requirements.txt                               |   11 +
 distributed/__init__.py                            |   11 +-
 distributed/_ipython_utils.py                      |    8 +-
 distributed/_version.py                            |  520 +++++
 distributed/batched.py                             |  236 +--
 distributed/bokeh/__init__.py                      |    6 +-
 distributed/bokeh/application.py                   |   23 +-
 distributed/bokeh/background/server_lifecycle.py   |   56 +-
 distributed/bokeh/components.py                    |   85 +-
 distributed/bokeh/core.py                          |   93 +
 distributed/bokeh/export_tool.py                   |    5 +-
 distributed/bokeh/memory-usage.py                  |    1 +
 distributed/bokeh/processing-stacks.py             |    1 +
 distributed/bokeh/resource-profiles.py             |    1 +
 distributed/bokeh/scheduler.py                     |  352 +++
 distributed/bokeh/status/main.py                   |    3 +-
 distributed/bokeh/task-progress.py                 |    1 +
 distributed/bokeh/task-stream.py                   |    1 +
 distributed/bokeh/tasks/main.py                    |    3 +-
 distributed/bokeh/tests/test_application.py        |    6 +-
 distributed/bokeh/tests/test_components.py         |    5 +-
 distributed/bokeh/tests/test_scheduler_bokeh.py    |   95 +
 distributed/bokeh/tests/test_worker_bokeh.py       |  126 ++
 distributed/bokeh/tests/test_worker_monitor.py     |   24 +-
 distributed/bokeh/utils.py                         |    6 +
 distributed/bokeh/worker-table.py                  |    1 +
 distributed/bokeh/worker.py                        |  606 ++++++
 distributed/bokeh/worker_monitor.py                |    9 +-
 distributed/bokeh/workers/main.py                  |    9 +-
 distributed/channels.py                            |  300 +++
 distributed/cli/dask_scheduler.py                  |   54 +-
 distributed/cli/dask_ssh.py                        |    7 +-
 distributed/cli/dask_worker.py                     |  160 +-
 distributed/cli/tests/test_cli_utils.py            |   51 +
 distributed/cli/tests/test_dask_scheduler.py       |  115 +-
 distributed/cli/tests/test_dask_worker.py          |  108 +
 distributed/cli/tests/test_dworker.py              |   38 -
 distributed/cli/utils.py                           |   36 +
 distributed/client.py                              |  887 +++++---
 distributed/collections.py                         |   22 +-
 distributed/comm/__init__.py                       |   35 +
 distributed/comm/addressing.py                     |  163 ++
 distributed/comm/core.py                           |  187 ++
 distributed/comm/inproc.py                         |  327 +++
 distributed/comm/registry.py                       |   72 +
 distributed/comm/tcp.py                            |  331 +++
 distributed/comm/tests/test_comms.py               |  802 +++++++
 distributed/comm/utils.py                          |   65 +
 distributed/comm/zmq.py                            |  301 +++
 distributed/comm/zmqimpl.py                        |  239 +++
 distributed/compatibility.py                       |  160 +-
 distributed/config.py                              |   62 +-
 distributed/config.yaml                            |    9 +
 distributed/core.py                                |  699 +++---
 distributed/counter.py                             |   67 +
 distributed/deploy/__init__.py                     |    2 +
 distributed/deploy/adaptive.py                     |   43 +-
 distributed/deploy/local.py                        |  127 +-
 distributed/deploy/ssh.py                          |  115 +-
 distributed/deploy/tests/test_adaptive.py          |   33 +-
 distributed/deploy/tests/test_local.py             |  111 +-
 distributed/deploy/tests/test_ssh.py               |    6 +-
 distributed/deploy/utils_test.py                   |    3 +-
 distributed/diagnostics/eventstream.py             |   18 +-
 distributed/diagnostics/plugin.py                  |    8 +
 distributed/diagnostics/progress.py                |   13 +-
 distributed/diagnostics/progress_stream.py         |  109 +-
 distributed/diagnostics/progressbar.py             |   63 +-
 distributed/diagnostics/scheduler.py               |   14 +-
 distributed/diagnostics/tests/test_eventstream.py  |   42 +-
 distributed/diagnostics/tests/test_progress.py     |   12 +-
 .../diagnostics/tests/test_progress_stream.py      |   44 +-
 distributed/diagnostics/tests/test_progressbar.py  |   15 +-
 .../tests/test_scheduler_diagnostics.py            |   17 +-
 distributed/diagnostics/tests/test_widgets.py      |   15 +-
 distributed/hdfs.py                                |  195 +-
 distributed/http/core.py                           |   23 +-
 distributed/http/scheduler.py                      |   26 +-
 distributed/http/tests/test_scheduler_http.py      |   25 +-
 distributed/http/tests/test_worker_http.py         |   46 +-
 distributed/http/worker.py                         |   20 +-
 distributed/metrics.py                             |   85 +
 distributed/nanny.py                               |  400 ++--
 distributed/protocol/__init__.py                   |   21 +-
 distributed/protocol/compression.py                |   20 +-
 distributed/protocol/core.py                       |   43 +-
 distributed/protocol/h5py.py                       |    4 -
 distributed/protocol/keras.py                      |   39 +
 distributed/protocol/netcdf4.py                    |    2 -
 distributed/protocol/numpy.py                      |   47 +-
 distributed/protocol/pickle.py                     |   36 +-
 distributed/protocol/serialize.py                  |   86 +-
 distributed/protocol/tests/test_h5py.py            |   51 +-
 distributed/protocol/tests/test_keras.py           |   29 +
 distributed/protocol/tests/test_netcdf4.py         |    1 -
 distributed/protocol/tests/test_numpy.py           |   51 +-
 distributed/protocol/tests/test_pandas.py          |   57 +
 distributed/protocol/tests/test_protocol.py        |   22 +-
 distributed/protocol/tests/test_protocol_utils.py  |    3 +
 distributed/protocol/tests/test_serialize.py       |   41 +-
 distributed/protocol/utils.py                      |    6 +-
 distributed/publish.py                             |   46 +
 distributed/recreate_exceptions.py                 |  179 ++
 distributed/scheduler.py                           | 2238 +++++++++++---------
 distributed/sizeof.py                              |   77 +-
 distributed/stealing.py                            |  298 +++
 distributed/submit.py                              |    6 +-
 distributed/sync.py                                |   80 -
 distributed/system_monitor.py                      |   71 +
 distributed/tests/py3_test_client.py               |   39 +
 distributed/tests/test_as_completed.py             |   83 +
 distributed/tests/test_batched.py                  |  232 +-
 distributed/tests/test_channels.py                 |  204 ++
 distributed/tests/test_client.py                   | 1100 ++++++----
 distributed/tests/test_collections.py              |   40 +-
 distributed/tests/test_compatibility.py            |   28 +-
 distributed/tests/test_core.py                     |  457 +++-
 distributed/tests/test_counter.py                  |   43 +
 distributed/tests/test_hdfs.py                     |  294 +--
 distributed/tests/test_ipython.py                  |   14 +-
 distributed/tests/test_joblib.py                   |    5 +-
 distributed/tests/test_metrics.py                  |   21 +
 distributed/tests/test_nanny.py                    |   81 +-
 distributed/tests/test_publish.py                  |  165 ++
 distributed/tests/test_resources.py                |  219 ++
 distributed/tests/test_scheduler.py                | 1007 ++++-----
 distributed/tests/test_sizeof.py                   |   51 +-
 distributed/tests/test_steal.py                    |  436 +++-
 distributed/tests/test_stress.py                   |  103 +-
 distributed/tests/test_submit_cli.py               |   12 +-
 distributed/tests/test_submit_remote_client.py     |   22 +-
 distributed/tests/test_sync.py                     |   12 -
 distributed/tests/test_system_monitor.py           |   20 +
 distributed/tests/test_threadpoolexecutor.py       |    6 +-
 distributed/tests/test_utils.py                    |  117 +-
 distributed/tests/test_utils_comm.py               |   20 +-
 distributed/tests/test_utils_test.py               |   66 +-
 distributed/tests/test_worker.py                   |  602 +++---
 distributed/tests/test_worker_client.py            |   63 +-
 distributed/tests/test_worker_failure.py           |   93 +-
 distributed/threadpoolexecutor.py                  |    9 +-
 distributed/utils.py                               |  230 +-
 distributed/utils_comm.py                          |   56 +-
 distributed/utils_test.py                          |  716 +++++--
 distributed/versions.py                            |    1 +
 distributed/worker.py                              | 2157 ++++++++++++++-----
 distributed/worker_client.py                       |   61 +-
 docs/requirements.txt                              |    2 +
 docs/source/api.rst                                |   13 +
 docs/source/changelog.rst                          |  209 ++
 docs/source/channels.rst                           |  149 ++
 docs/source/communications.rst                     |   98 +
 docs/source/conf.py                                |    8 +
 docs/source/ec2.rst                                |    4 +-
 docs/source/faq.rst                                |   45 +-
 docs/source/foundations.rst                        |   92 +-
 docs/source/images/task-state.dot                  |    6 +-
 docs/source/images/task-state.svg                  |  150 +-
 docs/source/images/worker-dep-state.dot            |    8 +
 docs/source/images/worker-dep-state.svg            |   38 +
 docs/source/images/worker-task-state.dot           |   13 +
 docs/source/images/worker-task-state.svg           |   78 +
 docs/source/index.rst                              |    7 +-
 docs/source/limitations.rst                        |   46 +
 docs/source/protocol.rst                           |   84 +-
 docs/source/resources.rst                          |   60 +
 docs/source/scheduling-policies.rst                |    7 +-
 docs/source/scheduling-state.rst                   |   93 +-
 docs/source/setup.rst                              |   29 +
 docs/source/task-launch.rst                        |   14 +-
 docs/source/web.rst                                |   39 +-
 docs/source/worker.rst                             |   57 +-
 release-notes.md                                   |  132 --
 requirements.txt                                   |   10 +-
 setup.cfg                                          |   49 +
 setup.py                                           |   30 +-
 versioneer.py                                      | 1817 ++++++++++++++++
 193 files changed, 19459 insertions(+), 6082 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 5cb3e10..a5d7c4c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,80 +1,94 @@
 language: python
 sudo: false
+dist: trusty
 services:
   - docker
 
 matrix:
   fast_finish: true
-  include:
-    - python: "2.7"
-    - python: "3.4"
-    - python: "3.5"
-      env: HDFS=false
-    - python: "2.7"
-      env: HDFS=true
-      sudo: true
-      dist: trusty
-    - python: "3.5"
-      env: HDFS=true
-      sudo: true
-      dist: trusty
+
+env:
+  matrix:
+    - PYTHON=2.7 PACKAGES="blosc futures faulthandler"
+    - PYTHON=3.4 COVERAGE=true DASK_EXPERIMENTAL_ZMQ=1
+    - PYTHON=3.5 CRICK=true
+    - PYTHON=3.6 PACKAGES=blosc
+    - HDFS=true PYTHON=2.7
+    - HDFS=true PYTHON=3.5 PACKAGES=blosc
+
+addons:
+  hosts:
+    # Need to make the container's hostname resolvable, since the HDFS
+    # client will connect back to it.
+    - hdfs-container
 
 before_install:
   - |
     if [[ $HDFS == true ]]; then
-        if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
-          export DOCKER_ENV=/opt/conda;
-        else
-          export DOCKER_ENV=/opt/conda/envs/py3;
-        fi
-        echo $DOCKER_ENV
-        pwd
         pushd continuous_integration
-        docker build -t distributed-hdfs .
+        docker build -t distributed-hdfs-test .
+        # Run HDFS
+        ./run-hdfs.sh || exit 1
         popd
-        docker run -d -p 8020:8020 -p 50070:50070 -v $(pwd):/distributed distributed-hdfs
-        export CONTAINER_ID=$(docker ps -l -q)
-        sleep 60  # Wait for namenode and datanode
     fi;
 
 install:
+  # Note: we disable progress bars so the Travis log pages load much faster
+
   # Install conda
   - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
   - bash miniconda.sh -b -p $HOME/miniconda
   - export PATH="$HOME/miniconda/bin:$PATH"
   - conda config --set always_yes yes --set changeps1 no
-  - conda update conda
+  - conda update -q conda
 
   # Install dependencies
-  - conda create -n test-environment python=$TRAVIS_PYTHON_VERSION
+  - conda create -q -n test-environment python=$PYTHON
   - source activate test-environment
-  - conda install pytest coverage tornado toolz dill futures dask ipywidgets psutil bokeh requests joblib mock ipykernel jupyter_client h5py netcdf4
-  - pip install git+https://github.com/dask/dask.git --upgrade
-  - pip install git+https://github.com/joblib/joblib.git --upgrade
-  - pip install git+https://github.com/dask/s3fs.git --upgrade
-
-  # Install distributed
-  - python setup.py install
-
+  - conda install -q pytest pytest-timeout pytest-faulthandler coverage tornado toolz dill dask ipywidgets psutil bokeh requests joblib mock ipykernel jupyter_client h5py netcdf4 lz4 paramiko tblib click flake8 $PACKAGES -c conda-forge
   - |
     if [[ $HDFS == true ]]; then
-        pwd
-        docker exec -it $CONTAINER_ID $DOCKER_ENV/bin/python setup.py install
+        conda install -q libxml2 krb5 boost
+        conda install -q -c conda-forge libhdfs3 libgsasl libntlm
+        pip install -q git+https://github.com/dask/hdfs3 --upgrade
+    fi;
+  - pip install -q git+https://github.com/dask/dask.git --upgrade
+  - pip install -q git+https://github.com/joblib/joblib.git --upgrade
+  - pip install -q git+https://github.com/dask/s3fs.git --upgrade
+  - pip install -q git+https://github.com/dask/zict.git --upgrade
+  - pip install -q sortedcollections msgpack-python
+  - pip install -q keras --upgrade --no-deps
+  - |
+    if [[ $CRICK == true ]]; then
+        conda install -q cython
+        pip install git+https://github.com/jcrist/crick.git
     fi;
 
+  # Install distributed
+  - pip install --no-deps -e .
+
 script:
+    - export PYTEST_OPTIONS="--verbose -r s --timeout-method=thread --timeout=300 --runslow --durations=20"
     - |
       if [[ $HDFS == true ]]; then
-        pwd
-        docker exec -it $CONTAINER_ID $DOCKER_ENV/bin/py.test distributed/tests/test_hdfs.py --verbose
-      elif [[ $TRAVIS_PYTHON_VERSION == '3.4' ]]; then
-        coverage run $(which py.test) distributed -m "not avoid_travis" --verbose;
+        py.test distributed/tests/test_hdfs.py $PYTEST_OPTIONS
+        if [ $? -ne 0 ]; then
+            # Diagnose test error
+            echo "--"
+            echo "-- HDFS namenode log follows"
+            echo "--"
+            docker exec -it $(docker ps -q) bash -c "tail -n50 /usr/local/hadoop/logs/hadoop-root-namenode-hdfs-container.log"
+            (exit 1)
+        fi
+      elif [[ $COVERAGE == true ]]; then
+        coverage run $(which py.test) distributed -m "not avoid_travis" $PYTEST_OPTIONS;
       else
-        py.test -m "not avoid_travis" distributed --verbose;
+        py.test -m "not avoid_travis" distributed $PYTEST_OPTIONS;
       fi;
+    - flake8 distributed/*.py distributed/{bokeh,protocol,deploy}/*.py # no tests yet
 
 after_success:
-    - if [[ $TRAVIS_PYTHON_VERSION == '3.4' ]]; then coverage report; pip install coveralls ; coveralls ; fi
+    - if [[ $COVERAGE == true ]]; then coverage report; pip install -q coveralls ; coveralls ; fi
 
 notifications:
   email: false
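
A note on the (exit 1) near the end of the HDFS branch above: in a
multi-line Travis script block, only the last command's exit status
decides whether the step fails, and the diagnostic "docker exec ... tail"
resets $? to zero when it succeeds. The subshell re-asserts the failure.
A minimal sketch of the same idiom, with a hypothetical log file name:

    py.test distributed/tests/test_hdfs.py $PYTEST_OPTIONS
    if [ $? -ne 0 ]; then
        tail -n50 namenode.log   # diagnostic output; exits 0 and clobbers $?
        (exit 1)                 # restore a failing status for the CI step
    fi
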
diff --git a/MANIFEST.in b/MANIFEST.in
index 0b8c1b2..3338289 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -9,5 +9,8 @@ include LICENSE.txt
 include MANIFEST.in
 include requirements.txt
 include distributed/config.yaml
+include distributed/tests/mytestegg-1.0.0-py3.4.egg
 
 prune docs/_build
+include versioneer.py
+include distributed/_version.py
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..76a5295
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,40 @@
+# Environment loosely based on https://github.com/conda/conda/blob/master/appveyor.yml
+
+environment:
+
+  global:
+    # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
+    # /E:ON and /V:ON options are not enabled in the batch script interpreter
+    # See: http://stackoverflow.com/a/13751649/163740
+    CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\continuous_integration\\run_with_env.cmd"
+    JUNIT_OUT: junit-results.xml
+
+  matrix:
+    # Since AppVeyor is quite slow, we only use a single configuration
+    - PYTHON: "3.5"
+      ARCH: "64"
+      CONDA_ENV: testenv
+
+init:
+  # Use AppVeyor's provided Miniconda: https://www.appveyor.com/docs/installed-software#python
+  - if "%ARCH%" == "64" set MINICONDA=C:\Miniconda35-x64
+  - if "%ARCH%" == "32" set MINICONDA=C:\Miniconda35
+  - set PATH=%MINICONDA%;%MINICONDA%/Scripts;%MINICONDA%/Library/bin;%PATH%
+
+install:
+  # Update to a known good conda
+  # (to work around http://help.appveyor.com/discussions/problems/4910)
+  - conda install -q -y conda=4.2.9
+  - continuous_integration\\setup_conda_environment.cmd
+
+build_script:
+  - continuous_integration\\build.cmd
+
+test_script:
+  # %CMD_IN_ENV% is needed for distutils/setuptools-based tests
+  # on certain build configurations.
+  - "%CMD_IN_ENV% continuous_integration\\run_tests.cmd"
+
+on_finish:
+  - ps: $wc = New-Object 'System.Net.WebClient'
+  - ps: $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path "$($env:JUNIT_OUT)"))
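
The two PowerShell lines above push the JUnit results file to AppVeyor's
test-results endpoint for the current job. Assuming curl is available, a
roughly equivalent upload (bash syntax shown) would be:

    curl -fsS -F "file=@junit-results.xml" \
        "https://ci.appveyor.com/api/testresults/junit/$APPVEYOR_JOB_ID"
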
diff --git a/continuous_integration/Dockerfile b/continuous_integration/Dockerfile
index 6d0bef4..53b548a 100644
--- a/continuous_integration/Dockerfile
+++ b/continuous_integration/Dockerfile
@@ -1,43 +1,27 @@
-FROM ubuntu:trusty
-
-# conda
-RUN apt-get update && apt-get install -y -q curl bzip2 git
-RUN curl https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -o /tmp/miniconda.sh
-RUN /bin/bash /tmp/miniconda.sh -b -p /opt/conda
-RUN rm /tmp/miniconda.sh
-ENV PATH /opt/conda/bin:$PATH
-
-# hdfs3 - python 2
-RUN apt-get install -y -q protobuf-compiler libprotobuf-dev
-ENV LIBHDFS3_CONF /etc/hadoop/conf/hdfs-site.xml
-RUN /opt/conda/bin/conda install -y -q libxml2 krb5 boost ipython pytest pip pandas cython mock
-RUN /opt/conda/bin/conda install -y -q libhdfs3 libgsasl libntlm -c dask
-RUN /opt/conda/bin/pip install git+https://github.com/dask/hdfs3 --upgrade
-RUN /opt/conda/bin/pip install git+https://github.com/dask/s3fs --upgrade
-RUN /opt/conda/bin/pip install git+https://github.com/blaze/dask --upgrade
-# hdfs3 - python 3
-RUN conda create -n py3 -y python=3
-RUN conda install -n py3 -y -q libxml2 krb5 boost ipython pytest pip pandas cython mock
-RUN conda install -n py3 libhdfs3 libgsasl libntlm -c dask
-RUN /opt/conda/envs/py3/bin/pip install git+https://github.com/dask/hdfs3 --upgrade
-RUN /opt/conda/envs/py3/bin/pip install git+https://github.com/dask/s3fs --upgrade
-RUN /opt/conda/envs/py3/bin/pip install git+https://github.com/blaze/dask --upgrade
-
-# Cloudera repositories
-RUN curl -s http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh/archive.key | apt-key add -
-RUN echo 'deb [arch=amd64] http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh5 contrib' > /etc/apt/sources.list.d/cloudera.list
-RUN echo 'deb-src http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh5 contrib' >> /etc/apt/sources.list.d/cloudera.list
-ADD docker-files/cloudera.pref /etc/apt/preferences.d/cloudera.pref
-
-# Install CDH5 in a single node: Pseudo Distributed
-ADD docker-files/cdh5-install.sh /tmp/cdh5-install.sh
-RUN bash /tmp/cdh5-install.sh
+FROM sequenceiq/hadoop-docker:2.7.1
+
+ENV HADOOP_PREFIX=/usr/local/hadoop
+ENV HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
+
+ADD docker-files/start.sh /tmp/start.sh
+ADD docker-files/*.xml $HADOOP_CONF_DIR/
+
+VOLUME /host
 
 EXPOSE 8020
 EXPOSE 50070
 
-VOLUME /distributed
-WORKDIR /distributed
-
-ADD docker-files/start.sh /tmp/start.sh
 CMD ["bash", "/tmp/start.sh"]
+
+# Build:
+#
+# $ docker build -t distributed-hdfs-test .
+
+# Configure host to access container HDFS:
+#
+# $ sudo bash -c 'echo "127.0.0.1 hdfs-container" >> /etc/hosts'
+
+# Execution:
+#
+# $ rm -f hdfs-initialized
+# $ docker run -d -h hdfs-container -v$(pwd):/host -p8020:8020 -p 50070:50070 distributed-hdfs-test
diff --git a/continuous_integration/README.md b/continuous_integration/README.md
index a3c1e4d..2b73b5b 100644
--- a/continuous_integration/README.md
+++ b/continuous_integration/README.md
@@ -10,7 +10,7 @@ Build the container:
 docker build -t distributed-hdfs .
 ```
 
-Start the container and wait for the it to be ready:
+Start the container and wait for it to be ready:
 
 ```bash
 docker run -it -p 8020:8020 -p 50070:50070 -v $(pwd):/distributed distributed-hdfs
diff --git a/continuous_integration/build.cmd b/continuous_integration/build.cmd
new file mode 100644
index 0000000..c29c3ea
--- /dev/null
+++ b/continuous_integration/build.cmd
@@ -0,0 +1,6 @@
+call activate %CONDA_ENV%
+
+@echo on
+
+@rem Install Distributed
+%PIP_INSTALL% --no-deps -e .
diff --git a/continuous_integration/docker-files/cdh5-install.sh b/continuous_integration/docker-files/cdh5-install.sh
deleted file mode 100644
index 141aa0c..0000000
--- a/continuous_integration/docker-files/cdh5-install.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-# Install CDH5 in a single node: Pseudo Distributed
-# Docs: http://www.cloudera.com/content/www/en-us/documentation/enterprise/latest/topics/cdh_qs_yarn_pseudo.html
-
-apt-get update && apt-get install -y -q openjdk-7-jre-headless hadoop-conf-pseudo
-
-# Step 1: Format the NameNode
-sudo -u hdfs hdfs namenode -format -force
-
-# Step 2: Start HDFS
-for x in `cd /etc/init.d ; ls hadoop-hdfs-*` ; do sudo service $x start ; done
-
-# Step 3: Create the directories needed for Hadoop processes
-bash /usr/lib/hadoop/libexec/init-hdfs.sh
-
-# Step 6: Create User Directories
-# sudo -u hdfs hdfs dfs -mkdir -p /user/hadoop
-# sudo -u hdfs hdfs dfs -chown hadoop /user/hadoop
diff --git a/continuous_integration/docker-files/cloudera.pref b/continuous_integration/docker-files/cloudera.pref
deleted file mode 100644
index e5a1eae..0000000
--- a/continuous_integration/docker-files/cloudera.pref
+++ /dev/null
@@ -1,3 +0,0 @@
-Package: *
-Pin: release o=Cloudera, l=Cloudera
-Pin-Priority: 501
diff --git a/continuous_integration/docker-files/core-site.xml b/continuous_integration/docker-files/core-site.xml
new file mode 100644
index 0000000..14a30e6
--- /dev/null
+++ b/continuous_integration/docker-files/core-site.xml
@@ -0,0 +1,7 @@
+<configuration>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://0.0.0.0:8020</value>
+    </property>
+</configuration>
+
diff --git a/continuous_integration/docker-files/hdfs-site.xml b/continuous_integration/docker-files/hdfs-site.xml
new file mode 100644
index 0000000..161c2ca
--- /dev/null
+++ b/continuous_integration/docker-files/hdfs-site.xml
@@ -0,0 +1,51 @@
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>1</value>
+    </property>
+
+<!--    <property>
+        <name>dfs.namenode.rpc-address</name>
+        <value>0.0.0.0:8020</value>
+        <description>
+            RPC address that handles all client requests. In the case of HA/Federation where multiple namenodes exist,
+            the name service id is added to the name e.g. dfs.namenode.rpc-address.ns1
+            dfs.namenode.rpc-address.EXAMPLENAMESERVICE
+            The value of this property will take the form of nn-host1:rpc-port.
+        </description>
+    </property>-->
+
+<!--    <property>
+        <name>dfs.namenode.rpc-bind-host</name>
+        <value>0.0.0.0</value>
+        <description>
+            The actual address the RPC server will bind to. If this optional address is
+            set, it overrides only the hostname portion of dfs.namenode.rpc-address.
+            It can also be specified per name node or name service for HA/Federation.
+            This is useful for making the name node listen on all interfaces by
+            setting it to 0.0.0.0.
+        </description>
+    </property>-->
+
+    <property>
+        <name>dfs.namenode.safemode.extension</name>
+        <value>10</value>
+        <description>
+            Determines extension of safe mode in milliseconds
+            after the threshold level is reached.
+        </description>
+    </property>
+
+    <property>
+        <name>dfs.permissions</name>
+        <value>false</value>
+        <description>
+            If "true", enable permission checking in HDFS.
+            If "false", permission checking is turned off,
+            but all other behavior is unchanged.
+            Switching from one parameter value to the other does not change the mode,
+            owner or group of files or directories.
+        </description>
+    </property>
+
+</configuration>
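
The long descriptions above are carried over from the Hadoop defaults; the
settings that matter for CI are the short safe-mode extension and disabled
permission checking, which let the tests write to HDFS right after startup.
As a quick connectivity check against the container — assuming the hdfs3
client library that this CI setup installs, and the LIBHDFS3_CONF variable
the old Dockerfile used to set — one could run:

    export LIBHDFS3_CONF=$PWD/continuous_integration/docker-files/hdfs-site.xml
    python -c "import hdfs3; print(hdfs3.HDFileSystem(host='hdfs-container', port=8020).ls('/'))"
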
diff --git a/continuous_integration/docker-files/libhdfs-build.sh b/continuous_integration/docker-files/libhdfs-build.sh
deleted file mode 100644
index 6de7a8a..0000000
--- a/continuous_integration/docker-files/libhdfs-build.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-cd /opt/libhdfs3
-
-mkdir build
-pushd build
-../bootstrap --prefix=/usr/local
-make
-make install
-popd
diff --git a/continuous_integration/docker-files/start.sh b/continuous_integration/docker-files/start.sh
old mode 100644
new mode 100755
index 5cd1a04..04140fd
--- a/continuous_integration/docker-files/start.sh
+++ b/continuous_integration/docker-files/start.sh
@@ -1,10 +1,22 @@
-#!/bin/bash
+#!/bin/bash
 
-# Start HDFS
-sudo service hadoop-hdfs-namenode start
-sudo service hadoop-hdfs-datanode start
+set -e
 
-echo "Ready"
+export HADOOP_PREFIX=/usr/local/hadoop
+$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh
 
-# Block
+service sshd start
+
+rm -f /tmp/*.pid
+$HADOOP_PREFIX/sbin/start-dfs.sh
+
+echo "--"
+echo "-- HDFS started!"
+echo "--"
+
+# Wait for nodes to be fully initialized
+sleep 5
+touch /host/hdfs-initialized
+
+# Stay alive
 sleep infinity
diff --git a/continuous_integration/run-hdfs.sh b/continuous_integration/run-hdfs.sh
new file mode 100755
index 0000000..90e371c
--- /dev/null
+++ b/continuous_integration/run-hdfs.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+HOSTDIR=$(pwd)
+INIT_MARKER=$HOSTDIR/hdfs-initialized
+
+# Remove initialization marker
+rm -f $INIT_MARKER
+
+CONTAINER_ID=$(docker run -d -h hdfs-container -v$HOSTDIR:/host -p8020:8020 -p 50070:50070 distributed-hdfs-test)
+
+if [ $? -ne 0 ]; then
+    echo "Failed starting HDFS container"
+    exit 1
+fi
+echo "Started HDFS container: $CONTAINER_ID"
+
+# CONTAINER_ID=$1
+CHECK_RUNNING="docker top $CONTAINER_ID"
+
+# Wait for initialization
+while [[ $($CHECK_RUNNING) ]] && [[ ! -f $INIT_MARKER ]]
+do
+    sleep 1
+done
+
+# Error out if the container failed starting
+if [[ ! $($CHECK_RUNNING) ]]; then
+    echo "HDFS startup failed! Logs follow"
+    echo "-------------------------------------------------"
+    docker logs $CONTAINER_ID
+    echo "-------------------------------------------------"
+    exit 1
+fi
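
start.sh and run-hdfs.sh coordinate through a marker file on the shared
/host volume: the container touches it once HDFS is up, and the host polls
until either the marker appears or the container dies. Stripped to its
essentials, the handshake is:

    # container side (start.sh): signal readiness through the bind mount
    touch /host/hdfs-initialized

    # host side (run-hdfs.sh): wait while the container runs and no marker exists
    while docker top "$CONTAINER_ID" >/dev/null 2>&1 && [ ! -f hdfs-initialized ]; do
        sleep 1
    done
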
diff --git a/continuous_integration/run_tests.cmd b/continuous_integration/run_tests.cmd
new file mode 100644
index 0000000..f5ba568
--- /dev/null
+++ b/continuous_integration/run_tests.cmd
@@ -0,0 +1,9 @@
+call activate %CONDA_ENV%
+
+@echo on
+
+set PYTHONFAULTHANDLER=1
+
+set PYTEST=py.test --tb=native --timeout=120 -r s
+
+%PYTEST% -v -m "not avoid_travis" --junit-xml="%JUNIT_OUT%" distributed
diff --git a/continuous_integration/run_with_env.cmd b/continuous_integration/run_with_env.cmd
new file mode 100644
index 0000000..3a56e3e
--- /dev/null
+++ b/continuous_integration/run_with_env.cmd
@@ -0,0 +1,90 @@
+:: From https://github.com/ogrisel/python-appveyor-demo
+::
+:: To build extensions for 64 bit Python 3, we need to configure environment
+:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of:
+:: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1)
+::
+:: To build extensions for 64 bit Python 2, we need to configure environment
+:: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of:
+:: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0)
+::
+:: 32 bit builds, and 64-bit builds for 3.5 and beyond, do not require specific
+:: environment configurations.
+::
+:: Note: this script needs to be run with the /E:ON and /V:ON flags for the
+:: cmd interpreter, at least for (SDK v7.0)
+::
+:: More details at:
+:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows
+:: http://stackoverflow.com/a/13751649/163740
+::
+:: Author: Olivier Grisel
+:: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/
+::
+:: Notes about batch files for Python people:
+::
+:: Quotes in values are literally part of the values:
+::      SET FOO="bar"
+:: FOO is now five characters long: " b a r "
+:: If you don't want quotes, don't include them on the right-hand side.
+::
+:: The CALL lines at the end of this file look redundant, but if you move them
+:: outside of the IF clauses, they do not run properly in the SET_SDK_64==Y
+:: case; I don't know why.
+@ECHO OFF
+
+SET COMMAND_TO_RUN=%*
+SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows
+SET WIN_WDK=c:\Program Files (x86)\Windows Kits\10\Include\wdf
+
+:: Extract the major and minor versions, and allow for the minor version to be
+:: more than 9.  This requires the version number to have two dots in it.
+SET MAJOR_PYTHON_VERSION=%PYTHON:~0,1%
+IF "%PYTHON:~3,1%" == "." (
+    SET MINOR_PYTHON_VERSION=%PYTHON:~2,1%
+) ELSE (
+    SET MINOR_PYTHON_VERSION=%PYTHON:~2,2%
+)
+
+:: Based on the Python version, determine what SDK version to use, and whether
+:: to set the SDK for 64-bit.
+IF %MAJOR_PYTHON_VERSION% == 2 (
+    SET WINDOWS_SDK_VERSION="v7.0"
+    SET SET_SDK_64=Y
+) ELSE (
+    IF %MAJOR_PYTHON_VERSION% == 3 (
+        SET WINDOWS_SDK_VERSION="v7.1"
+        IF %MINOR_PYTHON_VERSION% LEQ 4 (
+            SET SET_SDK_64=Y
+        ) ELSE (
+            SET SET_SDK_64=N
+            IF EXIST "%WIN_WDK%" (
+                :: See: https://connect.microsoft.com/VisualStudio/feedback/details/1610302/
+                REN "%WIN_WDK%" 0wdf
+            )
+        )
+    ) ELSE (
+        ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%"
+        EXIT 1
+    )
+)
+
+IF %ARCH% == 64 (
+    IF %SET_SDK_64% == Y (
+        ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture
+        SET DISTUTILS_USE_SDK=1
+        SET MSSdk=1
+        "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION%
+        "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release
+        ECHO Executing: %COMMAND_TO_RUN%
+        call %COMMAND_TO_RUN% || EXIT 1
+    ) ELSE (
+        ECHO Using default MSVC build environment for 64 bit architecture
+        ECHO Executing: %COMMAND_TO_RUN%
+        call %COMMAND_TO_RUN% || EXIT 1
+    )
+) ELSE (
+    ECHO Using default MSVC build environment for 32 bit architecture
+    ECHO Executing: %COMMAND_TO_RUN%
+    call %COMMAND_TO_RUN% || EXIT 1
+)
diff --git a/continuous_integration/setup_conda_environment.cmd b/continuous_integration/setup_conda_environment.cmd
new file mode 100644
index 0000000..e9820f3
--- /dev/null
+++ b/continuous_integration/setup_conda_environment.cmd
@@ -0,0 +1,33 @@
+@rem The cmd /C hack circumvents a regression where conda installs a conda.bat
+@rem script in non-root environments.
+set CONDA=cmd /C conda
+set CONDA_INSTALL=%CONDA% install -q -y
+set PIP_INSTALL=pip install -q
+
+@echo on
+
+@rem Deactivate any environment
+call deactivate
+@rem Display root environment (for debugging)
+%CONDA% list
+@rem Clean up any leftovers from a previous build
+%CONDA% remove --all -q -y -n %CONDA_ENV%
+
+@rem Create test environment
+@rem (note: no cytoolz, as it seems to prevent faulthandler tracebacks on crash)
+%CONDA% create -n %CONDA_ENV% -q -y python=%PYTHON% pytest toolz dill futures dask ipywidgets psutil bokeh requests joblib mock ipykernel jupyter_client tblib msgpack-python cloudpickle click zict lz4 -c conda-forge
+
+call activate %CONDA_ENV%
+
+%CONDA% uninstall -q -y --force dask joblib zict
+%PIP_INSTALL% git+https://github.com/dask/dask --upgrade
+%PIP_INSTALL% git+https://github.com/joblib/joblib.git --upgrade
+%PIP_INSTALL% git+https://github.com/dask/zict --upgrade
+
+%PIP_INSTALL% pytest-timeout pytest-faulthandler sortedcollections
+
+@rem Display final environment (for reproducing)
+%CONDA% list
+%CONDA% list --explicit
+pip list
+python -m site
diff --git a/dev-requirements.txt b/dev-requirements.txt
new file mode 100644
index 0000000..295fed1
--- /dev/null
+++ b/dev-requirements.txt
@@ -0,0 +1,11 @@
+joblib >= 0.10.2
+mock >= 2.0.0
+pandas >= 0.19.2
+numpy >= 1.11.0
+bokeh >= 0.12.3
+requests >= 2.12.4
+pyzmq >= 16.0.2
+ipython >= 5.0.0
+jupyter_client >= 4.4.0
+ipykernel >= 4.5.2
+pytest >= 3.0.5
diff --git a/distributed/__init__.py b/distributed/__init__.py
index 8edad9c..49bd8fa 100644
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -1,7 +1,7 @@
 from __future__ import print_function, division, absolute_import
 
 from .config import config
-from .core import connect, read, write, rpc
+from .core import connect, rpc
 from .deploy import LocalCluster
 from .diagnostics import progress
 from .client import (Client, Executor, CompatibleExecutor, wait, as_completed,
@@ -10,12 +10,15 @@ from .nanny import Nanny
 from .scheduler import Scheduler
 from .utils import sync
 from .worker import Worker
-from .worker_client import local_client
+from .worker_client import local_client, worker_client
 
 try:
     from .collections import futures_to_collection
 except:
     pass
 
-
-__version__ = '1.14.3'
+from ._version import get_versions
+versions = get_versions()
+__version__ = versions['version']
+__git_revision__ = versions['full-revisionid']
+del get_versions, versions
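
With versioneer wired in, the package version comes from git metadata (or
from keywords baked into a release tarball) instead of a hard-coded string.
A quick sanity check after installation:

    python -c "import distributed; print(distributed.__version__)"
    # prints e.g. 1.16.1; the exact string depends on the tag/commit built from
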
diff --git a/distributed/_ipython_utils.py b/distributed/_ipython_utils.py
index 112e472..042258c 100644
--- a/distributed/_ipython_utils.py
+++ b/distributed/_ipython_utils.py
@@ -84,12 +84,14 @@ def register_worker_magic(connection_info, magic_name='worker'):
     kc = BlockingKernelClient(**connection_info)
     kc.session.key = key
     kc.start_channels()
+
     def remote(line, cell=None):
         """Run the current cell on a remote IPython kernel"""
         if cell is None:
             # both line and cell magic
             cell = line
         run_cell_remote(ip, kc, cell)
+
     remote.client = kc # preserve reference on kc, largely for mocking
     ip.register_magic_function(remote, magic_kind='line', magic_name=magic_name)
     ip.register_magic_function(remote, magic_kind='cell', magic_name=magic_name)
@@ -137,6 +139,7 @@ def remote_magic(line, cell=None):
     # actually run the code
     run_cell_remote(ip, kc, cell)
 
+
 # cache clients for re-use in remote magic
 remote_magic._clients = {}
 
@@ -170,13 +173,14 @@ def connect_qtconsole(connection_info, name=None, extra_args=None):
     if extra_args:
         cmd.extend(extra_args)
     Popen(cmd)
+
+    @atexit.register
     def _cleanup_connection_file():
         """Cleanup our connection file when we exit."""
         try:
             os.remove(path)
         except OSError:
             pass
-    atexit.register(_cleanup_connection_file)
 
 
 def start_ipython(ip=None, ns=None, log=None):
@@ -221,6 +225,7 @@ def start_ipython(ip=None, ns=None, log=None):
     # initialization happens in the thread to avoid threading problems
     # with the sqlite history
     evt = Event()
+
     def _start():
         app.initialize([])
         app.kernel.pre_handler_hook = noop
@@ -243,4 +248,3 @@ def start_ipython(ip=None, ns=None, log=None):
     IOLoop.clear_instance()
     save_inst.install()
     return app
-
diff --git a/distributed/_version.py b/distributed/_version.py
new file mode 100644
index 0000000..1482ff1
--- /dev/null
+++ b/distributed/_version.py
@@ -0,0 +1,520 @@
+
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by github's download-from-tag
+# feature). Distribution tarballs (built by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain. Generated by
+# versioneer-0.17 (https://github.com/warner/python-versioneer)
+
+"""Git implementation of _version.py."""
+
+import errno
+import os
+import re
+import subprocess
+import sys
+
+
+def get_keywords():
+    """Get the keywords needed to look up the version information."""
+    # these strings will be replaced by git during git-archive.
+    # setup.py/versioneer.py will grep for the variable names, so they must
+    # each be defined on a line of their own. _version.py will just call
+    # get_keywords().
+    git_refnames = "$Format:%d$"
+    git_full = "$Format:%H$"
+    git_date = "$Format:%ci$"
+    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
+    return keywords
+
+
+class VersioneerConfig:
+    """Container for Versioneer configuration parameters."""
+
+
+def get_config():
+    """Create, populate and return the VersioneerConfig() object."""
+    # these strings are filled in when 'setup.py versioneer' creates
+    # _version.py
+    cfg = VersioneerConfig()
+    cfg.VCS = "git"
+    cfg.style = "pep440"
+    cfg.tag_prefix = ""
+    cfg.parentdir_prefix = "distributed-"
+    cfg.versionfile_source = "distributed/_version.py"
+    cfg.verbose = False
+    return cfg
+
+
+class NotThisMethod(Exception):
+    """Exception raised if a method is not valid for the current scenario."""
+
... 34108 lines suppressed ...
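
Most of _version.py is standard versioneer boilerplate. The "$Format:%H$"
placeholders in get_keywords() above are expanded by git-archive, which
assumes the repository marks this file with the export-subst attribute (a
line like "distributed/_version.py export-subst" in .gitattributes). A
sketch of the mechanism:

    # inside a git checkout, export just that file via git-archive
    git archive HEAD distributed/_version.py | tar -xO | grep 'git_full ='
    # prints: git_full = "<full commit sha>"   (placeholder substituted)
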

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/dask.distributed.git


