[Python-modules-commits] [python-eventlet] 01/19: Import python-eventlet_0.20.0.orig.tar.xz

Thomas Goirand zigo at moszumanska.debian.org
Tue Nov 14 21:39:21 UTC 2017


This is an automated email from the git hooks/post-receive script.

zigo pushed a commit to branch master
in repository python-eventlet.

commit b717fde61d936941885f6d11908d50802b22cefe
Author: Thomas Goirand <zigo at debian.org>
Date:   Tue Nov 14 22:12:35 2017 +0100

    Import python-eventlet_0.20.0.orig.tar.xz
---
 .travis.yml                                        |    9 +-
 AUTHORS                                            |    7 +
 NEWS                                               |   21 +
 bin/pull-dnspython                                 |   13 +
 bin/release                                        |    1 +
 doc/modules.rst                                    |    1 +
 doc/modules/dagpool.rst                            |  493 +++++
 doc/real_index.html                                |    2 +-
 eventlet/__init__.py                               |    2 +-
 eventlet/convenience.py                            |    3 +
 eventlet/dagpool.py                                |  602 ++++++
 eventlet/green/http/__init__.py                    |  189 ++
 eventlet/green/http/client.py                      | 1446 ++++++++++++-
 eventlet/green/http/cookiejar.py                   | 2165 +++++++++++++++++++-
 eventlet/green/http/cookies.py                     |  694 ++++++-
 eventlet/green/http/server.py                      | 1275 +++++++++++-
 eventlet/green/httplib.py                          |    5 +-
 eventlet/green/select.py                           |    4 +-
 eventlet/green/socket.py                           |   32 +-
 eventlet/green/ssl.py                              |   53 +-
 eventlet/green/subprocess.py                       |   21 +-
 eventlet/green/urllib/request.py                   |    8 +-
 eventlet/green/zmq.py                              |   90 +
 eventlet/greenio/base.py                           |   25 +-
 eventlet/greenio/py3.py                            |    4 +-
 eventlet/hubs/hub.py                               |    6 +-
 eventlet/patcher.py                                |   19 +-
 eventlet/support/dns/__init__.py                   |   54 +
 eventlet/support/dns/_compat.py                    |   47 +
 eventlet/support/dns/dnssec.py                     |  457 +++++
 eventlet/support/dns/e164.py                       |   85 +
 eventlet/support/dns/edns.py                       |  150 ++
 eventlet/support/dns/entropy.py                    |  141 ++
 eventlet/support/dns/exception.py                  |  128 ++
 eventlet/support/dns/flags.py                      |  112 +
 eventlet/support/dns/grange.py                     |   69 +
 eventlet/support/dns/hash.py                       |   31 +
 eventlet/support/dns/inet.py                       |  111 +
 eventlet/support/dns/ipv4.py                       |   59 +
 eventlet/support/dns/ipv6.py                       |  172 ++
 eventlet/support/dns/message.py                    | 1152 +++++++++++
 eventlet/support/dns/name.py                       |  886 ++++++++
 eventlet/support/dns/namedict.py                   |  104 +
 eventlet/support/dns/node.py                       |  178 ++
 eventlet/support/dns/opcode.py                     |  107 +
 eventlet/support/dns/query.py                      |  539 +++++
 eventlet/support/dns/rcode.py                      |  125 ++
 eventlet/support/dns/rdata.py                      |  458 +++++
 eventlet/support/dns/rdataclass.py                 |  118 ++
 eventlet/support/dns/rdataset.py                   |  338 +++
 eventlet/support/dns/rdatatype.py                  |  255 +++
 eventlet/support/dns/rdtypes/ANY/AFSDB.py          |   53 +
 eventlet/support/dns/rdtypes/ANY/AVC.py            |   23 +
 eventlet/support/dns/rdtypes/ANY/CAA.py            |   73 +
 eventlet/support/dns/rdtypes/ANY/CDNSKEY.py        |   25 +
 eventlet/support/dns/rdtypes/ANY/CDS.py            |   21 +
 eventlet/support/dns/rdtypes/ANY/CERT.py           |  121 ++
 eventlet/support/dns/rdtypes/ANY/CNAME.py          |   25 +
 eventlet/support/dns/rdtypes/ANY/CSYNC.py          |  124 ++
 eventlet/support/dns/rdtypes/ANY/DLV.py            |   21 +
 eventlet/support/dns/rdtypes/ANY/DNAME.py          |   24 +
 eventlet/support/dns/rdtypes/ANY/DNSKEY.py         |   25 +
 eventlet/support/dns/rdtypes/ANY/DS.py             |   21 +
 eventlet/support/dns/rdtypes/ANY/EUI48.py          |   29 +
 eventlet/support/dns/rdtypes/ANY/EUI64.py          |   29 +
 eventlet/support/dns/rdtypes/ANY/GPOS.py           |  160 ++
 eventlet/support/dns/rdtypes/ANY/HINFO.py          |   84 +
 eventlet/support/dns/rdtypes/ANY/HIP.py            |  113 +
 eventlet/support/dns/rdtypes/ANY/ISDN.py           |   97 +
 eventlet/support/dns/rdtypes/ANY/LOC.py            |  325 +++
 eventlet/support/dns/rdtypes/ANY/MX.py             |   21 +
 eventlet/support/dns/rdtypes/ANY/NS.py             |   21 +
 eventlet/support/dns/rdtypes/ANY/NSEC.py           |  126 ++
 eventlet/support/dns/rdtypes/ANY/NSEC3.py          |  191 ++
 eventlet/support/dns/rdtypes/ANY/NSEC3PARAM.py     |   88 +
 eventlet/support/dns/rdtypes/ANY/PTR.py            |   21 +
 eventlet/support/dns/rdtypes/ANY/RP.py             |   80 +
 eventlet/support/dns/rdtypes/ANY/RRSIG.py          |  156 ++
 eventlet/support/dns/rdtypes/ANY/RT.py             |   21 +
 eventlet/support/dns/rdtypes/ANY/SOA.py            |  114 ++
 eventlet/support/dns/rdtypes/ANY/SPF.py            |   23 +
 eventlet/support/dns/rdtypes/ANY/SSHFP.py          |   77 +
 eventlet/support/dns/rdtypes/ANY/TLSA.py           |   82 +
 eventlet/support/dns/rdtypes/ANY/TXT.py            |   21 +
 eventlet/support/dns/rdtypes/ANY/URI.py            |   80 +
 eventlet/support/dns/rdtypes/ANY/X25.py            |   64 +
 eventlet/support/dns/rdtypes/ANY/__init__.py       |   50 +
 eventlet/support/dns/rdtypes/IN/A.py               |   52 +
 eventlet/support/dns/rdtypes/IN/AAAA.py            |   53 +
 eventlet/support/dns/rdtypes/IN/APL.py             |  161 ++
 eventlet/support/dns/rdtypes/IN/DHCID.py           |   59 +
 eventlet/support/dns/rdtypes/IN/IPSECKEY.py        |  148 ++
 eventlet/support/dns/rdtypes/IN/KX.py              |   21 +
 eventlet/support/dns/rdtypes/IN/NAPTR.py           |  125 ++
 eventlet/support/dns/rdtypes/IN/NSAP.py            |   58 +
 eventlet/support/dns/rdtypes/IN/NSAP_PTR.py        |   21 +
 eventlet/support/dns/rdtypes/IN/PX.py              |   87 +
 eventlet/support/dns/rdtypes/IN/SRV.py             |   81 +
 eventlet/support/dns/rdtypes/IN/WKS.py             |  105 +
 eventlet/support/dns/rdtypes/IN/__init__.py        |   30 +
 eventlet/support/dns/rdtypes/__init__.py           |   24 +
 eventlet/support/dns/rdtypes/dnskeybase.py         |  136 ++
 eventlet/support/dns/rdtypes/dsbase.py             |   83 +
 eventlet/support/dns/rdtypes/euibase.py            |   71 +
 eventlet/support/dns/rdtypes/mxbase.py             |  101 +
 eventlet/support/dns/rdtypes/nsbase.py             |   81 +
 eventlet/support/dns/rdtypes/txtbase.py            |   90 +
 eventlet/support/dns/renderer.py                   |  329 +++
 eventlet/support/dns/resolver.py                   | 1407 +++++++++++++
 eventlet/support/dns/reversename.py                |   89 +
 eventlet/support/dns/rrset.py                      |  182 ++
 eventlet/support/dns/set.py                        |  259 +++
 eventlet/support/dns/tokenizer.py                  |  564 +++++
 eventlet/support/dns/tsig.py                       |  234 +++
 eventlet/support/dns/tsigkeyring.py                |   48 +
 eventlet/support/dns/ttl.py                        |   68 +
 eventlet/support/dns/update.py                     |  249 +++
 eventlet/support/dns/version.py                    |   34 +
 eventlet/support/dns/wiredata.py                   |  103 +
 eventlet/support/dns/zone.py                       | 1087 ++++++++++
 eventlet/support/greendns.py                       |   74 +-
 eventlet/tpool.py                                  |    2 +
 eventlet/websocket.py                              |   15 +-
 eventlet/wsgi.py                                   |   45 +-
 examples/websocket.html                            |   15 +-
 setup.py                                           |    1 +
 tests/__init__.py                                  |   21 +-
 tests/convenience_test.py                          |   19 +-
 tests/dagpool_test.py                              |  693 +++++++
 tests/green_http_test.py                           |   12 +
 tests/greendns_test.py                             |  147 +-
 tests/greenio_test.py                              |    2 +-
 .../green_http_doesnt_change_original_module.py    |    8 +
 .../green_httplib_doesnt_change_original_module.py |    8 +
 tests/isolated/greendns_from_address_203.py        |    7 +-
 tests/isolated/mysqldb_monkey_patch.py             |   10 +-
 .../patcher_blocking_select_methods_are_deleted.py |    4 +-
 tests/isolated/regular_file_readall.py             |   43 +
 tests/isolated/socket_resolve_green.py             |   45 +
 .../tpool_isolate_socket_default_timeout.py        |   15 +
 tests/manual/websocket-gunicorn.py                 |   48 +
 tests/patcher_test.py                              |   38 +-
 tests/semaphore_test.py                            |    5 -
 tests/socket_test.py                               |   62 +
 tests/ssl_test.py                                  |  108 +-
 tests/subprocess_test.py                           |   10 +
 tests/test__event.py                               |    5 -
 tests/test__greenness.py                           |   31 +-
 tests/test__refcount.py                            |    5 -
 tests/test__socket_errors.py                       |    4 -
 tests/tpool_test.py                                |   68 +-
 tests/wsgi_test.py                                 |   58 +-
 tox.ini                                            |    9 +-
 153 files changed, 23196 insertions(+), 376 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 7261983..900cbf4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,30 +1,29 @@
 language: python
-python: 2.7
+python: 3.5
 env:
   matrix:
     - TOX_ENV=pep8
     - TOX_ENV=py26-epolls
     - TOX_ENV=py26-poll
     - TOX_ENV=py26-selects
-    - TOX_ENV=py27-dns
     - TOX_ENV=py27-epolls
     - TOX_ENV=py27-poll
     - TOX_ENV=py27-selects
     - TOX_ENV=py33-epolls
     - TOX_ENV=py33-poll
     - TOX_ENV=py33-selects
-    - TOX_ENV=py34-dns
     - TOX_ENV=py34-epolls
     - TOX_ENV=py34-poll
     - TOX_ENV=py34-selects
-    - TOX_ENV=pypy-dns
+    - TOX_ENV=py35-epolls
+    - TOX_ENV=py35-poll
+    - TOX_ENV=py35-selects
     - TOX_ENV=pypy-epolls
     - TOX_ENV=pypy-poll
     - TOX_ENV=pypy-selects
 matrix:
   fast_finish: true
   allow_failures:
-    - env: TOX_ENV=pypy-dns
     - env: TOX_ENV=pypy-epolls
     - env: TOX_ENV=pypy-poll
     - env: TOX_ENV=pypy-selects
diff --git a/AUTHORS b/AUTHORS
index fbb93a8..3f49823 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -133,3 +133,10 @@ Thanks To
 * Collin Stocks, fixing eventlet.green.urllib2.urlopen() so it accepts cafile, capath, or cadefault arguments
 * Alexis Lee
 * Steven Erenst
+* Piët Delport
+* Alex Villacís Lasso
+* Yashwardhan Singh
+* Tim Burke
+* Ondřej Nový
+* Jarrod Johnson
+* Whitney Young
diff --git a/NEWS b/NEWS
index e058b05..28130e2 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,24 @@
+0.20.0
+======
+* IMPORTANT: removed select.poll() function
+* DNS resolving is always green with dnspython bundled in
+* greenio: only trampoline when we block
+* convenience: listen() sets SO_REUSEPORT when available; Thanks to Zhengwei Gao
+* ssl: Fix "TypeError: read() argument 2 must be read-write bytes-like object, not None"
+* greenio: _recv_loop behaviour with recv_into on closed sock
+* ipv6: getaddrinfo would fail with scope index
+* green.zmq: Support {send,recv}_{string,json,pyobj} wrappers
+* greendns: Return answers from /etc/hosts despite nameserver errors
+* patcher: fixed green existing locks fail (Python3)
+* Add DAGPool, a dependency-driven greenthread pool
+* wsgi: Unix socket address representation; Thanks to Samuel Merritt
+* tpool: isolate internal socket from default timeout; Thanks to Alex Villacís Lasso
+* wsgi: only skip Content-Type and Content-Length headers (GH-327)
+* wsgi: 400 on blank Content-Length headers (GH-334)
+* greenio: makefile related pypy socket ref counting
+* ssl: Fix recv_into blocking when reading chunks of data
+* websocket: support Gunicorn environ['gunicorn.socket']
+
 0.19.0
 ======
 * ssl: IMPORTANT DoS FIX do_handshake_connect=False in server accept(); Thanks to Garth Mollett
diff --git a/bin/pull-dnspython b/bin/pull-dnspython
new file mode 100755
index 0000000..56d1a76
--- /dev/null
+++ b/bin/pull-dnspython
@@ -0,0 +1,13 @@
+#!/bin/bash -eux
+cd "$( dirname "${BASH_SOURCE[0]}" )/.."
+version=${1-bb0c9f21f4a6f56f2fe8d7c1fc991080ef89d223}
+upstream_path=./dnspython-${version}
+if [[ ! -d "${upstream_path}" ]]; then
+  curl -L -odnspython.zip "https://github.com/rthalley/dnspython/archive/${version}.zip"
+  unzip dnspython.zip
+  rm dnspython.zip
+fi
+rm -rf eventlet/support/dns
+# patch --directory=eventlet/support -p1 --normal --forward -r/dev/null <./dns.patch
+mv ${upstream_path}/dns eventlet/support/
+rm -rf ${upstream_path}
diff --git a/bin/release b/bin/release
index 49d1082..cacb2a3 100755
--- a/bin/release
+++ b/bin/release
@@ -46,6 +46,7 @@ main() {
 
 	if confirm "Build documentation (website)? [Yn] " >&2; then
 		bin/build-website.bash || exit 1
+		git checkout "$branch"
 	fi
 
 	if confirm "Upload to PyPi? [Yn] "; then
diff --git a/doc/modules.rst b/doc/modules.rst
index 0b07d61..c9d2af5 100644
--- a/doc/modules.rst
+++ b/doc/modules.rst
@@ -6,6 +6,7 @@ Module Reference
 
    modules/backdoor
    modules/corolocal
+   modules/dagpool
    modules/debug
    modules/db_pool
    modules/event
diff --git a/doc/modules/dagpool.rst b/doc/modules/dagpool.rst
new file mode 100644
index 0000000..e192def
--- /dev/null
+++ b/doc/modules/dagpool.rst
@@ -0,0 +1,493 @@
+:mod:`dagpool` -- Dependency-Driven Greenthreads
+================================================
+
+Rationale
+*********
+
+The dagpool module provides the :class:`DAGPool <eventlet.dagpool.DAGPool>`
+class, which addresses situations in which the value produced by one
+greenthread might be consumed by several others -- while at the same time a
+consuming greenthread might depend on the output from several different
+greenthreads.
+
+If you have a tree with strict many-to-one dependencies -- each producer
+greenthread provides results to exactly one consumer, though a given consumer
+may depend on multiple producers -- that could be addressed by recursively
+constructing a :class:`GreenPool <eventlet.greenpool.GreenPool>` of producers
+for each consumer, then :meth:`waiting <eventlet.greenpool.GreenPool.waitall>`
+for all producers.
+
+If you have a tree with strict one-to-many dependencies -- each consumer
+greenthread depends on exactly one producer, though a given producer may
+provide results to multiple consumers -- that could be addressed by causing
+each producer to finish by launching a :class:`GreenPool
+<eventlet.greenpool.GreenPool>` of consumers.
+
+But when you have many-to-many dependencies, a tree doesn't suffice. Such a
+dependency structure is known as a
+`Directed Acyclic Graph <https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_,
+or DAG.
+
+You might consider sorting the greenthreads into dependency order
+(`topological sort <https://en.wikipedia.org/wiki/Topological_sorting>`_) and
+launching them in a GreenPool. But the concurrency of the GreenPool must be
+strictly constrained to ensure that no greenthread is launched before all its
+upstream producers have completed -- and the appropriate pool size is
+data-dependent. Only a pool of size 1 (serializing all the greenthreads)
+guarantees that a topological sort will produce correct results.
+
+Even if you do serialize all the greenthreads, how do you pass results from
+each producer to all its consumers, which might start at very different points
+in time?
+
+One answer is to associate each greenthread with a distinct key, and store its
+result in a common dict. Then each consumer greenthread can identify its
+direct upstream producers by their keys, and find their results in that dict.
+
+This is the essence of DAGPool.
+
+A DAGPool instance owns a dict, and stores greenthread results in that dict.
+You :meth:`spawn <eventlet.dagpool.DAGPool.spawn>` *all* greenthreads in the
+DAG, specifying for each its own key -- the key with which its result will be
+stored on completion -- plus the keys of the upstream producer greenthreads on
+whose results it directly depends.
+
+Keys need only be unique within the DAGPool instance; they need not be UUIDs.
+A key can be any type that can be used as a dict key. String keys make it
+easier to reason about a DAGPool's behavior, but are by no means required.
+
+The DAGPool passes to each greenthread an iterable of (key, value) pairs.
+The key in each pair is the key of one of the greenthread's specified upstream
+producers; the value is the value returned by that producer greenthread. Pairs
+are delivered in the order results become available; the consuming greenthread
+blocks until the next result can be delivered.
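+
+A minimal sketch of a greenthread function written against this interface
+(the function name and the summing are illustrative, not part of the API):
+
+::
+
+    def compute(key, results):
+        # dict() exhausts the iterable, blocking until every direct
+        # upstream producer has delivered its (key, value) pair
+        inputs = dict(results)
+        # ... combine 'inputs' into this greenthread's own result
+        return sum(inputs.values())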
+
+Tutorial
+********
+
+Example
+-------
+
+Consider a couple of programs in some compiled language that depend on a set
+of precompiled libraries. Suppose every such build requires as input the
+specific set of library builds on which it directly depends.
+
+::
+
+    a  zlib
+    | /  |
+    |/   |
+    b    c
+    |   /|
+    |  / |
+    | /  |
+    |/   |
+    d    e
+
+We can't run the build for program d until we have the build results for both
+b and c. We can't run the build for library b until we have build results for
+a and zlib. We can, however, immediately run the builds for a and zlib.
+
+So we can use a DAGPool instance to spawn greenthreads running a function such
+as this:
+
+::
+
+    def builder(key, upstream):
+        for libname, product in upstream:
+            # ... configure build for 'key' to use 'product' for 'libname'
+            pass
+        # all upstream builds have completed
+        # ... run build for 'key'
+        return build_product_for_key
+
+:meth:`spawn <eventlet.dagpool.DAGPool.spawn>` all these greenthreads:
+
+::
+
+    pool = DAGPool()
+    # the upstream producer keys passed to spawn() can be from any iterable,
+    # including a generator
+    pool.spawn("d", ("b", "c"), builder)
+    pool.spawn("e", ["c"], builder)
+    pool.spawn("b", ("a", "zlib"), builder)
+    pool.spawn("c", ["zlib"], builder)
+    pool.spawn("a", (), builder)
+
+As with :func:`eventlet.spawn() <eventlet.spawn>`, if you need to pass special
+build flags to some set of builds, these can be passed as either positional or
+keyword arguments:
+
+::
+
+    def builder(key, upstream, cflags="", linkflags=""):
+        ...
+
+    pool.spawn("d", ("b", "c"), builder, "-o2")
+    pool.spawn("e", ["c"], builder, linkflags="-pie")
+
+However, if the arguments to each builder() call are uniform (as in the
+original example), you could alternatively build a dict of the dependencies
+and call :meth:`spawn_many() <eventlet.dagpool.DAGPool.spawn_many>`:
+
+::
+
+    deps = dict(d=("b", "c"),
+                e=["c"],
+                b=("a", "zlib"),
+                c=["zlib"],
+                a=())
+    pool.spawn_many(deps, builder)
+
+From outside the DAGPool, you can obtain the results for d and e (or in fact
+for any of the build greenthreads) in any of several ways.
+
+:meth:`pool.waitall() <eventlet.dagpool.DAGPool.waitall>` waits until the last of the spawned
+greenthreads has completed, and returns a dict containing results for *all* of
+them:
+
+::
+
+    final = pool.waitall()
+    print("for d: {0}".format(final["d"]))
+    print("for e: {0}".format(final["e"]))
+
+waitall() is an alias for :meth:`wait() <eventlet.dagpool.DAGPool.wait>` with no arguments:
+
+::
+
+    final = pool.wait()
+    print("for d: {0}".format(final["d"]))
+    print("for e: {0}".format(final["e"]))
+
+Or you can specifically wait for only the final programs:
+
+::
+
+    final = pool.wait(["d", "e"])
+
+The returned dict will contain only the specified keys. The keys may be passed
+into wait() from any iterable, including a generator.
+
+You can wait for any specified set of greenthreads; they need not be
+topologically last:
+
+::
+
+    # returns as soon as both a and zlib have returned results, regardless of
+    # what else is still running
+    leaves = pool.wait(["a", "zlib"])
+
+Suppose you want to wait specifically for just *one* of the final programs:
+
+::
+
+    final = pool.wait(["d"])
+    dprog = final["d"]
+
+The above wait() call will return as soon as greenthread d returns a result --
+regardless of whether greenthread e has finished.
+
+:meth:`__getitem__() <eventlet.dagpool.DAGPool.__getitem__>` is shorthand for
+obtaining a single result:
+
+::
+
+    # waits until greenthread d returns its result
+    dprog = pool["d"]
+
+In contrast, :meth:`get() <eventlet.dagpool.DAGPool.get>` returns immediately,
+whether or not a result is ready:
+
+::
+
+    # returns immediately
+    if pool.get("d") is None:
+        ...
+
+Of course, your greenthread might not include an explicit return statement, in
+which case it implicitly returns None -- indistinguishable from "no result
+yet". In that case, pass get() a sentinel default and test for that instead.
+
+::
+
+    # returns immediately
+    if pool.get("d", "notdone") == "notdone":
+        ...
+
+Suppose you want to process each of the final programs in some way (upload
+it?), but you don't want to have to wait until they've both finished. You
+don't have to poll get() calls -- use :meth:`wait_each()
+<eventlet.dagpool.DAGPool.wait_each>`:
+
+::
+
+    for key, result in pool.wait_each(["d", "e"]):
+        # key will be d or e, in completion order
+        # ... process result ...
+        pass
+
+As with :meth:`wait() <eventlet.dagpool.DAGPool.wait>`, if you omit the
+argument to wait_each(), it delivers results for all the greenthreads of which
+it's aware:
+
+::
+
+    for key, result in pool.wait_each():
+        # key will be a, zlib, b, c, d, e, in whatever order each completes
+        # ... process its result ...
+        pass
+
+Introspection
+-------------
+
+Let's say you have set up a :class:`DAGPool <eventlet.dagpool.DAGPool>` with
+the dependencies shown above. To your consternation, your :meth:`waitall()
+<eventlet.dagpool.DAGPool.waitall>` call does not return! The DAGPool instance
+is stuck!
+
+You could change waitall() to :meth:`wait_each()
+<eventlet.dagpool.DAGPool.wait_each>`, and print each key as it becomes
+available:
+
+::
+
+    for key, result in pool.wait_each():
+        print("got result for {0}".format(key))
+        # ... process ...
+
+Once the build for a has completed, this produces:
+
+::
+
+    got result for a
+
+and then stops. Hmm!
+
+You can check the number of :meth:`running <eventlet.dagpool.DAGPool.running>`
+greenthreads:
+
+::
+
+    >>> print(pool.running())
+    4
+
+and the number of :meth:`waiting <eventlet.dagpool.DAGPool.waiting>`
+greenthreads:
+
+::
+
+    >>> print(pool.waiting())
+    4
+
+It's often more informative to ask *which* greenthreads are :meth:`still
+running <eventlet.dagpool.DAGPool.running_keys>`:
+
+::
+
+    >>> print(pool.running_keys())
+    ('c', 'b', 'e', 'd')
+
+but in this case, we already know a has completed.
+
+We can ask for all available results:
+
+::
+
+    >>> print(pool.keys())
+    ('a',)
+    >>> print(pool.items())
+    (('a', result_from_a),)
+
+The :meth:`keys() <eventlet.dagpool.DAGPool.keys>` and :meth:`items()
+<eventlet.dagpool.DAGPool.items>` methods only return keys and items for
+which results are actually available, reflecting the underlying dict.
+
+But what's blocking the works? What are we :meth:`waiting for
+<eventlet.dagpool.DAGPool.waiting_for>`?
+
+::
+
+    >>> print(pool.waiting_for("d"))
+    set(['c', 'b'])
+
+(waiting_for()'s optional argument is a *single* key.)
+
+That doesn't help much yet...
+
+::
+
+    >>> print(pool.waiting_for("b"))
+    set(['zlib'])
+    >>> print(pool.waiting_for("zlib"))
+    KeyError: 'zlib'
+
+Aha! We forgot to even include the zlib build when we were originally
+configuring this DAGPool!
+
+(For non-interactive use, it would be more informative to omit waiting_for()'s
+argument. This usage returns a dict indicating, for each greenthread key,
+which other keys it's waiting for.)
+
+::
+
+    from pprint import pprint
+    pprint(pool.waiting_for())
+
+    {'b': set(['zlib']), 'c': set(['zlib']), 'd': set(['b', 'c']), 'e': set(['c'])}
+
+In this case, a reasonable fix would be to spawn the zlib greenthread:
+
+::
+
+    pool.spawn("zlib", (), builder)
+
+Even if this is the last method call on this DAGPool instance, it should
+unblock all the rest of the DAGPool greenthreads.
+
+Posting
+-------
+
+If we happen to have zlib build results in hand already, though, we could
+:meth:`post() <eventlet.dagpool.DAGPool.post>` that result instead of
+rebuilding the library:
+
+::
+
+    pool.post("zlib", result_from_zlib)
+
+This, too, should unblock the rest of the DAGPool greenthreads.
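+
+Conversely, if we already know the zlib build cannot succeed, we can post a
+:class:`PropagateError <eventlet.dagpool.PropagateError>` under its key, so
+that any greenthread consuming that key fails promptly instead of blocking
+forever. A sketch (the RuntimeError is just an illustrative stand-in):
+
+::
+
+    from eventlet.dagpool import PropagateError
+
+    pool.post("zlib", PropagateError("zlib", RuntimeError("build skipped")))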
+
+Preloading
+----------
+
+If rebuilding takes nontrivial realtime, it might be useful to record partial
+results, so that in case of interruption you can restart from where you left
+off rather than having to rebuild everything prior to that point.
+
+You could iteratively :meth:`post() <eventlet.dagpool.DAGPool.post>` those
+prior results into a new DAGPool instance; alternatively you can
+:meth:`preload <eventlet.dagpool.DAGPool.__init__>` the :class:`DAGPool
+<eventlet.dagpool.DAGPool>` from an existing dict:
+
+::
+
+    pool = DAGPool(dict(a=result_from_a, zlib=result_from_zlib))
+
+Any DAGPool greenthreads that depend on either a or zlib can immediately
+consume those results.
+
+It also works to construct DAGPool with an iterable of (key, result) pairs.
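+
+For example, using the same results as above:
+
+::
+
+    pool = DAGPool([("a", result_from_a), ("zlib", result_from_zlib)])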
+
+Exception Propagation
+---------------------
+
+But what if we spawn a zlib build that fails? Suppose the zlib greenthread
+terminates with an exception? In that case none of b, c, d or e can proceed!
+Nor do we want to wait forever for them.
+
+::
+
+    dprog = pool["d"]
+    eventlet.dagpool.PropagateError: PropagateError(d): PropagateError: PropagateError(c): PropagateError: PropagateError(zlib): OriginalError
+
+DAGPool provides a :class:`PropagateError <eventlet.dagpool.PropagateError>`
+exception specifically to wrap such failures. If a DAGPool greenthread
+terminates with an Exception subclass, the DAGPool wraps that exception in a
+PropagateError instance whose *key* attribute is the key of the failing
+greenthread and whose *exc* attribute is the exception that terminated it.
+This PropagateError is stored as the result from that greenthread.
+
+Attempting to consume the result from a greenthread for which a PropagateError
+was stored raises that PropagateError.
+
+::
+
+    pool["zlib"]
+    eventlet.dagpool.PropagateError: PropagateError(zlib): OriginalError
+
+Thus, when greenthread c attempts to consume the result from zlib, the
+PropagateError for zlib is raised. Unless the builder function for greenthread
+c handles that PropagateError exception, that greenthread will itself
+terminate. That PropagateError will be wrapped in another PropagateError whose
+*key* attribute is c and whose *exc* attribute is the PropagateError for zlib.
+
+Similarly, when greenthread d attempts to consume the result from c, the
+PropagateError for c is raised. This in turn is wrapped in a PropagateError
+whose *key* is d and whose *exc* is the PropagateError for c.
+
+When someone attempts to consume the result from d, as shown above, the
+PropagateError for d is raised.
+
+You can programmatically chase the failure path to determine the original
+failure if desired:
+
+::
+
+    from eventlet.dagpool import PropagateError
+
+    try:
+        dprog = pool["d"]
+    except PropagateError as err:
+        orig_err = err
+        key = "unknown"
+        while isinstance(orig_err, PropagateError):
+            key = orig_err.key
+            orig_err = orig_err.exc
+
+Scanning for Success / Exceptions
+---------------------------------
+
+Exception propagation means that we neither perform useless builds nor wait for
+results that will never arrive.
+
+However, it does make it difficult to obtain *partial* results for builds that
+*did* succeed.
+
+For that you can call :meth:`wait_each_success()
+<eventlet.dagpool.DAGPool.wait_each_success>`:
+
+::
+
+    for key, result in pool.wait_each_success():
+        print("{0} succeeded".format(key))
+        # ... process result ...
+
+    a succeeded
+
+Another problem is that although five different greenthreads failed in the
+example, we only see one chain of failures. You can enumerate the bad news
+with :meth:`wait_each_exception() <eventlet.dagpool.DAGPool.wait_each_exception>`:
+
+::
+
+    for key, err in pool.wait_each_exception():
+        print("{0} failed with {1}".format(key, err.exc.__class__.__name__))
+
+    c failed with PropagateError
+    b failed with PropagateError
+    e failed with PropagateError
+    d failed with PropagateError
+    zlib failed with OriginalError
+
+wait_each_exception() yields each PropagateError wrapper as if it were the
+result, rather than raising it as an exception.
+
+Notice that we print :code:`err.exc.__class__.__name__` because
+:code:`err.__class__.__name__` is always PropagateError.
+
+Both wait_each_success() and wait_each_exception() can accept an iterable of
+keys to report:
+
+::
+
+    for key, result in pool.wait_each_success(["d", "e"]):
+        print("{0} succeeded".format(key))
+
+    (no output)
+
+    for key, err in pool.wait_each_exception(["d", "e"]):
+        print("{0} failed with {1}".format(key, err.exc.__class__.__name__))
+
+    e failed with PropagateError
+    d failed with PropagateError
+
+Both wait_each_success() and wait_each_exception() must wait until the
+greenthreads for all specified keys (or all keys) have terminated, one way or
+the other, because of course we can't know until then how to categorize each.
+
+Module Contents
+===============
+
+.. automodule:: eventlet.dagpool
+   :members:
diff --git a/doc/real_index.html b/doc/real_index.html
index d114657..b901779 100644
--- a/doc/real_index.html
+++ b/doc/real_index.html
@@ -54,7 +54,7 @@ pip install eventlet
 <p>Alternately, you can download the source archive:</p>
 <ul>
 <li>latest release from <a class="reference external" target="_blank" href="https://pypi.python.org/pypi/eventlet/">PyPi</a>:
-  <a class="reference external" href="https://pypi.python.org/packages/source/e/eventlet/eventlet-0.19.0.tar.gz">eventlet-0.19.0.tar.gz</a></li>
+  <a class="reference external" href="https://pypi.python.org/packages/source/e/eventlet/eventlet-0.20.0.tar.gz">eventlet-0.20.0.tar.gz</a></li>
 <li>or <a class="reference external" href="https://github.com/eventlet/eventlet/archive/master.zip">latest development version</a></li>
 </ul>
 
diff --git a/eventlet/__init__.py b/eventlet/__init__.py
index d084445..1ec67ad 100644
--- a/eventlet/__init__.py
+++ b/eventlet/__init__.py
@@ -1,4 +1,4 @@
-version_info = (0, 19, 0)
+version_info = (0, 20, 0)
 __version__ = '.'.join(map(str, version_info))
 
 try:
diff --git a/eventlet/convenience.py b/eventlet/convenience.py
index d634b2c..88343a9 100644
--- a/eventlet/convenience.py
+++ b/eventlet/convenience.py
@@ -40,6 +40,9 @@ def listen(addr, family=socket.AF_INET, backlog=50):
     sock = socket.socket(family, socket.SOCK_STREAM)
     if sys.platform[:3] != "win":
         sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    if hasattr(socket, 'SO_REUSEPORT'):
+        # NOTE(zhengwei): linux kernel >= 3.9
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
     sock.bind(addr)
     sock.listen(backlog)
     return sock
diff --git a/eventlet/dagpool.py b/eventlet/dagpool.py
new file mode 100644
index 0000000..618abf2
--- /dev/null
+++ b/eventlet/dagpool.py
@@ -0,0 +1,602 @@
+# @file   dagpool.py
+# @author Nat Goodspeed
+# @date   2016-08-08
+# @brief  Provide DAGPool class
+
+from eventlet.event import Event
+from eventlet import greenthread
+from eventlet.support import six
+import collections
+
+
+# value distinguished from any other Python value including None
+_MISSING = object()
+
+
+class Collision(Exception):
+    """
+    DAGPool raises Collision when you try to launch two greenthreads with the
+    same key, post() a result for a key already associated with a greenthread,
+    or post() twice for the same key. As with KeyError, str(collision) names
+    the key in question.
+    """
+    pass
+
+
+class PropagateError(Exception):
+    """
+    When a DAGPool greenthread terminates with an exception instead of
+    returning a result, attempting to retrieve its value raises
+    PropagateError.
+
+    Attributes:
+
+    key
+        the key of the greenthread which raised the exception
+
+    exc
+        the exception object raised by the greenthread
+    """
+    def __init__(self, key, exc):
+        # initialize base class with a reasonable string message
+        msg = "PropagateError({0}): {1}: {2}" \
+              .format(key, exc.__class__.__name__, exc)
+        super(PropagateError, self).__init__(msg)
+        self.msg = msg
+        # Unless we set args, this is unpickleable:
+        # https://bugs.python.org/issue1692335
+        self.args = (key, exc)
+        self.key = key
+        self.exc = exc
+
+    def __str__(self):
+        return self.msg
+
+
+class DAGPool(object):
+    """
+    A DAGPool is a pool that constrains greenthreads, not by max concurrency,
+    but by data dependencies.
+
+    This is a way to implement general DAG dependencies. A simple dependency
+    tree (flowing in either direction) can straightforwardly be implemented
+    using recursion and (e.g.)
+    :meth:`GreenThread.imap() <eventlet.greenthread.GreenThread.imap>`.
+    What gets complicated is when a given node depends on several other nodes
+    as well as contributing to several other nodes.
+
+    With DAGPool, you concurrently launch all applicable greenthreads; each
+    will proceed as soon as it has all required inputs. The DAG is implicit in
+    which items are required by each greenthread.
+
+    Each greenthread is launched in a DAGPool with a key: any value that can
+    serve as a Python dict key. The caller also specifies an iterable of other
+    keys on which this greenthread depends. This iterable may be empty.
+
+    The greenthread callable must accept (key, results), where:
+
+    key
+        is its own key
+
+    results
+        is an iterable of (key, value) pairs.
+
+    A newly-launched DAGPool greenthread is entered immediately, and can
+    perform any necessary setup work. At some point it will iterate over the
+    (key, value) pairs from the passed 'results' iterable. Doing so blocks the
+    greenthread until a value is available for each of the keys specified in
+    its initial dependencies iterable. These (key, value) pairs are delivered
+    in chronological order, *not* the order in which they are initially
+    specified: each value will be delivered as soon as it becomes available.
+
+    The value returned by a DAGPool greenthread becomes the value for its
+    key, which unblocks any other greenthreads waiting on that key.
+
+    If a DAGPool greenthread terminates with an exception instead of returning
+    a value, attempting to retrieve the value raises :class:`PropagateError`,
+    which binds the key of the original greenthread and the original
+    exception. Unless the greenthread attempting to retrieve the value handles
+    PropagateError, that exception will in turn be wrapped in a PropagateError
+    of its own, and so forth. The code that ultimately handles PropagateError
+    can follow the chain of PropagateError.exc attributes to discover the flow
+    of that exception through the DAG of greenthreads.
+
+    External greenthreads may also interact with a DAGPool. See :meth:`wait_each`,
+    :meth:`waitall`, :meth:`post`.
+
+    It is not recommended to constrain external DAGPool producer greenthreads
+    in a :class:`GreenPool <eventlet.greenpool.GreenPool>`: it may be hard to
+    provably avoid deadlock.
+
+    .. automethod:: __init__
+    .. automethod:: __getitem__
+    """
+
+    _Coro = collections.namedtuple("_Coro", ("greenthread", "pending"))
+
+    def __init__(self, preload={}):
+        """
+        DAGPool can be prepopulated with an initial dict or iterable of (key,
+        value) pairs. These (key, value) pairs are of course immediately
+        available for any greenthread that depends on any of those keys.
+        """
+        try:
+            # If a dict is passed, copy it. Don't risk a subsequent
+            # modification to passed dict affecting our internal state.
+            iteritems = six.iteritems(preload)
+        except AttributeError:
+            # Not a dict, just an iterable of (key, value) pairs
+            iteritems = preload
+
+        # Load the initial dict
+        self.values = dict(iteritems)
+
+        # track greenthreads
+        self.coros = {}
+
+        # The key to blocking greenthreads is the Event.
+        self.event = Event()
+
+    def waitall(self):
+        """
+        waitall() blocks the calling greenthread until there is a value for
+        every DAGPool greenthread launched by :meth:`spawn`. It returns a dict
+        containing all :class:`preload data <DAGPool>`, all data from
+        :meth:`post` and all values returned by spawned greenthreads.
+
+        See also :meth:`wait`.
+        """
+        # waitall() is an alias for compatibility with GreenPool
+        return self.wait()
+
+    def wait(self, keys=_MISSING):
+        """
+        *keys* is an optional iterable of keys. If you omit the argument, it
+        waits for all the keys from :class:`preload data <DAGPool>`, from
+        :meth:`post` calls and from :meth:`spawn` calls: in other words, all
+        the keys of which this DAGPool is aware.
+
+        wait() blocks the calling greenthread until all of the relevant keys
+        have values. wait() returns a dict whose keys are the relevant keys,
+        and whose values come from the *preload* data, from values returned by
+        DAGPool greenthreads or from :meth:`post` calls.
+
+        If a DAGPool greenthread terminates with an exception, wait() will
+        raise :class:`PropagateError` wrapping that exception. If more than
+        one greenthread terminates with an exception, it is indeterminate
+        which one wait() will raise.
+
+        If an external greenthread posts a :class:`PropagateError` instance,
+        wait() will raise that PropagateError. If more than one greenthread
+        posts PropagateError, it is indeterminate which one wait() will raise.
... 25027 lines suppressed ...

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/python-eventlet.git


