[Python-modules-commits] [python-docker] 01/05: Import python-docker_1.7.2.orig.tar.gz

Ondřej Nový onovy-guest@moszumanska.debian.org
Wed Mar 23 06:29:04 UTC 2016


This is an automated email from the git hooks/post-receive script.

onovy-guest pushed a commit to branch master
in repository python-docker.

commit 63875477fa2e0a529fcf52e36a3f9cb3db861000
Author: Ondřej Nový <novy@ondrej.org>
Date:   Wed Mar 23 07:23:09 2016 +0100

    Import python-docker_1.7.2.orig.tar.gz
---
 PKG-INFO                                  |    3 +-
 docker/api/build.py                       |   11 +-
 docker/api/container.py                   |   37 +-
 docker/api/exec_api.py                    |   19 +-
 docker/api/image.py                       |    5 +-
 docker/api/network.py                     |   30 +-
 docker/api/volume.py                      |    5 +-
 docker/auth/auth.py                       |  194 +--
 docker/client.py                          |   34 +-
 docker/constants.py                       |    2 +-
 docker/errors.py                          |    4 +-
 docker/ssladapter/ssladapter.py           |   26 +-
 docker/tls.py                             |   38 +-
 docker/unixconn/unixconn.py               |   14 +-
 docker/utils/__init__.py                  |    7 +-
 docker/utils/decorators.py                |   11 +
 docker/utils/utils.py                     |  403 ++++--
 docker/version.py                         |    2 +-
 docker_py.egg-info/PKG-INFO               |    3 +-
 docker_py.egg-info/SOURCES.txt            |   28 +-
 setup.cfg                                 |    3 +
 setup.py                                  |    1 -
 tests/base.py                             |   25 +
 tests/helpers.py                          |  148 +++
 tests/integration/__init__.py             |    0
 tests/integration/api_test.py             |  176 +++
 tests/integration/build_test.py           |  140 ++
 tests/integration/conftest.py             |   31 +
 tests/integration/container_test.py       | 1033 +++++++++++++++
 tests/integration/exec_test.py            |  130 ++
 tests/integration/image_test.py           |  235 ++++
 tests/integration/network_test.py         |  237 ++++
 tests/integration/regression_test.py      |   69 +
 tests/integration/volume_test.py          |   55 +
 tests/integration_test.py                 | 1971 -----------------------------
 tests/unit/__init__.py                    |    0
 tests/unit/api_test.py                    |  417 ++++++
 tests/unit/auth_test.py                   |  464 +++++++
 tests/unit/build_test.py                  |  105 ++
 tests/{test.py => unit/container_test.py} | 1668 ++++--------------------
 tests/unit/exec_test.py                   |   75 ++
 tests/{ => unit}/fake_api.py              |    2 +-
 tests/{ => unit}/fake_stat.py             |    0
 tests/unit/image_test.py                  |  346 +++++
 tests/unit/network_test.py                |  187 +++
 tests/{ => unit}/utils_test.py            |  602 +++++----
 tests/unit/volume_test.py                 |   96 ++
 47 files changed, 5160 insertions(+), 3932 deletions(-)

diff --git a/PKG-INFO b/PKG-INFO
index a387a6e..fd5c3e7 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: docker-py
-Version: 1.5.0
+Version: 1.7.2
 Summary: Python client for Docker.
 Home-page: https://github.com/docker/docker-py/
 Author: UNKNOWN
@@ -15,7 +15,6 @@ Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 2.6
 Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3.2
 Classifier: Programming Language :: Python :: 3.3
 Classifier: Programming Language :: Python :: 3.4
 Classifier: Topic :: Utilities
diff --git a/docker/api/build.py b/docker/api/build.py
index b303ba6..6bfaba1 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -1,6 +1,7 @@
 import logging
 import os
 import re
+import json
 
 from .. import constants
 from .. import errors
@@ -16,7 +17,7 @@ class BuildApiMixin(object):
               nocache=False, rm=False, stream=False, timeout=None,
               custom_context=False, encoding=None, pull=False,
               forcerm=False, dockerfile=None, container_limits=None,
-              decode=False):
+              decode=False, buildargs=None):
         remote = context = headers = None
         container_limits = container_limits or {}
         if path is None and fileobj is None:
@@ -71,6 +72,14 @@ class BuildApiMixin(object):
         }
         params.update(container_limits)
 
+        if buildargs:
+            if utils.version_gte(self._version, '1.21'):
+                params.update({'buildargs': json.dumps(buildargs)})
+            else:
+                raise errors.InvalidVersion(
+                    'buildargs was only introduced in API version 1.21'
+                )
+
         if context is not None:
             headers = {'Content-Type': 'application/tar'}
             if encoding:
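
Usage sketch for the new buildargs parameter added above (illustrative image tag and ARG value; requires a daemon speaking API 1.21 or newer):

    import docker

    client = docker.Client(version='1.21')
    # buildargs values feed ARG instructions in the Dockerfile
    for line in client.build(path='.', tag='example/app:latest',
                             buildargs={'HTTP_PROXY': 'http://10.0.0.1:3128'},
                             decode=True):
        print(line)
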
diff --git a/docker/api/container.py b/docker/api/container.py
index 142bd0f..ceac173 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -1,8 +1,10 @@
 import six
 import warnings
+from datetime import datetime
 
 from .. import errors
 from .. import utils
+from ..utils.utils import create_networking_config, create_endpoint_config
 
 
 class ContainerApiMixin(object):
@@ -96,7 +98,8 @@ class ContainerApiMixin(object):
                          network_disabled=False, name=None, entrypoint=None,
                          cpu_shares=None, working_dir=None, domainname=None,
                          memswap_limit=None, cpuset=None, host_config=None,
-                         mac_address=None, labels=None, volume_driver=None):
+                         mac_address=None, labels=None, volume_driver=None,
+                         stop_signal=None, networking_config=None):
 
         if isinstance(volumes, six.string_types):
             volumes = [volumes, ]
@@ -111,7 +114,7 @@ class ContainerApiMixin(object):
             tty, mem_limit, ports, environment, dns, volumes, volumes_from,
             network_disabled, entrypoint, cpu_shares, working_dir, domainname,
             memswap_limit, cpuset, host_config, mac_address, labels,
-            volume_driver
+            volume_driver, stop_signal, networking_config,
         )
         return self.create_container_from_config(config, name)
 
@@ -137,6 +140,12 @@ class ContainerApiMixin(object):
         kwargs['version'] = self._version
         return utils.create_host_config(*args, **kwargs)
 
+    def create_networking_config(self, *args, **kwargs):
+        return create_networking_config(*args, **kwargs)
+
+    def create_endpoint_config(self, *args, **kwargs):
+        return create_endpoint_config(self._version, *args, **kwargs)
+
     @utils.check_resource
     def diff(self, container):
         return self._result(
@@ -184,7 +193,7 @@ class ContainerApiMixin(object):
 
     @utils.check_resource
     def logs(self, container, stdout=True, stderr=True, stream=False,
-             timestamps=False, tail='all'):
+             timestamps=False, tail='all', since=None):
         if utils.compare_version('1.11', self._version) >= 0:
             params = {'stderr': stderr and 1 or 0,
                       'stdout': stdout and 1 or 0,
@@ -192,9 +201,20 @@ class ContainerApiMixin(object):
                       'follow': stream and 1 or 0,
                       }
             if utils.compare_version('1.13', self._version) >= 0:
-                if tail != 'all' and (not isinstance(tail, int) or tail <= 0):
+                if tail != 'all' and (not isinstance(tail, int) or tail < 0):
                     tail = 'all'
                 params['tail'] = tail
+
+            if since is not None:
+                if utils.compare_version('1.19', self._version) < 0:
+                    raise errors.InvalidVersion(
+                        'since is not supported in API < 1.19'
+                    )
+                else:
+                    if isinstance(since, datetime):
+                        params['since'] = utils.datetime_to_timestamp(since)
+                    elif (isinstance(since, int) and since > 0):
+                        params['since'] = since
             url = self._url("/containers/{0}/logs", container)
             res = self._get(url, params=params, stream=stream)
             return self._get_result(container, stream, res)
@@ -344,9 +364,14 @@ class ContainerApiMixin(object):
 
     @utils.minimum_version('1.17')
     @utils.check_resource
-    def stats(self, container, decode=None):
+    def stats(self, container, decode=None, stream=True):
         url = self._url("/containers/{0}/stats", container)
-        return self._stream_helper(self._get(url, stream=True), decode=decode)
+        if stream:
+            return self._stream_helper(self._get(url, stream=True),
+                                       decode=decode)
+        else:
+            return self._result(self._get(url, params={'stream': False}),
+                                json=True)
 
     @utils.check_resource
     def stop(self, container, timeout=10):
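
Usage sketch for the logs(since=...) and stats(stream=False) additions above (container name is illustrative; 'since' needs API 1.19+, stats needs 1.17+):

    from datetime import datetime, timedelta
    import docker

    client = docker.Client(version='1.21')
    # 'since' accepts a datetime or a positive unix timestamp
    recent = client.logs('my-container',
                         since=datetime.utcnow() - timedelta(minutes=5))
    # stream=False returns a single stats snapshot (dict) instead of a generator
    snapshot = client.stats('my-container', stream=False)
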
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index c66b9dd..f0e4afa 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -1,5 +1,3 @@
-import shlex
-
 import six
 
 from .. import errors
@@ -9,8 +7,8 @@ from .. import utils
 class ExecApiMixin(object):
     @utils.minimum_version('1.15')
     @utils.check_resource
-    def exec_create(self, container, cmd, stdout=True, stderr=True, tty=False,
-                    privileged=False, user=''):
+    def exec_create(self, container, cmd, stdout=True, stderr=True,
+                    stdin=False, tty=False, privileged=False, user=''):
         if privileged and utils.compare_version('1.19', self._version) < 0:
             raise errors.InvalidVersion(
                 'Privileged exec is not supported in API < 1.19'
@@ -20,14 +18,14 @@ class ExecApiMixin(object):
                 'User-specific exec is not supported in API < 1.19'
             )
         if isinstance(cmd, six.string_types):
-            cmd = shlex.split(str(cmd))
+            cmd = utils.split_command(cmd)
 
         data = {
             'Container': container,
             'User': user,
             'Privileged': privileged,
             'Tty': tty,
-            'AttachStdin': False,
+            'AttachStdin': stdin,
             'AttachStdout': stdout,
             'AttachStderr': stderr,
             'Cmd': cmd
@@ -55,7 +53,11 @@ class ExecApiMixin(object):
         self._raise_for_status(res)
 
     @utils.minimum_version('1.15')
-    def exec_start(self, exec_id, detach=False, tty=False, stream=False):
+    def exec_start(self, exec_id, detach=False, tty=False, stream=False,
+                   socket=False):
+        # we want opened socket if socket == True
+        if socket:
+            stream = True
         if isinstance(exec_id, dict):
             exec_id = exec_id.get('Id')
 
@@ -67,4 +69,7 @@ class ExecApiMixin(object):
         res = self._post_json(
             self._url('/exec/{0}/start', exec_id), data=data, stream=stream
         )
+
+        if socket:
+            return self._get_raw_response_socket(res)
         return self._get_result_tty(stream, res, tty)
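
Usage sketch for the new stdin/socket options on the exec API above (container name and command are illustrative):

    import docker

    client = docker.Client(version='1.21')
    # stdin=True sets AttachStdin on the exec instance
    exec_id = client.exec_create('my-container', 'cat', stdin=True, tty=True)
    # socket=True returns the raw response socket instead of a decoded stream
    sock = client.exec_start(exec_id, socket=True)
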
diff --git a/docker/api/image.py b/docker/api/image.py
index f891e21..8493b38 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -158,8 +158,6 @@ class ImageApiMixin(object):
         if not tag:
             repository, tag = utils.parse_repository_tag(repository)
         registry, repo_name = auth.resolve_repository_name(repository)
-        if repo_name.count(":") == 1:
-            repository, tag = repository.rsplit(":", 1)
 
         params = {
             'tag': tag,
@@ -174,7 +172,8 @@ class ImageApiMixin(object):
                 log.debug('Looking for auth config')
                 if not self._auth_configs:
                     log.debug(
-                        "No auth config in memory - loading from filesystem")
+                        "No auth config in memory - loading from filesystem"
+                    )
                     self._auth_configs = auth.load_config()
                 authcfg = auth.resolve_authconfig(self._auth_configs, registry)
                 # Do not fail here if no authentication exists for this
diff --git a/docker/api/network.py b/docker/api/network.py
index 2dea679..d9a6128 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -1,6 +1,6 @@
 import json
 
-from ..utils import check_resource, minimum_version
+from ..utils import check_resource, minimum_version, normalize_links
 
 
 class NetworkApiMixin(object):
@@ -19,10 +19,15 @@ class NetworkApiMixin(object):
         return self._result(res, json=True)
 
     @minimum_version('1.21')
-    def create_network(self, name, driver=None):
+    def create_network(self, name, driver=None, options=None, ipam=None):
+        if options is not None and not isinstance(options, dict):
+            raise TypeError('options must be a dictionary')
+
         data = {
-            'name': name,
-            'driver': driver,
+            'Name': name,
+            'Driver': driver,
+            'Options': options,
+            'IPAM': ipam,
         }
         url = self._url("/networks/create")
         res = self._post_json(url, data=data)
@@ -42,14 +47,23 @@ class NetworkApiMixin(object):
 
     @check_resource
     @minimum_version('1.21')
-    def connect_container_to_network(self, container, net_id):
-        data = {"container": container}
+    def connect_container_to_network(self, container, net_id,
+                                     aliases=None, links=None):
+        data = {
+            "Container": container,
+            "EndpointConfig": {
+                "Aliases": aliases,
+                "Links": normalize_links(links) if links else None,
+            },
+        }
         url = self._url("/networks/{0}/connect", net_id)
-        self._post_json(url, data=data)
+        res = self._post_json(url, data=data)
+        self._raise_for_status(res)
 
     @check_resource
     @minimum_version('1.21')
     def disconnect_container_from_network(self, container, net_id):
         data = {"container": container}
         url = self._url("/networks/{0}/disconnect", net_id)
-        self._post_json(url, data=data)
+        res = self._post_json(url, data=data)
+        self._raise_for_status(res)
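
Usage sketch for the extended network calls above (driver options, the IPAM block, aliases and links are illustrative values in the API 1.21 wire format):

    import docker

    client = docker.Client(version='1.21')
    net = client.create_network(
        'isolated_nw', driver='bridge',
        options={'com.docker.network.bridge.enable_icc': 'true'},
        ipam={'Driver': 'default',
              'Config': [{'Subnet': '192.168.52.0/24',
                          'Gateway': '192.168.52.254'}]},
    )
    client.connect_container_to_network(
        'my-container', net['Id'],
        aliases=['db'],
        links=[('my-container', 'db-alias')],  # normalized to "name:alias"
    )
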
diff --git a/docker/api/volume.py b/docker/api/volume.py
index e9e7127..bb8b39b 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -5,14 +5,14 @@ class VolumeApiMixin(object):
     @utils.minimum_version('1.21')
     def volumes(self, filters=None):
         params = {
-            'filter': utils.convert_filters(filters) if filters else None
+            'filters': utils.convert_filters(filters) if filters else None
         }
         url = self._url('/volumes')
         return self._result(self._get(url, params=params), True)
 
     @utils.minimum_version('1.21')
     def create_volume(self, name, driver=None, driver_opts=None):
-        url = self._url('/volumes')
+        url = self._url('/volumes/create')
         if driver_opts is not None and not isinstance(driver_opts, dict):
             raise TypeError('driver_opts must be a dictionary')
 
@@ -33,4 +33,3 @@ class VolumeApiMixin(object):
         url = self._url('/volumes/{0}', name)
         resp = self._delete(url)
         self._raise_for_status(resp)
-        return True
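
Usage sketch for the corrected volume endpoints above (create now POSTs to /volumes/create, listing uses the 'filters' query parameter, and remove_volume no longer returns True; names are illustrative):

    import docker

    client = docker.Client(version='1.21')
    vol = client.create_volume(name='mydata', driver='local')
    dangling = client.volumes(filters={'dangling': True})
    client.remove_volume('mydata')  # returns None, raises on failure
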
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
index 1ee9f81..eedb794 100644
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -13,18 +13,15 @@
 #    limitations under the License.
 
 import base64
-import fileinput
 import json
 import logging
 import os
-import warnings
 
 import six
 
-from .. import constants
 from .. import errors
 
-INDEX_NAME = 'index.docker.io'
+INDEX_NAME = 'docker.io'
 INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
@@ -32,31 +29,36 @@ LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
 log = logging.getLogger(__name__)
 
 
-def resolve_repository_name(repo_name, insecure=False):
-    if insecure:
-        warnings.warn(
-            constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(
-                'resolve_repository_name()'
-            ), DeprecationWarning
-        )
-
+def resolve_repository_name(repo_name):
     if '://' in repo_name:
         raise errors.InvalidRepository(
-            'Repository name cannot contain a scheme ({0})'.format(repo_name))
-    parts = repo_name.split('/', 1)
-    if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
-        # This is a docker index repo (ex: foo/bar or ubuntu)
-        return INDEX_NAME, repo_name
-    if len(parts) < 2:
-        raise errors.InvalidRepository(
-            'Invalid repository name ({0})'.format(repo_name))
+            'Repository name cannot contain a scheme ({0})'.format(repo_name)
+        )
 
-    if 'index.docker.io' in parts[0]:
+    index_name, remote_name = split_repo_name(repo_name)
+    if index_name[0] == '-' or index_name[-1] == '-':
         raise errors.InvalidRepository(
-            'Invalid repository name, try "{0}" instead'.format(parts[1])
+            'Invalid index name ({0}). Cannot begin or end with a'
+            ' hyphen.'.format(index_name)
         )
+    return resolve_index_name(index_name), remote_name
+
 
-    return parts[0], parts[1]
+def resolve_index_name(index_name):
+    index_name = convert_to_hostname(index_name)
+    if index_name == 'index.' + INDEX_NAME:
+        index_name = INDEX_NAME
+    return index_name
+
+
+def split_repo_name(repo_name):
+    parts = repo_name.split('/', 1)
+    if len(parts) == 1 or (
+        '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
+    ):
+        # This is a docker index repo (ex: username/foobar or ubuntu)
+        return INDEX_NAME, repo_name
+    return tuple(parts)
 
 
 def resolve_authconfig(authconfig, registry=None):
@@ -67,7 +69,7 @@ def resolve_authconfig(authconfig, registry=None):
     Returns None if no match was found.
     """
     # Default to the public index server
-    registry = convert_to_hostname(registry) if registry else INDEX_NAME
+    registry = resolve_index_name(registry) if registry else INDEX_NAME
     log.debug("Looking for auth entry for {0}".format(repr(registry)))
 
     if registry in authconfig:
@@ -75,7 +77,7 @@ def resolve_authconfig(authconfig, registry=None):
         return authconfig[registry]
 
     for key, config in six.iteritems(authconfig):
-        if convert_to_hostname(key) == registry:
+        if resolve_index_name(key) == registry:
             log.debug("Found {0}".format(repr(key)))
             return config
 
@@ -87,17 +89,12 @@ def convert_to_hostname(url):
     return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
 
 
-def encode_auth(auth_info):
-    return base64.b64encode(auth_info.get('username', '') + b':' +
-                            auth_info.get('password', ''))
-
-
 def decode_auth(auth):
     if isinstance(auth, six.string_types):
         auth = auth.encode('ascii')
     s = base64.b64decode(auth)
     login, pwd = s.split(b':', 1)
-    return login.decode('ascii'), pwd.decode('ascii')
+    return login.decode('utf8'), pwd.decode('utf8')
 
 
 def encode_header(auth):
@@ -105,12 +102,14 @@ def encode_header(auth):
     return base64.urlsafe_b64encode(auth_json)
 
 
-def parse_auth(entries):
+def parse_auth(entries, raise_on_error=False):
     """
     Parses authentication entries
 
     Args:
-      entries: Dict of authentication entries.
+      entries:        Dict of authentication entries.
+      raise_on_error: If set to true, an invalid format will raise
+                      InvalidConfigFile
 
     Returns:
       Authentication registry.
@@ -118,6 +117,19 @@ def parse_auth(entries):
 
     conf = {}
     for registry, entry in six.iteritems(entries):
+        if not (isinstance(entry, dict) and 'auth' in entry):
+            log.debug(
+                'Config entry for key {0} is not auth config'.format(registry)
+            )
+            # We sometimes fall back to parsing the whole config as if it was
+            # the auth config by itself, for legacy purposes. In that case, we
+            # fail silently and return an empty conf if any of the keys is not
+            # formatted properly.
+            if raise_on_error:
+                raise errors.InvalidConfigFile(
+                    'Invalid configuration for registry {0}'.format(registry)
+                )
+            return {}
         username, password = decode_auth(entry['auth'])
         log.debug(
             'Found entry (registry={0}, username={1})'
@@ -126,84 +138,90 @@ def parse_auth(entries):
         conf[registry] = {
             'username': username,
             'password': password,
-            'email': entry['email'],
+            'email': entry.get('email'),
             'serveraddress': registry,
         }
     return conf
 
 
+def find_config_file(config_path=None):
+    environment_path = os.path.join(
+        os.environ.get('DOCKER_CONFIG'),
+        os.path.basename(DOCKER_CONFIG_FILENAME)
+    ) if os.environ.get('DOCKER_CONFIG') else None
+
+    paths = [
+        config_path,  # 1
+        environment_path,  # 2
+        os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME),  # 3
+        os.path.join(
+            os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
+        )  # 4
+    ]
+
+    for path in paths:
+        if path and os.path.exists(path):
+            return path
+    return None
+
+
 def load_config(config_path=None):
     """
     Loads authentication data from a Docker configuration file in the given
     root directory or if config_path is passed use given path.
+    Lookup priority:
+        explicit config_path parameter > DOCKER_CONFIG environment variable >
+        ~/.docker/config.json > ~/.dockercfg
     """
-    conf = {}
-    data = None
-
-    # Prefer ~/.docker/config.json.
-    config_file = config_path or os.path.join(os.path.expanduser('~'),
-                                              DOCKER_CONFIG_FILENAME)
-
-    log.debug("Trying {0}".format(config_file))
-
-    if os.path.exists(config_file):
-        try:
-            with open(config_file) as f:
-                for section, data in six.iteritems(json.load(f)):
-                    if section != 'auths':
-                        continue
-                    log.debug("Found 'auths' section")
-                    return parse_auth(data)
-            log.debug("Couldn't find 'auths' section")
-        except (IOError, KeyError, ValueError) as e:
-            # Likely missing new Docker config file or it's in an
-            # unknown format, continue to attempt to read old location
-            # and format.
-            log.debug(e)
-            pass
-    else:
-        log.debug("File doesn't exist")
-
-    config_file = config_path or os.path.join(os.path.expanduser('~'),
-                                              LEGACY_DOCKER_CONFIG_FILENAME)
-
-    log.debug("Trying {0}".format(config_file))
+    config_file = find_config_file(config_path)
 
-    if not os.path.exists(config_file):
-        log.debug("File doesn't exist - returning empty config")
+    if not config_file:
+        log.debug("File doesn't exist")
         return {}
 
-    log.debug("Attempting to parse as JSON")
     try:
         with open(config_file) as f:
-            return parse_auth(json.load(f))
-    except Exception as e:
+            data = json.load(f)
+            res = {}
+            if data.get('auths'):
+                log.debug("Found 'auths' section")
+                res.update(parse_auth(data['auths'], raise_on_error=True))
+            if data.get('HttpHeaders'):
+                log.debug("Found 'HttpHeaders' section")
+                res.update({'HttpHeaders': data['HttpHeaders']})
+            if res:
+                return res
+            else:
+                log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
+                f.seek(0)
+                return parse_auth(json.load(f))
+    except (IOError, KeyError, ValueError) as e:
+        # Likely missing new Docker config file or it's in an
+        # unknown format, continue to attempt to read old location
+        # and format.
         log.debug(e)
-        pass
 
-    # If that fails, we assume the configuration file contains a single
-    # authentication token for the public registry in the following format:
-    #
-    # auth = AUTH_TOKEN
-    # email = email at domain.com
     log.debug("Attempting to parse legacy auth file format")
     try:
         data = []
-        for line in fileinput.input(config_file):
-            data.append(line.strip().split(' = ')[1])
-        if len(data) < 2:
-            # Not enough data
-            raise errors.InvalidConfigFile(
-                'Invalid or empty configuration file!')
+        with open(config_file) as f:
+            for line in f.readlines():
+                data.append(line.strip().split(' = ')[1])
+            if len(data) < 2:
+                # Not enough data
+                raise errors.InvalidConfigFile(
+                    'Invalid or empty configuration file!'
+                )
 
         username, password = decode_auth(data[0])
-        conf[INDEX_NAME] = {
-            'username': username,
-            'password': password,
-            'email': data[1],
-            'serveraddress': INDEX_URL,
+        return {
+            INDEX_NAME: {
+                'username': username,
+                'password': password,
+                'email': data[1],
+                'serveraddress': INDEX_URL,
+            }
         }
-        return conf
     except Exception as e:
         log.debug(e)
         pass
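
The rewritten load_config() above resolves the configuration file in a fixed priority order. A small sketch, assuming the docker.auth.auth module path and an illustrative DOCKER_CONFIG directory:

    import os
    from docker.auth import auth

    # Priority: explicit path > $DOCKER_CONFIG/config.json
    #           > ~/.docker/config.json > ~/.dockercfg
    os.environ['DOCKER_CONFIG'] = '/tmp/docker-config'   # directory, not a file
    cfg = auth.load_config()      # first existing candidate wins
    entry = auth.resolve_authconfig(cfg, registry='registry.example.com')
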
diff --git a/docker/client.py b/docker/client.py
index d219472..7d1f7c4 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -28,7 +28,7 @@ from . import errors
 from .auth import auth
 from .unixconn import unixconn
 from .ssladapter import ssladapter
-from .utils import utils, check_resource
+from .utils import utils, check_resource, update_headers
 from .tls import TLSConfig
 
 
@@ -45,17 +45,17 @@ class Client(
                  timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False):
         super(Client, self).__init__()
 
-        if tls and not base_url.startswith('https://'):
+        if tls and not base_url:
             raise errors.TLSParameterError(
-                'If using TLS, the base_url argument must begin with '
-                '"https://".')
+                'If using TLS, the base_url argument must be provided.'
+            )
 
         self.base_url = base_url
         self.timeout = timeout
 
         self._auth_configs = auth.load_config()
 
-        base_url = utils.parse_host(base_url, sys.platform)
+        base_url = utils.parse_host(base_url, sys.platform, tls=bool(tls))
         if base_url.startswith('http+unix://'):
             self._custom_adapter = unixconn.UnixAdapter(base_url, timeout)
             self.mount('http+docker://', self._custom_adapter)
@@ -103,15 +103,19 @@ class Client(
         kwargs.setdefault('timeout', self.timeout)
         return kwargs
 
+    @update_headers
     def _post(self, url, **kwargs):
         return self.post(url, **self._set_request_timeout(kwargs))
 
+    @update_headers
     def _get(self, url, **kwargs):
         return self.get(url, **self._set_request_timeout(kwargs))
 
+    @update_headers
     def _put(self, url, **kwargs):
         return self.put(url, **self._set_request_timeout(kwargs))
 
+    @update_headers
     def _delete(self, url, **kwargs):
         return self.delete(url, **self._set_request_timeout(kwargs))
 
@@ -188,6 +192,8 @@ class Client(
         self._raise_for_status(response)
         if six.PY3:
             sock = response.raw._fp.fp.raw
+            if self.base_url.startswith("https://"):
+                sock = sock._sock
         else:
             sock = response.raw._fp.fp._sock
         try:
@@ -244,10 +250,7 @@ class Client(
         # Disable timeout on the underlying socket to prevent
         # Read timed out(s) for long running processes
         socket = self._get_raw_response_socket(response)
-        if six.PY3:
-            socket._sock.settimeout(None)
-        else:
-            socket.settimeout(None)
+        self._disable_socket_timeout(socket)
 
         while True:
             header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
@@ -276,6 +279,19 @@ class Client(
         for out in response.iter_content(chunk_size=1, decode_unicode=True):
             yield out
 
+    def _disable_socket_timeout(self, socket):
+        """ Depending on the combination of python version and whether we're
+        connecting over http or https, we might need to access _sock, which
+        may or may not exist; or we may need to just settimeout on socket
+         itself, which also may or may not have settimeout on it.
+
+        To avoid missing the correct one, we try both.
+        """
+        if hasattr(socket, "settimeout"):
+            socket.settimeout(None)
+        if hasattr(socket, "_sock") and hasattr(socket._sock, "settimeout"):
+            socket._sock.settimeout(None)
+
     def _get_result(self, container, stream, res):
         cont = self.inspect_container(container)
         return self._get_result_tty(stream, res, cont['Config']['Tty'])
diff --git a/docker/constants.py b/docker/constants.py
index 3647a3b..0627ba0 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,4 +1,4 @@
-DEFAULT_DOCKER_API_VERSION = '1.20'
+DEFAULT_DOCKER_API_VERSION = '1.21'
 DEFAULT_TIMEOUT_SECONDS = 60
 STREAM_HEADER_SIZE_BYTES = 8
 CONTAINER_LIMITS_KEYS = [
diff --git a/docker/errors.py b/docker/errors.py
index 066406a..e85910c 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -80,8 +80,8 @@ class TLSParameterError(DockerException):
     def __str__(self):
         return self.msg + (". TLS configurations should map the Docker CLI "
                            "client configurations. See "
-                           "http://docs.docker.com/examples/https/ for "
-                           "API details.")
+                           "https://docs.docker.com/engine/articles/https/ "
+                           "for API details.")
 
 
 class NullResource(DockerException, ValueError):
diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py
index 3a70a91..5b43aa2 100644
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/ssladapter/ssladapter.py
@@ -4,7 +4,6 @@
 """
 from distutils.version import StrictVersion
 from requests.adapters import HTTPAdapter
-import ssl
 
 try:
     import requests.packages.urllib3 as urllib3
@@ -14,20 +13,10 @@ except ImportError:
 PoolManager = urllib3.poolmanager.PoolManager
 
 
-def get_max_tls_protocol():
-    protocols = ('PROTOCOL_TLSv1_2',
-                 'PROTOCOL_TLSv1_1',
-                 'PROTOCOL_TLSv1')
-    for proto in protocols:
-        if hasattr(ssl, proto):
-            return getattr(ssl, proto)
-
-
 class SSLAdapter(HTTPAdapter):
     '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
     def __init__(self, ssl_version=None, assert_hostname=None,
                  assert_fingerprint=None, **kwargs):
-        ssl_version = ssl_version or get_max_tls_protocol()
         self.ssl_version = ssl_version
         self.assert_hostname = assert_hostname
         self.assert_fingerprint = assert_fingerprint
@@ -41,11 +30,24 @@ class SSLAdapter(HTTPAdapter):
             'assert_hostname': self.assert_hostname,
             'assert_fingerprint': self.assert_fingerprint,
         }
-        if self.can_override_ssl_version():
+        if self.ssl_version and self.can_override_ssl_version():
             kwargs['ssl_version'] = self.ssl_version
 
         self.poolmanager = PoolManager(**kwargs)
 
+    def get_connection(self, *args, **kwargs):
+        """
+        Ensure assert_hostname is set correctly on our pool
+
+        We already take care of a normal poolmanager via init_poolmanager
+
+        But we still need to take care of when there is a proxy poolmanager
+        """
+        conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
+        if conn.assert_hostname != self.assert_hostname:
+            conn.assert_hostname = self.assert_hostname
+        return conn
+
     def can_override_ssl_version(self):
         urllib_ver = urllib3.__version__.split('-')[0]
         if urllib_ver is None:
diff --git a/docker/tls.py b/docker/tls.py
index d888b7d..83b0ff7 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -6,6 +6,7 @@ from .ssladapter import ssladapter
 
 class TLSConfig(object):
     cert = None
+    ca_cert = None
     verify = None
     ssl_version = None
 
@@ -13,16 +14,11 @@ class TLSConfig(object):
                  ssl_version=None, assert_hostname=None,
                  assert_fingerprint=None):
         # Argument compatibility/mapping with
-        # http://docs.docker.com/examples/https/
+        # https://docs.docker.com/engine/articles/https/
         # This diverges from the Docker CLI in that users can specify 'tls'
         # here, but also disable any public/default CA pool verification by
         # leaving tls_verify=False
 
-        # urllib3 sets a default ssl_version if ssl_version is None,
-        # but that default is the vulnerable PROTOCOL_SSLv23 selection,
-        # so we override the default with the maximum supported in the running
-        # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)
-        ssl_version = ssl_version or ssladapter.get_max_tls_protocol()
         self.ssl_version = ssl_version
         self.assert_hostname = assert_hostname
         self.assert_fingerprint = assert_fingerprint
@@ -48,29 +44,25 @@ class TLSConfig(object):
                 )
             self.cert = (tls_cert, tls_key)
 
-        # Either set verify to True (public/default CA checks) or to the
-        # path of a CA Cert file.
-        if verify is not None:
-            if not ca_cert:
-                self.verify = verify
-            elif os.path.isfile(ca_cert):
-                if not verify:
-                    raise errors.TLSParameterError(
-                        'verify can not be False when a CA cert is'
-                        ' provided.'
-                    )
-                self.verify = ca_cert
-            else:
-                raise errors.TLSParameterError(
-                    'Invalid CA certificate provided for `tls_ca_cert`.'
-                )
+        # If verify is set, make sure the cert exists
+        self.verify = verify
+        self.ca_cert = ca_cert
+        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
+            raise errors.TLSParameterError(
+                'Invalid CA certificate provided for `tls_ca_cert`.'
+            )
 
     def configure_client(self, client):
         client.ssl_version = self.ssl_version
-        if self.verify is not None:
+
+        if self.verify and self.ca_cert:
+            client.verify = self.ca_cert
+        else:
             client.verify = self.verify
+
         if self.cert:
             client.cert = self.cert
+
         client.mount('https://', ssladapter.SSLAdapter(
             ssl_version=self.ssl_version,
             assert_hostname=self.assert_hostname,
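
With the changes above, TLSConfig no longer forces a maximum TLS protocol and a TLS-enabled Client only requires that base_url be set. A minimal sketch, assuming the usual client_cert/ca_cert keyword names (host and paths are illustrative):

    import docker
    from docker.tls import TLSConfig

    tls_config = TLSConfig(
        client_cert=('/certs/cert.pem', '/certs/key.pem'),
        ca_cert='/certs/ca.pem',
        verify=True,               # verify against the provided CA bundle
        assert_hostname=False,     # optional: skip hostname checking
    )
    client = docker.Client(base_url='https://dockerhost.example.com:2376',
                           tls=tls_config, version='1.21')
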
diff --git a/docker/unixconn/unixconn.py b/docker/unixconn/unixconn.py
index 551bd29..d7e249e 100644
--- a/docker/unixconn/unixconn.py
+++ b/docker/unixconn/unixconn.py
@@ -73,12 +73,20 @@ class UnixAdapter(requests.adapters.HTTPAdapter):
             if pool:
                 return pool
 
-            pool = UnixHTTPConnectionPool(url,
-                                          self.socket_path,
-                                          self.timeout)
+            pool = UnixHTTPConnectionPool(
+                url, self.socket_path, self.timeout
+            )
             self.pools[url] = pool
 
         return pool
 
+    def request_url(self, request, proxies):
+        # The select_proxy utility in requests errors out when the provided URL
+        # doesn't have a hostname, like is the case when using a UNIX socket.
+        # Since proxies are an irrelevant notion in the case of UNIX sockets
+        # anyway, we simply return the path URL directly.
+        # See also: https://github.com/docker/docker-py/issues/811
+        return request.path_url
+
     def close(self):
         self.pools.clear()
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index 92e03e9..ccc3819 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,10 +1,11 @@
 from .utils import (
     compare_version, convert_port_bindings, convert_volume_binds,
     mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
-    kwargs_from_env, convert_filters, create_host_config,
+    kwargs_from_env, convert_filters, datetime_to_timestamp, create_host_config,
     create_container_config, parse_bytes, ping_registry, parse_env_file,
-    version_lt, version_gte, decode_json_header
+    version_lt, version_gte, decode_json_header, split_command,
+    create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
 ) # flake8: noqa
 
 from .types import Ulimit, LogConfig # flake8: noqa
-from .decorators import check_resource, minimum_version #flake8: noqa
+from .decorators import check_resource, minimum_version, update_headers #flake8: noqa
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index 7d3b01a..7c41a5f 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -35,3 +35,14 @@ def minimum_version(version):
             return f(self, *args, **kwargs)
         return wrapper
     return decorator
+
+
+def update_headers(f):
+    def inner(self, *args, **kwargs):
+        if 'HttpHeaders' in self._auth_configs:
+            if 'headers' not in kwargs:
+                kwargs['headers'] = self._auth_configs['HttpHeaders']
+            else:
+                kwargs['headers'].update(self._auth_configs['HttpHeaders'])
+        return f(self, *args, **kwargs)
+    return inner
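
The new update_headers decorator above merges any HttpHeaders section of the Docker config file into every request. A sketch of the effect, with an illustrative config file and header name:

    # ~/.docker/config.json (illustrative):
    # {
    #   "HttpHeaders": { "X-Meta-Source": "ci-runner-42" }
    # }

    import docker

    client = docker.Client(version='1.21')
    # load_config() stored the section on the client at construction time...
    print(client._auth_configs.get('HttpHeaders'))
    # ...and _get/_post/_put/_delete now inject it into the request headers,
    # so the extra header rides along with any API call, e.g.:
    client.version()
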
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 89837b7..6fcf037 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -44,6 +44,23 @@ BYTE_UNITS = {
 }
 
 
... 9414 lines suppressed ...

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/python-docker.git


