[Python-modules-commits] [python-cassandra-driver] 01/09: Import python-cassandra-driver_3.4.1.orig.tar.gz
Sandro Tosi
morph at moszumanska.debian.org
Sun Jun 12 22:23:43 UTC 2016
This is an automated email from the git hooks/post-receive script.
morph pushed a commit to branch master
in repository python-cassandra-driver.
commit 35a3c40c4b3bb9baccaca82d184b648363317cc6
Author: Sandro Tosi <morph at debian.org>
Date: Sat Jun 11 21:21:40 2016 +0100
Import python-cassandra-driver_3.4.1.orig.tar.gz
---
MANIFEST.in | 5 +
PKG-INFO | 30 +-
README.rst | 28 +-
cassandra/__init__.py | 217 ++-
cassandra/auth.py | 13 +-
cassandra/buffer.pxd | 58 +
cassandra/{io/__init__.py => bytesio.pxd} | 8 +-
cassandra/bytesio.pyx | 44 +
cassandra/cluster.py | 1350 +++++++++++------
cassandra/{murmur3.c => cmurmur3.c} | 37 +-
cassandra/concurrent.py | 266 ++--
cassandra/connection.py | 474 ++++--
cassandra/cqlengine/__init__.py | 6 +-
cassandra/cqlengine/columns.py | 500 +++----
cassandra/cqlengine/connection.py | 82 +-
cassandra/cqlengine/functions.py | 64 +-
cassandra/cqlengine/management.py | 375 ++---
cassandra/cqlengine/models.py | 309 ++--
cassandra/cqlengine/named.py | 28 +-
cassandra/cqlengine/operators.py | 50 +-
cassandra/cqlengine/query.py | 571 ++++---
cassandra/cqlengine/statements.py | 332 +++--
cassandra/cqlengine/usertype.py | 32 +-
cassandra/cqltypes.py | 292 ++--
cassandra/cython_deps.py | 11 +
cassandra/cython_marshal.pyx | 84 ++
cassandra/cython_utils.pxd | 2 +
cassandra/cython_utils.pyx | 62 +
cassandra/decoder.py | 58 -
cassandra/deserializers.pxd | 43 +
cassandra/deserializers.pyx | 521 +++++++
cassandra/encoder.py | 34 +-
cassandra/io/__init__.py | 2 +-
cassandra/io/asyncorereactor.py | 281 ++--
cassandra/io/eventletreactor.py | 126 +-
cassandra/io/geventreactor.py | 139 +-
cassandra/io/libevreactor.py | 107 +-
cassandra/io/libevwrapper.c | 133 ++
cassandra/io/twistedreactor.py | 88 +-
cassandra/ioutils.pyx | 47 +
cassandra/marshal.py | 8 +-
cassandra/metadata.py | 2060 +++++++++++++++++++-------
cassandra/metrics.py | 2 +-
cassandra/murmur3.py | 113 ++
cassandra/numpyFlags.h | 1 +
cassandra/numpy_parser.pyx | 175 +++
cassandra/obj_parser.pyx | 75 +
cassandra/{io/__init__.py => parsing.pxd} | 18 +-
cassandra/parsing.pyx | 44 +
cassandra/policies.py | 148 +-
cassandra/pool.py | 120 +-
cassandra/protocol.py | 451 ++++--
cassandra/query.py | 341 +++--
cassandra/row_parser.pyx | 38 +
cassandra/tuple.pxd | 41 +
cassandra/{io/__init__.py => type_codes.pxd} | 30 +-
cassandra/type_codes.py | 62 +
cassandra/util.py | 546 ++++---
cassandra_driver.egg-info/PKG-INFO | 30 +-
cassandra_driver.egg-info/SOURCES.txt | 25 +-
cassandra_driver.egg-info/pbr.json | 1 +
cassandra_driver.egg-info/requires.txt | 2 +-
cassandra_driver.egg-info/top_level.txt | 1 +
setup.cfg | 10 +
setup.py | 260 +++-
65 files changed, 8034 insertions(+), 3477 deletions(-)
diff --git a/MANIFEST.in b/MANIFEST.in
index 1825f7b..660db71 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1 +1,6 @@
include setup.py README.rst MANIFEST.in LICENSE ez_setup.py
+include cassandra/cmurmur3.c
+include cassandra/io/libevwrapper.c
+include cassandra/*.pyx
+include cassandra/*.pxd
+include cassandra/*.h
diff --git a/PKG-INFO b/PKG-INFO
index 6571faa..d1b5ebc 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cassandra-driver
-Version: 2.5.1
+Version: 3.4.1
Summary: Python driver for Cassandra
Home-page: http://github.com/datastax/python-driver
Author: Tyler Hobbs
@@ -12,13 +12,25 @@ Description: DataStax Python Driver for Apache Cassandra
.. image:: https://travis-ci.org/datastax/python-driver.png?branch=master
:target: https://travis-ci.org/datastax/python-driver
- A Python client driver for Apache Cassandra. This driver works exclusively
- with the Cassandra Query Language v3 (CQL3) and Cassandra's native
- protocol. Cassandra versions 1.2 through 2.1 are supported.
+ A modern, `feature-rich <https://github.com/datastax/python-driver#features>`_ and highly-tunable Python client library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3.
- The driver supports Python 2.6, 2.7, 3.3, and 3.4*.
+ The driver supports Python 2.6, 2.7, 3.3, and 3.4.
- * cqlengine component presently supports Python 2.7+
+ Feedback Requested
+ ------------------
+ **Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey <https://docs.google.com/a/datastax.com/forms/d/10wkbKLqmqs91gvhFW5u43y60pg_geZDolVNrxfO5_48/viewform>`_ (we kept it short).
+
+ Features
+ --------
+ * `Synchronous <http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Session.execute>`_ and `Asynchronous <http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Session.execute_async>`_ APIs
+ * `Simple, Prepared, and Batch statements <http://datastax.github.io/python-driver/api/cassandra/query.html#cassandra.query.Statement>`_
+ * Asynchronous IO, parallel execution, request pipelining
+ * `Connection pooling <http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.get_core_connections_per_host>`_
+ * Automatic node discovery
+ * `Automatic reconnection <http://datastax.github.io/python-driver/api/cassandra/policies.html#reconnecting-to-dead-hosts>`_
+ * Configurable `load balancing <http://datastax.github.io/python-driver/api/cassandra/policies.html#load-balancing>`_ and `retry policies <http://datastax.github.io/python-driver/api/cassandra/policies.html#retrying-failed-operations>`_
+ * `Concurrent execution utilities <http://datastax.github.io/python-driver/api/cassandra/concurrent.html>`_
+ * `Object mapper <http://datastax.github.io/python-driver/object_mapper.html>`_
Installation
------------
@@ -46,6 +58,10 @@ Description: DataStax Python Driver for Apache Cassandra
community) is now maintained as an integral part of this package. Refer to
`documentation here <http://datastax.github.io/python-driver/object_mapper.html>`_.
+ Contributing
+ ------------
+ See `CONTRIBUTING.rst <https://github.com/datastax/python-driver/blob/master/CONTRIBUTING.rst>`_.
+
Reporting Problems
------------------
Please report any bugs and make any feature requests on the
@@ -68,7 +84,7 @@ Description: DataStax Python Driver for Apache Cassandra
License
-------
- Copyright 2013-2015 DataStax
+ Copyright 2013-2016 DataStax
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/README.rst b/README.rst
index e976f93..88d50da 100644
--- a/README.rst
+++ b/README.rst
@@ -4,13 +4,25 @@ DataStax Python Driver for Apache Cassandra
.. image:: https://travis-ci.org/datastax/python-driver.png?branch=master
:target: https://travis-ci.org/datastax/python-driver
-A Python client driver for Apache Cassandra. This driver works exclusively
-with the Cassandra Query Language v3 (CQL3) and Cassandra's native
-protocol. Cassandra versions 1.2 through 2.1 are supported.
+A modern, `feature-rich <https://github.com/datastax/python-driver#features>`_ and highly-tunable Python client library for Apache Cassandra (1.2+) and DataStax Enterprise (3.1+) using exclusively Cassandra's binary protocol and Cassandra Query Language v3.
-The driver supports Python 2.6, 2.7, 3.3, and 3.4*.
+The driver supports Python 2.6, 2.7, 3.3, and 3.4.
-* cqlengine component presently supports Python 2.7+
+Feedback Requested
+------------------
+**Help us focus our efforts!** Provide your input on the `Platform and Runtime Survey <https://docs.google.com/a/datastax.com/forms/d/10wkbKLqmqs91gvhFW5u43y60pg_geZDolVNrxfO5_48/viewform>`_ (we kept it short).
+
+Features
+--------
+* `Synchronous <http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Session.execute>`_ and `Asynchronous <http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Session.execute_async>`_ APIs
+* `Simple, Prepared, and Batch statements <http://datastax.github.io/python-driver/api/cassandra/query.html#cassandra.query.Statement>`_
+* Asynchronous IO, parallel execution, request pipelining
+* `Connection pooling <http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.get_core_connections_per_host>`_
+* Automatic node discovery
+* `Automatic reconnection <http://datastax.github.io/python-driver/api/cassandra/policies.html#reconnecting-to-dead-hosts>`_
+* Configurable `load balancing <http://datastax.github.io/python-driver/api/cassandra/policies.html#load-balancing>`_ and `retry policies <http://datastax.github.io/python-driver/api/cassandra/policies.html#retrying-failed-operations>`_
+* `Concurrent execution utilities <http://datastax.github.io/python-driver/api/cassandra/concurrent.html>`_
+* `Object mapper <http://datastax.github.io/python-driver/object_mapper.html>`_
Installation
------------
@@ -38,6 +50,10 @@ cqlengine (originally developed by Blake Eggleston and Jon Haddad, with contribu
community) is now maintained as an integral part of this package. Refer to
`documentation here <http://datastax.github.io/python-driver/object_mapper.html>`_.
+Contributing
+------------
+See `CONTRIBUTING.rst <https://github.com/datastax/python-driver/blob/master/CONTRIBUTING.rst>`_.
+
Reporting Problems
------------------
Please report any bugs and make any feature requests on the
@@ -60,7 +76,7 @@ Features to be Added
License
-------
-Copyright 2013-2015 DataStax
+Copyright 2013-2016 DataStax
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/cassandra/__init__.py b/cassandra/__init__.py
index 2be5700..81172c8 100644
--- a/cassandra/__init__.py
+++ b/cassandra/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2013-2015 DataStax, Inc.
+# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,8 +22,7 @@ class NullHandler(logging.Handler):
logging.getLogger('cassandra').addHandler(NullHandler())
-
-__version_info__ = (2, 5, 1)
+__version_info__ = (3, 4, 1)
__version__ = '.'.join(map(str, __version_info__))
@@ -126,7 +125,90 @@ def consistency_value_to_name(value):
return ConsistencyLevel.value_to_name[value] if value is not None else "Not Set"
-class Unavailable(Exception):
+class SchemaChangeType(object):
+ DROPPED = 'DROPPED'
+ CREATED = 'CREATED'
+ UPDATED = 'UPDATED'
+
+
+class SchemaTargetType(object):
+ KEYSPACE = 'KEYSPACE'
+ TABLE = 'TABLE'
+ TYPE = 'TYPE'
+ FUNCTION = 'FUNCTION'
+ AGGREGATE = 'AGGREGATE'
+
+
+class SignatureDescriptor(object):
+
+ def __init__(self, name, argument_types):
+ self.name = name
+ self.argument_types = argument_types
+
+ @property
+ def signature(self):
+ """
+ function signature string in the form 'name([type0[,type1[...]]])'
+
+ can be used to uniquely identify overloaded function names within a keyspace
+ """
+ return self.format_signature(self.name, self.argument_types)
+
+ @staticmethod
+ def format_signature(name, argument_types):
+ return "%s(%s)" % (name, ','.join(t for t in argument_types))
+
+ def __repr__(self):
+ return "%s(%s, %s)" % (self.__class__.__name__, self.name, self.argument_types)
+
+
+class UserFunctionDescriptor(SignatureDescriptor):
+ """
+ Describes a User function by name and argument signature
+ """
+
+ name = None
+ """
+ name of the function
+ """
+
+ argument_types = None
+ """
+ Ordered list of CQL argument type names comprising the type signature
+ """
+
+
+class UserAggregateDescriptor(SignatureDescriptor):
+ """
+ Describes a User aggregate function by name and argument signature
+ """
+
+ name = None
+ """
+ name of the aggregate
+ """
+
+ argument_types = None
+ """
+ Ordered list of CQL argument type names comprising the type signature
+ """
+
+
+class DriverException(Exception):
+ """
+ Base for all exceptions explicitly raised by the driver.
+ """
+ pass
+
+
+class RequestExecutionException(DriverException):
+ """
+ Base for request execution exceptions returned from the server.
+ """
+ pass
+
+
+class Unavailable(RequestExecutionException):
"""
There were not enough live replicas to satisfy the requested consistency
level, so the coordinator node immediately failed the request without
@@ -152,7 +234,7 @@ class Unavailable(Exception):
'alive_replicas': alive_replicas}))
-class Timeout(Exception):
+class Timeout(RequestExecutionException):
"""
Replicas failed to respond to the coordinator node before timing out.
"""
@@ -221,7 +303,118 @@ class WriteTimeout(Timeout):
self.write_type = write_type
-class AlreadyExists(Exception):
+class CoordinationFailure(RequestExecutionException):
+ """
+ Replicas sent a failure to the coordinator.
+ """
+
+ consistency = None
+ """ The requested :class:`ConsistencyLevel` """
+
+ required_responses = None
+ """ The number of required replica responses """
+
+ received_responses = None
+ """
+ The number of replicas that responded before the coordinator timed out
+ the operation
+ """
+
+ failures = None
+ """
+ The number of replicas that sent a failure message
+ """
+
+ def __init__(self, summary_message, consistency=None, required_responses=None, received_responses=None, failures=None):
+ self.consistency = consistency
+ self.required_responses = required_responses
+ self.received_responses = received_responses
+ self.failures = failures
+ Exception.__init__(self, summary_message + ' info=' +
+ repr({'consistency': consistency_value_to_name(consistency),
+ 'required_responses': required_responses,
+ 'received_responses': received_responses,
+ 'failures': failures}))
+
+
+class ReadFailure(CoordinationFailure):
+ """
+ A subclass of :exc:`CoordinationFailure` for read operations.
+
+ This indicates that the replicas sent a failure message to the coordinator.
+ """
+
+ data_retrieved = None
+ """
+ A boolean indicating whether the requested data was retrieved
+ by the coordinator from any replicas before it timed out the
+ operation
+ """
+
+ def __init__(self, message, data_retrieved=None, **kwargs):
+ CoordinationFailure.__init__(self, message, **kwargs)
+ self.data_retrieved = data_retrieved
+
+
+class WriteFailure(CoordinationFailure):
+ """
+ A subclass of :exc:`CoordinationFailure` for write operations.
+
+ This indicates that the replicas sent a failure message to the coordinator.
+ """
+
+ write_type = None
+ """
+ The type of write operation, enum on :class:`~cassandra.policies.WriteType`
+ """
+
+ def __init__(self, message, write_type=None, **kwargs):
+ CoordinationFailure.__init__(self, message, **kwargs)
+ self.write_type = write_type
+
+
+class FunctionFailure(RequestExecutionException):
+ """
+ User Defined Function failed during execution
+ """
+
+ keyspace = None
+ """
+ Keyspace of the function
+ """
+
+ function = None
+ """
+ Name of the function
+ """
+
+ arg_types = None
+ """
+ List of argument type names of the function
+ """
+
+ def __init__(self, summary_message, keyspace, function, arg_types):
+ self.keyspace = keyspace
+ self.function = function
+ self.arg_types = arg_types
+ Exception.__init__(self, summary_message)
+
+
+class RequestValidationException(DriverException):
+ """
+ Server request validation failed
+ """
+ pass
+
+
+class ConfigurationException(RequestValidationException):
+ """
+ Server indicated request error due to current configuration
+ """
+ pass
+
+
+class AlreadyExists(ConfigurationException):
"""
An attempt was made to create a keyspace or table that already exists.
"""
@@ -249,7 +442,7 @@ class AlreadyExists(Exception):
self.table = table
-class InvalidRequest(Exception):
+class InvalidRequest(RequestValidationException):
"""
A query was made that was invalid for some reason, such as trying to set
the keyspace for a connection to a nonexistent keyspace.
@@ -257,21 +450,21 @@ class InvalidRequest(Exception):
pass
-class Unauthorized(Exception):
+class Unauthorized(RequestValidationException):
"""
- The current user is not authorized to perfom the requested operation.
+ The current user is not authorized to perform the requested operation.
"""
pass
-class AuthenticationFailed(Exception):
+class AuthenticationFailed(DriverException):
"""
Failed to authenticate.
"""
pass
-class OperationTimedOut(Exception):
+class OperationTimedOut(DriverException):
"""
The operation took longer than the specified (client-side) timeout
to complete. This is not an error generated by Cassandra, only
@@ -295,7 +488,7 @@ class OperationTimedOut(Exception):
Exception.__init__(self, message)
-class UnsupportedOperation(Exception):
+class UnsupportedOperation(DriverException):
"""
An attempt was made to use a feature that is not supported by the
selected protocol version. See :attr:`Cluster.protocol_version`
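For orientation, an illustrative sketch of catching the new exception base classes added above (the contact point is the documented default; the keyspace and table names are placeholders):

    from cassandra import (DriverException, RequestExecutionException,
                           RequestValidationException)
    from cassandra.cluster import Cluster

    cluster = Cluster(['127.0.0.1'])
    session = cluster.connect()
    try:
        session.execute("SELECT * FROM example_ks.example_table")
    except RequestValidationException as exc:
        # e.g. InvalidRequest, Unauthorized, AlreadyExists
        print("request rejected by the server:", exc)
    except RequestExecutionException as exc:
        # e.g. Unavailable, ReadTimeout/WriteTimeout, ReadFailure/WriteFailure,
        # FunctionFailure
        print("request failed during execution:", exc)
    except DriverException as exc:
        # catch-all for driver-raised errors (OperationTimedOut,
        # AuthenticationFailed, ...)
        print("driver error:", exc)
    finally:
        cluster.shutdown()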
diff --git a/cassandra/auth.py b/cassandra/auth.py
index 67d302a..b562728 100644
--- a/cassandra/auth.py
+++ b/cassandra/auth.py
@@ -1,4 +1,4 @@
-# Copyright 2013-2015 DataStax, Inc.
+# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -62,6 +62,9 @@ class Authenticator(object):
.. versionadded:: 2.0.0
"""
+ server_authenticator_class = None
+ """ Set during the connection AUTHENTICATE phase """
+
def initial_response(self):
"""
Returns a message to send to the server to initiate the SASL handshake.
@@ -139,8 +142,7 @@ class SaslAuthProvider(AuthProvider):
from cassandra.cluster import Cluster
from cassandra.auth import SaslAuthProvider
- sasl_kwargs = {'host': 'localhost',
- 'service': 'dse',
+ sasl_kwargs = {'service': 'something',
'mechanism': 'GSSAPI',
'qops': 'auth'.split(',')}
auth_provider = SaslAuthProvider(**sasl_kwargs)
@@ -152,10 +154,13 @@ class SaslAuthProvider(AuthProvider):
def __init__(self, **sasl_kwargs):
if SASLClient is None:
raise ImportError('The puresasl library has not been installed')
+ if 'host' in sasl_kwargs:
+ raise ValueError("kwargs should not contain 'host' since it is passed dynamically to new_authenticator")
self.sasl_kwargs = sasl_kwargs
def new_authenticator(self, host):
- return SaslAuthenticator(**self.sasl_kwargs)
+ return SaslAuthenticator(host, **self.sasl_kwargs)
+
class SaslAuthenticator(Authenticator):
"""
diff --git a/cassandra/buffer.pxd b/cassandra/buffer.pxd
new file mode 100644
index 0000000..f9976f0
--- /dev/null
+++ b/cassandra/buffer.pxd
@@ -0,0 +1,58 @@
+# Copyright 2013-2016 DataStax, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Simple buffer data structure that provides a view on existing memory
+(e.g. from a bytes object). This memory must stay alive while the
+buffer is in use.
+"""
+
+from cpython.bytes cimport PyBytes_AS_STRING
+ # char* PyBytes_AS_STRING(object string)
+ # Macro form of PyBytes_AsString() but without error
+ # checking. Only string objects are supported; no Unicode objects
+ # should be passed.
+
+
+cdef struct Buffer:
+ char *ptr
+ Py_ssize_t size
+
+
+cdef inline bytes to_bytes(Buffer *buf):
+ return buf.ptr[:buf.size]
+
+cdef inline char *buf_ptr(Buffer *buf):
+ return buf.ptr
+
+cdef inline char *buf_read(Buffer *buf, Py_ssize_t size) except NULL:
+ if size > buf.size:
+ raise IndexError("Requested more than length of buffer")
+ return buf.ptr
+
+cdef inline int slice_buffer(Buffer *buf, Buffer *out,
+ Py_ssize_t start, Py_ssize_t size) except -1:
+ if size < 0:
+ raise ValueError("Length must be positive")
+
+ if start + size > buf.size:
+ raise IndexError("Buffer slice out of bounds")
+
+ out.ptr = buf.ptr + start
+ out.size = size
+ return 0
+
+cdef inline void from_ptr_and_size(char *ptr, Py_ssize_t size, Buffer *out):
+ out.ptr = ptr
+ out.size = size
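As an illustration only, a pure-Python analogue of slice_buffer's bounds checking using memoryview (note the real Buffer holds a raw pointer and, unlike memoryview, does not keep the underlying bytes alive):

    # Zero-copy view over a bytes object, mirroring slice_buffer() above.
    def slice_view(data, start, size):
        if size < 0:
            raise ValueError("Length must be positive")
        if start + size > len(data):
            raise IndexError("Buffer slice out of bounds")
        return memoryview(data)[start:start + size]

    buf = b"hello world"
    assert bytes(slice_view(buf, 6, 5)) == b"world"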
diff --git a/cassandra/io/__init__.py b/cassandra/bytesio.pxd
similarity index 72%
copy from cassandra/io/__init__.py
copy to cassandra/bytesio.pxd
index e4b89e5..a0bb083 100644
--- a/cassandra/io/__init__.py
+++ b/cassandra/bytesio.pxd
@@ -1,4 +1,4 @@
-# Copyright 2013-2015 DataStax, Inc.
+# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,3 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+cdef class BytesIOReader:
+ cdef bytes buf
+ cdef char *buf_ptr
+ cdef Py_ssize_t pos
+ cdef Py_ssize_t size
+ cdef char *read(self, Py_ssize_t n = ?) except NULL
diff --git a/cassandra/bytesio.pyx b/cassandra/bytesio.pyx
new file mode 100644
index 0000000..3334697
--- /dev/null
+++ b/cassandra/bytesio.pyx
@@ -0,0 +1,44 @@
+# Copyright 2013-2016 DataStax, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cdef class BytesIOReader:
+ """
+ This class provides efficient support for reading bytes from a 'bytes' buffer,
+ by returning char * values directly without allocating intermediate objects.
+ """
+
+ def __init__(self, bytes buf):
+ self.buf = buf
+ self.size = len(buf)
+ self.buf_ptr = self.buf
+
+ cdef char *read(self, Py_ssize_t n = -1) except NULL:
+ """Read at most size bytes from the file
+ (less if the read hits EOF before obtaining size bytes).
+
+ If the size argument is negative or omitted, read all data until EOF
+ is reached. The bytes are returned as a string object. An empty
+ string is returned when EOF is encountered immediately.
+ """
+ cdef Py_ssize_t newpos = self.pos + n
+ if n < 0:
+ newpos = self.size
+ elif newpos > self.size:
+ # Raise an error here, as we do not want the caller to consume past the
+ # end of the buffer
+ raise EOFError("Cannot read past the end of the file")
+
+ cdef char *res = self.buf_ptr + self.pos
+ self.pos = newpos
+ return res
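Again for illustration only, a pure-Python sketch of the read() contract above, where reads consume a fixed buffer and raise EOFError rather than returning short reads:

    class PyBytesReader(object):
        def __init__(self, buf):
            self.buf = buf
            self.pos = 0

        def read(self, n=-1):
            newpos = self.pos + n
            if n < 0:
                # negative or omitted size: read everything remaining
                newpos = len(self.buf)
            elif newpos > len(self.buf):
                raise EOFError("Cannot read past the end of the file")
            res = self.buf[self.pos:newpos]
            self.pos = newpos
            return res

    r = PyBytesReader(b"abcdef")
    assert r.read(4) == b"abcd"
    assert r.read() == b"ef"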
diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index c7b2be7..d311988 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -1,4 +1,4 @@
-# Copyright 2013-2015 DataStax, Inc.
+# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ This module houses the main classes you will interact with,
from __future__ import absolute_import
import atexit
-from collections import defaultdict
+from collections import defaultdict, Mapping
from concurrent.futures import ThreadPoolExecutor
import logging
from random import random
@@ -40,13 +40,13 @@ except ImportError:
from cassandra.util import WeakSet # NOQA
from functools import partial, wraps
-from itertools import groupby
+from itertools import groupby, count
from cassandra import (ConsistencyLevel, AuthenticationFailed,
- InvalidRequest, OperationTimedOut,
- UnsupportedOperation, Unauthorized)
+ OperationTimedOut, UnsupportedOperation,
+ SchemaTargetType, DriverException)
from cassandra.connection import (ConnectionException, ConnectionShutdown,
- ConnectionHeartbeat)
+ ConnectionHeartbeat, ProtocolVersionUnsupported)
from cassandra.cqltypes import UserType
from cassandra.encoder import Encoder
from cassandra.protocol import (QueryMessage, ResultMessage,
@@ -59,17 +59,19 @@ from cassandra.protocol import (QueryMessage, ResultMessage,
IsBootstrappingErrorMessage,
BatchMessage, RESULT_KIND_PREPARED,
RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS,
- RESULT_KIND_SCHEMA_CHANGE)
-from cassandra.metadata import Metadata, protect_name
-from cassandra.policies import (RoundRobinPolicy, SimpleConvictionPolicy,
+ RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION,
+ ProtocolHandler)
+from cassandra.metadata import Metadata, protect_name, murmur3
+from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy,
ExponentialReconnectionPolicy, HostDistance,
- RetryPolicy)
+ RetryPolicy, IdentityTranslator)
from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler,
HostConnectionPool, HostConnection,
NoConnectionsAvailable)
from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement,
- BatchStatement, bind_params, QueryTrace, Statement,
- named_tuple_factory, dict_factory, FETCH_SIZE_UNSET)
+ BatchStatement, bind_params, QueryTrace,
+ named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET)
+
def _is_eventlet_monkey_patched():
if 'eventlet.patcher' not in sys.modules:
@@ -77,10 +79,17 @@ def _is_eventlet_monkey_patched():
import eventlet.patcher
return eventlet.patcher.is_monkey_patched('socket')
+
+def _is_gevent_monkey_patched():
+ if 'gevent.monkey' not in sys.modules:
+ return False
+ import gevent.socket
+ return socket.socket is gevent.socket.socket
+
# default to gevent when we are monkey patched with gevent, eventlet when
# monkey patched with eventlet, otherwise if libev is available, use that as
# the default because it's fastest. Otherwise, use asyncore.
-if 'gevent.monkey' in sys.modules:
+if _is_gevent_monkey_patched():
from cassandra.io.geventreactor import GeventConnection as DefaultConnection
elif _is_eventlet_monkey_patched():
from cassandra.io.eventletreactor import EventletConnection as DefaultConnection
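With the stricter detection above, gevent/eventlet reactors are only auto-selected when monkey-patching is actually in effect. The reactor can still be chosen explicitly; a sketch, assuming the connection_class keyword of Cluster (contact point is a placeholder):

    from cassandra.cluster import Cluster
    from cassandra.io.asyncorereactor import AsyncoreConnection

    # Force the stdlib asyncore reactor instead of the auto-detected one.
    cluster = Cluster(['127.0.0.1'], connection_class=AsyncoreConnection)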
@@ -156,10 +165,36 @@ def run_in_executor(f):
return new_f
-def _shutdown_cluster(cluster):
- if cluster and not cluster.is_shutdown:
+_clusters_for_shutdown = set()
+
+
+def _register_cluster_shutdown(cluster):
+ _clusters_for_shutdown.add(cluster)
+
+
+def _discard_cluster_shutdown(cluster):
+ _clusters_for_shutdown.discard(cluster)
+
+
+def _shutdown_clusters():
+ clusters = _clusters_for_shutdown.copy() # copy because shutdown modifies the global set "discard"
+ for cluster in clusters:
cluster.shutdown()
+atexit.register(_shutdown_clusters)
+
+
+# murmur3 implementation required for TokenAware is only available for CPython
+import platform
+if platform.python_implementation() == 'CPython':
+ def default_lbp_factory():
+ if murmur3 is not None:
+ return TokenAwarePolicy(DCAwareRoundRobinPolicy())
+ return DCAwareRoundRobinPolicy()
+else:
+ def default_lbp_factory():
+ return DCAwareRoundRobinPolicy()
+
class Cluster(object):
"""
@@ -176,6 +211,8 @@ class Cluster(object):
>>> ...
>>> cluster.shutdown()
+ ``Cluster`` and ``Session`` also provide context management functions
+ which implicitly handle shutdown when leaving scope.
"""
contact_points = ['127.0.0.1']
@@ -185,9 +222,9 @@ class Cluster(object):
Defaults to loopback interface.
Note: When using :class:`.DCAwareLoadBalancingPolicy` with no explicit
- local_dc set, the DC is chosen from an arbitrary host in contact_points.
- In this case, contact_points should contain only nodes from a single,
- local DC.
+ local_dc set (as is the default), the DC is chosen from an arbitrary
+ host in contact_points. In this case, contact_points should contain
+ only nodes from a single, local DC.
"""
port = 9042
@@ -202,9 +239,14 @@ class Cluster(object):
server will be automatically used.
"""
- protocol_version = 2
+ protocol_version = 4
"""
- The version of the native protocol to use.
+ The maximum version of the native protocol to use.
+
+ The driver will automatically downgrade version based on a negotiation with
+ the server, but it is most efficient to set this to the maximum supported
+ by your version of Cassandra. Setting this will also prevent conflicting
+ versions being negotiated if your cluster is upgraded.
Version 2 of the native protocol adds support for lightweight transactions,
batch operations, and automatic query paging. The v2 protocol is
@@ -215,6 +257,10 @@ class Cluster(object):
serial consistency levels for :class:`~.BatchStatement`, and an
improved connection pool.
+ Version 4 of the native protocol adds a number of new types, server warnings,
+ new failure messages, and custom payloads. Details in the
+ `project docs <https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec>`_
+
The following table describes the native protocol versions that
are supported by each version of Cassandra:
@@ -227,6 +273,8 @@ class Cluster(object):
+-------------------+-------------------+
| 2.1 | 1, 2, 3 |
+-------------------+-------------------+
+ | 2.2 | 1, 2, 3, 4 |
+ +-------------------+-------------------+
"""
compression = True
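Given the table above, a sketch of pinning the protocol version via the Cluster constructor (e.g. 3 for a Cassandra 2.1 cluster; contact point is a placeholder):

    from cassandra.cluster import Cluster

    # Pin the protocol version to the highest one every node supports,
    # avoiding negotiation downgrades during rolling upgrades.
    cluster = Cluster(['127.0.0.1'], protocol_version=3)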
@@ -281,7 +329,16 @@ class Cluster(object):
load_balancing_policy = None
"""
An instance of :class:`.policies.LoadBalancingPolicy` or
- one of its subclasses. Defaults to :class:`~.RoundRobinPolicy`.
+ one of its subclasses.
+
+ .. versionchanged:: 2.6.0
+
+ Defaults to :class:`~.TokenAwarePolicy` (:class:`~.DCAwareRoundRobinPolicy`)
+ when using CPython (where the murmur3 extension is available); :class:`~.DCAwareRoundRobinPolicy`
+ otherwise. The default local DC will be chosen from the contact points.
+
+ **Please see** :class:`~.DCAwareRoundRobinPolicy` **for a discussion on default behavior with respect to
+ DC locality and remote nodes.**
"""
reconnection_policy = ExponentialReconnectionPolicy(1.0, 600.0)
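An explicit equivalent of the new CPython default, with the local DC pinned rather than inferred (DC name and contact points are placeholders):

    from cassandra.cluster import Cluster
    from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy

    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy(local_dc='DC1'))
    cluster = Cluster(['10.0.0.1', '10.0.0.2'], load_balancing_policy=policy)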
@@ -305,12 +362,20 @@ class Cluster(object):
:class:`.policies.SimpleConvictionPolicy`.
"""
+ address_translator = IdentityTranslator()
+ """
+ :class:`.policies.AddressTranslator` instance to be used in translating server node addresses
+ to driver connection addresses.
+ """
+
connect_to_remote_hosts = True
"""
If left as :const:`True`, hosts that are considered :attr:`~.HostDistance.REMOTE`
by the :attr:`~.Cluster.load_balancing_policy` will have a connection
opened to them. Otherwise, they will not have a connection opened to them.
+ Note that the default load balancing policy ignores remote hosts by default.
+
.. versionadded:: 2.1.0
"""
@@ -336,12 +401,24 @@ class Cluster(object):
a string pointing to the location of the CA certs file), and you probably
want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLSv1`` to match
Cassandra's default protocol.
+
+ .. versionchanged:: 3.3.0
+
+ In addition to ``wrap_socket`` kwargs, clients may also specify ``'check_hostname': True`` to verify the cert hostname
+ as outlined in RFC 2818 and RFC 6125. Note that this requires the certificate to be transferred, so
+ should almost always require the option ``'cert_reqs': ssl.CERT_REQUIRED``. Note also that this functionality was not built into
+ Python standard library until (2.7.9, 3.2). To enable this mechanism in earlier versions, patch ``ssl.match_hostname``
+ with a custom or `back-ported function <https://pypi.python.org/pypi/backports.ssl_match_hostname>`_.
"""
sockopts = None
"""
An optional list of tuples which will be used as arguments to
``socket.setsockopt()`` for all created sockets.
+
+ Note: some drivers find setting TCPNODELAY beneficial in the context of
+ their execution model. It was not found generally beneficial for this driver.
+ To try with your own workload, set ``sockopts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
"""
max_schema_agreement_wait = 10
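A sketch of the ssl_options usage described above, enabling hostname verification per the 3.3.0 note (certificate path and contact point are placeholders):

    import ssl
    from cassandra.cluster import Cluster

    ssl_options = {'ca_certs': '/path/to/rootca.crt',    # placeholder path
                   'ssl_version': ssl.PROTOCOL_TLSv1,
                   'cert_reqs': ssl.CERT_REQUIRED,
                   'check_hostname': True}
    cluster = Cluster(['10.0.0.1'], ssl_options=ssl_options)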
@@ -363,18 +440,17 @@ class Cluster(object):
* :class:`cassandra.io.asyncorereactor.AsyncoreConnection`
* :class:`cassandra.io.libevreactor.LibevConnection`
- * :class:`cassandra.io.geventreactor.GeventConnection` (requires monkey-patching)
+ * :class:`cassandra.io.eventletreactor.EventletConnection` (requires monkey-patching - see doc for details)
+ * :class:`cassandra.io.geventreactor.GeventConnection` (requires monkey-patching - see doc for details)
* :class:`cassandra.io.twistedreactor.TwistedConnection`
By default, ``AsyncoreConnection`` will be used, which uses
- the ``asyncore`` module in the Python standard library. The
- performance is slightly worse than with ``libev``, but it is
- supported on a wider range of systems.
+ the ``asyncore`` module in the Python standard library.
If ``libev`` is installed, ``LibevConnection`` will be used instead.
- If gevent monkey-patching of the standard library is detected,
- GeventConnection will be used automatically.
+ If ``gevent`` or ``eventlet`` monkey-patching is detected, the corresponding
+ connection class will be used automatically.
"""
control_connection_timeout = 2.0
@@ -420,12 +496,80 @@ class Cluster(object):
Setting this to zero will execute refreshes immediately.
- Setting this negative will disable node refreshes in response to push events
- (refreshes will still occur in response to new nodes observed on "UP" events).
+ Setting this negative will disable node refreshes in response to push events.
See :attr:`.schema_event_refresh_window` for discussion of rationale
"""
+ status_event_refresh_window = 2
+ """
+ Window, in seconds, within which the driver will start the reconnect after
+ receiving a status_change event.
+
+ Setting this to zero will connect immediately.
+
+ This is primarily used to avoid 'thundering herd' in deployments with large fanout from cluster to clients.
+ When nodes come up, clients attempt to reprepare prepared statements (depending on :attr:`.reprepare_on_up`), and
+ establish connection pools. This can cause a rush of connections and queries if not mitigated with this factor.
+ """
+
+ prepare_on_all_hosts = True
+ """
+ Specifies whether statements should be prepared on all hosts, or just one.
+
+ This can reasonably be disabled on long-running applications with numerous clients preparing statements on startup,
+ where a randomized initial condition of the load balancing policy can be expected to distribute prepares from
+ different clients across the cluster.
+ """
+
+ reprepare_on_up = True
+ """
+ Specifies whether all known prepared statements should be prepared on a node when it comes up.
+
+ May be used to avoid overwhelming a node on return, or if it is supposed that the node was only marked down due to
+ network. If statements are not reprepared, they are prepared on the first execution, causing
+ an extra roundtrip for one or more client requests.
+ """
+
+ connect_timeout = 5
+ """
+ Timeout, in seconds, for creating new connections.
+
+ This timeout covers the entire connection negotiation, including TCP
+ establishment, options passing, and authentication.
+ """
+
+ @property
+ def schema_metadata_enabled(self):
+ """
+ Flag indicating whether internal schema metadata is updated.
+
+ When disabled, the driver does not populate Cluster.metadata.keyspaces on connect, or on schema change events. This
+ can be used to speed initial connection, and reduce load on client and server during operation. Turning this off
+ forgoes token-aware request routing and programmatic inspection of the metadata model.
+ """
+ return self.control_connection._schema_meta_enabled
+
+ @schema_metadata_enabled.setter
+ def schema_metadata_enabled(self, enabled):
+ self.control_connection._schema_meta_enabled = bool(enabled)
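A sketch of toggling the new schema_metadata_enabled flag on a Cluster before connecting (contact point is a placeholder; assumes the control connection is created at Cluster construction, as in this driver):

    from cassandra.cluster import Cluster

    cluster = Cluster(['127.0.0.1'])
    # Skip keyspace/table metadata refresh to speed up connect; per the
    # docstring above this also forgoes token-aware routing and metadata
    # introspection.
    cluster.schema_metadata_enabled = False
    session = cluster.connect()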
... 16802 lines suppressed ...
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/python-cassandra-driver.git