[Python-modules-commits] [python-fakeredis] 01/03: import python-fakeredis_0.7.0.orig.tar.gz
Ondřej Nový
onovy at moszumanska.debian.org
Wed Jun 22 20:33:05 UTC 2016
This is an automated email from the git hooks/post-receive script.
onovy pushed a commit to branch master
in repository python-fakeredis.
commit 96d29762d2ee0072f3df7e70727bd4975a2a70a4
Author: Ondřej Nový <onovy at debian.org>
Date: Wed Jun 22 22:19:16 2016 +0200
import python-fakeredis_0.7.0.orig.tar.gz
---
.gitignore | 6 +
.travis.yml | 22 +
CONTRIBUTING.rst | 17 +
COPYING | 24 +
MANIFEST.in | 2 +
README.rst | 270 ++++++
fakeredis.py | 1866 ++++++++++++++++++++++++++++++++++++
requirements-26.txt | 2 +
requirements-dev.txt | 4 +
requirements.txt | 2 +
scripts/supported | 73 ++
setup.cfg | 2 +
setup.py | 29 +
test_fakeredis.py | 2599 ++++++++++++++++++++++++++++++++++++++++++++++++++
tox.ini | 13 +
15 files changed, 4931 insertions(+)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c1bdfc9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+.commands.json
+fakeredis.egg-info
+dump.rdb
+extras/*
+.tox
+*.pyc
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..ef9df07
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,22 @@
+language: python
+python:
+ - "2.6"
+ - "2.7"
+ - "3.3"
+ - "3.4"
+sudo: false
+cache:
+ - pip
+services:
+ - redis-server
+install:
+ - if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then pip install -r requirements-26.txt; fi
+ - pip install -r requirements.txt
+ - pip install coverage python-coveralls
+script:
+ - coverage erase
+ - coverage run --source fakeredis.py test_fakeredis.py
+notifications:
+ email:
+ - js at jamesls.com
+after_success: coveralls
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..d03efa9
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,17 @@
+============
+Contributing
+============
+
+Contributions are welcome. To ensure that your contributions are accepted
+please follow these guidelines.
+
+* Follow PEP 8.
+* If you are adding docstrings, follow PEP 257.
+* If you are adding new functionality or fixing a bug, please add tests.
+* If you are making a large change, consider filing an issue on GitHub
+  first to see if there are any objections to the proposed changes.
+
+In general, new features or bug fixes **will not be merged unless they
+have tests.** This is not only to ensure the correctness of the code,
+but also to encourage others to experiment without wondering whether
+they are breaking existing code.
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..6723b13
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,24 @@
+Copyright (c) 2011 James Saryerwinnie
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..e72662c
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,2 @@
+include COPYING
+include README.rst
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..ec298b1
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,270 @@
+fakeredis: A fake version of redis-py
+=======================================
+
+.. image:: https://secure.travis-ci.org/jamesls/fakeredis.png?branch=master
+ :target: http://travis-ci.org/jamesls/fakeredis
+
+.. image:: https://pypip.in/version/fakeredis/badge.svg
+ :target: https://pypi.python.org/pypi/fakeredis
+
+.. image:: https://pypip.in/py_versions/fakeredis/badge.svg
+ :target: https://pypi.python.org/pypi/fakeredis/
+
+.. image:: https://pypip.in/license/fakeredis/badge.svg
+ :target: https://pypi.python.org/pypi/fakeredis/
+
+.. image:: https://coveralls.io/repos/jamesls/fakeredis/badge.png?branch=master
+ :target: https://coveralls.io/r/jamesls/fakeredis
+
+
+fakeredis is a pure python implementation of the redis-py python client
+that simulates talking to a redis server. It was created for a single
+purpose: **to write unittests**. Setting up redis is not hard, but you
+often want to write unittests that do not talk to an external server
+(such as redis). fakeredis lets such tests use it as a reasonable
+substitute for a real redis server.
+
+
+How to Use
+==========
+
+The intent is for fakeredis to act as though you're talking to a real
+redis server. It does this by storing state in the fakeredis module.
+For example:
+
+.. code-block:: python
+
+ >>> import fakeredis
+ >>> r = fakeredis.FakeStrictRedis()
+ >>> r.set('foo', 'bar')
+ True
+ >>> r.get('foo')
+ 'bar'
+ >>> r.lpush('bar', 1)
+ 1
+ >>> r.lpush('bar', 2)
+ 2
+ >>> r.lrange('bar', 0, -1)
+ [2, 1]
+
+By storing state in the fakeredis module, instances can share
+data:
+
+.. code-block:: python
+
+ >>> import fakeredis
+ >>> r1 = fakeredis.FakeStrictRedis()
+ >>> r1.set('foo', 'bar')
+ True
+ >>> r2 = fakeredis.FakeStrictRedis()
+ >>> r2.get('foo')
+ 'bar'
+ >>> r2.set('bar', 'baz')
+ True
+ >>> r1.get('bar')
+ 'baz'
+ >>> r2.get('bar')
+ 'baz'
+
+Because fakeredis stores state at the module level, if you
+want to ensure that you have a clean slate for every unit
+test you run, be sure to call ``r.flushall()`` in your
+``tearDown`` method. For example::
+
+ def setUp(self):
+ # Setup fake redis for testing.
+ self.r = fakeredis.FakeStrictRedis()
+
+ def tearDown(self):
+ # Clear data in fakeredis.
+ self.r.flushall()
+
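+A complete test case following this pattern might look like the
+following minimal sketch (the class, test, and key names are only
+illustrative):
+
+.. code-block:: python
+
+    import unittest
+
+    import fakeredis
+
+
+    class ExampleCacheTest(unittest.TestCase):
+        def setUp(self):
+            # Set up fake redis for testing.
+            self.r = fakeredis.FakeStrictRedis()
+
+        def tearDown(self):
+            # Clear the shared module-level data between tests.
+            self.r.flushall()
+
+        def test_set_and_get(self):
+            self.r.set('greeting', 'hello')
+            self.assertEqual(self.r.get('greeting'), b'hello')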
+
+Fakeredis implements the same interface as `redis-py`_, the
+popular redis client for python, and models the responses
+of redis 2.6.
+
+Unimplemented Commands
+======================
+
+All of the redis commands are implemented in fakeredis with
+these exceptions:
+
+
+sorted_set
+----------
+
+ * zscan
+
+
+hash
+----
+
+ * hstrlen
+
+
+string
+------
+
+ * bitop
+ * bitpos
+
+
+geo
+---
+
+ * geoadd
+ * geopos
+ * georadius
+ * geohash
+ * georadiusbymember
+ * geodist
+
+
+generic
+-------
+
+ * restore
+ * dump
+ * pexpireat
+ * pexpire
+ * migrate
+ * object
+ * wait
+
+
+server
+------
+
+ * client list
+ * lastsave
+ * slowlog
+ * debug object
+ * shutdown
+ * debug segfault
+ * command count
+ * monitor
+ * client kill
+ * cluster slots
+ * role
+ * config resetstat
+ * time
+ * config get
+ * config set
+ * save
+ * client setname
+ * command getkeys
+ * config rewrite
+ * sync
+ * client getname
+ * bgrewriteaof
+ * slaveof
+ * info
+ * client pause
+ * bgsave
+ * command
+ * dbsize
+ * command info
+
+
+
+cluster
+-------
+
+ * cluster getkeysinslot
+ * cluster info
+ * readwrite
+ * cluster slots
+ * cluster keyslot
+ * cluster addslots
+ * readonly
+ * cluster saveconfig
+ * cluster forget
+ * cluster meet
+ * cluster slaves
+ * cluster nodes
+ * cluster countkeysinslot
+ * cluster setslot
+ * cluster count-failure-reports
+ * cluster reset
+ * cluster failover
+ * cluster set-config-epoch
+ * cluster delslots
+ * cluster replicate
+
+
+connection
+----------
+
+ * echo
+ * select
+ * quit
+ * auth
+
+
+scripting
+---------
+
+ * script flush
+ * script kill
+ * script load
+ * evalsha
+ * eval
+ * script exists
+
+
+Contributing
+============
+
+Contributions are welcome. Please see the `contributing guide`_ for
+more details.
+
+
+Running the Tests
+=================
+
+To ensure parity with the real redis, there is a set of integration tests
+that mirror the unittests. For every unittest that is written, the same
+test is run against a real redis instance using a real redis-py client
+instance. To run these tests, you must have a redis server running on
+localhost, port 6379 (the default settings). The integration tests use
+db=10 in order to minimize collisions with an existing redis instance.
+
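+For reference, such a connection looks roughly like this (an
+illustrative snippet, not code taken from the test suite):
+
+.. code-block:: python
+
+    import redis
+
+    # A real redis server must be listening here for the
+    # integration tests to run.
+    conn = redis.StrictRedis(host='localhost', port=6379, db=10)
+    conn.ping()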
+
+To run all the tests, first install the requirements::
+
+ pip install -r requirements.txt
+
+If you just want to run the unittests::
+
+ nosetests test_fakeredis.py:TestFakeStrictRedis test_fakeredis.py:TestFakeRedis
+
+Because this module is attempting to provide the same interface as `redis-py`_,
+the python bindings to redis, a reasonable way to test it is to take each
+unittest and run it against a real redis server. fakeredis and the real redis
+server should give the same result. This ensures parity between the two. You
+can run these "integration" tests like this::
+
+ nosetests test_fakeredis.py:TestRealStrictRedis test_fakeredis.py:TestRealRedis
+
+In terms of implementation, ``TestRealRedis`` is a subclass of
+``TestFakeRedis`` that overrides a factory method to create
+an instance of ``redis.Redis`` (an actual python client for redis)
+instead of ``fakeredis.FakeStrictRedis``.
+
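+For illustration, that factory-method arrangement looks roughly like
+this (the class and method names below are a simplified sketch, not
+the test suite's exact code):
+
+.. code-block:: python
+
+    import redis
+
+    import fakeredis
+
+
+    class FakeRedisTestSketch(object):
+        def create_redis(self, db=0):
+            return fakeredis.FakeStrictRedis(db=db)
+
+
+    class RealRedisTestSketch(FakeRedisTestSketch):
+        def create_redis(self, db=10):
+            # Talk to a real server instead of the in-process fake.
+            return redis.Redis(host='localhost', port=6379, db=db)
+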
+To run both the unittests and the "integration" tests, run::
+
+ nosetests
+
+If redis is not running and you try to run tests against a real redis server,
+these tests will have a result of 'S' for skipped.
+
+Some tests exercise redis blocking operations and are somewhat slow.
+If you want to skip these tests during day-to-day development, they
+have all been tagged as 'slow', so you can skip them by running::
+
+ nosetests -a '!slow'
+
+
+.. _redis-py: http://redis-py.readthedocs.org/en/latest/index.html
+.. _contributing guide: https://github.com/jamesls/fakeredis/blob/master/CONTRIBUTING.rst
diff --git a/fakeredis.py b/fakeredis.py
new file mode 100644
index 0000000..97b0ca9
--- /dev/null
+++ b/fakeredis.py
@@ -0,0 +1,1866 @@
+import random
+import warnings
+import copy
+from ctypes import CDLL, POINTER, c_double, c_char_p, pointer
+from ctypes.util import find_library
+import fnmatch
+from collections import MutableMapping
+from datetime import datetime, timedelta
+import operator
+import sys
+import time
+import re
+
+import redis
+from redis.exceptions import ResponseError
+import redis.client
+
+try:
+    # Python 2.6, 2.7
+    from Queue import Queue, Empty
+except ImportError:
+    # Python 3
+    from queue import Queue, Empty
+
+PY2 = sys.version_info[0] == 2
+
+if not PY2:
+ long = int
+
+
+__version__ = '0.7.0'
+
+
+if sys.version_info[0] == 2:
+ text_type = unicode
+ string_types = (str, unicode)
+ redis_string_types = (str, unicode, bytes)
+ byte_to_int = ord
+ int_to_byte = chr
+
+ def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
+ if x is None:
+ return None
+ if isinstance(x, (bytes, bytearray, buffer)) or hasattr(x, '__str__'):
+ return bytes(x)
+ if isinstance(x, unicode):
+ return x.encode(charset, errors)
+ if hasattr(x, '__unicode__'):
+ return unicode(x).encode(charset, errors)
+ raise TypeError('expected bytes or unicode, not ' + type(x).__name__)
+
+ def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
+ if x is None or isinstance(x, str):
+ return x
+ return x.encode(charset, errors)
+
+ iterkeys = lambda d: d.iterkeys()
+ itervalues = lambda d: d.itervalues()
+ iteritems = lambda d: d.iteritems()
+ from urlparse import urlparse
+else:
+ text_type = str
+ string_types = (str,)
+ redis_string_types = (bytes, str)
+
+ def byte_to_int(b):
+ if isinstance(b, int):
+ return b
+ raise TypeError('an integer is required')
+
+ int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
+
+ def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
+ if x is None:
+ return None
+ if isinstance(x, (bytes, bytearray, memoryview)):
+ return bytes(x)
+ if isinstance(x, str):
+ return x.encode(charset, errors)
+ if hasattr(x, '__str__'):
+ return str(x).encode(charset, errors)
+ raise TypeError('expected bytes or str, not ' + type(x).__name__)
+
+ def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
+ if x is None or isinstance(x, str):
+ return x
+ return x.decode(charset, errors)
+
+ iterkeys = lambda d: iter(d.keys())
+ itervalues = lambda d: iter(d.values())
+ iteritems = lambda d: iter(d.items())
+ from urllib.parse import urlparse
+
+
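+# Module-level store shared by every FakeStrictRedis instance in this
+# process, keyed by database number.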
+DATABASES = {}
+
+_libc_library = find_library('c') or find_library('msvcrt')
+
+if not _libc_library:
+ raise ImportError('fakeredis: unable to find libc or equivalent')
+
+_libc = CDLL(_libc_library)
+_libc.strtod.restype = c_double
+_libc.strtod.argtypes = [c_char_p, POINTER(c_char_p)]
+_strtod = _libc.strtod
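+# _strtod is used below (see _strtod_key_func) to parse values with C
+# strtod semantics, much as the real redis server does for doubles.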
+
+
+def timedelta_total_seconds(delta):
+ return delta.days * 86400 + delta.seconds + delta.microseconds / 1E6
+
+
+class _StrKeyDict(MutableMapping):
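+    # A dict that normalizes every key to bytes (as redis does) and
+    # tracks per-key expiration timestamps in self._ex_keys.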
+ def __init__(self, *args, **kwargs):
+ self._dict = dict(*args, **kwargs)
+ self._ex_keys = {}
+
+ def __getitem__(self, key):
+ self._update_expired_keys()
+ return self._dict[to_bytes(key)]
+
+ def __setitem__(self, key, value):
+ self._dict[to_bytes(key)] = value
+
+ def __delitem__(self, key):
+ del self._dict[to_bytes(key)]
+
+ def __len__(self):
+ return len(self._dict)
+
+ def __iter__(self):
+ return iter(self._dict)
+
+ def expire(self, key, timestamp):
+ self._ex_keys[key] = timestamp
+
+ def expiring(self, key):
+ if key not in self._ex_keys:
+ return None
+ return self._ex_keys[key]
+
+ def _update_expired_keys(self):
+ now = datetime.now()
+ deleted = []
+ for key in self._ex_keys:
+ if now > self._ex_keys[key]:
+ deleted.append(key)
+
+ for key in deleted:
+ del self._ex_keys[key]
+ del self[key]
+
+ def copy(self):
+ new_copy = _StrKeyDict()
+ for key, value in self._dict.items():
+ new_copy[key] = value
+ return new_copy
+
+ def clear(self):
+ super(_StrKeyDict, self).clear()
+ self._ex_keys.clear()
+
+ def to_bare_dict(self):
+ return copy.deepcopy(self._dict)
+
+
+class _ZSet(_StrKeyDict):
+ redis_type = b'zset'
+
+
+class _Hash(_StrKeyDict):
+ redis_type = b'hash'
+
+
+class FakeStrictRedis(object):
+ @classmethod
+ def from_url(cls, url, db=None, **kwargs):
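+        # Only the database number from the URL path is honored here;
+        # host and port are ignored since all data lives in-process.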
+ url = urlparse(url)
+ if db is None:
+ try:
+ db = int(url.path.replace('/', ''))
+ except (AttributeError, ValueError):
+ db = 0
+ return cls(db=db)
+
+ def __init__(self, db=0, charset='utf-8', errors='strict', **kwargs):
+ if db not in DATABASES:
+ DATABASES[db] = _StrKeyDict()
+ self._db = DATABASES[db]
+ self._db_num = db
+ self._encoding = charset
+ self._encoding_errors = errors
+ self._pubsubs = []
+
+ def flushdb(self):
+ DATABASES[self._db_num].clear()
+ return True
+
+ def flushall(self):
+ for db in DATABASES:
+ DATABASES[db].clear()
+
+ del self._pubsubs[:]
+
+ # Basic key commands
+ def append(self, key, value):
+ self._db.setdefault(key, b'')
+ self._db[key] += to_bytes(value)
+ return len(self._db[key])
+
+ def bitcount(self, name, start=0, end=-1):
+ if end == -1:
+ end = None
+ else:
+ end += 1
+ try:
+ s = self._db[name][start:end]
+ return sum([bin(byte_to_int(l)).count('1') for l in s])
+ except KeyError:
+ return 0
+
+ def decr(self, name, amount=1):
+ try:
+ self._db[name] = int(self._db.get(name, '0')) - amount
+ except (TypeError, ValueError):
+ raise redis.ResponseError("value is not an integer or out of "
+ "range.")
+ return self._db[name]
+
+ def exists(self, name):
+ return name in self._db
+ __contains__ = exists
+
+ def expire(self, name, time):
+ if isinstance(time, timedelta):
+ time = int(timedelta_total_seconds(time))
+ if self.exists(name):
+ self._db.expire(name, datetime.now() + timedelta(seconds=time))
+ return True
+ else:
+ return False
+
+ def expireat(self, name, when):
+ if not isinstance(when, datetime):
+ when = datetime.fromtimestamp(when)
+ if self.exists(name):
+ self._db.expire(name, when)
+ return True
+ else:
+ return False
+
+ def echo(self, value):
+ if isinstance(value, text_type):
+ return value.encode('utf-8')
+ return value
+
+ def get(self, name):
+ value = self._db.get(name)
+ if isinstance(value, _StrKeyDict):
+ raise redis.ResponseError("WRONGTYPE Operation against a key "
+ "holding the wrong kind of value")
+ if value is not None:
+ return to_bytes(value)
+
+ def __getitem__(self, name):
+ return self._db[name]
+
+ def getbit(self, name, offset):
+        """Returns the bit value (0 or 1) at ``offset`` in ``name``."""
+ val = self._db.get(name, '\x00')
+ byte = offset // 8
+ remaining = offset % 8
+ actual_bitoffset = 7 - remaining
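+        # Redis numbers bits from the most significant bit of each byte,
+        # hence 7 - remaining.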
+ try:
+ actual_val = byte_to_int(val[byte])
+ except IndexError:
+ return 0
+ return 1 if (1 << actual_bitoffset) & actual_val else 0
+
+ def getset(self, name, value):
+ """
+ Set the value at key ``name`` to ``value`` if key doesn't exist
+ Return the value at key ``name`` atomically
+ """
+ val = self._db.get(name)
+ self._db[name] = value
+ return val
+
+ def incr(self, name, amount=1):
+ """
+ Increments the value of ``key`` by ``amount``. If no key exists,
+ the value will be initialized as ``amount``
+ """
+ try:
+ if not isinstance(amount, int):
+ raise redis.ResponseError("value is not an integer or out "
+ "of range.")
+ self._db[name] = to_bytes(int(self._db.get(name, '0')) + amount)
+ except (TypeError, ValueError):
+ raise redis.ResponseError("value is not an integer or out of "
+ "range.")
+ return int(self._db[name])
+
+ def incrby(self, name, amount=1):
+ """
+ Alias for command ``incr``
+ """
+ return self.incr(name, amount)
+
+ def incrbyfloat(self, name, amount=1.0):
+ try:
+ self._db[name] = float(self._db.get(name, '0')) + amount
+ except (TypeError, ValueError):
+ raise redis.ResponseError("value is not a valid float.")
+ return self._db[name]
+
+ def keys(self, pattern=None):
+ return [key for key in self._db
+ if not key or not pattern or
+ fnmatch.fnmatch(to_native(key), to_native(pattern))]
+
+ def mget(self, keys, *args):
+ all_keys = self._list_or_args(keys, args)
+ found = []
+ if not all_keys:
+ raise redis.ResponseError(
+ "wrong number of arguments for 'mget' command")
+ for key in all_keys:
+ found.append(self._db.get(key))
+ return found
+
+ def mset(self, *args, **kwargs):
+ if args:
+ if len(args) != 1 or not isinstance(args[0], dict):
+ raise redis.RedisError(
+ 'MSET requires **kwargs or a single dict arg')
+ kwargs.update(args[0])
+ for key, val in iteritems(kwargs):
+ self.set(key, val)
+ return True
+
+ def msetnx(self, mapping):
+ """
+ Sets each key in the ``mapping`` dict to its corresponding value if
+ none of the keys are already set
+ """
+ if not any(k in self._db for k in mapping):
+ for key, val in iteritems(mapping):
+ self.set(key, val)
+ return True
+ return False
+
+ def move(self, name, db):
+ pass
+
+ def persist(self, name):
+ pass
+
+ def ping(self):
+ return True
+
+ def randomkey(self):
+ pass
+
+ def rename(self, src, dst):
+ try:
+ value = self._db[src]
+ except KeyError:
+ raise redis.ResponseError("No such key: %s" % src)
+ self._db[dst] = value
+ del self._db[src]
+ return True
+
+ def renamenx(self, src, dst):
+ if dst in self._db:
+ return False
+ else:
+ return self.rename(src, dst)
+
+ def set(self, name, value, ex=None, px=None, nx=False, xx=False):
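+        # nx: only set the key if it does not already exist;
+        # xx: only set the key if it already exists.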
+ if (not nx and not xx) or (nx and self._db.get(name, None) is None) \
+ or (xx and not self._db.get(name, None) is None):
+ if ex is not None and ex > 0:
+ self._db.expire(name, datetime.now() + timedelta(seconds=ex))
+ elif px is not None and px > 0:
+ self._db.expire(name, datetime.now() +
+ timedelta(milliseconds=px))
+ self._db[name] = to_bytes(value)
+ return True
+ else:
+ return None
+
+ __setitem__ = set
+
+ def setbit(self, name, offset, value):
+ val = self._db.get(name, b'\x00')
+ byte = offset // 8
+ remaining = offset % 8
+ actual_bitoffset = 7 - remaining
+ if len(val) - 1 < byte:
+ # We need to expand val so that we can set the appropriate
+ # bit.
+ needed = byte - (len(val) - 1)
+ val += b'\x00' * needed
+        if value == 1:
+            new_byte = byte_to_int(val[byte]) | (1 << actual_bitoffset)
+        else:
+            # Clear the bit; XOR would toggle it rather than clear it.
+            new_byte = byte_to_int(val[byte]) & ~(1 << actual_bitoffset)
+ reconstructed = bytearray(val)
+ reconstructed[byte] = new_byte
+ self._db[name] = bytes(reconstructed)
+
+ def setex(self, name, time, value):
+ if isinstance(time, timedelta):
+ time = int(timedelta_total_seconds(time))
+ return self.set(name, value, ex=time)
+
+ def psetex(self, name, time_ms, value):
+ if isinstance(time_ms, timedelta):
+ time_ms = int(timedelta_total_seconds(time_ms) * 1000)
+ if time_ms == 0:
+ raise ResponseError("invalid expire time in SETEX")
+ return self.set(name, value, px=time_ms)
+
+ def setnx(self, name, value):
+ result = self.set(name, value, nx=True)
+ # Real Redis returns False from setnx, but None from set(nx=...)
+ if not result:
+ return False
+ return result
+
+ def setrange(self, name, offset, value):
+ pass
+
+ def strlen(self, name):
+ try:
+ return len(self._db[name])
+ except KeyError:
+ return 0
+
+ def substr(self, name, start, end=-1):
+ if end == -1:
+ end = None
+ else:
+ end += 1
+ try:
+ return self._db[name][start:end]
+ except KeyError:
+ return b''
+    # In Redis >= 2.0.0 this command is called getrange,
+    # according to the docs.
+ getrange = substr
+
+ def ttl(self, name):
+ return self._ttl(name)
+
+ def pttl(self, name):
+ return self._ttl(name, 1000)
+
+ def _ttl(self, name, multiplier=1):
+ if name not in self._db:
+ return None
+
+ exp_time = self._db.expiring(name)
+ if not exp_time:
+ return None
+
+ now = datetime.now()
+ if now > exp_time:
+ return None
+ else:
+ return round(((exp_time - now).days * 3600 * 24
+ + (exp_time - now).seconds
+ + (exp_time - now).microseconds / 1E6) * multiplier)
+
+ def type(self, name):
+ key = self._db.get(name)
+ if hasattr(key.__class__, 'redis_type'):
+ return key.redis_type
+ if isinstance(key, redis_string_types):
+ return b'string'
+ elif isinstance(key, list):
+ return b'list'
+ elif isinstance(key, set):
+ return b'set'
+
+ def watch(self, *names):
+ pass
+
+ def unwatch(self):
+ pass
+
+ def delete(self, *names):
+ deleted = 0
+ for name in names:
+ try:
+ del self._db[name]
+ if name in self._db._ex_keys:
+ del self._db._ex_keys[name]
+ deleted += 1
+ except KeyError:
+ continue
+ return deleted
+
+ def sort(self, name, start=None, num=None, by=None, get=None, desc=False,
+ alpha=False, store=None):
+ """Sort and return the list, set or sorted set at ``name``.
+
+ ``start`` and ``num`` allow for paging through the sorted data
+
+ ``by`` allows using an external key to weight and sort the items.
+ Use an "*" to indicate where in the key the item value is located
+
+        ``get`` allows for returning items from external keys rather than the
+        sorted data itself. Use an "*" to indicate where in the key
+        the item value is located
+
+ ``desc`` allows for reversing the sort
+
+ ``alpha`` allows for sorting lexicographically rather than numerically
+
+ ``store`` allows for storing the result of the sort into
+ the key ``store``
+
+ """
+ if (start is None and num is not None) or \
+ (start is not None and num is None):
+ raise redis.RedisError(
+ "RedisError: ``start`` and ``num`` must both be specified")
+ try:
+ data = list(self._db[name])[:]
+ if by is not None:
+                # _sort_using_by_arg mutates data so we don't
+                # need a return value.
+ self._sort_using_by_arg(data, by=by)
+ elif not alpha:
+ data.sort(key=self._strtod_key_func)
+ else:
+ data.sort()
+ if desc:
+ data = list(reversed(data))
+ if not (start is None and num is None):
+ data = data[start:start + num]
+ if store is not None:
+ self._db[store] = data
+ return len(data)
+ else:
+ return self._retrive_data_from_sort(data, get)
+ except KeyError:
+ return []
+
+ def _retrive_data_from_sort(self, data, get):
+ if get is not None:
+ if isinstance(get, string_types):
+ get = [get]
+ new_data = []
+ for k in data:
+ for g in get:
+ single_item = self._get_single_item(k, g)
+ new_data.append(single_item)
+ data = new_data
+ return data
+
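+    # Resolve a single SORT ... GET pattern: '*' is replaced with the
+    # element (and 'key->field' then reads a hash field), while '#'
+    # returns the element itself.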
+ def _get_single_item(self, k, g):
+ g = to_bytes(g)
+ if b'*' in g:
+ g = g.replace(b'*', k)
+ if b'->' in g:
+ key, hash_key = g.split(b'->')
+ single_item = self._db.get(key, {}).get(hash_key)
+ else:
+ single_item = self._db.get(g)
+ elif b'#' in g:
+ single_item = k
+ else:
+ single_item = None
+ return single_item
+
+ def _strtod_key_func(self, arg):
+        # to_bytes()'ing the arg is important! Don't ever remove this.
+ arg = to_bytes(arg)
+ end = c_char_p()
+ val = _strtod(arg, pointer(end))
+ # real Redis also does an isnan check, not sure if
+ # that's needed here or not.
+ if end.value:
+ raise redis.ResponseError(
+ "One or more scores can't be converted into double")
+ else:
+ return val
+
+ def _sort_using_by_arg(self, data, by):
+ by = to_bytes(by)
+
+ def _by_key(arg):
... 4044 lines suppressed ...
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/python-fakeredis.git