[Python-modules-commits] [python-cachetools] 01/05: New upstream version 2.0.0
Christian Kastner
ckk at moszumanska.debian.org
Thu Jan 12 21:18:09 UTC 2017
This is an automated email from the git hooks/post-receive script.
ckk pushed a commit to branch master
in repository python-cachetools.
commit 50ad78b8fae1902b69efff9c3942118d31b1615d
Author: Christian Kastner <ckk at kvr.at>
Date: Thu Jan 12 20:40:09 2017 +0100
New upstream version 2.0.0
---
.travis.yml | 3 +-
CHANGES.rst | 12 +++++
LICENSE | 2 +-
README.rst | 4 --
cachetools/__init__.py | 43 ++++-------------
cachetools/cache.py | 4 +-
cachetools/func.py | 65 ++++++++-----------------
cachetools/keys.py | 4 ++
cachetools/lfu.py | 2 +
cachetools/lru.py | 2 +
cachetools/rr.py | 2 +
cachetools/ttl.py | 2 +
docs/index.rst | 127 +++++++------------------------------------------
setup.py | 3 --
tests/test_func.py | 47 ------------------
tests/test_keys.py | 8 ++--
tests/test_method.py | 33 +------------
tests/test_wrapper.py | 40 +++++++---------
tox.ini | 3 +-
19 files changed, 101 insertions(+), 305 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index ac1856c..6a2852b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,13 +4,12 @@ language: python
python:
- 2.7
-- 3.2
- 3.3
- 3.4
- 3.5
install:
-- pip install "coverage<4" coveralls tox "virtualenv<14.0.0"
+- pip install coveralls tox
script:
- tox -e check-manifest,flake8,py
diff --git a/CHANGES.rst b/CHANGES.rst
index e468c31..6aeabd3 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,15 @@
+v2.0.0 (2016-10-03)
+-------------------
+
+- Drop Python 3.2 support (breaking change).
+
+- Drop support for deprecated features (breaking change).
+
+- Move key functions to separate package (breaking change).
+
+- Accept non-integer ``maxsize`` in ``Cache.__repr__()``.
+
+
v1.1.6 (2016-04-01)
-------------------
diff --git a/LICENSE b/LICENSE
index aa77426..73c1611 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)
-Copyright (c) 2014, 2015 Thomas Kemmer
+Copyright (c) 2014-2016 Thomas Kemmer
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/README.rst b/README.rst
index a5e62af..4742da3 100644
--- a/README.rst
+++ b/README.rst
@@ -51,10 +51,6 @@ Project Resources
:target: https://pypi.python.org/pypi/cachetools/
:alt: Latest PyPI version
-.. image:: http://img.shields.io/pypi/dm/cachetools.svg?style=flat
- :target: https://pypi.python.org/pypi/cachetools/
- :alt: Number of PyPI downloads
-
.. image:: http://img.shields.io/travis/tkem/cachetools/master.svg?style=flat
:target: https://travis-ci.org/tkem/cachetools/
:alt: Travis CI build status
diff --git a/cachetools/__init__.py b/cachetools/__init__.py
index 0630970..6469970 100644
--- a/cachetools/__init__.py
+++ b/cachetools/__init__.py
@@ -1,11 +1,11 @@
"""Extensible memoizing collections and decorators."""
+from __future__ import absolute_import
+
import functools
-import warnings
+from . import keys
from .cache import Cache
-from .func import lfu_cache, lru_cache, rr_cache, ttl_cache
-from .keys import hashkey, typedkey
from .lfu import LFUCache
from .lru import LRUCache
from .rr import RRCache
@@ -13,14 +13,10 @@ from .ttl import TTLCache
__all__ = (
'Cache', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache',
- 'cached', 'cachedmethod', 'hashkey', 'typedkey',
- # make cachetools.func.* available for backwards compatibility
- 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache',
+ 'cached', 'cachedmethod'
)
-__version__ = '1.1.6'
-
-_default = [] # evaluates to False
+__version__ = '2.0.0'
if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'):
_update_wrapper = functools.update_wrapper
@@ -31,7 +27,7 @@ else:
return wrapper
-def cached(cache, key=hashkey, lock=None):
+def cached(cache, key=keys.hashkey, lock=None):
"""Decorator to wrap a function with a memoizing callable that saves
results in a cache.
@@ -72,29 +68,18 @@ def cached(cache, key=hashkey, lock=None):
return decorator
-def cachedmethod(cache, key=_default, lock=None, typed=_default):
+def cachedmethod(cache, key=keys.hashkey, lock=None):
"""Decorator to wrap a class or instance method with a memoizing
callable that saves results in a cache.
"""
- if key is not _default and not callable(key):
- key, typed = _default, key
- if typed is not _default:
- warnings.warn("Passing 'typed' to cachedmethod() is deprecated, "
- "use 'key=typedkey' instead", DeprecationWarning, 2)
-
def decorator(method):
- # pass method to default key function for backwards compatibilty
- if key is _default:
- makekey = functools.partial(typedkey if typed else hashkey, method)
- else:
- makekey = key # custom key function always receive method args
if lock is None:
def wrapper(self, *args, **kwargs):
c = cache(self)
if c is None:
return method(self, *args, **kwargs)
- k = makekey(self, *args, **kwargs)
+ k = key(self, *args, **kwargs)
try:
return c[k]
except KeyError:
@@ -110,7 +95,7 @@ def cachedmethod(cache, key=_default, lock=None, typed=_default):
c = cache(self)
if c is None:
return method(self, *args, **kwargs)
- k = makekey(self, *args, **kwargs)
+ k = key(self, *args, **kwargs)
try:
with lock(self):
return c[k]
@@ -123,13 +108,5 @@ def cachedmethod(cache, key=_default, lock=None, typed=_default):
except ValueError:
pass # value too large
return v
- _update_wrapper(wrapper, method)
-
- # deprecated wrapper attribute
- def getter(self):
- warnings.warn('%s.cache is deprecated' % method.__name__,
- DeprecationWarning, 2)
- return cache(self)
- wrapper.cache = getter
- return wrapper
+ return _update_wrapper(wrapper, method)
return decorator
diff --git a/cachetools/cache.py b/cachetools/cache.py
index 409a6b4..0852631 100644
--- a/cachetools/cache.py
+++ b/cachetools/cache.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+
from .abc import DefaultMapping
@@ -28,7 +30,7 @@ class Cache(DefaultMapping):
self.__maxsize = maxsize
def __repr__(self):
- return '%s(%r, maxsize=%d, currsize=%d)' % (
+ return '%s(%r, maxsize=%r, currsize=%r)' % (
self.__class__.__name__,
list(self.__data.items()),
self.__maxsize,
diff --git a/cachetools/func.py b/cachetools/func.py
index 25b415a..5a2ce84 100644
--- a/cachetools/func.py
+++ b/cachetools/func.py
@@ -1,48 +1,35 @@
"""`functools.lru_cache` compatible memoizing function decorators."""
+from __future__ import absolute_import
+
import collections
import functools
import random
import time
-import warnings
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
-from .keys import hashkey, typedkey
+from . import keys
+from .lfu import LFUCache
+from .lru import LRUCache
+from .rr import RRCache
+from .ttl import TTLCache
__all__ = ('lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache')
-class _NLock:
- def __enter__(self):
- pass
-
- def __exit__(self, *exc):
- pass
-
_CacheInfo = collections.namedtuple('CacheInfo', [
'hits', 'misses', 'maxsize', 'currsize'
])
-_marker = object()
-
-
-def _deprecated(message, level=2):
- warnings.warn('%s is deprecated' % message, DeprecationWarning, level)
-
-def _cache(cache, typed=False, context=_marker):
+def _cache(cache, typed=False):
def decorator(func):
- key = typedkey if typed else hashkey
- if context is _marker:
- lock = RLock()
- elif context is None:
- lock = _NLock()
- else:
- lock = context()
+ key = keys.typedkey if typed else keys.hashkey
+ lock = RLock()
stats = [0, 0]
def cache_info():
@@ -77,57 +64,43 @@ def _cache(cache, typed=False, context=_marker):
return v
functools.update_wrapper(wrapper, func)
if not hasattr(wrapper, '__wrapped__'):
- wrapper.__wrapped__ = func # Python < 3.2
+ wrapper.__wrapped__ = func # Python 2.7
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorator
-def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=_marker):
+def lfu_cache(maxsize=128, typed=False):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Frequently Used (LFU)
algorithm.
"""
- from .lfu import LFUCache
- if lock is not _marker:
- _deprecated("Passing 'lock' to lfu_cache()", 3)
- return _cache(LFUCache(maxsize, getsizeof), typed, lock)
+ return _cache(LFUCache(maxsize), typed)
-def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=_marker):
+def lru_cache(maxsize=128, typed=False):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm.
"""
- from .lru import LRUCache
- if lock is not _marker:
- _deprecated("Passing 'lock' to lru_cache()", 3)
- return _cache(LRUCache(maxsize, getsizeof), typed, lock)
+ return _cache(LRUCache(maxsize), typed)
-def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None,
- lock=_marker):
+def rr_cache(maxsize=128, choice=random.choice, typed=False):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Random Replacement (RR)
algorithm.
"""
- from .rr import RRCache
- if lock is not _marker:
- _deprecated("Passing 'lock' to rr_cache()", 3)
- return _cache(RRCache(maxsize, choice, getsizeof), typed, lock)
+ return _cache(RRCache(maxsize, choice), typed)
-def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False,
- getsizeof=None, lock=_marker):
+def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm with a per-item time-to-live (TTL) value.
"""
- from .ttl import TTLCache
- if lock is not _marker:
- _deprecated("Passing 'lock' to ttl_cache()", 3)
- return _cache(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock)
+ return _cache(TTLCache(maxsize, ttl, timer), typed)
diff --git a/cachetools/keys.py b/cachetools/keys.py
index 887fb30..ba1e2fc 100644
--- a/cachetools/keys.py
+++ b/cachetools/keys.py
@@ -1,3 +1,7 @@
+"""Key functions for memoizing decorators."""
+
+from __future__ import absolute_import
+
__all__ = ('hashkey', 'typedkey')
diff --git a/cachetools/lfu.py b/cachetools/lfu.py
index 160f537..f5817a4 100644
--- a/cachetools/lfu.py
+++ b/cachetools/lfu.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+
import collections
from .cache import Cache
diff --git a/cachetools/lru.py b/cachetools/lru.py
index 525abd8..b945797 100644
--- a/cachetools/lru.py
+++ b/cachetools/lru.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+
import collections
from .cache import Cache
diff --git a/cachetools/rr.py b/cachetools/rr.py
index c82919e..8cd856c 100644
--- a/cachetools/rr.py
+++ b/cachetools/rr.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+
import random
from .cache import Cache
diff --git a/cachetools/ttl.py b/cachetools/ttl.py
index 04e9d85..d20cc0b 100644
--- a/cachetools/ttl.py
+++ b/cachetools/ttl.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+
import collections
import time
diff --git a/docs/index.rst b/docs/index.rst
index b287c46..23c4dae 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -21,20 +21,6 @@ Multiple cache classes based on different caching algorithms are
implemented, and decorators for easily memoizing function and method
calls are provided, too.
-.. note::
-
- Several features are now marked as deprecated and will be removed
- in the next major release, :mod:`cachetools` version 2.0. If you
- happen to rely on any of these features, it is highly recommended
- to specify your module dependencies accordingly, for example
- ``cachetools ~= 1.1`` when using :mod:`setuptools`.
-
-.. versionchanged:: 1.1
-
- Moved :func:`functools.lru_cache` compatible decorators to the
- :mod:`cachetools.func` module. For backwards compatibility, they
- continue to be visible in this module as well.
-
Cache implementations
------------------------------------------------------------------------
@@ -149,7 +135,7 @@ of one argument used to retrieve the size of an item's value.
expired by the current value returned by :attr:`timer`.
-Decorators
+Memoizing decorators
------------------------------------------------------------------------
The :mod:`cachetools` module provides decorators for memoizing
@@ -165,7 +151,7 @@ often called with the same arguments::
for i in range(100):
print('fib(%d) = %d' % (i, fib(i)))
-.. decorator:: cached(cache, key=hashkey, lock=None)
+.. decorator:: cached(cache, key=cachetools.keys.hashkey, lock=None)
Decorator to wrap a function with a memoizing callable that saves
results in a cache.
@@ -181,7 +167,7 @@ often called with the same arguments::
positional and keyword arguments as the wrapped function itself,
and which has to return a suitable cache key. Since caches are
mappings, the object returned by `key` must be hashable. The
- default is to call :func:`hashkey`.
+ default is to call :func:`cachetools.keys.hashkey`.
If `lock` is not :const:`None`, it must specify an object
implementing the `context manager`_ protocol. Any access to the
@@ -241,9 +227,7 @@ often called with the same arguments::
print(fac(42))
print(cache)
- .. versionadded:: 1.1
-
-.. decorator:: cachedmethod(cache, key=hashkey, lock=None, typed=False)
+.. decorator:: cachedmethod(cache, key=cachetools.keys.hashkey, lock=None)
Decorator to wrap a class or instance method with a memoizing
callable that saves results in a (possibly shared) cache.
@@ -261,11 +245,6 @@ often called with the same arguments::
is the user's responsibility to handle concurrent calls to the
underlying wrapped method in a multithreaded environment.
- If `key` or the optional `typed` keyword argument are set to
- :const:`True`, the :func:`typedkey` function is used for generating
- hash keys. This has been deprecated in favor of specifying
- ``key=typedkey`` explicitly.
-
One advantage of :func:`cachedmethod` over the :func:`cached`
function decorator is that cache properties such as `maxsize` can
be set at runtime::
@@ -290,51 +269,20 @@ often called with the same arguments::
peps = CachedPEPs(cachesize=10)
print("PEP #1: %s" % peps.get(1))
- For backwards compatibility, the default key function used by
- :func:`cachedmethod` will generate distinct keys for different
- methods to ease using a shared cache with multiple methods. This
- has been deprecated, and relying on this feature is strongly
- discouraged. When using a shared cache, distinct key functions
- should be used, as with the :func:`cached` decorator.
-
- .. versionadded:: 1.1
-
- The optional `key` and `lock` parameters.
-
- .. versionchanged:: 1.1
-
- The :attr:`__wrapped__` attribute is now set when running Python
- 2.7, too.
-
- .. deprecated:: 1.1
-
- The `typed` argument. Use ``key=typedkey`` instead.
-
- .. deprecated:: 1.1
-
- When using a shared cached for multiple methods, distinct key
- function should be used.
-
- .. deprecated:: 1.1
-
- The wrapper function's :attr:`cache` attribute. Use the
- original function passed as the decorator's `cache` argument to
- access the cache object.
+:mod:`cachetools.keys` --- Key functions for memoizing decorators
+============================================================================
-Key functions
-------------------------------------------------------------------------
+.. module:: cachetools.keys
-The following functions can be used as key functions with the
-:func:`cached` and :func:`cachedmethod` decorators:
+This module provides several functions that can be used as key
+functions with the :func:`cached` and :func:`cachedmethod` decorators:
.. autofunction:: hashkey
This function returns a :class:`tuple` instance suitable as a cache
key, provided the positional and keywords arguments are hashable.
- .. versionadded:: 1.1
-
.. autofunction:: typedkey
This function is similar to :func:`hashkey`, but arguments of
@@ -342,8 +290,6 @@ The following functions can be used as key functions with the
``typedkey(3)`` and ``typedkey(3.0)`` will return different
results.
- .. versionadded:: 1.1
-
These functions can also be helpful when implementing custom key
functions for handling some non-hashable arguments. For example,
calling the following function with a dictionary as its `env` argument
@@ -381,77 +327,40 @@ different caching strategies. Note that unlike
:func:`functools.lru_cache`, setting `maxsize` to :const:`None` is not
supported.
+If the optional argument `typed` is set to :const:`True`, function
+arguments of different types will be cached separately. For example,
+``f(3)`` and ``f(3.0)`` will be treated as distinct calls with
+distinct results.
+
The wrapped function is instrumented with :func:`cache_info` and
:func:`cache_clear` functions to provide information about cache
performance and clear the cache. See the :func:`functools.lru_cache`
documentation for details.
-In addition to `maxsize`, all decorators accept the following
-optional keyword arguments:
-
-- `typed`, if is set to :const:`True`, will cause function arguments
- of different types to be cached separately. For example, ``f(3)``
- and ``f(3.0)`` will be treated as distinct calls with distinct
- results.
-
-- `getsizeof` specifies a function of one argument that will be
- applied to each cache value to determine its size. The default
- value is :const:`None`, which will assign each item an equal size of
- :const:`1`. This has been deprecated in favor of the new
- :func:`cachetools.cached` decorator, which allows passing fully
- customized cache objects.
-
-- `lock` specifies a function of zero arguments that returns a
- `context manager`_ to lock the cache when necessary. If not
- specified, :class:`threading.RLock` will be used to synchronize
- access from multiple threads. The use of `lock` is discouraged, and
- the `lock` argument has been deprecated.
-
-.. versionadded:: 1.1
-
- Formerly, the decorators provided by :mod:`cachetools.func` were
- part of the :mod:`cachetools` module.
-
-.. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock)
+.. decorator:: lfu_cache(maxsize=128, typed=False)
Decorator that wraps a function with a memoizing callable that
saves up to `maxsize` results based on a Least Frequently Used
(LFU) algorithm.
- .. deprecated:: 1.1
-
- The `getsizeof` and `lock` arguments.
-
-.. decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock)
+.. decorator:: lru_cache(maxsize=128, typed=False)
Decorator that wraps a function with a memoizing callable that
saves up to `maxsize` results based on a Least Recently Used (LRU)
algorithm.
- .. deprecated:: 1.1
-
- The `getsizeof` and `lock` arguments.
-
-.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, lock=threading.RLock)
+.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False)
Decorator that wraps a function with a memoizing callable that
saves up to `maxsize` results based on a Random Replacement (RR)
algorithm.
- .. deprecated:: 1.1
-
- The `getsizeof` and `lock` arguments.
-
-.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, getsizeof=None, lock=threading.RLock)
+.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False)
Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm with a per-item time-to-live (TTL) value.
- .. deprecated:: 1.1
-
- The `getsizeof` and `lock` arguments.
-
.. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache
.. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms
diff --git a/setup.py b/setup.py
index b065d87..5c0706f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
-
from setuptools import find_packages, setup
@@ -30,7 +28,6 @@ setup(
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
diff --git a/tests/test_func.py b/tests/test_func.py
index 236a5d7..1f33246 100644
--- a/tests/test_func.py
+++ b/tests/test_func.py
@@ -1,5 +1,4 @@
import unittest
-import warnings
import cachetools.func
@@ -55,52 +54,6 @@ class DecoratorTestMixin(object):
self.assertEqual(cached(1.0), 1.0)
self.assertEqual(cached.cache_info(), (2, 2, 2, 2))
- def test_decorator_lock(self):
- class Lock(object):
- count = 0
-
- def __enter__(self):
- Lock.count += 1
-
- def __exit__(self, *exc):
- pass
-
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter('always')
- cached = self.decorator(maxsize=2, lock=Lock)(lambda n: n)
- self.assertEqual(len(w), 1)
- self.assertIs(w[0].category, DeprecationWarning)
-
- self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
- self.assertEqual(Lock.count, 1)
- self.assertEqual(cached(1), 1)
- self.assertEqual(Lock.count, 3)
- self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
- self.assertEqual(Lock.count, 4)
- self.assertEqual(cached(1), 1)
- self.assertEqual(Lock.count, 5)
- self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
- self.assertEqual(Lock.count, 6)
- self.assertEqual(cached(1.0), 1.0)
- self.assertEqual(Lock.count, 7)
- self.assertEqual(cached.cache_info(), (2, 1, 2, 1))
- self.assertEqual(Lock.count, 8)
-
- def test_decorator_nolock(self):
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter('always')
- cached = self.decorator(maxsize=2, lock=None)(lambda n: n)
- self.assertEqual(len(w), 1)
- self.assertIs(w[0].category, DeprecationWarning)
-
- self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
- self.assertEqual(cached(1), 1)
- self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
- self.assertEqual(cached(1), 1)
- self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
- self.assertEqual(cached(1.0), 1.0)
- self.assertEqual(cached.cache_info(), (2, 1, 2, 1))
-
class LFUDecoratorTest(unittest.TestCase, DecoratorTestMixin):
diff --git a/tests/test_keys.py b/tests/test_keys.py
index 94184a5..2b9ced6 100644
--- a/tests/test_keys.py
+++ b/tests/test_keys.py
@@ -1,11 +1,11 @@
import unittest
-import cachetools
+import cachetools.keys
class CacheKeysTest(unittest.TestCase):
- def test_hashkey(self, key=cachetools.hashkey):
+ def test_hashkey(self, key=cachetools.keys.hashkey):
self.assertEqual(key(), key())
self.assertEqual(hash(key()), hash(key()))
self.assertEqual(key(1, 2, 3), key(1, 2, 3))
@@ -22,7 +22,7 @@ class CacheKeysTest(unittest.TestCase):
self.assertEqual(key(1, 2, 3), key(1.0, 2.0, 3.0))
self.assertEqual(hash(key(1, 2, 3)), hash(key(1.0, 2.0, 3.0)))
- def test_typedkey(self, key=cachetools.typedkey):
+ def test_typedkey(self, key=cachetools.keys.typedkey):
self.assertEqual(key(), key())
self.assertEqual(hash(key()), hash(key()))
self.assertEqual(key(1, 2, 3), key(1, 2, 3))
@@ -38,7 +38,7 @@ class CacheKeysTest(unittest.TestCase):
# typed keys compare unequal
self.assertNotEqual(key(1, 2, 3), key(1.0, 2.0, 3.0))
- def test_addkeys(self, key=cachetools.hashkey):
+ def test_addkeys(self, key=cachetools.keys.hashkey):
self.assertIsInstance(key(), tuple)
self.assertIsInstance(key(1, 2, 3) + key(4, 5, 6), type(key()))
self.assertIsInstance(key(1, 2, 3) + (4, 5, 6), type(key()))
diff --git a/tests/test_method.py b/tests/test_method.py
index b0b9916..db810b6 100644
--- a/tests/test_method.py
+++ b/tests/test_method.py
@@ -1,8 +1,7 @@
import operator
import unittest
-import warnings
-from cachetools import LRUCache, cachedmethod, typedkey
+from cachetools import LRUCache, cachedmethod, keys
class Cached(object):
@@ -17,7 +16,7 @@ class Cached(object):
self.count += 1
return count
- @cachedmethod(operator.attrgetter('cache'), key=typedkey)
+ @cachedmethod(operator.attrgetter('cache'), key=keys.typedkey)
def get_typed(self, value):
count = self.count
self.count += 1
@@ -45,7 +44,6 @@ class CachedMethodTest(unittest.TestCase):
def test_dict(self):
cached = Cached({})
- self.assertEqual(cached.cache, cached.get.cache(cached))
self.assertEqual(cached.get(0), 0)
self.assertEqual(cached.get(1), 1)
@@ -58,7 +56,6 @@ class CachedMethodTest(unittest.TestCase):
def test_typed_dict(self):
cached = Cached(LRUCache(maxsize=2))
- self.assertEqual(cached.cache, cached.get_typed.cache(cached))
self.assertEqual(cached.get_typed(0), 0)
self.assertEqual(cached.get_typed(1), 1)
@@ -70,7 +67,6 @@ class CachedMethodTest(unittest.TestCase):
def test_lru(self):
cached = Cached(LRUCache(maxsize=2))
- self.assertEqual(cached.cache, cached.get.cache(cached))
self.assertEqual(cached.get(0), 0)
self.assertEqual(cached.get(1), 1)
@@ -83,7 +79,6 @@ class CachedMethodTest(unittest.TestCase):
def test_typed_lru(self):
cached = Cached(LRUCache(maxsize=2))
- self.assertEqual(cached.cache, cached.get_typed.cache(cached))
self.assertEqual(cached.get_typed(0), 0)
self.assertEqual(cached.get_typed(1), 1)
@@ -95,7 +90,6 @@ class CachedMethodTest(unittest.TestCase):
def test_nospace(self):
cached = Cached(LRUCache(maxsize=0))
- self.assertEqual(cached.cache, cached.get.cache(cached))
self.assertEqual(cached.get(0), 0)
self.assertEqual(cached.get(1), 1)
@@ -105,7 +99,6 @@ class CachedMethodTest(unittest.TestCase):
def test_nocache(self):
cached = Cached(None)
- self.assertEqual(None, cached.get.cache(cached))
self.assertEqual(cached.get(0), 0)
self.assertEqual(cached.get(1), 1)
@@ -124,7 +117,6 @@ class CachedMethodTest(unittest.TestCase):
return Int(fractions.Fraction.__add__(self, other))
cached = Cached(weakref.WeakValueDictionary(), count=Int(0))
- self.assertEqual(cached.cache, cached.get.cache(cached))
self.assertEqual(cached.get(0), 0)
self.assertEqual(cached.get(0), 1)
@@ -144,7 +136,6 @@ class CachedMethodTest(unittest.TestCase):
def test_locked_dict(self):
cached = Locked({})
- self.assertEqual(cached.cache, cached.get.cache(cached))
self.assertEqual(cached.get(0), 1)
self.assertEqual(cached.get(1), 3)
@@ -154,7 +145,6 @@ class CachedMethodTest(unittest.TestCase):
def test_locked_nocache(self):
cached = Locked(None)
- self.assertEqual(None, cached.get.cache(cached))
self.assertEqual(cached.get(0), 0)
self.assertEqual(cached.get(1), 0)
@@ -164,28 +154,9 @@ class CachedMethodTest(unittest.TestCase):
def test_locked_nospace(self):
cached = Locked(LRUCache(maxsize=0))
- self.assertEqual(cached.cache, cached.get.cache(cached))
self.assertEqual(cached.get(0), 1)
self.assertEqual(cached.get(1), 3)
self.assertEqual(cached.get(1), 5)
self.assertEqual(cached.get(1.0), 7)
self.assertEqual(cached.get(1.0), 9)
-
- def test_typed_deprecated(self):
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
- cachedmethod(lambda self: None, None)(lambda self: None)
- self.assertIs(w[-1].category, DeprecationWarning)
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
- cachedmethod(lambda self: None, False)(lambda self: None)
- self.assertIs(w[-1].category, DeprecationWarning)
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
- cachedmethod(lambda self: None, True)(lambda self: None)
- self.assertIs(w[-1].category, DeprecationWarning)
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always")
- cachedmethod(lambda self: None, typed=None)(lambda self: None)
- self.assertIs(w[-1].category, DeprecationWarning)
diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py
index 1d03fb2..a6e649c 100644
--- a/tests/test_wrapper.py
+++ b/tests/test_wrapper.py
@@ -1,6 +1,7 @@
import unittest
import cachetools
+import cachetools.keys
class DecoratorTestMixin(object):
@@ -24,15 +25,15 @@ class DecoratorTestMixin(object):
self.assertEqual(wrapper(0), 0)
self.assertEqual(len(cache), 1)
- self.assertIn(cachetools.hashkey(0), cache)
- self.assertNotIn(cachetools.hashkey(1), cache)
- self.assertNotIn(cachetools.hashkey(1.0), cache)
+ self.assertIn(cachetools.keys.hashkey(0), cache)
+ self.assertNotIn(cachetools.keys.hashkey(1), cache)
+ self.assertNotIn(cachetools.keys.hashkey(1.0), cache)
self.assertEqual(wrapper(1), 1)
self.assertEqual(len(cache), 2)
- self.assertIn(cachetools.hashkey(0), cache)
- self.assertIn(cachetools.hashkey(1), cache)
- self.assertIn(cachetools.hashkey(1.0), cache)
+ self.assertIn(cachetools.keys.hashkey(0), cache)
+ self.assertIn(cachetools.keys.hashkey(1), cache)
+ self.assertIn(cachetools.keys.hashkey(1.0), cache)
self.assertEqual(wrapper(1), 1)
self.assertEqual(len(cache), 2)
@@ -45,37 +46,32 @@ class DecoratorTestMixin(object):
def test_decorator_typed(self):
cache = self.cache(3)
-
- def typedkey(*args, **kwargs):
- key = cachetools.hashkey(*args, **kwargs)
- key += tuple(type(v) for v in args)
- key += tuple(type(v) for _, v in sorted(kwargs.items()))
- return key
- wrapper = cachetools.cached(cache, key=typedkey)(self.func)
+ key = cachetools.keys.typedkey
+ wrapper = cachetools.cached(cache, key=key)(self.func)
self.assertEqual(len(cache), 0)
self.assertEqual(wrapper.__wrapped__, self.func)
self.assertEqual(wrapper(0), 0)
self.assertEqual(len(cache), 1)
- self.assertIn(typedkey(0), cache)
- self.assertNotIn(typedkey(1), cache)
- self.assertNotIn(typedkey(1.0), cache)
+ self.assertIn(cachetools.keys.typedkey(0), cache)
+ self.assertNotIn(cachetools.keys.typedkey(1), cache)
+ self.assertNotIn(cachetools.keys.typedkey(1.0), cache)
self.assertEqual(wrapper(1), 1)
self.assertEqual(len(cache), 2)
- self.assertIn(typedkey(0), cache)
- self.assertIn(typedkey(1), cache)
- self.assertNotIn(typedkey(1.0), cache)
+ self.assertIn(cachetools.keys.typedkey(0), cache)
+ self.assertIn(cachetools.keys.typedkey(1), cache)
+ self.assertNotIn(cachetools.keys.typedkey(1.0), cache)
self.assertEqual(wrapper(1), 1)
self.assertEqual(len(cache), 2)
self.assertEqual(wrapper(1.0), 2)
self.assertEqual(len(cache), 3)
- self.assertIn(typedkey(0), cache)
- self.assertIn(typedkey(1), cache)
- self.assertIn(typedkey(1.0), cache)
+ self.assertIn(cachetools.keys.typedkey(0), cache)
+ self.assertIn(cachetools.keys.typedkey(1), cache)
+ self.assertIn(cachetools.keys.typedkey(1.0), cache)
self.assertEqual(wrapper(1.0), 2)
self.assertEqual(len(cache), 3)
diff --git a/tox.ini b/tox.ini
index c98f2f6..0bfd943 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,9 +2,8 @@
envlist = check-manifest,docs,flake8,py
[testenv]
-# coverage 4.0 drops Python 3.2 compatibility
deps =
- coverage<4
+ coverage
pytest
pytest-cov
commands =
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/python-cachetools.git
More information about the Python-modules-commits
mailing list