[med-svn] [Git][med-team/python-multipletau][upstream] New upstream version 0.3.0+ds

Alexandre Mestiashvili gitlab at salsa.debian.org
Thu Nov 1 15:26:43 GMT 2018


Alexandre Mestiashvili pushed to branch upstream at Debian Med / python-multipletau


Commits:
b1ffc393 by Alexandre Mestiashvili at 2018-11-01T14:08:18Z
New upstream version 0.3.0+ds
- - - - -


16 changed files:

- CHANGELOG
- README.rst
- docs/conf.py
- docs/extensions/fancy_include.py
- multipletau/__init__.py
- multipletau/_version.py
- multipletau/core.py
- setup.cfg
- setup.py
- tests/test_ac_cc.py
- tests/test_autocorrelate.py
- tests/test_basic.py
- + tests/test_compress.py
- tests/test_correlate.py
- tests/test_ref_numpy.py
- + tests/test_ret_sum.py


Changes:

=====================================
CHANGELOG
=====================================
@@ -1,3 +1,11 @@
+0.3.0
+ - feat: add option to choose the strategy for propagating values to
+   the next register (#14)
+ - feat: add option to return the pure sum and the internal normalization
+   count (#14)
+0.2.0
+ - tests: filter warnings and check with flake8
+ - implement unique warning classes
 0.1.9
  - include docs in sdist
 0.1.8
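
The two new 0.3.0 features can be exercised as in the following minimal sketch (assuming multipletau 0.3.0 and NumPy are installed; the input array is arbitrary example data):

    import numpy as np
    import multipletau

    a = np.random.random(1024)

    # compress: strategy for propagating values to the next register
    ac_first = multipletau.autocorrelate(a, m=16, compress="first")

    # ret_sum: return the pure sum z_k and the normalization count M-k;
    # ret_sum=True must not be combined with normalize=True
    ac_sum, count = multipletau.autocorrelate(a, m=16, ret_sum=True)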


=====================================
README.rst
=====================================
@@ -10,7 +10,7 @@ correlation on a linear scale such as `numpy.correlate <http://docs.scipy.org/do
 
 Installation
 ------------
-Multipletau supports Python 2.6+ and Python 3.3+ with a common codebase.
+Multipletau supports Python 2.7 and Python 3.3+ with a common codebase.
 The only requirement for ``multipletau`` is `NumPy <http://www.numpy.org/>`__ (for fast
 operations on arrays). Install multipletau from the Python package index:
 
@@ -56,7 +56,7 @@ You can find out what version you are using by typing (in a Python console):
 
     >>> import multipletau
     >>> multipletau.__version__
-    '0.1.4'
+    '0.3.0'
 
 
 
@@ -64,7 +64,7 @@ You can find out what version you are using by typing (in a Python console):
    :target: https://pypi.python.org/pypi/multipletau
 .. |Tests Status| image:: http://img.shields.io/travis/FCS-analysis/multipletau.svg?label=tests
    :target: https://travis-ci.org/FCS-analysis/multipletau
-.. |Coverage Status| image:: https://img.shields.io/coveralls/FCS-analysis/multipletau.svg
-   :target: https://coveralls.io/r/FCS-analysis/multipletau
+.. |Coverage Status| image:: https://img.shields.io/codecov/c/github/FCS-analysis/multipletau/master.svg
+   :target: https://codecov.io/gh/FCS-analysis/multipletau
 .. |Docs Status| image:: https://readthedocs.org/projects/multipletau/badge/?version=latest
    :target: https://readthedocs.org/projects/multipletau/builds/


=====================================
docs/conf.py
=====================================
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 #
 # project documentation build configuration file, created by
@@ -15,12 +16,7 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
 
-# Get version number from qpimage._version file
 import mock
 import os.path as op
 import sys
@@ -36,12 +32,18 @@ install_requires = ["numpy"]
 for mod_name in install_requires:
     sys.modules[mod_name] = mock.Mock()
 
-
-# There should be a file "setup.py" that has the property "version"
-from setup import author, authors, description, name, version, year
+name = 'multipletau'
+github_project = 'FCS-analysis/' + name
+year = "2012"
+author = 'Paul Müller'
+authors = [author]
+description = 'A multiple-tau algorithm for Python/NumPy'
 projectname = name
 projectdescription = description
 
+exec(open(op.join(pdir, "multipletau/_version.py")).read())
+release = version #@UndefinedVariable
+
 # http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_member_order
 # Order class attributes and functions in separate blocks
 autodoc_member_order = 'bysource'
@@ -94,7 +96,7 @@ copyright = year+", "+author
 #
 # The short X.Y version.
 # The full version, including alpha/beta/rc tags.
-release = version
+#release = version
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -275,7 +277,7 @@ man_pages = [
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('index', projectname, projectname+u' Documentation',
+  ('index', projectname, projectname+' Documentation',
    author, projectname,
    projectdescription,
    'Numeric'),
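
The conf.py hunk above replaces the former `from setup import ...` with hard-coded metadata and an `exec` of `multipletau/_version.py`. A minimal sketch of that pattern (the project-root variable `pdir` is defined elsewhere in conf.py; `project_root` below is a hypothetical stand-in):

    import os.path as op
    import sys

    import mock

    # mock the runtime dependencies so autodoc can import the package
    # even when they are not installed (e.g. on readthedocs)
    for mod_name in ["numpy"]:
        sys.modules[mod_name] = mock.Mock()

    # obtain `version` without importing the package itself
    project_root = op.abspath(op.join(op.dirname(__file__), ".."))
    version = None
    with open(op.join(project_root, "multipletau", "_version.py")) as fd:
        exec(fd.read())  # _version.py defines `version`
    release = version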


=====================================
docs/extensions/fancy_include.py
=====================================
@@ -47,14 +47,19 @@ class IncludeDirective(Directive):
         with io.open(full_path, "r") as myfile:
             text = myfile.read()
 
+        # add reference
+        name = op.basename(full_path)[:-3]
+        rst = [".. _example_{}:".format(name),
+               "",
+               ]
+
+        # add docstring
         source = text.split('"""')
         doc = source[1].split("\n")
         doc.insert(1, "~" * len(doc[0]))  # make title heading
 
         code = source[2].split("\n")
 
-        # documentation
-        rst = []
         for line in doc:
             rst.append(line)
 


=====================================
multipletau/__init__.py
=====================================
@@ -51,7 +51,7 @@ You can find out what version you are using by typing
 
     >>> import multipletau
     >>> multipletau.__version__
-    '0.1.4'
+    '0.3.0'
 
 
 Usage


=====================================
multipletau/_version.py
=====================================
@@ -44,8 +44,11 @@ if True:  # pragma: no cover
             env['LANGUAGE'] = 'C'
             env['LANG'] = 'C'
             env['LC_ALL'] = 'C'
-            cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
-            out = cmd.communicate()[0]
+            pop = subprocess.Popen(cmd,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE,
+                                   env=env)
+            out = pop.communicate()[0]
             return out
 
         # change directory
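
The change above also captures standard error, so that messages from git (for instance "fatal: not a git repository") no longer leak to the console when the package is imported outside of a git checkout. A minimal sketch of the pattern, assuming a `git` binary on the PATH (not the exact upstream helper):

    import subprocess

    def git_describe():
        # return the output of `git describe --tags`, or "" on failure
        try:
            pop = subprocess.Popen(["git", "describe", "--tags"],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            out, err = pop.communicate()
        except OSError:
            return ""
        return out.decode("utf-8", errors="replace").strip()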


=====================================
multipletau/core.py
=====================================
@@ -41,8 +41,16 @@ import warnings
 __all__ = ["autocorrelate", "correlate", "correlate_numpy"]
 
 
-def autocorrelate(a, m=16, deltat=1, normalize=False,
-                  copy=True, dtype=None):
+class DtypeWarning(UserWarning):
+    pass
+
+
+class InvalidMWarning(UserWarning):
+    pass
+
+
+def autocorrelate(a, m=16, deltat=1, normalize=False, copy=True, dtype=None,
+                  compress="average", ret_sum=False):
     """
     Autocorrelation of a 1-dimensional sequence on a log2-scale.
 
@@ -52,36 +60,58 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
 
         :func:`numpy.correlate(a, a, mode="full")[len(a)-1:]`
 
-        :math:`z_k = \Sigma_n a_n a_{n+k}`
+        :math:`z_k = \\Sigma_n a_n a_{n+k}`
 
 
     Parameters
     ----------
-    a : array-like
+    a: array-like
         input sequence
-    m : even integer
+    m: even integer
         defines the number of points on one level, must be an
         even integer
-    deltat : float
+    deltat: float
         distance between bins
-    normalize : bool
+    normalize: bool
         normalize the result to the square of the average input
         signal and the factor :math:`M-k`.
-    copy : bool
+    copy: bool
         copy input array, set to ``False`` to save memory
-    dtype : object to be converted to a data type object
+    dtype: object to be converted to a data type object
         The data type of the returned array and of the accumulator
         for the multiple-tau computation.
+    compress: str
+        strategy for propagating values to the next register
+
+        - `"average"` (default): average two measurements when pushing
+          to the next level of the correlator.
+        - `"first"`: use only the first value when pushing to the next
+          level of the correlator.
+        - `"second"`: use only the second value when pushing to the
+          next level of the correlator.
+
+        Using only the first or the second values during propagation
+        completely removes the systematic error at the cost of
+        increasing the statistical error.
+        See https://doi.org/10.1063/1.3491098 for a discussion on the
+        effect of averaging.
+    ret_sum: bool
+        return the exact sum :math:`z_k = \\Sigma_n a_n a_{n+k}`. In addition,
+        :math:`M-k` is returned as an array of length N.
 
 
     Returns
     -------
-    autocorrelation : ndarray of shape (N,2)
+    autocorrelation: ndarray of shape (N,2)
         the lag time (1st column) and the autocorrelation (2nd column).
+    count: ndarray of length N
+        only returned if `ret_sum` is True; the value of :math:`M-k`
+        for each row in `autocorrelation`.
+
 
     Notes
     -----
-    .. versionchanged :: 0.1.6
+    .. versionchanged:: 0.1.6
        Compute the correlation for zero lag time.
 
     The algorithm computes the correlation with the convention of the
@@ -89,10 +119,10 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
 
     For experiments like e.g. fluorescence correlation spectroscopy,
     the signal can be normalized to :math:`M-k`
-    by invoking ``normalize = True``.
+    by invoking ``normalize=True``.
 
     For normalizing according to the behavior
-    of :py:func:`numpy.correlate`, use ``normalize = False``.
+    of :py:func:`numpy.correlate`, use ``normalize=False``.
 
     For complex arrays, this method falls back to the method
     :func:`correlate`.
@@ -110,6 +140,13 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
     """
     assert isinstance(copy, bool)
     assert isinstance(normalize, bool)
+    msg = "'normalize' and 'ret_sum' must not both be true"
+    assert not (normalize and ret_sum), msg
+
+    compress_values = ["average", "first", "second"]
+    assert any(compress in s for s in compress_values), \
+        "Unvalid string of compress. Possible values are " + \
+        ','.join(compress_values)
 
     if dtype is None:
         dtype = np.dtype(a[0].__class__)
@@ -127,7 +164,8 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
                          copy=copy,
                          dtype=dtype)
     elif dtype.kind != "f":
-        warnings.warn("Input dtype is not float; casting to np.float_!")
+        warnings.warn("Input dtype is not float; casting to np.float_!",
+                      DtypeWarning)
         dtype = np.dtype(np.float_)
 
     # If copy is false and dtype is the same as the input array,
@@ -139,7 +177,7 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
         mold = m
         m = np.int_((m // 2 + 1) * 2)
         warnings.warn("Invalid value of m={}. Using m={} instead"
-                      .format(mold, m))
+                      .format(mold, m), InvalidMWarning)
     else:
         m = np.int_(m)
 
@@ -184,8 +222,13 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
     # Check if len(trace) is even:
     if N % 2 == 1:
         N -= 1
-    # Add up every second element
-    trace = (trace[:N:2] + trace[1:N:2]) / 2
+    # compress every second element
+    if compress == compress_values[0]:
+        trace = (trace[:N:2] + trace[1:N:2]) / 2
+    elif compress == compress_values[1]:
+        trace = trace[:N:2]
+    elif compress == compress_values[2]:
+        trace = trace[1:N:2]
     N //= 2
     # Start iteration for each m/2 values
     for step in range(1, k + 1):
@@ -226,20 +269,29 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
         # Check if len(trace) is even:
         if N % 2 == 1:
             N -= 1
-        # Add up every second element
-        trace = (trace[:N:2] + trace[1:N:2]) / 2
+        # compress every second element
+        if compress == compress_values[0]:
+            trace = (trace[:N:2] + trace[1:N:2]) / 2
+        elif compress == compress_values[1]:
+            trace = trace[:N:2]
+        elif compress == compress_values[2]:
+            trace = trace[1:N:2]
+
         N //= 2
 
     if normalize:
         G[:, 1] /= traceavg**2 * normstat
-    else:
+    elif not ret_sum:
         G[:, 1] *= N0 / normnump
 
-    return G
+    if ret_sum:
+        return G, normstat
+    else:
+        return G
 
 
-def correlate(a, v, m=16, deltat=1, normalize=False,
-              copy=True, dtype=None):
+def correlate(a, v, m=16, deltat=1, normalize=False, copy=True, dtype=None,
+              compress="average", ret_sum=False):
     """
     Cross-correlation of two 1-dimensional sequences
     on a log2-scale.
@@ -250,7 +302,7 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
 
         :func:`numpy.correlate(a, v, mode="full")[len(a)-1:]`
 
-        :math:`z_k = \Sigma_n a_n v_{n+k}`
+        :math:`z_k = \\Sigma_n a_n v_{n+k}`
 
     Note that only the correlation in the positive direction is
     computed. To obtain the correlation for negative lag times
@@ -258,32 +310,53 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
 
     Parameters
     ----------
-    a, v : array-like
+    a, v: array-like
         input sequences with equal length
-    m : even integer
+    m: even integer
         defines the number of points on one level, must be an
         even integer
-    deltat : float
+    deltat: float
         distance between bins
-    normalize : bool
+    normalize: bool
         normalize the result to the square of the average input
         signal and the factor :math:`M-k`.
-    copy : bool
+    copy: bool
         copy input array, set to ``False`` to save memory
-    dtype : object to be converted to a data type object
+    dtype: object to be converted to a data type object
         The data type of the returned array and of the accumulator
         for the multiple-tau computation.
+    compress: str
+        strategy for propagating values to the next register
+
+        - `"average"` (default): average two measurements when pushing
+          to the next level of the correlator.
+        - `"first"`: use only the first value when pushing to the next
+          level of the correlator.
+        - `"second"`: use only the second value when pushing to the
+          next level of the correlator.
+
+        Using only the first or the second values during propagation
+        completely removes the systematic error at the cost of
+        increasing the statistical error.
+        See https://doi.org/10.1063/1.3491098 for a discussion on the
+        effect of averaging.
+    ret_sum: bool
+        return the exact sum :math:`z_k = \\Sigma_n a_n v_{n+k}`. In addition,
+        :math:`M-k` is returned as an array of length N.
 
 
     Returns
     -------
-    cross_correlation : ndarray of shape (N,2)
-        the lag time (column 1) and the cross-correlation (column2).
+    cross_correlation: ndarray of shape (N,2)
+        the lag time (1st column) and the cross-correlation (2nd column).
+    count: ndarray of length N
+        only returned if `ret_sum` is True; the value of :math:`M-k`
+        for each row in `cross_correlation`.
 
 
     Notes
     -----
-    .. versionchanged :: 0.1.6
+    .. versionchanged:: 0.1.6
        Compute the correlation for zero lag time and correctly
        normalize the correlation for a complex input sequence `v`.
 
@@ -292,10 +365,10 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
 
     For experiments like e.g. fluorescence correlation spectroscopy,
     the signal can be normalized to :math:`M-k`
-    by invoking ``normalize = True``.
+    by invoking ``normalize=True``.
 
     For normalizing according to the behavior of
-    :py:func:`numpy.correlate`, use ``normalize = False``.
+    :py:func:`numpy.correlate`, use ``normalize=False``.
 
 
     Examples
@@ -311,6 +384,14 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
     """
     assert isinstance(copy, bool)
     assert isinstance(normalize, bool)
+    msg = "'normalize' and 'ret_sum' must not both be true"
+    assert not (normalize and ret_sum), msg
+
+    compress_values = ["average", "first", "second"]
+    assert any(compress in s for s in compress_values), \
+        "Unvalid string of compress. Possible values are " + \
+        ','.join(compress_values)
+
     # See `autocorrelation` for better documented code.
     traceavg1 = np.average(v)
     traceavg2 = np.average(a)
@@ -325,16 +406,19 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
             if dtype.kind == "c" or dtype2.kind == "c":
                 # The user might try to combine complex64 and float128.
                 warnings.warn(
-                    "Input dtypes not equal; casting to np.complex_!")
+                    "Input dtypes not equal; casting to np.complex_!",
+                    InvalidMWarning)
                 dtype = np.dtype(np.complex_)
             else:
-                warnings.warn("Input dtypes not equal; casting to np.float_!")
+                warnings.warn("Input dtypes not equal; casting to np.float_!",
+                              InvalidMWarning)
                 dtype = np.dtype(np.float_)
     else:
         dtype = np.dtype(dtype)
 
     if dtype.kind not in ["c", "f"]:
-        warnings.warn("Input dtype is not float; casting to np.float_!")
+        warnings.warn("Input dtype is not float; casting to np.float_!",
+                      InvalidMWarning)
         dtype = np.dtype(np.float_)
 
     trace1 = np.array(v, dtype=dtype, copy=copy)
@@ -357,7 +441,7 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
         mold = m
         m = np.int_(m // 2 + 1) * 2
         warnings.warn("Invalid value of m={}. Using m={} instead"
-                      .format(mold, m))
+                      .format(mold, m), InvalidMWarning)
     else:
         m = np.int_(m)
 
@@ -394,9 +478,16 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
     # Check if len(trace) is even:
     if N % 2 == 1:
         N -= 1
-    # Add up every second element
-    trace1 = (trace1[:N:2] + trace1[1:N:2]) / 2
-    trace2 = (trace2[:N:2] + trace2[1:N:2]) / 2
+    # compress every second element
+    if compress == compress_values[0]:
+        trace1 = (trace1[:N:2] + trace1[1:N:2]) / 2
+        trace2 = (trace2[:N:2] + trace2[1:N:2]) / 2
+    elif compress == compress_values[1]:
+        trace1 = trace1[:N:2]
+        trace2 = trace2[:N:2]
+    elif compress == compress_values[2]:
+        trace1 = trace1[1:N:2]
+        trace2 = trace2[1:N:2]
     N //= 2
 
     for step in range(1, k + 1):
@@ -420,17 +511,27 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
         # Check if len(trace) is even:
         if N % 2 == 1:
             N -= 1
-        # Add up every second element
-        trace1 = (trace1[:N:2] + trace1[1:N:2]) / 2
-        trace2 = (trace2[:N:2] + trace2[1:N:2]) / 2
+        # compress every second element
+        if compress == compress_values[0]:
+            trace1 = (trace1[:N:2] + trace1[1:N:2]) / 2
+            trace2 = (trace2[:N:2] + trace2[1:N:2]) / 2
+        elif compress == compress_values[1]:
+            trace1 = trace1[:N:2]
+            trace2 = trace2[:N:2]
+        elif compress == compress_values[2]:
+            trace1 = trace1[1:N:2]
+            trace2 = trace2[1:N:2]
         N //= 2
 
     if normalize:
         G[:, 1] /= traceavg1 * traceavg2 * normstat
-    else:
+    elif not ret_sum:
         G[:, 1] *= N0 / normnump
 
-    return G
+    if ret_sum:
+        return G, normstat
+    else:
+        return G
 
 
 def correlate_numpy(a, v, deltat=1, normalize=False,
@@ -442,29 +543,29 @@ def correlate_numpy(a, v, deltat=1, normalize=False,
 
     Parameters
     ----------
-    a, v : array-like
+    a, v: array-like
         input sequences
-    deltat : float
+    deltat: float
         distance between bins
-    normalize : bool
+    normalize: bool
         normalize the result to the square of the average input signal
         and the factor :math:`M-k`. The resulting curve follows
         the convention of decaying to zero for large lag times.
-    copy : bool
+    copy: bool
         copy input array, set to ``False`` to save memory
-    dtype : object to be converted to a data type object
+    dtype: object to be converted to a data type object
         The data type of the returned array.
 
 
     Returns
     -------
-    cross_correlation : ndarray of shape (N,2)
+    cross_correlation: ndarray of shape (N,2)
         the lag time (column 1) and the cross-correlation (column 2).
 
 
     Notes
     -----
-    .. versionchanged :: 0.1.6
+    .. versionchanged:: 0.1.6
        Removed false normalization when `normalize==False`.
     """
     ab = np.array(a, dtype=dtype, copy=copy)
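
Taken together, the new keyword arguments of `autocorrelate` and `correlate` can be combined as in this minimal usage sketch (assuming this version of the package is installed; the toy arrays mirror the ones used in the tests):

    import numpy as np
    from multipletau import autocorrelate, correlate

    a = np.arange(42, dtype=np.float_)
    v = np.arange(1, 43, dtype=np.float_)

    # propagate only the second of every pair of values to the next register
    cc = correlate(a, v, m=16, compress="second")

    # return the unnormalized sum z_k together with the count M-k;
    # ret_sum=True must not be combined with normalize=True
    g, count = autocorrelate(a, m=16, ret_sum=True)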


=====================================
setup.cfg
=====================================
@@ -3,3 +3,7 @@ test = pytest
 
 [bdist_wheel]
 universal = 1
+
+[metadata]
+license_file = LICENSE
+


=====================================
setup.py
=====================================
@@ -7,36 +7,36 @@ import sys
 
 author = u"Paul Müller"
 authors = [author]
-description = 'A multiple-tau algorithm for Python/NumPy.'
+description = 'A multiple-tau algorithm for Python/NumPy'
 name = 'multipletau'
-year = "2013"
+year = "2012"
 
 sys.path.insert(0, realpath(dirname(__file__))+"/"+name)
 from _version import version
 
-if __name__ == "__main__":
-    setup(
-        name=name,
-        author=author,
-        author_email='dev@craban.de',
-        url='https://github.com/FCS-analysis/multipletau',
-        version=version,
-        packages=[name],
-        package_dir={name: name},
-        license="BSD (3 clause)",
-        description=description,
-        long_description=open('README.rst').read() if exists('README.rst') else '',
-        install_requires=["numpy >= 1.5.1"],
-        keywords=["multiple tau", "fluorescence correlation spectroscopy"],
-        setup_requires=['pytest-runner'],
-        tests_require=["pytest"],
-        classifiers= [
-            'Operating System :: OS Independent',
-            'Programming Language :: Python :: 2',
-            'Programming Language :: Python :: 3',
-            'Topic :: Scientific/Engineering :: Visualization',
-            'Intended Audience :: Science/Research'
-            ],
-        platforms=['ALL']
-        )
+
+setup(
+    name=name,
+    author=author,
+    author_email='dev@craban.de',
+    url='https://github.com/FCS-analysis/multipletau',
+    version=version,
+    packages=[name],
+    package_dir={name: name},
+    license="BSD (3 clause)",
+    description=description,
+    long_description=open('README.rst').read() if exists('README.rst') else '',
+    install_requires=["numpy >= 1.5.1"],
+    keywords=["multiple tau", "fluorescence correlation spectroscopy"],
+    setup_requires=['pytest-runner'],
+    tests_require=["pytest"],
+    classifiers= [
+        'Operating System :: OS Independent',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 3',
+        'Topic :: Scientific/Engineering :: Visualization',
+        'Intended Audience :: Science/Research'
+        ],
+    platforms=['ALL']
+    )
 


=====================================
tests/test_ac_cc.py
=====================================
@@ -1,20 +1,11 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-""" Tests correlation-autocorrelation identity
-"""
+"""Test correlation-autocorrelation identity"""
 from __future__ import division, print_function
 
-import numpy as np
-import os
-from os.path import abspath, basename, dirname, join, split, exists
-import platform
 import sys
-import warnings
-import zipfile
 
-# Add parent directory to beginning of path variable
-DIR = dirname(abspath(__file__))
-sys.path = [split(DIR)[0]] + sys.path
+import numpy as np
 
 import multipletau
 
@@ -25,13 +16,13 @@ def test_ac_cc_m():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays()
 
     ms = [8, 16, 32, 64, 128]
     a = np.concatenate(arrs)
 
-    res = []    
+    res = []
     for m in ms:
         r = multipletau.autocorrelate(a=a,
                                       m=m,
@@ -42,7 +33,7 @@ def test_ac_cc_m():
         res.append(r)
     res = np.concatenate(res)
 
-    rescc = []    
+    rescc = []
     for m in ms:
         r = multipletau.correlate(a=a, v=a,
                                   m=m,
@@ -52,24 +43,24 @@ def test_ac_cc_m():
                                   dtype=np.float_)
         rescc.append(r)
         # test minimal length of array
-        _r2 = multipletau.correlate(a=a[:2*m], v=a[:2*m],
-                                    m=m,
-                                    deltat=1,
-                                    normalize=False,
-                                    copy=True,
-                                    dtype=np.float_)
-    
+        multipletau.correlate(a=a[:2*m], v=a[:2*m],
+                              m=m,
+                              deltat=1,
+                              normalize=False,
+                              copy=True,
+                              dtype=np.float_)
+
     rescc = np.concatenate(rescc)
-    assert np.all(res==rescc)
+    assert np.all(res == rescc)
 
 
 def test_ac_cc_normalize():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays()
-    
+
     res = []
     for a in arrs:
         r = multipletau.autocorrelate(a=a,
@@ -79,7 +70,7 @@ def test_ac_cc_normalize():
                                       copy=True,
                                       dtype=np.float_)
         res.append(r)
-    
+
     res = np.concatenate(res)
 
     rescc = []
@@ -91,19 +82,19 @@ def test_ac_cc_normalize():
                                   copy=True,
                                   dtype=np.float_)
         rescc.append(r)
-    
+
     rescc = np.concatenate(rescc)
 
-    assert np.all(res==rescc)
+    assert np.all(res == rescc)
 
 
 def test_ac_cc_simple():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays()
-    
+
     rescc = []
     for a in arrs:
         r = multipletau.correlate(a=a, v=a,
@@ -113,7 +104,7 @@ def test_ac_cc_simple():
                                   copy=True,
                                   dtype=np.float_)
         rescc.append(r)
-    
+
     rescc = np.concatenate(rescc)
 
     resac = []
@@ -125,10 +116,10 @@ def test_ac_cc_simple():
                                       copy=True,
                                       dtype=np.float_)
         resac.append(r)
-    
+
     resac = np.concatenate(resac)
-    
-    assert np.all(resac==rescc)
+
+    assert np.all(resac == rescc)
 
 
 if __name__ == "__main__":


=====================================
tests/test_autocorrelate.py
=====================================
@@ -1,20 +1,13 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-""" Tests autocorrelation algorithm
-"""
+"""Tests autocorrelation algorithm"""
 from __future__ import division, print_function
 
-import numpy as np
 import os
-from os.path import abspath, basename, dirname, join, split, exists
-import platform
 import sys
-import warnings
-import zipfile
 
-# Add parent directory to beginning of path variable
-DIR = dirname(abspath(__file__))
-sys.path = [split(DIR)[0]] + sys.path
+import numpy as np
+import pytest
 
 import multipletau
 
@@ -28,16 +21,16 @@ def get_reference_data(funcname, pyfile):
 def get_sample_arrays():
     a = [-4.3,   1,    9, -99.2, 13]
     b = [9921, 281, 23.5,   5.3, 77]
-    l = [  33,  92,   47,    54, 99]
-    r = [   0,   1,   12,     4,  0] 
-    p = [   1,   4,   .5,     2,  3]
+    ll = [33,  92,   47,    54, 99]
+    r = [0,   1,   12,     4,  0]
+    p = [1,   4,   .5,     2,  3]
     arrs = []
-    
-    for ai, bi, li, ri, pi in zip(a,b,l,r,p): 
-        x = np.linspace(ai,bi,li)
-        arr = (x*np.roll(x,ri))**pi
+
+    for ai, bi, li, ri, pi in zip(a, b, ll, r, p):
+        x = np.linspace(ai, bi, li)
+        arr = (x*np.roll(x, ri))**pi
         arrs.append(arr)
-    
+
     return arrs
 
 
@@ -45,7 +38,7 @@ def test_ac_copy():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays()
 
     res1 = []
@@ -67,7 +60,7 @@ def test_ac_copy():
                                       copy=False,
                                       dtype=np.float_)
         res2.append(r)
-    
+
     # simple test if result is the same
     assert np.all(np.concatenate(res1) == np.concatenate(res2))
 
@@ -78,13 +71,13 @@ def test_ac_copy():
     assert not np.all(arrs == refarrs)
 
 
+@pytest.mark.filterwarnings("ignore::multipletau.core.DtypeWarning")
 def test_ac_dtype():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
-    a = np.round(get_sample_arrays()[0])
 
+    a = np.round(get_sample_arrays()[0])
 
     # integer
     rf = multipletau.autocorrelate(a=a,
@@ -102,29 +95,33 @@ def test_ac_dtype():
                                    dtype=np.uint)
 
     ri2 = multipletau.autocorrelate(a=np.array(a, dtype=np.uint),
-                                   m=16,
-                                   deltat=1,
-                                   normalize=True,
-                                   copy=True,
-                                   dtype=None)
-    
-    assert ri.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
-    assert ri2.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
-    assert np.all(rf == ri), "result should be the same, because input us the same"
-    assert np.all(rf == ri2), "result should be the same, because input us the same"
+                                    m=16,
+                                    deltat=1,
+                                    normalize=True,
+                                    copy=True,
+                                    dtype=None)
+
+    assert ri.dtype == np.dtype(
+        np.float_), "if wrong dtype, dtype should default to np.float_"
+    assert ri2.dtype == np.dtype(
+        np.float_), "if wrong dtype, dtype should default to np.float_"
+    assert np.all(
+        rf == ri), "result should be the same, because input is the same"
+    assert np.all(
+        rf == ri2), "result should be the same, because input is the same"
 
 
 def test_ac_m():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays()
 
     ms = [8, 16, 32, 64, 128]
     a = np.concatenate(arrs)
 
-    res = []    
+    res = []
     for m in ms:
         r = multipletau.autocorrelate(a=a,
                                       m=m,
@@ -135,25 +132,27 @@ def test_ac_m():
         res.append(r)
 
         # test minimal length of array
-        _r2 = multipletau.autocorrelate(a=a[:2*m],
-                                        m=m,
-                                        deltat=1,
-                                        normalize=False,
-                                        copy=True,
-                                        dtype=np.float_)
-    
+        multipletau.autocorrelate(a=a[:2*m],
+                                  m=m,
+                                  deltat=1,
+                                  normalize=False,
+                                  copy=True,
+                                  dtype=np.float_)
+
     res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    # np.save(os.path.dirname(__file__)
+    #         + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
     ref = get_reference_data(myname, __file__)
 
     assert np.allclose(res, ref, atol=0, rtol=1e-15)
 
 
+@pytest.mark.filterwarnings("ignore::multipletau.core.InvalidMWarning")
 def test_ac_m_wrong():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     a = get_sample_arrays()[0]
 
     # integer
@@ -191,19 +190,19 @@ def test_ac_m_wrong():
                                    normalize=True,
                                    copy=True,
                                    dtype=np.float_)
-    assert np.all(r1==r2)
-    assert np.all(r1==r3)
-    assert np.all(r1==r4)
-    assert np.all(r1==r5)
+    assert np.all(r1 == r2)
+    assert np.all(r1 == r3)
+    assert np.all(r1 == r4)
+    assert np.all(r1 == r5)
 
 
 def test_ac_normalize():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays()
-    
+
     res = []
     for a in arrs:
         r = multipletau.autocorrelate(a=a,
@@ -213,9 +212,10 @@ def test_ac_normalize():
                                       copy=True,
                                       dtype=np.float_)
         res.append(r)
-    
+
     res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    # np.save(os.path.dirname(__file__)
+    #         + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
     ref = get_reference_data(myname, __file__)
 
     assert np.allclose(res, ref, atol=0, rtol=1e-14)
@@ -225,9 +225,9 @@ def test_ac_simple():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays()
-    
+
     res = []
     for a in arrs:
         r = multipletau.autocorrelate(a=a,
@@ -237,9 +237,10 @@ def test_ac_simple():
                                       copy=True,
                                       dtype=np.float_)
         res.append(r)
-    
+
     res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    # np.save(os.path.dirname(__file__)
+    #         + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
     ref = get_reference_data(myname, __file__)
 
     assert np.allclose(res, ref, atol=0, rtol=1e-15)
@@ -251,4 +252,3 @@ if __name__ == "__main__":
     for key in list(loc.keys()):
         if key.startswith("test_") and hasattr(loc[key], "__call__"):
             loc[key]()
-    


=====================================
tests/test_basic.py
=====================================
@@ -1,41 +1,34 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-"""
-basic tests also available in the function docs 
-"""
+"""basic tests also available in the function docs"""
 import numpy as np
-from os.path import abspath, dirname, join
-import sys
-
-sys.path.insert(0, dirname(dirname(abspath(__file__))))
 
 from multipletau import autocorrelate, correlate
 
 
 def test_ac():
     ist = autocorrelate(range(42), m=2, dtype=np.float_)
-    soll = np.array([[  0.00000000e+00,   2.38210000e+04],
-                     [  1.00000000e+00,   2.29600000e+04],
-                     [  2.00000000e+00,   2.21000000e+04],
-                     [  4.00000000e+00,   2.03775000e+04],
-                     [  8.00000000e+00,   1.50612000e+04]])
+    soll = np.array([[0.00000000e+00,   2.38210000e+04],
+                     [1.00000000e+00,   2.29600000e+04],
+                     [2.00000000e+00,   2.21000000e+04],
+                     [4.00000000e+00,   2.03775000e+04],
+                     [8.00000000e+00,   1.50612000e+04]])
     assert np.allclose(soll, ist)
 
 
 def test_cc():
-    ist = correlate(range(42), range(1,43), m=2, dtype=np.float_)
-    soll = np.array([[  0.00000000e+00,   2.46820000e+04],
-                     [  1.00000000e+00,   2.38210000e+04],
-                     [  2.00000000e+00,   2.29600000e+04],
-                     [  4.00000000e+00,   2.12325000e+04],
-                     [  8.00000000e+00,   1.58508000e+04]])
+    ist = correlate(range(42), range(1, 43), m=2, dtype=np.float_)
+    soll = np.array([[0.00000000e+00,   2.46820000e+04],
+                     [1.00000000e+00,   2.38210000e+04],
+                     [2.00000000e+00,   2.29600000e+04],
+                     [4.00000000e+00,   2.12325000e+04],
+                     [8.00000000e+00,   1.58508000e+04]])
     assert np.allclose(soll, ist)
 
-    
+
 if __name__ == "__main__":
     # Run all tests
     loc = locals()
     for key in list(loc.keys()):
         if key.startswith("test_") and hasattr(loc[key], "__call__"):
             loc[key]()
-    
\ No newline at end of file


=====================================
tests/test_compress.py
=====================================
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""test strategies for propagating values to the next level"""
+import numpy as np
+
+from multipletau import autocorrelate, correlate
+
+
+def test_ac_compress_average():
+    ist = autocorrelate(range(42), m=2, dtype=np.float_, compress="average")
+    soll = np.array([[0.00000000e+00,   2.38210000e+04],
+                     [1.00000000e+00,   2.29600000e+04],
+                     [2.00000000e+00,   2.21000000e+04],
+                     [4.00000000e+00,   2.03775000e+04],
+                     [8.00000000e+00,   1.50612000e+04]])
+    assert np.allclose(soll, ist)
+
+
+def test_cc_compress_average():
+    ist = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
+                    compress="average")
+    soll = np.array([[0.00000000e+00,   2.46820000e+04],
+                     [1.00000000e+00,   2.38210000e+04],
+                     [2.00000000e+00,   2.29600000e+04],
+                     [4.00000000e+00,   2.12325000e+04],
+                     [8.00000000e+00,   1.58508000e+04]])
+    assert np.allclose(soll, ist)
+
+
+def test_ac_compress_first():
+    ist = autocorrelate(range(42), m=2, dtype=np.float_,
+                        compress="first")
+    soll = np.array([[0.00000e+00, 2.38210e+04],
+                     [1.00000e+00, 2.29600e+04],
+                     [2.00000e+00, 2.21000e+04],
+                     [4.00000e+00, 1.96080e+04],
+                     [8.00000e+00, 1.31712e+04]])
+
+    assert np.allclose(soll, ist)
+
+
+def test_cc_compress_first():
+    ist = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
+                    compress="first")
+    soll = np.array([[0.00000e+00, 2.46820e+04],
+                     [1.00000e+00, 2.38210e+04],
+                     [2.00000e+00, 2.29600e+04],
+                     [4.00000e+00, 2.04440e+04],
+                     [8.00000e+00, 1.39104e+04]])
+
+    assert np.allclose(soll, ist)
+
+
+def test_ac_compress_second():
+    ist = autocorrelate(range(42), m=2, dtype=np.float_,
+                        compress="second")
+    soll = np.array([[0.00000e+00, 2.38210e+04],
+                     [1.00000e+00, 2.29600e+04],
+                     [2.00000e+00, 2.21000e+04],
+                     [4.00000e+00, 2.11660e+04],
+                     [8.00000e+00, 1.71024e+04]])
+
+    assert np.allclose(soll, ist)
+
+
+def test_cc_compress_second():
+    ist = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
+                    compress="second")
+    soll = np.array([[0.00000e+00, 2.46820e+04],
+                     [1.00000e+00, 2.38210e+04],
+                     [2.00000e+00, 2.29600e+04],
+                     [4.00000e+00, 2.20400e+04],
+                     [8.00000e+00, 1.79424e+04]])
+
+    assert np.allclose(soll, ist)
+
+
+if __name__ == "__main__":
+    # Run all tests
+    loc = locals()
+    for key in list(loc.keys()):
+        if key.startswith("test_") and hasattr(loc[key], "__call__"):
+            loc[key]()


=====================================
tests/test_correlate.py
=====================================
@@ -1,21 +1,12 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-""" Tests correlation algorithm
-"""
+"""Tests correlation algorithm"""
 from __future__ import division, print_function
 
-import numpy as np
-import os
-from os.path import abspath, basename, dirname, join, split, exists
-import platform
 import sys
-import warnings
-import zipfile
-
 
-# Add parent directory to beginning of path variable
-DIR = dirname(abspath(__file__))
-sys.path = [split(DIR)[0]] + sys.path
+import numpy as np
+import pytest
 
 import multipletau
 
@@ -25,19 +16,19 @@ from test_autocorrelate import get_reference_data
 def get_sample_arrays_cplx():
     a = [-4.3,   1,    9, -99.2, 13]
     b = [9921, 281, 23.5,   5.3, 77]
-    c = [  12,   0,  2.1,   1.3, 33]
-    d = [  32,  .1,   -2,   6.3, 88]
-    l = [  33,  92,   47,    54, 99]
-    r = [   0,   1,   12,     4,  0] 
-    p = [   1,   4,   .5,     2,  3]
+    c = [12,   0,  2.1,   1.3, 33]
+    d = [32,  .1,   -2,   6.3, 88]
+    ll = [33,  92,   47,    54, 99]
+    r = [0,   1,   12,     4,  0]
+    p = [1,   4,   .5,     2,  3]
     arrs = []
-    
-    for ai, bi, ci, di, li, ri, pi in zip(a,b,c,d,l,r,p): 
-        x = np.linspace(ai,bi,li)
-        y = np.linspace(ci,di,li)
-        arr = (x*np.roll(x,ri))**pi + 1j*y
+
+    for ai, bi, ci, di, li, ri, pi in zip(a, b, c, d, ll, r, p):
+        x = np.linspace(ai, bi, li)
+        y = np.linspace(ci, di, li)
+        arr = (x*np.roll(x, ri))**pi + 1j*y
         arrs.append(arr)
-    
+
     return arrs
 
 
@@ -45,7 +36,7 @@ def test_cc_copy():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays_cplx()
 
     res1 = []
@@ -67,7 +58,7 @@ def test_cc_copy():
                                   normalize=True,
                                   copy=False)
         res2.append(r)
-    
+
     # simple test if result is the same
     assert np.all(np.concatenate(res1) == np.concatenate(res2))
 
@@ -78,13 +69,13 @@ def test_cc_copy():
     assert not np.all(arrs == refarrs)
 
 
+@pytest.mark.filterwarnings("ignore::multipletau.core.InvalidMWarning")
 def test_cc_dtype():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
-    a = np.round(get_sample_arrays_cplx()[0].real)
 
+    a = np.round(get_sample_arrays_cplx()[0].real)
 
     # integer
     rf = multipletau.correlate(a=a,
@@ -110,21 +101,25 @@ def test_cc_dtype():
                                 normalize=True,
                                 copy=True,
                                 dtype=None)
-    
-    assert ri.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
-    assert ri2.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
-    assert np.all(rf == ri), "result should be the same, because input us the same"
-    assert np.all(rf == ri2), "result should be the same, because input us the same"
+
+    assert ri.dtype == np.dtype(
+        np.float_), "if wrong dtype, dtype should default to np.float_"
+    assert ri2.dtype == np.dtype(
+        np.float_), "if wrong dtype, dtype should default to np.float_"
+    assert np.all(
+        rf == ri), "result should be the same, because input is the same"
+    assert np.all(
+        rf == ri2), "result should be the same, because input is the same"
 
 
+@pytest.mark.filterwarnings("ignore::multipletau.core.InvalidMWarning")
 def test_cc_dtype2():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     a = np.round(get_sample_arrays_cplx()[0])
 
-    print("this should issue a warning of unequal input dtypes, casting to complex")
     rf = multipletau.correlate(a=a.real,
                                v=a,
                                m=16,
@@ -133,13 +128,12 @@ def test_cc_dtype2():
                                copy=True)
     assert np.dtype(rf.dtype) == np.dtype(np.complex_)
 
-    print("this should issue a warning of unequal input dtypes, casting to float")
     rf2 = multipletau.correlate(a=a.real,
-                               v=np.array(a.imag, dtype=np.int_),
-                               m=16,
-                               deltat=1,
-                               normalize=True,
-                               copy=True)
+                                v=np.array(a.imag, dtype=np.int_),
+                                m=16,
+                                deltat=1,
+                                normalize=True,
+                                copy=True)
     assert np.dtype(rf2.dtype) == np.dtype(np.float_)
 
 
@@ -147,13 +141,13 @@ def test_cc_m():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays_cplx()
 
     ms = [4, 8, 10, 16, 20, 64, 128]
     a = np.concatenate(arrs)
 
-    res = []    
+    res = []
     for m in ms:
         r = multipletau.correlate(a=a,
                                   v=a,
@@ -165,26 +159,28 @@ def test_cc_m():
         res.append(r)
 
         # test minimal length of array
-        _r2 = multipletau.correlate(a=a[:2*m],
-                                    v=a[:2*m],
-                                    m=m,
-                                    deltat=1,
-                                    normalize=False,
-                                    copy=True,
-                                    dtype=np.complex_)
-    
+        multipletau.correlate(a=a[:2*m],
+                              v=a[:2*m],
+                              m=m,
+                              deltat=1,
+                              normalize=False,
+                              copy=True,
+                              dtype=np.complex_)
+
     res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    # np.save(os.path.dirname(__file__)
+    #         + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
     ref = get_reference_data(myname, __file__)
 
     assert np.allclose(res, ref, atol=0, rtol=1e-15)
 
 
+@pytest.mark.filterwarnings("ignore::multipletau.core.InvalidMWarning")
 def test_cc_m_wrong():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     a = get_sample_arrays_cplx()[0]
 
     # integer
@@ -223,19 +219,19 @@ def test_cc_m_wrong():
                                normalize=True,
                                copy=True)
 
-    assert np.all(r1==r2)
-    assert np.all(r1==r3)
-    assert np.all(r1==r4)
-    assert np.all(r1==r5)
+    assert np.all(r1 == r2)
+    assert np.all(r1 == r3)
+    assert np.all(r1 == r4)
+    assert np.all(r1 == r5)
 
 
 def test_cc_normalize():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays_cplx()
-    
+
     res = []
     for a in arrs:
         r = multipletau.correlate(a=a.real,
@@ -247,7 +243,8 @@ def test_cc_normalize():
                                   dtype=np.float_)
         res.append(r)
     res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    # np.save(os.path.dirname(__file__)
+    #         + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
     ref = get_reference_data(myname, __file__)
 
     assert np.allclose(res, ref, atol=0, rtol=1e-14)
@@ -257,9 +254,9 @@ def test_cc_simple():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     arrs = get_sample_arrays_cplx()
-    
+
     res = []
     for a in arrs:
         r = multipletau.correlate(a=a,
@@ -272,7 +269,8 @@ def test_cc_simple():
         res.append(r)
     res = np.concatenate(res)
 
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    # np.save(os.path.dirname(__file__)
+    #         + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
     ref = get_reference_data(myname, __file__)
 
     assert np.allclose(res, ref, atol=0, rtol=1e-15)
@@ -298,4 +296,3 @@ if __name__ == "__main__":
     for key in list(loc.keys()):
         if key.startswith("test_") and hasattr(loc[key], "__call__"):
             loc[key]()
-    


=====================================
tests/test_ref_numpy.py
=====================================
@@ -1,13 +1,9 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-"""
-Compare to numpy data.
-"""
-import numpy as np
-from os.path import abspath, dirname, join
+"""Compare to numpy data"""
 import sys
 
-sys.path.insert(0, dirname(dirname(abspath(__file__))))
+import numpy as np
 
 import multipletau
 
@@ -18,9 +14,9 @@ def test_corresponds_ac():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     a = np.concatenate(get_sample_arrays_cplx()).real
-    m=16
+    m = 16
 
     restau = multipletau.autocorrelate(a=1*a,
                                        m=m,
@@ -34,25 +30,25 @@ def test_corresponds_ac():
                                          normalize=True,
                                          dtype=np.float_)
 
-    idx = np.array(restau[:,0].real, dtype=int)[:m]
+    idx = np.array(restau[:, 0].real, dtype=int)[:m]
 
-    assert np.allclose(reslin[idx, 1], restau[:m,1])
+    assert np.allclose(reslin[idx, 1], restau[:m, 1])
 
 
 def test_corresponds_ac_first_loop():
     """
     numpy correlation:
     G_m = sum_i(a_i*a_{i+m})
-    
+
     multipletau correlation 2nd order:
     b_j = (a_{2i} + a_{2i+1} / 2)
     G_m = sum_j(b_j*b_{j+1})
         = 1/4*sum_i(a_{2i}   * a_{2i+m}   +
                     a_{2i}   * a_{2i+m+1} +
-                    a_{2i+1} * a_{2i+m}   +   
+                    a_{2i+1} * a_{2i+m}   +
                     a_{2i+1} * a_{2i+m+1}
                     )
-    
+
     The values after the first m+1 lag times in the multipletau
     correlation differ from the normal correlation, because the
     traces are averaged over two consecutive items, effectively
@@ -64,33 +60,33 @@ def test_corresponds_ac_first_loop():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
-    a = [ arr / np.average(arr) for arr in get_sample_arrays_cplx() ]
+
+    a = [arr / np.average(arr) for arr in get_sample_arrays_cplx()]
     a = np.concatenate(a)[:222]
     # two consecutive elements are the same, so the multiple-tau method
     # corresponds to the numpy correlation for the first loop.
     a[::2] = a[1::2]
-    
-    for m in [2,4,6,8,10,12,14,16]:
+
+    for m in [2, 4, 6, 8, 10, 12, 14, 16]:
         restau = multipletau.correlate(a=a,
                                        v=a.imag+1j*a.real,
                                        m=m,
                                        copy=True,
                                        normalize=False,
                                        dtype=np.complex_)
-        
+
         reslin = multipletau.correlate_numpy(a=a,
                                              v=a.imag+1j*a.real,
                                              copy=True,
                                              normalize=False,
                                              dtype=np.complex_)
-        
-        idtau = np.where(restau[:,0]==m+2)[0][0]
-        tau3 = restau[idtau, 1] #m+1 initial bins
-    
-        idref = np.where(reslin[:,0]==m+2)[0][0]
+
+        idtau = np.where(restau[:, 0] == m+2)[0][0]
+        tau3 = restau[idtau, 1]  # m+1 initial bins
+
+        idref = np.where(reslin[:, 0] == m+2)[0][0]
         tau3ref = reslin[idref, 1]
-        
+
         assert np.allclose(tau3, tau3ref)
 
 
@@ -98,9 +94,9 @@ def test_corresponds_ac_nonormalize():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     a = np.concatenate(get_sample_arrays_cplx()).real
-    m=16
+    m = 16
 
     restau = multipletau.autocorrelate(a=1*a,
                                        m=m,
@@ -114,18 +110,18 @@ def test_corresponds_ac_nonormalize():
                                          normalize=False,
                                          dtype=np.float_)
 
-    idx = np.array(restau[:,0].real, dtype=int)[:m+1]
+    idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
 
-    assert np.allclose(reslin[idx, 1], restau[:m+1,1])
+    assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
 
 
 def test_corresponds_cc():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     a = np.concatenate(get_sample_arrays_cplx())
-    m=16
+    m = 16
 
     restau = multipletau.correlate(a=a,
                                    v=a.imag+1j*a.real,
@@ -140,18 +136,18 @@ def test_corresponds_cc():
                                          normalize=True,
                                          dtype=np.complex_)
 
-    idx = np.array(restau[:,0].real, dtype=int)[:m+1]
+    idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
 
-    assert np.allclose(reslin[idx, 1], restau[:m+1,1])
+    assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
 
 
 def test_corresponds_cc_nonormalize():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
-    
+
     a = np.concatenate(get_sample_arrays_cplx())
-    m=16
+    m = 16
 
     restau = multipletau.correlate(a=a,
                                    v=a.imag+1j*a.real,
@@ -166,10 +162,10 @@ def test_corresponds_cc_nonormalize():
                                          normalize=False,
                                          dtype=np.complex_)
 
-    idx = np.array(restau[:,0].real, dtype=int)[:m+1]
+    idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
+
+    assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
 
-    assert np.allclose(reslin[idx, 1], restau[:m+1,1])
-    
 
 if __name__ == "__main__":
     # Run all tests


=====================================
tests/test_ret_sum.py
=====================================
@@ -0,0 +1,40 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""test returning exact sum plus normalization factor"""
+import numpy as np
+
+from multipletau import autocorrelate, correlate
+
+
+def test_ac_return_sum():
+    ist, ist_count = autocorrelate(range(42), m=2, dtype=np.float_,
+                                   ret_sum=True)
+    soll = np.array([[0.000000e+00, 2.382100e+04],
+                     [1.000000e+00, 2.296000e+04],
+                     [2.000000e+00, 2.210000e+04],
+                     [4.000000e+00, 1.018875e+04],
+                     [8.000000e+00, 3.586000e+03]])
+    soll_count = [42., 41., 40., 19.,  8.]
+    assert np.allclose(soll, ist)
+    assert np.allclose(soll_count, ist_count)
+
+
+def test_cc_return_sum():
+    ist, ist_count = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
+                               ret_sum=True)
+    soll = np.array([[0.000000e+00, 2.468200e+04],
+                     [1.000000e+00, 2.382100e+04],
+                     [2.000000e+00, 2.296000e+04],
+                     [4.000000e+00, 1.061625e+04],
+                     [8.000000e+00, 3.774000e+03]])
+    soll_count = [42., 41., 40., 19.,  8.]
+    assert np.allclose(soll, ist)
+    assert np.allclose(soll_count, ist_count)
+
+
+if __name__ == "__main__":
+    # Run all tests
+    loc = locals()
+    for key in list(loc.keys()):
+        if key.startswith("test_") and hasattr(loc[key], "__call__"):
+            loc[key]()



View it on GitLab: https://salsa.debian.org/med-team/python-multipletau/commit/b1ffc39320af84c7b546516703c44a4f4dc22582
