[med-svn] [python-multipletau] 01/03: New upstream version 0.1.7+ds

Alex Mestiashvili malex-guest at moszumanska.debian.org
Wed Apr 26 14:13:07 UTC 2017


This is an automated email from the git hooks/post-receive script.

malex-guest pushed a commit to branch master
in repository python-multipletau.

commit 55955562f7263e601402f60cd2ed60c3b4b76785
Author: Alexandre Mestiashvili <alex at biotec.tu-dresden.de>
Date:   Wed Apr 26 14:59:13 2017 +0200

    New upstream version 0.1.7+ds
---
 CHANGELOG                                          |   6 +
 MANIFEST.in                                        |  11 +-
 multipletau/__init__.py                            |  18 +-
 multipletau/_multipletau.py                        | 114 ++++++-----
 multipletau/_version.py                            |  65 +++---
 tests/data/test_correlate.py_test_cc_m.npy         | Bin 16400 -> 16400 bytes
 tests/data/test_correlate.py_test_cc_normalize.npy | Bin 2304 -> 2304 bytes
 tests/data/test_correlate.py_test_cc_simple.npy    | Bin 4528 -> 4528 bytes
 tests/test_ac_cc.py                                | 100 ++++-----
 tests/test_autocorrelate.py                        | 188 ++++++++---------
 tests/test_basic.py                                |   4 +-
 tests/test_correlate.py                            | 225 +++++++++++----------
 tests/test_ref_numpy.py                            | 127 ++++++------
 13 files changed, 431 insertions(+), 427 deletions(-)

diff --git a/CHANGELOG b/CHANGELOG
index 78f92dc..aa65120 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,9 @@
+0.1.7
+- code cleanup with pep8 and autopep8 
+- always use numpy dtypes
+- fix tests:
+    - take into account floating inaccuracies
+    - support i386 numpy dtypes
 0.1.6
 - also compute correlation for zero lag time (`G(tau==0)`)
 - support NumPy 1.11
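
The "floating inaccuracies" item above corresponds to the test changes
further down, where exact equality checks against stored reference data are
replaced by tolerance-based comparisons. A minimal sketch of that pattern
(illustrative values, not taken from the test suite):

    import numpy as np

    res = np.array([1.0, 2.0000000000000004, 3.0])  # freshly computed
    ref = np.array([1.0, 2.0, 3.0])                 # stored reference

    # exact comparison fails due to platform-dependent rounding
    assert not np.all(res == ref)
    # a relative tolerance absorbs the last-bit differences
    assert np.allclose(res, ref, atol=0, rtol=1e-14)
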
diff --git a/MANIFEST.in b/MANIFEST.in
index 73c6c95..6efe489 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,11 +1,6 @@
 include CHANGELOG
 include LICENSE
 include README.rst
-include tests/*.py
-include tests/*.md
-include tests/data/*.npy
-include examples/*.py
-include doc/*.py
-include doc/*.rst
-include doc/*.md
-include doc/extensions/*.py
+recursive-include examples *.py
+recursive-include doc *.py *.md *.rst
+recursive-include tests *.py *.md test_*.npy
diff --git a/multipletau/__init__.py b/multipletau/__init__.py
index 7c31569..31efe66 100644
--- a/multipletau/__init__.py
+++ b/multipletau/__init__.py
@@ -1,27 +1,27 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-u""" 
-This package provides a multiple-τ algorithm for Python 2.7 and 
+u"""
+This package provides a multiple-τ algorithm for Python 2.7 and
 Python 3.x and requires the package :py:mod:`numpy`.
 
 Multiple-τ correlation is computed on a logarithmic scale (fewer
 data points are computed) and is thus much faster than conventional
 correlation on a linear scale such as :py:func:`numpy.correlate`.
 
-Recommended literature 
+Recommended literature
 ----------------------
 
 - Klaus Schaetzel and Rainer Peters; *Noise on multiple-tau photon
   correlation data*. Proc. SPIE 1430, Photon Correlation
-  Spectroscopy: Multicomponent Systems, 109 (June 1, 1991);    
+  Spectroscopy: Multicomponent Systems, 109 (June 1, 1991);
   http://doi.org/10.1117/12.44160
-  
-- Thorsten Wohland, Rudolf Rigler, and Horst Vogel; *The Standard 
+
+- Thorsten Wohland, Rudolf Rigler, and Horst Vogel; *The Standard
   Deviation in Fluorescence Correlation Spectroscopy*. Biophysical
-  Journal, 80 (June 1, 2001);  
+  Journal, 80 (June 1, 2001);
   http://dx.doi.org/10.1016/S0006-3495(01)76264-9
 
-Obtaining multipletau 
+Obtaining multipletau
 ---------------------
 If you have Python and :py:mod:`numpy` installed, simply run
 
@@ -34,7 +34,7 @@ https://github.com/FCS-analysis/multipletau.
 Citing multipletau
 ------------------
 The multipletau package should be cited like this (replace "x.x.x"
-with the actual version of multipletau that you used and "DD Month YYYY"
+with the actual version of multipletau used and "DD Month YYYY"
 with a matching date).
 
 .. topic:: cite
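
For orientation, a small usage sketch of the API documented above; the
signal and parameter values are purely illustrative:

    import numpy as np
    import multipletau

    # an illustrative noisy signal with non-zero mean (needed for normalize=True)
    signal = 1 + 0.2 * np.random.RandomState(47).normal(size=2**14)

    # multiple-tau autocorrelation on a quasi-logarithmic lag-time grid
    G = multipletau.autocorrelate(signal, m=16, deltat=1e-6, normalize=True)
    lag_times, correlation = G[:, 0], G[:, 1]
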
diff --git a/multipletau/_multipletau.py b/multipletau/_multipletau.py
index aa592a5..1e1b74f 100755
--- a/multipletau/_multipletau.py
+++ b/multipletau/_multipletau.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-""" 
+"""
 A multiple-τ algorithm for Python 2.7 and 3.x.
 
 Copyright (c) 2014 Paul Müller
@@ -11,7 +11,7 @@ met:
 
   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
-   
+
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
@@ -43,15 +43,15 @@ __all__ = ["autocorrelate", "correlate", "correlate_numpy"]
 
 def autocorrelate(a, m=16, deltat=1, normalize=False,
                   copy=True, dtype=None):
-    """ 
+    """
     Autocorrelation of a 1-dimensional sequence on a log2-scale.
 
-    This computes the correlation similar to 
+    This computes the correlation similar to
     :py:func:`numpy.correlate` for positive :math:`k` on a base 2
     logarithmic scale.
 
         :func:`numpy.correlate(a, a, mode="full")[len(a)-1:]`
-    
+
         :math:`z_k = \Sigma_n a_n a_{n+k}`
 
 
@@ -88,10 +88,11 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
     curve decaying to zero.
 
     For experiments like e.g. fluorescence correlation spectroscopy,
-    the signal can be normalized to :math:`M-k` by invoking ``normalize = True``.           
+    the signal can be normalized to :math:`M-k`
+    by invoking ``normalize = True``.
 
-    For normalizing according to the behavior of :py:func:`numpy.correlate`,
-    use ``normalize = False``.
+    For normalizing according to the behavior
+    of :py:func:`numpy.correlate`, use ``normalize = False``.
 
     For complex arrays, this method falls back to the method
     :func:`correlate`.
@@ -100,7 +101,7 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
     Examples
     --------
     >>> from multipletau import autocorrelate
-    >>> autocorrelate(range(42), m=2, dtype=np.float)
+    >>> autocorrelate(range(42), m=2, dtype=np.float_)
     array([[  0.00000000e+00,   2.38210000e+04],
            [  1.00000000e+00,   2.29600000e+04],
            [  2.00000000e+00,   2.21000000e+04],
@@ -126,21 +127,21 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
                          copy=copy,
                          dtype=dtype)
     elif dtype.kind != "f":
-        warnings.warn("Input dtype is not float; casting to np.float!")
-        dtype = np.dtype(np.float)
+        warnings.warn("Input dtype is not float; casting to np.float_!")
+        dtype = np.dtype(np.float_)
 
     # If copy is false and dtype is the same as the input array,
     # then this line does not have an effect:
     trace = np.array(a, dtype=dtype, copy=copy)
 
     # Check parameters
-    if m//2 != m/2:
+    if m // 2 != m / 2:
         mold = m
-        m = np.int((m//2 + 1) * 2)
+        m = np.int_((m // 2 + 1) * 2)
         warnings.warn("Invalid value of m={}. Using m={} instead"
                       .format(mold, m))
     else:
-        m = np.int(m)
+        m = np.int_(m)
 
     N = N0 = trace.shape[0]
 
@@ -148,12 +149,12 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
     # The integer k defines how many times we can average over
     # two neighboring array elements in order to obtain an array of
     # length just larger than m.
-    k = np.int(np.floor(np.log2(N/m)))
+    k = np.int_(np.floor(np.log2(N / m)))
 
     # In the base2 multiple-tau scheme, the length of the correlation
     # array is (only taking into account values that are computed from
     # traces that are just larger than m):
-    lenG = m + k*(m//2) + 1
+    lenG = m + k * (m // 2) + 1
 
     G = np.zeros((lenG, 2), dtype=dtype)
 
@@ -166,13 +167,13 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
     if normalize:
         trace -= traceavg
         assert traceavg != 0, "Cannot normalize: Average of `a` is zero!"
-    
+
     # Otherwise the following for-loop will fail:
-    assert N >= 2*m, "len(a) must be larger than 2m!"
+    assert N >= 2 * m, "len(a) must be larger than 2m!"
 
     # Calculate autocorrelation function for first m+1 bins
     # Discrete convolution of m elements
-    for n in range(0, m+1):
+    for n in range(0, m + 1):
         G[n, 0] = deltat * n
         # This is the computationally intensive step
         G[n, 1] = np.sum(trace[:N - n] * trace[n:])
@@ -181,7 +182,7 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
     # Now that we calculated the first m elements of G, let us
     # go on with the next m/2 elements.
     # Check if len(trace) is even:
-    if N%2 == 1:
+    if N % 2 == 1:
         N -= 1
     # Add up every second element
     trace = (trace[:N:2] + trace[1:N:2]) / 2
@@ -189,9 +190,9 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
     # Start iteration for each m/2 values
     for step in range(1, k + 1):
         # Get the next m/2 values via correlation of the trace
-        for n in range(1, m//2 + 1):
-            npmd2 = n + m//2
-            idx = m + n + (step - 1) * m//2
+        for n in range(1, m // 2 + 1):
+            npmd2 = n + m // 2
+            idx = m + n + (step - 1) * m // 2
             if len(trace[:N - npmd2]) == 0:
                 # This is a shortcut that stops the iteration once the
                 # length of the trace is too small to compute a corre-
@@ -223,7 +224,7 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
                 normstat[idx] = N - npmd2
                 normnump[idx] = N
         # Check if len(trace) is even:
-        if N%2 == 1:
+        if N % 2 == 1:
             N -= 1
         # Add up every second element
         trace = (trace[:N:2] + trace[1:N:2]) / 2
@@ -239,7 +240,7 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
 
 def correlate(a, v, m=16, deltat=1, normalize=False,
               copy=True, dtype=None):
-    """ 
+    """
     Cross-correlation of two 1-dimensional sequences
     on a log2-scale.
 
@@ -251,9 +252,9 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
 
         :math:`z_k = \Sigma_n a_n v_{n+k}`
 
-    Note that only the correlation in the positive direction is computed.
-    To obtain the correlation for negative lag times swap the input variables
-    ``a`` and ``v``.
+    Note that only the correlation in the positive direction is
+    computed. To obtain the correlation for negative lag times
+    swap the input variables ``a`` and ``v``.
 
     Parameters
     ----------
@@ -277,29 +278,30 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
     Returns
     -------
     cross_correlation : ndarray of shape (N,2)
-        the lag time (1st column) and the cross-correlation (2nd column).
+        the lag time (column 1) and the cross-correlation (column 2).
 
 
     Notes
     -----
     .. versionchanged :: 0.1.6
-       Compute the correlation for zero lag time and correctly normalize
-       the correlation for a complex input sequence `v`.
+       Compute the correlation for zero lag time and correctly
+       normalize the correlation for a complex input sequence `v`.
 
     The algorithm computes the correlation with the convention of the
     curve decaying to zero.
 
     For experiments like e.g. fluorescence correlation spectroscopy,
-    the signal can be normalized to :math:`M-k` by invoking ``normalize = True``.           
+    the signal can be normalized to :math:`M-k`
+    by invoking ``normalize = True``.
 
-    For normalizing according to the behavior of :py:func:`numpy.correlate`,
-    use ``normalize = False``.
+    For normalizing according to the behavior of
+    :py:func:`numpy.correlate`, use ``normalize = False``.
 
 
     Examples
     --------
     >>> from multipletau import correlate
-    >>> correlate(range(42), range(1,43), m=2, dtype=np.float)
+    >>> correlate(range(42), range(1,43), m=2, dtype=np.float_)
     array([[  0.00000000e+00,   2.46820000e+04],
            [  1.00000000e+00,   2.38210000e+04],
            [  2.00000000e+00,   2.29600000e+04],
@@ -322,17 +324,18 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
         if dtype != dtype2:
             if dtype.kind == "c" or dtype2.kind == "c":
                 # The user might try to combine complex64 and float128.
-                warnings.warn("Input dtypes not equal; casting to np.complex!")
-                dtype = np.dtype(np.complex)
+                warnings.warn(
+                    "Input dtypes not equal; casting to np.complex_!")
+                dtype = np.dtype(np.complex_)
             else:
-                warnings.warn("Input dtypes not equal; casting to np.float!")
-                dtype = np.dtype(np.float)
+                warnings.warn("Input dtypes not equal; casting to np.float_!")
+                dtype = np.dtype(np.float_)
     else:
         dtype = np.dtype(dtype)
 
-    if not dtype.kind in ["c", "f"]:
-        warnings.warn("Input dtype is not float; casting to np.float!")
-        dtype = np.dtype(np.float)
+    if dtype.kind not in ["c", "f"]:
+        warnings.warn("Input dtype is not float; casting to np.float_!")
+        dtype = np.dtype(np.float_)
 
     trace1 = np.array(v, dtype=dtype, copy=copy)
 
@@ -350,26 +353,25 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
         np.conjugate(trace1, out=trace1)
 
     # Check parameters
-    if m//2 != m/2:
+    if m // 2 != m / 2:
         mold = m
-        m = np.int(m//2 + 1) * 2
+        m = np.int_(m // 2 + 1) * 2
         warnings.warn("Invalid value of m={}. Using m={} instead"
                       .format(mold, m))
     else:
-        m = np.int(m)
-
+        m = np.int_(m)
 
     N = N0 = trace1.shape[0]
     # Find out the length of the correlation function.
     # The integer k defines how many times we can average over
     # two neighboring array elements in order to obtain an array of
     # length just larger than m.
-    k = np.int(np.floor(np.log2(N/m)))
+    k = np.int_(np.floor(np.log2(N / m)))
 
     # In the base2 multiple-tau scheme, the length of the correlation
     # array is (only taking into account values that are computed from
     # traces that are just larger than m):
-    lenG = m + k * m//2 + 1
+    lenG = m + k * m // 2 + 1
 
     G = np.zeros((lenG, 2), dtype=dtype)
     normstat = np.zeros(lenG, dtype=dtype)
@@ -381,7 +383,7 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
         trace2 -= traceavg2
 
     # Otherwise the following for-loop will fail:
-    assert N >= 2*m, "len(a) must be larger than 2m!"
+    assert N >= 2 * m, "len(a) must be larger than 2m!"
 
     # Calculate autocorrelation function for first m+1 bins
     for n in range(0, m + 1):
@@ -390,7 +392,7 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
         normstat[n] = N - n
         normnump[n] = N
     # Check if len(trace) is even:
-    if N%2 == 1:
+    if N % 2 == 1:
         N -= 1
     # Add up every second element
     trace1 = (trace1[:N:2] + trace1[1:N:2]) / 2
@@ -399,9 +401,9 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
 
     for step in range(1, k + 1):
         # Get the next m/2 values of the trace
-        for n in range(1, m//2 + 1):
-            npmd2 = (n + m//2)
-            idx = m + n + (step - 1) * m//2
+        for n in range(1, m // 2 + 1):
+            npmd2 = (n + m // 2)
+            idx = m + n + (step - 1) * m // 2
             if len(trace1[:N - npmd2]) == 0:
                 # Abort
                 G = G[:idx - 1]
@@ -416,7 +418,7 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
                 normnump[idx] = N
 
         # Check if len(trace) is even:
-        if N%2 == 1:
+        if N % 2 == 1:
             N -= 1
         # Add up every second element
         trace1 = (trace1[:N:2] + trace1[1:N:2]) / 2
@@ -457,7 +459,7 @@ def correlate_numpy(a, v, deltat=1, normalize=False,
     Returns
     -------
     cross_correlation : ndarray of shape (N,2)
-        the lag time (1st column) and the cross-correlation (2nd column).
+        the lag time (column 1) and the cross-correlation (column 2).
 
 
     Notes
@@ -485,7 +487,7 @@ def correlate_numpy(a, v, deltat=1, normalize=False,
         N = len(Gd)
         m = N - np.arange(N)
         Gd /= m * avg * vvg
-    
+
     G = np.zeros((len(Gd), 2), dtype=dtype)
     G[:, 1] = Gd
     G[:, 0] = np.arange(len(Gd)) * deltat
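
The comments in the hunk above describe the base-2 coarsening scheme: the
first m+1 lags are computed directly, then the trace is repeatedly halved by
averaging neighbouring samples and each level contributes another m/2 lags.
A standalone sketch of that bookkeeping (only the arithmetic, not the
package code):

    import numpy as np

    N, m = 1000, 16
    # number of halving steps until the trace is barely longer than m
    k = int(np.floor(np.log2(N / m)))
    # total number of lag bins, including the zero-lag bin
    lenG = m + k * (m // 2) + 1
    print(k, lenG)  # 5, 57 for N=1000 and m=16

    # one coarsening step: average every pair of neighbouring samples
    trace = np.arange(10, dtype=float)
    if len(trace) % 2 == 1:
        trace = trace[:-1]
    trace = (trace[0::2] + trace[1::2]) / 2
    print(trace)  # [0.5 2.5 4.5 6.5 8.5]
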
diff --git a/multipletau/_version.py b/multipletau/_version.py
index 83777cb..68151fc 100644
--- a/multipletau/_version.py
+++ b/multipletau/_version.py
@@ -12,7 +12,7 @@ from __future__ import print_function
 
 # Put the entire script into a `True` statement and add the hint
 # `pragma: no cover` to ignore code coverage here.
-if True: # pragma: no cover
+if True:  # pragma: no cover
     import imp
     import os
     from os.path import join, abspath, dirname
@@ -31,8 +31,8 @@ if True: # pragma: no cover
         """
         # make sure we are in a directory that belongs to the correct
         # repository.
-        ourdir  = dirname(abspath(__file__))
-        
+        ourdir = dirname(abspath(__file__))
+
         def _minimal_ext_cmd(cmd):
             # construct minimal environment
             env = {}
@@ -44,25 +44,25 @@ if True: # pragma: no cover
             env['LANGUAGE'] = 'C'
             env['LANG'] = 'C'
             env['LC_ALL'] = 'C'
-            out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]
+            cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
+            out = cmd.communicate()[0]
             return out
-    
+
         # change directory
         olddir = abspath(os.curdir)
         os.chdir(ourdir)
-        
+
         try:
             out = _minimal_ext_cmd(['git', 'describe', '--tags', 'HEAD'])
             GIT_REVISION = out.strip().decode('ascii')
         except OSError:
             GIT_REVISION = ""
-            
+
         # go back to original directory
         os.chdir(olddir)
-        
+
         return GIT_REVISION
-    
-    
+
     def load_version(versionfile):
         """ load version from version_save.py
         """
@@ -70,63 +70,64 @@ if True: # pragma: no cover
         try:
             _version_save = imp.load_source("_version_save", versionfile)
             longversion = _version_save.longversion
-        except:
+        except BaseException:
             try:
                 from ._version_save import longversion
-            except:
+            except BaseException:
                 try:
                     from _version_save import longversion
-                except:
+                except BaseException:
                     pass
-    
+
         return longversion
-    
-    
+
     def save_version(version, versionfile):
         """ save version to version_save.py
         """
-        data="#!/usr/bin/env python\n"+\
-             "# This file was created automatically\n"+\
-             "longversion='{VERSION}'"
+        data = "#!/usr/bin/env python\n" \
+            + "# This file was created automatically\n" \
+            + "longversion='{VERSION}'"
         try:
             with open(versionfile, "w") as fd:
                 fd.write(data.format(VERSION=version))
-        except:
-            warnings.warn("Could not write package version to {}.".format(versionfile))
-    
+        except BaseException:
+            msg = "Could not write package version to {}.".format(versionfile)
+            warnings.warn(msg)
+
     versionfile = join(dirname(abspath(__file__)), "_version_save.py")
-    
-    ## Determine the accurate version
+
+    # Determine the accurate version
     longversion = ""
-    
+
     # 1. git describe
     try:
         # Get the version using `git describe`
         longversion = git_describe()
-    except:
+    except BaseException:
         pass
-    
+
     # 2. previously created version file
     if longversion == "":
         # Either this is not a git repository or we are in the
         # wrong git repository.
         # Get the version from the previously generated `_version_save.py`
         longversion = load_version(versionfile)
-    
+
     # 3. last resort: date
     if longversion == "":
         print("Could not determine version. Reason:")
         print(traceback.format_exc())
         ctime = os.stat(__file__)[8]
         longversion = time.strftime("%Y.%m.%d-%H-%M-%S", time.gmtime(ctime))
-        print("Using creation time to determine version: {}".format(longversion))
-    
+        print("Using creation time as version: {}".format(longversion))
+
     if not hasattr(sys, 'frozen'):
         # Save the version to `_version_save.py` to allow distribution using
         # `python setup.py sdist`.
-        # This is only done if the program is not frozen (with e.g. pyinstaller),
+        # This is only done if the program is not frozen (with e.g.
+        # pyinstaller),
         if longversion != load_version(versionfile):
             save_version(longversion, versionfile)
-    
+
     # PEP 440-conform development version:
     version = ".dev".join(longversion.split("-")[:2])
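
The version logic above tries `git describe` first and only then falls back
to a previously saved version file or the module's timestamp. A condensed,
self-contained sketch of the first step, assuming it is run inside a git
checkout with tags:

    import os
    import subprocess

    def git_describe(path="."):
        # restricted environment with a forced C locale, as in the hunk above
        env = {"LANGUAGE": "C", "LANG": "C", "LC_ALL": "C"}
        if "SYSTEMROOT" in os.environ:  # needed on Windows
            env["SYSTEMROOT"] = os.environ["SYSTEMROOT"]
        try:
            proc = subprocess.Popen(["git", "describe", "--tags", "HEAD"],
                                    stdout=subprocess.PIPE, env=env, cwd=path)
            return proc.communicate()[0].strip().decode("ascii")
        except OSError:
            return ""
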
diff --git a/tests/data/test_correlate.py_test_cc_m.npy b/tests/data/test_correlate.py_test_cc_m.npy
index 7dcaa48..aaedf26 100644
Binary files a/tests/data/test_correlate.py_test_cc_m.npy and b/tests/data/test_correlate.py_test_cc_m.npy differ
diff --git a/tests/data/test_correlate.py_test_cc_normalize.npy b/tests/data/test_correlate.py_test_cc_normalize.npy
index b7e5c2a..5f4b89a 100644
Binary files a/tests/data/test_correlate.py_test_cc_normalize.npy and b/tests/data/test_correlate.py_test_cc_normalize.npy differ
diff --git a/tests/data/test_correlate.py_test_cc_simple.npy b/tests/data/test_correlate.py_test_cc_simple.npy
index 6f56a61..d5745c5 100644
Binary files a/tests/data/test_correlate.py_test_cc_simple.npy and b/tests/data/test_correlate.py_test_cc_simple.npy differ
diff --git a/tests/test_ac_cc.py b/tests/test_ac_cc.py
index 302615e..bf3a6a2 100644
--- a/tests/test_ac_cc.py
+++ b/tests/test_ac_cc.py
@@ -21,38 +21,46 @@ import multipletau
 from test_autocorrelate import get_sample_arrays
 
 
-def test_ac_cc_simple():
+def test_ac_cc_m():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
     
     arrs = get_sample_arrays()
-    
-    rescc = []
-    for a in arrs:
+
+    ms = [8, 16, 32, 64, 128]
+    a = np.concatenate(arrs)
+
+    res = []    
+    for m in ms:
+        r = multipletau.autocorrelate(a=a,
+                                      m=m,
+                                      deltat=1,
+                                      normalize=False,
+                                      copy=True,
+                                      dtype=np.float_)
+        res.append(r)
+    res = np.concatenate(res)
+
+    rescc = []    
+    for m in ms:
         r = multipletau.correlate(a=a, v=a,
-                                  m=16,
+                                  m=m,
                                   deltat=1,
                                   normalize=False,
                                   copy=True,
-                                  dtype=np.float)
+                                  dtype=np.float_)
         rescc.append(r)
+        # test minimal length of array
+        _r2 = multipletau.correlate(a=a[:2*m], v=a[:2*m],
+                                    m=m,
+                                    deltat=1,
+                                    normalize=False,
+                                    copy=True,
+                                    dtype=np.float_)
     
     rescc = np.concatenate(rescc)
-
-    resac = []
-    for a in arrs:
-        r = multipletau.autocorrelate(a=a,
-                                      m=16,
-                                      deltat=1,
-                                      normalize=False,
-                                      copy=True,
-                                      dtype=np.float)
-        resac.append(r)
-    
-    resac = np.concatenate(resac)
-    
-    assert np.all(resac==rescc)
+    assert np.all(res==rescc)
 
 
 def test_ac_cc_normalize():
@@ -69,7 +77,7 @@ def test_ac_cc_normalize():
                                       deltat=1,
                                       normalize=True,
                                       copy=True,
-                                      dtype=np.float)
+                                      dtype=np.float_)
         res.append(r)
     
     res = np.concatenate(res)
@@ -81,7 +89,7 @@ def test_ac_cc_normalize():
                                   deltat=1,
                                   normalize=True,
                                   copy=True,
-                                  dtype=np.float)
+                                  dtype=np.float_)
         rescc.append(r)
     
     rescc = np.concatenate(rescc)
@@ -89,46 +97,38 @@ def test_ac_cc_normalize():
     assert np.all(res==rescc)
 
 
-def test_ac_cc_m():
+def test_ac_cc_simple():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
     print("running ", myname)
     
     arrs = get_sample_arrays()
-
-    ms = [8, 16, 32, 64, 128]
-    a = np.concatenate(arrs)
-
-    res = []    
-    for m in ms:
-        r = multipletau.autocorrelate(a=a,
-                                      m=m,
-                                      deltat=1,
-                                      normalize=False,
-                                      copy=True,
-                                      dtype=np.float)
-        res.append(r)
-    res = np.concatenate(res)
-
-    rescc = []    
-    for m in ms:
+    
+    rescc = []
+    for a in arrs:
         r = multipletau.correlate(a=a, v=a,
-                                  m=m,
+                                  m=16,
                                   deltat=1,
                                   normalize=False,
                                   copy=True,
-                                  dtype=np.float)
+                                  dtype=np.float_)
         rescc.append(r)
-        # test minimal length of array
-        _r2 = multipletau.correlate(a=a[:2*m], v=a[:2*m],
-                                    m=m,
-                                    deltat=1,
-                                    normalize=False,
-                                    copy=True,
-                                    dtype=np.float)
     
     rescc = np.concatenate(rescc)
-    assert np.all(res==rescc)
+
+    resac = []
+    for a in arrs:
+        r = multipletau.autocorrelate(a=a,
+                                      m=16,
+                                      deltat=1,
+                                      normalize=False,
+                                      copy=True,
+                                      dtype=np.float_)
+        resac.append(r)
+    
+    resac = np.concatenate(resac)
+    
+    assert np.all(resac==rescc)
 
 
 if __name__ == "__main__":
diff --git a/tests/test_autocorrelate.py b/tests/test_autocorrelate.py
index 40a12f1..6eaec60 100644
--- a/tests/test_autocorrelate.py
+++ b/tests/test_autocorrelate.py
@@ -41,89 +41,6 @@ def get_sample_arrays():
     return arrs
 
 
-def test_ac_simple():
-    myframe = sys._getframe()
-    myname = myframe.f_code.co_name
-    print("running ", myname)
-    
-    arrs = get_sample_arrays()
-    
-    res = []
-    for a in arrs:
-        r = multipletau.autocorrelate(a=a,
-                                      m=16,
-                                      deltat=1,
-                                      normalize=False,
-                                      copy=True,
-                                      dtype=np.float)
-        res.append(r)
-    
-    res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
-    ref = get_reference_data(myname, __file__)
-
-    assert np.all(res==ref)
-
-
-def test_ac_normalize():
-    myframe = sys._getframe()
-    myname = myframe.f_code.co_name
-    print("running ", myname)
-    
-    arrs = get_sample_arrays()
-    
-    res = []
-    for a in arrs:
-        r = multipletau.autocorrelate(a=a,
-                                      m=16,
-                                      deltat=1,
-                                      normalize=True,
-                                      copy=True,
-                                      dtype=np.float)
-        res.append(r)
-    
-    res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
-    ref = get_reference_data(myname, __file__)
-
-    assert np.all(res==ref)
-
-
-def test_ac_m():
-    myframe = sys._getframe()
-    myname = myframe.f_code.co_name
-    print("running ", myname)
-    
-    arrs = get_sample_arrays()
-
-    ms = [8, 16, 32, 64, 128]
-    a = np.concatenate(arrs)
-
-    res = []    
-    for m in ms:
-        r = multipletau.autocorrelate(a=a,
-                                      m=m,
-                                      deltat=1,
-                                      normalize=False,
-                                      copy=True,
-                                      dtype=np.float)
-        res.append(r)
-
-        # test minimal length of array
-        _r2 = multipletau.autocorrelate(a=a[:2*m],
-                                        m=m,
-                                        deltat=1,
-                                        normalize=False,
-                                        copy=True,
-                                        dtype=np.float)
-    
-    res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
-    ref = get_reference_data(myname, __file__)
-
-    assert np.all(res==ref)
-
-
 def test_ac_copy():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
@@ -138,7 +55,7 @@ def test_ac_copy():
                                       deltat=1,
                                       normalize=True,
                                       copy=True,
-                                      dtype=np.float)
+                                      dtype=np.float_)
         res1.append(r)
 
     res2 = []
@@ -148,7 +65,7 @@ def test_ac_copy():
                                       deltat=1,
                                       normalize=True,
                                       copy=False,
-                                      dtype=np.float)
+                                      dtype=np.float_)
         res2.append(r)
     
     # simple test if result is the same
@@ -160,7 +77,7 @@ def test_ac_copy():
     # make sure the copy function really changes something
     assert not np.all(arrs == refarrs)
 
-    
+
 def test_ac_dtype():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
@@ -175,7 +92,7 @@ def test_ac_dtype():
                                    deltat=1,
                                    normalize=True,
                                    copy=True,
-                                   dtype=np.float)
+                                   dtype=np.float_)
 
     ri = multipletau.autocorrelate(a=a,
                                    m=16,
@@ -191,12 +108,47 @@ def test_ac_dtype():
                                    copy=True,
                                    dtype=None)
     
-    assert ri.dtype == np.dtype(np.float), "if wrong dtype, dtype should default to np.float"
-    assert ri2.dtype == np.dtype(np.float), "if wrong dtype, dtype should default to np.float"
+    assert ri.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
+    assert ri2.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
     assert np.all(rf == ri), "result should be the same, because input is the same"
     assert np.all(rf == ri2), "result should be the same, because input is the same"
 
 
+def test_ac_m():
+    myframe = sys._getframe()
+    myname = myframe.f_code.co_name
+    print("running ", myname)
+    
+    arrs = get_sample_arrays()
+
+    ms = [8, 16, 32, 64, 128]
+    a = np.concatenate(arrs)
+
+    res = []    
+    for m in ms:
+        r = multipletau.autocorrelate(a=a,
+                                      m=m,
+                                      deltat=1,
+                                      normalize=False,
+                                      copy=True,
+                                      dtype=np.float_)
+        res.append(r)
+
+        # test minimal length of array
+        _r2 = multipletau.autocorrelate(a=a[:2*m],
+                                        m=m,
+                                        deltat=1,
+                                        normalize=False,
+                                        copy=True,
+                                        dtype=np.float_)
+    
+    res = np.concatenate(res)
+    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    ref = get_reference_data(myname, __file__)
+
+    assert np.allclose(res, ref, atol=0, rtol=1e-15)
+
+
 def test_ac_m_wrong():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
@@ -210,41 +162,89 @@ def test_ac_m_wrong():
                                    deltat=1,
                                    normalize=True,
                                    copy=True,
-                                   dtype=np.float)
+                                   dtype=np.float_)
 
     r2 = multipletau.autocorrelate(a=a,
                                    m=15,
                                    deltat=1,
                                    normalize=True,
                                    copy=True,
-                                   dtype=np.float)
+                                   dtype=np.float_)
 
     r3 = multipletau.autocorrelate(a=a,
                                    m=15.5,
                                    deltat=1,
                                    normalize=True,
                                    copy=True,
-                                   dtype=np.float)
+                                   dtype=np.float_)
 
     r4 = multipletau.autocorrelate(a=a,
                                    m=14.5,
                                    deltat=1,
                                    normalize=True,
                                    copy=True,
-                                   dtype=np.float)
+                                   dtype=np.float_)
 
     r5 = multipletau.autocorrelate(a=a,
                                    m=16.,
                                    deltat=1,
                                    normalize=True,
                                    copy=True,
-                                   dtype=np.float)
+                                   dtype=np.float_)
     assert np.all(r1==r2)
     assert np.all(r1==r3)
     assert np.all(r1==r4)
     assert np.all(r1==r5)
 
 
+def test_ac_normalize():
+    myframe = sys._getframe()
+    myname = myframe.f_code.co_name
+    print("running ", myname)
+    
+    arrs = get_sample_arrays()
+    
+    res = []
+    for a in arrs:
+        r = multipletau.autocorrelate(a=a,
+                                      m=16,
+                                      deltat=1,
+                                      normalize=True,
+                                      copy=True,
+                                      dtype=np.float_)
+        res.append(r)
+    
+    res = np.concatenate(res)
+    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    ref = get_reference_data(myname, __file__)
+
+    assert np.allclose(res, ref, atol=0, rtol=1e-14)
+
+
+def test_ac_simple():
+    myframe = sys._getframe()
+    myname = myframe.f_code.co_name
+    print("running ", myname)
+    
+    arrs = get_sample_arrays()
+    
+    res = []
+    for a in arrs:
+        r = multipletau.autocorrelate(a=a,
+                                      m=16,
+                                      deltat=1,
+                                      normalize=False,
+                                      copy=True,
+                                      dtype=np.float_)
+        res.append(r)
+    
+    res = np.concatenate(res)
+    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    ref = get_reference_data(myname, __file__)
+
+    assert np.allclose(res, ref, atol=0, rtol=1e-15)
+
+
 if __name__ == "__main__":
     # Run all tests
     loc = locals()
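
Most of the hunks in the test files replace the Python builtin aliases
np.float, np.int and np.complex with the explicit NumPy scalar types
np.float_, np.int_ and np.complex_ (the "always use numpy dtypes" changelog
item). A quick sanity sketch of what these names resolve to, which is also
where the i386 fix comes from, since np.int_ is platform dependent:

    import numpy as np

    assert np.dtype(np.float_) == np.dtype(np.float64)
    assert np.dtype(np.complex_) == np.dtype(np.complex128)
    # np.int_ maps to the platform's C long: 32 bit on i386, 64 bit on amd64
    print(np.dtype(np.int_))
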
diff --git a/tests/test_basic.py b/tests/test_basic.py
index 0cb994c..e2484f4 100644
--- a/tests/test_basic.py
+++ b/tests/test_basic.py
@@ -13,7 +13,7 @@ from multipletau import autocorrelate, correlate
 
 
 def test_ac():
-    ist = autocorrelate(range(42), m=2, dtype=np.dtype(float))
+    ist = autocorrelate(range(42), m=2, dtype=np.float_)
     soll = np.array([[  0.00000000e+00,   2.38210000e+04],
                      [  1.00000000e+00,   2.29600000e+04],
                      [  2.00000000e+00,   2.21000000e+04],
@@ -23,7 +23,7 @@ def test_ac():
 
 
 def test_cc():
-    ist = correlate(range(42), range(1,43), m=2, dtype=np.dtype(float))
+    ist = correlate(range(42), range(1,43), m=2, dtype=np.float_)
     soll = np.array([[  0.00000000e+00,   2.46820000e+04],
                      [  1.00000000e+00,   2.38210000e+04],
                      [  2.00000000e+00,   2.29600000e+04],
diff --git a/tests/test_correlate.py b/tests/test_correlate.py
index 7861d5f..7484a87 100644
--- a/tests/test_correlate.py
+++ b/tests/test_correlate.py
@@ -12,6 +12,7 @@ import sys
 import warnings
 import zipfile
 
+
 # Add parent directory to beginning of path variable
 DIR = dirname(abspath(__file__))
 sys.path = [split(DIR)[0]] + sys.path
@@ -20,10 +21,11 @@ import multipletau
 
 from test_autocorrelate import get_reference_data
 
+
 def get_sample_arrays_cplx():
     a = [-4.3,   1,    9, -99.2, 13]
     b = [9921, 281, 23.5,   5.3, 77]
-    c = [  12,   0,    2,   1.3, 33]
+    c = [  12,   0,  2.1,   1.3, 33]
     d = [  32,  .1,   -2,   6.3, 88]
     l = [  33,  92,   47,    54, 99]
     r = [   0,   1,   12,     4,  0] 
@@ -39,107 +41,6 @@ def get_sample_arrays_cplx():
     return arrs
 
 
-def test_cc_simple():
-    myframe = sys._getframe()
-    myname = myframe.f_code.co_name
-    print("running ", myname)
-    
-    arrs = get_sample_arrays_cplx()
-    
-    res = []
-    for a in arrs:
-        r = multipletau.correlate(a=a,
-                                  v=a,
-                                  m=16,
-                                  deltat=1,
-                                  normalize=False,
-                                  copy=True,
-                                  dtype=np.complex)
-        res.append(r)
-    res = np.concatenate(res)
-
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
-    ref = get_reference_data(myname, __file__)
-
-    assert np.all(res==ref)
-
-    # also check result of autocorrelate
-    res2 = []
-    for a in arrs:
-        r = multipletau.autocorrelate(a=a,
-                                      m=16,
-                                      deltat=1,
-                                      normalize=False,
-                                      copy=True,
-                                      dtype=np.complex)
-        res2.append(r)
-    res2 = np.concatenate(res2)
-
-    assert np.all(res==res2)
-
-
-def test_cc_normalize():
-    myframe = sys._getframe()
-    myname = myframe.f_code.co_name
-    print("running ", myname)
-    
-    arrs = get_sample_arrays_cplx()
-    
-    res = []
-    for a in arrs:
-        r = multipletau.correlate(a=a.real,
-                                  v=a.imag,
-                                  m=16,
-                                  deltat=1,
-                                  normalize=True,
-                                  copy=True,
-                                  dtype=np.float)
-        res.append(r)
-    
-    res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
-    ref = get_reference_data(myname, __file__)
-
-    assert np.all(res==ref)
-
-
-def test_cc_m():
-    myframe = sys._getframe()
-    myname = myframe.f_code.co_name
-    print("running ", myname)
-    
-    arrs = get_sample_arrays_cplx()
-
-    ms = [4, 8, 10, 16, 20, 64, 128]
-    a = np.concatenate(arrs)
-
-    res = []    
-    for m in ms:
-        r = multipletau.correlate(a=a,
-                                  v=a,
-                                  m=m,
-                                  deltat=1,
-                                  normalize=False,
-                                  copy=True,
-                                  dtype=np.complex)
-        res.append(r)
-
-        # test minimal length of array
-        _r2 = multipletau.correlate(a=a[:2*m],
-                                    v=a[:2*m],
-                                    m=m,
-                                    deltat=1,
-                                    normalize=False,
-                                    copy=True,
-                                    dtype=np.complex)
-    
-    res = np.concatenate(res)
-    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
-    ref = get_reference_data(myname, __file__)
-
-    assert np.all(res==ref)
-
-
 def test_cc_copy():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
@@ -176,7 +77,7 @@ def test_cc_copy():
     # make sure the copy function really changes something
     assert not np.all(arrs == refarrs)
 
-    
+
 def test_cc_dtype():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
@@ -192,7 +93,7 @@ def test_cc_dtype():
                                deltat=1,
                                normalize=True,
                                copy=True,
-                               dtype=np.float)
+                               dtype=np.float_)
 
     ri = multipletau.correlate(a=a,
                                v=a,
@@ -200,18 +101,18 @@ def test_cc_dtype():
                                deltat=1,
                                normalize=True,
                                copy=True,
-                               dtype=np.uint)
+                               dtype=np.int_)
 
-    ri2 = multipletau.correlate(a=np.array(a, dtype=np.uint),
-                                v=np.array(a, dtype=np.uint),
+    ri2 = multipletau.correlate(a=np.array(a, dtype=np.int_),
+                                v=np.array(a, dtype=np.int_),
                                 m=16,
                                 deltat=1,
                                 normalize=True,
                                 copy=True,
                                 dtype=None)
     
-    assert ri.dtype == np.dtype(np.float), "if wrong dtype, dtype should default to np.float"
-    assert ri2.dtype == np.dtype(np.float), "if wrong dtype, dtype should default to np.float"
+    assert ri.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
+    assert ri2.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
     assert np.all(rf == ri), "result should be the same, because input is the same"
     assert np.all(rf == ri2), "result should be the same, because input is the same"
 
@@ -230,16 +131,53 @@ def test_cc_dtype2():
                                deltat=1,
                                normalize=True,
                                copy=True)
-    assert np.dtype(rf.dtype) == np.dtype(np.complex)
+    assert np.dtype(rf.dtype) == np.dtype(np.complex_)
 
     print("this should issue a warning of unequal input dtypes, casting to float")
     rf2 = multipletau.correlate(a=a.real,
-                               v=np.array(a.imag, dtype=np.int),
+                               v=np.array(a.imag, dtype=np.int_),
                                m=16,
                                deltat=1,
                                normalize=True,
                                copy=True)
-    assert np.dtype(rf2.dtype) == np.dtype(np.float)
+    assert np.dtype(rf2.dtype) == np.dtype(np.float_)
+
+
+def test_cc_m():
+    myframe = sys._getframe()
+    myname = myframe.f_code.co_name
+    print("running ", myname)
+    
+    arrs = get_sample_arrays_cplx()
+
+    ms = [4, 8, 10, 16, 20, 64, 128]
+    a = np.concatenate(arrs)
+
+    res = []    
+    for m in ms:
+        r = multipletau.correlate(a=a,
+                                  v=a,
+                                  m=m,
+                                  deltat=1,
+                                  normalize=False,
+                                  copy=True,
+                                  dtype=np.complex_)
+        res.append(r)
+
+        # test minimal length of array
+        _r2 = multipletau.correlate(a=a[:2*m],
+                                    v=a[:2*m],
+                                    m=m,
+                                    deltat=1,
+                                    normalize=False,
+                                    copy=True,
+                                    dtype=np.complex_)
+    
+    res = np.concatenate(res)
+    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    ref = get_reference_data(myname, __file__)
+
+    assert np.allclose(res, ref, atol=0, rtol=1e-15)
 
 
 def test_cc_m_wrong():
@@ -291,6 +229,69 @@ def test_cc_m_wrong():
     assert np.all(r1==r5)
 
 
+def test_cc_normalize():
+    myframe = sys._getframe()
+    myname = myframe.f_code.co_name
+    print("running ", myname)
+    
+    arrs = get_sample_arrays_cplx()
+    
+    res = []
+    for a in arrs:
+        r = multipletau.correlate(a=a.real,
+                                  v=a.imag,
+                                  m=16,
+                                  deltat=1,
+                                  normalize=True,
+                                  copy=True,
+                                  dtype=np.float_)
+        res.append(r)
+    res = np.concatenate(res)
+    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    ref = get_reference_data(myname, __file__)
+
+    assert np.allclose(res, ref, atol=0, rtol=1e-14)
+
+
+def test_cc_simple():
+    myframe = sys._getframe()
+    myname = myframe.f_code.co_name
+    print("running ", myname)
+    
+    arrs = get_sample_arrays_cplx()
+    
+    res = []
+    for a in arrs:
+        r = multipletau.correlate(a=a,
+                                  v=a,
+                                  m=16,
+                                  deltat=1,
+                                  normalize=False,
+                                  copy=True,
+                                  dtype=np.complex_)
+        res.append(r)
+    res = np.concatenate(res)
+
+    #np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
+    ref = get_reference_data(myname, __file__)
+
+    assert np.allclose(res, ref, atol=0, rtol=1e-15)
+
+    # also check result of autocorrelate
+    res2 = []
+    for a in arrs:
+        r = multipletau.autocorrelate(a=a,
+                                      m=16,
+                                      deltat=1,
+                                      normalize=False,
+                                      copy=True,
+                                      dtype=np.complex_)
+        res2.append(r)
+    res2 = np.concatenate(res2)
+
+    assert np.allclose(res, res2, atol=0, rtol=1e-15)
+
+
 if __name__ == "__main__":
     # Run all tests
     loc = locals()
diff --git a/tests/test_ref_numpy.py b/tests/test_ref_numpy.py
index cdf3dfd..deb2358 100644
--- a/tests/test_ref_numpy.py
+++ b/tests/test_ref_numpy.py
@@ -14,7 +14,6 @@ import multipletau
 from test_correlate import get_sample_arrays_cplx
 
 
-
 def test_corresponds_ac():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
@@ -27,19 +26,74 @@ def test_corresponds_ac():
                                        m=m,
                                        copy=True,
                                        normalize=True,
-                                       dtype=np.float128)
+                                       dtype=np.float_)
 
     reslin = multipletau.correlate_numpy(a=1*a,
                                          v=1*a,
                                          copy=True,
                                          normalize=True,
-                                         dtype=np.float128)
+                                         dtype=np.float_)
 
     idx = np.array(restau[:,0].real, dtype=int)[:m]
 
     assert np.allclose(reslin[idx, 1], restau[:m,1])
 
 
+def test_corresponds_ac_first_loop():
+    """
+    numpy correlation:
+    G_m = sum_i(a_i*a_{i+m})
+    
+    multipletau correlation 2nd order:
+    b_j = (a_{2j} + a_{2j+1}) / 2
+    G_m = sum_j(b_j*b_{j+m/2})
+        = 1/4*sum_i(a_{2i}   * a_{2i+m}   +
+                    a_{2i}   * a_{2i+m+1} +
+                    a_{2i+1} * a_{2i+m}   +   
+                    a_{2i+1} * a_{2i+m+1}
+                    )
+    
+    The values after the first m+1 lag times in the multipletau
+    correlation differ from the normal correlation, because the
+    traces are averaged over two consecutive items, effectively
+    halving the size of the trace. The multiple-tau correlation
+    can be compared to the regular correlation by using an even
+    sized sequence (here 222) in which the elements 2i and 2i+1
+    are equal, as is done in this test.
+    """
+    myframe = sys._getframe()
+    myname = myframe.f_code.co_name
+    print("running ", myname)
+    
+    a = [ arr / np.average(arr) for arr in get_sample_arrays_cplx() ]
+    a = np.concatenate(a)[:222]
+    # two consecutive elements are the same, so the multiple-tau method
+    # corresponds to the numpy correlation for the first loop.
+    a[::2] = a[1::2]
+    
+    for m in [2,4,6,8,10,12,14,16]:
+        restau = multipletau.correlate(a=a,
+                                       v=a.imag+1j*a.real,
+                                       m=m,
+                                       copy=True,
+                                       normalize=False,
+                                       dtype=np.complex_)
+        
+        reslin = multipletau.correlate_numpy(a=a,
+                                             v=a.imag+1j*a.real,
+                                             copy=True,
+                                             normalize=False,
+                                             dtype=np.complex_)
+        
+        idtau = np.where(restau[:,0]==m+2)[0][0]
+        tau3 = restau[idtau, 1] #m+1 initial bins
+    
+        idref = np.where(reslin[:,0]==m+2)[0][0]
+        tau3ref = reslin[idref, 1]
+        
+        assert np.allclose(tau3, tau3ref)
+
+
 def test_corresponds_ac_nonormalize():
     myframe = sys._getframe()
     myname = myframe.f_code.co_name
@@ -52,13 +106,13 @@ def test_corresponds_ac_nonormalize():
                                        m=m,
                                        copy=True,
                                        normalize=False,
-                                       dtype=np.float128)
+                                       dtype=np.float_)
 
     reslin = multipletau.correlate_numpy(a=1*a,
                                          v=1*a,
                                          copy=True,
                                          normalize=False,
-                                         dtype=np.float128)
+                                         dtype=np.float_)
 
     idx = np.array(restau[:,0].real, dtype=int)[:m+1]
 
@@ -78,13 +132,13 @@ def test_corresponds_cc():
                                    m=m,
                                    copy=True,
                                    normalize=True,
-                                   dtype=np.complex256)
+                                   dtype=np.complex_)
 
     reslin = multipletau.correlate_numpy(a=a,
                                          v=a.imag+1j*a.real,
                                          copy=True,
                                          normalize=True,
-                                         dtype=np.complex256)
+                                         dtype=np.complex_)
 
     idx = np.array(restau[:,0].real, dtype=int)[:m+1]
 
@@ -104,74 +158,19 @@ def test_corresponds_cc_nonormalize():
                                    m=m,
                                    copy=True,
                                    normalize=False,
-                                   dtype=np.complex256)
+                                   dtype=np.complex_)
 
     reslin = multipletau.correlate_numpy(a=a,
                                          v=a.imag+1j*a.real,
                                          copy=True,
                                          normalize=False,
-                                         dtype=np.complex256)
+                                         dtype=np.complex_)
 
     idx = np.array(restau[:,0].real, dtype=int)[:m+1]
 
     assert np.allclose(reslin[idx, 1], restau[:m+1,1])
     
 
-def test_corresponds_ac_first_loop():
-    """
-    numpy correlation:
-    G_m = sum_i(a_i*a_{i+m})
-    
-    multipletau correlation 2nd order:
-    b_j = (a_{2i} + a_{2i+1} / 2)
-    G_m = sum_j(b_j*b_{j+1})
-        = 1/4*sum_i(a_{2i}   * a_{2i+m}   +
-                    a_{2i}   * a_{2i+m+1} +
-                    a_{2i+1} * a_{2i+m}   +   
-                    a_{2i+1} * a_{2i+m+1}
-                    )
-    
-    The values after the first m+1 lag times in the multipletau
-    correlation differ from the normal correlation, because the
-    traces are averaged over two consecutive items, effectively
-    halving the size of the trace. The multiple-tau correlation
-    can be compared to the regular correlation by using an even
-    sized sequence (here 222) in which the elements 2i and 2i+1
-    are equal, as is done in this test.
-    """
-    myframe = sys._getframe()
-    myname = myframe.f_code.co_name
-    print("running ", myname)
-    
-    a = [ arr / np.average(arr) for arr in get_sample_arrays_cplx() ]
-    a = np.concatenate(a)[:222]
-    # two consecutive elements are the same, so the multiple-tau method
-    # corresponds to the numpy correlation for the first loop.
-    a[::2] = a[1::2]
-    
-    for m in [2,4,6,8,10,12,14,16]:
-        restau = multipletau.correlate(a=a,
-                                       v=a.imag+1j*a.real,
-                                       m=m,
-                                       copy=True,
-                                       normalize=False,
-                                       dtype=np.complex256)
-        
-        reslin = multipletau.correlate_numpy(a=a,
-                                             v=a.imag+1j*a.real,
-                                             copy=True,
-                                             normalize=False,
-                                             dtype=np.complex256)
-        
-        idtau = np.where(restau[:,0]==m+2)[0][0]
-        tau3 = restau[idtau, 1] #m+1 initial bins
-    
-        idref = np.where(reslin[:,0]==m+2)[0][0]
-        tau3ref = reslin[idref, 1]
-        
-        assert np.allclose(tau3, tau3ref)
-
-
 if __name__ == "__main__":
     # Run all tests
     loc = locals()
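
The docstring of test_corresponds_ac_first_loop (moved above) relates the
coarsened multiple-tau sum to the plain correlation when neighbouring
elements are equal. A tiny numeric check of that identity, independent of
the package code and using illustrative data:

    import numpy as np

    rng = np.random.RandomState(0)
    a = np.repeat(rng.rand(111), 2)  # a[2i] == a[2i+1], len(a) == 222
    m = 4                            # an even lag

    # plain correlation at lag m: sum_i a_i * a_{i+m}
    direct = np.sum(a[:-m] * a[m:])

    # coarsened trace b_j = (a_{2j} + a_{2j+1}) / 2, correlated at lag m/2
    b = (a[0::2] + a[1::2]) / 2
    coarse = np.sum(b[:-(m // 2)] * b[m // 2:])

    # with duplicated neighbours each coarse term collects exactly two
    # identical terms of the plain sum
    assert np.allclose(direct, 2 * coarse)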

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-multipletau.git


