[Python-modules-commits] r31671 - in packages/scipy/trunk/debian (8 files)
jtaylor-guest at users.alioth.debian.org
Sat Jan 17 14:12:02 UTC 2015
Date: Saturday, January 17, 2015 @ 14:12:01
Author: jtaylor-guest
Revision: 31671
New upstream bugfix release
Modified:
packages/scipy/trunk/debian/changelog
packages/scipy/trunk/debian/patches/series
Deleted:
packages/scipy/trunk/debian/patches/numpy-version-fix.patch
packages/scipy/trunk/debian/patches/numpy_ufunc.patch
packages/scipy/trunk/debian/patches/put-_gen-classes-back.patch
packages/scipy/trunk/debian/patches/put-back-veccdf.patch
packages/scipy/trunk/debian/patches/sparse-fix-omitted-types.patch
packages/scipy/trunk/debian/patches/sparse-superlu-fix.patch
Modified: packages/scipy/trunk/debian/changelog
===================================================================
--- packages/scipy/trunk/debian/changelog 2015-01-17 11:48:54 UTC (rev 31670)
+++ packages/scipy/trunk/debian/changelog 2015-01-17 14:12:01 UTC (rev 31671)
@@ -1,5 +1,13 @@
-python-scipy (0.14.0-3) unstable; urgency=medium
+python-scipy (0.14.1-1) unstable; urgency=medium
+ * New upstream bugfix release
+ remove upstream applied patches:
+ - numpy-version-fix.patch
+ - numpy_ufunc.patch
+ - put-_gen-classes-back.patch
+ - put-back-veccdf.patch
+ - sparse-fix-omitted-types.patch
+ - sparse-superlu-fix.patch
* add python-gmpy2 dependency to autopkgtest to speed up mpmath tests
* add suggest on python-scipy-doc (Closes: #760522)
Deleted: packages/scipy/trunk/debian/patches/numpy-version-fix.patch
===================================================================
--- packages/scipy/trunk/debian/patches/numpy-version-fix.patch 2015-01-17 11:48:54 UTC (rev 31670)
+++ packages/scipy/trunk/debian/patches/numpy-version-fix.patch 2015-01-17 14:12:01 UTC (rev 31671)
@@ -1,25 +0,0 @@
-Description: fix numpy version checking in ndimage
- (cherry picked from commit 8d445c931c96c9d3f6d9fa7bece57d986de02945)
-Origin: 34b5c8022fe7b3d799f356045af422aaaedb81bd
-Author: alex <argriffi at ncsu.edu>
-Applied-Upstream: 0.14.1
-
---- a/scipy/ndimage/filters.py
-+++ b/scipy/ndimage/filters.py
-@@ -35,6 +35,7 @@ import numpy
- from . import _ni_support
- from . import _nd_image
- from scipy.misc import doccer
-+from scipy.lib._version import NumpyVersion
-
- __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
- 'prewitt', 'sobel', 'generic_laplace', 'laplace',
-@@ -478,7 +479,7 @@ def generic_gradient_magnitude(input, de
- numpy.multiply(tmp, tmp, tmp)
- output += tmp
- # This allows the sqrt to work with a different default casting
-- if numpy.version.short_version > '1.6.1':
-+ if NumpyVersion(numpy.__version__) > '1.6.1':
- numpy.sqrt(output, output, casting='unsafe')
- else:
- numpy.sqrt(output, output)
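The patch above replaces a plain string comparison of the NumPy version with a NumpyVersion comparison. A minimal illustration of why string comparison breaks once NumPy reaches 1.10 (illustrative only, not part of the commit; distutils' LooseVersion stands in for scipy's NumpyVersion, which is what the patch actually imports):

    # Lexicographic comparison sorts '1.10.x' before '1.6.1', so the
    # casting='unsafe' branch would wrongly be skipped on newer NumPy.
    from distutils.version import LooseVersion  # stand-in; available in 2015-era Python

    print('1.10.1' > '1.6.1')                              # False -- wrong ordering
    print(LooseVersion('1.10.1') > LooseVersion('1.6.1'))  # True  -- intended ordering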
Deleted: packages/scipy/trunk/debian/patches/numpy_ufunc.patch
===================================================================
--- packages/scipy/trunk/debian/patches/numpy_ufunc.patch 2015-01-17 11:48:54 UTC (rev 31670)
+++ packages/scipy/trunk/debian/patches/numpy_ufunc.patch 2015-01-17 14:12:01 UTC (rev 31671)
@@ -1,59 +0,0 @@
-Description: Fix tests that expect numpy 1.9 to implement __numpy_ufunc__
- Change Numpy version tests to check for 1.10 instead.
- (cherry picked from commit 8b057576baef6f2bca5f3100db45d6d2915e3c1c)
-Origin: cba7222a8e38dcdf120b4b317d2c5e4f1d983d15
-From: Charles Harris <charlesr.harris at gmail.com>
-Applied-Upstream: 0.14.1
-
---- a/scipy/sparse/tests/test_base.py
-+++ b/scipy/sparse/tests/test_base.py
-@@ -1556,13 +1556,13 @@ class _TestCommon:
-
- def test_unary_ufunc_overrides(self):
- def check(name):
-- if NumpyVersion(np.__version__) < '1.9.0.dev-0':
-+ if NumpyVersion(np.__version__) < '1.10.0.dev-0':
- if name == "sign":
- raise nose.SkipTest("sign conflicts with comparison op "
-- "support on Numpy < 1.9")
-+ "support on Numpy < 1.10")
- if self.spmatrix in (dok_matrix, lil_matrix):
- raise nose.SkipTest("Unary ops not implemented for dok/lil "
-- "with Numpy < 1.9")
-+ "with Numpy < 1.10")
- ufunc = getattr(np, name)
-
- X = self.spmatrix(np.arange(20).reshape(4, 5) / 20.)
-@@ -1571,8 +1571,8 @@ class _TestCommon:
- X2 = ufunc(X)
- assert_array_equal(X2.toarray(), X0)
-
-- if not (NumpyVersion(np.__version__) < '1.9.0.dev-0'):
-- # the out argument doesn't work on Numpy < 1.9
-+ if not (NumpyVersion(np.__version__) < '1.10.0.dev-0'):
-+ # the out argument doesn't work on Numpy < 1.10
- out = np.zeros_like(X0)
- X3 = ufunc(X, out=out)
- assert_(X3 is out)
-@@ -1608,8 +1608,8 @@ class _TestCommon:
- a_items = dict(dense=a, scalar=c, cplx_scalar=d, int_scalar=e, sparse=asp)
- b_items = dict(dense=b, scalar=c, cplx_scalar=d, int_scalar=e, sparse=bsp)
-
-- @dec.skipif(NumpyVersion(np.__version__) < '1.9.0.dev-0',
-- "feature requires Numpy 1.9")
-+ @dec.skipif(NumpyVersion(np.__version__) < '1.10.0.dev-0',
-+ "feature requires Numpy 1.10")
- def check(i, j, dtype):
- ax = a_items[i]
- bx = b_items[j]
-@@ -1711,8 +1711,8 @@ class _TestCommon:
- yield check, i, j, dtype
-
-
-- @dec.skipif(NumpyVersion(np.__version__) < '1.9.0.dev-0',
-- "feature requires Numpy 1.9")
-+ @dec.skipif(NumpyVersion(np.__version__) < '1.10.0.dev-0',
-+ "feature requires Numpy 1.10")
- def test_ufunc_object_array(self):
- # This tests compatibility with previous Numpy object array
- # ufunc behavior. See gh-3345.
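The recurring pattern in this patch is a version-gated test skip: compare the running NumPy against a development version string and skip the test when the feature is not available. A small self-contained sketch of that idiom (the helper name is made up; NumpyVersion is assumed importable, e.g. as numpy.lib.NumpyVersion on NumPy >= 1.9, or scipy.lib._version.NumpyVersion as in the tests above):

    import numpy as np
    from numpy.lib import NumpyVersion  # assumption: NumPy >= 1.9

    def numpy_at_least(minimum='1.10.0.dev-0'):
        # True when the running NumPy is at least `minimum`; the patch moved
        # this cut-off from 1.9 to 1.10 because the feature slipped a release.
        return NumpyVersion(np.__version__) >= minimum

    if numpy_at_least():
        print("running ufunc-override tests")
    else:
        print("skipping: feature requires NumPy 1.10")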
Deleted: packages/scipy/trunk/debian/patches/put-_gen-classes-back.patch
===================================================================
--- packages/scipy/trunk/debian/patches/put-_gen-classes-back.patch 2015-01-17 11:48:54 UTC (rev 31670)
+++ packages/scipy/trunk/debian/patches/put-_gen-classes-back.patch 2015-01-17 14:12:01 UTC (rev 31671)
@@ -1,173 +0,0 @@
-Description: put ``*_gen`` classes back into the scipy.stats.distributions namespace
-Bug: https://github.com/scipy/scipy/pull/3804
-Origin: 041ef465021a7ff33ea96c0b8ef690c4dfa8f685
-Author: alex <argriffi at ncsu.edu>
-Applied-Upstream: 0.14.1
-
---- a/scipy/stats/_continuous_distns.py
-+++ b/scipy/stats/_continuous_distns.py
-@@ -26,32 +26,12 @@ from ._tukeylambda_stats import (tukeyla
- tukeylambda_kurtosis as _tlkurt)
-
- from ._distn_infrastructure import (
-- rv_continuous, valarray,
-- _skew, _kurtosis, _lazywhere,
-- _ncx2_log_pdf, _ncx2_pdf, _ncx2_cdf,
-+ rv_continuous, valarray, _skew, _kurtosis, _lazywhere,
-+ _ncx2_log_pdf, _ncx2_pdf, _ncx2_cdf, get_distribution_names,
- )
-
- from ._constants import _XMIN, _EULER, _ZETA3
-
--__all__ = [
-- 'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
-- 'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
-- 'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
-- 'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
-- 'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
-- 'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
-- 'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
-- 'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
-- 'gausshyper', 'invgamma', 'invgauss', 'invweibull',
-- 'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
-- 'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
-- 'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
-- 'nct', 'pareto', 'lomax', 'pearson3', 'powerlaw', 'powerlognorm',
-- 'powernorm', 'rdist', 'rayleigh', 'reciprocal', 'rice',
-- 'recipinvgauss', 'semicircular', 'triang', 'truncexpon',
-- 'truncnorm', 'tukeylambda', 'uniform', 'vonmises', 'vonmises_line',
-- 'wald', 'wrapcauchy']
--
-
- ## Kolmogorov-Smirnov one-sided and two-sided test statistics
- class ksone_gen(rv_continuous):
-@@ -4027,3 +4007,10 @@ class wrapcauchy_gen(rv_continuous):
- def _entropy(self, c):
- return log(2*pi*(1-c*c))
- wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy')
-+
-+
-+# Collect names of classes and objects in this module.
-+pairs = list(globals().items())
-+_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
-+
-+__all__ = _distn_names + _distn_gen_names
---- a/scipy/stats/_discrete_distns.py
-+++ b/scipy/stats/_discrete_distns.py
-@@ -13,13 +13,7 @@ import numpy as np
- import numpy.random as mtrand
-
- from ._distn_infrastructure import (
-- rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf)
--
--__all__ = [
-- 'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom',
-- 'logser', 'poisson', 'planck', 'boltzmann', 'randint',
-- 'zipf', 'dlaplace', 'skellam'
-- ]
-+ rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
-
-
- class binom_gen(rv_discrete):
-@@ -761,3 +755,10 @@ class skellam_gen(rv_discrete):
- g2 = 1 / var
- return mean, var, g1, g2
- skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
-+
-+
-+# Collect names of classes and objects in this module.
-+pairs = list(globals().items())
-+_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
-+
-+__all__ = _distn_names + _distn_gen_names
---- a/scipy/stats/_distn_infrastructure.py
-+++ b/scipy/stats/_distn_infrastructure.py
-@@ -3177,3 +3177,36 @@ class rv_discrete(rv_generic):
- if count > maxcount:
- warnings.warn('expect(): sum did not converge', RuntimeWarning)
- return tot/invfac
-+
-+
-+def get_distribution_names(namespace_pairs, rv_base_class):
-+ """
-+ Collect names of statistical distributions and their generators.
-+
-+ Parameters
-+ ----------
-+ namespace_pairs : sequence
-+ A snapshot of (name, value) pairs in the namespace of a module.
-+ rv_base_class : class
-+ The base class of random variable generator classes in a module.
-+
-+ Returns
-+ -------
-+ distn_names : list of strings
-+ Names of the statistical distributions.
-+ distn_gen_names : list of strings
-+ Names of the generators of the statistical distributions.
-+ Note that these are not simply the names of the statistical
-+ distributions, with a _gen suffix added.
-+
-+ """
-+ distn_names = []
-+ distn_gen_names = []
-+ for name, value in namespace_pairs:
-+ if name.startswith('_'):
-+ continue
-+ if name.endswith('_gen') and issubclass(value, rv_base_class):
-+ distn_gen_names.append(name)
-+ if isinstance(value, rv_base_class):
-+ distn_names.append(name)
-+ return distn_names, distn_gen_names
---- a/scipy/stats/distributions.py
-+++ b/scipy/stats/distributions.py
-@@ -18,5 +18,8 @@ from ._discrete_distns import *
-
- # For backwards compatibility e.g. pymc expects distributions.__all__.
- __all__ = ['entropy', 'rv_discrete', 'rv_continuous']
--__all__ += _continuous_distns.__all__ + _discrete_distns.__all__
-+
-+# Add only the distribution names, not the *_gen names.
-+__all__ += _continuous_distns._distn_names
-+__all__ += _discrete_distns._distn_names
-
---- a/scipy/stats/tests/test_distributions.py
-+++ b/scipy/stats/tests/test_distributions.py
-@@ -19,6 +19,8 @@ from scipy.lib._version import NumpyVers
- from scipy import special
- import scipy.stats as stats
- from scipy.stats._distn_infrastructure import argsreduce
-+import scipy.stats.distributions
-+
- from scipy.special import xlogy
-
-
-@@ -26,7 +28,8 @@ from scipy.special import xlogy
- DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
-
-
--# generate test cases to test cdf and distribution consistency
-+# Generate test cases to test cdf and distribution consistency.
-+# Note that this list does not include all distributions.
- dists = ['uniform','norm','lognorm','expon','beta',
- 'powerlaw','bradford','burr','fisk','cauchy','halfcauchy',
- 'foldcauchy','gamma','gengamma','loggamma',
-@@ -40,6 +43,18 @@ dists = ['uniform','norm','lognorm','exp
- 'hypsecant', 'laplace', 'reciprocal','triang','tukeylambda',
- 'vonmises', 'vonmises_line', 'pearson3']
-
-+
-+def _assert_hasattr(a, b, msg=None):
-+ if msg is None:
-+ msg = '%s does not have attribute %s' % (a, b)
-+ assert_(hasattr(a, b), msg=msg)
-+
-+
-+def test_api_regression():
-+ # https://github.com/scipy/scipy/issues/3802
-+ _assert_hasattr(scipy.stats.distributions, 'f_gen')
-+
-+
- # check function for test generator
-
-
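The core of this patch is the new get_distribution_names() helper, which rebuilds __all__ from the module namespace so that both the public distribution instances and their *_gen generator classes are exported again (the regression in gh-3802 was that names such as f_gen vanished from scipy.stats.distributions). A minimal self-contained sketch of the same collection logic, with made-up class names and a small isinstance(type) guard added for safety:

    class rv_continuous(object):
        """Stand-in for scipy.stats.rv_continuous."""

    class norm_gen(rv_continuous):
        """Generator class; the instance below is the public distribution."""

    norm = norm_gen()
    _private_helper = None          # leading underscore: ignored by the collector

    def get_distribution_names(namespace_pairs, rv_base_class):
        distn_names, distn_gen_names = [], []
        for name, value in namespace_pairs:
            if name.startswith('_'):
                continue
            if (name.endswith('_gen') and isinstance(value, type)
                    and issubclass(value, rv_base_class)):
                distn_gen_names.append(name)
            if isinstance(value, rv_base_class):
                distn_names.append(name)
        return distn_names, distn_gen_names

    names, gen_names = get_distribution_names(list(globals().items()), rv_continuous)
    print(names, gen_names)         # expected: ['norm'] ['norm_gen']
    __all__ = names + gen_names     # both spellings stay importable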
Deleted: packages/scipy/trunk/debian/patches/put-back-veccdf.patch
===================================================================
--- packages/scipy/trunk/debian/patches/put-back-veccdf.patch 2015-01-17 11:48:54 UTC (rev 31670)
+++ packages/scipy/trunk/debian/patches/put-back-veccdf.patch 2015-01-17 14:12:01 UTC (rev 31671)
@@ -1,48 +0,0 @@
-Description: put back veccdf/vecfunc/vec_generic_moment
- Removed erroneously in gh-3243 for 0.14.0. These methods were always
- private ones but not marked as such, and as the comments on gh-3243 show
- they were used. Hence now deprecated.
-
- (cherry picked from commit 556220bb392539b95da9e111904f5d6ebe501711)
-Origin: b5db4cc1fead791a39af6d6c349c44d8ce04314b
-Author: Ralf Gommers <ralf.gommers at googlemail.com>
-Applied-Upstream: 0.14.1
-
---- a/doc/source/tutorial/stats.rst
-+++ b/doc/source/tutorial/stats.rst
-@@ -60,7 +60,8 @@ We can list all methods and properties o
- ``dir(norm)``. As it turns out, some of the methods are private
- methods although they are not named as such (their name does not start
- with a leading underscore), for example ``veccdf``, are only available
--for internal calculation.
-+for internal calculation (those methods will give warnings when one tries to
-+use them, and will be removed at some point).
-
- To obtain the `real` main methods, we list the methods of the frozen
- distribution. (We explain the meaning of a `frozen` distribution
---- a/scipy/stats/_distn_infrastructure.py
-+++ b/scipy/stats/_distn_infrastructure.py
-@@ -1430,6 +1430,11 @@ class rv_continuous(rv_generic):
- self._cdfvec = vectorize(self._cdf_single, otypes='d')
- self._cdfvec.nin = self.numargs + 1
-
-+ # backwards compat. these were removed in 0.14.0, put back but
-+ # deprecated in 0.14.1:
-+ self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
-+ self.veccdf = np.deprecate(self._cdfvec, "veccdf")
-+
- self.extradoc = extradoc
- if momtype == 0:
- self.generic_moment = vectorize(self._mom0_sc, otypes='d')
-@@ -2632,6 +2637,11 @@ class rv_discrete(rv_generic):
- _vec_generic_moment.nin = self.numargs + 2
- self.generic_moment = instancemethod(_vec_generic_moment,
- self, rv_discrete)
-+ # backwards compat. was removed in 0.14.0, put back but
-+ # deprecated in 0.14.1:
-+ self.vec_generic_moment = np.deprecate(_vec_generic_moment,
-+ "vec_generic_moment",
-+ "generic_moment")
-
- # correct nin for ppf vectorization
- _vppf = vectorize(_drv2_ppfsingle, otypes='d')
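The mechanism used here is np.deprecate, which wraps a callable so that calls still go through but emit a DeprecationWarning first. A stand-alone sketch of that pattern (the function below is made up; np.deprecate is the NumPy 1.x API the patch relies on and has since been removed in NumPy 2.0):

    import warnings
    import numpy as np

    def _cdfvec(x):
        # placeholder for the private vectorized CDF the alias forwards to
        return 0.5 * x

    # old public name, kept for backwards compatibility but marked deprecated
    veccdf = np.deprecate(_cdfvec, old_name="veccdf")

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        print(veccdf(0.3))           # 0.15 -- still forwards to the wrapped function
        print(caught[0].category)    # <class 'DeprecationWarning'>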
Modified: packages/scipy/trunk/debian/patches/series
===================================================================
--- packages/scipy/trunk/debian/patches/series 2015-01-17 11:48:54 UTC (rev 31670)
+++ packages/scipy/trunk/debian/patches/series 2015-01-17 14:12:01 UTC (rev 31671)
@@ -5,9 +5,3 @@
fitpack-alias.patch
fix-undefined-behavior-in-alngam.patch
relax-bounds-of-interpolate-test.patch
-sparse-superlu-fix.patch
-put-back-veccdf.patch
-sparse-fix-omitted-types.patch
-numpy-version-fix.patch
-put-_gen-classes-back.patch
-numpy_ufunc.patch
Deleted: packages/scipy/trunk/debian/patches/sparse-fix-omitted-types.patch
===================================================================
--- packages/scipy/trunk/debian/patches/sparse-fix-omitted-types.patch 2015-01-17 11:48:54 UTC (rev 31670)
+++ packages/scipy/trunk/debian/patches/sparse-fix-omitted-types.patch 2015-01-17 14:12:01 UTC (rev 31671)
@@ -1,57 +0,0 @@
-Origin: ef1e65a16a9a073cb1f75b24ea4e75bfb30eea55
-Author: Pauli Virtanen <pav at iki.fi>
-Description: fix omitted types in sparsetools typemaps
- (cherry picked from commit 816638f815a36a8ddde4fe448967bca3111a3c3e)
-Bug: https://github.com/scipy/scipy/issues/3780
-Applied-Upstream: 0.14.1
-
---- a/scipy/sparse/sparsetools/sparsetools.cxx
-+++ b/scipy/sparse/sparsetools/sparsetools.cxx
-@@ -429,9 +429,12 @@ static void *allocate_std_vector_typenum
- PROCESS(NPY_UBYTE, npy_ubyte);
- PROCESS(NPY_SHORT, npy_short);
- PROCESS(NPY_USHORT, npy_ushort);
-- PROCESS(NPY_INT, npy_uint);
-- PROCESS(NPY_LONG, npy_ulong);
-- PROCESS(NPY_LONGLONG, npy_ulonglong);
-+ PROCESS(NPY_INT, npy_int);
-+ PROCESS(NPY_UINT, npy_uint);
-+ PROCESS(NPY_LONG, npy_long);
-+ PROCESS(NPY_ULONG, npy_ulong);
-+ PROCESS(NPY_LONGLONG, npy_longlong);
-+ PROCESS(NPY_ULONGLONG, npy_ulonglong);
- PROCESS(NPY_FLOAT, npy_float);
- PROCESS(NPY_DOUBLE, npy_double);
- PROCESS(NPY_LONGDOUBLE, npy_longdouble);
-@@ -458,9 +461,12 @@ static void free_std_vector_typenum(int
- PROCESS(NPY_UBYTE, npy_ubyte);
- PROCESS(NPY_SHORT, npy_short);
- PROCESS(NPY_USHORT, npy_ushort);
-- PROCESS(NPY_INT, npy_uint);
-- PROCESS(NPY_LONG, npy_ulong);
-- PROCESS(NPY_LONGLONG, npy_ulonglong);
-+ PROCESS(NPY_INT, npy_int);
-+ PROCESS(NPY_UINT, npy_uint);
-+ PROCESS(NPY_LONG, npy_long);
-+ PROCESS(NPY_ULONG, npy_ulong);
-+ PROCESS(NPY_LONGLONG, npy_longlong);
-+ PROCESS(NPY_ULONGLONG, npy_ulonglong);
- PROCESS(NPY_FLOAT, npy_float);
- PROCESS(NPY_DOUBLE, npy_double);
- PROCESS(NPY_LONGDOUBLE, npy_longdouble);
-@@ -491,9 +497,12 @@ static PyObject *array_from_std_vector_a
- PROCESS(NPY_UBYTE, npy_ubyte);
- PROCESS(NPY_SHORT, npy_short);
- PROCESS(NPY_USHORT, npy_ushort);
-- PROCESS(NPY_INT, npy_uint);
-- PROCESS(NPY_LONG, npy_ulong);
-- PROCESS(NPY_LONGLONG, npy_ulonglong);
-+ PROCESS(NPY_INT, npy_int);
-+ PROCESS(NPY_UINT, npy_uint);
-+ PROCESS(NPY_LONG, npy_long);
-+ PROCESS(NPY_ULONG, npy_ulong);
-+ PROCESS(NPY_LONGLONG, npy_longlong);
-+ PROCESS(NPY_ULONGLONG, npy_ulonglong);
- PROCESS(NPY_FLOAT, npy_float);
- PROCESS(NPY_DOUBLE, npy_double);
- PROCESS(NPY_LONGDOUBLE, npy_longdouble);
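The mistake being fixed is that the PROCESS table mapped the signed type codes NPY_INT/NPY_LONG/NPY_LONGLONG to the corresponding unsigned C types and left the unsigned codes out entirely. A rough Python analogy of why a signed/unsigned mix-up corrupts values (an illustration of the dtype mismatch only, not the actual sparsetools code path):

    import numpy as np

    a = np.array([-1, -2, 3], dtype=np.int32)
    # Reinterpreting the same bytes as the unsigned counterpart -- which is
    # roughly what the wrong typemap did -- turns negatives into huge positives:
    print(a.view(np.uint32))   # [4294967295 4294967294          3]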
Deleted: packages/scipy/trunk/debian/patches/sparse-superlu-fix.patch
===================================================================
--- packages/scipy/trunk/debian/patches/sparse-superlu-fix.patch 2015-01-17 11:48:54 UTC (rev 31670)
+++ packages/scipy/trunk/debian/patches/sparse-superlu-fix.patch 2015-01-17 14:12:01 UTC (rev 31671)
@@ -1,72 +0,0 @@
-Description: sparse/superlu: fix || vs && mistake in L/U attributes + add test
- (cherry picked from commit 047bf1fbb8ab576a5e8b6cc35eacd9207a828bf7)
-Origin: 89548c6c421bca52e72f0673356e52d4f4c42c40
-Author: Pauli Virtanen <pav at iki.fi>
-Applied-Upstream: 0.14.1
-
---- a/scipy/sparse/linalg/dsolve/_superluobject.c
-+++ b/scipy/sparse/linalg/dsolve/_superluobject.c
-@@ -574,8 +574,8 @@ LU_to_csc(SuperMatrix *L, SuperMatrix *U
- #define IS_ZERO(p) \
- ((dtype == SLU_S) ? (*(float*)(p) == 0) : \
- ((dtype == SLU_D) ? (*(double*)(p) == 0) : \
-- ((dtype == SLU_C) ? (*(float*)(p) == 0 || *((float*)(p)+1) == 0) : \
-- (*(double*)(p) == 0 || *((double*)(p)+1) == 0))))
-+ ((dtype == SLU_C) ? (*(float*)(p) == 0 && *((float*)(p)+1) == 0) : \
-+ (*(double*)(p) == 0 && *((double*)(p)+1) == 0))))
-
- U_colptr[0] = 0;
- L_colptr[0] = 0;
---- a/scipy/sparse/linalg/dsolve/tests/test_linsolve.py
-+++ b/scipy/sparse/linalg/dsolve/tests/test_linsolve.py
-@@ -401,23 +401,38 @@ class TestSplu(object):
- assert_(not np.isnan(B).any())
-
- def test_lu_attr(self):
-- A = self.A
-- n = A.shape[0]
-- lu = splu(A)
-
-- # Check that the decomposition is as advertized
-+ def check(dtype, complex_2=False):
-+ A = self.A.astype(dtype)
-
-- Pc = np.zeros((n, n))
-- Pc[np.arange(n), lu.perm_c] = 1
-+ if complex_2:
-+ A = A + 1j*A.T
-
-- Pr = np.zeros((n, n))
-- Pr[lu.perm_r, np.arange(n)] = 1
-+ n = A.shape[0]
-+ lu = splu(A)
-
-- Ad = A.toarray()
-- lhs = Pr.dot(Ad).dot(Pc)
-- rhs = (lu.L * lu.U).toarray()
-+ # Check that the decomposition is as advertized
-
-- assert_allclose(lhs, rhs, atol=1e-10)
-+ Pc = np.zeros((n, n))
-+ Pc[np.arange(n), lu.perm_c] = 1
-+
-+ Pr = np.zeros((n, n))
-+ Pr[lu.perm_r, np.arange(n)] = 1
-+
-+ Ad = A.toarray()
-+ lhs = Pr.dot(Ad).dot(Pc)
-+ rhs = (lu.L * lu.U).toarray()
-+
-+ eps = np.finfo(dtype).eps
-+
-+ assert_allclose(lhs, rhs, atol=100*eps)
-+
-+ check(np.float32)
-+ check(np.float64)
-+ check(np.complex64)
-+ check(np.complex128)
-+ check(np.complex64, True)
-+ check(np.complex128, True)
-
-
- if __name__ == "__main__":
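The one-character change matters because a complex entry is zero only when both its real and imaginary parts are zero; with || an entry such as 3+0j was treated as zero and silently dropped from the returned L/U factors. A quick Python restatement of the two predicates (illustrative only; the real code is the C macro above):

    def is_zero_buggy(z):
        return z.real == 0 or z.imag == 0    # the old || logic

    def is_zero_fixed(z):
        return z.real == 0 and z.imag == 0   # the corrected && logic

    for z in (3 + 0j, 2j, 0j):
        print(z, is_zero_buggy(z), is_zero_fixed(z))
    # (3+0j) True False   <- buggy version wrongly calls this zero
    # 2j     True False
    # 0j     True True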