[Python-modules-commits] [python-numpy] 01/12: Import python-numpy_1.11.0~b3.orig.tar.gz

Sandro Tosi morph at moszumanska.debian.org
Sat Feb 13 23:48:48 UTC 2016


This is an automated email from the git hooks/post-receive script.

morph pushed a commit to branch master
in repository python-numpy.

commit e515924842aad6077a213f46a4a514526c4c8f44
Author: Sandro Tosi <morph at debian.org>
Date:   Sat Feb 13 20:37:02 2016 +0000

    Import python-numpy_1.11.0~b3.orig.tar.gz
---
 PKG-INFO                                |   2 +-
 doc/release/1.11.0-notes.rst            |  16 +--
 numpy/core/function_base.py             |  11 ++-
 numpy/core/include/numpy/npy_3kcompat.h |   8 +-
 numpy/core/src/multiarray/iterators.c   |  22 ++++-
 numpy/core/src/multiarray/methods.c     |   2 -
 numpy/core/src/private/ufunc_override.h |   6 ++
 numpy/core/tests/test_function_base.py  |  15 ++-
 numpy/core/tests/test_multiarray.py     |  46 +++++----
 numpy/core/tests/test_umath.py          |  25 +++++
 numpy/distutils/fcompiler/gnu.py        |   2 +-
 numpy/lib/arraypad.py                   |   2 +-
 numpy/lib/function_base.py              | 167 +++++++++++++++++---------------
 numpy/lib/nanfunctions.py               | 162 ++++++++++++++++---------------
 numpy/lib/tests/test_arraypad.py        |  11 +++
 numpy/lib/tests/test_function_base.py   |  20 +++-
 numpy/lib/tests/test_io.py              |   2 +-
 numpy/lib/tests/test_nanfunctions.py    |   5 +-
 numpy/ma/core.py                        |  31 +++---
 numpy/ma/tests/test_core.py             |   8 --
 numpy/tests/test_scripts.py             |  26 ++++-
 numpy/version.py                        |   8 +-
 setup.py                                |   2 +-
 23 files changed, 366 insertions(+), 233 deletions(-)

diff --git a/PKG-INFO b/PKG-INFO
index 9012076..ce28900 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: numpy
-Version: 1.11.0b2
+Version: 1.11.0b3
 Summary: NumPy: array processing for numbers, strings, records, and objects.
 Home-page: http://www.numpy.org
 Author: NumPy Developers
diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index c9287ed..aa11cdf 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -76,12 +76,13 @@ printing it would convert from or to local time::
     >>>> np.datetime64('2000-01-01T00:00:00')
     numpy.datetime64('2000-01-01T00:00:00-0800')  # note the timezone offset -08:00
 
+
 A consensus of datetime64 users agreed that this behavior is undesirable
-and at odds with how datetime64 is usually used (e.g., by pandas_). For
-most use cases, a timezone naive datetime type is preferred, similar to the
-``datetime.datetime`` type in the Python standard library. Accordingly,
-datetime64 no longer assumes that input is in local time, nor does it print
-local times::
+and at odds with how datetime64 is usually used (e.g., by `pandas
+<http://pandas.pydata.org>`__). For most use cases, a timezone naive datetime
+type is preferred, similar to the ``datetime.datetime`` type in the Python
+standard library. Accordingly, datetime64 no longer assumes that input is in
+local time, nor does it print local times::
 
     >>>> np.datetime64('2000-01-01T00:00:00')
     numpy.datetime64('2000-01-01T00:00:00')
@@ -99,14 +100,12 @@ As a corollary to this change, we no longer prohibit casting between datetimes
 with date units and datetimes with time units. With timezone naive datetimes,
 the rule for casting from dates to times is no longer ambiguous.
 
-pandas_: http://pandas.pydata.org
-
 ``linalg.norm`` return type changes
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 The return type of the ``linalg.norm`` function is now floating point without
 exception.  Some of the norm types previously returned integers.
 
-and returns floating results.polynomial fit changes
+polynomial fit changes
 ~~~~~~~~~~~~~~~~~~~~~~
 The various fit functions in the numpy polynomial package no longer accept
 non-integers for degree specification.
@@ -143,6 +142,7 @@ FutureWarning to changed behavior
   due to a bug, sometimes no warning was raised and the dimensions were
   already preserved.
 
+
 C API
 ~~~~~
 
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index c82c9bb..21ca1af 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -96,18 +96,23 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
 
     y = _nx.arange(0, num, dtype=dt)
 
+    delta = stop - start
     if num > 1:
-        delta = stop - start
         step = delta / div
         if step == 0:
             # Special handling for denormal numbers, gh-5437
             y /= div
-            y *= delta
+            y = y * delta
         else:
-            y *= step
+            # One might be tempted to use faster, in-place multiplication here,
+            # but this prevents step from overriding what class is produced,
+            # and thus prevents, e.g., use of Quantities; see gh-7142.
+            y = y * step
     else:
         # 0 and 1 item long sequences have an undefined step
         step = NaN
+        # Multiply with delta to allow possible override of output class.
+        y = y * delta
 
     y += start
 
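For reference, the gh-7142 comment above is about letting an ndarray subclass (e.g. a units-aware Quantity type) propagate through linspace. A minimal sketch of that behaviour, not part of the patch and using a hypothetical subclass name, mirroring the test_subclass test added later in this commit:

    import numpy as np

    class Quantity(np.ndarray):
        # hypothetical stand-in for a units-aware array subclass
        __array_priority__ = 10

    start = np.array(0.0).view(Quantity)
    stop = np.array(1.0).view(Quantity)

    # Because linspace now uses `y = y * step` instead of `y *= step`, the
    # multiplication can defer to the subclass, so the result keeps its type.
    ls = np.linspace(start, stop, 5)
    print(type(ls).__name__)   # expected: Quantity
    print(np.asarray(ls))      # expected: [ 0.    0.25  0.5   0.75  1.  ]
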
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 6a11cf9..db60a31 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -320,7 +320,13 @@ static NPY_INLINE FILE *
 npy_PyFile_Dup2(PyObject *file,
                 const char *NPY_UNUSED(mode), npy_off_t *NPY_UNUSED(orig_pos))
 {
-    return PyFile_AsFile(file);
+    FILE * fp = PyFile_AsFile(file);
+    if (fp == NULL) {
+        PyErr_SetString(PyExc_IOError,
+                        "first argument must be an open file");
+        return NULL;
+    }
+    return fp;
 }
 
 static NPY_INLINE int
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 5099e3e..1e027bb 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -207,6 +207,22 @@ slice_coerce_index(PyObject *o, npy_intp *v)
     return 1;
 }
 
+/*
+ * Same (slightly weird) semantics as slice_coerce_index, but raising a useful
+ * exception on failure. Written this way to get better error messages in
+ * 1.11, while creating minimal risky changes during beta cycle.
+ */
+static int
+slice_coerce_index_or_raise(PyObject *o, npy_intp *v)
+{
+    if (!slice_coerce_index(o, v)) {
+        PyErr_Format(PyExc_IndexError,
+                     "failed to coerce slice entry of type %s to integer",
+                     o->ob_type->tp_name);
+        return 0;
+    }
+    return 1;
+}
 
 /*
  * This is basically PySlice_GetIndicesEx, but with our coercion
@@ -226,7 +242,7 @@ slice_GetIndices(PySliceObject *r, npy_intp length,
         *step = 1;
     }
     else {
-        if (!slice_coerce_index(r->step, step)) {
+        if (!slice_coerce_index_or_raise(r->step, step)) {
             return -1;
         }
         if (*step == 0) {
@@ -241,7 +257,7 @@ slice_GetIndices(PySliceObject *r, npy_intp length,
         *start = *step < 0 ? length-1 : 0;
     }
     else {
-        if (!slice_coerce_index(r->start, start)) {
+        if (!slice_coerce_index_or_raise(r->start, start)) {
             return -1;
         }
         if (*start < 0) {
@@ -259,7 +275,7 @@ slice_GetIndices(PySliceObject *r, npy_intp length,
         *stop = defstop;
     }
     else {
-        if (!slice_coerce_index(r->stop, stop)) {
+        if (!slice_coerce_index_or_raise(r->stop, stop)) {
             return -1;
         }
         if (*stop < 0) {
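As context, this file implements the flat-iterator slicing path; a rough, assumed illustration of the failure the new wrapper reports (the triggering expression below is a guess, only the message text comes from the format string above):

    import numpy as np

    it = np.arange(10).flat
    try:
        # a slice bound that cannot be coerced to an integer
        it['a':5]
    except Exception as exc:
        # with this change, expected to be an IndexError along the lines of
        # "failed to coerce slice entry of type str to integer"
        print(type(exc).__name__, exc)
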
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 84d4e2c..56b6086 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -583,8 +583,6 @@ array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds)
 
     fd = npy_PyFile_Dup2(file, "wb", &orig_pos);
     if (fd == NULL) {
-        PyErr_SetString(PyExc_IOError,
-                "first argument must be a string or open file");
         goto fail;
     }
     if (PyArray_ToFile(self, fd, sep, format) < 0) {
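Taken together with the npy_3kcompat.h hunk above, the error for a non-file argument is now raised inside npy_PyFile_Dup2 and simply propagated here. A small sketch of the user-visible effect, mirroring the test_nofile test added later in this commit (exception types per that test; exact messages are not asserted):

    import io
    import numpy as np

    buf = io.BytesIO()          # file-like, but not an actual file object
    arr = np.ones(7)

    try:
        arr.tofile(buf)
    except Exception as exc:    # expected: IOError, per the new test_nofile
        print("tofile:", type(exc).__name__, exc)

    try:
        np.fromfile(buf, dtype=np.uint8, count=80)
    except Exception as exc:    # expected: IOError, per the new test_nofile
        print("fromfile:", type(exc).__name__, exc)
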
diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/private/ufunc_override.h
index 4042eae..59a90c7 100644
--- a/numpy/core/src/private/ufunc_override.h
+++ b/numpy/core/src/private/ufunc_override.h
@@ -198,6 +198,12 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
     /* Pos of each override in args */
     int with_override_pos[NPY_MAXARGS];
 
+    /* 2016-01-29: Disable for now in master -- can re-enable once details are
+     * sorted out. All commented bits are tagged NUMPY_UFUNC_DISABLED. -njs
+     */
+    result = NULL;
+    return 0;
+
     /*
      * Check inputs
      */
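The early return above means __numpy_ufunc__ on user classes is ignored in this release; the new test_ufunc_override_disabled in test_umath.py below spells out the expected behaviour. Roughly:

    import numpy as np

    class MyArray(object):
        # Defined, but never consulted while the override machinery is
        # disabled; mixing with an ndarray falls back to the usual TypeError.
        def __numpy_ufunc__(self, *args, **kwargs):
            self.called = True

    obj = MyArray()
    real = np.ones(10)
    try:
        real + obj
    except TypeError as exc:     # per the new test, TypeError is expected
        print(exc)
    print(hasattr(obj, "called"))   # expected: False -- the hook never ran
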
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 2df7ba3..6b54306 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -1,7 +1,7 @@
 from __future__ import division, absolute_import, print_function
 
 from numpy import (logspace, linspace, dtype, array, finfo, typecodes, arange,
-                   isnan)
+                   isnan, ndarray)
 from numpy.testing import (
     TestCase, run_module_suite, assert_, assert_equal, assert_raises,
     assert_array_equal
@@ -115,6 +115,19 @@ class TestLinspace(TestCase):
         b = PhysicalQuantity(1.0)
         assert_equal(linspace(a, b), linspace(0.0, 1.0))
 
+    def test_subclass(self):
+        class PhysicalQuantity2(ndarray):
+            __array_priority__ = 10
+
+        a = array(0).view(PhysicalQuantity2)
+        b = array(1).view(PhysicalQuantity2)
+        ls = linspace(a, b)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0))
+        ls = linspace(a, b, 1)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0, 1))
+
     def test_denormal_numbers(self):
         # Regression test for gh-5437. Will probably fail when compiled
         # with ICC, which flushes denormals to zero
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index f432aa9..d57e7c1 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -2136,6 +2136,9 @@ class TestMethods(TestCase):
         assert_equal(c, np.dot(a, b))
 
     def test_dot_override(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
+
         class A(object):
             def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                 return "A"
@@ -2543,6 +2546,9 @@ class TestBinop(object):
         assert_array_equal(res, l[4] + l[4])
 
     def test_ufunc_override_rop_precedence(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
+
         # Check that __rmul__ and other right-hand operations have
         # precedence over __numpy_ufunc__
 
@@ -2661,6 +2667,9 @@ class TestBinop(object):
             yield check, op_name, False
 
     def test_ufunc_override_rop_simple(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
+
         # Check parts of the binary op overriding behavior in an
         # explicit test case that is easier to understand.
         class SomeClass(object):
@@ -2765,6 +2774,9 @@ class TestBinop(object):
         assert_(isinstance(res, SomeClass3))
 
     def test_ufunc_override_normalize_signature(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
+
         # gh-5674
         class SomeClass(object):
             def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
@@ -2781,6 +2793,9 @@ class TestBinop(object):
         assert_equal(kw['signature'], 'ii->i')
 
     def test_numpy_ufunc_index(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
+
         # Check that index is set appropriately, also if only an output
         # is passed on (latter is another regression tests for github bug 4753)
         class CheckIndex(object):
@@ -2818,6 +2833,9 @@ class TestBinop(object):
         assert_equal(np.add(a, dummy, out=a), 0)
 
     def test_out_override(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
+
         # regression test for github bug 4753
         class OutClass(np.ndarray):
             def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
@@ -3564,6 +3582,14 @@ class TestIO(object):
     def tearDown(self):
         shutil.rmtree(self.tempdir)
 
+    def test_nofile(self):
+        # this should probably be supported as a file
+        # but for now test for proper errors
+        b = io.BytesIO()
+        assert_raises(IOError, np.fromfile, b, np.uint8, 80)
+        d = np.ones(7);
+        assert_raises(IOError, lambda x: x.tofile(b), d)
+
     def test_bool_fromstring(self):
         v = np.array([True, False, True, False], dtype=np.bool_)
         y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
@@ -4615,24 +4641,6 @@ class TestDot(TestCase):
         assert_equal(np.dot(arr, 3), desired)
         assert_equal(np.dot(3, arr), desired)
 
-    def test_dot_override(self):
-        class A(object):
-            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
-                return "A"
-
-        class B(object):
-            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
-                return NotImplemented
-
-        a = A()
-        b = B()
-        c = np.array([[1]])
-
-        assert_equal(np.dot(a, b), "A")
-        assert_equal(c.dot(a), "A")
-        assert_raises(TypeError, np.dot, b, c)
-        assert_raises(TypeError, c.dot, b)
-
     def test_accelerate_framework_sgemv_fix(self):
 
         def aligned_array(shape, align, dtype, order='C'):
@@ -4893,6 +4901,8 @@ class MatmulCommon():
         assert_equal(res, tgt12_21)
 
     def test_numpy_ufunc_override(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
 
         class A(np.ndarray):
             def __new__(cls, *args, **kwargs):
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 2ba988b..917e05e 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1215,7 +1215,24 @@ class TestSpecialMethods(TestCase):
         assert_equal(ncu.maximum(a, B()), 0)
         assert_equal(ncu.maximum(a, C()), 0)
 
+    def test_ufunc_override_disabled(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        # This test should be removed when __numpy_ufunc__ is re-enabled.
+
+        class MyArray(object):
+            def __numpy_ufunc__(self, *args, **kwargs):
+                self._numpy_ufunc_called = True
+
+        my_array = MyArray()
+        real_array = np.ones(10)
+        assert_raises(TypeError, lambda: real_array + my_array)
+        assert_raises(TypeError, np.add, real_array, my_array)
+        assert not hasattr(my_array, "_numpy_ufunc_called")
+
+
     def test_ufunc_override(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
 
         class A(object):
             def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
@@ -1241,6 +1258,8 @@ class TestSpecialMethods(TestCase):
         assert_equal(res1[5], {})
 
     def test_ufunc_override_mro(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
 
         # Some multi arg functions for testing.
         def tres_mul(a, b, c):
@@ -1332,6 +1351,8 @@ class TestSpecialMethods(TestCase):
         assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c)
 
     def test_ufunc_override_methods(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
 
         class A(object):
             def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
@@ -1436,6 +1457,8 @@ class TestSpecialMethods(TestCase):
         assert_equal(res[4], (a, [4, 2], 'b0'))
 
     def test_ufunc_override_out(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
 
         class A(object):
             def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
@@ -1470,6 +1493,8 @@ class TestSpecialMethods(TestCase):
         assert_equal(res7['out'][1], 'out1')
 
     def test_ufunc_override_exception(self):
+        # 2016-01-29: NUMPY_UFUNC_DISABLED
+        return
 
         class A(object):
             def __numpy_ufunc__(self, *a, **kwargs):
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 37be080..9ba5759 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -313,7 +313,7 @@ class Gnu95FCompiler(GnuFCompiler):
                 if target:
                     d = os.path.normpath(self.get_libgcc_dir())
                     root = os.path.join(d, *((os.pardir,)*4))
-                    path = os.path.join(root, target, "lib")
+                    path = os.path.join(root, "lib")
                     mingwdir = os.path.normpath(path)
                     if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
                         opt.append(mingwdir)
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index dad1f47..c30ef6b 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -1337,7 +1337,7 @@ def pad(array, pad_width, mode, **kwargs):
         'reflect_type': 'even',
         }
 
-    if isinstance(mode, str):
+    if isinstance(mode, np.compat.basestring):
         # Make sure have allowed kwargs appropriate for mode
         for key in kwargs:
             if key not in allowedkwargs[mode]:
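For illustration (not part of the patch): with the basestring check, a unicode mode name is treated as a mode name on Python 2 as well as Python 3, rather than falling through to the callable-mode branch (the motivation assumed here):

    import numpy as np

    # 'constant' is just an example mode; the point is the u'' literal.
    print(np.pad(np.arange(3), 1, mode=u'constant'))   # expected: [0 0 1 2 0]
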
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 844c069..6a64ebe 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -124,7 +124,7 @@ def _hist_optim_numbins_estimator(a, estimator):
 
     def fd(x):
         """
-        Freedman Diaconis rule using Inter Quartile Range (IQR) for binwidth
+        Freedman Diaconis rule using interquartile range (IQR) for binwidth
         Considered a variation of the Scott rule with more robustness as the IQR
         is less affected by outliers than the standard deviation. However the IQR depends on
         fewer points than the sd so it is less accurate, especially for long tailed distributions.
@@ -944,11 +944,16 @@ def piecewise(x, condlist, funclist, *args, **kw):
             condlist = condlist.T
     if n == n2 - 1:  # compute the "otherwise" condition.
         totlist = np.logical_or.reduce(condlist, axis=0)
-        condlist = np.vstack([condlist, ~totlist])
+        # Only able to stack vertically if the array is 1d or less
+        if x.ndim <= 1:
+            condlist = np.vstack([condlist, ~totlist])
+        else:
+            condlist = [asarray(c, dtype=bool) for c in condlist]
+            totlist = condlist[0]
+            for k in range(1, n):
+                totlist |= condlist[k]
+            condlist.append(~totlist)
         n += 1
-    if (n != n2):
-        raise ValueError(
-                "function list and condition list must be the same")
 
     y = zeros(x.shape, x.dtype)
     for k in range(n):
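The new branch above lets the implicit "otherwise" condition be built for inputs with more than one dimension, where np.vstack on the boolean masks does not apply. A quick, illustrative call (values chosen here, not taken from the patch):

    import numpy as np

    x = np.array([[-2.5,  1.5],
                  [ 3.0, -0.5]])

    # One explicit condition plus the implicit "otherwise": abs(x) piecewise.
    y = np.piecewise(x, [x < 0], [lambda v: -v, lambda v: v])
    print(y)
    # expected:
    # [[ 2.5  1.5]
    #  [ 3.   0.5]]
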
@@ -1022,7 +1027,7 @@ def select(condlist, choicelist, default=0):
     dtype = np.result_type(*choicelist)
 
     # Convert conditions to arrays and broadcast conditions and choices
-    # as the shape is needed for the result. Doing it seperatly optimizes
+    # as the shape is needed for the result. Doing it separately optimizes
     # for example when all choices are scalars.
     condlist = np.broadcast_arrays(*condlist)
     choicelist = np.broadcast_arrays(*choicelist)
@@ -1244,7 +1249,7 @@ def gradient(f, *varargs, **kwargs):
 
     # Convert datetime64 data into ints. Make dummy variable `y`
     # that is a view of ints if the data is datetime64, otherwise
-    # just set y equal to the the array `f`.
+    # just set y equal to the array `f`.
     if f.dtype.char in ["M", "m"]:
         y = f.view('int64')
     else:
@@ -3228,22 +3233,22 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
     ----------
     a : array_like
         Input array or object that can be converted to an array.
-    axis : int or sequence of int, optional
-        Axis along which the medians are computed. The default (axis=None)
+    axis : {int, sequence of int, None}, optional
+        Axis or axes along which the medians are computed. The default
         is to compute the median along a flattened version of the array.
         A sequence of axes is supported since version 1.9.0.
     out : ndarray, optional
-        Alternative output array in which to place the result. It must have
-        the same shape and buffer length as the expected output, but the
-        type (of the output) will be cast if necessary.
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
     overwrite_input : bool, optional
-       If True, then allow use of memory of input array (a) for
+       If True, then allow use of memory of input array `a` for
        calculations. The input array will be modified by the call to
-       median. This will save memory when you do not need to preserve the
-       contents of the input array. Treat the input as undefined, but it
-       will probably be fully or partially sorted. Default is False. Note
-       that, if `overwrite_input` is True and the input is not already an
-       ndarray, an error will be raised.
+       `median`. This will save memory when you do not need to preserve
+       the contents of the input array. Treat the input as undefined,
+       but it will probably be fully or partially sorted. Default is
+       False. If `overwrite_input` is ``True`` and `a` is not already an
+       `ndarray`, an error will be raised.
     keepdims : bool, optional
         If this is set to True, the axes which are reduced are left
         in the result as dimensions with size one. With this option,
@@ -3251,15 +3256,14 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
 
         .. versionadded:: 1.9.0
 
-
     Returns
     -------
     median : ndarray
-        A new array holding the result (unless `out` is specified, in which
-        case that array is returned instead).  If the input contains
-        integers, or floats of smaller precision than 64, then the output
-        data-type is float64.  Otherwise, the output data-type is the same
-        as that of the input.
+        A new array holding the result. If the input contains integers
+        or floats smaller than ``float64``, then the output data-type is
+        ``np.float64``.  Otherwise, the data-type of the output is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
 
     See Also
     --------
@@ -3267,10 +3271,10 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
 
     Notes
     -----
-    Given a vector V of length N, the median of V is the middle value of
-    a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
-    odd.  When N is even, it is the average of the two middle values of
-    ``V_sorted``.
+    Given a vector ``V`` of length ``N``, the median of ``V`` is the
+    middle value of a sorted copy of ``V``, ``V_sorted`` - i
+    e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
+    two middle values of ``V_sorted`` when ``N`` is even.
 
     Examples
     --------
@@ -3383,73 +3387,79 @@ def percentile(a, q, axis=None, out=None,
     """
     Compute the qth percentile of the data along the specified axis.
 
-    Returns the qth percentile of the array elements.
+    Returns the qth percentile(s) of the array elements.
 
     Parameters
     ----------
     a : array_like
         Input array or object that can be converted to an array.
     q : float in range of [0,100] (or sequence of floats)
-        Percentile to compute which must be between 0 and 100 inclusive.
-    axis : int or sequence of int, optional
-        Axis along which the percentiles are computed. The default (None)
-        is to compute the percentiles along a flattened version of the array.
-        A sequence of axes is supported since version 1.9.0.
+        Percentile to compute, which must be between 0 and 100 inclusive.
+    axis : {int, sequence of int, None}, optional
+        Axis or axes along which the percentiles are computed. The
+        default is to compute the percentile(s) along a flattened
+        version of the array. A sequence of axes is supported since
+        version 1.9.0.
     out : ndarray, optional
         Alternative output array in which to place the result. It must
         have the same shape and buffer length as the expected output,
         but the type (of the output) will be cast if necessary.
     overwrite_input : bool, optional
-        If True, then allow use of memory of input array `a` for
+        If True, then allow use of memory of input array `a` 
         calculations. The input array will be modified by the call to
-        percentile. This will save memory when you do not need to preserve
-        the contents of the input array. In this case you should not make
-        any assumptions about the content of the passed in array `a` after
-        this function completes -- treat it as undefined. Default is False.
-        Note that, if the `a` input is not already an array this parameter
-        will have no effect, `a` will be converted to an array internally
-        regardless of the value of this parameter.
+        `percentile`. This will save memory when you do not need to
+        preserve the contents of the input array. In this case you
+        should not make any assumptions about the contents of the input
+        `a` after this function completes -- treat it as undefined.
+        Default is False. If `a` is not already an array, this parameter
+        will have no effect as `a` will be converted to an array
+        internally regardless of the value of this parameter.
     interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
-        This optional parameter specifies the interpolation method to use,
-        when the desired quantile lies between two data points `i` and `j`:
-            * linear: `i + (j - i) * fraction`, where `fraction` is the
-              fractional part of the index surrounded by `i` and `j`.
-            * lower: `i`.
-            * higher: `j`.
-            * nearest: `i` or `j` whichever is nearest.
-            * midpoint: (`i` + `j`) / 2.
+        This optional parameter specifies the interpolation method to
+        use when the desired quantile lies between two data points
+        ``i < j``:
+            * linear: ``i + (j - i) * fraction``, where ``fraction``
+              is the fractional part of the index surrounded by ``i``
+              and ``j``.
+            * lower: ``i``.
+            * higher: ``j``.
+            * nearest: ``i`` or ``j``, whichever is nearest.
+            * midpoint: ``(i + j) / 2``.
 
         .. versionadded:: 1.9.0
     keepdims : bool, optional
-        If this is set to True, the axes which are reduced are left
-        in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original array `a`.
+        If this is set to True, the axes which are reduced are left in
+        the result as dimensions with size one. With this option, the
+        result will broadcast correctly against the original array `a`.
 
         .. versionadded:: 1.9.0
 
     Returns
     -------
     percentile : scalar or ndarray
-        If a single percentile `q` is given and axis=None a scalar is
-        returned.  If multiple percentiles `q` are given an array holding
-        the result is returned. The results are listed in the first axis.
-        (If `out` is specified, in which case that array is returned
-        instead).  If the input contains integers, or floats of smaller
-        precision than 64, then the output data-type is float64. Otherwise,
-        the output data-type is the same as that of the input.
+        If `q` is a single percentile and `axis=None`, then the result
+        is a scalar. If multiple percentiles are given, first axis of
+        the result corresponds to the percentiles. The other axes are
+        the axes that remain after the reduction of `a`. If the input 
+        contains integers or floats smaller than ``float64``, the output
+        data-type is ``float64``. Otherwise, the output data-type is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
 
     See Also
     --------
-    mean, median
+    mean, median, nanpercentile
 
     Notes
     -----
-    Given a vector V of length N, the q-th percentile of V is the q-th ranked
-    value in a sorted copy of V.  The values and distances of the two
-    nearest neighbors as well as the `interpolation` parameter will
-    determine the percentile if the normalized ranking does not match q
-    exactly. This function is the same as the median if ``q=50``, the same
-    as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
+    Given a vector ``V`` of length ``N``, the ``q``-th percentile of
+    ``V`` is the value ``q/100`` of the way from the mimumum to the
+    maximum in in a sorted copy of ``V``. The values and distances of
+    the two nearest neighbors as well as the `interpolation` parameter
+    will determine the percentile if the normalized ranking does not
+    match the location of ``q`` exactly. This function is the same as
+    the median if ``q=50``, the same as the minimum if ``q=0`` and the
+    same as the maximum if ``q=100``.
 
     Examples
     --------
@@ -3458,28 +3468,26 @@ def percentile(a, q, axis=None, out=None,
     array([[10,  7,  4],
            [ 3,  2,  1]])
     >>> np.percentile(a, 50)
-    array([ 3.5])
+    3.5
     >>> np.percentile(a, 50, axis=0)
     array([[ 6.5,  4.5,  2.5]])
     >>> np.percentile(a, 50, axis=1)
+    array([ 7.,  2.])
+    >>> np.percentile(a, 50, axis=1, keepdims=True)
     array([[ 7.],
            [ 2.]])
 
     >>> m = np.percentile(a, 50, axis=0)
     >>> out = np.zeros_like(m)
-    >>> np.percentile(a, 50, axis=0, out=m)
+    >>> np.percentile(a, 50, axis=0, out=out)
     array([[ 6.5,  4.5,  2.5]])
     >>> m
     array([[ 6.5,  4.5,  2.5]])
 
     >>> b = a.copy()
     >>> np.percentile(b, 50, axis=1, overwrite_input=True)
-    array([[ 7.],
-           [ 2.]])
-    >>> assert not np.all(a==b)
-    >>> b = a.copy()
-    >>> np.percentile(b, 50, axis=None, overwrite_input=True)
-    array([ 3.5])
+    array([ 7.,  2.])
+    >>> assert not np.all(a == b)
 
     """
     q = array(q, dtype=np.float64, copy=True)
@@ -3541,7 +3549,7 @@ def _percentile(a, q, axis=None, out=None,
     elif interpolation == 'higher':
         indices = ceil(indices).astype(intp)
     elif interpolation == 'midpoint':
-        indices = floor(indices) + 0.5
+        indices = 0.5 * (floor(indices) + ceil(indices))
     elif interpolation == 'nearest':
         indices = around(indices).astype(intp)
     elif interpolation == 'linear':
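The midpoint change matters when the target index is already a whole number: `floor(indices) + 0.5` pushed such an index half a step high, while averaging floor and ceil leaves it alone. A small check (expected values under that reading, not taken from the patch):

    import numpy as np

    # q=50 on three points lands exactly on index 1: midpoint should return
    # the element itself rather than interpolating past it.
    print(np.percentile(np.array([0.0, 1.0, 2.0]), 50,
                        interpolation='midpoint'))   # expected: 1.0

    # Between two points the index is 0.5, and midpoint still averages.
    print(np.percentile(np.array([0.0, 1.0]), 50,
                        interpolation='midpoint'))   # expected: 0.5
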
@@ -3619,7 +3627,7 @@ def _percentile(a, q, axis=None, out=None,
             r = add(x1, x2)
 
     if np.any(n):
-        warnings.warn("Invalid value encountered in median",
+        warnings.warn("Invalid value encountered in percentile",
                               RuntimeWarning)
         if zerod:
             if ap.ndim == 1:
@@ -3731,7 +3739,8 @@ def trapz(y, x=None, dx=1.0, axis=-1):
 
 #always succeed
 def add_newdoc(place, obj, doc):
-    """Adds documentation to obj which is in module place.
+    """
+    Adds documentation to obj which is in module place.
 
     If doc is a string add it to obj as a docstring
 
@@ -3749,7 +3758,7 @@ def add_newdoc(place, obj, doc):
     in new-style classes or built-in functions. Because this
     routine never raises an error the caller must check manually
     that the docstrings were changed.
-       """
+    """
     try:
         new = getattr(__import__(place, globals(), {}, [obj]), obj)
         if isinstance(doc, str):
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 6b28b4a..8fe7afd 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -729,8 +729,9 @@ def _nanmedian(a, axis=None, out=None, overwrite_input=False):
 
 def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
     """
-    sort + indexing median, faster for small medians along multiple dimensions
-    due to the high overhead of apply_along_axis
+    sort + indexing median, faster for small medians along multiple
+    dimensions due to the high overhead of apply_along_axis
+
     see nanmedian for parameter usage
     """
     a = np.ma.masked_array(a, np.isnan(a))
@@ -754,36 +755,35 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
     ----------
     a : array_like
         Input array or object that can be converted to an array.
-    axis : int, optional
-        Axis along which the medians are computed. The default (axis=None)
+    axis : {int, sequence of int, None}, optional
+        Axis or axes along which the medians are computed. The default
         is to compute the median along a flattened version of the array.
         A sequence of axes is supported since version 1.9.0.
     out : ndarray, optional
-        Alternative output array in which to place the result. It must have
-        the same shape and buffer length as the expected output, but the
-        type (of the output) will be cast if necessary.
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
     overwrite_input : bool, optional
-       If True, then allow use of memory of input array (a) for
+       If True, then allow use of memory of input array `a` for
        calculations. The input array will be modified by the call to
-       median. This will save memory when you do not need to preserve
+       `median`. This will save memory when you do not need to preserve
        the contents of the input array. Treat the input as undefined,
        but it will probably be fully or partially sorted. Default is
-       False. Note that, if `overwrite_input` is True and the input
-       is not already an ndarray, an error will be raised.
+       False. If `overwrite_input` is ``True`` and `a` is not already an
+       `ndarray`, an error will be raised.
     keepdims : bool, optional
-        If this is set to True, the axes which are reduced are left
-        in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `arr`.
-
-
+        If this is set to True, the axes which are reduced are left in
+        the result as dimensions with size one. With this option, the
+        result will broadcast correctly against the original `arr`.
 
     Returns
     -------
     median : ndarray
-        A new array holding the result. If the input contains integers, or
-        floats of smaller precision than 64, then the output data-type is
-        float64.  Otherwise, the output data-type is the same as that of the
-        input.
+        A new array holding the result. If the input contains integers
+        or floats smaller than ``float64``, then the output data-type is
+        ``np.float64``.  Otherwise, the data-type of the output is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
 
     See Also
     --------
@@ -791,10 +791,10 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
 
     Notes
     -----
-    Given a vector V of length N, the median of V is the middle value of
-    a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
-    odd.  When N is even, it is the average of the two middle values of
-    ``V_sorted``.
+    Given a vector ``V`` of length ``N``, the median of ``V`` is the
+    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+    ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
+    middle values of ``V_sorted`` when ``N`` is even.
 
     Examples
     --------
@@ -838,10 +838,10 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
 def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                   interpolation='linear', keepdims=False):
     """
-    Compute the qth percentile of the data along the specified axis, while
-    ignoring nan values.
+    Compute the qth percentile of the data along the specified axis,
+    while ignoring nan values.
 
-    Returns the qth percentile of the array elements.
+    Returns the qth percentile(s) of the array elements.
 
     .. versionadded:: 1.9.0
 
@@ -850,11 +850,13 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
     a : array_like
         Input array or object that can be converted to an array.
     q : float in range of [0,100] (or sequence of floats)
-        Percentile to compute which must be between 0 and 100 inclusive.
-    axis : int or sequence of int, optional
-        Axis along which the percentiles are computed. The default (None)
-        is to compute the percentiles along a flattened version of the array.
-        A sequence of axes is supported since version 1.9.0.
+        Percentile to compute, which must be between 0 and 100
+        inclusive.
+    axis : {int, sequence of int, None}, optional
+        Axis or axes along which the percentiles are computed. The
+        default is to compute the percentile(s) along a flattened
+        version of the array. A sequence of axes is supported since
+        version 1.9.0.
     out : ndarray, optional
         Alternative output array in which to place the result. It must
         have the same shape and buffer length as the expected output,
@@ -862,39 +864,40 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
     overwrite_input : bool, optional
         If True, then allow use of memory of input array `a` for
         calculations. The input array will be modified by the call to
-        percentile. This will save memory when you do not need to preserve
-        the contents of the input array. In this case you should not make
-        any assumptions about the content of the passed in array `a` after
-        this function completes -- treat it as undefined. Default is False.
-        Note that, if the `a` input is not already an array this parameter
-        will have no effect, `a` will be converted to an array internally
-        regardless of the value of this parameter.
+        `percentile`. This will save memory when you do not need to
+        preserve the contents of the input array. In this case you
+        should not make any assumptions about the contents of the input
+        `a` after this function completes -- treat it as undefined.
+        Default is False. If `a` is not already an array, this parameter
+        will have no effect as `a` will be converted to an array
+        internally regardless of the value of this parameter.
     interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
-        This optional parameter specifies the interpolation method to use,
-        when the desired quantile lies between two data points `i` and `j`:
-            * linear: `i + (j - i) * fraction`, where `fraction` is the
-              fractional part of the index surrounded by `i` and `j`.
-            * lower: `i`.
-            * higher: `j`.
-            * nearest: `i` or `j` whichever is nearest.
-            * midpoint: (`i` + `j`) / 2.
-
+        This optional parameter specifies the interpolation method to
+        use when the desired quantile lies between two data points
+        ``i < j``:
+            * linear: ``i + (j - i) * fraction``, where ``fraction`` is
+              the fractional part of the index surrounded by ``i`` and
+              ``j``.
+            * lower: ``i``.
+            * higher: ``j``.
+            * nearest: ``i`` or ``j``, whichever is nearest.
+            * midpoint: ``(i + j) / 2``.
     keepdims : bool, optional
-        If this is set to True, the axes which are reduced are left
-        in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `arr`.
-
+        If this is set to True, the axes which are reduced are left in
+        the result as dimensions with size one. With this option, the
+        result will broadcast correctly against the original array `a`.
 
     Returns
     -------
-    nanpercentile : scalar or ndarray
-        If a single percentile `q` is given and axis=None a scalar is
-        returned.  If multiple percentiles `q` are given an array holding
-        the result is returned. The results are listed in the first axis.
-        (If `out` is specified, in which case that array is returned
-        instead).  If the input contains integers, or floats of smaller
-        precision than 64, then the output data-type is float64. Otherwise,
-        the output data-type is the same as that of the input.
+    percentile : scalar or ndarray
+        If `q` is a single percentile and `axis=None`, then the result
+        is a scalar. If multiple percentiles are given, first axis of
+        the result corresponds to the percentiles. The other axes are
+        the axes that remain after the reduction of `a`. If the input 
+        contains integers or floats smaller than ``float64``, the output
+        data-type is ``float64``. Otherwise, the output data-type is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
 
     See Also
     --------
@@ -902,12 +905,14 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
 
     Notes
     -----
-    Given a vector V of length N, the q-th percentile of V is the q-th ranked
-    value in a sorted copy of V.  The values and distances of the two
-    nearest neighbors as well as the `interpolation` parameter will
-    determine the percentile if the normalized ranking does not match q
-    exactly. This function is the same as the median if ``q=50``, the same
-    as the minimum if ``q=0``and the same as the maximum if ``q=100``.
+    Given a vector ``V`` of length ``N``, the ``q``-th percentile of
+    ``V`` is the value ``q/100`` of the way from the mimumum to the
+    maximum in in a sorted copy of ``V``. The values and distances of
+    the two nearest neighbors as well as the `interpolation` parameter
+    will determine the percentile if the normalized ranking does not
+    match the location of ``q`` exactly. This function is the same as
+    the median if ``q=50``, the same as the minimum if ``q=0`` and the
+    same as the maximum if ``q=100``.
 
     Examples
     --------
@@ -921,24 +926,21 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
     >>> np.nanpercentile(a, 50)
     3.5
     >>> np.nanpercentile(a, 50, axis=0)
-    array([[ 6.5,  4.5,  2.5]])
-    >>> np.nanpercentile(a, 50, axis=1)
+    array([ 6.5,  2.,   2.5])
+    >>> np.nanpercentile(a, 50, axis=1, keepdims=True)
     array([[ 7.],
            [ 2.]])
     >>> m = np.nanpercentile(a, 50, axis=0)
     >>> out = np.zeros_like(m)
-    >>> np.nanpercentile(a, 50, axis=0, out=m)
-    array([[ 6.5,  4.5,  2.5]])
+    >>> np.nanpercentile(a, 50, axis=0, out=out)
+    array([ 6.5,  2.,   2.5])
     >>> m
-    array([[ 6.5,  4.5,  2.5]])
... 379 lines suppressed ...

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/python-numpy.git