[Git][debian-gis-team/netcdf4-python][master] 4 commits: New upstream version 1.5.4

Bas Couwenberg gitlab at salsa.debian.org
Thu Jul 23 05:27:37 BST 2020



Bas Couwenberg pushed to branch master at Debian GIS Project / netcdf4-python


Commits:
8155fb8b by Bas Couwenberg at 2020-07-23T05:51:40+02:00
New upstream version 1.5.4
- - - - -
70f08fce by Bas Couwenberg at 2020-07-23T05:51:43+02:00
Update upstream source from tag 'upstream/1.5.4'

Update to upstream version '1.5.4'
with Debian dir 5e3b117b70aacfa603c970127350d7f44263959c
- - - - -
706c4282 by Bas Couwenberg at 2020-07-23T05:56:52+02:00
New upstream release.

- - - - -
1ab22765 by Bas Couwenberg at 2020-07-23T05:58:25+02:00
Set distribution to unstable.

- - - - -


14 changed files:

- .appveyor.yml
- .travis.yml
- Changelog
- README.md
- debian/changelog
- docs/netCDF4/index.html
- netCDF4/__init__.py
- netCDF4/_netCDF4.pyx
- netCDF4/utils.py
- setup.py
- + test/tst_chunk_cache.py
- test/tst_grps.py
- test/tst_masked4.py
- test/tst_vlen.py


Changes:

=====================================
.appveyor.yml
=====================================
@@ -1,16 +1,14 @@
 environment:
-  CONDA_INSTALL_LOCN: C:\\Miniconda36-x64
+  PYTHON: "C:\\myminiconda3"
   matrix:
-    - TARGET_ARCH: x64
-      NPY: 1.16
-      PY: 3.6
-
-    - TARGET_ARCH: x64
-      NPY: 1.16
+    - NPY: 1.17
       PY: 3.7
 
-platform:
-  - x64
+    - NPY: 1.17
+      PY: 3.8
+
+init:
+  - "ECHO %PYTHON_VERSION% %MINICONDA%"
 
 install:
   # If there is a newer build queued for the same PR, cancel this one.
@@ -24,30 +22,36 @@ install:
         throw "There are newer queued builds for this pull request, failing early." }
 
   # Add path, activate `conda` and update conda.
-  - cmd: call %CONDA_INSTALL_LOCN%\Scripts\activate.bat
-  - cmd: conda config --set always_yes yes --set changeps1 no --set show_channel_urls true
-  - cmd: conda update conda
-  - cmd: conda config --add channels conda-forge --force
-  - cmd: conda config --set channel_priority strict
-  - cmd: set PYTHONUNBUFFERED=1
-  - cmd: conda install conda-build vs2008_express_vc_python_patch
-  - cmd: call setup_x64
-
-  - cmd: conda create --name TEST python=%PY% numpy=%NPY% cython pip pytest hdf5 libnetcdf cftime
-  - cmd: conda info --all
-  - cmd: conda activate TEST
-
-  - cmd: echo [options] > setup.cfg
-  - cmd: echo [directories] >> setup.cfg
-  - cmd: echo HDF5_libdir = %CONDA_PREFIX%\Library\lib >> setup.cfg
-  - cmd: echo HDF5_incdir = %CONDA_PREFIX%\Library\include >> setup.cfg
-  - cmd: echo netCDF4_libdir = %CONDA_PREFIX%\Library\lib >> setup.cfg
-  - cmd: echo netCDF4_incdir = %CONDA_PREFIX%\Library\include >> setup.cfg
+  - set URL="https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe"
+  - curl -fsS -o miniconda3.exe %URL%
+  - start /wait "" miniconda3.exe /InstallationType=JustMe /RegisterPython=0 /S /D=%PYTHON%
+
+  - "set PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+  - call %PYTHON%\Scripts\activate
+
+  - conda config --set always_yes yes --set changeps1 no --set show_channel_urls true
+  - conda config --add channels conda-forge --force
+  - set PYTHONUNBUFFERED=1
+  - conda install conda-build vs2008_express_vc_python_patch
+  - call setup_x64
+
+  - conda create --name TEST python=%PY% numpy=%NPY% cython pip pytest hdf5 libnetcdf cftime
+  - conda activate TEST
+
+  - echo [options] > setup.cfg
+  - echo [directories] >> setup.cfg
+  - echo HDF5_libdir = %CONDA_PREFIX%\Library\lib >> setup.cfg
+  - echo HDF5_incdir = %CONDA_PREFIX%\Library\include >> setup.cfg
+  - echo netCDF4_libdir = %CONDA_PREFIX%\Library\lib >> setup.cfg
+  - echo netCDF4_incdir = %CONDA_PREFIX%\Library\include >> setup.cfg
+
+  - conda info --all
+  - conda list
 
 # Skip .NET project specific build phase.
 build: off
 
 test_script:
-  - python -m pip install . --no-deps --ignore-installed --no-cache-dir -vvv
+  - python -m pip install . --no-deps --ignore-installed --no-cache-dir -vv
   - set NO_NET=1
   - cd test && python run_all.py


=====================================
.travis.yml
=====================================
@@ -16,16 +16,13 @@ env:
     - MPI=0
 
 python:
-  - "2.7"
-  - "3.5"
   - "3.6"
   - "3.7"
-  - "3.8-dev"
+  - "3.8"
 
 matrix:
   allow_failures:
-    - python: "3.8-dev"
-    - python: 3.7
+    - python: 3.8
       env:
         - MPI=1
         - CC=mpicc.mpich
@@ -34,33 +31,14 @@ matrix:
         - NETCDF_DIR=$HOME
         - PATH=${NETCDF_DIR}/bin:${PATH} # pick up nc-config here
   include:
-    # Absolute minimum dependencies.
-    - python: 2.7
-      env:
-        - DEPENDS="numpy==1.10.0 cython==0.21 ordereddict==1.1 setuptools==18.0 cftime"
-    # test MPI with latest released version
-    - python: 3.7
-      env: 
-        - MPI=1
-        - CC=mpicc.mpich
-        - DEPENDS="numpy>=1.10.0 cython>=0.21 setuptools>=18.0 mpi4py>=1.3.1 cftime"
-        - NETCDF_VERSION=4.6.3
-        - NETCDF_DIR=$HOME
-        - PATH=${NETCDF_DIR}/bin:${PATH} # pick up nc-config here
-      addons:
-        apt:
-          packages:
-            - mpich
-            - libmpich-dev
-            - libhdf5-mpich-dev
     # test MPI with latest released version
-    - python: 3.7
+    - python: 3.8
       env: 
         - MPI=1
         - CC=mpicc.mpich
         - DEPENDS="numpy>=1.10.0 cython>=0.21 setuptools>=18.0 mpi4py>=1.3.1 cftime"
-        - NETCDF_VERSION=4.6.3
-        - PNETCDF_VERSION=1.11.0
+        - NETCDF_VERSION=4.7.3
+        - PNETCDF_VERSION=1.12.1
         - NETCDF_DIR=$HOME
         - PATH=${NETCDF_DIR}/bin:${PATH} # pick up nc-config here
       addons:
@@ -70,7 +48,7 @@ matrix:
             - libmpich-dev
             - libhdf5-mpich-dev
     # test with netcdf-c from github master
-    - python: 3.7
+    - python: 3.8
       env:
         - MPI=1
         - CC=mpicc.mpich
@@ -89,7 +67,6 @@ notifications:
   email: false
 
 before_install:
-  - pip install Cython # workaround for pip bug
   - pip install $DEPENDS
 
 install:


=====================================
Changelog
=====================================
@@ -1,3 +1,18 @@
+ version 1.5.4 (tag v1.5.4rel)
+==============================
+ * fix printing of variable objects for variables that end with the letter 'u'
+   (issue #983).
+ * make sure root group has 'name' attribute (issue #988).
+ * add the ability to pack vlen floats to integers using
+   scale_factor/add_offset (issue #1003)
+ * use len instead of deprecated numpy.alen (issue #1008)
+ * check size on valid_range instead of using len (issue #1013).
+ * add `set_chunk_cache/get_chunk_cache` module functions to reset the
+   default chunk cache sizes before opening a Dataset (issue #1018).
+ * replace use of numpy's deprecated tostring() method with tobytes()
+   (issue #1023).
+ * bump minimal numpy version to 1.9 (first version to have tobytes()).
+
  version 1.5.3 (tag v1.5.3rel)
 ==============================
  * make sure arrays are masked that are not filled when auto_fill is off 

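A minimal usage sketch for the new module-level cache functions (assumes
netCDF4 1.5.4 is installed; 'data.nc' is a placeholder):

    import netCDF4
    # (size, nelems, preemption) of the default HDF5 chunk cache
    size, nelems, preemption = netCDF4.get_chunk_cache()
    # enlarge the cache before opening a Dataset; None leaves a value unchanged
    netCDF4.set_chunk_cache(size=2 * size, nelems=None, preemption=None)
    nc = netCDF4.Dataset('data.nc', mode='r')  # placeholder file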

=====================================
README.md
=====================================
@@ -10,6 +10,9 @@
 ## News
 For details on the latest updates, see the [Changelog](https://github.com/Unidata/netcdf4-python/blob/master/Changelog).
 
+07/23/2020: Version [1.5.4](https://pypi.python.org/pypi/netCDF4/1.5.4) released. Now requires
+numpy >= 1.9.
+ 
 10/27/2019: Version [1.5.3](https://pypi.python.org/pypi/netCDF4/1.5.3) released]. Fix for
 [issue #972](https://github.com/Unidata/netcdf4-python/issues/972), plus binary wheels for
 python 3.8.


=====================================
debian/changelog
=====================================
@@ -1,12 +1,13 @@
-netcdf4-python (1.5.3-2) UNRELEASED; urgency=medium
+netcdf4-python (1.5.4-1) unstable; urgency=medium
 
+  * New upstream release.
   * Drop Provides: ${python3:Provides}.
   * Drop Name field from upstream metadata.
   * Bump Standards-Version to 4.5.0, no changes.
   * Bump debhelper compat to 10, changes:
     - Drop --parallel option, enabled by default
 
- -- Bas Couwenberg <sebastic at debian.org>  Thu, 07 Nov 2019 18:39:23 +0100
+ -- Bas Couwenberg <sebastic at debian.org>  Thu, 23 Jul 2020 05:58:14 +0200
 
 netcdf4-python (1.5.3-1) unstable; urgency=medium
 


=====================================
docs/netCDF4/index.html
=====================================
The diff for this file was not included because it is too large.

=====================================
netCDF4/__init__.py
=====================================
@@ -9,4 +9,4 @@ from ._netCDF4 import (__version__, __netcdf4libversion__, __hdf5libversion__,
                        __has_nc_create_mem__, __has_cdf5_format__,
                        __has_parallel4_support__, __has_pnetcdf_support__)
 __all__ =\
-['Dataset','Variable','Dimension','Group','MFDataset','MFTime','CompoundType','VLType','date2num','num2date','date2index','stringtochar','chartostring','stringtoarr','getlibversion','EnumType']
+['Dataset','Variable','Dimension','Group','MFDataset','MFTime','CompoundType','VLType','date2num','num2date','date2index','stringtochar','chartostring','stringtoarr','getlibversion','EnumType','get_chunk_cache','set_chunk_cache']


=====================================
netCDF4/_netCDF4.pyx
=====================================
@@ -1,5 +1,5 @@
 """
-Version 1.5.3
+Version 1.5.4
 ---------------
 - - -
 
@@ -36,7 +36,6 @@ Download
 Requires
 ========
 
- - Python 2.7 or later (python 3 works too).
  - [numpy array module](http://numpy.scipy.org), version 1.10.0 or later.
  - [Cython](http://cython.org), version 0.21 or later.
  - [setuptools](https://pypi.python.org/pypi/setuptools), version 18.0 or
@@ -1206,7 +1205,7 @@ except ImportError:
     # python3: zip is already python2's itertools.izip
     pass
 
-__version__ = "1.5.3"
+__version__ = "1.5.4"
 
 # Initialize numpy
 import posixpath
@@ -1252,6 +1251,48 @@ used to build the module, and when it was built.
     """
     return (<char *>nc_inq_libvers()).decode('ascii')
 
+def get_chunk_cache():
+    """
+**`get_chunk_cache()`**
+
+return current netCDF chunk cache information in a tuple (size,nelems,preemption).
+See netcdf C library documentation for `nc_get_chunk_cache` for
+details. Values can be reset with `netCDF4.set_chunk_cache`."""
+    cdef int ierr
+    cdef size_t sizep, nelemsp
+    cdef float preemptionp
+    ierr = nc_get_chunk_cache(&sizep, &nelemsp, &preemptionp)
+    _ensure_nc_success(ierr)
+    size = sizep; nelems = nelemsp; preemption = preemptionp
+    return (size,nelems,preemption)
+
+def set_chunk_cache(size=None,nelems=None,preemption=None):
+    """
+**`set_chunk_cache(size=None,nelems=None,preemption=None)`**
+
+change netCDF4 chunk cache settings.
+See netcdf C library documentation for `nc_set_chunk_cache` for
+details."""
+    cdef int ierr
+    cdef size_t sizep, nelemsp
+    cdef float preemptionp
+    # reset chunk cache size, leave other parameters unchanged.
+    size_orig, nelems_orig, preemption_orig = get_chunk_cache()
+    if size is not None:
+        sizep = size
+    else:
+        sizep = size_orig
+    if nelems is not None:
+        nelemsp = nelems
+    else:
+        nelemsp = nelems_orig
+    if preemption is not None:
+        preemptionp = preemption
+    else:
+        preemptionp = preemption_orig
+    ierr = nc_set_chunk_cache(sizep,nelemsp, preemptionp)
+    _ensure_nc_success(ierr)
+
 __netcdf4libversion__ = getlibversion().split()[0]
 __hdf5libversion__ = _gethdf5libversion()
 __has_rename_grp__ = HAS_RENAME_GRP
@@ -1414,10 +1455,10 @@ cdef _get_att(grp, int varid, name, encoding='utf-8'):
         if name == '_FillValue' and python3:
             # make sure _FillValue for character arrays is a byte on python 3
             # (issue 271).
-            pstring = value_arr.tostring()
+            pstring = value_arr.tobytes()
         else:
             pstring =\
-            value_arr.tostring().decode(encoding,errors='replace').replace('\x00','')
+            value_arr.tobytes().decode(encoding,errors='replace').replace('\x00','')
         return pstring
     elif att_type == NC_STRING:
         values = <char**>PyMem_Malloc(sizeof(char*) * att_len)
@@ -2095,10 +2136,6 @@ strings.
         desirable, since the associated Variable instances may still be needed, but are
         rendered unusable when the parent Dataset instance is garbage collected.
 
-        **`_ncstring_attrs__`**: if `_ncstring_attrs__=True`, all string attributes will use
-        the variable length NC_STRING attributes (default `False`, ascii text
-        attributes written as NC_CHAR).
-
         **`memory`**: if not `None`, create or open an in-memory Dataset.
         If mode = 'r', the memory kwarg must contain a memory buffer object
         (an object that supports the python buffer interface).
@@ -2422,13 +2459,16 @@ version 4.1.2 or higher of the netcdf C lib, and rebuild netcdf4-python."""
         ncdump = [repr(type(self))]
         dimnames = tuple(_tostr(dimname)+'(%s)'%len(self.dimensions[dimname])\
         for dimname in self.dimensions.keys())
-        varnames = tuple(\
-        [_tostr(self.variables[varname].dtype)+' '+_tostr(varname)+
-        (((_tostr(self.variables[varname].dimensions)
-        .replace("u'",""))\
-        .replace("'",""))\
-        .replace(", ",","))\
-        .replace(",)",")") for varname in self.variables.keys()])
+        if python3:
+            varnames = tuple(\
+            [_tostr(self.variables[varname].dtype)+' '+_tostr(varname)+
+             ((_tostr(self.variables[varname].dimensions)).replace(",)",")")).replace("'","")
+             for varname in self.variables.keys()])
+        else: # don't try to remove quotes in python2
+            varnames = tuple(\
+            [_tostr(self.variables[varname].dtype)+' '+_tostr(varname)+
+             (_tostr(self.variables[varname].dimensions)).replace(",)",")")
+             for varname in self.variables.keys()])
         grpnames = tuple(_tostr(grpname) for grpname in self.groups.keys())
         if self.path == '/':
             ncdump.append('root group (%s data model, file format %s):' %
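
The old quote-stripping removed every `u'` (the python2 unicode repr prefix),
which also swallowed a trailing 'u' in dimension names (issue #983). A
standalone illustration of the two pipelines, outside the patch:

    dims = ('tau',)
    old = str(dims).replace("u'", "").replace("'", "").replace(", ", ",").replace(",)", ")")
    new = str(dims).replace(",)", ")").replace("'", "")
    print(old)  # (ta)  -- trailing 'u' lost
    print(new)  # (tau)
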
@@ -2637,7 +2677,7 @@ datatype."""
         """
 **`createVariable(self, varname, datatype, dimensions=(), zlib=False,
 complevel=4, shuffle=True, fletcher32=False, contiguous=False, chunksizes=None,
-endian='native', least_significant_digit=None, fill_value=None)`**
+endian='native', least_significant_digit=None, fill_value=None, chunk_cache=None)`**
 
 Creates a new variable with the given `varname`, `datatype`, and
 `dimensions`. If dimensions are not given, the variable is assumed to be
@@ -3199,6 +3239,22 @@ attribute does not exist on the variable. For example,
 
         return vs
 
+    def _getname(self):
+        # private method to get name associated with instance.
+        cdef int ierr
+        cdef char namstring[NC_MAX_NAME+1]
+        with nogil:
+            ierr = nc_inq_grpname(self._grpid, namstring)
+        _ensure_nc_success(ierr)
+        return namstring.decode('utf-8')
+
+    property name:
+        """string name of Group instance"""
+        def __get__(self):
+            return self._getname()
+        def __set__(self,value):
+            raise AttributeError("name cannot be altered")
+
 
 cdef class Group(Dataset):
     """
@@ -3285,22 +3341,6 @@ overrides `netCDF4.Dataset` close method which does not apply to `netCDF4.Group`
 instances, raises IOError."""
         raise IOError('cannot close a `netCDF4.Group` (only applies to Dataset)')
 
-    def _getname(self):
-        # private method to get name associated with instance.
-        cdef int ierr
-        cdef char namstring[NC_MAX_NAME+1]
-        with nogil:
-            ierr = nc_inq_grpname(self._grpid, namstring)
-        _ensure_nc_success(ierr)
-        return namstring.decode('utf-8')
-
-    property name:
-        """string name of Group instance"""
-        def __get__(self):
-            return self._getname()
-        def __set__(self,value):
-            raise AttributeError("name cannot be altered")
-
 
 cdef class Dimension:
     """
@@ -3579,7 +3619,7 @@ behavior is similar to Fortran or Matlab, but different than numpy.
         **`__init__(self, group, name, datatype, dimensions=(), zlib=False,
         complevel=4, shuffle=True, fletcher32=False, contiguous=False,
         chunksizes=None, endian='native',
-        least_significant_digit=None,fill_value=None)`**
+        least_significant_digit=None,fill_value=None,chunk_cache=None)`**
 
         `netCDF4.Variable` constructor.
 
@@ -3660,6 +3700,10 @@ behavior is similar to Fortran or Matlab, but different than numpy.
         is replaced with this value.  If fill_value is set to `False`, then
         the variable is not pre-filled. The default netCDF fill values can be found
         in `netCDF4.default_fillvals`.
+      
+        **`chunk_cache`**: If specified, sets the chunk cache size for this variable.
+        Persists as long as Dataset is open. Use `netCDF4.Variable.set_var_chunk_cache` to
+        change it when Dataset is re-opened.
 
         ***Note***: `netCDF4.Variable` instances should be created using the
         `netCDF4.Dataset.createVariable` method of a `netCDF4.Dataset` or
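
A sketch of the new keyword (values illustrative; the per-variable cache
setting lasts only while this Dataset is open):

    from netCDF4 import Dataset
    nc = Dataset('scratch.nc', 'w')          # placeholder file
    nc.createDimension('x', 1000)
    v = nc.createVariable('v', 'f4', ('x',), chunk_cache=2**20)  # 1 MiB cache
    print(v.get_var_chunk_cache())           # (size, nelems, preemption)
    nc.close()
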
@@ -4431,7 +4475,8 @@ rename a `netCDF4.Variable` attribute named `oldname` to `newname`."""
         # set_auto_mask or set_auto_maskandscale), perform
         # automatic conversion to masked array using
         # missing_value/_Fill_Value.
-        # ignore for compound, vlen or enum datatypes.
+        # applied for primitive and (non-string) vlen,
+        # ignored for compound and enum datatypes.
         try: # check to see if scale_factor and add_offset is valid (issue 176).
             if hasattr(self,'scale_factor'): float(self.scale_factor)
             if hasattr(self,'add_offset'): float(self.add_offset)
@@ -4442,7 +4487,9 @@ rename a `netCDF4.Variable` attribute named `oldname` to `newname`."""
                 msg = 'invalid scale_factor or add_offset attribute, no unpacking done...'
                 warnings.warn(msg)
 
-        if self.mask and (self._isprimitive or self._isenum):
+        if self.mask and\
+           (self._isprimitive or self._isenum or\
+           (self._isvlen and self.dtype != str)):
             data = self._toma(data)
         else:
             # if attribute _Unsigned is True, and variable has signed integer
@@ -4452,7 +4499,9 @@ rename a `netCDF4.Variable` attribute named `oldname` to `newname`."""
                 if is_unsigned and data.dtype.kind == 'i':
                     data=data.view('%su%s'%(data.dtype.byteorder,data.dtype.itemsize))
 
-        if self.scale and self._isprimitive and valid_scaleoffset:
+        if self.scale and\
+           (self._isprimitive or (self._isvlen and self.dtype != str)) and\
+           valid_scaleoffset:
             # if variable has scale_factor and add_offset attributes, apply
             # them.
             if hasattr(self, 'scale_factor') and hasattr(self, 'add_offset'):
@@ -4602,7 +4651,7 @@ rename a `netCDF4.Variable` attribute named `oldname` to `newname`."""
         safe_validrange = self._check_safecast('valid_range')
         safe_validmin = self._check_safecast('valid_min')
         safe_validmax = self._check_safecast('valid_max')
-        if safe_validrange and len(self.valid_range) == 2:
+        if safe_validrange and self.valid_range.size == 2:
             validmin = numpy.array(self.valid_range[0], self.dtype)
             validmax = numpy.array(self.valid_range[1], self.dtype)
         else:
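
`len()` raises on the malformed 0-d `valid_range` from issue #1013, while
`.size` handles it quietly:

    import numpy as np
    vr = np.array(0, dtype='i2')  # scalar valid_range -- wrong, but seen in the wild
    # len(vr) -> TypeError: len() of unsized object
    print(vr.size)                # 1, so the `== 2` check simply fails
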
@@ -4668,6 +4717,51 @@ rename a `netCDF4.Variable` attribute named `oldname` to `newname`."""
 
         return data
 
+    def _pack(self,data):
+        # pack non-masked values using scale_factor and add_offset
+        if hasattr(self, 'scale_factor') and hasattr(self, 'add_offset'):
+            data = (data - self.add_offset)/self.scale_factor
+            if self.dtype.kind in 'iu': data = numpy.around(data)
+        elif hasattr(self, 'scale_factor'):
+            data = data/self.scale_factor
+            if self.dtype.kind in 'iu': data = numpy.around(data)
+        elif hasattr(self, 'add_offset'):
+            data = data - self.add_offset
+            if self.dtype.kind in 'iu': data = numpy.around(data)
+        if ma.isMA(data):
+            # if underlying data in masked regions of masked array
+            # corresponds to missing values, don't fill masked array -
+            # just use underlying data instead
+            if hasattr(self, 'missing_value') and \
+               numpy.all(numpy.in1d(data.data[data.mask],self.missing_value)):
+                data = data.data
+            else:
+                if hasattr(self, 'missing_value'):
+                    # if missing value is a scalar, use it as fill_value.
+                    # if missing value is a vector, raise an exception
+                    # since we then don't know how to fill in masked values.
+                    if numpy.array(self.missing_value).shape == ():
+                        fillval = self.missing_value
+                    else:
+                        msg="cannot assign fill_value for masked array when missing_value attribute is not a scalar"
+                        raise RuntimeError(msg)
+                    if numpy.array(fillval).shape != ():
+                        fillval = fillval[0]
+                elif hasattr(self, '_FillValue'):
+                    fillval = self._FillValue
+                else:
+                    fillval = default_fillvals[self.dtype.str[1:]]
+                # some versions of numpy have trouble handling
+                # MaskedConstants when filling - this is
+                # a workaround (issue #850)
+                if data.shape == (1,) and data.mask.all():
+                    data = numpy.array([fillval],self.dtype)
+                else:
+                    data = data.filled(fill_value=fillval)
+        if self.dtype != data.dtype:
+            data = data.astype(self.dtype) # cast data to var type, if necessary.
+        return data
+
     def _assign_vlen(self, elem, data):
         """private method to assign data to a single item in a VLEN variable"""
         cdef size_t *startp
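
`_pack` centralizes the packing arithmetic previously inlined in
`__setitem__`; for integer types the result is rounded. The core transform on
its own (names local to this sketch):

    import numpy as np
    scale_factor, add_offset = 0.01, 273.15
    data = np.array([273.15, 274.0, 275.5])
    packed = np.around((data - add_offset) / scale_factor).astype('i2')
    print(packed)                              # [  0  85 235]
    print(packed * scale_factor + add_offset)  # ~original values
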
@@ -4822,6 +4916,9 @@ cannot be safely cast to variable data type""" % attname
                     # issue 458, allow Ellipsis to be used for scalar var
                     if type(elem) == type(Ellipsis) and not\
                        len(self.dimensions): elem = 0
+                    # pack as integers if desired.
+                    if self.scale:
+                        data = self._pack(data)
                     self._assign_vlen(elem, data)
                     return
 
@@ -4877,65 +4974,10 @@ cannot be safely cast to variable data type""" % attname
         # exists (improves compression).
         if self._has_lsd:
             data = _quantize(data,self.least_significant_digit)
-        # if auto_scale mode set to True, (through
-        # a call to set_auto_scale or set_auto_maskandscale),
-        # perform automatic unpacking using scale_factor/add_offset.
-        # if auto_mask mode is set to True (through a call to
-        # set_auto_mask or set_auto_maskandscale), perform
-        # automatic conversion to masked array using
-        # valid_min,validmax,missing_value,_Fill_Value.
-        # ignore if not a primitive or enum data type (not compound or vlen).
-
-        # remove this since it causes suprising behaviour (issue #777)
-        # (missing_value should apply to scaled data, not unscaled data)
-        #if self.mask and (self._isprimitive or self._isenum):
-        #    # use missing_value as fill value.
-        #    # if no missing value set, use _FillValue.
-        #    if hasattr(self, 'scale_factor') or hasattr(self, 'add_offset'):
-        #        # if not masked, create a masked array.
-        #        if not ma.isMA(data): data = self._toma(data)
 
         if self.scale and self._isprimitive:
             # pack non-masked values using scale_factor and add_offset
-            if hasattr(self, 'scale_factor') and hasattr(self, 'add_offset'):
-                data = (data - self.add_offset)/self.scale_factor
-                if self.dtype.kind in 'iu': data = numpy.around(data)
-            elif hasattr(self, 'scale_factor'):
-                data = data/self.scale_factor
-                if self.dtype.kind in 'iu': data = numpy.around(data)
-            elif hasattr(self, 'add_offset'):
-                data = data - self.add_offset
-                if self.dtype.kind in 'iu': data = numpy.around(data)
-            if ma.isMA(data):
-                # if underlying data in masked regions of masked array
-                # corresponds to missing values, don't fill masked array -
-                # just use underlying data instead
-                if hasattr(self, 'missing_value') and \
-                   numpy.all(numpy.in1d(data.data[data.mask],self.missing_value)):
-                    data = data.data
-                else:
-                    if hasattr(self, 'missing_value'):
-                        # if missing value is a scalar, use it as fill_value.
-                        # if missing value is a vector, raise an exception
-                        # since we then don't know how to fill in masked values.
-                        if numpy.array(self.missing_value).shape == ():
-                            fillval = self.missing_value
-                        else:
-                            msg="cannot assign fill_value for masked array when missing_value attribute is not a scalar"
-                            raise RuntimeError(msg)
-                        if numpy.array(fillval).shape != ():
-                            fillval = fillval[0]
-                    elif hasattr(self, '_FillValue'):
-                        fillval = self._FillValue
-                    else:
-                        fillval = default_fillvals[self.dtype.str[1:]]
-                    # some versions of numpy have trouble handling
-                    # MaskedConstants when filling - this is is
-                    # a workaround (issue #850)
-                    if data.shape == (1,) and data.mask.all():
-                        data = numpy.array([fillval],self.dtype)
-                    else:
-                        data = data.filled(fill_value=fillval)
+            data = self._pack(data)
 
         # Fill output array with data chunks.
         for (a,b,c,i) in zip(start, count, stride, put_ind):
@@ -6091,9 +6133,9 @@ and shape `a.shape + (N,)`, where N is the length of each string in a."""
     if dtype not in ["S","U"]:
         raise ValueError("type must string or unicode ('S' or 'U')")
     if encoding in ['none','None','bytes']:
-        b = numpy.array(tuple(a.tostring()),'S1')
+        b = numpy.array(tuple(a.tobytes()),'S1')
     else:
-        b = numpy.array(tuple(a.tostring().decode(encoding)),dtype+'1')
+        b = numpy.array(tuple(a.tobytes().decode(encoding)),dtype+'1')
     b.shape = a.shape + (a.itemsize,)
     return b
 
@@ -6117,9 +6159,9 @@ returns a numpy string array with datatype `'UN'` (or `'SN'`) and shape
     if dtype not in ["S","U"]:
         raise ValueError("type must be string or unicode ('S' or 'U')")
     if encoding in ['none','None','bytes']:
-        bs = b.tostring()
+        bs = b.tobytes()
     else:
-        bs = b.tostring().decode(encoding)
+        bs = b.tobytes().decode(encoding)
     slen = int(b.shape[-1])
     if encoding in ['none','None','bytes']:
         a = numpy.array([bs[n1:n1+slen] for n1 in range(0,len(bs),slen)],'S'+repr(slen))

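The `tostring()` replacements are mechanical: `tobytes()` returns the same
bytes and has been available since numpy 1.9 (hence the bumped minimum), while
`tostring()` is deprecated in recent numpy releases:

    import numpy as np
    a = np.array([1, 2, 3], dtype='u1')
    assert a.tobytes() == b'\x01\x02\x03'  # identical to the old tostring() result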

=====================================
netCDF4/utils.py
=====================================
@@ -351,7 +351,7 @@ Boolean array must have the same shape as the data along this dimension."""
         # at this stage e is a slice, a scalar integer, or a 1d integer array.
         # integer array:  _get call for each True value
         if np.iterable(e):
-            sdim.append(np.alen(e))
+            sdim.append(len(e))
         # Scalar int or slice, just a single _get call
         else:
             sdim.append(1)

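`numpy.alen` is deprecated; in this branch `e` has already passed the
`np.iterable` check, so the builtin gives the same count:

    import numpy as np
    e = np.arange(5)
    assert np.iterable(e) and len(e) == 5  # previously np.alen(e)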

=====================================
setup.py
=====================================
@@ -4,7 +4,7 @@ from setuptools import setup, Extension
 from distutils.dist import Distribution
 
 setuptools_extra_kwargs = {
-    "install_requires": ["numpy>=1.7","cftime"],
+    "install_requires": ["numpy>=1.9","cftime"],
     "setup_requires": ['setuptools>=18.0', "cython>=0.19"],
     "entry_points": {
         'console_scripts': [
@@ -596,7 +596,7 @@ else:
 
 setup(name="netCDF4",
       cmdclass=cmdclass,
-      version="1.5.3",
+      version="1.5.4",
       long_description="netCDF version 4 has many features not found in earlier versions of the library, such as hierarchical groups, zlib compression, multiple unlimited dimensions, and new data types.  It is implemented on top of HDF5.  This module implements most of the new features, and can read and write netCDF files compatible with older versions of the library.  The API is modelled after Scientific.IO.NetCDF, and should be familiar to users of that module.\n\nThis project is hosted on a `GitHub repository <https://github.com/Unidata/netcdf4-python>`_ where you may access the most up-to-date source.",
       author="Jeff Whitaker",
       author_email="jeffrey.s.whitaker at noaa.gov",
@@ -608,12 +608,10 @@ setup(name="netCDF4",
       keywords=['numpy', 'netcdf', 'data', 'science', 'network', 'oceanography',
                 'meteorology', 'climate'],
       classifiers=["Development Status :: 3 - Alpha",
-                   "Programming Language :: Python :: 2",
-                   "Programming Language :: Python :: 2.7",
                    "Programming Language :: Python :: 3",
-                   "Programming Language :: Python :: 3.5",
                    "Programming Language :: Python :: 3.6",
                    "Programming Language :: Python :: 3.7",
+                   "Programming Language :: Python :: 3.8",
                    "Intended Audience :: Science/Research",
                    "License :: OSI Approved",
                    "Topic :: Software Development :: Libraries :: Python Modules",


=====================================
test/tst_chunk_cache.py
=====================================
@@ -0,0 +1,41 @@
+import unittest, netCDF4, tempfile, os
+
+file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
+cache_size = 10000
+cache_nelems = 100
+cache_preempt = 0.5
+cache_size2 = 20000
+cache_nelems2 = 200
+cache_preempt2 = 1.0 
+
+class RefCountTestCase(unittest.TestCase):
+
+    def setUp(self):
+        nc = netCDF4.Dataset(file_name, mode='w', format='NETCDF4')
+        d = nc.createDimension('fred', 2000)
+        # can only change cache size in createVariable (not nelems or preemption)
+        # this change lasts only as long as file is open.
+        v = nc.createVariable('frank','f',('fred',),chunk_cache=15000)
+        size, nelems, preempt = v.get_var_chunk_cache()
+        assert(size==15000)
+        self.file=file_name
+        nc.close()
+
+    def tearDown(self):
+        # Remove the temporary files
+        os.remove(self.file)
+
+    def runTest(self):
+        """testing methods for accessing and changing chunk cache"""
+        # change cache parameters before opening file.
+        netCDF4.set_chunk_cache(cache_size, cache_nelems, cache_preempt)
+        nc = netCDF4.Dataset(self.file, mode='r')
+        # check to see that chunk cache parameters were changed.
+        assert(netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt))
+        # change cache parameters for variable, check
+        nc['frank'].set_var_chunk_cache(cache_size2, cache_nelems2, cache_preempt2)
+        assert(nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2))
+        nc.close()
+
+if __name__ == '__main__':
+    unittest.main()


=====================================
test/tst_grps.py
=====================================
@@ -66,6 +66,8 @@ class GroupsTestCase(unittest.TestCase):
     def runTest(self):
         """testing groups"""
         f  = netCDF4.Dataset(self.file1, 'r')
+        # issue 988
+        f.name
         tree = [f.path]
         for children in walktree(f):
             for child in children:


=====================================
test/tst_masked4.py
=====================================
@@ -36,6 +36,7 @@ class SetValidMinMax(unittest.TestCase):
         v.missing_value = np.array(32767, v.dtype)
         v.valid_min = np.array(self.valid_min, v.dtype)
         v.valid_max = np.array(self.valid_max, v.dtype)
+        v.valid_range = np.array(0, v.dtype)  # issue 1013, this is wrong but should not raise an exception
 
         v[0] = self.valid_min-1
         v[1] = self.v[1]


=====================================
test/tst_vlen.py
=====================================
@@ -190,6 +190,46 @@ class VlenAppendTestCase(unittest.TestCase):
         v[0].size               # BOOM!
         f.close()
 
+class Vlen_ScaledInts(unittest.TestCase):
+    def setUp(self):
+        self.file = FILE_NAME
+        nc = Dataset(self.file, 'w')
+        vlen_type = nc.createVLType(np.uint8, 'vltest')
+        nc.createDimension('x', None)
+        v = nc.createVariable('vl', vlen_type, 'x')
+        v.scale_factor = 1./254.
+        v.missing_value=np.array(255,np.uint8)
+        # random lengths between 1 and 1000
+        ilen = np.random.randint(1,1000,size=100)
+        n = 0
+        for nlen in ilen:
+            data = np.random.uniform(low=0.0, high=1.0, size=nlen)
+            if n==99:
+                # mark last value as missing
+                mask = np.zeros(data.shape,dtype=bool)
+                mask[-1] = True
+                data = np.ma.masked_array(data, mask=mask)
+                self.data = data
+            v[n] = data
+            n += 1
+        nc.close()
+    def tearDown(self):
+        # Remove the temporary files
+        os.remove(self.file)
+    def runTest(self):
+        """testing packing float vlens as scaled integers (issue #1003)."""
+        nc = Dataset(self.file)
+        # see if data is masked
+        data = nc['vl'][-1]
+        assert(data[-1] is np.ma.masked)
+        # check max error of compression
+        err = np.abs(data - self.data)
+        assert(err.max() < nc['vl'].scale_factor)
+        # turn off auto-scaling
+        nc.set_auto_maskandscale(False)
+        data = nc['vl'][-1]
+        assert(data[-1] == 255)
+        nc.close()
 
 if __name__ == '__main__':
     unittest.main()



View it on GitLab: https://salsa.debian.org/debian-gis-team/netcdf4-python/-/compare/128acb837cbfdbe6c43ea8ea86113ddc8534795c...1ab2276541720da00e13fda8aadad95f953b60ff
