[Git][debian-gis-team/metpy][master] New 0003-Fix-compatibility-with-numpy-1.26.patch
Antonio Valentino (@antonio.valentino)
gitlab at salsa.debian.org
Sun Mar 31 19:54:57 BST 2024
Antonio Valentino pushed to branch master at Debian GIS Project / metpy
Commits:
1af7f7cc by Antonio Valentino at 2024-03-31T18:50:11+00:00
New 0003-Fix-compatibility-with-numpy-1.26.patch
- - - - -
3 changed files:
- debian/changelog
- + debian/patches/0003-Fix-compatibility-with-numpy-1.26.patch
- debian/patches/series
Changes:
=====================================
debian/changelog
=====================================
@@ -1,3 +1,11 @@
+metpy (1.6.1+ds-2) UNRELEASED; urgency=medium
+
+ * debian/patches:
+ - New 0003-Fix-compatibility-with-numpy-1.26.patch
+ (Closes: #1066732).
+
+ -- Antonio Valentino <antonio.valentino at tiscali.it> Sun, 24 Mar 2024 18:45:09 +0000
+
metpy (1.6.1+ds-1) unstable; urgency=medium

* New upstream release.
=====================================
debian/patches/0003-Fix-compatibility-with-numpy-1.26.patch
=====================================
@@ -0,0 +1,1391 @@
+From: Antonio Valentino <antonio.valentino at tiscali.it>
+Date: Sun, 24 Mar 2024 18:44:46 +0000
+Subject: Fix compatibility with numpy 1.26
+
+Forwarded: not-needed
+Origin: https://github.com/Unidata/MetPy/commit/cc2cb577ad2b14eba0c9c7f4a831340487420853
+---
+ pyproject.toml | 4 +-
+ src/metpy/calc/basic.py | 21 +-
+ src/metpy/calc/kinematics.py | 5 +-
+ src/metpy/calc/thermo.py | 15 +-
+ src/metpy/calc/tools.py | 54 ++--
+ src/metpy/io/gempak.py | 616 ++++++++++++++++++++---------------------
+ src/metpy/io/gini.py | 7 +-
+ src/metpy/io/metar.py | 3 +-
+ src/metpy/io/nexrad.py | 44 ++-
+ src/metpy/plots/declarative.py | 17 +-
+ src/metpy/units.py | 1 +
+ src/metpy/xarray.py | 13 +-
+ tests/calc/test_basic.py | 2 +-
+ tests/calc/test_thermo.py | 14 +-
+ tests/test_xarray.py | 8 +-
+ tests/units/test_units.py | 4 +-
+ 16 files changed, 409 insertions(+), 419 deletions(-)
+
+diff --git a/pyproject.toml b/pyproject.toml
+index 3d688bb..b5c8bfc 100644
+--- a/pyproject.toml
++++ b/pyproject.toml
+@@ -113,7 +113,9 @@ filterwarnings = [
+ "ignore:Conversion of an array with ndim > 0 to a scalar is deprecated:DeprecationWarning:pint.facets.plain.quantity:575",
+ # PyProj automatically dispatching for single point, will be waiting for NumPy 2.0 to address
+ # See: https://github.com/pyproj4/pyproj/issues/1309
+- "ignore:Conversion of an array with ndim > 0 to a scalar is deprecated:DeprecationWarning:pyproj.geod:404"
++ "ignore:Conversion of an array with ndim > 0 to a scalar is deprecated:DeprecationWarning:pyproj.geod:404",
++ # Pandas >=2.2 warns about PyArrow being a future dependency
++ 'ignore:\nPyarrow will become a required dependency of pandas:DeprecationWarning',
+ ]
+
+ [tool.ruff]
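
For readers less familiar with pytest's warning filters: each entry above uses the stdlib warnings filter syntax, action:message:category:module:lineno, where message is a regex matched at the start of the warning text. A minimal sketch (not part of the patch) of the new pandas entry expressed through the warnings module directly:

    import warnings

    # Stdlib equivalent of the pyproject entry above; the pandas warning
    # text itself begins with a newline, hence the leading '\n'.
    warnings.filterwarnings(
        'ignore',
        message='\nPyarrow will become a required dependency of pandas',
        category=DeprecationWarning,
    )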
+diff --git a/src/metpy/calc/basic.py b/src/metpy/calc/basic.py
+index 53dd629..a290e3d 100644
+--- a/src/metpy/calc/basic.py
++++ b/src/metpy/calc/basic.py
+@@ -52,6 +52,13 @@ def wind_speed(u, v):
+ --------
+ wind_components
+
++ Examples
++ --------
++ >>> from metpy.calc import wind_speed
++ >>> from metpy.units import units
++ >>> wind_speed(10. * units('m/s'), 10. * units('m/s'))
++ <Quantity(14.1421356, 'meter / second')>
++
+ """
+ return np.hypot(u, v)
+
+@@ -88,6 +95,13 @@ def wind_direction(u, v, convention='from'):
+ In the case of calm winds (where `u` and `v` are zero), this function returns a direction
+ of 0.
+
++ Examples
++ --------
++ >>> from metpy.calc import wind_direction
++ >>> from metpy.units import units
++ >>> wind_direction(10. * units('m/s'), 10. * units('m/s'))
++ <Quantity(225.0, 'degree')>
++
+ """
+ wdir = units.Quantity(90., 'deg') - np.arctan2(-v, -u)
+ origshape = wdir.shape
+@@ -141,7 +155,7 @@ def wind_components(speed, wind_direction):
+ >>> from metpy.calc import wind_components
+ >>> from metpy.units import units
+ >>> wind_components(10. * units('m/s'), 225. * units.deg)
+- (<Quantity(7.07106781, 'meter / second')>, <Quantity(7.07106781, 'meter / second')>)
++ (<Quantity(7.07106781, 'meter / second')>, <Quantity(7.07106781, 'meter / second')>)
+
+ .. versionchanged:: 1.0
+ Renamed ``wdir`` parameter to ``wind_direction``
+@@ -906,10 +920,7 @@ def smooth_window(scalar_grid, window, passes=1, normalize_weights=True):
+ raise ValueError('The shape of the smoothing window must be odd in all dimensions.')
+
+ # Optionally normalize the supplied weighting window
+- if normalize_weights:
+- weights = window / np.sum(window)
+- else:
+- weights = window
++ weights = window / np.sum(window) if normalize_weights else window
+
+ # Set indexes
+ # Inner index for the centered array elements that are affected by the smoothing
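
The smooth_window change above is a pure condensation of the two branches; as a quick illustration of what the normalization branch computes, here is a minimal sketch with a hypothetical 3x3 kernel:

    import numpy as np

    window = np.array([[1., 2., 1.],
                       [2., 4., 2.],
                       [1., 2., 1.]])
    weights = window / np.sum(window)   # the normalize_weights=True branch
    print(weights.sum())                # 1.0: smoothing preserves the mean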
+diff --git a/src/metpy/calc/kinematics.py b/src/metpy/calc/kinematics.py
+index c3d8ac8..fb80b3b 100644
+--- a/src/metpy/calc/kinematics.py
++++ b/src/metpy/calc/kinematics.py
+@@ -629,10 +629,7 @@ def geostrophic_wind(height, dx=None, dy=None, latitude=None, x_dim=-1, y_dim=-2
+
+ """
+ f = coriolis_parameter(latitude)
+- if height.dimensionality['[length]'] == 2.0:
+- norm_factor = 1. / f
+- else:
+- norm_factor = mpconsts.g / f
++ norm_factor = 1. / f if height.dimensionality['[length]'] == 2.0 else mpconsts.g / f
+
+ dhdx, dhdy = geospatial_gradient(height, dx=dx, dy=dy, x_dim=x_dim, y_dim=y_dim,
+ parallel_scale=parallel_scale,
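
For context on the dimensionality test above: geopotential carries units of m^2/s^2 (a [length] exponent of 2), while geometric height is plain meters, which is how the single expression picks between 1/f and g/f. A small sketch using pint directly:

    from metpy.units import units

    geopotential = units.Quantity(9810., 'm^2 / s^2')
    height = units.Quantity(1000., 'm')
    print(geopotential.dimensionality['[length]'])   # 2.0 -> norm_factor = 1/f
    print(height.dimensionality['[length]'])         # 1.0 -> norm_factor = g/f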
+diff --git a/src/metpy/calc/thermo.py b/src/metpy/calc/thermo.py
+index 4e7382d..e60e65a 100644
+--- a/src/metpy/calc/thermo.py
++++ b/src/metpy/calc/thermo.py
+@@ -963,7 +963,7 @@ def parcel_profile(pressure, temperature, dewpoint):
+ >>> Td = dewpoint_from_relative_humidity(T, rh)
+ >>> # compute parcel temperature
+ >>> parcel_profile(p, T[0], Td[0]).to('degC')
+- <Quantity([ 29.3 28.61221952 25.22214738 23.46097535 21.5835928
++ <Quantity([ 29.3 28.61221952 25.22214738 23.46097684 21.5835928
+ 19.57260398 17.40636185 15.05748615 12.49064866 9.6592539
+ 6.50023491 2.92560365 -1.19172846 -6.04257884 -11.92497517
+ -19.3176536 -28.97672464 -41.94444385 -50.01173076 -59.30936248
+@@ -2375,7 +2375,7 @@ def cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom'
+ >>> prof = parcel_profile(p, T[0], Td[0]).to('degC')
+ >>> # calculate surface based CAPE/CIN
+ >>> cape_cin(p, T, Td, prof)
+- (<Quantity(4703.77306, 'joule / kilogram')>, <Quantity(0, 'joule / kilogram')>)
++ (<Quantity(4703.77308, 'joule / kilogram')>, <Quantity(0, 'joule / kilogram')>)
+
+ See Also
+ --------
+@@ -2441,10 +2441,7 @@ def cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom'
+ parcel_temperature_profile=parcel_profile, which=which_el)
+
+ # No EL and we use the top reading of the sounding.
+- if np.isnan(el_pressure):
+- el_pressure = pressure[-1].magnitude
+- else:
+- el_pressure = el_pressure.magnitude
++ el_pressure = pressure[-1].magnitude if np.isnan(el_pressure) else el_pressure.magnitude
+
+ # Difference between the parcel path and measured temperature profiles
+ y = (parcel_profile - temperature).to(units.degK)
+@@ -3016,7 +3013,7 @@ def most_unstable_cape_cin(pressure, temperature, dewpoint, **kwargs):
+ >>> Td = dewpoint_from_relative_humidity(T, rh)
+ >>> # calculate most unstable CAPE/CIN
+ >>> most_unstable_cape_cin(p, T, Td)
+- (<Quantity(4703.77306, 'joule / kilogram')>, <Quantity(0, 'joule / kilogram')>)
++ (<Quantity(4703.77308, 'joule / kilogram')>, <Quantity(0, 'joule / kilogram')>)
+
+ See Also
+ --------
+@@ -3175,9 +3172,9 @@ def downdraft_cape(pressure, temperature, dewpoint):
+ >>> # calculate dewpoint
+ >>> Td = dewpoint_from_relative_humidity(T, rh)
+ >>> downdraft_cape(p, T, Td)
+- (<Quantity(1222.67968, 'joule / kilogram')>, <Quantity([1008. 1000. 950.
++ (<Quantity(1222.67967, 'joule / kilogram')>, <Quantity([1008. 1000. 950.
+ 900. 850. 800. 750. 700. 650. 600.], 'hectopascal')>, <Quantity([17.50959548
+- 17.20643425 15.237249 13.12607097 10.85045704 8.38243809 5.68671014 2.71808363
++ 17.20643425 15.237249 13.12607097 10.85045704 8.38243809 5.68671014 2.71808368
+ -0.58203825 -4.29053485], 'degree_Celsius')>)
+
+ See Also
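
The doctest edits in this file are all last-digit floating-point shifts under NumPy 1.26, not behavioral changes; a tolerance-based comparison (a sketch, not part of the patch) makes that concrete:

    import numpy as np

    # Old vs. new cape_cin doctest value: equal to within ~5e-9 relative.
    np.testing.assert_allclose(4703.77308, 4703.77306, rtol=1e-8)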
+diff --git a/src/metpy/calc/tools.py b/src/metpy/calc/tools.py
+index 16e2c8c..6386726 100644
+--- a/src/metpy/calc/tools.py
++++ b/src/metpy/calc/tools.py
+@@ -298,12 +298,7 @@ def reduce_point_density(points, radius, priority=None):
+
+ # Need to use sorted indices rather than sorting the position
+ # so that the keep mask matches *original* order.
+- if priority is not None:
+- # Need to sort the locations in decreasing priority.
+- sorted_indices = np.argsort(priority)[::-1]
+- else:
+- # Take advantage of iterator nature of range here to avoid making big lists
+- sorted_indices = range(len(points))
++ sorted_indices = range(len(points)) if priority is None else np.argsort(priority)[::-1]
+
+ # Keep all good points initially
+ keep = np.logical_and.reduce(good_vals, axis=-1)
+@@ -1830,6 +1825,13 @@ def angle_to_direction(input_angle, full=False, level=3):
+ direction
+ The directional text
+
++ Examples
++ --------
++ >>> from metpy.calc import angle_to_direction
++ >>> from metpy.units import units
++ >>> angle_to_direction(225. * units.deg)
++ 'SW'
++
+ """
+ try: # strip units temporarily
+ origin_units = input_angle.units
+@@ -1843,8 +1845,11 @@ def angle_to_direction(input_angle, full=False, level=3):
+ else:
+ scalar = False
+
++ np_input_angle = np.array(input_angle).astype(float)
++ origshape = np_input_angle.shape
++ ndarray = len(origshape) > 1
+ # clean any numeric strings, negatives, and None does not handle strings with alphabet
+- input_angle = units.Quantity(np.array(input_angle).astype(float), origin_units)
++ input_angle = units.Quantity(np_input_angle, origin_units)
+ input_angle[input_angle < 0] = np.nan
+
+ # Normalize between 0 - 360
+@@ -1860,8 +1865,10 @@ def angle_to_direction(input_angle, full=False, level=3):
+ err_msg = 'Level of complexity cannot be less than 1 or greater than 3!'
+ raise ValueError(err_msg)
+
+- angle_dict = {i * BASE_DEGREE_MULTIPLIER.m * nskip: dir_str
+- for i, dir_str in enumerate(DIR_STRS[::nskip])}
++ angle_dict = {
++ i * BASE_DEGREE_MULTIPLIER.m * nskip: dir_str
++ for i, dir_str in enumerate(DIR_STRS[::nskip])
++ }
+ angle_dict[MAX_DEGREE_ANGLE.m] = 'N' # handle edge case of 360.
+ angle_dict[UND_ANGLE] = UND
+
+@@ -1877,18 +1884,25 @@ def angle_to_direction(input_angle, full=False, level=3):
+ # ['N', 'N', 'NE', 'NE', 'E', 'E', 'SE', 'SE',
+ # 'S', 'S', 'SW', 'SW', 'W', 'W', 'NW', 'NW']
+
+- multiplier = np.round(
+- (norm_angles / BASE_DEGREE_MULTIPLIER / nskip) - 0.001).m
+- round_angles = (multiplier * BASE_DEGREE_MULTIPLIER.m * nskip)
++ multiplier = np.round((norm_angles / BASE_DEGREE_MULTIPLIER / nskip) - 0.001).m
++ round_angles = multiplier * BASE_DEGREE_MULTIPLIER.m * nskip
+ round_angles[np.where(np.isnan(round_angles))] = UND_ANGLE
+-
+- dir_str_arr = itemgetter(*round_angles)(angle_dict) # for array
+- if not full:
+- return dir_str_arr
+-
+- dir_str_arr = ','.join(dir_str_arr)
+- dir_str_arr = _unabbreviate_direction(dir_str_arr)
+- return dir_str_arr.replace(',', ' ') if scalar else dir_str_arr.split(',')
++ if ndarray:
++ round_angles = round_angles.flatten()
++ dir_str_arr = itemgetter(*round_angles)(angle_dict) # returns str or tuple
++ if full:
++ dir_str_arr = ','.join(dir_str_arr)
++ dir_str_arr = _unabbreviate_direction(dir_str_arr)
++ dir_str_arr = dir_str_arr.split(',')
++ if scalar:
++ return dir_str_arr[0]
++ else:
++ return np.array(dir_str_arr).reshape(origshape)
++ else:
++ if scalar:
++ return dir_str_arr
++ else:
++ return np.array(dir_str_arr).reshape(origshape)
+
+
+ def _unabbreviate_direction(abb_dir_str):
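
The rewritten tail of angle_to_direction exists to stop itemgetter from silently flattening multi-dimensional input; arrays now come back reshaped to origshape. A usage sketch of the intended behavior (angles chosen to hit cardinal points at the default level=3):

    import numpy as np
    from metpy.calc import angle_to_direction
    from metpy.units import units

    angles = units.Quantity(np.array([[0., 90.], [180., 270.]]), 'deg')
    print(angle_to_direction(angles))
    # Expected: [['N' 'E']
    #            ['S' 'W']]  -- same 2x2 shape as the input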
+diff --git a/src/metpy/io/gempak.py b/src/metpy/io/gempak.py
+index 67f298c..53f9121 100644
+--- a/src/metpy/io/gempak.py
++++ b/src/metpy/io/gempak.py
+@@ -489,43 +489,25 @@ def _wx_to_wnum(wx1, wx2, wx3, missing=-9999):
+ Notes
+ -----
+ See GEMPAK function PT_WNMT.
+- """
+- metar_codes = [
+- 'BR', 'DS', 'DU', 'DZ', 'FC', 'FG', 'FU', 'GR', 'GS',
+- 'HZ', 'IC', 'PL', 'PO', 'RA', 'SA', 'SG', 'SN', 'SQ',
+- 'SS', 'TS', 'UP', 'VA', '+DS', '-DZ', '+DZ', '+FC',
+- '-GS', '+GS', '-PL', '+PL', '-RA', '+RA', '-SG',
+- '+SG', '-SN', '+SN', '+SS', 'BCFG', 'BLDU', 'BLPY',
+- 'BLSA', 'BLSN', 'DRDU', 'DRSA', 'DRSN', 'FZDZ', 'FZFG',
+- 'FZRA', 'MIFG', 'PRFG', 'SHGR', 'SHGS', 'SHPL', 'SHRA',
+- 'SHSN', 'TSRA', '+BLDU', '+BLSA', '+BLSN', '-FZDZ',
+- '+FZDZ', '+FZFG', '-FZRA', '+FZRA', '-SHGS', '+SHGS',
+- '-SHPL', '+SHPL', '-SHRA', '+SHRA', '-SHSN', '+SHSN',
+- '-TSRA', '+TSRA'
+- ]
+-
+- gempak_wnum = [
+- 9, 33, 8, 2, -2, 9, 7, 4, 25, 6, 36, 23, 40, 1, 35, 24, 3, 10,
+- 35, 5, 41, 11, 68, 17, 18, -1, 61, 62, 57, 58, 13, 14, 59, 60, 20,
+- 21, 69, 9, 33, 34, 35, 32, 33, 35, 32, 19, 30, 15, 31, 9, 27, 67,
+- 63, 16, 22, 66, 68, 69, 70, 53, 54, 30, 49, 50, 67, 67, 75, 76, 51,
+- 52, 55, 56, 77, 78
+- ]
+-
+- if wx1 in metar_codes:
+- wn1 = gempak_wnum[metar_codes.index(wx1)]
+- else:
+- wn1 = 0
+-
+- if wx2 in metar_codes:
+- wn2 = gempak_wnum[metar_codes.index(wx2)]
+- else:
+- wn2 = 0
+
+- if wx3 in metar_codes:
+- wn3 = gempak_wnum[metar_codes.index(wx3)]
+- else:
+- wn3 = 0
++ """
++ metar_to_gempak_wnum = {'BR': 9, 'DS': 33, 'DU': 8, 'DZ': 2, 'FC': -2, 'FG': 9, 'FU': 7,
++ 'GR': 4, 'GS': 25, 'HZ': 6, 'IC': 36, 'PL': 23, 'PO': 40, 'RA': 1,
++ 'SA': 35, 'SG': 24, 'SN': 3, 'SQ': 10, 'SS': 35, 'TS': 5, 'UP': 41,
++ 'VA': 11, '+DS': 68, '-DZ': 17, '+DZ': 18, '+FC': -1, '-GS': 61,
++ '+GS': 62, '-PL': 57, '+PL': 58, '-RA': 13, '+RA': 14, '-SG': 59,
++ '+SG': 60, '-SN': 20, '+SN': 21, '+SS': 69, 'BCFG': 9, 'BLDU': 33,
++ 'BLPY': 34, 'BLSA': 35, 'BLSN': 32, 'DRDU': 33, 'DRSA': 35,
++ 'DRSN': 32, 'FZDZ': 19, 'FZFG': 30, 'FZRA': 15, 'MIFG': 31,
++ 'PRFG': 9, 'SHGR': 27, 'SHGS': 67, 'SHPL': 63, 'SHRA': 16,
++ 'SHSN': 22, 'TSRA': 66, '+BLDU': 68, '+BLSA': 69, '+BLSN': 70,
++ '-FZDZ': 53, '+FZDZ': 54, '+FZFG': 30, '-FZRA': 49, '+FZRA': 50,
++ '-SHGS': 67, '+SHGS': 67, '-SHPL': 75, '+SHPL': 76, '-SHRA': 51,
++ '+SHRA': 52, '-SHSN': 55, '+SHSN': 56, '-TSRA': 77, '+TSRA': 78}
++
++ wn1 = metar_to_gempak_wnum.get(wx1, 0)
++ wn2 = metar_to_gempak_wnum.get(wx2, 0)
++ wn3 = metar_to_gempak_wnum.get(wx3, 0)
+
+ if all(w >= 0 for w in [wn1, wn2, wn3]):
+ wnum = wn3 * 80 * 80 + wn2 * 80 + wn1
+@@ -663,7 +645,7 @@ class GempakFile:
+ 'NavigationBlock')
+
+ if navb_size != nav_stuct.size // BYTES_PER_WORD:
+- raise ValueError('Navigation block size does not match GEMPAK specification')
++ raise ValueError('Navigation block size does not match GEMPAK specification.')
+ else:
+ self.navigation_block = (
+ self._buffer.read_struct(nav_stuct)
+@@ -683,7 +665,7 @@ class GempakFile:
+
+ if anlb_size not in [anlb1_struct.size // BYTES_PER_WORD,
+ anlb2_struct.size // BYTES_PER_WORD]:
+- raise ValueError('Analysis block size does not match GEMPAK specification')
++ raise ValueError('Analysis block size does not match GEMPAK specification.')
+ else:
+ anlb_type = self._buffer.read_struct(struct.Struct(self.prefmt + 'f'))[0]
+ self._buffer.jump_to(anlb_start)
+@@ -759,9 +741,9 @@ class GempakFile:
+
+ def _swap_bytes(self, binary):
+ """Swap between little and big endian."""
+- self.swaped_bytes = (struct.pack('@i', 1) != binary)
++ self.swapped_bytes = (struct.pack('@i', 1) != binary)
+
+- if self.swaped_bytes:
++ if self.swapped_bytes:
+ if sys.byteorder == 'little':
+ self.prefmt = '>'
+ self.endian = 'big'
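
For context on the (now correctly spelled) swapped_bytes logic: the loader compares a header word that should hold the integer 1 against 1 packed in host byte order; a mismatch means every subsequent read must use the opposite endianness. A standalone sketch with a hypothetical big-endian input:

    import struct
    import sys

    binary = b'\x00\x00\x00\x01'          # hypothetical: 1 stored big-endian
    swapped_bytes = struct.pack('@i', 1) != binary
    if swapped_bytes:
        prefmt = '>' if sys.byteorder == 'little' else '<'
    else:
        prefmt = ''
    # On a little-endian host this yields prefmt == '>' (read big-endian).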
+@@ -950,9 +932,9 @@ class GempakGrid(GempakFile):
+ if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
+ self.column_headers.append(self._buffer.read_struct(column_headers_fmt))
+
+- self._gdinfo = []
++ self._gdinfo = set()
+ for n, head in enumerate(self.column_headers):
+- self._gdinfo.append(
++ self._gdinfo.add(
+ Grid(
+ n,
+ head.GTM1[0],
+@@ -972,7 +954,7 @@ class GempakGrid(GempakFile):
+
+ def gdinfo(self):
+ """Return grid information."""
+- return self._gdinfo
++ return sorted(self._gdinfo)
+
+ def _get_crs(self):
+ """Create CRS from GEMPAK navigation block."""
+@@ -1262,7 +1244,7 @@ class GempakGrid(GempakFile):
+ level2 = [level2]
+
+ # Figure out which columns to extract from the file
+- matched = self._gdinfo.copy()
++ matched = sorted(self._gdinfo)
+
+ if parameter is not None:
+ matched = filter(
+@@ -1309,9 +1291,8 @@ class GempakGrid(GempakFile):
+
+ grids = []
+ irow = 0 # Only one row for grids
+- for icol, col_head in enumerate(self.column_headers):
+- if icol not in gridno:
+- continue
++ for icol in gridno:
++ col_head = self.column_headers[icol]
+ for iprt, part in enumerate(self.parts):
+ pointer = (self.prod_desc.data_block_ptr
+ + (irow * self.prod_desc.columns * self.prod_desc.parts)
+@@ -1409,7 +1390,7 @@ class GempakSounding(GempakFile):
+
+ self.merged = 'SNDT' in (part.name for part in self.parts)
+
+- self._sninfo = []
++ self._sninfo = set()
+ for irow, row_head in enumerate(self.row_headers):
+ for icol, col_head in enumerate(self.column_headers):
+ pointer = (self.prod_desc.data_block_ptr
+@@ -1420,7 +1401,7 @@ class GempakSounding(GempakFile):
+ data_ptr = self._buffer.read_int(4, self.endian, False)
+
+ if data_ptr:
+- self._sninfo.append(
++ self._sninfo.add(
+ Sounding(
+ irow,
+ icol,
+@@ -1437,144 +1418,140 @@ class GempakSounding(GempakFile):
+
+ def sninfo(self):
+ """Return sounding information."""
+- return self._sninfo
++ return sorted(self._sninfo)
+
+ def _unpack_merged(self, sndno):
+ """Unpack merged sounding data."""
+ soundings = []
+- for irow, row_head in enumerate(self.row_headers):
+- for icol, col_head in enumerate(self.column_headers):
+- if (irow, icol) not in sndno:
++ for irow, icol in sndno:
++ row_head = self.row_headers[irow]
++ col_head = self.column_headers[icol]
++ sounding = {
++ 'STID': col_head.STID,
++ 'STNM': col_head.STNM,
++ 'SLAT': col_head.SLAT,
++ 'SLON': col_head.SLON,
++ 'SELV': col_head.SELV,
++ 'STAT': col_head.STAT,
++ 'COUN': col_head.COUN,
++ 'DATE': row_head.DATE,
++ 'TIME': row_head.TIME,
++ }
++ for iprt, part in enumerate(self.parts):
++ pointer = (self.prod_desc.data_block_ptr
++ + (irow * self.prod_desc.columns * self.prod_desc.parts)
++ + (icol * self.prod_desc.parts + iprt))
++ self._buffer.jump_to(self._start, _word_to_position(pointer))
++ self.data_ptr = self._buffer.read_int(4, self.endian, False)
++ if not self.data_ptr:
+ continue
+- sounding = {'STID': col_head.STID,
+- 'STNM': col_head.STNM,
+- 'SLAT': col_head.SLAT,
+- 'SLON': col_head.SLON,
+- 'SELV': col_head.SELV,
+- 'STAT': col_head.STAT,
+- 'COUN': col_head.COUN,
+- 'DATE': row_head.DATE,
+- 'TIME': row_head.TIME,
+- }
+- for iprt, part in enumerate(self.parts):
+- pointer = (self.prod_desc.data_block_ptr
+- + (irow * self.prod_desc.columns * self.prod_desc.parts)
+- + (icol * self.prod_desc.parts + iprt))
+- self._buffer.jump_to(self._start, _word_to_position(pointer))
+- self.data_ptr = self._buffer.read_int(4, self.endian, False)
+- if not self.data_ptr:
+- continue
+- self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
+- self.data_header_length = self._buffer.read_int(4, self.endian, False)
+- data_header = self._buffer.set_mark()
+- self._buffer.jump_to(data_header,
+- _word_to_position(part.header_length + 1))
+- lendat = self.data_header_length - part.header_length
+-
+- fmt_code = {
+- DataTypes.real: 'f',
+- DataTypes.realpack: 'i',
+- DataTypes.character: 's',
+- }.get(part.data_type)
+-
+- if fmt_code is None:
+- raise NotImplementedError(f'No methods for data type {part.data_type}')
+-
+- if fmt_code == 's':
+- lendat *= BYTES_PER_WORD
+-
+- packed_buffer = (
+- self._buffer.read_struct(
+- struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
+- )
++ self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
++ self.data_header_length = self._buffer.read_int(4, self.endian, False)
++ data_header = self._buffer.set_mark()
++ self._buffer.jump_to(data_header,
++ _word_to_position(part.header_length + 1))
++ lendat = self.data_header_length - part.header_length
++
++ fmt_code = {
++ DataTypes.real: 'f',
++ DataTypes.realpack: 'i',
++ }.get(part.data_type)
++
++ if fmt_code is None:
++ raise NotImplementedError(f'No methods for data type {part.data_type}')
++
++ packed_buffer = (
++ self._buffer.read_struct(
++ struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
+ )
++ )
+
+- parameters = self.parameters[iprt]
+- nparms = len(parameters['name'])
++ parameters = self.parameters[iprt]
++ nparms = len(parameters['name'])
+
+- if part.data_type == DataTypes.realpack:
+- unpacked = self._unpack_real(packed_buffer, parameters, lendat)
+- for iprm, param in enumerate(parameters['name']):
+- sounding[param] = unpacked[iprm::nparms]
+- else:
+- for iprm, param in enumerate(parameters['name']):
+- sounding[param] = np.array(
+- packed_buffer[iprm::nparms], dtype=np.float32
+- )
++ if part.data_type == DataTypes.realpack:
++ unpacked = self._unpack_real(packed_buffer, parameters, lendat)
++ for iprm, param in enumerate(parameters['name']):
++ sounding[param] = unpacked[iprm::nparms]
++ else:
++ for iprm, param in enumerate(parameters['name']):
++ sounding[param] = np.array(
++ packed_buffer[iprm::nparms], dtype=np.float32
++ )
+
+- soundings.append(sounding)
++ soundings.append(sounding)
+ return soundings
+
+ def _unpack_unmerged(self, sndno):
+ """Unpack unmerged sounding data."""
+ soundings = []
+- for irow, row_head in enumerate(self.row_headers):
+- for icol, col_head in enumerate(self.column_headers):
+- if (irow, icol) not in sndno:
++ for irow, icol in sndno:
++ row_head = self.row_headers[irow]
++ col_head = self.column_headers[icol]
++ sounding = {
++ 'STID': col_head.STID,
++ 'STNM': col_head.STNM,
++ 'SLAT': col_head.SLAT,
++ 'SLON': col_head.SLON,
++ 'SELV': col_head.SELV,
++ 'STAT': col_head.STAT,
++ 'COUN': col_head.COUN,
++ 'DATE': row_head.DATE,
++ 'TIME': row_head.TIME,
++ }
++ for iprt, part in enumerate(self.parts):
++ pointer = (self.prod_desc.data_block_ptr
++ + (irow * self.prod_desc.columns * self.prod_desc.parts)
++ + (icol * self.prod_desc.parts + iprt))
++ self._buffer.jump_to(self._start, _word_to_position(pointer))
++ self.data_ptr = self._buffer.read_int(4, self.endian, False)
++ if not self.data_ptr:
+ continue
+- sounding = {'STID': col_head.STID,
+- 'STNM': col_head.STNM,
+- 'SLAT': col_head.SLAT,
+- 'SLON': col_head.SLON,
+- 'SELV': col_head.SELV,
+- 'STAT': col_head.STAT,
+- 'COUN': col_head.COUN,
+- 'DATE': row_head.DATE,
+- 'TIME': row_head.TIME,
+- }
+- for iprt, part in enumerate(self.parts):
+- pointer = (self.prod_desc.data_block_ptr
+- + (irow * self.prod_desc.columns * self.prod_desc.parts)
+- + (icol * self.prod_desc.parts + iprt))
+- self._buffer.jump_to(self._start, _word_to_position(pointer))
+- self.data_ptr = self._buffer.read_int(4, self.endian, False)
+- if not self.data_ptr:
+- continue
+- self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
+- self.data_header_length = self._buffer.read_int(4, self.endian, False)
+- data_header = self._buffer.set_mark()
+- self._buffer.jump_to(data_header,
+- _word_to_position(part.header_length + 1))
+- lendat = self.data_header_length - part.header_length
+-
+- fmt_code = {
+- DataTypes.real: 'f',
+- DataTypes.realpack: 'i',
+- DataTypes.character: 's',
+- }.get(part.data_type)
+-
+- if fmt_code is None:
+- raise NotImplementedError(f'No methods for data type {part.data_type}')
+-
+- if fmt_code == 's':
+- lendat *= BYTES_PER_WORD
+-
+- packed_buffer = (
+- self._buffer.read_struct(
+- struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
+- )
++ self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
++ self.data_header_length = self._buffer.read_int(4, self.endian, False)
++ data_header = self._buffer.set_mark()
++ self._buffer.jump_to(data_header,
++ _word_to_position(part.header_length + 1))
++ lendat = self.data_header_length - part.header_length
++
++ fmt_code = {
++ DataTypes.real: 'f',
++ DataTypes.realpack: 'i',
++ DataTypes.character: 's',
++ }.get(part.data_type)
++
++ if fmt_code is None:
++ raise NotImplementedError(f'No methods for data type {part.data_type}')
++
++ if fmt_code == 's':
++ lendat *= BYTES_PER_WORD
++
++ packed_buffer = (
++ self._buffer.read_struct(
++ struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
+ )
++ )
+
+- parameters = self.parameters[iprt]
+- nparms = len(parameters['name'])
+- sounding[part.name] = {}
+-
+- if part.data_type == DataTypes.realpack:
+- unpacked = self._unpack_real(packed_buffer, parameters, lendat)
+- for iprm, param in enumerate(parameters['name']):
+- sounding[part.name][param] = unpacked[iprm::nparms]
+- elif part.data_type == DataTypes.character:
+- for iprm, param in enumerate(parameters['name']):
+- sounding[part.name][param] = (
+- self._decode_strip(packed_buffer[iprm])
+- )
+- else:
+- for iprm, param in enumerate(parameters['name']):
+- sounding[part.name][param] = (
+- np.array(packed_buffer[iprm::nparms], dtype=np.float32)
+- )
++ parameters = self.parameters[iprt]
++ nparms = len(parameters['name'])
++ sounding[part.name] = {}
++
++ if part.data_type == DataTypes.realpack:
++ unpacked = self._unpack_real(packed_buffer, parameters, lendat)
++ for iprm, param in enumerate(parameters['name']):
++ sounding[part.name][param] = unpacked[iprm::nparms]
++ elif part.data_type == DataTypes.character:
++ for iprm, param in enumerate(parameters['name']):
++ sounding[part.name][param] = (
++ self._decode_strip(packed_buffer[iprm])
++ )
++ else:
++ for iprm, param in enumerate(parameters['name']):
++ sounding[part.name][param] = (
++ np.array(packed_buffer[iprm::nparms], dtype=np.float32)
++ )
+
+- soundings.append(self._merge_sounding(sounding))
++ soundings.append(self._merge_sounding(sounding))
+ return soundings
+
+ def _merge_significant_temps(self, merged, parts, section, pbot):
+@@ -2177,7 +2154,7 @@ class GempakSounding(GempakFile):
+ country = [c.upper() for c in country]
+
+ # Figure out which columns to extract from the file
+- matched = self._sninfo.copy()
++ matched = sorted(self._sninfo)
+
+ if station_id is not None:
+ matched = filter(
+@@ -2216,10 +2193,7 @@ class GempakSounding(GempakFile):
+
+ sndno = [(s.DTNO, s.SNDNO) for s in matched]
+
+- if self.merged:
+- data = self._unpack_merged(sndno)
+- else:
+- data = self._unpack_unmerged(sndno)
++ data = self._unpack_merged(sndno) if self.merged else self._unpack_unmerged(sndno)
+
+ soundings = []
+ for snd in data:
+@@ -2299,7 +2273,7 @@ class GempakSurface(GempakFile):
+
+ self._get_surface_type()
+
+- self._sfinfo = []
++ self._sfinfo = set()
+ if self.surface_type == 'standard':
+ for irow, row_head in enumerate(self.row_headers):
+ for icol, col_head in enumerate(self.column_headers):
+@@ -2312,7 +2286,7 @@ class GempakSurface(GempakFile):
+ data_ptr = self._buffer.read_int(4, self.endian, False)
+
+ if data_ptr:
+- self._sfinfo.append(
++ self._sfinfo.add(
+ Surface(
+ irow,
+ icol,
+@@ -2338,7 +2312,7 @@ class GempakSurface(GempakFile):
+ data_ptr = self._buffer.read_int(4, self.endian, False)
+
+ if data_ptr:
+- self._sfinfo.append(
++ self._sfinfo.add(
+ Surface(
+ irow,
+ icol,
+@@ -2364,7 +2338,7 @@ class GempakSurface(GempakFile):
+ data_ptr = self._buffer.read_int(4, self.endian, False)
+
+ if data_ptr:
+- self._sfinfo.append(
++ self._sfinfo.add(
+ Surface(
+ irow,
+ icol,
+@@ -2383,15 +2357,22 @@ class GempakSurface(GempakFile):
+
+ def sfinfo(self):
+ """Return station information."""
+- return self._sfinfo
++ return sorted(self._sfinfo)
+
+ def _get_surface_type(self):
+- """Determine type of surface file."""
+- if len(self.row_headers) == 1:
++ """Determine type of surface file.
++
++ Notes
++ -----
++ See GEMPAK SFLIB documentation for type definitions.
++ """
++ if (len(self.row_headers) == 1
++ and 'DATE' in self.column_keys
++ and 'STID' in self.column_keys):
+ self.surface_type = 'ship'
+- elif 'DATE' in self.row_keys:
++ elif 'DATE' in self.row_keys and 'STID' in self.column_keys:
+ self.surface_type = 'standard'
+- elif 'DATE' in self.column_keys:
++ elif 'DATE' in self.column_keys and 'STID' in self.row_keys:
+ self.surface_type = 'climate'
+ else:
+ raise TypeError('Unknown surface data type')
+@@ -2414,92 +2395,91 @@ class GempakSurface(GempakFile):
+ def _unpack_climate(self, sfcno):
+ """Unpack a climate surface data file."""
+ stations = []
+- for icol, col_head in enumerate(self.column_headers):
+- for irow, row_head in enumerate(self.row_headers):
+- if (irow, icol) not in sfcno:
++ for irow, icol in sfcno:
++ col_head = self.column_headers[icol]
++ row_head = self.row_headers[irow]
++ station = {
++ 'STID': row_head.STID,
++ 'STNM': row_head.STNM,
++ 'SLAT': row_head.SLAT,
++ 'SLON': row_head.SLON,
++ 'SELV': row_head.SELV,
++ 'STAT': row_head.STAT,
++ 'COUN': row_head.COUN,
++ 'STD2': row_head.STD2,
++ 'SPRI': row_head.SPRI,
++ 'DATE': col_head.DATE,
++ 'TIME': col_head.TIME,
++ }
++ for iprt, part in enumerate(self.parts):
++ pointer = (self.prod_desc.data_block_ptr
++ + (irow * self.prod_desc.columns * self.prod_desc.parts)
++ + (icol * self.prod_desc.parts + iprt))
++ self._buffer.jump_to(self._start, _word_to_position(pointer))
++ self.data_ptr = self._buffer.read_int(4, self.endian, False)
++ if not self.data_ptr:
+ continue
+- station = {'STID': row_head.STID,
+- 'STNM': row_head.STNM,
+- 'SLAT': row_head.SLAT,
+- 'SLON': row_head.SLON,
+- 'SELV': row_head.SELV,
+- 'STAT': row_head.STAT,
+- 'COUN': row_head.COUN,
+- 'STD2': row_head.STD2,
+- 'SPRI': row_head.SPRI,
+- 'DATE': col_head.DATE,
+- 'TIME': col_head.TIME,
+- }
+- for iprt, part in enumerate(self.parts):
+- pointer = (self.prod_desc.data_block_ptr
+- + (irow * self.prod_desc.columns * self.prod_desc.parts)
+- + (icol * self.prod_desc.parts + iprt))
+- self._buffer.jump_to(self._start, _word_to_position(pointer))
+- self.data_ptr = self._buffer.read_int(4, self.endian, False)
+- if not self.data_ptr:
+- continue
+- self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
+- self.data_header_length = self._buffer.read_int(4, self.endian, False)
+- data_header = self._buffer.set_mark()
+- self._buffer.jump_to(data_header,
+- _word_to_position(part.header_length + 1))
+- lendat = self.data_header_length - part.header_length
+-
+- fmt_code = {
+- DataTypes.real: 'f',
+- DataTypes.realpack: 'i',
+- DataTypes.character: 's',
+- }.get(part.data_type)
+-
+- if fmt_code is None:
+- raise NotImplementedError(f'No methods for data type {part.data_type}')
+-
+- if fmt_code == 's':
+- lendat *= BYTES_PER_WORD
+-
+- packed_buffer = (
+- self._buffer.read_struct(
+- struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
+- )
++ self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
++ self.data_header_length = self._buffer.read_int(4, self.endian, False)
++ data_header = self._buffer.set_mark()
++ self._buffer.jump_to(data_header,
++ _word_to_position(part.header_length + 1))
++ lendat = self.data_header_length - part.header_length
++
++ fmt_code = {
++ DataTypes.real: 'f',
++ DataTypes.realpack: 'i',
++ DataTypes.character: 's',
++ }.get(part.data_type)
++
++ if fmt_code is None:
++ raise NotImplementedError(f'No methods for data type {part.data_type}')
++
++ if fmt_code == 's':
++ lendat *= BYTES_PER_WORD
++
++ packed_buffer = (
++ self._buffer.read_struct(
++ struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
+ )
++ )
+
+- parameters = self.parameters[iprt]
++ parameters = self.parameters[iprt]
+
+- if part.data_type == DataTypes.realpack:
+- unpacked = self._unpack_real(packed_buffer, parameters, lendat)
+- for iprm, param in enumerate(parameters['name']):
+- station[param] = unpacked[iprm]
+- elif part.data_type == DataTypes.character:
+- for iprm, param in enumerate(parameters['name']):
+- station[param] = self._decode_strip(packed_buffer[iprm])
+- else:
+- for iprm, param in enumerate(parameters['name']):
+- station[param] = np.array(
+- packed_buffer[iprm], dtype=np.float32
+- )
++ if part.data_type == DataTypes.realpack:
++ unpacked = self._unpack_real(packed_buffer, parameters, lendat)
++ for iprm, param in enumerate(parameters['name']):
++ station[param] = unpacked[iprm]
++ elif part.data_type == DataTypes.character:
++ for iprm, param in enumerate(parameters['name']):
++ station[param] = self._decode_strip(packed_buffer[iprm])
++ else:
++ for iprm, param in enumerate(parameters['name']):
++ station[param] = np.array(
++ packed_buffer[iprm], dtype=np.float32
++ )
+
+- stations.append(station)
++ stations.append(station)
+ return stations
+
+ def _unpack_ship(self, sfcno):
+ """Unpack ship (moving observation) surface data file."""
+ stations = []
+- irow = 0
+- for icol, col_head in enumerate(self.column_headers):
+- if (irow, icol) not in sfcno:
+- continue
+- station = {'STID': col_head.STID,
+- 'STNM': col_head.STNM,
+- 'SLAT': col_head.SLAT,
+- 'SLON': col_head.SLON,
+- 'SELV': col_head.SELV,
+- 'STAT': col_head.STAT,
+- 'COUN': col_head.COUN,
+- 'STD2': col_head.STD2,
+- 'SPRI': col_head.SPRI,
+- 'DATE': col_head.DATE,
+- 'TIME': col_head.TIME,
+- }
++ for irow, icol in sfcno: # irow should always be zero
++ col_head = self.column_headers[icol]
++ station = {
++ 'STID': col_head.STID,
++ 'STNM': col_head.STNM,
++ 'SLAT': col_head.SLAT,
++ 'SLON': col_head.SLON,
++ 'SELV': col_head.SELV,
++ 'STAT': col_head.STAT,
++ 'COUN': col_head.COUN,
++ 'STD2': col_head.STD2,
++ 'SPRI': col_head.SPRI,
++ 'DATE': col_head.DATE,
++ 'TIME': col_head.TIME,
++ }
+ for iprt, part in enumerate(self.parts):
+ pointer = (self.prod_desc.data_block_ptr
+ + (irow * self.prod_desc.columns * self.prod_desc.parts)
+@@ -2554,69 +2534,69 @@ class GempakSurface(GempakFile):
+ def _unpack_standard(self, sfcno):
+ """Unpack a standard surface data file."""
+ stations = []
+- for irow, row_head in enumerate(self.row_headers):
+- for icol, col_head in enumerate(self.column_headers):
+- if (irow, icol) not in sfcno:
++ for irow, icol in sfcno:
++ row_head = self.row_headers[irow]
++ col_head = self.column_headers[icol]
++ station = {
++ 'STID': col_head.STID,
++ 'STNM': col_head.STNM,
++ 'SLAT': col_head.SLAT,
++ 'SLON': col_head.SLON,
++ 'SELV': col_head.SELV,
++ 'STAT': col_head.STAT,
++ 'COUN': col_head.COUN,
++ 'STD2': col_head.STD2,
++ 'SPRI': col_head.SPRI,
++ 'DATE': row_head.DATE,
++ 'TIME': row_head.TIME,
++ }
++ for iprt, part in enumerate(self.parts):
++ pointer = (self.prod_desc.data_block_ptr
++ + (irow * self.prod_desc.columns * self.prod_desc.parts)
++ + (icol * self.prod_desc.parts + iprt))
++ self._buffer.jump_to(self._start, _word_to_position(pointer))
++ self.data_ptr = self._buffer.read_int(4, self.endian, False)
++ if not self.data_ptr:
+ continue
+- station = {'STID': col_head.STID,
+- 'STNM': col_head.STNM,
+- 'SLAT': col_head.SLAT,
+- 'SLON': col_head.SLON,
+- 'SELV': col_head.SELV,
+- 'STAT': col_head.STAT,
+- 'COUN': col_head.COUN,
+- 'STD2': col_head.STD2,
+- 'SPRI': col_head.SPRI,
+- 'DATE': row_head.DATE,
+- 'TIME': row_head.TIME,
+- }
+- for iprt, part in enumerate(self.parts):
+- pointer = (self.prod_desc.data_block_ptr
+- + (irow * self.prod_desc.columns * self.prod_desc.parts)
+- + (icol * self.prod_desc.parts + iprt))
+- self._buffer.jump_to(self._start, _word_to_position(pointer))
+- self.data_ptr = self._buffer.read_int(4, self.endian, False)
+- if not self.data_ptr:
+- continue
+- self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
+- self.data_header_length = self._buffer.read_int(4, self.endian, False)
+- data_header = self._buffer.set_mark()
+- self._buffer.jump_to(data_header,
+- _word_to_position(part.header_length + 1))
+- lendat = self.data_header_length - part.header_length
+-
+- fmt_code = {
+- DataTypes.real: 'f',
+- DataTypes.realpack: 'i',
+- DataTypes.character: 's',
+- }.get(part.data_type)
+-
+- if fmt_code is None:
+- raise NotImplementedError(f'No methods for data type {part.data_type}')
+-
+- if fmt_code == 's':
+- lendat *= BYTES_PER_WORD
+-
+- packed_buffer = (
+- self._buffer.read_struct(
+- struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
+- )
++ self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
++ self.data_header_length = self._buffer.read_int(4, self.endian, False)
++ data_header = self._buffer.set_mark()
++ self._buffer.jump_to(data_header,
++ _word_to_position(part.header_length + 1))
++ lendat = self.data_header_length - part.header_length
++
++ fmt_code = {
++ DataTypes.real: 'f',
++ DataTypes.realpack: 'i',
++ DataTypes.character: 's',
++ }.get(part.data_type)
++
++ if fmt_code is None:
++ raise NotImplementedError(f'No methods for data type {part.data_type}')
++
++ if fmt_code == 's':
++ lendat *= BYTES_PER_WORD
++
++ packed_buffer = (
++ self._buffer.read_struct(
++ struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
+ )
++ )
+
+- parameters = self.parameters[iprt]
++ parameters = self.parameters[iprt]
+
+- if part.data_type == DataTypes.realpack:
+- unpacked = self._unpack_real(packed_buffer, parameters, lendat)
+- for iprm, param in enumerate(parameters['name']):
+- station[param] = unpacked[iprm]
+- elif part.data_type == DataTypes.character:
+- for iprm, param in enumerate(parameters['name']):
+- station[param] = self._decode_strip(packed_buffer[iprm])
+- else:
+- for iprm, param in enumerate(parameters['name']):
+- station[param] = packed_buffer[iprm]
++ if part.data_type == DataTypes.realpack:
++ unpacked = self._unpack_real(packed_buffer, parameters, lendat)
++ for iprm, param in enumerate(parameters['name']):
++ station[param] = unpacked[iprm]
++ elif part.data_type == DataTypes.character:
++ for iprm, param in enumerate(parameters['name']):
++ station[param] = self._decode_strip(packed_buffer[iprm])
++ else:
++ for iprm, param in enumerate(parameters['name']):
++ station[param] = packed_buffer[iprm]
+
+- stations.append(station)
++ stations.append(station)
+ return stations
+
+ @staticmethod
+@@ -2793,7 +2773,7 @@ class GempakSurface(GempakFile):
+ country = [c.upper() for c in country]
+
+ # Figure out which columns to extract from the file
+- matched = self._sfinfo.copy()
++ matched = sorted(self._sfinfo)
+
+ if station_id is not None:
+ matched = filter(
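
A pattern repeated throughout this file: _gdinfo, _sninfo and _sfinfo become sets (deduplicating entries as they are collected) and sorted() is applied at every access point, so callers still see a deterministic list. Because the entries are namedtuples, sorting compares fields left to right; a minimal sketch:

    from collections import namedtuple

    # Hypothetical two-field record standing in for Grid/Sounding/Surface:
    Rec = namedtuple('Rec', ['no', 'dattim'])
    info = {Rec(1, '240324/1200'), Rec(0, '240324/1200'), Rec(0, '240324/1200')}
    print(sorted(info))   # two records, ordered by 'no'; duplicate removed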
+diff --git a/src/metpy/io/gini.py b/src/metpy/io/gini.py
+index 8a09cb6..f870ceb 100644
+--- a/src/metpy/io/gini.py
++++ b/src/metpy/io/gini.py
+@@ -18,7 +18,6 @@ from xarray.backends import BackendEntrypoint
+ from xarray.backends.common import AbstractDataStore
+ from xarray.coding.times import CFDatetimeCoder
+ from xarray.coding.variables import CFMaskCoder
+-from xarray.core.utils import FrozenDict
+
+ from ._tools import Bits, IOBuffer, NamedStruct, open_as_needed, zlib_decompress_all_frames
+ from ..package_tools import Exporter
+@@ -368,7 +367,7 @@ class GiniFile(AbstractDataStore):
+ variables.extend(self._make_coord_vars())
+ variables.extend(self._make_data_vars())
+
+- return FrozenDict(variables)
++ return dict(variables)
+
+ def get_attrs(self):
+ """Get the global attributes.
+@@ -376,8 +375,8 @@ class GiniFile(AbstractDataStore):
+ This is used by `xarray.open_dataset`.
+
+ """
+- return FrozenDict(satellite=self.prod_desc.creating_entity,
+- sector=self.prod_desc.sector_id)
++ return {'satellite': self.prod_desc.creating_entity,
++ 'sector': self.prod_desc.sector_id}
+
+
+ class GiniXarrayBackend(BackendEntrypoint):
+diff --git a/src/metpy/io/metar.py b/src/metpy/io/metar.py
+index c7d92fe..7cd7b22 100644
+--- a/src/metpy/io/metar.py
++++ b/src/metpy/io/metar.py
+@@ -348,7 +348,8 @@ def parse_metar_file(filename, *, year=None, month=None):
+ ----------
+ filename : str or file-like object
+ If str, the name of the file to be opened. If `filename` is a file-like object,
+- this will be read from directly.
++ this will be read from directly and needs to be opened in text mode (i.e. ``read()``
++ needs to return a string, not bytes).
+ year : int, optional
+ Year in which observation was taken, defaults to current year. Keyword-only argument.
+ month : int, optional
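
A usage sketch matching the clarified docstring (the file name is hypothetical); the key point is text mode, so read() yields str:

    from metpy.io import parse_metar_file

    with open('metars.txt') as f:   # text mode: read() returns str, not bytes
        df = parse_metar_file(f)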
+diff --git a/src/metpy/io/nexrad.py b/src/metpy/io/nexrad.py
+index f49b361..8125b32 100644
+--- a/src/metpy/io/nexrad.py
++++ b/src/metpy/io/nexrad.py
+@@ -545,12 +545,17 @@ class Level2File:
+ attr = f'VCPAT{num}'
+ dat = self.rda[attr]
+ vcp_hdr = self.vcp_fmt.unpack_from(dat, 0)
+- off = self.vcp_fmt.size
+- els = []
+- for _ in range(vcp_hdr.num_el_cuts):
+- els.append(self.vcp_el_fmt.unpack_from(dat, off))
+- off += self.vcp_el_fmt.size
+- self.rda[attr] = vcp_hdr._replace(els=els)
++ # At some point these got changed to spares, so only try to parse the rest if
++ # it looks like the right data.
++ if vcp_hdr.num == num and 0 < 2 * vcp_hdr.size_hw <= len(dat):
++ off = self.vcp_fmt.size
++ els = []
++ for _ in range(vcp_hdr.num_el_cuts):
++ els.append(self.vcp_el_fmt.unpack_from(dat, off))
++ off += self.vcp_el_fmt.size
++ self.rda[attr] = vcp_hdr._replace(els=els)
++ else: # Otherwise this is just spare and we should dump
++ self.rda.pop(attr)
+
+ msg31_data_hdr_fmt = NamedStruct([('stid', '4s'), ('time_ms', 'L'),
+ ('date', 'H'), ('az_num', 'H'),
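
The new guard reads: the stored pattern number must match the requested one, and the size field (presumably counted in 16-bit halfwords, hence the factor of 2) must describe a block that actually fits in the bytes read; otherwise the VCPAT attribute is spare data and is dropped. A compressed sketch of the check with hypothetical header values:

    # Hypothetical values standing in for vcp_hdr.num, vcp_hdr.size_hw, len(dat):
    num, size_hw, data_len = 21, 90, 212
    looks_valid = (num == 21) and (0 < 2 * size_hw <= data_len)   # True here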
+@@ -717,15 +722,8 @@ def float16(val):
+ exp = (val >> 10) & 0x1F
+ sign = val >> 15
+
+- if exp:
+- value = 2 ** (exp - 16) * (1 + float(frac) / 2**10)
+- else:
+- value = float(frac) / 2**9
+-
+- if sign:
+- value *= -1
+-
+- return value
++ value = 2 ** (exp - 16) * (1 + float(frac) / 2**10) if exp else float(frac) / 2**9
++ return -value if sign else value
+
+
+ def float32(short1, short2):
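
The condensed float16 decodes NEXRAD's 16-bit float layout: 1 sign bit, 5 exponent bits (with the bias of 16 used above), 10 fraction bits. A standalone walk-through for one hypothetical word:

    val = 0x4248                        # hypothetical encoded 16-bit value
    frac = val & 0x03FF                 # low 10 bits: fraction (584 here)
    exp = (val >> 10) & 0x1F            # next 5 bits: exponent (16 here)
    sign = val >> 15                    # top bit: sign (0 here)
    value = 2 ** (exp - 16) * (1 + frac / 2**10) if exp else frac / 2**9
    value = -value if sign else value   # ~1.5703 for this input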
+@@ -1850,10 +1848,10 @@ class Level3File:
+ log.debug('Symbology block info: %s', blk)
+
+ self.sym_block = []
+- assert blk.divider == -1, ('Bad divider for symbology block: {:d} should be -1'
+- .format(blk.divider))
+- assert blk.block_id == 1, ('Bad block ID for symbology block: {:d} should be 1'
+- .format(blk.block_id))
++ assert blk.divider == -1, (f'Bad divider for symbology block: {blk.divider} should '
++ 'be -1')
++ assert blk.block_id == 1, (f'Bad block ID for symbology block: {blk.block_id} should '
++ 'be 1')
+ for _ in range(blk.nlayer):
+ layer_hdr = self._buffer.read_struct(self.sym_layer_fmt)
+ assert layer_hdr.divider == -1
+@@ -1874,10 +1872,10 @@ class Level3File:
+ def _unpack_graphblock(self, start, offset):
+ self._buffer.jump_to(start, offset)
+ hdr = self._buffer.read_struct(self.graph_block_fmt)
+- assert hdr.divider == -1, ('Bad divider for graphical block: {:d} should be -1'
+- .format(hdr.divider))
+- assert hdr.block_id == 2, ('Bad block ID for graphical block: {:d} should be 1'
+- .format(hdr.block_id))
++ assert hdr.divider == -1, (f'Bad divider for graphical block: {hdr.divider} should '
++ 'be -1')
++ assert hdr.block_id == 2, (f'Bad block ID for graphical block: {hdr.block_id} should '
++ 'be 2')
+ self.graph_pages = []
+ for page in range(hdr.num_pages):
+ page_num = self._buffer.read_int(2, 'big', signed=False)
+diff --git a/src/metpy/plots/declarative.py b/src/metpy/plots/declarative.py
+index 5f5cd9c..843c82d 100644
+--- a/src/metpy/plots/declarative.py
++++ b/src/metpy/plots/declarative.py
+@@ -1823,13 +1823,8 @@ class PlotGeometry(MetPyHasTraits):
+ """
+ color = proposal['value']
+
+- if isinstance(color, str):
+- color = [color]
+ # `color` must be a collection if it is not a string
+- else:
+- color = list(color)
+-
+- return color
++ return [color] if isinstance(color, str) else list(color)
+
+ @staticmethod
+ @validate('labels')
+@@ -1877,10 +1872,7 @@ class PlotGeometry(MetPyHasTraits):
+ geo_obj = geo_obj.geoms[label_hash % len(geo_obj.geoms)]
+
+ # Get the list of coordinates of the polygon/line/point
+- if isinstance(geo_obj, Polygon):
+- coords = geo_obj.exterior.coords
+- else:
+- coords = geo_obj.coords
++ coords = geo_obj.exterior.coords if isinstance(geo_obj, Polygon) else geo_obj.coords
+
+ return coords[label_hash % len(coords)]
+
+@@ -1990,10 +1982,7 @@ class PlotGeometry(MetPyHasTraits):
+
+ # If polygon, put label directly on edge of polygon. If line or point, put
+ # label slightly below line/point.
+- if isinstance(geo_obj, (MultiPolygon, Polygon)):
+- offset = (0, 0)
+- else:
+- offset = (0, -12)
++ offset = (0, 0) if isinstance(geo_obj, (MultiPolygon, Polygon)) else (0, -12)
+
+ # Finally, draw the label
+ self._draw_label(label, lon, lat, fontcolor, fontoutline, offset)
+diff --git a/src/metpy/units.py b/src/metpy/units.py
+index 9ae7c2c..0ed468e 100644
+--- a/src/metpy/units.py
++++ b/src/metpy/units.py
+@@ -82,6 +82,7 @@ def setup_registry(reg):
+ '= degreeN')
+ reg.define('degrees_east = degree = degrees_E = degreesE = degree_east = degree_E '
+ '= degreeE')
++ reg.define('dBz = 1e-18 m^3; logbase: 10; logfactor: 10 = dBZ')
+
+ # Alias geopotential meters (gpm) to just meters
+ reg.define('@alias meter = gpm')
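
The new dBz definition registers radar reflectivity as a pint logarithmic unit referenced to 1e-18 m^3, i.e. 1 mm^6/m^3. A sketch of the expected round-trip, assuming pint's usual handling of log units:

    from metpy.units import units

    z = units.Quantity(40., 'dBZ')
    print(z.to('mm^6 / m^3'))   # expected: ~10000 millimeter^6 / meter^3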
+diff --git a/src/metpy/xarray.py b/src/metpy/xarray.py
+index 4030a03..ab16f76 100644
+--- a/src/metpy/xarray.py
++++ b/src/metpy/xarray.py
+@@ -115,10 +115,10 @@ class MetPyDataArrayAccessor:
+ >>> temperature = xr.DataArray([[0, 1], [2, 3]] * units.degC, dims=('lat', 'lon'),
+ ... coords={'lat': [40, 41], 'lon': [-105, -104]})
+ >>> temperature.metpy.x
+- <xarray.DataArray 'lon' (lon: 2)>
++ <xarray.DataArray 'lon' (lon: 2)> Size: 16B
+ array([-105, -104])
+ Coordinates:
+- * lon (lon) int64 -105 -104
++ * lon (lon) int64 16B -105 -104
+ Attributes:
+ _metpy_axis: x,longitude
+
+@@ -338,15 +338,16 @@ class MetPyDataArrayAccessor:
+ def _generate_coordinate_map(self):
+ """Generate a coordinate map via CF conventions and other methods."""
+ coords = self._data_array.coords.values()
+- # Parse all the coordinates, attempting to identify x, longitude, y, latitude,
+- # vertical, time
+- coord_lists = {'time': [], 'vertical': [], 'y': [], 'latitude': [], 'x': [],
+- 'longitude': []}
++ # Parse all the coordinates, attempting to identify longitude, latitude, x, y,
++ # time, vertical, in that order.
++ coord_lists = {'longitude': [], 'latitude': [], 'x': [], 'y': [], 'time': [],
++ 'vertical': []}
+ for coord_var in coords:
+ # Identify the coordinate type using check_axis helper
+ for axis in coord_lists:
+ if check_axis(coord_var, axis):
+ coord_lists[axis].append(coord_var)
++ break # Ensure a coordinate variable only goes to one axis
+
+ # Fill in x/y with longitude/latitude if x/y not otherwise present
+ for geometric, graticule in (('y', 'latitude'), ('x', 'longitude')):
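
The added break matters because check_axis can accept one coordinate for several axis types (a longitude variable may also pass the looser x test); scanning the most specific types first and stopping at the first hit keeps each coordinate on exactly one axis. A compressed sketch of the intent:

    # Hypothetical check results for a 'lon' coordinate:
    matches = {'longitude': True, 'x': True}    # two checks would fire
    for axis in ('longitude', 'latitude', 'x', 'y', 'time', 'vertical'):
        if matches.get(axis, False):
            print(axis)   # 'longitude' claims it; 'x' never sees it
            break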
+diff --git a/tests/calc/test_basic.py b/tests/calc/test_basic.py
+index 4dcaa56..dc8770e 100644
+--- a/tests/calc/test_basic.py
++++ b/tests/calc/test_basic.py
+@@ -104,7 +104,7 @@ def test_direction_with_north_and_calm(array_type):
+ def test_direction_dimensions():
+ """Verify wind_direction returns degrees."""
+ d = wind_direction(3. * units('m/s'), 4. * units('m/s'))
+- assert str(d.units) == 'degree'
++ assert d.units == units('degree')
+
+
+ def test_oceanographic_direction(array_type):
+diff --git a/tests/calc/test_thermo.py b/tests/calc/test_thermo.py
+index 4cd9c60..5e2cf1f 100644
+--- a/tests/calc/test_thermo.py
++++ b/tests/calc/test_thermo.py
+@@ -1747,14 +1747,14 @@ def test_mixing_ratio_dimensions():
+ """Verify mixing ratio returns a dimensionless number."""
+ p = 998. * units.mbar
+ e = 73.75 * units.hPa
+- assert str(mixing_ratio(e, p).units) == 'dimensionless'
++ assert mixing_ratio(e, p).units == units('dimensionless')
+
+
+ def test_saturation_mixing_ratio_dimensions():
+ """Verify saturation mixing ratio returns a dimensionless number."""
+ p = 998. * units.mbar
+ temp = 20 * units.celsius
+- assert str(saturation_mixing_ratio(p, temp).units) == 'dimensionless'
++ assert saturation_mixing_ratio(p, temp).units == units('dimensionless')
+
+
+ def test_mixing_ratio_from_rh_dimensions():
+@@ -1762,8 +1762,8 @@ def test_mixing_ratio_from_rh_dimensions():
+ p = 1000. * units.mbar
+ temperature = 0. * units.degC
+ rh = 100. * units.percent
+- assert (str(mixing_ratio_from_relative_humidity(p, temperature, rh).units)
+- == 'dimensionless')
++ assert (mixing_ratio_from_relative_humidity(p, temperature, rh).units
++ == units('dimensionless'))
+
+
+ @pytest.fixture
+@@ -1936,9 +1936,9 @@ def test_dewpoint_specific_humidity_old_signature():
+ p = 1013.25 * units.mbar
+ temperature = 20. * units.degC
+ q = 0.012 * units.dimensionless
+- with pytest.deprecated_call(match='Temperature argument'):
+- with pytest.raises(ValueError, match='changed in version'):
+- dewpoint_from_specific_humidity(q, temperature, p)
++ with (pytest.deprecated_call(match='Temperature argument'),
++ pytest.raises(ValueError, match='changed in version')):
++ dewpoint_from_specific_humidity(q, temperature, p)
+
+
+ def test_dewpoint_specific_humidity_kwargs():
+diff --git a/tests/test_xarray.py b/tests/test_xarray.py
+index 212c07a..a5364f3 100644
+--- a/tests/test_xarray.py
++++ b/tests/test_xarray.py
+@@ -407,10 +407,10 @@ def test_resolve_axis_conflict_double_lonlat(test_ds_generic):
+ test_ds_generic['d'].attrs['_CoordinateAxisType'] = 'Lat'
+ test_ds_generic['e'].attrs['_CoordinateAxisType'] = 'Lon'
+
+- with pytest.warns(UserWarning, match='More than one x coordinate'),\
++ with pytest.warns(UserWarning, match=r'More than one \w+ coordinate'),\
+ pytest.raises(AttributeError):
+ test_ds_generic['test'].metpy.x
+- with pytest.warns(UserWarning, match='More than one y coordinate'),\
++ with pytest.warns(UserWarning, match=r'More than one \w+ coordinate'),\
+ pytest.raises(AttributeError):
+ test_ds_generic['test'].metpy.y
+
+@@ -422,10 +422,10 @@ def test_resolve_axis_conflict_double_xy(test_ds_generic):
+ test_ds_generic['d'].attrs['standard_name'] = 'projection_x_coordinate'
+ test_ds_generic['e'].attrs['standard_name'] = 'projection_y_coordinate'
+
+- with pytest.warns(UserWarning, match='More than one x coordinate'),\
++ with pytest.warns(UserWarning, match=r'More than one \w+ coordinate'),\
+ pytest.raises(AttributeError):
+ test_ds_generic['test'].metpy.x
+- with pytest.warns(UserWarning, match='More than one y coordinate'),\
++ with pytest.warns(UserWarning, match=r'More than one \w+ coordinate'),\
+ pytest.raises(AttributeError):
+ test_ds_generic['test'].metpy.y
+
+diff --git a/tests/units/test_units.py b/tests/units/test_units.py
+index c1830a6..713b521 100644
+--- a/tests/units/test_units.py
++++ b/tests/units/test_units.py
+@@ -193,7 +193,7 @@ def test_is_quantity_multiple():
+ def test_gpm_unit():
+ """Test that the gpm unit does alias to meters."""
+ x = 1 * units('gpm')
+- assert str(x.units) == 'meter'
++ assert x.units == units('meter')
+
+
+ def test_assert_nan():
+@@ -210,7 +210,7 @@ def test_assert_nan_checks_units():
+
+ def test_percent_units():
+ """Test that percent sign units are properly parsed and interpreted."""
+- assert str(units('%').units) == 'percent'
++ assert units('%').units == units('percent')
+
+
+ @pytest.mark.parametrize(
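
The unit-test edits in these final hunks all share one idea: compare pint Unit objects rather than their string rendering, which can change across pint releases. A sketch of the pattern:

    from metpy.units import units

    x = 1 * units('gpm')
    assert x.units == units('meter')   # robust, unlike str(x.units) == 'meter'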
=====================================
debian/patches/series
=====================================
@@ -1,2 +1,3 @@
0001-Skip-tests-requiring-internet.patch
0002-Do-not-use-pooch-in-conftest.py.patch
+0003-Fix-compatibility-with-numpy-1.26.patch
View it on GitLab: https://salsa.debian.org/debian-gis-team/metpy/-/commit/1af7f7cc766060e6a2f3b4b7cac069837b385728