[Git][debian-gis-team/flox][master] 4 commits: New upstream version 0.8.2

Antonio Valentino (@antonio.valentino) gitlab@salsa.debian.org
Sat Nov 11 08:29:38 GMT 2023



Antonio Valentino pushed to branch master at Debian GIS Project / flox


Commits:
c5e9f233 by Antonio Valentino at 2023-11-11T08:17:59+00:00
New upstream version 0.8.2
- - - - -
a44e14ff by Antonio Valentino at 2023-11-11T08:18:00+00:00
Update upstream source from tag 'upstream/0.8.2'

Update to upstream version '0.8.2'
with Debian dir 457e5c704d22f78bfef3203ee955a359f9709ae1
- - - - -
ea0e3b06 by Antonio Valentino at 2023-11-11T08:19:02+00:00
New upstream release

- - - - -
1306583c by Antonio Valentino at 2023-11-11T08:22:27+00:00
Set distribution to unstable

- - - - -


10 changed files:

- asv_bench/benchmarks/reduce.py
- codecov.yml
- debian/changelog
- flox/aggregate_numbagg.py
- flox/aggregations.py
- flox/core.py
- flox/xrutils.py
- pyproject.toml
- tests/test_core.py
- tests/test_xarray.py


Changes:

=====================================
asv_bench/benchmarks/reduce.py
=====================================
@@ -18,7 +18,7 @@ NUMBAGG_FUNCS = ["nansum", "nanmean", "nanmax", "count", "all"]
 numbagg_skip = []
 for name in expected_names:
     numbagg_skip.extend(
-        list((func, expected_names[0], "numbagg") for func in funcs if func not in NUMBAGG_FUNCS)
+        list((func, name, "numbagg") for func in funcs if func not in NUMBAGG_FUNCS)
     )
 
 
@@ -80,7 +80,12 @@ class ChunkReduce1D(ChunkReduce):
         if "numbagg" in args:
             setup_jit()
 
-    @parameterize({"func": ["nansum", "nanmean", "nanmax", "count"], "engine": engines})
+    @parameterize(
+        {
+            "func": ["nansum", "nanmean", "nanmax", "count"],
+            "engine": [e for e in engines if e is not None],
+        }
+    )
     def time_reduce_bare(self, func, engine):
         # TODO: migrate to the other test cases, but we'll have to setup labels
         # appropriately ;(
@@ -105,7 +110,7 @@ class ChunkReduce2D(ChunkReduce):
 class ChunkReduce2DAllAxes(ChunkReduce):
     def setup(self, *args, **kwargs):
         self.array = np.ones((N, N))
-        self.labels = np.repeat(np.arange(N // 5), repeats=5)
+        self.labels = np.repeat(np.arange(N // 5), repeats=5)[np.newaxis, :]
         self.axis = None
         setup_jit()
 

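The first hunk above fixes a loop-variable bug: the skip list was built with expected_names[0] on every pass, so numbagg skips were only ever registered for the first benchmark name. A minimal sketch of the corrected logic (the expected_names and funcs values below are hypothetical; the real lists live in asv_bench/benchmarks/reduce.py):

    NUMBAGG_FUNCS = ["nansum", "nanmean", "nanmax", "count", "all"]
    expected_names = ["time_reduce", "time_reduce_bare"]   # hypothetical names
    funcs = ["nansum", "nanmedian"]                        # hypothetical funcs

    numbagg_skip = []
    for name in expected_names:
        numbagg_skip.extend(
            list((func, name, "numbagg") for func in funcs if func not in NUMBAGG_FUNCS)
        )
    # With the fix, "nanmedian" is skipped for *both* names, not just the first:
    # [("nanmedian", "time_reduce", "numbagg"),
    #  ("nanmedian", "time_reduce_bare", "numbagg")]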

=====================================
codecov.yml
=====================================
@@ -5,9 +5,8 @@ codecov:
 comment: false
 
 ignore:
-  - "benchmarks/*.py"
+  - "asv_bench/benchmarks/*.py"
   - "tests/*.py"
-  - "setup.py"
 
 coverage:
   precision: 2


=====================================
debian/changelog
=====================================
@@ -1,3 +1,9 @@
+flox (0.8.2-1) unstable; urgency=medium
+
+  * New upstream release.
+
+ -- Antonio Valentino <antonio.valentino@tiscali.it>  Sat, 11 Nov 2023 08:22:09 +0000
+
 flox (0.8.1-2) unstable; urgency=medium
 
   * debian/rules:


=====================================
flox/aggregate_numbagg.py
=====================================
@@ -4,64 +4,75 @@ import numbagg
 import numbagg.grouped
 import numpy as np
 
+DEFAULT_FILL_VALUE = {
+    "nansum": 0,
+    "nanmean": np.nan,
+    "nanvar": np.nan,
+    "nanstd": np.nan,
+    "nanmin": np.nan,
+    "nanmax": np.nan,
+    "nanany": False,
+    "nanall": False,
+    "nansum_of_squares": 0,
+    "nanprod": 1,
+    "nancount": 0,
+    "nanargmax": np.nan,
+    "nanargmin": np.nan,
+    "nanfirst": np.nan,
+    "nanlast": np.nan,
+}
+
+CAST_TO = {
+    # "nansum": {np.bool_: np.int64},
+    "nanmean": {np.int_: np.float64},
+    "nanvar": {np.int_: np.float64},
+    "nanstd": {np.int_: np.float64},
+}
+
+
+FILLNA = {"nansum": 0, "nanprod": 1}
+
 
 def _numbagg_wrapper(
     group_idx,
     array,
     *,
+    func,
     axis=-1,
-    func="sum",
     size=None,
     fill_value=None,
     dtype=None,
-    numbagg_func=None,
 ):
-    return numbagg_func(
-        array,
-        group_idx,
-        axis=axis,
-        num_labels=size,
-        # The following are unsupported
-        # fill_value=fill_value,
-        # dtype=dtype,
-    )
+    cast_to = CAST_TO.get(func, None)
+    if cast_to:
+        for from_, to_ in cast_to.items():
+            if np.issubdtype(array.dtype, from_):
+                array = array.astype(to_)
 
+    func_ = getattr(numbagg.grouped, f"group_{func}")
 
-def nansum(group_idx, array, *, axis=-1, size=None, fill_value=None, dtype=None):
-    if np.issubdtype(array.dtype, np.bool_):
-        array = array.astype(np.in64)
-    return numbagg.grouped.group_nansum(
+    result = func_(
         array,
         group_idx,
         axis=axis,
         num_labels=size,
+        # The following are unsupported
         # fill_value=fill_value,
         # dtype=dtype,
-    )
-
+    ).astype(dtype, copy=False)
 
-def nanmean(group_idx, array, *, axis=-1, size=None, fill_value=None, dtype=None):
-    if np.issubdtype(array.dtype, np.int_):
-        array = array.astype(np.float64)
-    return numbagg.grouped.group_nanmean(
-        array,
-        group_idx,
-        axis=axis,
-        num_labels=size,
-        # fill_value=fill_value,
-        # dtype=dtype,
-    )
+    return result
 
 
 def nanvar(group_idx, array, *, axis=-1, size=None, fill_value=None, dtype=None, ddof=0):
     assert ddof != 0
-    if np.issubdtype(array.dtype, np.int_):
-        array = array.astype(np.float64)
-    return numbagg.grouped.group_nanvar(
-        array,
+
+    return _numbagg_wrapper(
         group_idx,
+        array,
         axis=axis,
-        num_labels=size,
+        size=size,
+        func="nanvar",
         # ddof=0,
         # fill_value=fill_value,
         # dtype=dtype,
@@ -70,30 +81,33 @@ def nanvar(group_idx, array, *, axis=-1, size=None, fill_value=None, dtype=None,
 
 def nanstd(group_idx, array, *, axis=-1, size=None, fill_value=None, dtype=None, ddof=0):
     assert ddof != 0
-    if np.issubdtype(array.dtype, np.int_):
-        array = array.astype(np.float64)
-    return numbagg.grouped.group_nanstd(
-        array,
+
+    return _numbagg_wrapper(
         group_idx,
+        array,
         axis=axis,
-        num_labels=size,
+        size=size,
+        func="nanstd"
         # ddof=0,
         # fill_value=fill_value,
         # dtype=dtype,
     )
 
 
-nansum_of_squares = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nansum_of_squares)
-nanlen = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nancount)
-nanprod = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanprod)
-nanfirst = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanfirst)
-nanlast = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanlast)
-# nanargmax = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanargmax)
-# nanargmin = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanargmin)
-nanmax = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanmax)
-nanmin = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanmin)
-any = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanany)
-all = partial(_numbagg_wrapper, numbagg_func=numbagg.grouped.group_nanall)
+nansum = partial(_numbagg_wrapper, func="nansum")
+nanmean = partial(_numbagg_wrapper, func="nanmean")
+nanprod = partial(_numbagg_wrapper, func="nanprod")
+nansum_of_squares = partial(_numbagg_wrapper, func="nansum_of_squares")
+nanlen = partial(_numbagg_wrapper, func="nancount")
+nanfirst = partial(_numbagg_wrapper, func="nanfirst")
+nanlast = partial(_numbagg_wrapper, func="nanlast")
+# nanargmax = partial(_numbagg_wrapper, func="nanargmax")
+# nanargmin = partial(_numbagg_wrapper, func="nanargmin")
+nanmax = partial(_numbagg_wrapper, func="nanmax")
+nanmin = partial(_numbagg_wrapper, func="nanmin")
+any = partial(_numbagg_wrapper, func="nanany")
+all = partial(_numbagg_wrapper, func="nanall")
 
 # sum = nansum
 # mean = nanmean

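The rewrite above collapses the per-function boilerplate into a single _numbagg_wrapper that dispatches by name via getattr(numbagg.grouped, f"group_{func}"), promotes integer inputs for nanmean/nanvar/nanstd through the CAST_TO table, and casts the result back to the requested dtype. A minimal sketch of the same dispatch-and-cast pattern, using plain numpy reductions as hypothetical stand-ins so it runs without numbagg:

    import numpy as np

    GROUPED_FUNCS = {"nansum": np.nansum, "nanmean": np.nanmean}  # stand-ins
    CAST_TO = {"nanmean": {np.int_: np.float64}}

    def wrapper(array, *, func, dtype=None):
        # Promote dtypes the kernel cannot represent exactly (ints have no NaN)
        for from_, to_ in CAST_TO.get(func, {}).items():
            if np.issubdtype(array.dtype, from_):
                array = array.astype(to_)
        result = GROUPED_FUNCS[func](array)  # dispatch by name
        return result.astype(dtype, copy=False) if dtype is not None else result

    print(wrapper(np.array([1, 2, 3]), func="nanmean"))  # ints promoted -> 2.0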

=====================================
flox/aggregations.py
=====================================
@@ -3,7 +3,7 @@ from __future__ import annotations
 import copy
 import warnings
 from functools import partial
-from typing import TYPE_CHECKING, Any, Callable, TypedDict
+from typing import TYPE_CHECKING, Any, Callable, Literal, TypedDict
 
 import numpy as np
 from numpy.typing import DTypeLike
@@ -13,6 +13,7 @@ from . import xrdtypes as dtypes
 
 if TYPE_CHECKING:
     FuncTuple = tuple[Callable | str, ...]
+    OptionalFuncTuple = tuple[Callable | str | None, ...]
 
 
 def _is_arg_reduction(func: str | Aggregation) -> bool:
@@ -147,13 +148,12 @@ class Aggregation:
         chunk: str | FuncTuple | None,
         combine: str | FuncTuple | None,
         preprocess: Callable | None = None,
-        aggregate: Callable | None = None,
         finalize: Callable | None = None,
         fill_value=None,
         final_fill_value=dtypes.NA,
         dtypes=None,
         final_dtype: DTypeLike | None = None,
-        reduction_type="reduce",
+        reduction_type: Literal["reduce", "argreduce"] = "reduce",
     ):
         """
         Blueprint for computing grouped aggregations.
@@ -204,13 +204,11 @@ class Aggregation:
         self.reduction_type = reduction_type
         self.numpy: FuncTuple = (numpy,) if numpy else (self.name,)
         # initialize blockwise reduction
-        self.chunk: FuncTuple = _atleast_1d(chunk)
+        self.chunk: OptionalFuncTuple = _atleast_1d(chunk)
         # how to aggregate results after first round of reduction
-        self.combine: FuncTuple = _atleast_1d(combine)
+        self.combine: OptionalFuncTuple = _atleast_1d(combine)
         # simpler reductions used with the "simple combine" algorithm
-        self.simple_combine: tuple[Callable, ...] = ()
-        # final aggregation
-        self.aggregate: Callable | str = aggregate if aggregate else self.combine[0]
+        self.simple_combine: OptionalFuncTuple = ()
         # finalize results (see mean)
         self.finalize: Callable | None = finalize
 
@@ -249,7 +247,6 @@ class Aggregation:
             self.numpy,
             self.chunk,
             self.combine,
-            self.aggregate,
             self.finalize,
             self.fill_value,
             self.dtype,
@@ -261,7 +258,6 @@ class Aggregation:
                 f"{self.name!r}, fill: {self.fill_value.values()!r}, dtype: {self.dtype}",
                 f"chunk: {self.chunk!r}",
                 f"combine: {self.combine!r}",
-                f"aggregate: {self.aggregate!r}",
                 f"finalize: {self.finalize!r}",
                 f"min_count: {self.min_count!r}",
             )
@@ -284,13 +280,7 @@ count = Aggregation(
 sum_ = Aggregation("sum", chunk="sum", combine="sum", fill_value=0)
 nansum = Aggregation("nansum", chunk="nansum", combine="sum", fill_value=0)
 prod = Aggregation("prod", chunk="prod", combine="prod", fill_value=1, final_fill_value=1)
-nanprod = Aggregation(
-    "nanprod",
-    chunk="nanprod",
-    combine="prod",
-    fill_value=1,
-    final_fill_value=dtypes.NA,
-)
+nanprod = Aggregation("nanprod", chunk="nanprod", combine="prod", fill_value=1)
 
 
 def _mean_finalize(sum_, count):
@@ -584,6 +574,7 @@ def _initialize_aggregation(
     }
 
     # Replace sentinel fill values according to dtype
+    agg.fill_value["user"] = fill_value
     agg.fill_value["intermediate"] = tuple(
         _get_fill_value(dt, fv)
         for dt, fv in zip(agg.dtype["intermediate"], agg.fill_value["intermediate"])
@@ -618,7 +609,7 @@ def _initialize_aggregation(
     else:
         agg.min_count = 0
 
-    simple_combine: list[Callable] = []
+    simple_combine: list[Callable | None] = []
     for combine in agg.combine:
         if isinstance(combine, str):
             if combine in ["nanfirst", "nanlast"]:

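The typing changes above encode a real invariant: blockwise-only aggregations (e.g. median or quantile) carry chunk=(None,), so the tuple element type must admit None and consumers must narrow it before calling, as _grouped_combine now does. A short sketch of the pattern (names mirror the diff; the loop body is elided):

    from typing import Callable

    OptionalFuncTuple = tuple[Callable | str | None, ...]  # as added in the diff

    chunk: OptionalFuncTuple = (None,)     # e.g. a blockwise-only aggregation
    combine: OptionalFuncTuple = ("sum",)

    for combine_ in combine:
        assert combine_ is not None        # narrow the Optional before use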

=====================================
flox/core.py
=====================================
@@ -86,6 +86,26 @@ FactorProps = namedtuple("FactorProps", "offset_group nan_sentinel nanmask")
 DUMMY_AXIS = -2
 
 
+def _postprocess_numbagg(result, *, func, fill_value, size, seen_groups):
+    """Account for numbagg not providing a fill_value kwarg."""
+    from .aggregate_numbagg import DEFAULT_FILL_VALUE
+
+    if not isinstance(func, str) or func not in DEFAULT_FILL_VALUE:
+        return result
+    # The condition needs to be
+    # len(found_groups) < size; if so we mask with fill_value (?)
+    default_fv = DEFAULT_FILL_VALUE[func]
+    needs_masking = fill_value is not None and not np.array_equal(
+        fill_value, default_fv, equal_nan=True
+    )
+    groups = np.arange(size)
+    if needs_masking:
+        mask = np.isin(groups, seen_groups, assume_unique=True, invert=True)
+        if mask.any():
+            result[..., groups[mask]] = fill_value
+    return result
+
+
 def _issorted(arr: np.ndarray) -> bool:
     return bool((arr[:-1] <= arr[1:]).all())
 
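numbagg does not accept a fill_value kwarg, so it fills unseen groups with its own defaults (now recorded in DEFAULT_FILL_VALUE); the helper above then overwrites those slots with the caller's fill_value. A minimal sketch of the masking step, with made-up numbers:

    import numpy as np

    # nansum: numbagg's default fill is 0, but suppose the user asked for NaN
    size, fill_value = 5, np.nan
    seen_groups = np.array([0, 2, 3])             # labels actually present
    result = np.array([1.0, 0.0, 2.0, 3.0, 0.0])  # slots 1 and 4 never seen

    groups = np.arange(size)
    mask = np.isin(groups, seen_groups, assume_unique=True, invert=True)
    if mask.any():
        result[..., groups[mask]] = fill_value
    print(result)  # [ 1. nan  2.  3. nan]
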
@@ -780,18 +800,31 @@ def chunk_reduce(
     group_idx, grps, found_groups_shape, _, size, props = factorize_(
         (by,), axes, expected_groups=(expected_groups,), reindex=reindex, sort=sort
     )
-    groups = grps[0]
+    (groups,) = grps
+
+    # do this *before* possible broadcasting below.
+    # factorize_ has already taken care of offsetting
+    seen_groups = _unique(group_idx)
 
+    order = "C"
     if nax > 1:
         needs_broadcast = any(
             group_idx.shape[ax] != array.shape[ax] and group_idx.shape[ax] == 1
             for ax in range(-nax, 0)
         )
         if needs_broadcast:
+            # This is the dim=... case, it's a lot faster to ravel group_idx
+            # in fortran order since group_idx is then sorted
+            # I'm seeing 400ms -> 23ms for engine="flox"
+            # Of course we are slower to ravel `array` but we avoid argsorting
+            # both `array` *and* `group_idx` in _prepare_for_flox
             group_idx = np.broadcast_to(group_idx, array.shape[-by.ndim :])
+            if engine == "flox":
+                group_idx = group_idx.reshape(-1, order="F")
+                order = "F"
     # always reshape to 1D along group dimensions
     newshape = array.shape[: array.ndim - by.ndim] + (math.prod(array.shape[-by.ndim :]),)
-    array = array.reshape(newshape)
+    array = array.reshape(newshape, order=order)  # type: ignore[call-overload]
     group_idx = group_idx.reshape(-1)
 
     assert group_idx.ndim == 1
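
The fortran-order trick above is easy to see on a toy case: broadcasting sorted 1D labels against an extra axis and raveling in C order interleaves the groups, while F order keeps them sorted, which is what engine="flox" wants:

    import numpy as np

    labels = np.array([[0, 1, 2]])            # sorted labels, shape (1, 3)
    bcast = np.broadcast_to(labels, (2, 3))   # the dim=... broadcast case
    print(bcast.reshape(-1))                  # C order: [0 1 2 0 1 2], unsorted
    print(bcast.reshape(-1, order="F"))       # F order: [0 0 1 1 2 2], sorted
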
@@ -826,10 +859,9 @@ def chunk_reduce(
     for reduction, fv, kw, dt in zip(funcs, fill_values, kwargss, dtypes):
         if empty:
             result = np.full(shape=final_array_shape, fill_value=fv)
+        elif is_nanlen(reduction) and is_nanlen(previous_reduction):
+            result = results["intermediates"][-1]
         else:
-            if is_nanlen(reduction) and is_nanlen(previous_reduction):
-                result = results["intermediates"][-1]
-
             # fill_value here is necessary when reducing with "offset" groups
             kw_func = dict(size=size, dtype=dt, fill_value=fv)
             kw_func.update(kw)
@@ -842,11 +874,22 @@ def chunk_reduce(
                 result = generic_aggregate(
                     group_idx, array, axis=-1, engine=engine, func=reduction, **kw_func
                 ).astype(dt, copy=False)
+            if engine == "numbagg":
+                result = _postprocess_numbagg(
+                    result,
+                    func=reduction,
+                    size=size,
+                    fill_value=fv,
+                    # Unfortunately, we cannot reuse found_groups, it has not
+                    # been "offset" and is really expected_groups in nearly all cases
+                    seen_groups=seen_groups,
+                )
             if np.any(props.nanmask):
                 # remove NaN group label which should be last
                 result = result[..., :-1]
             result = result.reshape(final_array_shape[:-1] + found_groups_shape)
         results["intermediates"].append(result)
+        previous_reduction = reduction
 
     results["groups"] = np.broadcast_to(results["groups"], final_groups_shape)
     return results
@@ -1044,6 +1087,8 @@ def _grouped_combine(
     """Combine intermediates step of tree reduction."""
     from dask.utils import deepmap
 
+    combine = agg.combine
+
     if isinstance(x_chunk, dict):
         # Only one block at final step; skip one extra groupby
         return x_chunk
@@ -1084,7 +1129,8 @@ def _grouped_combine(
             results = chunk_argreduce(
                 array_idx,
                 groups,
-                func=agg.combine[slicer],  # count gets treated specially next
+                # count gets treated specially next
+                func=combine[slicer],  # type: ignore[arg-type]
                 axis=axis,
                 expected_groups=None,
                 fill_value=agg.fill_value["intermediate"][slicer],
@@ -1118,9 +1164,10 @@ def _grouped_combine(
     elif agg.reduction_type == "reduce":
         # Here we reduce the intermediates individually
         results = {"groups": None, "intermediates": []}
-        for idx, (combine, fv, dtype) in enumerate(
-            zip(agg.combine, agg.fill_value["intermediate"], agg.dtype["intermediate"])
+        for idx, (combine_, fv, dtype) in enumerate(
+            zip(combine, agg.fill_value["intermediate"], agg.dtype["intermediate"])
         ):
+            assert combine_ is not None
             array = _conc2(x_chunk, key1="intermediates", key2=idx, axis=axis)
             if array.shape[-1] == 0:
                 # all empty when combined
@@ -1134,7 +1181,7 @@ def _grouped_combine(
                 _results = chunk_reduce(
                     array,
                     groups,
-                    func=combine,
+                    func=combine_,
                     axis=axis,
                     expected_groups=None,
                     fill_value=(fv,),
@@ -1779,8 +1826,13 @@ def _choose_engine(by, agg: Aggregation):
 
     # numbagg only supports nan-skipping reductions
     # without dtype specified
-    if HAS_NUMBAGG and "nan" in agg.name:
-        if not_arg_reduce and dtype is None:
+    has_blockwise_nan_skipping = (agg.chunk[0] is None and "nan" in agg.name) or any(
+        (isinstance(func, str) and "nan" in func) for func in agg.chunk
+    )
+    if HAS_NUMBAGG:
+        if agg.name in ["all", "any"] or (
+            not_arg_reduce and has_blockwise_nan_skipping and dtype is None
+        ):
             return "numbagg"
 
     if not_arg_reduce and (not is_duck_dask_array(by) and _issorted(by)):
@@ -1814,7 +1866,8 @@ def groupby_reduce(
         Array to be reduced, possibly nD
     *by : ndarray or DaskArray
         Array of labels to group over. Must be aligned with ``array`` so that
-        ``array.shape[-by.ndim :] == by.shape``
+        ``array.shape[-by.ndim :] == by.shape`` or any disagreements in that
+        equality check are for dimensions of size 1 in `by`.
     func : {"all", "any", "count", "sum", "nansum", "mean", "nanmean", \
             "max", "nanmax", "min", "nanmin", "argmax", "nanargmax", "argmin", "nanargmin", \
             "quantile", "nanquantile", "median", "nanmedian", "mode", "nanmode", \
@@ -2040,7 +2093,7 @@ def groupby_reduce(
         nax = len(axis_)
 
     # When axis is a subset of possible values; then npg will
-    # apply it to groups that don't exist along a particular axis (for e.g.)
+    # apply the fill_value to groups that don't exist along a particular axis (for e.g.)
     # since these count as a group that is absent. thoo!
     # fill_value applies to all-NaN groups as well as labels in expected_groups that are not found.
     #     The only way to do this consistently is mask out using min_count
@@ -2080,8 +2133,7 @@ def groupby_reduce(
             # TODO: How else to narrow that array.chunks is there?
             assert isinstance(array, DaskArray)
 
-        # TODO: fix typing of FuncTuple in Aggregation
-        if agg.chunk[0] is None and method != "blockwise":  # type: ignore[unreachable]
+        if agg.chunk[0] is None and method != "blockwise":
             raise NotImplementedError(
                 f"Aggregation {agg.name!r} is only implemented for dask arrays when method='blockwise'."
                 f"Received method={method!r}"


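The relaxed alignment rule in the groupby_reduce docstring matches the benchmark change earlier in this push: by may now carry size-1 dimensions that broadcast against array. A hedged sketch using flox's public API (shapes follow the ChunkReduce2DAllAxes setup; the expected output assumes the default axis handling reduces over all broadcast dimensions of by):

    import numpy as np
    from flox import groupby_reduce

    N = 10
    array = np.ones((N, N))
    # shape (1, N): the size-1 leading dim broadcasts against array's rows
    labels = np.repeat(np.arange(N // 5), repeats=5)[np.newaxis, :]
    result, groups = groupby_reduce(array, labels, func="sum")
    # each of the 2 groups covers 5 columns x 10 rows of ones -> [50., 50.]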
=====================================
flox/xrutils.py
=====================================
@@ -339,6 +339,6 @@ def module_available(module: str, minversion: Optional[str] = None) -> bool:
     has = importlib.util.find_spec(module) is not None
     if has:
         mod = importlib.import_module(module)
-        return Version(mod.__version__) < Version(minversion) if minversion is not None else True
+        return Version(mod.__version__) >= Version(minversion) if minversion is not None else True
     else:
         return False

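The one-line xrutils fix flips an inverted comparison: module_available previously returned True only when the installed version was *older* than minversion. A quick check of the corrected semantics, assuming the packaging library that provides the Version class used here:

    from packaging.version import Version

    minversion = "0.3.0"
    assert Version("0.6.0") >= Version(minversion)         # new enough -> available
    assert not (Version("0.2.0") >= Version(minversion))   # too old -> unavailable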

=====================================
pyproject.toml
=====================================
@@ -111,6 +111,7 @@ show_error_codes = true
 warn_unused_ignores = true
 warn_unreachable = true
 enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"]
+exclude=["asv_bench/pkgs"]
 
 [[tool.mypy.overrides]]
 module=[


=====================================
tests/test_core.py
=====================================
@@ -203,7 +203,7 @@ def test_groupby_reduce(
 def gen_array_by(size, func):
     by = np.ones(size[-1])
     rng = np.random.default_rng(12345)
-    array = rng.random(size)
+    array = rng.random(tuple(6 if s == 1 else s for s in size))
     if "nan" in func and "nanarg" not in func:
         array[[1, 4, 5], ...] = np.nan
     elif "nanarg" in func and len(size) > 1:
@@ -222,8 +222,8 @@ def gen_array_by(size, func):
         pytest.param(4, marks=requires_dask),
     ],
 )
+@pytest.mark.parametrize("size", ((1, 12), (12,), (12, 9)))
 @pytest.mark.parametrize("nby", [1, 2, 3])
-@pytest.mark.parametrize("size", ((12,), (12, 9)))
 @pytest.mark.parametrize("add_nan_by", [True, False])
 @pytest.mark.parametrize("func", ALL_FUNCS)
 def test_groupby_reduce_all(nby, size, chunks, func, add_nan_by, engine):
@@ -1566,6 +1566,19 @@ def test_choose_engine(dtype):
         finalize_kwargs=None,
     )
 
+    # count_engine
+    for method in ["all", "any", "count"]:
+        agg = _initialize_aggregation(
+            method,
+            dtype=None,
+            array_dtype=dtype,
+            fill_value=0,
+            min_count=0,
+            finalize_kwargs=None,
+        )
+        engine = _choose_engine(np.array([1, 1, 2, 2]), agg=agg)
+        assert engine == ("numbagg" if HAS_NUMBAGG else "flox")
+
     # sorted by -> flox
     sorted_engine = _choose_engine(np.array([1, 1, 2, 2]), agg=mean)
     assert sorted_engine == ("numbagg" if numbagg_possible else "flox")
@@ -1573,3 +1586,17 @@ def test_choose_engine(dtype):
     assert _choose_engine(np.array([3, 1, 1]), agg=mean) == default
     # argmax does not give engine="flox"
     assert _choose_engine(np.array([1, 1, 2, 2]), agg=argmax) == "numpy"
+
+
+def test_xarray_fill_value_behaviour():
+    bar = np.array([1, 2, 3, np.nan, np.nan, np.nan, 4, 5, np.nan, np.nan])
+    times = np.arange(0, 20, 2)
+    actual, _ = groupby_reduce(bar, times, func="nansum", expected_groups=(np.arange(19),))
+    nan = np.nan
+    # fmt: off
+    expected = np.array(
+        [ 1., nan,  2., nan,  3., nan,  0., nan,  0.,
+         nan,  0., nan,  4., nan,  5., nan,  0., nan,  0.]
+    )
+    # fmt: on
+    assert_equal(expected, actual)


=====================================
tests/test_xarray.py
=====================================
@@ -561,3 +561,24 @@ def test_preserve_multiindex():
     )
 
     assert "region" in hist.coords
+
+
+def test_fill_value_xarray_behaviour():
+    times = pd.date_range("2000-01-01", freq="6H", periods=10)
+    ds = xr.Dataset(
+        {
+            "bar": (
+                "time",
+                [1, 2, 3, np.nan, np.nan, np.nan, 4, 5, np.nan, np.nan],
+                {"meta": "data"},
+            ),
+            "time": times,
+        }
+    )
+
+    expected_time = pd.date_range("2000-01-01", freq="3H", periods=19)
+    expected = ds.reindex(time=expected_time)
+    expected = ds.resample(time="3H").sum()
+    with xr.set_options(use_flox=True):
+        actual = ds.resample(time="3H").sum()
+    xr.testing.assert_identical(expected, actual)



View it on GitLab: https://salsa.debian.org/debian-gis-team/flox/-/compare/3e70c616d3f23d4c438fc44ff170787223ca17df...1306583c9cf398e40bee6215c6dc398b10660571
