[Git][debian-gis-team/flox][upstream] New upstream version 0.11.0
Antonio Valentino (@antonio.valentino)
gitlab@salsa.debian.org
Wed Feb 4 22:15:04 GMT 2026
Antonio Valentino pushed to branch upstream at Debian GIS Project / flox
Commits:
e273d5a2 by Antonio Valentino at 2026-02-04T20:50:26+00:00
New upstream version 0.11.0
- - - - -
21 changed files:
- .github/workflows/benchmarks.yml
- .github/workflows/ci-additional.yaml
- .github/workflows/ci.yaml
- .github/workflows/pypi.yaml
- .github/workflows/testpypi-release.yaml
- .github/workflows/upstream-dev-ci.yaml
- .pre-commit-config.yaml
- .readthedocs.yml
- docs/source/implementation.md
- docs/source/intro.md
- docs/source/user-stories/climatology.ipynb
- docs/source/xarray.md
- flox/aggregate_flox.py
- flox/factorize.py
- flox/reindex.py
- flox/xrutils.py
- pyproject.toml
- tests/test_core.py
- tests/test_properties.py
- − uv-upstream.toml
- uv.lock
Changes:
=====================================
.github/workflows/benchmarks.yml
=====================================
@@ -67,7 +67,7 @@ jobs:
cp benchmarks/README_CI.md benchmarks.log .asv/results/
working-directory: ${{ env.ASV_DIR }}
- - uses: actions/upload-artifact@v5
+ - uses: actions/upload-artifact@v6
if: always()
with:
name: asv-benchmark-results-${{ runner.os }}
=====================================
.github/workflows/ci-additional.yaml
=====================================
@@ -36,7 +36,7 @@ jobs:
needs: detect-ci-trigger
if: needs.detect-ci-trigger.outputs.triggered == 'false'
env:
- PYTHON_VERSION: "3.13"
+ PYTHON_VERSION: "3.14"
steps:
- uses: actions/checkout@v6
@@ -63,7 +63,7 @@ jobs:
--ignore flox/tests \
--cov=./ --cov-report=xml
- name: Upload code coverage to Codecov
- uses: codecov/codecov-action@v5.5.1
+ uses: codecov/codecov-action@v5.5.2
with:
file: ./coverage.xml
flags: unittests
@@ -77,7 +77,7 @@ jobs:
needs: detect-ci-trigger
if: needs.detect-ci-trigger.outputs.triggered == 'false'
env:
- PYTHON_VERSION: "3.13"
+ PYTHON_VERSION: "3.14"
steps:
- uses: actions/checkout@v6
@@ -104,7 +104,7 @@ jobs:
uv run --no-dev mypy --cache-dir=.mypy_cache/ --cobertura-xml-report mypy_report
- name: Upload mypy coverage to Codecov
- uses: codecov/codecov-action@v5.5.1
+ uses: codecov/codecov-action@v5.5.2
with:
file: mypy_report/cobertura.xml
flags: mypy
=====================================
.github/workflows/ci.yaml
=====================================
@@ -23,14 +23,14 @@ jobs:
matrix:
os: ["ubuntu-latest"]
env: ["complete"]
- python-version: ["3.11", "3.13"]
+ python-version: ["3.11", "3.14"]
include:
- os: "windows-latest"
env: "complete"
- python-version: "3.13"
+ python-version: "3.14"
- os: "ubuntu-latest"
env: "no-dask"
- python-version: "3.13"
+ python-version: "3.14"
- os: "ubuntu-latest"
env: "minimal"
python-version: "3.11"
@@ -60,7 +60,7 @@ jobs:
# https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache
- name: Restore cached hypothesis directory
id: restore-hypothesis-cache
- uses: actions/cache/restore@v4
+ uses: actions/cache/restore@v5
with:
path: .hypothesis/
key: cache-hypothesis-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }}
@@ -73,7 +73,7 @@ jobs:
uv run --no-dev python -c "import xarray; xarray.show_versions()" || true
uv run --no-dev pytest --durations=20 --durations-min=0.5 -n auto --cov=./ --cov-report=xml --hypothesis-profile ci
- name: Upload code coverage to Codecov
- uses: codecov/codecov-action@v5.5.1
+ uses: codecov/codecov-action@v5.5.2
with:
file: ./coverage.xml
flags: unittests
@@ -85,7 +85,7 @@ jobs:
- name: Save cached hypothesis directory
id: save-hypothesis-cache
if: always() && steps.status.outcome != 'skipped'
- uses: actions/cache/save@v4
+ uses: actions/cache/save@v5
with:
path: .hypothesis/
key: cache-hypothesis-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }}
@@ -101,12 +101,14 @@ jobs:
- name: Set up Python and uv
uses: astral-sh/setup-uv@v7
with:
- python-version: "3.13"
+ python-version: "3.14"
enable-cache: true
cache-dependency-glob: "pyproject.toml"
- name: Install xarray and dependencies
run: |
- uv add --dev ".[complete]" "pint>=0.22"
+ uv add --dev ".[dev]" "pint>=0.22"
+ uv pip uninstall h5netcdf
+ uv pip install pytz
- name: Install upstream flox
run: |
uv add git+https://github.com/dcherian/flox.git@${{ github.ref }}
=====================================
.github/workflows/pypi.yaml
=====================================
@@ -15,7 +15,7 @@ jobs:
- name: Set up Python and uv
uses: astral-sh/setup-uv@v7
with:
- python-version: "3.13"
+ python-version: "3.14"
- name: Build and publish
run: |
uv venv
=====================================
.github/workflows/testpypi-release.yaml
=====================================
@@ -24,7 +24,7 @@ jobs:
- name: Set up Python and uv
uses: astral-sh/setup-uv@v7
with:
- python-version: "3.13"
+ python-version: "3.14"
- name: Build tarball and wheels
run: |
@@ -43,7 +43,7 @@ jobs:
echo "✅ Looks good"
fi
- - uses: actions/upload-artifact@v5
+ - uses: actions/upload-artifact@v6
with:
name: releases
path: dist
@@ -63,10 +63,10 @@ jobs:
- name: Set up Python and uv
uses: astral-sh/setup-uv@v7
with:
- python-version: "3.13"
+ python-version: "3.14"
- name: Get built artifact
- uses: actions/download-artifact@v6
+ uses: actions/download-artifact@v7
with:
name: releases
path: dist
=====================================
.github/workflows/upstream-dev-ci.yaml
=====================================
@@ -31,7 +31,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: ["3.13"]
+ python-version: ["3.14"]
steps:
- uses: actions/checkout@v6
with:
@@ -48,8 +48,20 @@ jobs:
- name: Install upstream dev dependencies
run: |
- # Install with upstream development versions using separate config
- uv sync --group upstream-dev --config-file uv-upstream.toml --no-dev
+ # First sync the upstream group without the overridden packages
+ uv sync --group upstream --no-dev
+
+ # Install upstream development versions from nightly/git
+ uv pip install \
+ --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \
+ --pre \
+ numpy scipy pandas xarray
+
+ uv pip install \
+ "dask[core] @ git+https://github.com/dask/dask" \
+ "numpy-groupies @ git+https://github.com/ml31415/numpy-groupies" \
+ "sparse @ git+https://github.com/pydata/sparse" \
+ "cftime @ git+https://github.com/Unidata/cftime"
- name: List deps
=====================================
.pre-commit-config.yaml
=====================================
@@ -4,7 +4,7 @@ ci:
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: "v0.13.3"
+ rev: "v0.14.10"
hooks:
- id: ruff
args: ["--fix", "--show-fixes"]
@@ -19,7 +19,7 @@ repos:
- id: check-docstring-first
- repo: https://github.com/executablebooks/mdformat
- rev: 0.7.22
+ rev: 1.0.0
hooks:
- id: mdformat
additional_dependencies:
@@ -27,7 +27,7 @@ repos:
- mdformat-myst
- repo: https://github.com/kynan/nbstripout
- rev: 0.8.1
+ rev: 0.8.2
hooks:
- id: nbstripout
args: [--extra-keys=metadata.kernelspec metadata.language_info.version]
@@ -45,7 +45,7 @@ repos:
- id: validate-pyproject
- repo: https://github.com/rhysd/actionlint
- rev: v1.7.7
+ rev: v1.7.10
hooks:
- id: actionlint
files: ".github/workflows/"
=====================================
.readthedocs.yml
=====================================
@@ -7,7 +7,11 @@ sphinx:
build:
os: "ubuntu-lts-latest"
tools:
- python: "3.12"
+ python: "3.14"
+ jobs:
+ build:
+ html:
+ - NOCOLOR=1 sphinx-build -T -b html docs/source $READTHEDOCS_OUTPUT/html
python:
install:
=====================================
docs/source/implementation.md
=====================================
@@ -142,7 +142,7 @@ has a value for all 12 months. One could use `reindex=False` to control memory u
## `method="blockwise"`
One case where `method="map-reduce"` doesn't work well is the case of "resampling" reductions. An
-example here is resampling from daily frequency to monthly frequency data: `da.resample(time="M").mean()`
+example here is resampling from daily frequency to monthly frequency data: `da.resample(time="ME").mean()`
For resampling type reductions,
1. Group members occur sequentially (all days in January 2001 occur one after the other)
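For readers following the frequency-alias change in the hunk above ("M" → "ME"): pandas renamed the month-end alias in its 2.2 series, and xarray forwards resample frequencies to pandas. A minimal sketch, assuming xarray and a pandas release that accepts "ME" are installed (the toy data is illustrative only):
```python
import numpy as np
import pandas as pd
import xarray as xr

# One year of daily data.
time = pd.date_range("2001-01-01", "2001-12-31", freq="D")
da = xr.DataArray(np.arange(time.size, dtype=float), coords={"time": time}, dims="time")

# "ME" (month end) replaces the deprecated "M" frequency string.
monthly = da.resample(time="ME").mean()
print(monthly.sizes)  # 12 monthly means along "time"
```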
=====================================
docs/source/intro.md
=====================================
@@ -181,6 +181,6 @@ xarray_reduce(
## Resampling
-Use the xarray interface i.e. `da.resample(time="M").mean()`.
+Use the xarray interface i.e. `da.resample(time="ME").mean()`.
-Optionally pass [`method="blockwise"`](method-blockwise): `da.resample(time="M").mean(method="blockwise")`
+Optionally pass [`method="blockwise"`](method-blockwise): `da.resample(time="ME").mean(method="blockwise")`
=====================================
docs/source/user-stories/climatology.ipynb
=====================================
@@ -255,7 +255,7 @@
"metadata": {},
"outputs": [],
"source": [
- "newchunks = xr.ones_like(day).astype(int).resample(time=\"M\").count()"
+ "newchunks = xr.ones_like(day).astype(int).resample(time=\"ME\").count()"
]
},
{
=====================================
docs/source/xarray.md
=====================================
@@ -8,7 +8,7 @@ the best. Pass flox-specific kwargs to the specific reduction method:
```python
ds.groupby("time.month").mean(method="map-reduce", engine="flox")
ds.groupby_bins("lon", bins=[0, 10, 20]).mean(method="map-reduce")
-ds.resample(time="M").mean(method="blockwise")
+ds.resample(time="ME").mean(method="blockwise")
```
{py:func}`flox.xarray.xarray_reduce` used to provide extra functionality, but now Xarray's GroupBy object has been upgraded to match those capabilities with better API!
=====================================
flox/aggregate_flox.py
=====================================
@@ -146,6 +146,19 @@ def _np_grouped_op(
# assumes input is sorted, which I do in core._prepare_for_flox
aux = group_idx
+ # Handle empty group_idx case
+ if aux.size == 0:
+ if size is None:
+ size = 0
+ if dtype is None:
+ dtype = array.dtype
+ q = kwargs.get("q", None)
+ if q is None:
+ return np.full(array.shape[:-1] + (size,), fill_value=fill_value, dtype=dtype)
+ else:
+ nq = len(np.atleast_1d(q))
+ return np.full((nq,) + array.shape[:-1] + (size,), fill_value=fill_value, dtype=dtype)
+
flag = np.concatenate((np.asarray([True], like=aux), aux[1:] != aux[:-1]))
uniques = aux[flag]
(inv_idx,) = flag.nonzero()
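A standalone sketch of the empty-input fast path added above: with no group labels there is nothing to reduce, so an output of the requested size is returned filled with `fill_value`. This mirrors the shapes in the hunk (`size`, `fill_value`, `q`) but is not the flox function itself:
```python
import numpy as np

def empty_grouped_result(array, *, size, fill_value, dtype=None, q=None):
    # No groups to reduce over: return a fully-filled result of the requested size.
    dtype = array.dtype if dtype is None else dtype
    if q is None:
        return np.full(array.shape[:-1] + (size,), fill_value, dtype=dtype)
    # For quantile-like reductions, prepend one axis per requested quantile.
    nq = len(np.atleast_1d(q))
    return np.full((nq,) + array.shape[:-1] + (size,), fill_value, dtype=dtype)

# Reducing a (3, 0) array into 4 groups yields a (3, 4) array of fill_value;
# with q=[0.25, 0.75] the quantile axis is prepended, giving (2, 3, 4).
print(empty_grouped_result(np.empty((3, 0)), size=4, fill_value=np.nan).shape)
print(empty_grouped_result(np.empty((3, 0)), size=4, fill_value=np.nan, q=[0.25, 0.75]).shape)
```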
=====================================
flox/factorize.py
=====================================
@@ -61,13 +61,21 @@ def _factorize_single(by, expect, *, sort: bool, reindex: bool) -> tuple[pd.Inde
# make it behave like pd.cut which uses -1:
if len(bins) > 1:
right = expect.closed_right
+ # For datetime64, convert both flat and bins to int64 for comparison
+ is_datetime = bins.dtype.kind == "M"
+ flat_for_digitize = flat.view(np.int64) if is_datetime else flat
+ bins_for_digitize = bins.view(np.int64) if is_datetime else bins
idx = np.digitize(
- flat,
- bins=bins.view(np.int64) if bins.dtype.kind == "M" else bins,
+ flat_for_digitize,
+ bins=bins_for_digitize,
right=right,
)
idx -= 1
- within_bins = flat <= bins.max() if right else flat < bins.max()
+ within_bins = (
+ flat_for_digitize <= bins_for_digitize.max()
+ if right
+ else flat_for_digitize < bins_for_digitize.max()
+ )
idx[~within_bins] = -1
else:
idx = np.zeros_like(flat, dtype=np.intp) - 1
@@ -75,7 +83,7 @@ def _factorize_single(by, expect, *, sort: bool, reindex: bool) -> tuple[pd.Inde
else:
if expect is not None and reindex:
sorter = np.argsort(expect)
- groups = expect[(sorter,)] if sort else expect
+ groups = expect[sorter] if sort else expect
idx = np.searchsorted(expect, flat, sorter=sorter)
mask = ~np.isin(flat, expect) | isnull(flat) | (idx == len(expect))
if not sort:
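The datetime64 branch above exists because `np.digitize` does not accept datetime64 bins directly; viewing both the data and the bin edges as int64 makes the comparison well defined. A minimal illustration, not flox's API, assuming right-closed bins as in `closed_right=True`:
```python
import numpy as np

flat = np.array(["2001-01-15", "2001-02-20", "2001-05-01"], dtype="datetime64[ns]")
bins = np.array(["2001-01-01", "2001-02-01", "2001-03-01"], dtype="datetime64[ns]")

# View datetime64 values as int64 nanoseconds so np.digitize can compare them.
is_datetime = bins.dtype.kind == "M"
flat_i = flat.view(np.int64) if is_datetime else flat
bins_i = bins.view(np.int64) if is_datetime else bins

idx = np.digitize(flat_i, bins=bins_i, right=True) - 1
# Values beyond the last bin edge get -1, matching pd.cut's convention.
idx[flat_i > bins_i.max()] = -1
print(idx)  # [ 0  1 -1]
```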
=====================================
flox/reindex.py
=====================================
@@ -91,7 +91,7 @@ class ReindexStrategy:
def reindex_numpy(array, from_: pd.Index, to: pd.Index, fill_value, dtype, axis: int):
idx = from_.get_indexer(to)
- indexer = [slice(None, None)] * array.ndim
+ indexer: list[slice | np.ndarray] = [slice(None, None)] * array.ndim
indexer[axis] = idx
reindexed = array[tuple(indexer)]
if (idx == -1).any():
=====================================
flox/xrutils.py
=====================================
@@ -141,7 +141,7 @@ def is_scalar(value: Any, include_0d: bool = True) -> bool:
or isinstance(value, str | bytes | dict)
or not (
isinstance(value, (Iterable,) + NON_NUMPY_SUPPORTED_ARRAY_TYPES)
- or hasattr(value, "__array_function__") # type: ignore[unreachable]
+ or hasattr(value, "__array_function__")
)
)
@@ -195,7 +195,7 @@ def isnull(data: Any):
# types. For full consistency with pandas, we should accept None as
# a null value as well as NaN, but it isn't clear how to do this
# with duck typing.
- return data != data # type: ignore[unreachable]
+ return data != data
def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):
=====================================
pyproject.toml
=====================================
@@ -13,6 +13,7 @@ classifiers = [
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
]
dependencies = [
"pandas>=2.1",
=====================================
tests/test_core.py
=====================================
@@ -200,7 +200,7 @@ def test_groupby_reduce(
elif isinstance(expected_groups, np.ndarray):
g_dtype = expected_groups.dtype
else:
- g_dtype = np.int64
+ g_dtype = np.dtype(np.int64)
assert_equal(groups_array, np.array([0, 1, 2], g_dtype))
assert_equal(expected_result, result)
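The `np.dtype(np.int64)` wrapping above is a typing fix rather than a behavior change: `np.int64` is a scalar type, while `.dtype` attributes are `np.dtype` instances, so the two branches previously had mismatched static types. A quick check (hypothetical snippet, not taken from the test suite):
```python
import numpy as np

assert not isinstance(np.int64, np.dtype)        # scalar type, not a dtype instance
assert isinstance(np.dtype(np.int64), np.dtype)  # dtype instance, same kind as arr.dtype
# Both forms are accepted as the dtype argument and produce identical arrays.
print(np.array([0, 1, 2], np.dtype(np.int64)).dtype)  # int64
```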
=====================================
tests/test_properties.py
=====================================
@@ -107,7 +107,12 @@ def test_groupby_reduce(data, array, func: str) -> None:
# TODO: funny bugs with overflows here
is_cftime = _contains_cftime_datetimes(array)
- assume(not (is_cftime and func in ["prod", "nanprod", "var", "nanvar", "std", "nanstd"]))
+ assume(
+ not (
+ is_cftime
+ and func in ["prod", "nanprod", "var", "nanvar", "std", "nanstd", "quantile", "nanquantile"]
+ )
+ )
axis = -1
by = data.draw(
=====================================
uv-upstream.toml deleted
=====================================
@@ -1,16 +0,0 @@
-# Configuration for upstream development versions
-# Use with: uv sync --group upstream-dev --config-file uv-upstream.toml
-
-[[index]]
-name = "scientific-python-nightly"
-url = "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple"
-
-[sources]
-numpy = { index = "scientific-python-nightly" }
-scipy = { index = "scientific-python-nightly" }
-pandas = { index = "scientific-python-nightly" }
-xarray = { index = "scientific-python-nightly" }
-dask = { git = "https://github.com/dask/dask" }
-numpy-groupies = { git = "https://github.com/ml31415/numpy-groupies" }
-sparse = { git = "https://github.com/pydata/sparse" }
-cftime = { git = "https://github.com/Unidata/cftime" }
=====================================
uv.lock
=====================================
The diff for this file was not included because it is too large.
View it on GitLab: https://salsa.debian.org/debian-gis-team/flox/-/commit/e273d5a2fd3a44f1a7fb02c17afae0a2cf8ffac1