[Git][debian-gis-team/flox][upstream] New upstream version 0.10.7

Antonio Valentino (@antonio.valentino) gitlab@salsa.debian.org
Fri Sep 19 08:04:19 BST 2025



Antonio Valentino pushed to branch upstream at Debian GIS Project / flox


Commits:
62fdc5c2 by Antonio Valentino at 2025-09-19T06:52:48+00:00
New upstream version 0.10.7
- - - - -


29 changed files:

- .github/workflows/benchmarks.yml
- .github/workflows/ci-additional.yaml
- .github/workflows/ci.yaml
- .github/workflows/pypi.yaml
- .github/workflows/testpypi-release.yaml
- .github/workflows/upstream-dev-ci.yaml
- .readthedocs.yml
- asv_bench/asv.conf.json
- + asv_bench/environment.yml
- − ci/benchmark.yml
- − ci/docs.yml
- − ci/env-numpy1.yml
- − ci/environment.yml
- − ci/minimal-requirements.yml
- − ci/no-dask.yml
- − ci/no-numba.yml
- − ci/no-xarray.yml
- − ci/upstream-dev-env.yml
- flox/aggregations.py
- flox/core.py
- + flox/multiarray.py
- flox/xrutils.py
- pyproject.toml
- tests/strategies.py
- tests/test_core.py
- tests/test_properties.py
- + uv-numpy1.toml
- + uv-upstream.toml
- + uv.lock


Changes:

=====================================
.github/workflows/benchmarks.yml
=====================================
@@ -17,25 +17,22 @@ jobs:
     steps:
       # We need the full repo to avoid this issue
       # https://github.com/actions/checkout/issues/23
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0
 
-      - name: Set up conda environment
-        uses: mamba-org/setup-micromamba@v2
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
         with:
-          environment-name: flox-bench
-          create-args: >-
-            python=3.12
-            asv
-            mamba
-            libmambapy<2.0
-            conda-build
-          init-shell: bash
-          cache-environment: true
+          python-version: "3.12"
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
+
+      - name: Install dependencies
+        run: |
+          uv sync --group benchmark --no-dev
 
       - name: Run benchmarks
-        shell: bash -l {0}
         id: benchmark
         env:
           OPENBLAS_NUM_THREADS: 1
@@ -50,12 +47,12 @@ jobs:
         run: |
           # set -x
           # ID this runner
-          asv machine --yes
+          uv run --no-dev asv machine --yes
           echo "Baseline:  $LAST_HEAD_SHA ($BASE_LABEL)"
           echo "Contender: ${GITHUB_SHA} ($HEAD_LABEL)"
           # Run benchmarks for current commit against base
           ASV_OPTIONS="--split --show-stderr --factor $ASV_FACTOR"
-          asv continuous $ASV_OPTIONS $BASE_SHA ${GITHUB_SHA} \
+          uv run --no-dev asv continuous $ASV_OPTIONS $BASE_SHA ${GITHUB_SHA} \
               | sed "/Traceback \|failed$\|PERFORMANCE DECREASED/ s/^/::error::/" \
               | tee benchmarks.log
           # Report and export results for subsequent steps


=====================================
.github/workflows/ci-additional.yaml
=====================================
@@ -22,7 +22,7 @@ jobs:
     outputs:
       triggered: ${{ steps.detect-trigger.outputs.trigger-found }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 2
       - uses: xarray-contrib/ci-trigger@v1.2
@@ -35,49 +35,35 @@ jobs:
     runs-on: "ubuntu-latest"
     needs: detect-ci-trigger
     if: needs.detect-ci-trigger.outputs.triggered == 'false'
-    defaults:
-      run:
-        shell: bash -l {0}
-
     env:
-      CONDA_ENV_FILE: ci/environment.yml
       PYTHON_VERSION: "3.13"
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
-      - name: set environment variables
-        run: |
-          echo "TODAY=$(date  +'%Y-%m-%d')" >> $GITHUB_ENV
-
-      - name: Setup micromamba
-        uses: mamba-org/setup-micromamba@v2
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
         with:
-          environment-file: ${{env.CONDA_ENV_FILE}}
-          environment-name: flox-tests
-          init-shell: bash
-          cache-environment: true
-          cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}"
-          create-args: |
-            python=${{ env.PYTHON_VERSION }}
+          python-version: ${{ env.PYTHON_VERSION }}
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
 
-      - name: Install flox
+      - name: Install dependencies
         run: |
-          python -m pip install --no-deps -e .
+          uv sync --group complete --no-dev
       - name: Version info
         run: |
-          conda info -a
-          conda list
+          uv pip list
       - name: Run doctests
         run: |
-          python -m pytest --doctest-modules \
+          uv run --no-dev python -m pytest --doctest-modules \
           flox/aggregations.py flox/core.py flox/xarray.py \
           --ignore flox/tests \
           --cov=./ --cov-report=xml
       - name: Upload code coverage to Codecov
-        uses: codecov/codecov-action@v5.4.3
+        uses: codecov/codecov-action@v5.5.0
         with:
           file: ./coverage.xml
           flags: unittests
@@ -90,49 +76,35 @@ jobs:
     runs-on: "ubuntu-latest"
     needs: detect-ci-trigger
     if: needs.detect-ci-trigger.outputs.triggered == 'false'
-    defaults:
-      run:
-        shell: bash -l {0}
     env:
-      CONDA_ENV_FILE: ci/environment.yml
       PYTHON_VERSION: "3.13"
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
-      - name: set environment variables
-        run: |
-          echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV
-      - name: Setup micromamba
-        uses: mamba-org/setup-micromamba@v2
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
         with:
-          environment-file: ${{env.CONDA_ENV_FILE}}
-          environment-name: flox-tests
-          init-shell: bash
-          cache-environment: true
-          cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}"
-          create-args: |
-            python=${{ env.PYTHON_VERSION }}
-      - name: Install flox
+          python-version: ${{ env.PYTHON_VERSION }}
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
+
+      - name: Install dependencies
         run: |
-          python -m pip install --no-deps -e .
+          uv sync --group complete --no-dev --group types
       - name: Version info
         run: |
-          conda info -a
-          conda list
-      - name: Install mypy
-        run: |
-          python -m pip install mypy
+          uv pip list
 
       - name: Run mypy
         run: |
           mkdir .mypy_cache
-          python -m mypy --install-types --non-interactive --cache-dir=.mypy_cache/ --cobertura-xml-report mypy_report
+          uv run --no-dev mypy --cache-dir=.mypy_cache/ --cobertura-xml-report mypy_report
 
       - name: Upload mypy coverage to Codecov
-        uses: codecov/codecov-action@v5.4.3
+        uses: codecov/codecov-action@v5.5.0
         with:
           file: mypy_report/cobertura.xml
           flags: mypy


=====================================
.github/workflows/ci.yaml
=====================================
@@ -18,47 +18,44 @@ jobs:
   test:
     name: Test (${{matrix.env}}, ${{ matrix.python-version }}, ${{ matrix.os }})
     runs-on: ${{ matrix.os }}
-    defaults:
-      run:
-        shell: bash -l {0}
     strategy:
       fail-fast: false
       matrix:
         os: ["ubuntu-latest"]
-        env: ["environment"]
+        env: ["complete"]
         python-version: ["3.11", "3.13"]
         include:
           - os: "windows-latest"
-            env: "environment"
+            env: "complete"
             python-version: "3.13"
           - os: "ubuntu-latest"
-            env: "no-dask" # "no-xarray", "no-numba"
+            env: "no-dask"
             python-version: "3.13"
           - os: "ubuntu-latest"
-            env: "minimal-requirements"
+            env: "minimal"
             python-version: "3.11"
           - os: "windows-latest"
-            env: "env-numpy1"
+            env: "numpy1"
             python-version: "3.11"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
       - name: Set environment variables
         run: |
           echo "PYTHON_VERSION=${{ matrix.python-version }}" >> $GITHUB_ENV
-      - name: Set up conda environment
-        uses: mamba-org/setup-micromamba@v2
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
         with:
-          environment-file: ci/${{ matrix.env }}.yml
-          environment-name: flox-tests
-          init-shell: bash
-          cache-environment: true
-          create-args: |
-            python=${{ matrix.python-version }}
-      - name: Install flox
-        run: |
-          python -m pip install --no-deps -e .
+          python-version: ${{ matrix.python-version }}
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
+      - name: Install dependencies (with config file)
+        if: matrix.env == 'numpy1'
+        run: uv sync --group ${{ matrix.env }} --config-file uv-numpy1.toml --no-dev
+      - name: Install dependencies (without config file)
+        if: matrix.env != 'numpy1'
+        run: uv sync --group ${{ matrix.env }} --no-dev
 
       # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache
       - name: Restore cached hypothesis directory
@@ -73,10 +70,10 @@ jobs:
       - name: Run Tests
         id: status
         run: |
-          python -c "import xarray; xarray.show_versions()"
-          pytest --durations=20 --durations-min=0.5 -n auto --cov=./ --cov-report=xml --hypothesis-profile ci
+          uv run --no-dev python -c "import xarray; xarray.show_versions()" || true
+          uv run --no-dev pytest --durations=20 --durations-min=0.5 -n auto --cov=./ --cov-report=xml --hypothesis-profile ci
       - name: Upload code coverage to Codecov
-        uses: codecov/codecov-action@v5.4.3
+        uses: codecov/codecov-action@v5.5.0
         with:
           file: ./coverage.xml
           flags: unittests
@@ -96,48 +93,39 @@ jobs:
   xarray-groupby:
     name: xarray-groupby
     runs-on: ubuntu-latest
-    defaults:
-      run:
-        shell: bash -l {0}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           repository: "pydata/xarray"
           fetch-depth: 0 # Fetch all history for all branches and tags.
-      - name: Set up conda environment
-        uses: mamba-org/setup-micromamba@v2
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
         with:
-          environment-file: ci/requirements/environment.yml
-          environment-name: xarray-tests
-          init-shell: bash
-          cache-environment: true
-          create-args: >-
-            python=3.11
-            pint>=0.22
-      - name: Install xarray
+          python-version: "3.13"
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
+      - name: Install xarray and dependencies
         run: |
-          python -m pip install --no-deps .
+          uv add --dev ".[complete]" "pint>=0.22"
       - name: Install upstream flox
         run: |
-          python -m pip install --no-deps \
-            git+https://github.com/dcherian/flox.git@${{ github.ref }}
+          uv add git+https://github.com/dcherian/flox.git@${{ github.ref }}
       - name: Version info
         run: |
-          conda info -a
-          conda list
-          python xarray/util/print_versions.py
+          uv tree
+          uv run --no-dev python xarray/util/print_versions.py
       - name: import xarray
         run: |
-          python -c 'import xarray'
+          uv run --no-dev python -c 'import xarray'
       - name: import flox
         run: |
-          python -c 'import flox'
+          uv run --no-dev python -c 'import flox'
       - name: Run Tests
         if: success()
         id: status
         run: |
           set -euo pipefail
-          python -m pytest -n auto \
+          uv run --no-dev python -m pytest -n auto \
               xarray/tests/test_groupby.py \
               xarray/tests/test_units.py::TestDataArray::test_computation_objects \
               xarray/tests/test_units.py::TestDataArray::test_grouped_operations \


=====================================
.github/workflows/pypi.yaml
=====================================
@@ -9,18 +9,14 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - name: Set up Python
-        uses: actions/setup-python@v5
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
         with:
           python-version: "3.x"
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install build setuptools setuptools-scm wheel twine
       - name: Build and publish
         env:
           TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
           TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
         run: |
-          python -m build
-          twine upload dist/*
+          uv build
+          uv publish


=====================================
.github/workflows/testpypi-release.yaml
=====================================
@@ -17,18 +17,14 @@ jobs:
     if: ${{ contains( github.event.pull_request.labels.*.name, 'test-build') && github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0
 
-      - uses: actions/setup-python@v5
-        name: Install Python
-
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          python -m pip install build twine
-          python -m pip install tomli tomli_w
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          python-version: "3.x"
 
       # - name: Disable local versions
       #   run: |
@@ -39,11 +35,12 @@ jobs:
       - name: Build tarball and wheels
         run: |
           git clean -xdf
-          python -m build
+          uv build
 
       - name: Check built artifacts
         run: |
-          python -m twine check --strict dist/*
+          uv pip install twine
+          uv run twine check --strict dist/*
           if [ -f dist/flox-999.tar.gz ]; then
             echo "❌ INVALID VERSION NUMBER"
             exit 1
@@ -60,8 +57,10 @@ jobs:
     needs: build-artifacts
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/setup-python@v5
-        name: Install Python
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          python-version: "3.x"
       - uses: actions/download-artifact@v4
         with:
           name: releases
@@ -73,8 +72,7 @@ jobs:
 
       - name: Verify the built dist/wheel is valid
         run: |
-          python -m pip install --upgrade pip
-          python -m pip install dist/flox*.whl
+          uv pip install dist/flox*.whl
           # python -m cf_xarray.scripts.print_versions
 
       # - name: Publish package to TestPyPI


=====================================
.github/workflows/upstream-dev-ci.yaml
=====================================
@@ -28,72 +28,38 @@ jobs:
       || github.event_name == 'workflow_dispatch'
       || github.event_name == 'schedule'
       }}
-    defaults:
-      run:
-        shell: bash -l {0}
     strategy:
       fail-fast: false
       matrix:
         python-version: ["3.13"]
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
       - name: Set environment variables
         run: |
           echo "PYTHON_VERSION=${{ matrix.python-version }}" >> $GITHUB_ENV
-      - name: Set up conda environment
-        uses: mamba-org/setup-micromamba@v2
+      - name: Set up Python and uv
+        uses: astral-sh/setup-uv@v4
         with:
-          environment-name: flox-tests
-          init-shell: bash
-          # cache-environment: true
-          # micromamba list does not list pip dependencies, so install mamba
-          create-args: >-
-            mamba
-            pip
-            python=${{ matrix.python-version }}
-            pytest-reportlog
+          python-version: ${{ matrix.python-version }}
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
 
       - name: Install upstream dev dependencies
         run: |
-          # install cython for building cftime without build isolation
-          micromamba install -f ci/upstream-dev-env.yml
-          micromamba remove --force numpy scipy pandas cftime
-          python -m pip install \
-            -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \
-            --no-deps \
-            --pre \
-            --upgrade \
-            numpy \
-            scipy \
-            pandas \
-            xarray
-          # without build isolation for packages compiling against numpy
-          # TODO: remove once there are `numpy>=2.0` builds for cftime
-          python -m pip install \
-            --no-deps \
-            --upgrade \
-            --no-build-isolation \
-            git+https://github.com/Unidata/cftime
-          python -m pip install \
-            git+https://github.com/dask/dask \
-            git+https://github.com/ml31415/numpy-groupies \
-            git+https://github.com/pydata/sparse
+          # Install with upstream development versions using separate config
+          uv sync --group upstream-dev --config-file uv-upstream.toml --no-dev
 
-      - name: Install flox
-        run: |
-          python -m pip install --no-deps -e .
 
       - name: List deps
         run: |
-          # micromamba list does not list pip dependencies
-          mamba list
+          uv pip list
       - name: Run Tests
         if: success()
         id: status
         run: |
-          pytest -rf -n auto --cov=./ --cov-report=xml \
+          uv run --no-dev pytest -rf -n auto --cov=./ --cov-report=xml \
             --report-log output-${{ matrix.python-version }}-log.jsonl \
              --hypothesis-profile ci
       - name: Generate and publish the report


=====================================
.readthedocs.yml
=====================================
@@ -7,9 +7,13 @@ sphinx:
 build:
   os: "ubuntu-lts-latest"
   tools:
-    python: "mambaforge-latest"
+    python: "3.12"
 
-conda:
-  environment: ci/docs.yml
+python:
+  install:
+    - method: pip
+      path: .
+      extra_requirements:
+        - docs
 
 formats: []


=====================================
asv_bench/asv.conf.json
=====================================
@@ -27,13 +27,6 @@
   //     "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}"
   // ],
   //
-  "build_command": [
-    "python setup.py build",
-    "python -mpip wheel --no-deps --no-build-isolation --no-index -w {build_cache_dir} {build_dir}"
-  ],
-  "install_command": [
-    "in-dir={env_dir} python -mpip install {wheel_file} --no-deps"
-  ],
 
   // List of branches to benchmark. If not provided, defaults to "master"
   // (for git) or "default" (for mercurial).
@@ -49,11 +42,16 @@
 
   // The Pythons you'd like to test against.  If not provided, defaults
   // to the current version of Python used to run `asv`.
-  // "pythons": ["3.9"],
+  "pythons": ["3.12"],
 
-  "environment_type": "mamba",
-  "conda_channels": ["conda-forge"],
-  "conda_environment_file": "../ci/benchmark.yml",
+  "environment_type": "rattler",
+  "conda_environment_file": "environment.yml",
+  "build_command": [
+    "python -m build --wheel --outdir {build_cache_dir} {build_dir}"
+  ],
+  "install_command": [
+    "python -m pip install --find-links {build_cache_dir} --no-deps --no-index flox"
+  ],
 
   // The directory (relative to the current directory) that benchmarks are
   // stored in.  If not provided, defaults to "benchmarks"


=====================================
asv_bench/environment.yml
=====================================
@@ -0,0 +1,24 @@
+# Conda environment file for ASV benchmarks
+# This file is used by the rattler backend in ASV to create benchmark environments
+# It includes all dependencies needed to run flox benchmarks
+name: asv-bench
+channels:
+  - conda-forge
+dependencies:
+  - python=3.12
+  - pip
+  - setuptools
+  - wheel
+  - dask-core
+  - xarray
+  - numpy>=1.26
+  - scipy>=1.12
+  - pandas>=2.1
+  - toolz
+  - packaging>=21.3
+  - numbagg>=0.3
+  - cachey
+  - pyarrow
+  - pip:
+    - build
+    - numpy-groupies>=0.9.19


=====================================
ci/benchmark.yml deleted
=====================================
@@ -1,16 +0,0 @@
-name: flox-bench
-channels:
-  - conda-forge
-dependencies:
-  - asv
-  - build
-  - cachey
-  - dask-core
-  - numpy<2.1
-  - mamba
-  - pip
-  - xarray
-  - numpy_groupies>=0.9.19
-  - numbagg>=0.3
-  - wheel
-  - scipy


=====================================
ci/docs.yml deleted
=====================================
@@ -1,27 +0,0 @@
-name: flox-doc
-channels:
-  - conda-forge
-dependencies:
-  - cubed>=0.20.0
-  - cubed-xarray
-  - dask-core
-  - pip
-  - xarray
-  - numpy>=1.26
-  - scipy
-  - numpydoc
-  - numpy_groupies>=0.9.19
-  - toolz
-  - matplotlib-base
-  - myst-parser
-  - myst-nb
-  - sparse
-  - sphinx
-  - sphinx-remove-toctrees
-  - furo>=2024.08
-  - ipykernel
-  - jupyter
-  - sphinx-codeautolink
-  - sphinx-copybutton
-  - pip:
-      - -e ..


=====================================
ci/env-numpy1.yml deleted
=====================================
@@ -1,30 +0,0 @@
-name: flox-tests
-channels:
-  - conda-forge
-dependencies:
-  - asv
-  - cachey
-  - cftime
-  - codecov
-  - cubed>=0.20.0
-  - dask-core
-  - pandas
-  - numpy<2
-  - scipy
-  - sparse
-  - lxml # for mypy coverage report
-  - matplotlib
-  - pip
-  - pytest
-  - pytest-cov
-  - pytest-pretty
-  - pytest-xdist
-  - syrupy
-  - pre-commit
-  - numpy_groupies>=0.9.19
-  - pooch
-  - toolz
-  - numba
-  - numbagg>=0.3
-  - hypothesis
-  - xarray


=====================================
ci/environment.yml deleted
=====================================
@@ -1,31 +0,0 @@
-name: flox-tests
-channels:
-  - conda-forge
-dependencies:
-  - asv
-  - cachey
-  - cftime
-  - codecov
-  - cubed>=0.20.0
-  - dask-core
-  - pandas
-  - numpy>=1.26
-  - scipy
-  - sparse
-  - lxml # for mypy coverage report
-  - matplotlib
-  - pip
-  - pytest
-  - pytest-cov
-  - pytest-pretty
-  - pytest-xdist
-  - syrupy
-  - pre-commit
-  - numpy_groupies>=0.9.19
-  - pooch
-  - toolz
-  - numba
-  - numbagg>=0.3
-  - hypothesis
-  - xarray
-  - zarr


=====================================
ci/minimal-requirements.yml deleted
=====================================
@@ -1,18 +0,0 @@
-name: flox-tests
-channels:
-  - conda-forge
-dependencies:
-  - codecov
-  - hypothesis
-  - pip
-  - pytest
-  - pytest-cov
-  - pytest-pretty
-  - pytest-xdist
-  - syrupy
-  - numpy==1.26
-  - scipy==1.12
-  - numpy_groupies==0.9.19
-  - pandas==2.1
-  - pooch
-  - toolz


=====================================
ci/no-dask.yml deleted
=====================================
@@ -1,25 +0,0 @@
-name: flox-tests
-channels:
-  - conda-forge
-dependencies:
-  - codecov
-  - pandas
-  - hypothesis
-  - cftime
-  - numpy>=1.26
-  - scipy
-  - sparse
-  - pip
-  - pytest
-  - pytest-cov
-  - pytest-pretty
-  - pytest-xdist
-  - syrupy
-  - numpydoc
-  - pre-commit
-  - numpy_groupies>=0.9.19
-  - pooch
-  - toolz
-  - numba
-  - numbagg>=0.3
-  - xarray


=====================================
ci/no-numba.yml deleted
=====================================
@@ -1,27 +0,0 @@
-name: flox-tests
-channels:
-  - conda-forge
-dependencies:
-  - asv
-  - cachey
-  - cftime
-  - codecov
-  - dask-core
-  - hypothesis
-  - pandas
-  - numpy>=1.26
-  - scipy
-  - sparse
-  - lxml # for mypy coverage report
-  - matplotlib
-  - pip
-  - pytest
-  - pytest-cov
-  - pytest-pretty
-  - pytest-xdist
-  - syrupy
-  - pre-commit
-  - numpy_groupies>=0.9.19
-  - pooch
-  - toolz
-  - xarray


=====================================
ci/no-xarray.yml deleted
=====================================
@@ -1,23 +0,0 @@
-name: flox-tests
-channels:
-  - conda-forge
-dependencies:
-  - codecov
-  - syrupy
-  - pandas
-  - numpy>=1.26
-  - scipy
-  - sparse
-  - pip
-  - pytest
-  - pytest-cov
-  - pytest-pretty
-  - pytest-xdist
-  - syrupy
-  - dask-core
-  - numpydoc
-  - pre-commit
-  - numpy_groupies>=0.9.19
-  - pooch
-  - toolz
-  - numba


=====================================
ci/upstream-dev-env.yml deleted
=====================================
@@ -1,31 +0,0 @@
-name: flox-tests
-channels:
-  - conda-forge
-dependencies:
-  - asv_runner # for test_asv
-  - cachey
-  - codecov
-  - pooch
-  - hypothesis
-  - toolz
-  # - numpy
-  # - pandas
-  # - scipy
-  - pytest-pretty
-  - pytest-xdist
-  - syrupy
-  - pip
-  # for cftime
-  - cython>=0.29.20
-  - py-cpuinfo
-  # - numba
-  - pytest
-  - pytest-cov
-  # for upstream pandas
-  - python-dateutil
-  - pytz
-  # - pip:
-  #     - git+https://github.com/pydata/xarray
-  #     - git+https://github.com/dask/dask
-  #     - git+https://github.com/ml31415/numpy-groupies
-  #     # - git+https://github.com/numbagg/numbagg


=====================================
flox/aggregations.py
=====================================
@@ -15,6 +15,8 @@ from numpy.typing import ArrayLike, DTypeLike
 from . import aggregate_flox, aggregate_npg, xrutils
 from . import xrdtypes as dtypes
 from .lib import dask_array_type, sparse_array_type
+from .multiarray import MultiArray
+from .xrutils import notnull
 
 if TYPE_CHECKING:
     FuncTuple = tuple[Callable | str, ...]
@@ -161,8 +163,8 @@ class Aggregation:
         self,
         name: str,
         *,
-        numpy: str | None = None,
-        chunk: str | FuncTuple | None,
+        numpy: partial | str | None = None,
+        chunk: partial | str | FuncTuple | None,
         combine: str | FuncTuple | None,
         preprocess: Callable | None = None,
         finalize: Callable | None = None,
@@ -343,57 +345,183 @@ nanmean = Aggregation(
 )
 
 
-# TODO: fix this for complex numbers
-def _var_finalize(sumsq, sum_, count, ddof=0):
+def var_chunk(
+    group_idx, array, *, skipna: bool, engine: str, axis=-1, size=None, fill_value=None, dtype=None
+):
+    # Calculate length and sum - important for the adjustment terms to sum squared deviations
+    array_lens = generic_aggregate(
+        group_idx,
+        array,
+        func="nanlen",
+        engine=engine,
+        axis=axis,
+        size=size,
+        fill_value=0,  # Unpack fill value because it's currently defined for multiarray
+        dtype=dtype,
+    )
+
+    array_sums = generic_aggregate(
+        group_idx,
+        array,
+        func="nansum" if skipna else "sum",
+        engine=engine,
+        axis=axis,
+        size=size,
+        fill_value=0,  # Unpack fill value because it's currently defined for multiarray
+        dtype=dtype,
+    )
+
+    # Calculate sum squared deviations - the main part of variance sum
     with np.errstate(invalid="ignore", divide="ignore"):
-        result = (sumsq - (sum_**2 / count)) / (count - ddof)
-    result[count <= ddof] = np.nan
-    return result
+        array_means = array_sums / array_lens
+
+    sum_squared_deviations = generic_aggregate(
+        group_idx,
+        (array - array_means[..., group_idx]) ** 2,
+        func="nansum" if skipna else "sum",
+        engine=engine,
+        axis=axis,
+        size=size,
+        fill_value=0,  # Unpack fill value because it's currently defined for multiarray
+        dtype=dtype,
+    )
+
+    return MultiArray((sum_squared_deviations, array_sums, array_lens))
+
+
+def _var_combine(array, axis, keepdims=True):
+    def clip_last(array, ax, n=1):
+        """Return array except the last element along axis
+        Purely included to tidy up the adj_terms line
+        """
+        assert n > 0, "Clipping nothing off the end isn't implemented"
+        not_last = [slice(None, None) for i in range(array.ndim)]
+        not_last[ax] = slice(None, -n)
+        return array[*not_last]
+
+    def clip_first(array, ax, n=1):
+        """Return array except the first element along axis
+        Purely included to tidy up the adj_terms line
+        """
+        not_first = [slice(None, None) for i in range(array.ndim)]
+        not_first[ax] = slice(n, None)
+        return array[*not_first]
+
+    for ax in axis:
+        if array.shape[ax] == 1:
+            continue
+
+        sum_deviations, sum_X, sum_len = array.arrays
+
+        # Calculate parts needed for cascading combination
+        cumsum_X = np.cumsum(sum_X, axis=ax)
+        cumsum_len = np.cumsum(sum_len, axis=ax)
+
+        # There will be instances in which one or both chunks being merged are empty
+        # In which case, the adjustment term should be zero, but will throw a divide-by-zero error
+        # We're going to add a constant to the bottom of the adjustment term equation on those instances
+        # and count on the zeros on the top making our adjustment term still zero
+        zero_denominator = (clip_last(cumsum_len, ax) == 0) | (clip_first(sum_len, ax) == 0)
+
+        # Adjustment terms to tweak the sum of squared deviations because not every chunk has the same mean
+        with np.errstate(invalid="ignore", divide="ignore"):
+            adj_terms = (
+                clip_last(cumsum_len, ax) * clip_first(sum_X, ax)
+                - clip_first(sum_len, ax) * clip_last(cumsum_X, ax)
+            ) ** 2 / (
+                clip_last(cumsum_len, ax)
+                * clip_first(sum_len, ax)
+                * (clip_last(cumsum_len, ax) + clip_first(sum_len, ax))
+                + zero_denominator.astype(int)
+            )
+
+        check = adj_terms * zero_denominator
+        assert np.all(check[notnull(check)] == 0), (
+            "Instances where we add something to the denominator must come out to zero"
+        )
+
+        array = MultiArray(
+            (
+                np.sum(sum_deviations, axis=ax, keepdims=keepdims)
+                + np.sum(adj_terms, axis=ax, keepdims=keepdims),  # sum of squared deviations
+                np.sum(sum_X, axis=ax, keepdims=keepdims),  # sum of array items
+                np.sum(sum_len, axis=ax, keepdims=keepdims),  # sum of array lengths
+            )
+        )
+    return array
+
+
+def is_var_chunk_reduction(agg: Callable) -> bool:
+    if isinstance(agg, partial):
+        agg = agg.func
+    return agg is blockwise_or_numpy_var or agg is var_chunk
+
+
+def _var_finalize(multiarray, ddof=0):
+    den = multiarray.arrays[2]
+    den -= ddof
+    # preserve nans for groups with 0 obs; so these values are -ddof
+    with np.errstate(invalid="ignore", divide="ignore"):
+        ret = multiarray.arrays[0]
+        ret /= den
+    ret[den < 0] = np.nan
+    return ret
 
 
-def _std_finalize(sumsq, sum_, count, ddof=0):
-    return np.sqrt(_var_finalize(sumsq, sum_, count, ddof))
+def _std_finalize(multiarray, ddof=0):
+    return np.sqrt(_var_finalize(multiarray, ddof))
+
+
+def blockwise_or_numpy_var(*args, skipna: bool, ddof=0, std=False, **kwargs):
+    res = _var_finalize(var_chunk(*args, skipna=skipna, **kwargs), ddof)
+    return np.sqrt(res) if std else res
 
 
 # var, std always promote to float, so we set nan
 var = Aggregation(
     "var",
-    chunk=("sum_of_squares", "sum", "nanlen"),
-    combine=("sum", "sum", "sum"),
+    chunk=partial(var_chunk, skipna=False),
+    numpy=partial(blockwise_or_numpy_var, skipna=False),
+    combine=(_var_combine,),
     finalize=_var_finalize,
-    fill_value=0,
+    fill_value=((0, 0, 0),),
     final_fill_value=np.nan,
-    dtypes=(None, None, np.intp),
+    dtypes=(None,),
     final_dtype=np.floating,
 )
+
 nanvar = Aggregation(
     "nanvar",
-    chunk=("nansum_of_squares", "nansum", "nanlen"),
-    combine=("sum", "sum", "sum"),
+    chunk=partial(var_chunk, skipna=True),
+    numpy=partial(blockwise_or_numpy_var, skipna=True),
+    combine=(_var_combine,),
     finalize=_var_finalize,
-    fill_value=0,
+    fill_value=((0, 0, 0),),
     final_fill_value=np.nan,
-    dtypes=(None, None, np.intp),
+    dtypes=(None,),
     final_dtype=np.floating,
 )
+
 std = Aggregation(
     "std",
-    chunk=("sum_of_squares", "sum", "nanlen"),
-    combine=("sum", "sum", "sum"),
+    chunk=partial(var_chunk, skipna=False),
+    numpy=partial(blockwise_or_numpy_var, skipna=False, std=True),
+    combine=(_var_combine,),
     finalize=_std_finalize,
-    fill_value=0,
+    fill_value=((0, 0, 0),),
     final_fill_value=np.nan,
-    dtypes=(None, None, np.intp),
+    dtypes=(None,),
     final_dtype=np.floating,
 )
 nanstd = Aggregation(
     "nanstd",
-    chunk=("nansum_of_squares", "nansum", "nanlen"),
-    combine=("sum", "sum", "sum"),
+    chunk=partial(var_chunk, skipna=True),
+    numpy=partial(blockwise_or_numpy_var, skipna=True, std=True),
+    combine=(_var_combine,),
     finalize=_std_finalize,
-    fill_value=0,
+    fill_value=((0, 0, 0),),
     final_fill_value=np.nan,
-    dtypes=(None, None, np.intp),
+    dtypes=(None,),
     final_dtype=np.floating,
 )
 


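The var_chunk/_var_combine pair above replaces the old sum-of-squares intermediates with a parallel variance algorithm: each chunk carries the triple (sum of squared deviations about the chunk mean, sum, count), and merging two chunks adds a correction term for the difference between their means. A minimal two-chunk sketch of the merge rule, using hypothetical helper names rather than the flox API:

    import numpy as np

    def chunk_stats(x):
        # Per-chunk triple: (sum of squared deviations about the chunk mean, sum, count)
        return ((x - x.mean()) ** 2).sum(), x.sum(), x.size

    def merge(a, b):
        # Combine two triples; adj corrects the squared deviations for unequal
        # chunk means and matches the adj_terms expression in _var_combine above.
        m2a, sa, na = a
        m2b, sb, nb = b
        adj = (na * sb - nb * sa) ** 2 / (na * nb * (na + nb))
        return m2a + m2b + adj, sa + sb, na + nb

    x = np.random.default_rng(0).normal(1e8, 1.0, 1000)  # large mean, small variance
    m2, _, n = merge(chunk_stats(x[:400]), chunk_stats(x[400:]))
    assert np.isclose(m2 / n, np.var(x))  # ddof=0

_var_finalize then divides the combined sum of squared deviations by count - ddof, which is why the three intermediates must travel through concatenation and reindexing as a unit; that is the job of the new MultiArray below.
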
=====================================
flox/core.py
=====================================
@@ -44,6 +44,7 @@ from .aggregations import (
     _atleast_1d,
     _initialize_aggregation,
     generic_aggregate,
+    is_var_chunk_reduction,
     quantile_new_dims_func,
 )
 from .cache import memoize
@@ -1289,7 +1290,8 @@ def chunk_reduce(
     # optimize that out.
     previous_reduction: T_Func = ""
     for reduction, fv, kw, dt in zip(funcs, fill_values, kwargss, dtypes):
-        if empty:
+        # UGLY! but this is because the `var` breaks our design assumptions
+        if empty and not is_var_chunk_reduction(reduction):
             result = np.full(shape=final_array_shape, fill_value=fv, like=array)
         elif is_nanlen(reduction) and is_nanlen(previous_reduction):
             result = results["intermediates"][-1]
@@ -1298,6 +1300,10 @@ def chunk_reduce(
             kw_func = dict(size=size, dtype=dt, fill_value=fv)
             kw_func.update(kw)
 
+            # UGLY! but this is because the `var` breaks our design assumptions
+            if is_var_chunk_reduction(reduction):
+                kw_func.update(engine=engine)
+
             if callable(reduction):
                 # passing a custom reduction for npg to apply per-group is really slow!
                 # So this `reduction` has to do the groupby-aggregation
@@ -2785,6 +2791,7 @@ def groupby_reduce(
             array = array.view(np.int64)
         elif is_cftime:
             offset = array.min()
+            assert offset is not None
             array = datetime_to_numeric(array, offset, datetime_unit="us")
 
     if nax == 1 and by_.ndim > 1 and expected_ is None:


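The chunk_reduce special-casing keys off is_var_chunk_reduction, which unwraps functools.partial objects because the variance chunk functions are registered as partials (partial(var_chunk, skipna=...)). A small sketch, assuming the helpers are imported from flox.aggregations as in the diff above:

    from functools import partial

    from flox.aggregations import is_var_chunk_reduction, var_chunk

    assert is_var_chunk_reduction(partial(var_chunk, skipna=True))  # unwraps the partial
    assert not is_var_chunk_reduction("nansum")  # other reductions take the usual path
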
=====================================
flox/multiarray.py
=====================================
@@ -0,0 +1,97 @@
+from collections.abc import Callable
+from typing import Self
+
+import numpy as np
+
+MULTIARRAY_HANDLED_FUNCTIONS: dict[Callable, Callable] = {}
+
+
+class MultiArray:
+    arrays: tuple[np.ndarray, ...]
+
+    def __init__(self, arrays):
+        self.arrays = arrays
+        assert all(arrays[0].shape == a.shape for a in arrays), "Expect all arrays to have the same shape"
+
+    def astype(self, dt, **kwargs) -> Self:
+        return type(self)(tuple(array.astype(dt, **kwargs) for array in self.arrays))
+
+    def reshape(self, shape, **kwargs) -> Self:
+        return type(self)(tuple(array.reshape(shape, **kwargs) for array in self.arrays))
+
+    def squeeze(self, axis=None) -> Self:
+        return type(self)(tuple(array.squeeze(axis) for array in self.arrays))
+
+    def __setitem__(self, key, value) -> None:
+        assert len(value) == len(self.arrays)
+        for array, val in zip(self.arrays, value):
+            array[key] = val
+
+    def __array_function__(self, func, types, args, kwargs):
+        if func not in MULTIARRAY_HANDLED_FUNCTIONS:
+            return NotImplemented
+        # Note: this allows subclasses that don't override
+        # __array_function__ to handle MyArray objects
+        # if not all(issubclass(t, MyArray) for t in types): # I can't see this being relevant at all for this code, but maybe it's safer to leave it in?
+        # return NotImplemented
+        return MULTIARRAY_HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+    # Shape is needed; dtype and ndim likely are too.
+    # Assumes all component arrays share the same shape (asserted in __init__).
+    @property
+    def dtype(self) -> np.dtype:
+        return self.arrays[0].dtype
+
+    @property
+    def shape(self) -> tuple[int, ...]:
+        return self.arrays[0].shape
+
+    @property
+    def ndim(self) -> int:
+        return self.arrays[0].ndim
+
+    def __getitem__(self, key) -> Self:
+        return type(self)([array[key] for array in self.arrays])
+
+
+def implements(numpy_function):
+    """Register an __array_function__ implementation for MyArray objects."""
+
+    def decorator(func):
+        MULTIARRAY_HANDLED_FUNCTIONS[numpy_function] = func
+        return func
+
+    return decorator
+
+
+@implements(np.expand_dims)
+def expand_dims(multiarray, axis) -> MultiArray:
+    return MultiArray(tuple(np.expand_dims(a, axis) for a in multiarray.arrays))
+
+
+@implements(np.concatenate)
+def concatenate(multiarrays, axis) -> MultiArray:
+    n_arrays = len(multiarrays[0].arrays)
+    for ma in multiarrays[1:]:
+        assert len(ma.arrays) == n_arrays
+    return MultiArray(
+        tuple(np.concatenate(tuple(ma.arrays[i] for ma in multiarrays), axis) for i in range(n_arrays))
+    )
+
+
+@implements(np.transpose)
+def transpose(multiarray, axes) -> MultiArray:
+    return MultiArray(tuple(np.transpose(a, axes) for a in multiarray.arrays))
+
+
+@implements(np.squeeze)
+def squeeze(multiarray, axis) -> MultiArray:
+    return MultiArray(tuple(np.squeeze(a, axis) for a in multiarray.arrays))
+
+
+@implements(np.full)
+def full(shape, fill_values, *args, **kwargs) -> MultiArray:
+    """All arguments except fill_value are shared by each array in the MultiArray.
+    Iterate over fill_values to create arrays
+    """
+    return MultiArray(tuple(np.full(shape, fv, *args, **kwargs) for fv in fill_values))


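MultiArray exists so the three variance intermediates can move through flox's generic machinery (indexing, chunk concatenation, reindexing) as a single duck array: __array_function__ routes the few NumPy functions flox applies to intermediates onto each component array. A quick sketch of the behaviour, assuming the module above:

    import numpy as np

    from flox.multiarray import MultiArray

    a = MultiArray((np.zeros((2, 3)), np.ones((2, 3)), np.full((2, 3), 2.0)))
    b = MultiArray((np.zeros((2, 1)), np.ones((2, 1)), np.full((2, 1), 2.0)))

    # np.concatenate is intercepted by __array_function__ and applied per component.
    c = np.concatenate((a, b), axis=1)
    assert c.shape == (2, 4) and len(c.arrays) == 3

    # Indexing likewise applies to every component at once.
    assert c[:, :2].shape == (2, 2)
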
=====================================
flox/xrutils.py
=====================================
@@ -147,6 +147,9 @@ def is_scalar(value: Any, include_0d: bool = True) -> bool:
 
 
 def notnull(data):
+    if isinstance(data, tuple) and len(data) == 3 and data == (0, 0, 0):
+        # boo: another special case for Var
+        return True
     if not is_duck_array(data):
         data = np.asarray(data)
 
@@ -164,6 +167,9 @@ def notnull(data):
 
 
 def isnull(data: Any):
+    if isinstance(data, tuple) and len(data) == 3 and data == (0, 0, 0):
+        # boo: another special case for Var
+        return False
     if data is None:
         return False
     if not is_duck_array(data):


=====================================
pyproject.toml
=====================================
@@ -35,6 +35,25 @@ changelog = "https://github.com/xarray-contrib/flox/releases"
 [project.optional-dependencies]
 all = ["cachey", "dask", "numba", "numbagg", "xarray"]
 test = ["netCDF4"]
+docs = [
+    "cubed>=0.20.0",
+    "cubed-xarray",
+    "dask",
+    "xarray",
+    "numpydoc",
+    "matplotlib",
+    "myst-parser",
+    "myst-nb",
+    "sparse",
+    "sphinx",
+    "sphinx-remove-toctrees",
+    "furo>=2024.08",
+    "ipykernel",
+    "jupyter",
+    "sphinx-codeautolink",
+    "sphinx-copybutton",
+    "pyarrow",
+]
 
 [build-system]
 requires = [
@@ -149,3 +168,149 @@ testpaths = ["tests"]
 [tool.codespell]
 ignore-words-list = "nd,nax,coo"
 skip = "*.html"
+
+
+
+
+[dependency-groups]
+test = [
+    "hypothesis",
+    "pytest>=7",
+    "pytest-cov",
+    "pytest-pretty",
+    "pytest-xdist",
+    "syrupy",
+    "pooch",
+    "codecov",
+    "pyarrow",
+]
+all = ["cachey", "dask", "numba", "numbagg>=0.3", "xarray"]
+complete = [
+    {include-group = "test"},
+    "cachey",
+    "dask",
+    "numba",
+    "numbagg>=0.3",
+    "xarray",
+    "asv",
+    "cftime",
+    "cubed>=0.20.0",
+    "lxml",
+    "matplotlib",
+    "sparse",
+    "zarr",
+    "pre-commit",
+]
+minimal = [
+    {include-group = "test"},
+]
+no-dask = [
+    {include-group = "test"},
+    "cftime",
+    "sparse",
+    "numpydoc",
+    "numba",
+    "numbagg>=0.3",
+    "xarray",
+    "pre-commit",
+]
+no-numba = [
+    {include-group = "test"},
+    "asv",
+    "cachey",
+    "cftime",
+    "dask[core]",
+    "sparse",
+    "lxml",
+    "matplotlib",
+    "xarray",
+    "pre-commit",
+]
+no-xarray = [
+    {include-group = "test"},
+    "sparse",
+    "dask[core]",
+    "numpydoc",
+    "numba",
+    "pre-commit",
+]
+numpy1 = [
+    {include-group = "complete"},
+]
+docs = [
+    "cubed>=0.20.0",
+    "cubed-xarray",
+    "dask",
+    "xarray",
+    "numpydoc",
+    "matplotlib",
+    "myst-parser",
+    "myst-nb",
+    "sparse",
+    "sphinx",
+    "sphinx-remove-toctrees",
+    "furo>=2024.08",
+    "ipykernel",
+    "jupyter",
+    "sphinx-codeautolink",
+    "sphinx-copybutton",
+]
+benchmark = [
+    "asv>=0.6.4",
+    "py-rattler",
+    "build",
+    "cachey",
+    "dask[core]",
+    "xarray",
+    "numbagg>=0.3",
+    "wheel",
+    "pyarrow",
+]
+upstream = [
+    {include-group = "test"},
+    "asv_runner",
+    "cachey",
+    "pooch",
+    "hypothesis",
+    "toolz",
+    "pytest-reportlog",
+    "cython>=0.29.20",
+    "py-cpuinfo",
+    "python-dateutil",
+    "pytz",
+]
+upstream-dev = [
+    {include-group = "upstream"},
+    # These will be overridden by tool.uv.sources when this group is used
+    "numpy",
+    "scipy",
+    "pandas",
+    "xarray",
+    "dask[core]",
+    "numpy-groupies",
+    "sparse",
+    "cftime",
+]
+dev = [
+    {include-group = "complete"},
+    "dask[complete]",
+    "pdbpp",
+    "ipykernel",
+    "snakeviz",
+    "line_profiler",
+    "memory-profiler",
+    "memray",
+]
+types = [
+    "mypy",
+    "pandas-stubs",
+    "types-PyYAML",
+    "types-Pygments",
+    "types-defusedxml",
+    "types-docutils",
+    "types-pexpect",
+    "types-psutil",
+    "types-pytz",
+    "types-requests",
+    "types-setuptools",
+]


=====================================
tests/strategies.py
=====================================
@@ -108,9 +108,8 @@ NON_NUMPY_FUNCS = [
     "any",
     "all",
 ] + list(SCIPY_STATS_FUNCS)
-SKIPPED_FUNCS = ["var", "std", "nanvar", "nanstd"]
 
-func_st = st.sampled_from([f for f in ALL_FUNCS if f not in NON_NUMPY_FUNCS and f not in SKIPPED_FUNCS])
+func_st = st.sampled_from([f for f in ALL_FUNCS if f not in NON_NUMPY_FUNCS])
 
 
 @st.composite


=====================================
tests/test_core.py
=====================================
@@ -553,7 +553,7 @@ def test_groupby_agg_dask(func, shape, array_chunks, group_chunks, add_nan, dtyp
 def test_groupby_agg_cubed(func, shape, array_chunks, group_chunks, add_nan, engine, reindex):
     """Tests groupby_reduce with cubed arrays against groupby_reduce with numpy arrays"""
 
-    if func in ["first", "last"] or func in BLOCKWISE_FUNCS:
+    if func in ["first", "last", "var", "nanvar", "std", "nanstd"] or func in BLOCKWISE_FUNCS:
         pytest.skip()
 
     if "arg" in func and (engine in ["flox", "numbagg"] or reindex):
@@ -2240,3 +2240,38 @@ def test_sparse_nan_fill_value_reductions(chunks, fill_value, shape, func):
         expected = np.expand_dims(npfunc(numpy_array, axis=-1), axis=-1)
         actual, *_ = groupby_reduce(array, by, func=func, axis=-1)
     assert_equal(actual, expected)
+
+
+@pytest.mark.parametrize("func", ("nanvar", "var"))
+@pytest.mark.parametrize(
+    # Should fail at 10**8 with the old algorithm, and survive 10**12 with the current one
+    "exponent",
+    (2, 4, 6, 8, 10, 12),
+)
+def test_std_var_precision(func, exponent, engine):
+    # Generate a dataset with small variance and big mean
+    # Check that func with engine gives you the same answer as numpy
+
+    size = 1000
+    offset = 10**exponent
+    array = np.linspace(-1, 1, size)  # has zero mean
+    labels = np.arange(size) % 2  # Ideally we'd parametrize this too.
+
+    # These two need to be the same function, but with the offset added and not added
+    no_offset, _ = groupby_reduce(array, labels, engine=engine, func=func)
+    with_offset, _ = groupby_reduce(array + offset, labels, engine=engine, func=func)
+
+    expected = np.concatenate([np.nanvar(array[::2], keepdims=True), np.nanvar(array[1::2], keepdims=True)])
+    expected_offset = np.concatenate(
+        [np.nanvar(array[::2] + offset, keepdims=True), np.nanvar(array[1::2] + offset, keepdims=True)]
+    )
+
+    tol = {"rtol": 3e-8, "atol": 1e-9}  # Not sure how stringent to be here
+
+    assert_equal(expected, no_offset, tol)
+    assert_equal(expected_offset, with_offset, tol)
+    if exponent < 10:
+        # TODO: figure out this exponent limit
+        # TODO: the failure threshold in external tests depends on dask chunksize,
+        #       so it may need further exploration
+        assert_equal(no_offset, with_offset, tol)


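test_std_var_precision targets the weakness of the old sum_of_squares/sum/nanlen formulation: evaluating E[x**2] - E[x]**2 cancels catastrophically once the mean dwarfs the variance, which is why the old algorithm fails around 10**8 while the current one is expected to survive 10**12. A plain-NumPy illustration of the failure mode (not flox code):

    import numpy as np

    x = np.linspace(-1, 1, 1000) + 1e8  # variance ~1/3, mean 1e8

    # Old-style intermediates: sum of squares, sum, count.
    sumsq, s, n = (x**2).sum(), x.sum(), x.size
    naive = sumsq / n - (s / n) ** 2         # ~1e16 - ~1e16: few significant bits survive
    two_pass = ((x - x.mean()) ** 2).mean()  # per-group equivalent of the new var_chunk

    print(naive, two_pass, np.var(x))  # naive is wildly off; two_pass matches np.var
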
=====================================
tests/test_properties.py
=====================================
@@ -66,18 +66,25 @@ NUMPY_SCAN_FUNCS: dict[str, Callable] = {
 def not_overflowing_array(array: np.ndarray[Any, Any]) -> bool:
     if array.dtype.kind in "Mm":
         array = array.view(np.int64)
+    array = array.ravel()
+    array = array[notnull(array)]
+    if array.size == 0:
+        return True
+
     if array.dtype.kind == "f":
         info = np.finfo(array.dtype)
+        limit = 2 ** (info.nmant + 1)
     elif array.dtype.kind in ["i", "u"]:
         info = np.iinfo(array.dtype)  # type: ignore[assignment]
     else:
         return True
 
-    array = array.ravel()
-    array = array[notnull(array)]
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", RuntimeWarning)
         result = bool(np.all((array < info.max / array.size) & (array > info.min / array.size)))
+        if array.dtype.kind == "f":
+            result = result and bool(np.all(np.abs(array) < limit / array.size))
+
     # note(f"returning {result}, {array.min()} vs {info.min}, {array.max()} vs {info.max}")
     return result
 
@@ -99,7 +106,7 @@ def test_groupby_reduce(data, array, func: str) -> None:
 
     # TODO: funny bugs with overflows here
     is_cftime = _contains_cftime_datetimes(array)
-    assume(not (is_cftime and func in ["prod", "nanprod"]))
+    assume(not (is_cftime and func in ["prod", "nanprod", "var", "nanvar", "std", "nanstd"]))
 
     axis = -1
     by = data.draw(
@@ -203,7 +210,7 @@ def test_groupby_reduce_numpy_vs_other(data, array, func: str) -> None:
     result_other, *_ = groupby_reduce(array, by, **kwargs)
     result_numpy, *_ = groupby_reduce(numpy_array, by, **kwargs)
     assert isinstance(result_other, type(array))
-    assert_equal(result_numpy, result_other)
+    assert_equal(result_other, result_numpy)
 
 
 @given(

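The tightened not_overflowing_array above adds a float-specific bound at 2**(nmant + 1): past that point consecutive integers are no longer exactly representable, so long sums can silently lose precision without ever overflowing, which can make property-test comparisons fail spuriously. For float64 the limit is 2**53:

    import numpy as np

    info = np.finfo(np.float64)
    limit = 2.0 ** (info.nmant + 1)  # 2**53, the last float64 with unit spacing
    assert limit + 1 == limit        # adding 1 is lost to rounding
    assert limit - 1 != limit        # just below the limit, integers are still exact
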

=====================================
uv-numpy1.toml
=====================================
@@ -0,0 +1,6 @@
+# Configuration for numpy<2 testing
+# Use with: uv sync --group numpy1 --config-file uv-numpy1.toml
+
+override-dependencies = [
+    "numpy>=1.26,<2",
+]


=====================================
uv-upstream.toml
=====================================
@@ -0,0 +1,16 @@
+# Configuration for upstream development versions
+# Use with: uv sync --group upstream-dev --config-file uv-upstream.toml
+
+[[index]]
+name = "scientific-python-nightly"
+url = "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple"
+
+[sources]
+numpy = { index = "scientific-python-nightly" }
+scipy = { index = "scientific-python-nightly" }
+pandas = { index = "scientific-python-nightly" }
+xarray = { index = "scientific-python-nightly" }
+dask = { git = "https://github.com/dask/dask" }
+numpy-groupies = { git = "https://github.com/ml31415/numpy-groupies" }
+sparse = { git = "https://github.com/pydata/sparse" }
+cftime = { git = "https://github.com/Unidata/cftime" }


=====================================
uv.lock
=====================================
The diff for this file was not included because it is too large.


View it on GitLab: https://salsa.debian.org/debian-gis-team/flox/-/commit/62fdc5c2dff80c758dcd1ac6aa2d3edc50d73a21
