[Git][debian-gis-team/flox][master] 2 commits: Add upstream patch to fix test failure on i386.
Bas Couwenberg (@sebastic)
gitlab@salsa.debian.org
Fri Jan 13 06:21:20 GMT 2023
Bas Couwenberg pushed to branch master at Debian GIS Project / flox
Commits:
ee43c256 by Bas Couwenberg at 2023-01-13T07:19:54+01:00
Add upstream patch to fix test failure on i386.
- - - - -
27678e14 by Bas Couwenberg at 2023-01-13T07:21:00+01:00
Revert "Disable i386 build where the testsuite fails."
This reverts commit c318cd663f083abe60c23ebb2d755689bda55876.
- - - - -
4 changed files:
- debian/.gitlab-ci.yml
- debian/changelog
- + debian/patches/pr201-32bit-support-int64-to-intp.patch
- debian/patches/series
Changes:
=====================================
debian/.gitlab-ci.yml
=====================================
@@ -2,5 +2,3 @@
include:
- https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/recipes/debian.yml
-variables:
- SALSA_CI_DISABLE_BUILD_PACKAGE_I386: 1
=====================================
debian/changelog
=====================================
@@ -2,6 +2,7 @@ flox (0.6.5-4) UNRELEASED; urgency=medium
* Team upload.
* Add gbp.conf to use pristine-tar & --source-only-changes by default.
+ * Add upstream patch to fix test failure on i386.
-- Bas Couwenberg <sebastic@debian.org> Tue, 10 Jan 2023 08:30:58 +0100
=====================================
debian/patches/pr201-32bit-support-int64-to-intp.patch
=====================================
@@ -0,0 +1,280 @@
+Description: 32bit support: int64 to intp
+Author: dcherian <deepak@cherian.net>
+Origin: https://github.com/xarray-contrib/flox/pull/201
+Bug: https://github.com/xarray-contrib/flox/issues/200
+
+--- a/tests/test_core.py
++++ b/tests/test_core.py
+@@ -140,7 +140,7 @@ def test_groupby_reduce(
+ elif func == "sum":
+ expected_result = np.array(expected, dtype=dtype)
+ elif func == "count":
+- expected_result = np.array(expected, dtype=np.int64)
++ expected_result = np.array(expected, dtype=np.intp)
+
+ result, groups, = groupby_reduce(
+ array,
+@@ -150,9 +150,9 @@ def test_groupby_reduce(
+ fill_value=123,
+ engine=engine,
+ )
+- # we use pd.Index(expected_groups).to_numpy() which is always int64
++ # we use pd.Index(expected_groups).to_numpy() which is always intp
+ # for the values in this tests
+- g_dtype = by.dtype if expected_groups is None else np.int64
++ g_dtype = by.dtype if expected_groups is None else np.intp
+
+ assert_equal(groups, np.array([0, 1, 2], g_dtype))
+ assert_equal(expected_result, result)
+@@ -284,7 +284,7 @@ def test_groupby_reduce_count():
+ array = np.array([0, 0, np.nan, np.nan, np.nan, 1, 1])
+ labels = np.array(["a", "b", "b", "b", "c", "c", "c"])
+ result, _ = groupby_reduce(array, labels, func="count")
+- assert_equal(result, np.array([1, 1, 2], dtype=np.int64))
++ assert_equal(result, np.array([1, 1, 2], dtype=np.intp))
+
+
+ def test_func_is_aggregation():
+@@ -299,7 +299,7 @@ def test_func_is_aggregation():
+
+ @requires_dask
+ @pytest.mark.parametrize("func", ("sum", "prod"))
+-@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.int32, np.int64])
++@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.int32, np.intp])
+ def test_groupby_reduce_preserves_dtype(dtype, func):
+ array = np.ones((2, 12), dtype=dtype)
+ by = np.array([labels] * 2)
+@@ -405,32 +405,32 @@ def test_groupby_agg_dask(func, shape, a
+ def test_numpy_reduce_axis_subset(engine):
+ # TODO: add NaNs
+ by = labels2d
+- array = np.ones_like(by, dtype=np.int64)
++ array = np.ones_like(by, dtype=np.intp)
+ kwargs = dict(func="count", engine=engine, fill_value=0)
+ result, _ = groupby_reduce(array, by, **kwargs, axis=1)
+- assert_equal(result, np.array([[2, 3], [2, 3]], dtype=np.int64))
++ assert_equal(result, np.array([[2, 3], [2, 3]], dtype=np.intp))
+
+ by = np.broadcast_to(labels2d, (3, *labels2d.shape))
+ array = np.ones_like(by)
+ result, _ = groupby_reduce(array, by, **kwargs, axis=1)
+- subarr = np.array([[1, 1], [1, 1], [0, 2], [1, 1], [1, 1]], dtype=np.int64)
++ subarr = np.array([[1, 1], [1, 1], [0, 2], [1, 1], [1, 1]], dtype=np.intp)
+ expected = np.tile(subarr, (3, 1, 1))
+ assert_equal(result, expected)
+
+ result, _ = groupby_reduce(array, by, **kwargs, axis=2)
+- subarr = np.array([[2, 3], [2, 3]], dtype=np.int64)
++ subarr = np.array([[2, 3], [2, 3]], dtype=np.intp)
+ expected = np.tile(subarr, (3, 1, 1))
+ assert_equal(result, expected)
+
+ result, _ = groupby_reduce(array, by, **kwargs, axis=(1, 2))
+- expected = np.array([[4, 6], [4, 6], [4, 6]], dtype=np.int64)
++ expected = np.array([[4, 6], [4, 6], [4, 6]], dtype=np.intp)
+ assert_equal(result, expected)
+
+ result, _ = groupby_reduce(array, by, **kwargs, axis=(2, 1))
+ assert_equal(result, expected)
+
+ result, _ = groupby_reduce(array, by[0, ...], **kwargs, axis=(1, 2))
+- expected = np.array([[4, 6], [4, 6], [4, 6]], dtype=np.int64)
++ expected = np.array([[4, 6], [4, 6], [4, 6]], dtype=np.intp)
+ assert_equal(result, expected)
+
+
+@@ -438,7 +438,7 @@ def test_numpy_reduce_axis_subset(engine
+ def test_dask_reduce_axis_subset():
+
+ by = labels2d
+- array = np.ones_like(by, dtype=np.int64)
++ array = np.ones_like(by, dtype=np.intp)
+ with raise_if_dask_computes():
+ result, _ = groupby_reduce(
+ da.from_array(array, chunks=(2, 3)),
+@@ -447,11 +447,11 @@ def test_dask_reduce_axis_subset():
+ axis=1,
+ expected_groups=[0, 2],
+ )
+- assert_equal(result, np.array([[2, 3], [2, 3]], dtype=np.int64))
++ assert_equal(result, np.array([[2, 3], [2, 3]], dtype=np.intp))
+
+ by = np.broadcast_to(labels2d, (3, *labels2d.shape))
+ array = np.ones_like(by)
+- subarr = np.array([[1, 1], [1, 1], [123, 2], [1, 1], [1, 1]], dtype=np.int64)
++ subarr = np.array([[1, 1], [1, 1], [123, 2], [1, 1], [1, 1]], dtype=np.intp)
+ expected = np.tile(subarr, (3, 1, 1))
+ with raise_if_dask_computes():
+ result, _ = groupby_reduce(
+@@ -464,7 +464,7 @@ def test_dask_reduce_axis_subset():
+ )
+ assert_equal(result, expected)
+
+- subarr = np.array([[2, 3], [2, 3]], dtype=np.int64)
++ subarr = np.array([[2, 3], [2, 3]], dtype=np.intp)
+ expected = np.tile(subarr, (3, 1, 1))
+ with raise_if_dask_computes():
+ result, _ = groupby_reduce(
+@@ -580,9 +580,9 @@ def test_groupby_all_nan_blocks_dask(exp
+ nan_labels[:5] = np.nan
+
+ array, by, expected = (
+- np.ones((2, 12), dtype=np.int64),
++ np.ones((2, 12), dtype=np.intp),
+ np.array([nan_labels, nan_labels[::-1]]),
+- np.array([2, 8, 4], dtype=np.int64),
++ np.array([2, 8, 4], dtype=np.intp),
+ )
+
+ actual, _ = groupby_reduce(
+@@ -672,7 +672,7 @@ def test_groupby_bins(chunk_labels, chun
+ engine=engine,
+ method=method,
+ )
+- expected = np.array([3, 1, 0], dtype=np.int64)
++ expected = np.array([3, 1, 0], dtype=np.intp)
+ for left, right in zip(groups, pd.IntervalIndex.from_arrays([1, 2, 4], [2, 4, 5]).to_numpy()):
+ assert left == right
+ assert_equal(actual, expected)
+@@ -772,7 +772,7 @@ def test_fill_value_behaviour(func, chun
+
+ @requires_dask
+ @pytest.mark.parametrize("func", ["mean", "sum"])
+-@pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "int64"])
++@pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "intp"])
+ def test_dtype_preservation(dtype, func, engine):
+ if func == "sum" or (func == "mean" and "float" in dtype):
+ expected = np.dtype(dtype)
+@@ -789,10 +789,8 @@ def test_dtype_preservation(dtype, func,
+
+
+ @requires_dask
+-@pytest.mark.parametrize("dtype", [np.int32, np.int64])
+-@pytest.mark.parametrize(
+- "labels_dtype", [pytest.param(np.int32, marks=pytest.mark.xfail), np.int64]
+-)
++@pytest.mark.parametrize("dtype", [np.int32, np.intp])
++@pytest.mark.parametrize("labels_dtype", [pytest.param(np.int32, marks=pytest.mark.xfail), np.intp])
+ @pytest.mark.parametrize("method", ["map-reduce", "cohorts"])
+ def test_cohorts_map_reduce_consistent_dtypes(method, dtype, labels_dtype):
+ repeats = np.array([4, 4, 12, 2, 3, 4], dtype=np.int32)
+@@ -801,7 +799,7 @@ def test_cohorts_map_reduce_consistent_d
+
+ actual, actual_groups = groupby_reduce(array, labels, func="count", method=method)
+ assert_equal(actual_groups, np.arange(6, dtype=labels.dtype))
+- assert_equal(actual, repeats.astype(np.int64))
++ assert_equal(actual, repeats.astype(np.intp))
+
+ actual, actual_groups = groupby_reduce(array, labels, func="sum", method=method)
+ assert_equal(actual_groups, np.arange(6, dtype=labels.dtype))
+@@ -817,7 +815,7 @@ def test_cohorts_nd_by(func, method, axi
+ o2 = dask.array.ones((2, 3), chunks=-1)
+
+ array = dask.array.block([[o, 2 * o], [3 * o2, 4 * o2]])
+- by = array.compute().astype(np.int64)
++ by = array.compute().astype(np.intp)
+ by[0, 1] = 30
+ by[2, 1] = 40
+ by[0, 4] = 31
+@@ -842,9 +840,9 @@ def test_cohorts_nd_by(func, method, axi
+
+ actual, groups = groupby_reduce(array, by, sort=False, **kwargs)
+ if method == "map-reduce":
+- assert_equal(groups, np.array([1, 30, 2, 31, 3, 4, 40], dtype=np.int64))
++ assert_equal(groups, np.array([1, 30, 2, 31, 3, 4, 40], dtype=np.intp))
+ else:
+- assert_equal(groups, np.array([1, 30, 2, 31, 3, 40, 4], dtype=np.int64))
++ assert_equal(groups, np.array([1, 30, 2, 31, 3, 40, 4], dtype=np.intp))
+ reindexed = reindex_(actual, groups, pd.Index(sorted_groups))
+ assert_equal(reindexed, expected)
+
+@@ -967,7 +965,7 @@ def test_factorize_values_outside_bins()
+ fastpath=True,
+ )
+ actual = vals[0]
+- expected = np.array([[-1, -1], [-1, 0], [6, 12], [18, 24], [-1, -1]], np.int64)
++ expected = np.array([[-1, -1], [-1, 0], [6, 12], [18, 24], [-1, -1]], np.intp)
+ assert_equal(expected, actual)
+
+
+@@ -978,7 +976,7 @@ def test_multiple_groupers_bins(chunk) -
+
+ xp = dask.array if chunk else np
+ array_kwargs = {"chunks": 2} if chunk else {}
+- array = xp.ones((5, 2), **array_kwargs, dtype=np.int64)
++ array = xp.ones((5, 2), **array_kwargs, dtype=np.intp)
+
+ actual, *_ = groupby_reduce(
+ array,
+@@ -991,7 +989,7 @@ def test_multiple_groupers_bins(chunk) -
+ ),
+ func="count",
+ )
+- expected = np.eye(5, 5, dtype=np.int64)
++ expected = np.eye(5, 5, dtype=np.intp)
+ assert_equal(expected, actual)
+
+
+@@ -1015,12 +1013,12 @@ def test_multiple_groupers(chunk, by1, b
+
+ xp = dask.array if chunk else np
+ array_kwargs = {"chunks": 2} if chunk else {}
+- array = xp.ones((5, 2), **array_kwargs, dtype=np.int64)
++ array = xp.ones((5, 2), **array_kwargs, dtype=np.intp)
+
+ if chunk:
+ by2 = dask.array.from_array(by2)
+
+- expected = np.ones((5, 2), dtype=np.int64)
++ expected = np.ones((5, 2), dtype=np.intp)
+ actual, *_ = groupby_reduce(
+ array, by1, by2, axis=(0, 1), func="count", expected_groups=expected_groups
+ )
+@@ -1066,38 +1064,38 @@ def test_factorize_reindex_sorting_strin
+ )
+
+ expected = factorize_(**kwargs, reindex=True, sort=True)[0]
+- assert_equal(expected, np.array([0, 1, 4, 2], dtype=np.int64))
++ assert_equal(expected, np.array([0, 1, 4, 2], dtype=np.intp))
+
+ expected = factorize_(**kwargs, reindex=True, sort=False)[0]
+- assert_equal(expected, np.array([0, 3, 4, 1], dtype=np.int64))
++ assert_equal(expected, np.array([0, 3, 4, 1], dtype=np.intp))
+
+ expected = factorize_(**kwargs, reindex=False, sort=False)[0]
+- assert_equal(expected, np.array([0, 1, 2, 3], dtype=np.int64))
++ assert_equal(expected, np.array([0, 1, 2, 3], dtype=np.intp))
+
+ expected = factorize_(**kwargs, reindex=False, sort=True)[0]
+- assert_equal(expected, np.array([0, 1, 3, 2], dtype=np.int64))
++ assert_equal(expected, np.array([0, 1, 3, 2], dtype=np.intp))
+
+
+ def test_factorize_reindex_sorting_ints():
+ kwargs = dict(
+ by=(np.array([-10, 1, 10, 2, 3, 5]),),
+ axis=-1,
+- expected_groups=(np.array([0, 1, 2, 3, 4, 5], np.int64),),
++ expected_groups=(np.array([0, 1, 2, 3, 4, 5], np.intp),),
+ )
+
+ expected = factorize_(**kwargs, reindex=True, sort=True)[0]
+- assert_equal(expected, np.array([6, 1, 6, 2, 3, 5], dtype=np.int64))
++ assert_equal(expected, np.array([6, 1, 6, 2, 3, 5], dtype=np.intp))
+
+ expected = factorize_(**kwargs, reindex=True, sort=False)[0]
+- assert_equal(expected, np.array([6, 1, 6, 2, 3, 5], dtype=np.int64))
++ assert_equal(expected, np.array([6, 1, 6, 2, 3, 5], dtype=np.intp))
+
+ kwargs["expected_groups"] = (np.arange(5, -1, -1),)
+
+ expected = factorize_(**kwargs, reindex=True, sort=True)[0]
+- assert_equal(expected, np.array([6, 1, 6, 2, 3, 5], dtype=np.int64))
++ assert_equal(expected, np.array([6, 1, 6, 2, 3, 5], dtype=np.intp))
+
+ expected = factorize_(**kwargs, reindex=True, sort=False)[0]
+- assert_equal(expected, np.array([6, 4, 6, 3, 2, 0], dtype=np.int64))
++ assert_equal(expected, np.array([6, 4, 6, 3, 2, 0], dtype=np.intp))
+
+
+ @requires_dask
=====================================
debian/patches/series
=====================================
@@ -1 +1,2 @@
0001-Compatibility-with-Pandas-older-than-1.4.patch
+pr201-32bit-support-int64-to-intp.patch
View it on GitLab: https://salsa.debian.org/debian-gis-team/flox/-/compare/177c81fb4161219fba59aef7c5751a62acf25ac3...27678e14896a306f556698981b77b692be2d1750
--
View it on GitLab: https://salsa.debian.org/debian-gis-team/flox/-/compare/177c81fb4161219fba59aef7c5751a62acf25ac3...27678e14896a306f556698981b77b692be2d1750
You're receiving this email because of your account on salsa.debian.org.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://alioth-lists.debian.net/pipermail/pkg-grass-devel/attachments/20230113/0159ebbc/attachment-0001.htm>
More information about the Pkg-grass-devel
mailing list