[Git][debian-gis-team/python-geopandas][master] 4 commits: New upstream version 0.14.3
Bas Couwenberg (@sebastic)
gitlab at salsa.debian.org
Thu Feb 1 04:42:49 GMT 2024
Bas Couwenberg pushed to branch master at Debian GIS Project / python-geopandas
Commits:
00159570 by Bas Couwenberg at 2024-02-01T05:33:17+01:00
New upstream version 0.14.3
- - - - -
6b0b1a14 by Bas Couwenberg at 2024-02-01T05:33:40+01:00
Update upstream source from tag 'upstream/0.14.3'
Update to upstream version '0.14.3'
with Debian dir d99b8dc9f70bbc7a635f0fd4d9a44420140e6771
- - - - -
0eb072c9 by Bas Couwenberg at 2024-02-01T05:34:46+01:00
New upstream release.
- - - - -
3bc4a66e by Bas Couwenberg at 2024-02-01T05:35:41+01:00
Set distribution to unstable.
- - - - -
17 changed files:
- CHANGELOG.md
- debian/changelog
- geopandas/_version.py
- geopandas/array.py
- geopandas/geodataframe.py
- geopandas/geoseries.py
- geopandas/io/arrow.py
- geopandas/io/file.py
- geopandas/io/tests/test_arrow.py
- geopandas/io/tests/test_file.py
- geopandas/tests/test_extension_array.py
- geopandas/tests/test_geom_methods.py
- geopandas/tests/test_merge.py
- geopandas/tests/test_op_output_types.py
- geopandas/tests/test_pandas_methods.py
- geopandas/tools/overlay.py
- pyproject.toml
Changes:
=====================================
CHANGELOG.md
=====================================
@@ -1,5 +1,11 @@
# Changelog
+## Version 0.14.3 (Jan 31, 2024)
+
+- Several fixes for compatibility with the latest pandas 2.2 release.
+- Fix bug in `pandas.concat` CRS consistency checking where CRS differing by WKT
+ whitespace only were treated as incompatible (#3023).
+
## Version 0.14.2 (Jan 4, 2024)
- Fix regression in `overlay` where using `buffer(0)` instead of `make_valid` internally
=====================================
debian/changelog
=====================================
@@ -1,3 +1,10 @@
+python-geopandas (0.14.3-1) unstable; urgency=medium
+
+ * Team upload.
+ * New upstream release.
+
+ -- Bas Couwenberg <sebastic at debian.org> Thu, 01 Feb 2024 05:35:27 +0100
+
python-geopandas (0.14.2-1) unstable; urgency=medium
* Team upload.
=====================================
geopandas/_version.py
=====================================
@@ -25,9 +25,9 @@ def get_keywords() -> Dict[str, str]:
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
- git_refnames = " (tag: v0.14.2, 0.14.x)"
- git_full = "fb079bf07adb889a720532d871798d75c90f50b5"
- git_date = "2024-01-04 22:30:04 +0100"
+ git_refnames = " (tag: v0.14.3, 0.14.x)"
+ git_full = "5558c35297a537b05675d236ee550612460299ec"
+ git_date = "2024-01-31 20:20:12 +0100"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
=====================================
geopandas/array.py
=====================================
@@ -1532,15 +1532,20 @@ def _get_common_crs(arr_seq):
# mask out all None arrays with no crs (most likely auto generated by pandas
# from concat with missing column)
arr_seq = [ga for ga in arr_seq if not (ga.isna().all() and ga.crs is None)]
-
- crs_set = {arr.crs for arr in arr_seq}
- crs_not_none = [crs for crs in crs_set if crs is not None]
+ # determine unique crs without using a set, because CRS hash can be different
+ # for objects with the same CRS
+ unique_crs = []
+ for arr in arr_seq:
+ if arr.crs not in unique_crs:
+ unique_crs.append(arr.crs)
+
+ crs_not_none = [crs for crs in unique_crs if crs is not None]
names = [crs.name for crs in crs_not_none]
if len(crs_not_none) == 0:
return None
if len(crs_not_none) == 1:
- if len(crs_set) != 1:
+ if len(unique_crs) != 1:
warnings.warn(
"CRS not set for some of the concatenation inputs. "
f"Setting output's CRS as {names[0]} "
=====================================
geopandas/geodataframe.py
=====================================
@@ -1048,15 +1048,6 @@ individually so that features may have different properties
Requires 'pyarrow'.
- WARNING: this is an early implementation of Parquet file support and
- associated metadata, the specification for which continues to evolve.
- This is tracking version 0.4.0 of the GeoParquet specification at:
- https://github.com/opengeospatial/geoparquet
-
- This metadata specification does not yet make stability promises. As such,
- we do not yet recommend using this in a production setting unless you are
- able to rewrite your Parquet files.
-
.. versionadded:: 0.8
Parameters
@@ -1070,7 +1061,7 @@ individually so that features may have different properties
output except `RangeIndex` which is stored as metadata only.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
- schema_version : {'0.1.0', '0.4.0', None}
+ schema_version : {'0.1.0', '0.4.0', '1.0.0', None}
GeoParquet specification version; if not provided will default to
latest supported version.
kwargs
@@ -1117,15 +1108,6 @@ individually so that features may have different properties
Requires 'pyarrow' >= 0.17.
- WARNING: this is an early implementation of Parquet file support and
- associated metadata, the specification for which continues to evolve.
- This is tracking version 0.4.0 of the GeoParquet specification at:
- https://github.com/opengeospatial/geoparquet
-
- This metadata specification does not yet make stability promises. As such,
- we do not yet recommend using this in a production setting unless you are
- able to rewrite your Feather files.
-
.. versionadded:: 0.8
Parameters
@@ -1140,7 +1122,7 @@ individually so that features may have different properties
compression : {'zstd', 'lz4', 'uncompressed'}, optional
Name of the compression to use. Use ``"uncompressed"`` for no
compression. By default uses LZ4 if available, otherwise uncompressed.
- schema_version : {'0.1.0', '0.4.0', None}
+ schema_version : {'0.1.0', '0.4.0', '1.0.0', None}
GeoParquet specification version; if not provided will default to
latest supported version.
kwargs
@@ -1624,6 +1606,15 @@ individually so that features may have different properties
def _constructor(self):
return _geodataframe_constructor_with_fallback
+ def _constructor_from_mgr(self, mgr, axes):
+ # replicate _geodataframe_constructor_with_fallback behaviour
+ # unless safe to skip
+ if not any(isinstance(block.dtype, GeometryDtype) for block in mgr.blocks):
+ return _geodataframe_constructor_with_fallback(
+ pd.DataFrame._from_mgr(mgr, axes)
+ )
+ return GeoDataFrame._from_mgr(mgr, axes)
+
@property
def _constructor_sliced(self):
def _geodataframe_constructor_sliced(*args, **kwargs):
@@ -1650,6 +1641,13 @@ individually so that features may have different properties
return _geodataframe_constructor_sliced
+ def _constructor_sliced_from_mgr(self, mgr, axes):
+ is_row_proxy = mgr.index.is_(self.columns)
+
+ if isinstance(mgr.blocks[0].dtype, GeometryDtype) and not is_row_proxy:
+ return GeoSeries._from_mgr(mgr, axes)
+ return Series._from_mgr(mgr, axes)
+
def __finalize__(self, other, method=None, **kwargs):
"""propagate metadata from other to self"""
self = super().__finalize__(other, method=method, **kwargs)
=====================================
geopandas/geoseries.py
=====================================
@@ -51,20 +51,16 @@ def _geoseries_constructor_with_fallback(
return Series(data=data, index=index, **kwargs)
-def _geoseries_expanddim(data=None, *args, **kwargs):
+def _expanddim_logic(df):
+ """Shared logic for _constructor_expanddim and _constructor_from_mgr_expanddim."""
from geopandas import GeoDataFrame
- # pd.Series._constructor_expanddim == pd.DataFrame
- df = pd.DataFrame(data, *args, **kwargs)
- geo_col_name = None
- if isinstance(data, GeoSeries):
- # pandas default column name is 0, keep convention
- geo_col_name = data.name if data.name is not None else 0
-
- if df.shape[1] == 1:
- geo_col_name = df.columns[0]
-
if (df.dtypes == "geometry").sum() > 0:
+ if df.shape[1] == 1:
+ geo_col_name = df.columns[0]
+ else:
+ geo_col_name = None
+
if geo_col_name is None or not is_geometry_type(df[geo_col_name]):
df = GeoDataFrame(df)
df._geometry_column_name = None
@@ -74,6 +70,13 @@ def _geoseries_expanddim(data=None, *args, **kwargs):
return df
+def _geoseries_expanddim(data=None, *args, **kwargs):
+ # pd.Series._constructor_expanddim == pd.DataFrame, we start
+ # with that then specialize.
+ df = pd.DataFrame(data, *args, **kwargs)
+ return _expanddim_logic(df)
+
+
class GeoSeries(GeoPandasBase, Series):
"""
A Series object designed to store shapely geometry objects.
@@ -152,8 +155,6 @@ class GeoSeries(GeoPandasBase, Series):
"""
- _metadata = ["name"]
-
def __init__(self, data=None, index=None, crs: Optional[Any] = None, **kwargs):
if hasattr(data, "crs") and crs:
if not data.crs:
@@ -619,10 +620,22 @@ class GeoSeries(GeoPandasBase, Series):
def _constructor(self):
return _geoseries_constructor_with_fallback
+ def _constructor_from_mgr(self, mgr, axes):
+ assert isinstance(mgr, SingleBlockManager)
+
+ if not isinstance(mgr.blocks[0].dtype, GeometryDtype):
+ return Series._from_mgr(mgr, axes)
+
+ return GeoSeries._from_mgr(mgr, axes)
+
@property
def _constructor_expanddim(self):
return _geoseries_expanddim
+ def _constructor_expanddim_from_mgr(self, mgr, axes):
+ df = pd.DataFrame._from_mgr(mgr, axes)
+ return _expanddim_logic(df)
+
def _wrapped_pandas_method(self, mtd, *args, **kwargs):
"""Wrap a generic pandas method to ensure it returns a GeoSeries"""
val = getattr(super(), mtd)(*args, **kwargs)
=====================================
geopandas/io/arrow.py
=====================================
@@ -6,6 +6,8 @@ import numpy as np
from pandas import DataFrame, Series
import geopandas._compat as compat
+import shapely
+
from geopandas._compat import import_optional_dependency
from geopandas.array import from_wkb
from geopandas import GeoDataFrame
@@ -258,7 +260,7 @@ def _geopandas_to_arrow(df, index=None, schema_version=None):
geo_metadata = _create_metadata(df, schema_version=schema_version)
kwargs = {}
- if compat.USE_SHAPELY_20:
+ if compat.USE_SHAPELY_20 and shapely.geos.geos_version > (3, 10, 0):
kwargs = {"flavor": "iso"}
else:
for col in df.columns[df.dtypes == "geometry"]:
@@ -266,7 +268,8 @@ def _geopandas_to_arrow(df, index=None, schema_version=None):
if series.has_z.any():
warnings.warn(
"The GeoDataFrame contains 3D geometries, and when using "
- "shapely < 2.0, such geometries will be written not exactly "
+ "shapely < 2.0 or GEOS < 3.10, such geometries will be written "
+ "not exactly "
"following to the GeoParquet spec (not using ISO WKB). For "
"most use cases this should not be a problem (GeoPandas can "
"read such files fine).",
=====================================
geopandas/io/file.py
=====================================
@@ -396,16 +396,37 @@ def _read_file_fiona(
f_filt, crs=crs, columns=columns + ["geometry"]
)
for k in datetime_fields:
- as_dt = pd.to_datetime(df[k], errors="ignore")
- # if to_datetime failed, try again for mixed timezone offsets
- if as_dt.dtype == "object":
+ as_dt = None
+ # plain try catch for when pandas will raise in the future
+ # TODO we can tighten the exception type in future when it does
+ try:
+ with warnings.catch_warnings():
+ # pandas 2.x does not yet enforce this behaviour but raises a
+ # warning -> we want to suppress this warning for our users,
+ # and do this by turning it into an error so we take the
+ # `except` code path to try again with utc=True
+ warnings.filterwarnings(
+ "error",
+ "In a future version of pandas, parsing datetimes with "
+ "mixed time zones will raise an error",
+ FutureWarning,
+ )
+ as_dt = pd.to_datetime(df[k])
+ except Exception:
+ pass
+ if as_dt is None or as_dt.dtype == "object":
+ # if to_datetime failed, try again for mixed timezone offsets
# This can still fail if there are invalid datetimes
- as_dt = pd.to_datetime(df[k], errors="ignore", utc=True)
+ try:
+ as_dt = pd.to_datetime(df[k], utc=True)
+ except Exception:
+ pass
# if to_datetime succeeded, round datetimes as
# fiona only supports up to ms precision (any microseconds are
# floating point rounding error)
- if not (as_dt.dtype == "object"):
+ if as_dt is not None and not (as_dt.dtype == "object"):
df[k] = as_dt.dt.round(freq="ms")
+
return df
@@ -628,10 +649,12 @@ def _to_file_fiona(df, filename, driver, schema, crs, mode, **kwargs):
with fiona_env():
crs_wkt = None
try:
- gdal_version = fiona.env.get_gdal_release_name()
- except AttributeError:
- gdal_version = "2.0.0" # just assume it is not the latest
- if Version(gdal_version) >= Version("3.0.0") and crs:
+ gdal_version = Version(
+ fiona.env.get_gdal_release_name().strip("e")
+ ) # GH3147
+ except (AttributeError, ValueError):
+ gdal_version = Version("2.0.0") # just assume it is not the latest
+ if gdal_version >= Version("3.0.0") and crs:
crs_wkt = crs.to_wkt()
elif crs:
crs_wkt = crs.to_wkt("WKT1_GDAL")
=====================================
geopandas/io/tests/test_arrow.py
=====================================
@@ -11,6 +11,7 @@ from pandas import DataFrame, read_parquet as pd_read_parquet
from pandas.testing import assert_frame_equal
import numpy as np
import pyproj
+import shapely
from shapely.geometry import box, Point, MultiPolygon
@@ -719,7 +720,7 @@ def test_write_iso_wkb(tmpdir):
gdf = geopandas.GeoDataFrame(
geometry=geopandas.GeoSeries.from_wkt(["POINT Z (1 2 3)"])
)
- if compat.USE_SHAPELY_20:
+ if compat.USE_SHAPELY_20 and shapely.geos.geos_version > (3, 10, 0):
gdf.to_parquet(tmpdir / "test.parquet")
else:
with pytest.warns(UserWarning, match="The GeoDataFrame contains 3D geometries"):
@@ -730,7 +731,7 @@ def test_write_iso_wkb(tmpdir):
table = read_table(tmpdir / "test.parquet")
wkb = table["geometry"][0].as_py().hex()
- if compat.USE_SHAPELY_20:
+ if compat.USE_SHAPELY_20 and shapely.geos.geos_version > (3, 10, 0):
# correct ISO flavor
assert wkb == "01e9030000000000000000f03f00000000000000400000000000000840"
else:
=====================================
geopandas/io/tests/test_file.py
=====================================
@@ -578,7 +578,7 @@ def test_read_file(engine):
"main/geopandas/tests/data/null_geom.geojson",
# url to zip file
"https://raw.githubusercontent.com/geopandas/geopandas/"
- "main/geopandas/datasets/nybb_16a.zip",
+ "main/geopandas/tests/data/nybb_16a.zip",
# url to zipfile without extension
"https://geonode.goosocean.org/download/480",
# url to web service
=====================================
geopandas/tests/test_extension_array.py
=====================================
@@ -25,7 +25,12 @@ import shapely.geometry
from shapely.geometry import Point
from geopandas.array import GeometryArray, GeometryDtype, from_shapely
-from geopandas._compat import ignore_shapely2_warnings, SHAPELY_GE_20, PANDAS_GE_15
+from geopandas._compat import (
+ ignore_shapely2_warnings,
+ SHAPELY_GE_20,
+ PANDAS_GE_15,
+ PANDAS_GE_22,
+)
import pytest
@@ -419,8 +424,20 @@ class TestMissing(extension_tests.BaseMissingTests):
def test_fillna_no_op_returns_copy(self, data):
pass
+ @pytest.mark.skip("fillna method not supported")
+ def test_ffill_limit_area(
+ self, data_missing, limit_area, input_ilocs, expected_ilocs
+ ):
+ pass
+
+
+if PANDAS_GE_22:
+ from pandas.tests.extension.base import BaseReduceTests
+else:
+ from pandas.tests.extension.base import BaseNoReduceTests as BaseReduceTests
+
-class TestReduce(extension_tests.BaseNoReduceTests):
+class TestReduce(BaseReduceTests):
@pytest.mark.skip("boolean reduce (any/all) tested in test_pandas_methods")
def test_reduce_series_boolean(self):
pass
=====================================
geopandas/tests/test_geom_methods.py
=====================================
@@ -657,6 +657,9 @@ class TestGeomMethods:
):
self.g0.hausdorff_distance(self.g9)
+ @pytest.mark.skipif(
+ shapely.geos.geos_version < (3, 10, 0), reason="buggy with GEOS<3.10"
+ )
@pytest.mark.skipif(
not (compat.USE_PYGEOS or compat.USE_SHAPELY_20),
reason="requires frechet_distance in shapely 2.0+",
@@ -935,6 +938,9 @@ class TestGeomMethods:
):
s.segmentize(max_segment_length=1)
+ @pytest.mark.skipif(
+ shapely.geos.geos_version < (3, 10, 0), reason="requires GEOS>=3.10"
+ )
@pytest.mark.skipif(
not (compat.USE_PYGEOS or compat.USE_SHAPELY_20),
reason="segmentize keyword introduced in shapely 2.0",
@@ -1015,6 +1021,9 @@ class TestGeomMethods:
):
assert_geoseries_equal(expected, self.g5.concave_hull())
+ @pytest.mark.skipif(
+ shapely.geos.geos_version < (3, 11, 0), reason="requires GEOS>=3.11"
+ )
@pytest.mark.skipif(
not compat.USE_SHAPELY_20,
reason="concave_hull is only implemented for shapely >= 2.0",
@@ -1022,6 +1031,9 @@ class TestGeomMethods:
def test_concave_hull(self):
assert_geoseries_equal(self.squares, self.squares.concave_hull())
+ @pytest.mark.skipif(
+ shapely.geos.geos_version < (3, 11, 0), reason="requires GEOS>=3.11"
+ )
@pytest.mark.skipif(
not compat.USE_SHAPELY_20,
reason="concave_hull is only implemented for shapely >= 2.0",
@@ -1898,6 +1910,9 @@ class TestGeomMethods:
assert_geoseries_equal(expected, oc)
assert isinstance(oc, GeoSeries)
+ @pytest.mark.skipif(
+ shapely.geos.geos_version < (3, 11, 0), reason="requires GEOS>=3.11"
+ )
@pytest.mark.skipif(
compat.SHAPELY_GE_20,
reason="remove_repeated_points is implemented for shapely >= 2.0",
@@ -1910,6 +1925,9 @@ class TestGeomMethods:
):
self.squares.remove_repeated_points()
+ @pytest.mark.skipif(
+ shapely.geos.geos_version < (3, 11, 0), reason="requires GEOS>=3.11"
+ )
@pytest.mark.skipif(
not (compat.USE_PYGEOS and compat.SHAPELY_GE_20),
reason="remove_repeated_points is only implemented for shapely >= 2.0",
@@ -1924,6 +1942,9 @@ class TestGeomMethods:
):
self.g1.remove_repeated_points()
+ @pytest.mark.skipif(
+ shapely.geos.geos_version < (3, 11, 0), reason="requires GEOS>=3.11"
+ )
@pytest.mark.skipif(
not compat.USE_SHAPELY_20,
reason="remove_repeated_points is only implemented for shapely >= 2.0",
=====================================
geopandas/tests/test_merge.py
=====================================
@@ -1,7 +1,10 @@
import warnings
import pandas as pd
+import pyproj
import pytest
+
+from geopandas._compat import PANDAS_GE_21
from geopandas.testing import assert_geodataframe_equal
from pandas.testing import assert_index_equal
@@ -133,6 +136,38 @@ class TestMerging:
partial_none_case.iloc[0] = None
pd.concat([single_geom_col, partial_none_case])
+ def test_concat_axis0_crs_wkt_mismatch(self):
+ # https://github.com/geopandas/geopandas/issues/326#issuecomment-1727958475
+ wkt_template = """GEOGCRS["WGS 84",
+ ENSEMBLE["World Geodetic System 1984 ensemble",
+ MEMBER["World Geodetic System 1984 (Transit)"],
+ MEMBER["World Geodetic System 1984 (G730)"],
+ MEMBER["World Geodetic System 1984 (G873)"],
+ MEMBER["World Geodetic System 1984 (G1150)"],
+ MEMBER["World Geodetic System 1984 (G1674)"],
+ MEMBER["World Geodetic System 1984 (G1762)"],
+ MEMBER["World Geodetic System 1984 (G2139)"],
+ ELLIPSOID["WGS 84",6378137,298.257223563,LENGTHUNIT["metre",1]],
+ ENSEMBLEACCURACY[2.0]],PRIMEM["Greenwich",0,
+ ANGLEUNIT["degree",0.0174532925199433]],CS[ellipsoidal,2],
+ AXIS["geodetic latitude (Lat)",north,ORDER[1],
+ ANGLEUNIT["degree",0.0174532925199433]],
+ AXIS["geodetic longitude (Lon)",east,ORDER[2],
+ ANGLEUNIT["degree",0.0174532925199433]],
+ USAGE[SCOPE["Horizontal component of 3D system."],
+ AREA["World.{}"],BBOX[-90,-180,90,180]],ID["EPSG",4326]]"""
+ wkt_v1 = wkt_template.format("")
+ wkt_v2 = wkt_template.format(" ") # add additional whitespace
+ crs1 = pyproj.CRS.from_wkt(wkt_v1)
+ crs2 = pyproj.CRS.from_wkt(wkt_v2)
+ # pyproj crs __hash__ based on WKT strings means these are distinct in a
+ # set but are equal by equality
+ assert len({crs1, crs2}) == 2
+ assert crs1 == crs2
+ expected = pd.concat([self.gdf, self.gdf]).set_crs(crs1)
+ res = pd.concat([self.gdf.set_crs(crs1), self.gdf.set_crs(crs2)])
+ assert_geodataframe_equal(expected, res)
+
def test_concat_axis1(self):
res = pd.concat([self.gdf, self.df], axis=1)
@@ -145,10 +180,18 @@ class TestMerging:
# https://github.com/geopandas/geopandas/issues/1230
# Expect that concat should fail gracefully if duplicate column names belonging
# to geometry columns are introduced.
- expected_err = (
- "GeoDataFrame does not support multiple columns using the geometry"
- " column name 'geometry'"
- )
+ if PANDAS_GE_21:
+ # _constructor_from_mgr changes mean we now get the concat specific error
+ # message in this case too
+ expected_err = (
+ "Concat operation has resulted in multiple columns using the geometry "
+ "column name 'geometry'."
+ )
+ else:
+ expected_err = (
+ "GeoDataFrame does not support multiple columns using the geometry"
+ " column name 'geometry'"
+ )
with pytest.raises(ValueError, match=expected_err):
pd.concat([self.gdf, self.gdf], axis=1)
=====================================
geopandas/tests/test_op_output_types.py
=====================================
@@ -6,7 +6,7 @@ from shapely.geometry import Point
import numpy as np
from geopandas import GeoDataFrame, GeoSeries
-
+import geopandas
crs_osgb = pyproj.CRS(27700)
crs_wgs = pyproj.CRS(4326)
@@ -144,6 +144,33 @@ def test_loc(df):
assert_object(df.loc[:, "value1"], pd.Series)
+@pytest.mark.parametrize(
+ "geom_name",
+ [
+ "geometry",
+ pytest.param(
+ "geom",
+ marks=pytest.mark.xfail(
+ reason="pre-regression behaviour only works for geometry col geometry"
+ ),
+ ),
+ ],
+)
+def test_loc_add_row(geom_name):
+ # https://github.com/geopandas/geopandas/issues/3119
+
+ nybb_filename = geopandas.datasets.get_path("nybb")
+ nybb = geopandas.read_file(nybb_filename)[["BoroCode", "geometry"]]
+ if geom_name != "geometry":
+ nybb = nybb.rename_geometry(geom_name)
+ # crs_orig = nybb.crs
+
+ # add a new row
+ nybb.loc[5] = [6, nybb.geometry.iloc[0]]
+ assert nybb.geometry.dtype == "geometry"
+ assert nybb.crs is None # TODO this should be crs_orig, regressed in #2373
+
+
def test_iloc(df):
geo_name = df.geometry.name
assert_object(df.iloc[:, 0:2], pd.DataFrame)
=====================================
geopandas/tests/test_pandas_methods.py
=====================================
@@ -41,6 +41,7 @@ def test_repr(s, df):
assert "POINT" in df._repr_html_()
+@pytest.mark.skipif(shapely.geos.geos_version < (3, 9, 0), reason="requires GEOS>=3.9")
def test_repr_boxed_display_precision():
# geographic coordinates
p1 = Point(10.123456789, 50.123456789)
@@ -662,27 +663,47 @@ def test_groupby_groups(df):
@pytest.mark.skip_no_sindex
@pytest.mark.parametrize("crs", [None, "EPSG:4326"])
-def test_groupby_metadata(crs):
+@pytest.mark.parametrize("geometry_name", ["geometry", "geom"])
+def test_groupby_metadata(crs, geometry_name):
# https://github.com/geopandas/geopandas/issues/2294
df = GeoDataFrame(
{
- "geometry": [Point(0, 0), Point(1, 1), Point(0, 0)],
+ geometry_name: [Point(0, 0), Point(1, 1), Point(0, 0)],
"value1": np.arange(3, dtype="int64"),
"value2": np.array([1, 2, 1], dtype="int64"),
},
crs=crs,
+ geometry=geometry_name,
)
+ kwargs = {}
+ if compat.PANDAS_GE_22:
+ # pandas is deprecating that the group key is present as column in the
+ # dataframe passed to `func`. To suppress this warning, it introduced
+ # a new include_groups keyword
+ kwargs = dict(include_groups=False)
+
# dummy test asserting we can access the crs
def func(group):
assert isinstance(group, GeoDataFrame)
assert group.crs == crs
- df.groupby("value2").apply(func)
+ df.groupby("value2").apply(func, **kwargs)
+ # selecting the non-group columns -> no need to pass the keyword
+ if (
+ compat.PANDAS_GE_21 if geometry_name == "geometry" else compat.PANDAS_GE_20
+ ) and not compat.PANDAS_GE_22:
+ # https://github.com/geopandas/geopandas/pull/2966#issuecomment-1878816712
+ # with pandas 2.0 and 2.1 this is failing
+ with pytest.raises(AttributeError):
+ df.groupby("value2")[[geometry_name, "value1"]].apply(func)
+ else:
+ df.groupby("value2")[[geometry_name, "value1"]].apply(func)
# actual test with functionality
res = df.groupby("value2").apply(
- lambda x: geopandas.sjoin(x, x[["geometry", "value1"]], how="inner")
+ lambda x: geopandas.sjoin(x, x[[geometry_name, "value1"]], how="inner"),
+ **kwargs,
)
if compat.PANDAS_GE_22:
@@ -695,7 +716,7 @@ def test_groupby_metadata(crs):
expected = (
df.take(take_indices)
- .set_index("value2", drop=False, append=True)
+ .set_index("value2", drop=compat.PANDAS_GE_22, append=True)
.swaplevel()
.rename(columns={"value1": "value1_left"})
.assign(value1_right=value_right)
=====================================
geopandas/tools/overlay.py
=====================================
@@ -65,8 +65,8 @@ def _overlay_intersection(df1, df2):
right_index=True,
suffixes=("_1", "_2"),
)
- result["__idx1"] = None
- result["__idx2"] = None
+ result["__idx1"] = np.nan
+ result["__idx2"] = np.nan
return result[
result.columns.drop(df1.geometry.name).tolist() + [df1.geometry.name]
]
=====================================
pyproject.toml
=====================================
@@ -148,6 +148,8 @@ ignore = [ # space before : (needed for how black formats slicing)
# "B301", # not yet implemented
# Only works with python >=3.10
"B905",
+ # dict literals
+ "C408",
# Too many arguments to function call
"PLR0913",
# Too many returns
View it on GitLab: https://salsa.debian.org/debian-gis-team/python-geopandas/-/compare/2f22a8199af9747fdc327bdac18007114caf93ff...3bc4a66efda7aa97cfa416f4ec7786ea5391f36b
--
View it on GitLab: https://salsa.debian.org/debian-gis-team/python-geopandas/-/compare/2f22a8199af9747fdc327bdac18007114caf93ff...3bc4a66efda7aa97cfa416f4ec7786ea5391f36b
You're receiving this email because of your account on salsa.debian.org.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://alioth-lists.debian.net/pipermail/pkg-grass-devel/attachments/20240201/e50f6592/attachment-0001.htm>
More information about the Pkg-grass-devel
mailing list