[python-geopandas] 01/07: New upstream version 0.3.0

Bas Couwenberg sebastic at debian.org
Tue Aug 29 10:27:14 UTC 2017


This is an automated email from the git hooks/post-receive script.

sebastic pushed a commit to branch master
in repository python-geopandas.

commit 21884d9697a5fe5c74a89037427adc6dec0d3edb
Author: Bas Couwenberg <sebastic at xs4all.nl>
Date:   Tue Aug 29 09:26:48 2017 +0200

    New upstream version 0.3.0
---
 .coveragerc                                        |   5 +
 .gitignore                                         |   3 +-
 .travis.yml                                        |  69 +-
 CHANGELOG => CHANGELOG.md                          |  28 +
 CONTRIBUTING.md                                    |   4 +-
 README.md                                          |   7 +-
 appveyor.yml                                       |  45 ++
 asv.conf.json                                      | 151 +++++
 benchmarks/__init__.py                             |   1 +
 benchmarks/geom_methods.py                         | 104 +++
 benchmarks/overlay.py                              |  45 ++
 benchmarks/plotting.py                             |  57 ++
 benchmarks/sjoin.py                                |  30 +
 doc/environment.yml                                |   5 +-
 doc/source/aggregation_with_dissolve.rst           |  12 +-
 doc/source/contributing.rst                        |   6 +-
 doc/source/data_structures.rst                     |  14 +-
 doc/source/geometric_manipulations.rst             |   7 +-
 doc/source/index.rst                               |   1 +
 doc/source/indexing.rst                            |  30 +
 doc/source/mapping.rst                             |  28 +-
 doc/source/projections.rst                         |  10 +-
 doc/source/set_operations.rst                      |  36 +-
 geopandas/_version.py                              |   4 +-
 geopandas/base.py                                  |  61 +-
 geopandas/datasets/README.md                       |  28 +
 geopandas/datasets/__init__.py                     |  16 +-
 geopandas/datasets/nybb_16a.zip                    | Bin 0 -> 661032 bytes
 geopandas/geodataframe.py                          |  65 +-
 geopandas/geoseries.py                             | 142 ++--
 geopandas/io/file.py                               |  19 +-
 geopandas/io/sql.py                                |   4 +-
 geopandas/io/tests/test_io.py                      |  49 +-
 geopandas/plotting.py                              | 679 +++++++++++--------
 geopandas/sindex.py                                |   1 -
 .../baseline_images/test_plotting/lines_plot.png   | Bin 10708 -> 0 bytes
 .../baseline_images/test_plotting/points_plot.png  | Bin 10814 -> 0 bytes
 .../baseline_images/test_plotting/poly_plot.png    | Bin 11281 -> 0 bytes
 .../test_plotting/poly_plot_with_kwargs.png        | Bin 30619 -> 0 bytes
 geopandas/tests/test_datasets.py                   |  18 +
 geopandas/tests/test_dissolve.py                   |  64 +-
 geopandas/tests/test_geocode.py                    |  82 +--
 geopandas/tests/test_geodataframe.py               | 320 ++++-----
 geopandas/tests/test_geom_methods.py               | 144 +++--
 geopandas/tests/test_geoseries.py                  | 136 ++--
 geopandas/tests/test_merge.py                      |  29 +-
 geopandas/tests/test_overlay.py                    |  88 ++-
 geopandas/tests/test_pandas_methods.py             | 262 ++++++++
 geopandas/tests/test_plotting.py                   | 717 +++++++++++++++------
 geopandas/tests/test_sindex.py                     |  99 +--
 geopandas/tests/test_types.py                      |  21 +-
 geopandas/tests/util.py                            |  68 +-
 geopandas/tools/overlay.py                         |   3 +-
 geopandas/tools/sjoin.py                           | 169 +++--
 geopandas/tools/tests/test_sjoin.py                | 257 ++++++--
 geopandas/tools/tests/test_tools.py                |  24 +-
 geopandas/tools/util.py                            |  12 +-
 requirements.test.txt                              |   4 +-
 requirements.txt                                   |   3 -
 setup.py                                           |  12 +-
 60 files changed, 2947 insertions(+), 1351 deletions(-)

diff --git a/.coveragerc b/.coveragerc
index d6b8b6a..ef195b9 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -5,3 +5,8 @@ exclude_lines =
     raise AssertionError
     raise NotImplementedError
     if __name__ == .__main__.:
+omit =
+    geopandas/tests/*
+    geopandas/io/tests/*
+    geopandas/tools/tests/*
+    geopandas/_version.py
diff --git a/.gitignore b/.gitignore
index a8d7cd8..930878e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,5 @@ geopandas.egg-info
 geopandas/version.py
 *.py~
 doc/_static/world_*
-examples/nybb_*.zip
\ No newline at end of file
+examples/nybb_*.zip
+.cache/
diff --git a/.travis.yml b/.travis.yml
index 3980a09..5149495 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,9 +2,7 @@ language: python
 
 sudo: false
 
-cache:
-  directories:
-    - ~/.cache/pip
+cache: pip
 
 addons:
   apt:
@@ -14,71 +12,56 @@ addons:
     - libgdal-dev
     - libspatialindex-dev
 
-env:
-  global:
-    - PIP_WHEEL_DIR=$HOME/.cache/pip/wheels
-    - PIP_FIND_LINKS=file://$HOME/.cache/pip/wheels
-  
-# matrix creates 3+5x2 = 13 tests
 matrix:
   include:
     # Only one test for these Python versions
-    # Pandas >= 0.18.0, is not Python 2.6 compatible
-    - python: 2.6
-      env: PANDAS=0.16.2 MATPLOTLIB=1.2.1
-    - python: 3.3
-      env: PANDAS=0.17.1 MATPLOTLIB=1.3.1
     - python: 3.4
-      env: PANDAS=0.18.0 MATPLOTLIB=1.5.1
+      env: PANDAS=0.18.1 MATPLOTLIB=1.4.3 SHAPELY=1.5.17
+    - python: 3.5
+      env: PANDAS=0.19.2 MATPLOTLIB=1.5.3
 
-    # Python 2.7 and 3.5 test all supported Pandas versions
+    # Python 2.7 and 3.6 test all supported Pandas versions
+    - python: 2.7
+      env: PANDAS=0.16.2  MATPLOTLIB=1.4.3 SHAPELY=1.5.17
     - python: 2.7
-      env: PANDAS=0.15.2  MATPLOTLIB=1.2.1
+      env: PANDAS=0.17.1  MATPLOTLIB=1.4.3 SHAPELY=1.5.17
     - python: 2.7
-      env: PANDAS=0.16.2  MATPLOTLIB=1.3.1
+      env: PANDAS=0.18.1  MATPLOTLIB=1.5.3
     - python: 2.7
-      env: PANDAS=0.17.1  MATPLOTLIB=1.4.3
+      env: PANDAS=0.19.2  MATPLOTLIB=1.5.3
     - python: 2.7
-      env: PANDAS=0.18.0  MATPLOTLIB=1.5.1
+      env: PANDAS=0.20.2  MATPLOTLIB=2.0.2
     - python: 2.7
       env: PANDAS=master  MATPLOTLIB=master
 
-    # Note: Python 3.5 and matplotlib versions before 1.4.3 support is hit or miss.
-    - python: 3.5
-      env: PANDAS=0.15.2  MATPLOTLIB=1.4.3
-    - python: 3.5
-      env: PANDAS=0.16.2  MATPLOTLIB=1.4.3
-    - python: 3.5
-      env: PANDAS=0.17.1  MATPLOTLIB=1.4.3
-    - python: 3.5
-      env: PANDAS=0.18.0  MATPLOTLIB=1.5.1
-    - python: 3.5
+    - python: 3.6
+      env: PANDAS=0.16.2  MATPLOTLIB=1.4.3 SHAPELY=1.5.17
+    - python: 3.6
+      env: PANDAS=0.17.1  MATPLOTLIB=1.4.3 SHAPELY=1.5.17
+    - python: 3.6
+      env: PANDAS=0.18.1  MATPLOTLIB=1.5.3
+    - python: 3.6
+      env: PANDAS=0.19.2  MATPLOTLIB=1.5.3
+    - python: 3.6
+      env: PANDAS=0.20.2  MATPLOTLIB=2.0.2
+    - python: 3.6
       env: PANDAS=master  MATPLOTLIB=master
 
-  # matplotlib 2.x (development version) causes a few tests to fail
-  allow_failures:
-    - env: PANDAS=master  MATPLOTLIB=master
-  
-
 before_install:
   - pip install -U pip
-  - pip install wheel
 
 install:
-  - pip wheel numpy
-  - pip wheel -r requirements.txt
-  - pip wheel -r requirements.test.txt
-
   - pip install numpy
-  - if [[ $MATPLOTLIB == 'master' ]]; then pip install git+https://github.com/matplotlib/matplotlib.git; else pip wheel matplotlib==$MATPLOTLIB; pip install matplotlib==$MATPLOTLIB; fi
+  - if [[ $MATPLOTLIB == 'master' ]]; then pip install git+https://github.com/matplotlib/matplotlib.git; else pip install matplotlib==$MATPLOTLIB; fi
+  - if [ -n "$SHAPELY" ]; then pip install shapely==$SHAPELY; fi
 
   - pip install -r requirements.txt
   - pip install -r requirements.test.txt
 
-  - if [[ $PANDAS == 'master' ]]; then pip install git+https://github.com/pydata/pandas.git; else pip wheel pandas==$PANDAS; pip install pandas==$PANDAS; fi
+  - if [[ $PANDAS == 'master' ]]; then pip install git+https://github.com/pydata/pandas.git; else pip install pandas==$PANDAS; fi
 
 script:
   - py.test geopandas --cov geopandas -v --cov-report term-missing
 
 after_success:
-  - coveralls
+  - codecov
diff --git a/CHANGELOG b/CHANGELOG.md
similarity index 60%
rename from CHANGELOG
rename to CHANGELOG.md
index 58d7c8a..07840a5 100644
--- a/CHANGELOG
+++ b/CHANGELOG.md
@@ -1,3 +1,31 @@
+Changes
+=======
+
+Version 0.3.0 (August 29, 2017)
+-------------------------------
+
+Improvements:
+
+* Improve plotting performance using ``matplotlib.collections`` (#267)
+* Improve default plotting appearance. The defaults now follow the new matplotlib defaults (#318, #502, #510)
+* Provide access to x/y coordinates as attributes for Point GeoSeries (#383)
+* Make the NYBB dataset available through ``geopandas.datasets`` (#384)
+* Enable ``sjoin`` on non-integer-index GeoDataFrames (#422)
+* Add ``cx`` indexer to GeoDataFrame (#482)
+* ``GeoDataFrame.from_features`` now also accepts a Feature Collection (#225, #507)
+* Use index label instead of integer id in output of ``iterfeatures`` and
+  ``to_json`` (#421)
+* Return empty data frame rather than raising an error when performing a spatial join with non overlapping geodataframes (#335)
+
+Bug fixes:
+
+* Compatibility with shapely 1.6.0 (#512)
+* Fix ``fiona.filter`` results when bbox is not None (#372)
+* Fix ``dissolve`` to retain CRS (#389)
+* Fix ``cx`` behavior when using index of 0 (#478)
+* Fix display of lower bin in legend label of choropleth plots using a PySAL scheme (#450)
+
+
 Version 0.2.0
 -------------
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d4330ca..1bd7fc4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -10,8 +10,8 @@ readable code.  Performance matters, but not at the expense of those
 goals.
 
 In general, GeoPandas follows the conventions of the pandas project
-where applicable.  Please read [pandas contributing
-guidelines](https://github.com/pydata/pandas/blob/master/CONTRIBUTING.md).
+where applicable.  Please read the [pandas contributing
+guidelines](http://pandas.pydata.org/pandas-docs/stable/contributing.html).
 
 In particular, when submitting a pull request:
 
diff --git a/README.md b/README.md
index 5268f82..b8d3a7d 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-GeoPandas [![build status](https://secure.travis-ci.org/geopandas/geopandas.png?branch=master)](https://travis-ci.org/geopandas/geopandas) [![Coverage Status](https://coveralls.io/repos/geopandas/geopandas/badge.png)](https://coveralls.io/r/geopandas/geopandas)
+GeoPandas [![build status](https://secure.travis-ci.org/geopandas/geopandas.png?branch=master)](https://travis-ci.org/geopandas/geopandas) [![Coverage Status](https://codecov.io/gh/geopandas/geopandas/branch/master/graph/badge.svg)](https://codecov.io/gh/geopandas/geopandas) [![Join the chat at https://gitter.im/geopandas/geopandas](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/geopandas/geopandas?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 =========
 
 Python tools for geographic data
@@ -85,9 +85,10 @@ GeoPandas objects also know how to plot themselves.  GeoPandas uses [descartes](
 
     >>> g.plot()
 
-GeoPandas also implements alternate constructors that can read any data format recognized by [fiona](http://toblerity.github.io/fiona).  To read a [file containing the boroughs of New York City](http://www1.nyc.gov/assets/planning/download/zip/data-maps/open-data/nybb_16a.zip):
+GeoPandas also implements alternate constructors that can read any data format recognized by [fiona](http://toblerity.github.io/fiona). To read a zip file containing an ESRI shapefile with the [boroughs boundaries of New York City](https://data.cityofnewyork.us/City-Government/Borough-Boundaries/tqmj-j8zm) (GeoPandas includes this as an example dataset):
 
-    >>> boros = GeoDataFrame.from_file('nybb.shp')
+    >>> nybb_path = geopandas.datasets.get_path('nybb')
+    >>> boros = GeoDataFrame.from_file(nybb_path)
     >>> boros.set_index('BoroCode', inplace=True)
     >>> boros.sort()
     >>> boros
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..e747f66
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,45 @@
+# With infos from
+# http://tjelvarolsson.com/blog/how-to-continuously-test-your-python-code-on-windows-using-appveyor/
+# https://packaging.python.org/en/latest/appveyor/
+# https://github.com/rmcgibbo/python-appveyor-conda-example
+
+
+matrix:
+  fast_finish: true     # immediately finish build once one of the jobs fails.
+
+environment:
+  matrix:
+    - PYTHON_VERSION: "2.7"
+      MINICONDA: C:\Miniconda-x64
+
+    - PYTHON_VERSION: "3.5"
+      MINICONDA: C:\Miniconda35-x64
+
+# all our python builds have to happen in tests_script...
+build: false
+
+init:
+  - "ECHO %PYTHON_VERSION% %MINICONDA%"
+
+install:
+  # cancel older builds for the same PR
+  - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod `
+        https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | `
+        Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { `
+        throw "There are newer queued builds for this pull request, failing early." }
+
+  # set up environment
+  - "set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%PATH%"
+  - conda config --set always_yes yes --set show_channel_urls true --set changeps1 no
+  - conda update -q conda
+  - conda config --add channels conda-forge
+  - conda info -a
+  - "conda create -q -n test-environment python=%PYTHON_VERSION%"
+  - activate test-environment
+  - conda install pandas shapely=1.5 fiona pyproj rtree matplotlib descartes
+  - conda install pytest mock
+
+test_script:
+  - activate test-environment
+  - conda list
+  - pytest geopandas -v
diff --git a/asv.conf.json b/asv.conf.json
new file mode 100644
index 0000000..1ed4d78
--- /dev/null
+++ b/asv.conf.json
@@ -0,0 +1,151 @@
+{
+    // The version of the config file format.  Do not change, unless
+    // you know what you are doing.
+    "version": 1,
+
+    // The name of the project being benchmarked
+    "project": "geopandas",
+
+    // The project's homepage
+    "project_url": "http://geopandas.org/",
+
+    // The URL or local path of the source code repository for the
+    // project being benchmarked
+    "repo": ".",
+
+    // List of branches to benchmark. If not provided, defaults to "master"
+    // (for git) or "default" (for mercurial).
+    // "branches": ["master"], // for git
+    // "branches": ["default"],    // for mercurial
+
+    // The DVCS being used.  If not set, it will be automatically
+    // determined from "repo" by looking at the protocol in the URL
+    // (if remote), or by looking for special directories, such as
+    // ".git" (if local).
+    // "dvcs": "git",
+
+    // The tool to use to create environments.  May be "conda",
+    // "virtualenv" or other value depending on the plugins in use.
+    // If missing or the empty string, the tool will be automatically
+    // determined by looking for tools on the PATH environment
+    // variable.
+    "environment_type": "conda",
+
+    // timeout in seconds for installing any dependencies in environment
+    // defaults to 10 min
+    //"install_timeout": 600,
+
+    // the base URL to show a commit for the project.
+    "show_commit_url": "http://github.com/geopandas/geopandas/commit/",
+
+    // The Pythons you'd like to test against.  If not provided, defaults
+    // to the current version of Python used to run `asv`.
+    // "pythons": ["2.7", "3.3"],
+
+    // The matrix of dependencies to test.  Each key is the name of a
+    // package (in PyPI) and the values are version numbers.  An empty
+    // list or empty string indicates to just test against the default
+    // (latest) version. null indicates that the package is to not be
+    // installed. If the package to be tested is only available from
+    // PyPi, and the 'environment_type' is conda, then you can preface
+    // the package name by 'pip+', and the package will be installed via
+    // pip (with all the conda available packages installed first,
+    // followed by the pip installed packages).
+    //
+    "matrix": {
+        "pandas": [],
+        "shapely": [],
+        "cython": [],
+        "fiona": [],
+        "pyproj": [],
+        "six": [],
+        "rtree": [],
+        "matplotlib": [],
+        "descartes": []
+    },
+    //     "numpy": ["1.6", "1.7"],
+    //     "six": ["", null],        // test with and without six installed
+    //     "pip+emcee": [""],   // emcee is only available for install with pip.
+    // },
+
+    // Combinations of libraries/python versions can be excluded/included
+    // from the set to test. Each entry is a dictionary containing additional
+    // key-value pairs to include/exclude.
+    //
+    // An exclude entry excludes entries where all values match. The
+    // values are regexps that should match the whole string.
+    //
+    // An include entry adds an environment. Only the packages listed
+    // are installed. The 'python' key is required. The exclude rules
+    // do not apply to includes.
+    //
+    // In addition to package names, the following keys are available:
+    //
+    // - python
+    //     Python version, as in the *pythons* variable above.
+    // - environment_type
+    //     Environment type, as above.
+    // - sys_platform
+    //     Platform, as in sys.platform. Possible values for the common
+    //     cases: 'linux2', 'win32', 'cygwin', 'darwin'.
+    //
+    // "exclude": [
+    //     {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
+    //     {"environment_type": "conda", "six": null}, // don't run without six on conda
+    // ],
+    //
+    // "include": [
+    //     // additional env for python2.7
+    //     {"python": "2.7", "numpy": "1.8"},
+    //     // additional env if run on windows+conda
+    //     {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""},
+    // ],
+
+    // The directory (relative to the current directory) that benchmarks are
+    // stored in.  If not provided, defaults to "benchmarks"
+    // "benchmark_dir": "benchmarks",
+
+    // The directory (relative to the current directory) to cache the Python
+    // environments in.  If not provided, defaults to "env"
+    "env_dir": ".asv/env",
+
+    // The directory (relative to the current directory) that raw benchmark
+    // results are stored in.  If not provided, defaults to "results".
+    "results_dir": ".asv/results",
+
+    // The directory (relative to the current directory) that the html tree
+    // should be written to.  If not provided, defaults to "html".
+    "html_dir": ".asv/html",
+
+    // The number of characters to retain in the commit hashes.
+    // "hash_length": 8,
+
+    // `asv` will cache wheels of the recent builds in each
+    // environment, making them faster to install next time.  This is
+    // number of builds to keep, per environment.
+    // "wheel_cache_size": 0
+
+    // The commits after which the regression search in `asv publish`
+    // should start looking for regressions. Dictionary whose keys are
+    // regexps matching to benchmark names, and values corresponding to
+    // the commit (exclusive) after which to start looking for
+    // regressions.  The default is to start from the first commit
+    // with results. If the commit is `null`, regression detection is
+    // skipped for the matching benchmark.
+    //
+    // "regressions_first_commits": {
+    //    "some_benchmark": "352cdf",  // Consider regressions only after this commit
+    //    "another_benchmark": null,   // Skip regression detection altogether
+    // }
+
+    // The thresholds for relative change in results, after which `asv
+    // publish` starts reporting regressions. Dictionary of the same
+    // form as in ``regressions_first_commits``, with values
+    // indicating the thresholds.  If multiple entries match, the
+    // maximum is taken. If no entry matches, the default is 5%.
+    //
+    // "regressions_thresholds": {
+    //    "some_benchmark": 0.01,     // Threshold of 1%
+    //    "another_benchmark": 0.5,   // Threshold of 50%
+    // }
+}
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/benchmarks/__init__.py
@@ -0,0 +1 @@
+
diff --git a/benchmarks/geom_methods.py b/benchmarks/geom_methods.py
new file mode 100644
index 0000000..4c16b49
--- /dev/null
+++ b/benchmarks/geom_methods.py
@@ -0,0 +1,104 @@
+import random
+
+import numpy as np
+from geopandas import GeoSeries
+from shapely.geometry import Point, LineString, Polygon
+
+
+def with_attributes(**attrs):
+    def decorator(func):
+        for key, value in attrs.items():
+            setattr(func, key, value)
+        return func
+    return decorator
+
+
+class Bench:
+
+    def setup(self, *args):
+        self.points = GeoSeries([Point(i, i) for i in range(100000)])
+
+        triangles = GeoSeries([Polygon([(random.random(), random.random())
+                                        for _ in range(3)])
+                               for _ in range(1000)])
+        triangles2 = triangles.copy().iloc[np.random.choice(1000, 1000)]
+        triangles3 = GeoSeries([Polygon([(random.random(), random.random())
+                                         for _ in range(3)])
+                                for _ in range(10000)])
+        triangle = Polygon([(random.random(), random.random())
+                            for _ in range(3)])
+        self.triangles, self.triangles2 = triangles, triangles2
+        self.triangles_big = triangles3
+        self.triangle = triangle
+
+    @with_attributes(param_names=['op'],
+                     params=[('contains', 'crosses', 'disjoint', 'intersects',
+                              'overlaps', 'touches', 'within', 'geom_equals',
+                              'geom_almost_equals', 'geom_equals_exact')])
+    def time_binary_predicate(self, op):
+        getattr(self.triangles, op)(self.triangle)
+
+    @with_attributes(param_names=['op'],
+                     params=[('contains', 'crosses', 'disjoint', 'intersects',
+                              'overlaps', 'touches', 'within', 'geom_equals',
+                              'geom_almost_equals')])  # 'geom_equals_exact')])
+    def time_binary_predicate_vector(self, op):
+        getattr(self.triangles, op)(self.triangles2)
+
+    @with_attributes(param_names=['op'],
+                     params=[('distance')])
+    def time_binary_float(self, op):
+        getattr(self.triangles, op)(self.triangle)
+
+    @with_attributes(param_names=['op'],
+                     params=[('distance')])
+    def time_binary_float_vector(self, op):
+        getattr(self.triangles, op)(self.triangles2)
+
+    @with_attributes(param_names=['op'],
+                     params=[('difference', 'symmetric_difference', 'union',
+                              'intersection')])
+    def time_binary_geo(self, op):
+        getattr(self.triangles, op)(self.triangle)
+
+    @with_attributes(param_names=['op'],
+                     params=[('difference', 'symmetric_difference', 'union',
+                              'intersection')])
+    def time_binary_geo_vector(self, op):
+        getattr(self.triangles, op)(self.triangles2)
+
+    @with_attributes(param_names=['op'],
+                     params=[('is_valid', 'is_empty', 'is_simple', 'is_ring')])
+    def time_unary_predicate(self, op):
+        getattr(self.triangles, op)
+
+    @with_attributes(param_names=['op'],
+                     params=[('area', 'length')])
+    def time_unary_float(self, op):
+        getattr(self.triangles_big, op)
+
+    @with_attributes(param_names=['op'],
+                     params=[('boundary', 'centroid', 'convex_hull',
+                              'envelope', 'exterior', 'interiors')])
+    def time_unary_geo(self, op):
+        getattr(self.triangles, op)
+
+    def time_unary_geo_representative_point(self, *args):
+        getattr(self.triangles, 'representative_point')()
+
+    def time_geom_type(self, *args):
+        self.triangles_big.geom_type
+
+    def time_bounds(self, *args):
+        self.triangles.bounds
+
+    def time_unary_union(self, *args):
+        self.triangles.unary_union
+
+    def time_buffer(self, *args):
+        self.points.buffer(2)
+
+
+# TODO
+# project, interpolate, translate, rotate, scale, skew, explode
+# cx indexer
diff --git a/benchmarks/overlay.py b/benchmarks/overlay.py
new file mode 100644
index 0000000..7f1620a
--- /dev/null
+++ b/benchmarks/overlay.py
@@ -0,0 +1,45 @@
+from geopandas import GeoDataFrame, GeoSeries, read_file, datasets, overlay
+from shapely.geometry import Polygon
+
+
+class Countries:
+
+    param_names = ['op']
+    params = [('intersection', 'union', 'identity', 'symmetric_difference',
+               'difference')]
+
+    def setup(self, *args):
+        world = read_file(datasets.get_path('naturalearth_lowres'))
+        capitals = read_file(datasets.get_path('naturalearth_cities'))
+        countries = world[['geometry', 'name']]
+        countries = countries.to_crs('+init=epsg:3395')[
+            countries.name != "Antarctica"]
+        capitals = capitals.to_crs('+init=epsg:3395')
+        capitals['geometry'] = capitals.buffer(500000)
+
+        self.countries = countries
+        self.capitals = capitals
+
+    def time_overlay(self, op):
+        overlay(self.countries, self.capitals, how=op)
+
+
+class Small:
+
+    param_names = ['op']
+    params = [('intersection', 'union', 'identity', 'symmetric_difference',
+               'difference')]
+
+    def setup(self, *args):
+        polys1 = GeoSeries([Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
+                            Polygon([(2, 2), (4, 2), (4, 4), (2, 4)])])
+        polys2 = GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
+                            Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])])
+
+        df1 = GeoDataFrame({'geometry': polys1, 'df1': [1, 2]})
+        df2 = GeoDataFrame({'geometry': polys2, 'df2': [1, 2]})
+
+        self.df1, self.df2 = df1, df2
+
+    def time_overlay(self, op):
+        overlay(self.df1, self.df2, how=op)
diff --git a/benchmarks/plotting.py b/benchmarks/plotting.py
new file mode 100644
index 0000000..3b4132e
--- /dev/null
+++ b/benchmarks/plotting.py
@@ -0,0 +1,57 @@
+import random
+
+from geopandas import GeoDataFrame, GeoSeries
+from shapely.geometry import Point, LineString, Polygon, MultiPolygon
+import numpy as np
+
+
+class Bench:
+
+    param_names = ['geom_type']
+    params = [('Point', 'LineString', 'Polygon', 'MultiPolygon', 'mixed')]
+
+    def setup(self, geom_type):
+
+        if geom_type == 'Point':
+            geoms = GeoSeries([Point(i, i) for i in range(1000)])
+        elif geom_type == 'LineString':
+            geoms = GeoSeries([LineString([(random.random(), random.random())
+                                           for _ in range(5)])
+                               for _ in range(100)])
+        elif geom_type == 'Polygon':
+            geoms = GeoSeries([Polygon([(random.random(), random.random())
+                                        for _ in range(3)])
+                               for _ in range(100)])
+        elif geom_type == 'MultiPolygon':
+            geoms = GeoSeries(
+                [MultiPolygon([Polygon([(random.random(), random.random())
+                                        for _ in range(3)])
+                               for _ in range(3)])
+                 for _ in range(20)])
+        elif geom_type == 'mixed':
+            g1 = GeoSeries([Point(i, i) for i in range(100)])
+            g2 = GeoSeries([LineString([(random.random(), random.random())
+                                        for _ in range(5)])
+                            for _ in range(100)])
+            g3 = GeoSeries([Polygon([(random.random(), random.random())
+                                     for _ in range(3)])
+                            for _ in range(100)])
+
+            geoms = g1
+            geoms.iloc[np.random.randint(0, 100, 50)] = g2
+            geoms.iloc[np.random.randint(0, 100, 33)] = g3
+
+            print(geoms.geom_type.value_counts())
+
+        df = GeoDataFrame({'geometry': geoms,
+                           'values': np.random.randn(len(geoms))})
+
+        self.geoms = geoms
+        self.df = df
+
+    def time_plot_series(self, *args):
+        self.geoms.plot()
+
+    def time_plot_values(self, *args):
+        self.df.plot(column='values')
+
diff --git a/benchmarks/sjoin.py b/benchmarks/sjoin.py
new file mode 100644
index 0000000..0d06391
--- /dev/null
+++ b/benchmarks/sjoin.py
@@ -0,0 +1,30 @@
+import random
+
+from geopandas import GeoDataFrame, GeoSeries, sjoin
+from shapely.geometry import Point, LineString, Polygon
+import numpy as np
+
+
+class Bench:
+
+    param_names = ['op']
+    params = [('intersects', 'contains', 'within')]
+
+    def setup(self, *args):
+        triangles = GeoSeries(
+            [Polygon([(random.random(), random.random()) for _ in range(3)])
+             for _ in range(1000)])
+
+        points = GeoSeries(
+            [Point(x, y) for x, y in zip(np.random.random(10000),
+                                         np.random.random(10000))])
+
+        df1 = GeoDataFrame({'val1': np.random.randn(len(triangles)),
+                            'geometry': triangles})
+        df2 = GeoDataFrame({'val1': np.random.randn(len(points)),
+                            'geometry': points})
+
+        self.df1, self.df2 = df1, df2
+
+    def time_sjoin(self, op):
+        sjoin(self.df1, self.df2, op=op)
diff --git a/doc/environment.yml b/doc/environment.yml
index 0daf4ef..2f0bd83 100644
--- a/doc/environment.yml
+++ b/doc/environment.yml
@@ -2,7 +2,7 @@ name: geopandas_docs
 channels:
 - conda-forge
 dependencies:
-- python=3.4
+- python=3.5
 - pandas
 - shapely
 - fiona
@@ -15,4 +15,5 @@ dependencies:
 - pysal
 - sphinx
 - sphinx_rtd_theme
-- ipython=4.0.1
+- ipython
+- pillow
diff --git a/doc/source/aggregation_with_dissolve.rst b/doc/source/aggregation_with_dissolve.rst
index a191fb5..65fed78 100644
--- a/doc/source/aggregation_with_dissolve.rst
+++ b/doc/source/aggregation_with_dissolve.rst
@@ -2,6 +2,9 @@
    :suppress:
 
    import geopandas as gpd
+   import matplotlib
+   orig = matplotlib.rcParams['figure.figsize']
+   matplotlib.rcParams['figure.figsize'] = [orig[0] * 1.5, orig[1]]
 
 
 Aggregation with dissolve
@@ -27,7 +30,7 @@ First, let's look at the most simple case where we just want continent shapes an
     world = world[['continent', 'geometry']]
     continents = world.dissolve(by='continent')
 
-    @savefig continents.png width=5in
+    @savefig continents1.png
     continents.plot();
 
     continents.head()
@@ -40,12 +43,17 @@ If we are interested in aggregate populations, however, we can pass different fu
    world = world[['continent', 'geometry', 'pop_est']]
    continents = world.dissolve(by='continent', aggfunc='sum')
 
-   @savefig continents.png width=5in
+   @savefig continents2.png
    continents.plot(column = 'pop_est', scheme='quantiles', cmap='YlOrRd');
 
    continents.head()
 
 
+.. ipython:: python
+    :suppress:
+
+    matplotlib.rcParams['figure.figsize'] = orig
+
 
 .. toctree::
    :maxdepth: 2
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index a384ab2..b18daaa 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -232,8 +232,8 @@ use cases and writing corresponding tests.
 Adding tests is one of the most common requests after code is pushed to *geopandas*.  Therefore,
 it is worth getting in the habit of writing tests ahead of time so this is never an issue.
 
-Like many packages, *geopandas* uses the `Nose testing system
-<http://nose.readthedocs.org/en/latest/index.html>`_ and the convenient
+*geopandas* uses the `pytest testing system
+<http://doc.pytest.org/en/latest/>`_ and the convenient
 extensions in `numpy.testing
 <http://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_.
 
@@ -255,7 +255,7 @@ Running the test suite
 The tests can then be run directly inside your Git clone (without having to
 install *geopandas*) by typing::
 
-    nosetests -v
+    pytest
 
 6) Updating the Documentation
 -----------------------------
diff --git a/doc/source/data_structures.rst b/doc/source/data_structures.rst
index c80361d..a04d843 100644
--- a/doc/source/data_structures.rst
+++ b/doc/source/data_structures.rst
@@ -4,6 +4,10 @@
    :suppress:
 
    import geopandas as gpd
+   import matplotlib
+   orig = matplotlib.rcParams['figure.figsize']
+   matplotlib.rcParams['figure.figsize'] = [orig[0] * 1.5, orig[1]]
+
 
 
 Data Structures
@@ -93,7 +97,7 @@ An example using the ``worlds`` GeoDataFrame:
 
     world.head()
     #Plot countries
-    @savefig world_borders.png width=3in
+    @savefig world_borders.png
     world.plot();
 
 Currently, the column named "geometry" with country borders is the active
@@ -117,7 +121,7 @@ Now, we create centroids and make it the geometry:
     world['centroid_column'] = world.centroid
     world = world.set_geometry('centroid_column')
 
-    @savefig world_centroids.png width=3in
+    @savefig world_centroids.png
     world.plot();
 
 
@@ -133,3 +137,9 @@ Attributes and Methods
 Any of the attributes calls or methods described for a ``GeoSeries`` will work on a ``GeoDataFrame`` -- effectively, they are just applied to the "geometry" ``GeoSeries``.
 
 However, ``GeoDataFrames`` also have a few extra methods for input and output which are described on the :doc:`Input and Output <io>` page and for geocoding with are described in :doc:`Geocoding <geocoding>`.
+
+
+.. ipython:: python
+    :suppress:
+
+    matplotlib.rcParams['figure.figsize'] = orig
diff --git a/doc/source/geometric_manipulations.rst b/doc/source/geometric_manipulations.rst
index b1b256c..da5a0a7 100644
--- a/doc/source/geometric_manipulations.rst
+++ b/doc/source/geometric_manipulations.rst
@@ -112,11 +112,12 @@ GeoPandas objects also know how to plot themselves.  GeoPandas uses `descartes`_
 
     >>> g.plot()
 
-GeoPandas also implements alternate constructors that can read any data format recognized by `fiona`_.  To read a `file containing the boroughs of New York City`_:
+GeoPandas also implements alternate constructors that can read any data format recognized by `fiona`_.  To read a zip file containing an ESRI shapefile with the `borough boundaries of New York City`_ (GeoPandas includes this as an example dataset):
 
 .. sourcecode:: python
 
-    >>> boros = GeoDataFrame.from_file('nybb.shp')
+    >>> nybb_path = geopandas.datasets.get_path('nybb')
+    >>> boros = GeoDataFrame.from_file(nybb_path)
     >>> boros.set_index('BoroCode', inplace=True)
     >>> boros.sort()
     >>> boros
@@ -216,7 +217,7 @@ borough that are in the holes:
 .. _fiona: http://toblerity.github.io/fiona
 .. _geopy: https://github.com/geopy/geopy
 .. _geo_interface: https://gist.github.com/sgillies/2217756
-.. _file containing the boroughs of New York City: http://www.nyc.gov/html/dcp/download/bytes/nybb_14aav.zip
+.. _borough boundaries of New York City: https://data.cityofnewyork.us/City-Government/Borough-Boundaries/tqmj-j8zm
 
 .. toctree::
    :maxdepth: 2
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 0395b88..144dfab 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -29,6 +29,7 @@ such as PostGIS.
   Installation <install>
   Data Structures <data_structures>
   Reading and Writing Files <io>
+  Indexing and Selecting Data <indexing>
   Making Maps <mapping>
   Managing Projections <projections>
   Geometric Manipulations <geometric_manipulations>
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
new file mode 100644
index 0000000..877ab37
--- /dev/null
+++ b/doc/source/indexing.rst
@@ -0,0 +1,30 @@
+.. currentmodule:: geopandas
+
+.. ipython:: python
+   :suppress:
+
+   import geopandas as gpd
+
+
+Indexing and Selecting Data
+===========================
+
+GeoPandas inherits the standard ``pandas`` methods for indexing/selecting data. This includes label based indexing with ``.loc`` and integer position based indexing with ``.iloc``, which apply to both ``GeoSeries`` and ``GeoDataFrame`` objects. For more information on indexing/selecting, see the pandas_ documentation.
+
+.. _pandas: http://pandas.pydata.org/pandas-docs/stable/indexing.html
+
+In addition to the standard ``pandas`` methods, GeoPandas also provides
+coordinate based indexing with the ``cx`` indexer, which slices using a bounding
+box. Geometries in the ``GeoSeries`` or ``GeoDataFrame`` that intersect the
+bounding box will be returned.
+
+Using the ``world`` dataset, we can use this functionality to quickly select all
+countries whose boundaries extend into the southern hemisphere.
+
+.. ipython:: python
+
+   world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
+   southern_world = world.cx[:, :0]
+   @savefig world_southern.png
+   southern_world.plot(figsize=(10, 3));
+
diff --git a/doc/source/mapping.rst b/doc/source/mapping.rst
index ff75960..e09c616 100644
--- a/doc/source/mapping.rst
+++ b/doc/source/mapping.rst
@@ -4,6 +4,10 @@
    :suppress:
 
    import geopandas as gpd
+   import matplotlib
+   orig = matplotlib.rcParams['figure.figsize']
+   matplotlib.rcParams['figure.figsize'] = [orig[0] * 1.5, orig[1]]
+
 
 
 Mapping Tools
@@ -27,7 +31,7 @@ We can now plot those GeoDataFrames:
     world.head()
 
     # Basic plot, random colors
-    @savefig world_randomcolors.png width=5in
+    @savefig world_randomcolors.png
     world.plot();
 
 Note that in general, any options one can pass to `pyplot <http://matplotlib.org/api/pyplot_api.html>`_ in ``matplotlib`` (or `style options that work for lines <http://matplotlib.org/api/lines_api.html>`_) can be passed to the ``plot()`` method.
@@ -43,7 +47,7 @@ Chloropleth Maps
     # Plot by GDP per capta
     world = world[(world.pop_est>0) & (world.name!="Antarctica")]
     world['gdp_per_cap'] = world.gdp_md_est / world.pop_est
-    @savefig world_gdp_per_cap.png width=5in
+    @savefig world_gdp_per_cap.png
     world.plot(column='gdp_per_cap');
 
 
@@ -54,7 +58,7 @@ One can also modify the colors used by ``plot`` with the ``cmap`` option (for a
 
 .. ipython:: python
 
-    @savefig world_gdp_per_cap_red.png width=5in
+    @savefig world_gdp_per_cap_red.png
     world.plot(column='gdp_per_cap', cmap='OrRd');
 
 
@@ -62,7 +66,7 @@ The way color maps are scaled can also be manipulated with the ``scheme`` option
 
 .. ipython:: python
 
-    @savefig world_gdp_per_cap_quantiles.png width=5in
+    @savefig world_gdp_per_cap_quantiles.png
     world.plot(column='gdp_per_cap', cmap='OrRd', scheme='quantiles');
 
 
@@ -77,7 +81,7 @@ Before combining maps, however, remember to always ensure they share a common CR
 
     # Look at capitals
     # Note use of standard `pyplot` line style options
-    @savefig capitals.png width=5in
+    @savefig capitals.png
     cities.plot(marker='*', color='green', markersize=5);
 
     # Check crs
@@ -91,8 +95,8 @@ Before combining maps, however, remember to always ensure they share a common CR
 
 .. ipython:: python
 
-    base = world.plot(color='white')
-    @savefig capitals_over_countries_1.png width=5in
+    base = world.plot(color='white', edgecolor='black')
+    @savefig capitals_over_countries_1.png
     cities.plot(ax=base, marker='o', color='red', markersize=5);
 
 **Method 2: Using matplotlib objects**
@@ -107,9 +111,9 @@ Before combining maps, however, remember to always ensure they share a common CR
     # working with pyplot directly.
     ax.set_aspect('equal')
 
-    world.plot(ax=ax, color='white')
+    world.plot(ax=ax, color='white', edgecolor='black')
     cities.plot(ax=ax, marker='o', color='red', markersize=5)
-    @savefig capitals_over_countries_2.png width=5in
+    @savefig capitals_over_countries_2.png
     plt.show();
 
 
@@ -118,3 +122,9 @@ Other Resources
 Links to jupyter Notebooks for different mapping tasks:
 
 `Making Heat Maps <http://nbviewer.jupyter.org/gist/perrygeo/c426355e40037c452434>`_
+
+
+.. ipython:: python
+    :suppress:
+
+    matplotlib.rcParams['figure.figsize'] = orig
diff --git a/doc/source/projections.rst b/doc/source/projections.rst
index bfca83c..68c8331 100644
--- a/doc/source/projections.rst
+++ b/doc/source/projections.rst
@@ -61,11 +61,13 @@ Re-projecting is the process of changing the representation of locations from on
     world.crs
 
     # Visualize
-    @savefig world_starting.png width=3in
-    world.plot();
+    ax = world.plot()
+    @savefig world_starting.png
+    ax.set_title("WGS84 (lat/lon)");
 
     # Reproject to Mercator (after dropping Antartica)
     world = world[(world.name != "Antarctica") & (world.name != "Fr. S. Antarctic Lands")]
     world = world.to_crs({'init': 'epsg:3395'}) # world.to_crs(epsg=3395) would also work
-    @savefig world_reproj.png width=3in
-    world.plot();
+    ax = world.plot()
+    @savefig world_reproj.png
+    ax.set_title("Mercator");
diff --git a/doc/source/set_operations.rst b/doc/source/set_operations.rst
index 2db553b..ddaaec5 100644
--- a/doc/source/set_operations.rst
+++ b/doc/source/set_operations.rst
@@ -52,7 +52,7 @@ These two GeoDataFrames have some overlapping areas:
 
     ax = df1.plot(color='red');
     @savefig overlay_example.png width=5in
-    df2.plot(ax=ax, color='green');
+    df2.plot(ax=ax, color='green', alpha=0.5);
 
 We illustrate the different overlay modes with the above example.
 The ``overlay`` function will determine the set of all individual geometries
@@ -67,10 +67,10 @@ When using ``how='union'``, all those possible geometries are returned:
     res_union = gpd.overlay(df1, df2, how='union')
     res_union
 
-    ax = res_union.plot()
-    df1.plot(ax=ax, facecolor='none');
+    ax = res_union.plot(alpha=0.5, cmap='tab10')
+    df1.plot(ax=ax, facecolor='none', edgecolor='k');
     @savefig overlay_example_union.png width=5in
-    df2.plot(ax=ax, facecolor='none');
+    df2.plot(ax=ax, facecolor='none', edgecolor='k');
 
 The other ``how`` operations will return different subsets of those geometries.
 With ``how='intersection'``, it returns only those geometries that are contained
@@ -81,10 +81,10 @@ by both GeoDataFrames:
     res_intersection = gpd.overlay(df1, df2, how='intersection')
     res_intersection
 
-    ax = res_intersection.plot()
-    df1.plot(ax=ax, facecolor='none');
+    ax = res_intersection.plot(cmap='tab10')
+    df1.plot(ax=ax, facecolor='none', edgecolor='k');
     @savefig overlay_example_intersection.png width=5in
-    df2.plot(ax=ax, facecolor='none');
+    df2.plot(ax=ax, facecolor='none', edgecolor='k');
 
 ``how='symmetric_difference'`` is the opposite of ``'intersection'`` and returns
 the geometries that are only part of one of the GeoDataFrames but not of both:
@@ -94,10 +94,10 @@ the geometries that are only part of one of the GeoDataFrames but not of both:
     res_symdiff = gpd.overlay(df1, df2, how='symmetric_difference')
     res_symdiff
 
-    ax = res_symdiff.plot()
-    df1.plot(ax=ax, facecolor='none');
+    ax = res_symdiff.plot(cmap='tab10')
+    df1.plot(ax=ax, facecolor='none', edgecolor='k');
     @savefig overlay_example_symdiff.png width=5in
-    df2.plot(ax=ax, facecolor='none');
+    df2.plot(ax=ax, facecolor='none', edgecolor='k');
 
 To obtain the geometries that are part of ``df1`` but are not contained in
 ``df2``, you can use ``how='difference'``:
@@ -107,10 +107,10 @@ To obtain the geometries that are part of ``df1`` but are not contained in
     res_difference = gpd.overlay(df1, df2, how='difference')
     res_difference
 
-    ax = res_difference.plot()
-    df1.plot(ax=ax, facecolor='none');
+    ax = res_difference.plot(cmap='tab10')
+    df1.plot(ax=ax, facecolor='none', edgecolor='k');
     @savefig overlay_example_difference.png width=5in
-    df2.plot(ax=ax, facecolor='none');
+    df2.plot(ax=ax, facecolor='none', edgecolor='k');
 
 Finally, with ``how='identity'``, the result consists of the surface of ``df1``,
 but with the geometries obtained from overlaying ``df1`` with ``df2``:
@@ -120,10 +120,10 @@ but with the geometries obtained from overlaying ``df1`` with ``df2``:
     res_identity = gpd.overlay(df1, df2, how='identity')
     res_identity
 
-    ax = res_identity.plot()
-    df1.plot(ax=ax, facecolor='none');
+    ax = res_identity.plot(cmap='tab10')
+    df1.plot(ax=ax, facecolor='none', edgecolor='k');
     @savefig overlay_example_identity.png width=5in
-    df2.plot(ax=ax, facecolor='none');
+    df2.plot(ax=ax, facecolor='none', edgecolor='k');
 
 
 Overlay Countries Example
@@ -171,7 +171,7 @@ To select only the portion of countries within 500km of a capital, we specify th
 
    country_cores = gpd.overlay(countries, capitals, how='intersection')
    @savefig country_cores.png width=5in
-   country_cores.plot();
+   country_cores.plot(alpha=0.5, edgecolor='k', cmap='tab10');
 
 Changing the "how" option allows for different types of overlay operations. For example, if we were interested in the portions of countries *far* from capitals (the peripheries), we would compute the difference of the two.
 
@@ -179,7 +179,7 @@ Changing the "how" option allows for different types of overlay operations. For
 
    country_peripheries = gpd.overlay(countries, capitals, how='difference')
    @savefig country_peripheries.png width=5in
-   country_peripheries.plot();
+   country_peripheries.plot(alpha=0.5, edgecolor='k', cmap='tab10');
 
 
 
diff --git a/geopandas/_version.py b/geopandas/_version.py
index a4ef5bb..3f12cb5 100644
--- a/geopandas/_version.py
+++ b/geopandas/_version.py
@@ -23,8 +23,8 @@ def get_keywords():
     # setup.py/versioneer.py will grep for the variable names, so they must
     # each be defined on a line of their own. _version.py will just call
     # get_keywords().
-    git_refnames = " (tag: v0.2.1, 0.2.x)"
-    git_full = "771f5608000c0707f0ab59ad1f165465ea8b3f44"
+    git_refnames = " (tag: v0.3.0)"
+    git_full = "e48af9b2f990c333bc6c59c0f566cd919a3338d1"
     keywords = {"refnames": git_refnames, "full": git_full}
     return keywords
 
diff --git a/geopandas/base.py b/geopandas/base.py
index ec475ce..8b1bec8 100644
--- a/geopandas/base.py
+++ b/geopandas/base.py
@@ -1,13 +1,12 @@
 from warnings import warn
 
-from shapely.geometry import MultiPoint, MultiLineString, MultiPolygon
-from shapely.geometry.base import BaseGeometry
-from shapely.ops import cascaded_union, unary_union
-import shapely.affinity as affinity
-
 import numpy as np
 import pandas as pd
 from pandas import Series, DataFrame, MultiIndex
+from pandas.core.indexing import _NDFrameIndexer
+from shapely.geometry import box, MultiPoint, MultiLineString, MultiPolygon
+from shapely.ops import cascaded_union, unary_union
+import shapely.affinity as affinity
 
 import geopandas as gpd
 
@@ -54,11 +53,13 @@ def _series_op(this, other, op, **kwargs):
         return Series([getattr(s, op)(other, **kwargs) if s else null_val
                       for s in this.geometry], index=this.index)
 
+
 def _geo_unary_op(this, op):
     """Unary operation that returns a GeoSeries"""
     return gpd.GeoSeries([getattr(geom, op) for geom in this.geometry],
                      index=this.index, crs=this.crs)
 
+
 def _series_unary_op(this, op, null_value=False):
     """Unary operation that returns a Series"""
     return Series([getattr(geom, op, null_value) for geom in this.geometry],
@@ -66,9 +67,10 @@ def _series_unary_op(this, op, null_value=False):
 
 
 class GeoPandasBase(object):
+    _sindex = None
+    _sindex_generated = False
 
     def _generate_sindex(self):
-        self._sindex = None
         if not HAS_SINDEX:
             warn("Cannot generate spatial index: Missing package `rtree`.")
         else:
@@ -83,6 +85,7 @@ class GeoPandasBase(object):
             # and move on. See https://github.com/Toblerity/rtree/issues/20.
             except RTreeError:
                 pass
+        self._sindex_generated = True
 
     def _invalidate_sindex(self):
         """
@@ -91,7 +94,7 @@ class GeoPandasBase(object):
 
         """
         self._sindex = None
-        self._sindex_valid = False
+        self._sindex_generated = False
 
     @property
     def area(self):
@@ -184,7 +187,7 @@ class GeoPandasBase(object):
     @property
     def cascaded_union(self):
         """Deprecated: Return the unary_union of all geometries"""
-        return cascaded_union(self.values)
+        return cascaded_union(self.geometry.values)
 
     @property
     def unary_union(self):
@@ -281,16 +284,15 @@ class GeoPandasBase(object):
         """
 
         b = self.bounds
-        return (b['minx'].min(),
-                b['miny'].min(),
-                b['maxx'].max(),
-                b['maxy'].max())
+        return np.array((b['minx'].min(),
+                         b['miny'].min(),
+                         b['maxx'].max(),
+                         b['maxy'].max()))
 
     @property
     def sindex(self):
-        if not self._sindex_valid:
+        if not self._sindex_generated:
             self._generate_sindex()
-            self._sindex_valid = True
         return self._sindex
 
     def buffer(self, distance, resolution=16):
@@ -474,6 +476,37 @@ class GeoPandasBase(object):
         return gpd.GeoSeries(geometries,
             index=MultiIndex.from_tuples(index)).__finalize__(self)
 
+
+class _CoordinateIndexer(_NDFrameIndexer):
+    """
+    Coordinate based indexer to select by intersection with bounding box.
+
+    Format of input should be ``.cx[xmin:xmax, ymin:ymax]``. Any of ``xmin``,
+    ``xmax``, ``ymin``, and ``ymax`` can be provided, but input must
+    include a comma separating x and y slices. That is, ``.cx[:, :]`` will
+    return the full series/frame, but ``.cx[:]`` is not implemented.
+    """
+
+    def _getitem_tuple(self, tup):
+        obj = self.obj
+        xs, ys = tup
+        # handle numeric values as x and/or y coordinate index
+        if type(xs) is not slice:
+            xs = slice(xs, xs)
+        if type(ys) is not slice:
+            ys = slice(ys, ys)
+        # don't know how to handle step; should this raise?
+        if xs.step is not None or ys.step is not None:
+            warn("Ignoring step - full interval is used.")
+        xmin, ymin, xmax, ymax = obj.total_bounds
+        bbox = box(xs.start if xs.start is not None else xmin,
+                   ys.start if ys.start is not None else ymin,
+                   xs.stop if xs.stop is not None else xmax,
+                   ys.stop if ys.stop is not None else ymax)
+        idx = obj.intersects(bbox)
+        return obj[idx]
+
+
 def _array_input(arr):
     if isinstance(arr, (MultiPoint, MultiLineString, MultiPolygon)):
         # Prevent against improper length detection when input is a
diff --git a/geopandas/datasets/README.md b/geopandas/datasets/README.md
new file mode 100644
index 0000000..ce86c35
--- /dev/null
+++ b/geopandas/datasets/README.md
@@ -0,0 +1,28 @@
+# Datasets included with geopandas
+
+- `'nybb'`: Borough boundaries of New York City, data provided by the Department of City Planning (DCP), https://data.cityofnewyork.us/City-Government/Borough-Boundaries/tqmj-j8zm
+- `'naturalearth_cities'`: capital cities, based on http://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-populated-places/
+- `'naturalearth_lowres'`: country boundaries, based on http://www.naturalearthdata.com/downloads/110m-cultural-vectors/110m-admin-0-countries/
+
+
+## Data sources and licenses
+
+### `nybb`
+
+#### Summary
+
+These districts were created by the Department of City Planning to aid city agencies in administering public services.
+
+#### Description
+
+The borough boundaries of New York City clipped to the shoreline at mean high tide.
+
+#### Credits
+
+Department of City Planning.
+
+#### Use limitations
+
+This dataset is being provided by the Department of City Planning (DCP) on DCP’s website for informational purposes only. DCP does not warranty the completeness, accuracy, content, or fitness for any particular purpose or use of the dataset, nor are any such warranties to be implied or inferred with respect to the dataset as furnished on the website. DCP and the City are not liable for any deficiencies in the completeness, accuracy, content, or fitness for any particular purpose or use t [...]
+
+
diff --git a/geopandas/datasets/__init__.py b/geopandas/datasets/__init__.py
index 35b24e0..3b1e464 100644
--- a/geopandas/datasets/__init__.py
+++ b/geopandas/datasets/__init__.py
@@ -3,9 +3,11 @@ import os
 
 __all__ = ['available', 'get_path']
 
-module_path = os.path.dirname(__file__)
-available = [p for p in next(os.walk(module_path))[1]
-             if not p.startswith('__')]
+_module_path = os.path.dirname(__file__)
+_available_dir = [p for p in next(os.walk(_module_path))[1]
+                  if not p.startswith('__')]
+_available_zip = {'nybb': 'nybb_16a.zip'}
+available = _available_dir + list(_available_zip.keys())
 
 
 def get_path(dataset):
@@ -19,9 +21,13 @@ def get_path(dataset):
         all options.
 
     """
-    if dataset in available:
+    if dataset in _available_dir:
         return os.path.abspath(
-            os.path.join(module_path, dataset, dataset + '.shp'))
+            os.path.join(_module_path, dataset, dataset + '.shp'))
+    elif dataset in _available_zip:
+        fpath = os.path.abspath(
+            os.path.join(_module_path, _available_zip[dataset]))
+        return 'zip://' + fpath
     else:
         msg = "The dataset '{data}' is not available".format(data=dataset)
         raise ValueError(msg)
diff --git a/geopandas/datasets/nybb_16a.zip b/geopandas/datasets/nybb_16a.zip
new file mode 100644
index 0000000..514dc94
Binary files /dev/null and b/geopandas/datasets/nybb_16a.zip differ
diff --git a/geopandas/geodataframe.py b/geopandas/geodataframe.py
index 3f0817d..a98acb8 100644
--- a/geopandas/geodataframe.py
+++ b/geopandas/geodataframe.py
@@ -1,26 +1,18 @@
-try:
-    from collections import OrderedDict
-except ImportError:
-    # Python 2.6
-    from ordereddict import OrderedDict
 import json
-import os
-import sys
 
 import numpy as np
-from pandas import DataFrame, Series, Index
+from pandas import DataFrame, Series
 from shapely.geometry import mapping, shape
 from shapely.geometry.base import BaseGeometry
-from six import string_types
+from six import string_types, PY3
 
-from geopandas import GeoSeries
-from geopandas.base import GeoPandasBase
+from geopandas.base import GeoPandasBase, _CoordinateIndexer
+from geopandas.geoseries import GeoSeries
 from geopandas.plotting import plot_dataframe
 import geopandas.io
 
 
 DEFAULT_GEO_COLUMN_NAME = 'geometry'
-PY3 = sys.version_info[0] == 3
 
 
 class GeoDataFrame(GeoPandasBase, DataFrame):
@@ -178,13 +170,43 @@ class GeoDataFrame(GeoPandasBase, DataFrame):
     def from_features(cls, features, crs=None):
         """
         Alternate constructor to create GeoDataFrame from an iterable of
-        features. Each element must be a feature dictionary or implement
-        the __geo_interface__.
-        See: https://gist.github.com/sgillies/2217756
+        features or a feature collection.
+
+        Parameters
+        ----------
+        features
+            - Iterable of features, where each element must be a feature
+              dictionary or implement the __geo_interface__.
+            - Feature collection, where the 'features' key contains an
+              iterable of features.
+            - Object holding a feature collection that implements the
+              ``__geo_interface__``.
+        crs : str or dict (optional)
+            Coordinate reference system to set on the resulting frame.
+
+        Returns
+        -------
+        GeoDataFrame
+
+        Notes
+        -----
+        For more information about the ``__geo_interface__``, see
+        https://gist.github.com/sgillies/2217756
 
         """
+        # Handle feature collections
+        if hasattr(features, "__geo_interface__"):
+            fs = features.__geo_interface__
+        else:
+            fs = features
+
+        if isinstance(fs, dict) and fs.get('type') == 'FeatureCollection':
+            features_lst = fs['features']
+        else:
+            features_lst = features
+
         rows = []
-        for f in features:
+        for f in features_lst:
             if hasattr(f, "__geo_interface__"):
                 f = f.__geo_interface__
             else:
@@ -286,12 +308,12 @@ class GeoDataFrame(GeoPandasBase, DataFrame):
             raise ValueError('Unknown na method {0}'.format(na))
         f = na_methods[na]
 
-        for i, row in self.iterrows():
+        for name, row in self.iterrows():
             properties = f(row)
             del properties[self._geometry_column_name]
 
             feature = {
-                'id': str(i),
+                'id': str(name),
                 'type': 'Feature',
                 'properties': properties,
                 'geometry': mapping(row[self._geometry_column_name])
@@ -313,7 +335,7 @@ class GeoDataFrame(GeoPandasBase, DataFrame):
                'features': list(self.iterfeatures(**kwargs))}
 
         if kwargs.get('show_bbox', False):
-            geo['bbox'] = self.total_bounds
+            geo['bbox'] = tuple(self.total_bounds)
 
         return geo
 
@@ -486,7 +508,7 @@ class GeoDataFrame(GeoPandasBase, DataFrame):
         g = self.groupby(by=by, group_keys=False)[self.geometry.name].agg(merge_geometries)
 
         # Aggregate
-        aggregated_geometry = GeoDataFrame(g, geometry=self.geometry.name)
+        aggregated_geometry = GeoDataFrame(g, geometry=self.geometry.name, crs=self.crs)
         # Recombine
         aggregated = aggregated_geometry.join(aggregated_data)
 
@@ -510,3 +532,6 @@ else:
     import types
     DataFrame.set_geometry = types.MethodType(_dataframe_set_geometry, None,
                                               DataFrame)
+
+
+GeoDataFrame._create_indexer('cx', _CoordinateIndexer)
diff --git a/geopandas/geoseries.py b/geopandas/geoseries.py
index e4c3529..2b70752 100644
--- a/geopandas/geoseries.py
+++ b/geopandas/geoseries.py
@@ -1,21 +1,16 @@
 from functools import partial
 import json
-from warnings import warn
 
 import numpy as np
-from pandas import Series, DataFrame
-from pandas.core.indexing import _NDFrameIndexer
-from pandas.util.decorators import cache_readonly
+from pandas import Series
 import pyproj
-from shapely.geometry import box, shape, Polygon, Point
-from shapely.geometry.collection import GeometryCollection
+from shapely.geometry import shape, Point
 from shapely.geometry.base import BaseGeometry
 from shapely.ops import transform
 
 from geopandas.plotting import plot_series
-from geopandas.base import GeoPandasBase
+from geopandas.base import GeoPandasBase, _series_unary_op, _CoordinateIndexer
 
-OLD_PANDAS = issubclass(Series, np.ndarray)
 
 def _is_empty(x):
     try:
@@ -23,32 +18,6 @@ def _is_empty(x):
     except:
         return False
 
-def _convert_array_args(args):
-    if len(args) == 1 and isinstance(args[0], BaseGeometry):
-        args = ([args[0]],)
-    return args
-
-class _CoordinateIndexer(_NDFrameIndexer):
-    """ Indexing by coordinate slices """
-    def _getitem_tuple(self, tup):
-        obj = self.obj
-        xs, ys = tup
-        # handle numeric values as x and/or y coordinate index
-        if type(xs) is not slice:
-            xs = slice(xs, xs)
-        if type(ys) is not slice:
-            ys = slice(ys, ys)
-        # don't know how to handle step; should this raise?
-        if xs.step is not None or ys.step is not None:
-            warn("Ignoring step - full interval is used.")
-        xmin, ymin, xmax, ymax = obj.total_bounds
-        bbox = box(xs.start or xmin,
-                   ys.start or ymin,
-                   xs.stop or xmax,
-                   ys.stop or ymax)
-        idx = obj.intersects(bbox)
-        return obj[idx]
-
 
 class GeoSeries(GeoPandasBase, Series):
     """A Series object designed to store shapely geometry objects."""
@@ -56,19 +25,17 @@ class GeoSeries(GeoPandasBase, Series):
 
     def __new__(cls, *args, **kwargs):
         kwargs.pop('crs', None)
-        if OLD_PANDAS:
-            args = _convert_array_args(args)
-            arr = Series.__new__(cls, *args, **kwargs)
-        else:
-            arr = Series.__new__(cls)
+        arr = Series.__new__(cls)
         if type(arr) is GeoSeries:
             return arr
         else:
             return arr.view(GeoSeries)
 
     def __init__(self, *args, **kwargs):
-        if not OLD_PANDAS:
-            args = _convert_array_args(args)
+        # fix problem for scalar geometries passed
+        if len(args) == 1 and isinstance(args[0], BaseGeometry):
+            args = ([args[0]],)
+
         crs = kwargs.pop('crs', None)
 
         super(GeoSeries, self).__init__(*args, **kwargs)
@@ -82,24 +49,42 @@ class GeoSeries(GeoPandasBase, Series):
     def geometry(self):
         return self
 
+    @property
+    def x(self):
+        """Return the x location of point geometries in a GeoSeries"""
+        if (self.geom_type == "Point").all():
+            return _series_unary_op(self, 'x', null_value=np.nan)
+        else:
+            message = "x attribute access only provided for Point geometries"
+            raise ValueError(message)
+
+    @property
+    def y(self):
+        """Return the y location of point geometries in a GeoSeries"""
+        if (self.geom_type == "Point").all():
+            return _series_unary_op(self, 'y', null_value=np.nan)
+        else:
+            message = "y attribute access only provided for Point geometries"
+            raise ValueError(message)
+
     @classmethod
     def from_file(cls, filename, **kwargs):
         """
         Alternate constructor to create a GeoSeries from a file
-        
+
         Parameters
         ----------
-        
+
         filename : str
             File path or file handle to read from. Depending on which kwargs
             are included, the content of filename may vary, see:
             http://toblerity.github.io/fiona/README.html#usage
             for usage details.
         kwargs : key-word arguments
-            These arguments are passed to fiona.open, and can be used to 
+            These arguments are passed to fiona.open, and can be used to
             access multi-layer data, data stored within archives (zip files),
             etc.
-        
+
         """
         import fiona
         geoms = []
@@ -125,7 +110,7 @@ class GeoSeries(GeoPandasBase, Series):
                           index=self.index)
         data.crs = self.crs
         data.to_file(filename, driver, **kwargs)
-        
+
     #
     # Implement pandas methods
     #
@@ -183,12 +168,50 @@ class GeoSeries(GeoPandasBase, Series):
         return GeoSeries(self.values.copy(order), index=self.index,
                       name=self.name).__finalize__(self)
 
-    def isnull(self):
-        """Null values in a GeoSeries are represented by empty geometric objects"""
+    def isna(self):
+        """
+        N/A values in a GeoSeries can be represented by empty geometric
+        objects, in addition to standard representations such as None and
+        np.nan.
+
+        Returns
+        -------
+        A boolean pandas Series of the same size as the GeoSeries,
+        True where a value is N/A.
+
+        See Also
+        --------
+        GeoSeries.notna : inverse of isna
+        """
         non_geo_null = super(GeoSeries, self).isnull()
         val = self.apply(_is_empty)
         return np.logical_or(non_geo_null, val)
 
+    def isnull(self):
+        """Alias for `isna` method. See `isna` for more detail."""
+        return self.isna()
+
+    def notna(self):
+        """
+        N/A values in a GeoSeries can be represented by empty geometric
+        objects, in addition to standard representations such as None and
+        np.nan.
+
+        Returns
+        -------
+        A boolean pandas Series of the same size as the GeoSeries,
+        False where a value is N/A.
+
+        See Also
+        --------
+        GeoSeries.isna : inverse of notna
+        """
+        return ~self.isna()
+
+    def notnull(self):
+        """Alias for `notna` method. See `notna` for more detail."""
+        return self.notna()
+
     def fillna(self, value=None, method=None, inplace=False,
                **kwargs):
         """Fill NA/NaN values with a geometry (empty polygon by default).
@@ -196,27 +219,14 @@ class GeoSeries(GeoPandasBase, Series):
         "method" is currently not implemented for pandas <= 0.12.
         """
         if value is None:
-            value = Point()
-        if not OLD_PANDAS:
-            return super(GeoSeries, self).fillna(value=value, method=method,
-                                                 inplace=inplace, **kwargs)
-        else:
-            # FIXME: this is an ugly way to support pandas <= 0.12
-            if method is not None:
-                raise NotImplementedError('Fill method is currently not implemented for GeoSeries')
-            if isinstance(value, BaseGeometry):
-                result = self.copy() if not inplace else self
-                mask = self.isnull()
-                result[mask] = value
-                if not inplace:
-                    return GeoSeries(result)
-            else:
-                raise ValueError('Non-geometric fill values not allowed for GeoSeries')
+            value = BaseGeometry()
+        return super(GeoSeries, self).fillna(value=value, method=method,
+                                             inplace=inplace, **kwargs)
 
     def align(self, other, join='outer', level=None, copy=True,
               fill_value=None, **kwargs):
         if fill_value is None:
-            fill_value = Point()
+            fill_value = BaseGeometry()
         left, right = super(GeoSeries, self).align(other, join=join,
                                                    level=level, copy=copy,
                                                    fill_value=fill_value,
@@ -241,7 +251,7 @@ class GeoSeries(GeoPandasBase, Series):
 
     def plot(self, *args, **kwargs):
         return plot_series(self, *args, **kwargs)
-    
+
     plot.__doc__ = plot_series.__doc__
 
     #
diff --git a/geopandas/io/file.py b/geopandas/io/file.py
index d407615..b0cf8ed 100644
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -2,9 +2,7 @@ import os
 
 import fiona
 import numpy as np
-from shapely.geometry import mapping
 
-from six import iteritems
 from geopandas import GeoDataFrame
 
 
@@ -14,7 +12,7 @@ def read_file(filename, **kwargs):
 
     *filename* is either the absolute or relative path to the file to be
     opened and *kwargs* are keyword args to be passed to the `open` method
-    in the fiona library when opening the file. For more information on 
+    in the fiona library when opening the file. For more information on
     possible keywords, type: ``import fiona; help(fiona.open)``
     """
     bbox = kwargs.pop('bbox', None)
@@ -25,7 +23,11 @@ def read_file(filename, **kwargs):
             f_filt = f.filter(bbox=bbox)
         else:
             f_filt = f
-        gdf = GeoDataFrame.from_features(f, crs=crs)
+        gdf = GeoDataFrame.from_features(f_filt, crs=crs)
+
+        # re-order with column order from metadata, with geometry last
+        columns = list(f.meta["schema"]["properties"]) + ["geometry"]
+        gdf = gdf[columns]
 
     return gdf
 
@@ -57,10 +59,11 @@ def to_file(df, filename, driver="ESRI Shapefile", schema=None,
     if schema is None:
         schema = infer_schema(df)
     filename = os.path.abspath(os.path.expanduser(filename))
-    with fiona.open(filename, 'w', driver=driver, crs=df.crs,
-                    schema=schema, **kwargs) as c:
-        for feature in df.iterfeatures():
-            c.write(feature)
+    with fiona.drivers():
+        with fiona.open(filename, 'w', driver=driver, crs=df.crs,
+                        schema=schema, **kwargs) as colxn:
+            for feature in df.iterfeatures():
+                colxn.write(feature)
 
 
 def infer_schema(df):
diff --git a/geopandas/io/sql.py b/geopandas/io/sql.py
index b88f412..5540636 100644
--- a/geopandas/io/sql.py
+++ b/geopandas/io/sql.py
@@ -9,7 +9,7 @@ from geopandas import GeoSeries, GeoDataFrame
 def read_postgis(sql, con, geom_col='geom', crs=None, index_col=None,
                  coerce_float=True, params=None):
     """
-    Returns a GeoDataFrame corresponding to the result of the query 
+    Returns a GeoDataFrame corresponding to the result of the query
     string, which must contain a geometry column.
 
     Examples:
@@ -25,7 +25,7 @@ def read_postgis(sql, con, geom_col='geom', crs=None, index_col=None,
     crs: optional
         CRS to use for the returned GeoDataFrame
 
-    See the documentation for pandas.read_sql for further explanation 
+    See the documentation for pandas.read_sql for further explanation
     of the following parameters:
     index_col, coerce_float, params
 
diff --git a/geopandas/io/tests/test_io.py b/geopandas/io/tests/test_io.py
index e8e87f9..c15e203 100644
--- a/geopandas/io/tests/test_io.py
+++ b/geopandas/io/tests/test_io.py
@@ -2,39 +2,38 @@ from __future__ import absolute_import
 
 import fiona
 
+import geopandas
 from geopandas import read_postgis, read_file
-from geopandas.tests.util import download_nybb, connect, create_db, \
-     PANDAS_NEW_SQL_API, unittest, validate_boro_df
 
+import pytest
+from geopandas.tests.util import connect, create_db, validate_boro_df
 
-class TestIO(unittest.TestCase):
-    def setUp(self):
-        nybb_filename, nybb_zip_path = download_nybb()
-        vfs = 'zip://' + nybb_filename
-        self.df = read_file(nybb_zip_path, vfs=vfs)
-        with fiona.open(nybb_zip_path, vfs=vfs) as f:
+
+class TestIO:
+    def setup_method(self):
+        nybb_zip_path = geopandas.datasets.get_path('nybb')
+        self.df = read_file(nybb_zip_path)
+        with fiona.open(nybb_zip_path) as f:
             self.crs = f.crs
+            self.columns = list(f.meta["schema"]["properties"].keys())
 
     def test_read_postgis_default(self):
         con = connect('test_geopandas')
         if con is None or not create_db(self.df):
-            raise unittest.case.SkipTest()
+            raise pytest.skip()
 
         try:
             sql = "SELECT * FROM nybb;"
             df = read_postgis(sql, con)
         finally:
-            if PANDAS_NEW_SQL_API:
-                # It's not really a connection, it's an engine
-                con = con.connect()
             con.close()
 
-        validate_boro_df(self, df)
+        validate_boro_df(df)
 
     def test_read_postgis_custom_geom_col(self):
         con = connect('test_geopandas')
         if con is None or not create_db(self.df):
-            raise unittest.case.SkipTest()
+            raise pytest.skip()
 
         try:
             sql = """SELECT
@@ -43,14 +42,24 @@ class TestIO(unittest.TestCase):
                      FROM nybb;"""
             df = read_postgis(sql, con, geom_col='__geometry__')
         finally:
-            if PANDAS_NEW_SQL_API:
-                # It's not really a connection, it's an engine
-                con = con.connect()
             con.close()
 
-        validate_boro_df(self, df)
+        validate_boro_df(df)
 
     def test_read_file(self):
         df = self.df.rename(columns=lambda x: x.lower())
-        validate_boro_df(self, df)
-        self.assert_(df.crs == self.crs)
+        validate_boro_df(df)
+        assert df.crs == self.crs
+        # get lower case columns, and exclude geometry column from comparison
+        lower_columns = [c.lower() for c in self.columns]
+        assert (df.columns[:-1] == lower_columns).all()
+
+    def test_filtered_read_file(self):
+        full_df_shape = self.df.shape
+        nybb_filename = geopandas.datasets.get_path('nybb')
+        bbox = (1031051.7879884212, 224272.49231459625, 1047224.3104931959,
+                244317.30894023244)
+        filtered_df = read_file(nybb_filename, bbox=bbox)
+        filtered_df_shape = filtered_df.shape
+        assert full_df_shape != filtered_df_shape
+        assert filtered_df_shape == (2, 5)
diff --git a/geopandas/plotting.py b/geopandas/plotting.py
index 38d404e..31b6284 100644
--- a/geopandas/plotting.py
+++ b/geopandas/plotting.py
@@ -1,373 +1,518 @@
 from __future__ import print_function
 
+from distutils.version import LooseVersion
 import warnings
 
 import numpy as np
-from six import next
-from six.moves import xrange
-from shapely.geometry import Polygon
 
 
-def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs):
-    """ Plot a single Polygon geometry """
+def _flatten_multi_geoms(geoms, colors=None):
+    """
+    Returns Series like geoms and colors, except that any Multi geometries
+    are split into their components and colors are repeated for all components
+    in the same Multi geometry.  Maintains 1:1 matching of geometry to color.
+    Passing `color` is optional, and when no `color` is passed a list of None
+    values is returned as `component_colors`.
+
+    "Colors" are treated opaquely and so can actually contain any values.
+
+    Returns
+    -------
+
+    components : list of geometry
+
+    component_colors : list of whatever type `colors` contains
+    """
+    if colors is None:
+        colors = [None] * len(geoms)
+
+    components, component_colors = [], []
+
+    # precondition, so zip can't short-circuit
+    assert len(geoms) == len(colors)
+    for geom, color in zip(geoms, colors):
+        if geom.type.startswith('Multi'):
+            for poly in geom:
+                components.append(poly)
+                # repeat same color for all components
+                component_colors.append(color)
+        else:
+            components.append(geom)
+            component_colors.append(color)
+
+    return components, component_colors
+
+
+def plot_polygon_collection(ax, geoms, values=None, color=None,
+                            cmap=None, vmin=None, vmax=None, **kwargs):
+    """
+    Plots a collection of Polygon and MultiPolygon geometries to `ax`
+
+    Parameters
+    ----------
+
+    ax : matplotlib.axes.Axes
+        where shapes will be plotted
+
+    geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)
+
+    values : a sequence of `N` values, optional
+        Values will be mapped to colors using vmin/vmax/cmap. They should
+        have 1:1 correspondence with the geometries (not their components).
+        Otherwise follows `color` / `facecolor` kwargs.
+
+    edgecolor : single color or sequence of `N` colors
+        Color for the edge of the polygons
+
+    facecolor : single color or sequence of `N` colors
+        Color to fill the polygons. Cannot be used together with `values`.
+
+    color : single color or sequence of `N` colors
+        Sets both `edgecolor` and `facecolor`
+
+    **kwargs
+        Additional keyword arguments passed to the collection
+
+    Returns
+    -------
+
+    collection : matplotlib.collections.Collection that was plotted
+    """
     from descartes.patch import PolygonPatch
-    a = np.asarray(poly.exterior)
-    if poly.has_z:
-        poly = Polygon(zip(*poly.exterior.xy))
+    from matplotlib.collections import PatchCollection
+
+    geoms, values = _flatten_multi_geoms(geoms, values)
+    if None in values:
+        values = None
+
+    # PatchCollection does not accept some kwargs.
+    if 'markersize' in kwargs:
+        del kwargs['markersize']
+
+    # color=None overwrites specified facecolor/edgecolor with default color
+    if color is not None:
+        kwargs['color'] = color
 
-    # without Descartes, we could make a Patch of exterior
-    ax.add_patch(PolygonPatch(poly, facecolor=facecolor, linewidth=0, alpha=alpha))  # linewidth=0 because boundaries are drawn separately
-    ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth, **kwargs)
-    for p in poly.interiors:
-        x, y = zip(*p.coords)
-        ax.plot(x, y, color=edgecolor, linewidth=linewidth)
+    collection = PatchCollection([PolygonPatch(poly) for poly in geoms],
+                                 **kwargs)
 
+    if values is not None:
+        collection.set_array(np.asarray(values))
+        collection.set_cmap(cmap)
+        collection.set_clim(vmin, vmax)
 
-def plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs):
-    """ Can safely call with either Polygon or Multipolygon geometry
+    ax.add_collection(collection, autolim=True)
+    ax.autoscale_view()
+    return collection
+
+
+def plot_linestring_collection(ax, geoms, values=None, color=None,
+                               cmap=None, vmin=None, vmax=None, **kwargs):
     """
-    if geom.type == 'Polygon':
-        plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth, **kwargs)
-    elif geom.type == 'MultiPolygon':
-        for poly in geom.geoms:
-            plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth, **kwargs)
+    Plots a collection of LineString and MultiLineString geometries to `ax`
+
+    Parameters
+    ----------
+
+    ax : matplotlib.axes.Axes
+        where shapes will be plotted
+
+    geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be
+            mixed)
 
+    values : a sequence of `N` values, optional
+        Values will be mapped to colors using vmin/vmax/cmap. They should
+        have 1:1 correspondence with the geometries (not their components).
 
-def plot_linestring(ax, geom, color='black', linewidth=1.0, **kwargs):
-    """ Plot a single LineString geometry """
-    a = np.array(geom)
-    ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth, **kwargs)
+    color : single color or sequence of `N` colors
+        Cannot be used together with `values`.
 
+    Returns
+    -------
+
+    collection : matplotlib.collections.Collection that was plotted
 
-def plot_multilinestring(ax, geom, color='red', linewidth=1.0, **kwargs):
-    """ Can safely call with either LineString or MultiLineString geometry
     """
-    if geom.type == 'LineString':
-        plot_linestring(ax, geom, color=color, linewidth=linewidth, **kwargs)
-    elif geom.type == 'MultiLineString':
-        for line in geom.geoms:
-            plot_linestring(ax, line, color=color, linewidth=linewidth, **kwargs)
+    from matplotlib.collections import LineCollection
+
+    geoms, values = _flatten_multi_geoms(geoms, values)
+    if None in values:
+        values = None
+
+    # LineCollection does not accept some kwargs.
+    if 'markersize' in kwargs:
+        del kwargs['markersize']
+
+    # color=None gives black instead of default color cycle
+    if color is not None:
+        kwargs['color'] = color
+
+    segments = [np.array(linestring)[:, :2] for linestring in geoms]
+    collection = LineCollection(segments, **kwargs)
 
+    if values is not None:
+        collection.set_array(np.asarray(values))
+        collection.set_cmap(cmap)
+        collection.set_clim(vmin, vmax)
 
-def plot_point(ax, pt, marker='o', markersize=2, color='black', **kwargs):
-    """ Plot a single Point geometry """
-    ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, color=color, **kwargs)
+    ax.add_collection(collection, autolim=True)
+    ax.autoscale_view()
+    return collection
 
 
-def gencolor(N, colormap='Set1'):
+def plot_point_collection(ax, geoms, values=None, color=None,
+                          cmap=None, vmin=None, vmax=None,
+                          marker='o', markersize=None, **kwargs):
     """
-    Color generator intended to work with one of the ColorBrewer
-    qualitative color scales.
+    Plots a collection of Point geometries to `ax`
 
-    Suggested values of colormap are the following:
+    Parameters
+    ----------
 
-        Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
+    ax : matplotlib.axes.Axes
+        where shapes will be plotted
 
-    (although any matplotlib colormap will work).
+    geoms : sequence of `N` Points
+
+    values : a sequence of `N` values, optional
+        Values mapped to colors using vmin, vmax, and cmap.
+        Cannot be specified together with `color`.
+
+    markersize : scalar or array-like, optional
+        Size of the markers. Note that under the hood ``scatter`` is
+        used, so the specified value will be proportional to the
+        area of the marker (size in points^2).
+
+    Returns
+    -------
+    collection : matplotlib.collections.Collection that was plotted
     """
-    from matplotlib import cm
-    # don't use more than 9 discrete colors
-    n_colors = min(N, 9)
-    cmap = cm.get_cmap(colormap, n_colors)
-    colors = cmap(range(n_colors))
-    for i in xrange(N):
-        yield colors[i % n_colors]
+    if values is not None and color is not None:
+        raise ValueError("Can only specify one of 'values' and 'color' kwargs")
 
+    x = geoms.x.values
+    y = geoms.y.values
 
-def plot_series(s, cmap='Set1', color=None, ax=None, linewidth=1.0,
-                figsize=None, **color_kwds):
-    """ Plot a GeoSeries
+    # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None
+    if values is not None:
+        kwargs['c'] = values
+    if markersize is not None:
+        kwargs['s'] = markersize
 
-        Generate a plot of a GeoSeries geometry with matplotlib.
+    collection = ax.scatter(x, y, color=color, vmin=vmin, vmax=vmax, cmap=cmap,
+                            marker=marker, **kwargs)
+    return collection
 
-        Parameters
-        ----------
 
-        Series
-            The GeoSeries to be plotted.  Currently Polygon,
-            MultiPolygon, LineString, MultiLineString and Point
-            geometries can be plotted.
+def plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):
+    """
+    Plot a GeoSeries.
+
+    Generate a plot of a GeoSeries geometry with matplotlib.
 
-        cmap : str (default 'Set1')
-            The name of a colormap recognized by matplotlib.  Any
-            colormap will work, but categorical colormaps are
-            generally recommended.  Examples of useful discrete
-            colormaps include:
+    Parameters
+    ----------
 
-                Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
+    s : Series
+        The GeoSeries to be plotted.  Currently Polygon,
+        MultiPolygon, LineString, MultiLineString and Point
+        geometries can be plotted.
 
-        color : str (default None)
-            If specified, all objects will be colored uniformly.
+    cmap : str (default None)
+        The name of a colormap recognized by matplotlib.  Any
+        colormap will work, but categorical colormaps are
+        generally recommended.  Examples of useful discrete
+        colormaps include:
 
-        ax : matplotlib.pyplot.Artist (default None)
-            axes on which to draw the plot
+            tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2
 
-        linewidth : float (default 1.0)
-            Line width for geometries.
+    color : str (default None)
+        If specified, all objects will be colored uniformly.
 
-        figsize : pair of floats (default None)
-            Size of the resulting matplotlib.figure.Figure. If the argument
-            ax is given explicitly, figsize is ignored.
+    ax : matplotlib.pyplot.Artist (default None)
+        axes on which to draw the plot
 
-        **color_kwds : dict
-            Color options to be passed on to the actual plot function
+    figsize : pair of floats (default None)
+        Size of the resulting matplotlib.figure.Figure. If the argument
+        ax is given explicitly, figsize is ignored.
 
-        Returns
-        -------
+    **style_kwds : dict
+        Color options to be passed on to the actual plot function, such
+        as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
+        ``alpha``.
 
-        matplotlib axes instance
+    Returns
+    -------
+
+    matplotlib axes instance
     """
-    if 'colormap' in color_kwds:
+    if 'colormap' in style_kwds:
         warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                       "(for consistency with matplotlib)", FutureWarning)
-        cmap = color_kwds.pop('colormap')
-    if 'axes' in color_kwds:
+        cmap = style_kwds.pop('colormap')
+    if 'axes' in style_kwds:
         warnings.warn("'axes' is deprecated, please use 'ax' instead "
                       "(for consistency with pandas)", FutureWarning)
-        ax = color_kwds.pop('axes')
+        ax = style_kwds.pop('axes')
 
     import matplotlib.pyplot as plt
     if ax is None:
         fig, ax = plt.subplots(figsize=figsize)
         ax.set_aspect('equal')
-    color_generator = gencolor(len(s), colormap=cmap)
-    for geom in s:
-        if color is None:
-            col = next(color_generator)
-        else:
-            col = color
-        if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
-            if 'facecolor' in color_kwds:
-                plot_multipolygon(ax, geom, linewidth=linewidth, **color_kwds)
-            else:
-                plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds)
-        elif geom.type == 'LineString' or geom.type == 'MultiLineString':
-            plot_multilinestring(ax, geom, color=col, linewidth=linewidth, **color_kwds)
-        elif geom.type == 'Point':
-            plot_point(ax, geom, color=col, **color_kwds)
-    plt.draw()
-    return ax
 
+    # if cmap is specified, create range of colors based on cmap
+    values = None
+    if cmap is not None:
+        values = np.arange(len(s))
+        if hasattr(cmap, 'N'):
+            values = values % cmap.N
+        style_kwds['vmin'] = style_kwds.get('vmin', values.min())
+        style_kwds['vmax'] = style_kwds.get('vmax', values.max())
+
+    geom_types = s.geometry.type
+    poly_idx = np.asarray((geom_types == 'Polygon')
+                          | (geom_types == 'MultiPolygon'))
+    line_idx = np.asarray((geom_types == 'LineString')
+                          | (geom_types == 'MultiLineString'))
+    point_idx = np.asarray(geom_types == 'Point')
+
+    # plot all Polygons and all MultiPolygon components in the same collection
+    polys = s.geometry[poly_idx]
+
+    if not polys.empty:
+        # color overrides both face and edgecolor. As we want people to be
+        # able to use edgecolor as well, pass color to facecolor
+        facecolor = style_kwds.pop('facecolor', None)
+        if color is not None:
+            facecolor = color
+        values_ = values[poly_idx] if cmap else None
+        plot_polygon_collection(ax, polys, values_, facecolor=facecolor,
+                                cmap=cmap, **style_kwds)
+
+    # plot all LineStrings and MultiLineString components in same collection
+    lines = s.geometry[line_idx]
+    if not lines.empty:
+        values_ = values[line_idx] if cmap else None
+        plot_linestring_collection(ax, lines, values_, color=color, cmap=cmap,
+                                   **style_kwds)
+
+    # plot all Points in the same collection
+    points = s.geometry[point_idx]
+    if not points.empty:
+        values_ = values[point_idx] if cmap else None
+        plot_point_collection(ax, points, values_, color=color, cmap=cmap,
+                              **style_kwds)
 
-def plot_dataframe(s, column=None, cmap=None, color=None, linewidth=1.0,
-                   categorical=False, legend=False, ax=None,
-                   scheme=None, k=5, vmin=None, vmax=None, figsize=None,
-                   **color_kwds):
-    """ Plot a GeoDataFrame
-
-        Generate a plot of a GeoDataFrame with matplotlib.  If a
-        column is specified, the plot coloring will be based on values
-        in that column.  Otherwise, a categorical plot of the
-        geometries in the `geometry` column will be generated.
+    plt.draw()
+    return ax
 
-        Parameters
-        ----------
 
-        GeoDataFrame
-            The GeoDataFrame to be plotted.  Currently Polygon,
-            MultiPolygon, LineString, MultiLineString and Point
-            geometries can be plotted.
+def plot_dataframe(df, column=None, cmap=None, color=None, ax=None,
+                   categorical=False, legend=False, scheme=None, k=5,
+                   vmin=None, vmax=None, figsize=None, **style_kwds):
+    """
+    Plot a GeoDataFrame.
 
-        column : str (default None)
-            The name of the column to be plotted.
+    Generate a plot of a GeoDataFrame with matplotlib.  If a
+    column is specified, the plot coloring will be based on values
+    in that column.
 
-        categorical : bool (default False)
-            If False, cmap will reflect numerical values of the
-            column being plotted.  For non-numerical columns (or if
-            column=None), this will be set to True.
+    Parameters
+    ----------
 
-        cmap : str (default 'Set1')
-            The name of a colormap recognized by matplotlib.
+    df : GeoDataFrame
+        The GeoDataFrame to be plotted.  Currently Polygon,
+        MultiPolygon, LineString, MultiLineString and Point
+        geometries can be plotted.
 
-        color : str (default None)
-            If specified, all objects will be colored uniformly.
+    column : str (default None)
+        The name of the column to be plotted. Ignored if `color` is also set.
 
-        linewidth : float (default 1.0)
-            Line width for geometries.
+    cmap : str (default None)
+        The name of a colormap recognized by matplotlib.
 
-        legend : bool (default False)
-            Plot a legend (Experimental; currently for categorical
-            plots only)
+    categorical : bool (default False)
+        If False, cmap will reflect numerical values of the
+        column being plotted.  For non-numerical columns, this
+        will be set to True.
 
-        ax : matplotlib.pyplot.Artist (default None)
-            axes on which to draw the plot
+    color : str (default None)
+        If specified, all objects will be colored uniformly.
 
-        scheme : pysal.esda.mapclassify.Map_Classifier
-            Choropleth classification schemes (requires PySAL)
+    legend : bool (default False)
+        Plot a legend. Ignored if no `column` is given, or if `color` is given.
 
-        k   : int (default 5)
-            Number of classes (ignored if scheme is None)
+    ax : matplotlib.pyplot.Artist (default None)
+        axes on which to draw the plot
 
-        vmin : None or float (default None)
+    scheme : str (default None)
+        Name of a choropleth classification scheme (requires PySAL).
+        A pysal.esda.mapclassify.Map_Classifier object will be used
+        under the hood. Supported schemes: 'Equal_interval', 'Quantiles',
+        'Fisher_Jenks'
 
-            Minimum value of cmap. If None, the minimum data value
-            in the column to be plotted is used.
+    k   : int (default 5)
+        Number of classes (ignored if scheme is None)
 
-        vmax : None or float (default None)
+    vmin : None or float (default None)
+        Minimum value of cmap. If None, the minimum data value
+        in the column to be plotted is used.
 
-            Maximum value of cmap. If None, the maximum data value
-            in the column to be plotted is used.
+    vmax : None or float (default None)
+        Maximum value of cmap. If None, the maximum data value
+        in the column to be plotted is used.
 
-        figsize
-            Size of the resulting matplotlib.figure.Figure. If the argument
-            axes is given explicitly, figsize is ignored.
+    figsize
+        Size of the resulting matplotlib.figure.Figure. If the argument
+        axes is given explicitly, figsize is ignored.
 
-        **color_kwds : dict
-            Color options to be passed on to the actual plot function
+    **style_kwds : dict
+        Color options to be passed on to the actual plot function, such
+        as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
+        ``alpha``.
 
-        Returns
-        -------
+    Returns
+    -------
+    matplotlib axes instance
 
-        matplotlib axes instance
     """
-    if 'colormap' in color_kwds:
+    if 'colormap' in style_kwds:
         warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                       "(for consistency with matplotlib)", FutureWarning)
-        cmap = color_kwds.pop('colormap')
-    if 'axes' in color_kwds:
+        cmap = style_kwds.pop('colormap')
+    if 'axes' in style_kwds:
         warnings.warn("'axes' is deprecated, please use 'ax' instead "
                       "(for consistency with pandas)", FutureWarning)
-        ax = color_kwds.pop('axes')
+        ax = style_kwds.pop('axes')
+    if column and color:
+        warnings.warn("Only specify one of 'column' or 'color'. Using "
+                      "'color'.", UserWarning)
+        column = None
 
+    import matplotlib
     import matplotlib.pyplot as plt
-    from matplotlib.lines import Line2D
-    from matplotlib.colors import Normalize
-    from matplotlib import cm
 
     if column is None:
-        return plot_series(s.geometry, cmap=cmap, color=color,
-                           ax=ax, linewidth=linewidth, figsize=figsize,
-                           **color_kwds)
+        return plot_series(df.geometry, cmap=cmap, color=color, ax=ax,
+                           figsize=figsize, **style_kwds)
+
+    if df[column].dtype is np.dtype('O'):
+        categorical = True
+
+    # Define `values` as a Series
+    if categorical:
+        if cmap is None:
+            if LooseVersion(matplotlib.__version__) >= '2.0':
+                cmap = 'tab10'
+            else:
+                cmap = 'Set1'
+        categories = list(set(df[column].values))
+        categories.sort()
+        valuemap = dict([(k, v) for (v, k) in enumerate(categories)])
+        values = np.array([valuemap[k] for k in df[column]])
     else:
-        if s[column].dtype is np.dtype('O'):
-            categorical = True
+        values = df[column]
+    if scheme is not None:
+        binning = __pysal_choro(values, scheme, k=k)
+        # set categorical to True for creating the legend
+        categorical = True
+        binedges = [values.min()] + binning.bins.tolist()
+        categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])
+                      for i in range(len(binedges)-1)]
+        values = np.array(binning.yb)
+
+    if ax is None:
+        fig, ax = plt.subplots(figsize=figsize)
+        ax.set_aspect('equal')
+
+    mn = values.min() if vmin is None else vmin
+    mx = values.max() if vmax is None else vmax
+
+    geom_types = df.geometry.type
+    poly_idx = np.asarray((geom_types == 'Polygon')
+                          | (geom_types == 'MultiPolygon'))
+    line_idx = np.asarray((geom_types == 'LineString')
+                          | (geom_types == 'MultiLineString'))
+    point_idx = np.asarray(geom_types == 'Point')
+
+    # plot all Polygons and all MultiPolygon components in the same collection
+    polys = df.geometry[poly_idx]
+    if not polys.empty:
+        plot_polygon_collection(ax, polys, values[poly_idx],
+                                vmin=mn, vmax=mx, cmap=cmap, **style_kwds)
+
+    # plot all LineStrings and MultiLineString components in same collection
+    lines = df.geometry[line_idx]
+    if not lines.empty:
+        plot_linestring_collection(ax, lines, values[line_idx],
+                                   vmin=mn, vmax=mx, cmap=cmap, **style_kwds)
+
+    # plot all Points in the same collection
+    points = df.geometry[point_idx]
+    if not points.empty:
+        plot_point_collection(ax, points, values[point_idx],
+                              vmin=mn, vmax=mx, cmap=cmap, **style_kwds)
+
+    if legend and not color:
+        from matplotlib.lines import Line2D
+        from matplotlib.colors import Normalize
+        from matplotlib import cm
+
+        norm = Normalize(vmin=mn, vmax=mx)
+        n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
         if categorical:
-            if cmap is None:
-                cmap = 'Set1'
-            categories = list(set(s[column].values))
-            categories.sort()
-            valuemap = dict([(k, v) for (v, k) in enumerate(categories)])
-            values = [valuemap[k] for k in s[column]]
+            patches = []
+            for value, cat in enumerate(categories):
+                patches.append(
+                    Line2D([0], [0], linestyle="none", marker="o",
+                           alpha=style_kwds.get('alpha', 1), markersize=10,
+                           markerfacecolor=n_cmap.to_rgba(value)))
+            ax.legend(patches, categories, numpoints=1, loc='best')
         else:
-            values = s[column]
-        if scheme is not None:
-            binning = __pysal_choro(values, scheme, k=k)
-            values = binning.yb
-            # set categorical to True for creating the legend
-            categorical = True
-            binedges = [binning.yb.min()] + binning.bins.tolist()
-            categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])
-                          for i in range(len(binedges)-1)]
-        cmap = norm_cmap(values, cmap, Normalize, cm, vmin=vmin, vmax=vmax)
-        if ax is None:
-            fig, ax = plt.subplots(figsize=figsize)
-            ax.set_aspect('equal')
-        for geom, value in zip(s.geometry, values):
-            if color is None:
-                col = cmap.to_rgba(value)
-            else:
-                col = color
-            if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
-                plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds)
-            elif geom.type == 'LineString' or geom.type == 'MultiLineString':
-                plot_multilinestring(ax, geom, color=col, linewidth=linewidth, **color_kwds)
-            elif geom.type == 'Point':
-                plot_point(ax, geom, color=col, **color_kwds)
-        if legend:
-            if categorical:
-                patches = []
-                for value, cat in enumerate(categories):
-                    patches.append(Line2D([0], [0], linestyle="none",
-                                          marker="o", alpha=color_kwds.get('alpha', 0.5),
-                                          markersize=10, markerfacecolor=cmap.to_rgba(value)))
-                ax.legend(patches, categories, numpoints=1, loc='best')
-            else:
-                # TODO: show a colorbar
-                raise NotImplementedError
+            n_cmap.set_array([])
+            ax.get_figure().colorbar(n_cmap)
+
     plt.draw()
     return ax
 
 
 def __pysal_choro(values, scheme, k=5):
-    """ Wrapper for choropleth schemes from PySAL for use with plot_dataframe
+    """
+    Wrapper for choropleth schemes from PySAL for use with plot_dataframe
 
-        Parameters
-        ----------
+    Parameters
+    ----------
 
-        values
-            Series to be plotted
+    values
+        Series to be plotted
 
-        scheme
-            pysal.esda.mapclassify classificatin scheme
-            ['Equal_interval'|'Quantiles'|'Fisher_Jenks']
+    scheme
+        pysal.esda.mapclassify classification scheme
+        ['Equal_interval'|'Quantiles'|'Fisher_Jenks']
 
-        k
-            number of classes (2 <= k <=9)
+    k
+        number of classes (2 <= k <=9)
 
-        Returns
-        -------
+    Returns
+    -------
 
-        binning
-            Binning objects that holds the Series with values replaced with
-            class identifier and the bins.
-    """
+    binning
+        Binning object that holds the Series with values replaced with
+        class identifier and the bins.
 
+    """
     try:
-        from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks
+        from pysal.esda.mapclassify import (
+            Quantiles, Equal_Interval, Fisher_Jenks)
         schemes = {}
         schemes['equal_interval'] = Equal_Interval
         schemes['quantiles'] = Quantiles
         schemes['fisher_jenks'] = Fisher_Jenks
-        s0 = scheme
         scheme = scheme.lower()
         if scheme not in schemes:
-            scheme = 'quantiles'
-            warnings.warn('Unrecognized scheme "{0}". Using "Quantiles" '
-                          'instead'.format(s0), UserWarning, stacklevel=3)
-        if k < 2 or k > 9:
-            warnings.warn('Invalid k: {0} (2 <= k <= 9), setting k=5 '
-                          '(default)'.format(k), UserWarning, stacklevel=3)
-            k = 5
+            raise ValueError("Invalid scheme. Scheme must be in the"
+                             " set: %r" % schemes.keys())
         binning = schemes[scheme](values, k)
         return binning
     except ImportError:
         raise ImportError("PySAL is required to use the 'scheme' keyword")
-
-
-def norm_cmap(values, cmap, normalize, cm, vmin=None, vmax=None):
-
-    """ Normalize and set colormap
-
-        Parameters
-        ----------
-
-        values
-            Series or array to be normalized
-
-        cmap
-            matplotlib Colormap
-
-        normalize
-            matplotlib.colors.Normalize
-
-        cm
-            matplotlib.cm
-
-        vmin
-            Minimum value of colormap. If None, uses min(values).
-
-        vmax
-            Maximum value of colormap. If None, uses max(values).
-
-        Returns
-        -------
-        n_cmap
-            mapping of normalized values to colormap (cmap)
-
-    """
-
-    mn = min(values) if vmin is None else vmin
-    mx = max(values) if vmax is None else vmax
-    norm = normalize(vmin=mn, vmax=mx)
-    n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
-    return n_cmap
diff --git a/geopandas/sindex.py b/geopandas/sindex.py
index 669582a..413e0a9 100644
--- a/geopandas/sindex.py
+++ b/geopandas/sindex.py
@@ -1,7 +1,6 @@
 from geopandas import base
 
 if base.HAS_SINDEX:
-    from rtree.core import RTreeError
     from rtree.index import Index as RTreeIndex
 
 
diff --git a/geopandas/tests/baseline_images/test_plotting/lines_plot.png b/geopandas/tests/baseline_images/test_plotting/lines_plot.png
deleted file mode 100644
index e048aed..0000000
Binary files a/geopandas/tests/baseline_images/test_plotting/lines_plot.png and /dev/null differ
diff --git a/geopandas/tests/baseline_images/test_plotting/points_plot.png b/geopandas/tests/baseline_images/test_plotting/points_plot.png
deleted file mode 100644
index 74a6ce7..0000000
Binary files a/geopandas/tests/baseline_images/test_plotting/points_plot.png and /dev/null differ
diff --git a/geopandas/tests/baseline_images/test_plotting/poly_plot.png b/geopandas/tests/baseline_images/test_plotting/poly_plot.png
deleted file mode 100644
index a204e7c..0000000
Binary files a/geopandas/tests/baseline_images/test_plotting/poly_plot.png and /dev/null differ
diff --git a/geopandas/tests/baseline_images/test_plotting/poly_plot_with_kwargs.png b/geopandas/tests/baseline_images/test_plotting/poly_plot_with_kwargs.png
deleted file mode 100644
index a2f88f3..0000000
Binary files a/geopandas/tests/baseline_images/test_plotting/poly_plot_with_kwargs.png and /dev/null differ
diff --git a/geopandas/tests/test_datasets.py b/geopandas/tests/test_datasets.py
new file mode 100644
index 0000000..b34022b
--- /dev/null
+++ b/geopandas/tests/test_datasets.py
@@ -0,0 +1,18 @@
+from __future__ import absolute_import
+
+from geopandas import read_file, GeoDataFrame
+from geopandas.datasets import get_path
+
+
+class TestDatasets:
+
+    def test_read_paths(self):
+
+        gdf = read_file(get_path('naturalearth_lowres'))
+        assert isinstance(gdf, GeoDataFrame)
+
+        gdf = read_file(get_path('naturalearth_cities'))
+        assert isinstance(gdf, GeoDataFrame)
+
+        gdf = read_file(get_path('nybb'))
+        assert isinstance(gdf, GeoDataFrame)
diff --git a/geopandas/tests/test_dissolve.py b/geopandas/tests/test_dissolve.py
index 45cc877..c562d3c 100644
--- a/geopandas/tests/test_dissolve.py
+++ b/geopandas/tests/test_dissolve.py
@@ -1,40 +1,37 @@
 from __future__ import absolute_import
-import tempfile
-import shutil
+
 import numpy as np
-from shapely.geometry import Point
+import pandas as pd
+
+import geopandas
 from geopandas import GeoDataFrame, read_file
-from geopandas.tools import overlay
-from .util import unittest, download_nybb
+
 from pandas.util.testing import assert_frame_equal
-from pandas import Index
-from distutils.version import LooseVersion
-import pandas as pd
 
-pandas_0_15_problem = 'fails under pandas < 0.16 due to issue 324,'\
-                      'not problem with dissolve.'
 
-class TestDataFrame(unittest.TestCase):
+class TestDataFrame:
 
-    def setUp(self):
+    def setup_method(self):
 
-        nybb_filename, nybb_zip_path = download_nybb()
-        self.polydf = read_file(nybb_zip_path, vfs='zip://' + nybb_filename)
+        nybb_filename = geopandas.datasets.get_path('nybb')
+        self.polydf = read_file(nybb_filename)
         self.polydf = self.polydf[['geometry', 'BoroName', 'BoroCode']]
 
-        self.polydf = self.polydf.rename(columns={'geometry':'myshapes'})
+        self.polydf = self.polydf.rename(columns={'geometry': 'myshapes'})
         self.polydf = self.polydf.set_geometry('myshapes')
 
         self.polydf['manhattan_bronx'] = 5
-        self.polydf.loc[3:4,'manhattan_bronx']=6
+        self.polydf.loc[3:4, 'manhattan_bronx'] = 6
 
         # Merged geometry
-        manhattan_bronx = self.polydf.loc[3:4,]
-        others = self.polydf.loc[0:2,]
+        manhattan_bronx = self.polydf.loc[3:4, ]
+        others = self.polydf.loc[0:2, ]
 
-        collapsed = [others.geometry.unary_union, manhattan_bronx.geometry.unary_union]
-        merged_shapes = GeoDataFrame({'myshapes': collapsed}, geometry='myshapes',
-                             index=Index([5,6], name='manhattan_bronx'))
+        collapsed = [others.geometry.unary_union,
+                     manhattan_bronx.geometry.unary_union]
+        merged_shapes = GeoDataFrame(
+            {'myshapes': collapsed}, geometry='myshapes',
+            index=pd.Index([5, 6], name='manhattan_bronx'))
 
         # Different expected results
         self.first = merged_shapes.copy()
@@ -42,21 +39,27 @@ class TestDataFrame(unittest.TestCase):
         self.first['BoroCode'] = [5, 1]
 
         self.mean = merged_shapes.copy()
-        self.mean['BoroCode'] = [4,1.5]
+        self.mean['BoroCode'] = [4, 1.5]
 
-
-    @unittest.skipIf(str(pd.__version__) < LooseVersion('0.16'), pandas_0_15_problem)
     def test_geom_dissolve(self):
         test = self.polydf.dissolve('manhattan_bronx')
-        self.assertTrue(test.geometry.name == 'myshapes')
-        self.assertTrue(test.geom_almost_equals(self.first).all())
+        assert test.geometry.name == 'myshapes'
+        assert test.geom_almost_equals(self.first).all()
+
+    def test_dissolve_retains_existing_crs(self):
+        assert self.polydf.crs is not None
+        test = self.polydf.dissolve('manhattan_bronx')
+        assert test.crs is not None
+
+    def test_dissolve_retains_nonexisting_crs(self):
+        self.polydf.crs = None
+        test = self.polydf.dissolve('manhattan_bronx')
+        assert test.crs is None
 
-    @unittest.skipIf(str(pd.__version__) < LooseVersion('0.16'), pandas_0_15_problem)
     def test_first_dissolve(self):
         test = self.polydf.dissolve('manhattan_bronx')
         assert_frame_equal(self.first, test, check_column_type=False)
 
-    @unittest.skipIf(str(pd.__version__) < LooseVersion('0.16'), pandas_0_15_problem)
     def test_mean_dissolve(self):
         test = self.polydf.dissolve('manhattan_bronx', aggfunc='mean')
         assert_frame_equal(self.mean, test, check_column_type=False)
@@ -64,11 +67,11 @@ class TestDataFrame(unittest.TestCase):
         test = self.polydf.dissolve('manhattan_bronx', aggfunc=np.mean)
         assert_frame_equal(self.mean, test, check_column_type=False)
 
-    @unittest.skipIf(str(pd.__version__) < LooseVersion('0.16'), pandas_0_15_problem)
     def test_multicolumn_dissolve(self):
         multi = self.polydf.copy()
         multi['dup_col'] = multi.manhattan_bronx
-        multi_test = multi.dissolve(['manhattan_bronx', 'dup_col'], aggfunc='first')
+        multi_test = multi.dissolve(['manhattan_bronx', 'dup_col'],
+                                    aggfunc='first')
 
         first = self.first.copy()
         first['dup_col'] = first.index
@@ -76,7 +79,6 @@ class TestDataFrame(unittest.TestCase):
 
         assert_frame_equal(multi_test, first, check_column_type=False)
 
-    @unittest.skipIf(str(pd.__version__) < LooseVersion('0.16'), pandas_0_15_problem)
     def test_reset_index(self):
         test = self.polydf.dissolve('manhattan_bronx', as_index=False)
         comparison = self.first.reset_index()
diff --git a/geopandas/tests/test_geocode.py b/geopandas/tests/test_geocode.py
index 5f7b23a..9524c53 100644
--- a/geopandas/tests/test_geocode.py
+++ b/geopandas/tests/test_geocode.py
@@ -1,27 +1,27 @@
 from __future__ import absolute_import
 
-from fiona.crs import from_epsg
+import numpy as np
 import pandas as pd
-import pandas.util.testing as tm
+from fiona.crs import from_epsg
 from shapely.geometry import Point
-import geopandas as gpd
-import nose
 
-from geopandas import GeoSeries
+from geopandas import GeoSeries, GeoDataFrame
 from geopandas.tools import geocode, reverse_geocode
 from geopandas.tools.geocoding import _prepare_geocode_result
 
-from geopandas.tests.util import unittest, mock, assert_geoseries_equal
+import pytest
+from pandas.util.testing import assert_series_equal
+from geopandas.tests.util import mock, assert_geoseries_equal
 
 
 def _skip_if_no_geopy():
     try:
         import geopy
     except ImportError:
-        raise nose.SkipTest("Geopy not installed. Skipping tests.")
+        raise pytest.skip("Geopy not installed. Skipping tests.")
     except SyntaxError:
-        raise nose.SkipTest("Geopy is known to be broken on Python 3.2. "
-                            "Skipping tests.")
+        raise pytest.skip("Geopy is known to be broken on Python 3.2. "
+                          "Skipping tests.")
 
 
 class ForwardMock(mock.MagicMock):
@@ -58,8 +58,8 @@ class ReverseMock(mock.MagicMock):
         return super(ReverseMock, self).__call__(*args, **kwargs)
 
 
-class TestGeocode(unittest.TestCase):
-    def setUp(self):
+class TestGeocode:
+    def setup_method(self):
         _skip_if_no_geopy()
         self.locations = ['260 Broadway, New York, NY',
                           '77 Massachusetts Ave, Cambridge, MA']
@@ -69,75 +69,77 @@ class TestGeocode(unittest.TestCase):
     def test_prepare_result(self):
         # Calls _prepare_result with sample results from the geocoder call
         # loop
-        p0 = Point(12.3, -45.6) # Treat these as lat/lon
+        p0 = Point(12.3, -45.6)  # Treat these as lat/lon
         p1 = Point(-23.4, 56.7)
         d = {'a': ('address0', p0.coords[0]),
              'b': ('address1', p1.coords[0])}
 
         df = _prepare_geocode_result(d)
-        assert type(df) is gpd.GeoDataFrame
-        self.assertEqual(from_epsg(4326), df.crs)
-        self.assertEqual(len(df), 2)
-        self.assert_('address' in df)
+        assert type(df) is GeoDataFrame
+        assert from_epsg(4326) == df.crs
+        assert len(df) == 2
+        assert 'address' in df
 
         coords = df.loc['a']['geometry'].coords[0]
         test = p0.coords[0]
         # Output from the df should be lon/lat
-        self.assertAlmostEqual(coords[0], test[1])
-        self.assertAlmostEqual(coords[1], test[0])
+        assert coords[0] == pytest.approx(test[1])
+        assert coords[1] == pytest.approx(test[0])
 
         coords = df.loc['b']['geometry'].coords[0]
         test = p1.coords[0]
-        self.assertAlmostEqual(coords[0], test[1])
-        self.assertAlmostEqual(coords[1], test[0])
+        assert coords[0] == pytest.approx(test[1])
+        assert coords[1] == pytest.approx(test[0])
 
     def test_prepare_result_none(self):
-        p0 = Point(12.3, -45.6) # Treat these as lat/lon
+        p0 = Point(12.3, -45.6)  # Treat these as lat/lon
         d = {'a': ('address0', p0.coords[0]),
              'b': (None, None)}
 
         df = _prepare_geocode_result(d)
-        assert type(df) is gpd.GeoDataFrame
-        self.assertEqual(from_epsg(4326), df.crs)
-        self.assertEqual(len(df), 2)
-        self.assert_('address' in df)
+        assert type(df) is GeoDataFrame
+        assert from_epsg(4326) == df.crs
+        assert len(df) == 2
+        assert 'address' in df
 
         row = df.loc['b']
-        self.assertEqual(len(row['geometry'].coords), 0)
-        self.assert_(pd.np.isnan(row['address']))
+        assert len(row['geometry'].coords) == 0
+        assert np.isnan(row['address'])
     
     def test_bad_provider_forward(self):
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             geocode(['cambridge, ma'], 'badprovider')
 
     def test_bad_provider_reverse(self):
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             reverse_geocode(['cambridge, ma'], 'badprovider')
 
     def test_forward(self):
         with mock.patch('geopy.geocoders.googlev3.GoogleV3.geocode',
                         ForwardMock()) as m:
             g = geocode(self.locations, provider='googlev3', timeout=2)
-            self.assertEqual(len(self.locations), m.call_count)
+            assert len(self.locations) == m.call_count
 
         n = len(self.locations)
-        self.assertIsInstance(g, gpd.GeoDataFrame)
-        expected = GeoSeries([Point(float(x) + 0.5, float(x)) for x in range(n)],
-                             crs=from_epsg(4326))
+        assert isinstance(g, GeoDataFrame)
+        expected = GeoSeries(
+            [Point(float(x) + 0.5, float(x)) for x in range(n)],
+            crs=from_epsg(4326))
         assert_geoseries_equal(expected, g['geometry'])
-        tm.assert_series_equal(g['address'],
-                               pd.Series(self.locations, name='address'))
+        assert_series_equal(g['address'],
+                            pd.Series(self.locations, name='address'))
 
     def test_reverse(self):
         with mock.patch('geopy.geocoders.googlev3.GoogleV3.reverse',
                         ReverseMock()) as m:
             g = reverse_geocode(self.points, provider='googlev3', timeout=2)
-            self.assertEqual(len(self.points), m.call_count)
+            assert len(self.points) == m.call_count
 
-        self.assertIsInstance(g, gpd.GeoDataFrame)
+        assert isinstance(g, GeoDataFrame)
 
         expected = GeoSeries(self.points, crs=from_epsg(4326))
         assert_geoseries_equal(expected, g['geometry'])
-        address = pd.Series(['address' + str(x) for x in range(len(self.points))],
-                            name='address')
-        tm.assert_series_equal(g['address'], address)
+        address = pd.Series(
+            ['address' + str(x) for x in range(len(self.points))],
+            name='address')
+        assert_series_equal(g['address'], address)
diff --git a/geopandas/tests/test_geodataframe.py b/geopandas/tests/test_geodataframe.py
index 6739cd2..17e66b2 100644
--- a/geopandas/tests/test_geodataframe.py
+++ b/geopandas/tests/test_geodataframe.py
@@ -7,40 +7,43 @@ import shutil
 
 import numpy as np
 import pandas as pd
-from pandas.util.testing import assert_frame_equal
 from shapely.geometry import Point, Polygon
-
 import fiona
+
+import geopandas
 from geopandas import GeoDataFrame, read_file, GeoSeries
-from geopandas.tests.util import assert_geoseries_equal, connect, create_db, \
-    download_nybb, PACKAGE_DIR, PANDAS_NEW_SQL_API, unittest, validate_boro_df
 
+from geopandas.tests.util import (
+    assert_geoseries_equal, connect, create_db, PACKAGE_DIR,
+    validate_boro_df)
+from pandas.util.testing import assert_frame_equal
+import pytest
 
-class TestDataFrame(unittest.TestCase):
 
-    def setUp(self):
+class TestDataFrame:
+
+    def setup_method(self):
         N = 10
 
-        nybb_filename, nybb_zip_path = download_nybb()
+        nybb_filename = geopandas.datasets.get_path('nybb')
 
-        self.df = read_file(nybb_zip_path, vfs='zip://' + nybb_filename)
-        with fiona.open(nybb_zip_path, vfs='zip://' + nybb_filename) as f:
-            self.schema = f.schema
+        self.df = read_file(nybb_filename)
         self.tempdir = tempfile.mkdtemp()
         self.boros = self.df['BoroName']
         self.crs = {'init': 'epsg:4326'}
         self.df2 = GeoDataFrame([
             {'geometry': Point(x, y), 'value1': x + y, 'value2': x * y}
             for x, y in zip(range(N), range(N))], crs=self.crs)
-        self.df3 = read_file(os.path.join(PACKAGE_DIR, 'examples', 'null_geom.geojson'))
+        self.df3 = read_file(
+            os.path.join(PACKAGE_DIR, 'examples', 'null_geom.geojson'))
         self.line_paths = self.df3['Name']
 
-    def tearDown(self):
+    def teardown_method(self):
         shutil.rmtree(self.tempdir)
 
     def test_df_init(self):
-        self.assertTrue(type(self.df2) is GeoDataFrame)
-        self.assertTrue(self.df2.crs == self.crs)
+        assert type(self.df2) is GeoDataFrame
+        assert self.df2.crs == self.crs
 
     def test_different_geo_colname(self):
         data = {"A": range(5), "B": range(-5, 0),
@@ -48,16 +51,16 @@ class TestDataFrame(unittest.TestCase):
         df = GeoDataFrame(data, crs=self.crs, geometry='location')
         locs = GeoSeries(data['location'], crs=self.crs)
         assert_geoseries_equal(df.geometry, locs)
-        self.assert_('geometry' not in df)
-        self.assertEqual(df.geometry.name, 'location')
+        assert 'geometry' not in df
+        assert df.geometry.name == 'location'
         # internal implementation detail
-        self.assertEqual(df._geometry_column_name, 'location')
+        assert df._geometry_column_name == 'location'
 
         geom2 = [Point(x, y) for x, y in zip(range(5, 10), range(5))]
         df2 = df.set_geometry(geom2, crs='dummy_crs')
-        self.assert_('location' in df2)
-        self.assertEqual(df2.crs, 'dummy_crs')
-        self.assertEqual(df2.geometry.crs, 'dummy_crs')
+        assert 'location' in df2
+        assert df2.crs == 'dummy_crs'
+        assert df2.geometry.crs == 'dummy_crs'
         # reset so it outputs okay
         df2.crs = df.crs
         assert_geoseries_equal(df2.geometry, GeoSeries(geom2, crs=df2.crs))
@@ -66,28 +69,29 @@ class TestDataFrame(unittest.TestCase):
         data = {"A": range(5), "B": range(-5, 0),
                 "location": [Point(x, y) for x, y in zip(range(5), range(5))]}
         df = GeoDataFrame(data, crs=self.crs, geometry='location')
-        self.assert_(isinstance(df.geometry, GeoSeries))
+        assert isinstance(df.geometry, GeoSeries)
         df['geometry'] = df["A"]
-        self.assert_(isinstance(df.geometry, GeoSeries))
-        self.assertEqual(df.geometry[0], data['location'][0])
+        assert isinstance(df.geometry, GeoSeries)
+        assert df.geometry[0] == data['location'][0]
         # good if this changed in the future
-        self.assert_(not isinstance(df['geometry'], GeoSeries))
-        self.assert_(isinstance(df['location'], GeoSeries))
+        assert not isinstance(df['geometry'], GeoSeries)
+        assert isinstance(df['location'], GeoSeries)
 
-        data["geometry"] = [Point(x + 1, y - 1) for x, y in zip(range(5), range(5))]
+        data["geometry"] = [Point(x + 1, y - 1) for x, y in zip(range(5),
+                                                                range(5))]
         df = GeoDataFrame(data, crs=self.crs)
-        self.assert_(isinstance(df.geometry, GeoSeries))
-        self.assert_(isinstance(df['geometry'], GeoSeries))
+        assert isinstance(df.geometry, GeoSeries)
+        assert isinstance(df['geometry'], GeoSeries)
         # good if this changed in the future
-        self.assert_(not isinstance(df['location'], GeoSeries))
+        assert not isinstance(df['location'], GeoSeries)
 
     def test_geometry_property(self):
         assert_geoseries_equal(self.df.geometry, self.df['geometry'],
-                                  check_dtype=True, check_index_type=True)
+                               check_dtype=True, check_index_type=True)
 
         df = self.df.copy()
         new_geom = [Point(x, y) for x, y in zip(range(len(self.df)),
-                                               range(len(self.df)))]
+                                                range(len(self.df)))]
         df.geometry = new_geom
 
         new_geom = GeoSeries(new_geom, index=df.index, crs=df.crs)
@@ -97,36 +101,36 @@ class TestDataFrame(unittest.TestCase):
         # new crs
         gs = GeoSeries(new_geom, crs="epsg:26018")
         df.geometry = gs
-        self.assertEqual(df.crs, "epsg:26018")
+        assert df.crs == "epsg:26018"
 
     def test_geometry_property_errors(self):
-        with self.assertRaises(AttributeError):
+        with pytest.raises(AttributeError):
             df = self.df.copy()
             del df['geometry']
             df.geometry
 
         # list-like error
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             df = self.df2.copy()
             df.geometry = 'value1'
 
         # list-like error
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             df = self.df.copy()
             df.geometry = 'apple'
 
         # non-geometry error
-        with self.assertRaises(TypeError):
+        with pytest.raises(TypeError):
             df = self.df.copy()
             df.geometry = list(range(df.shape[0]))
 
-        with self.assertRaises(KeyError):
+        with pytest.raises(KeyError):
             df = self.df.copy()
             del df['geometry']
             df['geometry']
 
         # ndim error
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             df = self.df.copy()
             df.geometry = df
 
@@ -135,32 +139,32 @@ class TestDataFrame(unittest.TestCase):
         original_geom = self.df.geometry
 
         df2 = self.df.set_geometry(geom)
-        self.assert_(self.df is not df2)
+        assert self.df is not df2
         assert_geoseries_equal(df2.geometry, geom)
         assert_geoseries_equal(self.df.geometry, original_geom)
         assert_geoseries_equal(self.df['geometry'], self.df.geometry)
         # unknown column
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             self.df.set_geometry('nonexistent-column')
 
         # ndim error
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             self.df.set_geometry(self.df)
 
         # new crs - setting should default to GeoSeries' crs
         gs = GeoSeries(geom, crs="epsg:26018")
         new_df = self.df.set_geometry(gs)
-        self.assertEqual(new_df.crs, "epsg:26018")
+        assert new_df.crs == "epsg:26018"
 
         # explicit crs overrides self and dataframe
         new_df = self.df.set_geometry(gs, crs="epsg:27159")
-        self.assertEqual(new_df.crs, "epsg:27159")
-        self.assertEqual(new_df.geometry.crs, "epsg:27159")
+        assert new_df.crs == "epsg:27159"
+        assert new_df.geometry.crs == "epsg:27159"
 
         # Series should use dataframe's
         new_df = self.df.set_geometry(geom.values)
-        self.assertEqual(new_df.crs, self.df.crs)
-        self.assertEqual(new_df.geometry.crs, self.df.crs)
+        assert new_df.crs == self.df.crs
+        assert new_df.geometry.crs == self.df.crs
 
     def test_set_geometry_col(self):
         g = self.df.geometry
@@ -169,18 +173,18 @@ class TestDataFrame(unittest.TestCase):
         df2 = self.df.set_geometry('simplified_geometry')
 
         # Drop is false by default
-        self.assert_('simplified_geometry' in df2)
+        assert 'simplified_geometry' in df2
         assert_geoseries_equal(df2.geometry, g_simplified)
 
         # If True, drops column and renames to geometry
         df3 = self.df.set_geometry('simplified_geometry', drop=True)
-        self.assert_('simplified_geometry' not in df3)
+        assert 'simplified_geometry' not in df3
         assert_geoseries_equal(df3.geometry, g_simplified)
 
     def test_set_geometry_inplace(self):
         geom = [Point(x, y) for x, y in zip(range(5), range(5))]
         ret = self.df.set_geometry(geom, inplace=True)
-        self.assert_(ret is None)
+        assert ret is None
         geom = GeoSeries(geom, index=self.df.index, crs=self.df.crs)
         assert_geoseries_equal(self.df.geometry, geom)
 
@@ -202,14 +206,14 @@ class TestDataFrame(unittest.TestCase):
         df = self.df.set_geometry(g)
 
         for i, r in df.iterrows():
-            self.assertAlmostEqual(i, r['geometry'].x)
-            self.assertAlmostEqual(i, r['geometry'].y)
+            assert i == r['geometry'].x
+            assert i == r['geometry'].y
 
     def test_to_json(self):
         text = self.df.to_json()
         data = json.loads(text)
-        self.assertTrue(data['type'] == 'FeatureCollection')
-        self.assertTrue(len(data['features']) == 5)
+        assert data['type'] == 'FeatureCollection'
+        assert len(data['features']) == 5
 
     def test_to_json_geom_col(self):
         df = self.df.copy()
@@ -219,72 +223,72 @@ class TestDataFrame(unittest.TestCase):
 
         text = df.to_json()
         data = json.loads(text)
-        self.assertTrue(data['type'] == 'FeatureCollection')
-        self.assertTrue(len(data['features']) == 5)
+        assert data['type'] == 'FeatureCollection'
+        assert len(data['features']) == 5
 
     def test_to_json_na(self):
         # Set a value as nan and make sure it's written
-        self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
+        self.df.loc[self.df['BoroName'] == 'Queens', 'Shape_Area'] = np.nan
 
         text = self.df.to_json()
         data = json.loads(text)
-        self.assertTrue(len(data['features']) == 5)
+        assert len(data['features']) == 5
         for f in data['features']:
             props = f['properties']
-            self.assertEqual(len(props), 4)
+            assert len(props) == 4
             if props['BoroName'] == 'Queens':
-                self.assertTrue(props['Shape_Area'] is None)
+                assert props['Shape_Area'] is None
 
     def test_to_json_bad_na(self):
         # Check that a bad na argument raises error
-        with self.assertRaises(ValueError):
-            text = self.df.to_json(na='garbage')
+        with pytest.raises(ValueError):
+            self.df.to_json(na='garbage')
 
     def test_to_json_dropna(self):
-        self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
-        self.df.loc[self.df['BoroName']=='Bronx', 'Shape_Leng'] = np.nan
+        self.df.loc[self.df['BoroName'] == 'Queens', 'Shape_Area'] = np.nan
+        self.df.loc[self.df['BoroName'] == 'Bronx', 'Shape_Leng'] = np.nan
 
         text = self.df.to_json(na='drop')
         data = json.loads(text)
-        self.assertEqual(len(data['features']), 5)
+        assert len(data['features']) == 5
         for f in data['features']:
             props = f['properties']
             if props['BoroName'] == 'Queens':
-                self.assertEqual(len(props), 3)
-                self.assertTrue('Shape_Area' not in props)
+                assert len(props) == 3
+                assert 'Shape_Area' not in props
                 # Just make sure setting it to nan in a different row
                 # doesn't affect this one
-                self.assertTrue('Shape_Leng' in props)
+                assert 'Shape_Leng' in props
             elif props['BoroName'] == 'Bronx':
-                self.assertEqual(len(props), 3)
-                self.assertTrue('Shape_Leng' not in props)
-                self.assertTrue('Shape_Area' in props)
+                assert len(props) == 3
+                assert 'Shape_Leng' not in props
+                assert 'Shape_Area' in props
             else:
-                self.assertEqual(len(props), 4)
+                assert len(props) == 4
 
     def test_to_json_keepna(self):
-        self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
-        self.df.loc[self.df['BoroName']=='Bronx', 'Shape_Leng'] = np.nan
+        self.df.loc[self.df['BoroName'] == 'Queens', 'Shape_Area'] = np.nan
+        self.df.loc[self.df['BoroName'] == 'Bronx', 'Shape_Leng'] = np.nan
 
         text = self.df.to_json(na='keep')
         data = json.loads(text)
-        self.assertEqual(len(data['features']), 5)
+        assert len(data['features']) == 5
         for f in data['features']:
             props = f['properties']
-            self.assertEqual(len(props), 4)
+            assert len(props) == 4
             if props['BoroName'] == 'Queens':
-                self.assertTrue(np.isnan(props['Shape_Area']))
+                assert np.isnan(props['Shape_Area'])
                 # Just make sure setting it to nan in a different row
                 # doesn't affect this one
-                self.assertTrue('Shape_Leng' in props)
+                assert 'Shape_Leng' in props
             elif props['BoroName'] == 'Bronx':
-                self.assertTrue(np.isnan(props['Shape_Leng']))
-                self.assertTrue('Shape_Area' in props)
+                assert np.isnan(props['Shape_Leng'])
+                assert 'Shape_Area' in props
 
     def test_copy(self):
         df2 = self.df.copy()
-        self.assertTrue(type(df2) is GeoDataFrame)
-        self.assertEqual(self.df.crs, df2.crs)
+        assert type(df2) is GeoDataFrame
+        assert self.df.crs == df2.crs
 
     def test_to_file(self):
         """ Test to_file and from_file """
@@ -292,18 +296,18 @@ class TestDataFrame(unittest.TestCase):
         self.df.to_file(tempfilename)
         # Read layer back in
         df = GeoDataFrame.from_file(tempfilename)
-        self.assertTrue('geometry' in df)
-        self.assertTrue(len(df) == 5)
-        self.assertTrue(np.alltrue(df['BoroName'].values == self.boros))
+        assert 'geometry' in df
+        assert len(df) == 5
+        assert np.alltrue(df['BoroName'].values == self.boros)
 
         # Write layer with null geometry out to file
         tempfilename = os.path.join(self.tempdir, 'null_geom.shp')
         self.df3.to_file(tempfilename)
         # Read layer back in
         df3 = GeoDataFrame.from_file(tempfilename)
-        self.assertTrue('geometry' in df3)
-        self.assertTrue(len(df3) == 2)
-        self.assertTrue(np.alltrue(df3['Name'].values == self.line_paths))
+        assert 'geometry' in df3
+        assert len(df3) == 2
+        assert np.alltrue(df3['Name'].values == self.line_paths)
 
     def test_to_file_types(self):
         """ Test various integer type columns (GH#93) """
@@ -312,7 +316,7 @@ class TestDataFrame(unittest.TestCase):
                      np.uint8, np.uint16, np.uint32, np.uint64, np.long]
         geometry = self.df2.geometry
         data = dict((str(i), np.arange(len(geometry), dtype=dtype))
-                     for i, dtype in enumerate(int_types))
+                    for i, dtype in enumerate(int_types))
         df = GeoDataFrame(data, geometry=geometry)
         df.to_file(tempfilename)
 
@@ -320,20 +324,17 @@ class TestDataFrame(unittest.TestCase):
         """ Test that mixed geometry types raise error when writing to file """
         tempfilename = os.path.join(self.tempdir, 'test.shp')
         s = GeoDataFrame({'geometry': [Point(0, 0),
-                                        Polygon([(0, 0), (1, 0), (1, 1)])]})
-        with self.assertRaises(ValueError):
+                                       Polygon([(0, 0), (1, 0), (1, 1)])]})
+        with pytest.raises(ValueError):
             s.to_file(tempfilename)
 
     def test_to_file_schema(self):
         """
         Ensure that the file is written according to the schema
         if it is specified
-        
+
         """
-        try:
-            from collections import OrderedDict
-        except ImportError:
-            from ordereddict import OrderedDict
+        from collections import OrderedDict
 
         tempfilename = os.path.join(self.tempdir, 'test.shp')
         properties = OrderedDict([
@@ -350,23 +351,31 @@ class TestDataFrame(unittest.TestCase):
         with fiona.open(tempfilename) as f:
             result_schema = f.schema
 
-        self.assertEqual(result_schema, schema)
+        assert result_schema == schema
 
     def test_bool_index(self):
         # Find boros with 'B' in their name
         df = self.df[self.df['BoroName'].str.contains('B')]
-        self.assertTrue(len(df) == 2)
+        assert len(df) == 2
         boros = df['BoroName'].values
-        self.assertTrue('Brooklyn' in boros)
-        self.assertTrue('Bronx' in boros)
-        self.assertTrue(type(df) is GeoDataFrame)
+        assert 'Brooklyn' in boros
+        assert 'Bronx' in boros
+        assert type(df) is GeoDataFrame
+
+    def test_coord_slice_points(self):
+        assert self.df2.cx[-2:-1, -2:-1].empty
+        assert_frame_equal(self.df2, self.df2.cx[:, :])
+        assert_frame_equal(self.df2.loc[5:], self.df2.cx[5:, :])
+        assert_frame_equal(self.df2.loc[5:], self.df2.cx[:, 5:])
+        assert_frame_equal(self.df2.loc[5:], self.df2.cx[5:, 5:])
 
     def test_transform(self):
         df2 = self.df2.copy()
         df2.crs = {'init': 'epsg:26918', 'no_defs': True}
         lonlat = df2.to_crs(epsg=4326)
         utm = lonlat.to_crs(epsg=26918)
-        self.assertTrue(all(df2['geometry'].geom_almost_equals(utm['geometry'], decimal=2)))
+        assert all(df2['geometry'].geom_almost_equals(utm['geometry'],
+                                                      decimal=2))
 
     def test_to_crs_geo_column_name(self):
         # Test to_crs() with different geometry column name (GH#339)
@@ -376,37 +385,36 @@ class TestDataFrame(unittest.TestCase):
         df2.set_geometry('geom', inplace=True)
         lonlat = df2.to_crs(epsg=4326)
         utm = lonlat.to_crs(epsg=26918)
-        self.assertEqual(lonlat.geometry.name, 'geom')
-        self.assertEqual(utm.geometry.name, 'geom')
-        self.assertTrue(all(df2.geometry.geom_almost_equals(utm.geometry, decimal=2)))
+        assert lonlat.geometry.name == 'geom'
+        assert utm.geometry.name == 'geom'
+        assert all(df2.geometry.geom_almost_equals(utm.geometry, decimal=2))
 
     def test_from_features(self):
-        nybb_filename, nybb_zip_path = download_nybb()
-        with fiona.open(nybb_zip_path,
-                        vfs='zip://' + nybb_filename) as f:
+        nybb_filename = geopandas.datasets.get_path('nybb')
+        with fiona.open(nybb_filename) as f:
             features = list(f)
             crs = f.crs
 
         df = GeoDataFrame.from_features(features, crs=crs)
         df.rename(columns=lambda x: x.lower(), inplace=True)
-        validate_boro_df(self, df)
-        self.assert_(df.crs == crs)
+        validate_boro_df(df)
+        assert df.crs == crs
 
     def test_from_features_unaligned_properties(self):
         p1 = Point(1, 1)
         f1 = {'type': 'Feature',
-                'properties': {'a': 0},
-                'geometry': p1.__geo_interface__}
+              'properties': {'a': 0},
+              'geometry': p1.__geo_interface__}
 
         p2 = Point(2, 2)
         f2 = {'type': 'Feature',
-                'properties': {'b': 1},
-                'geometry': p2.__geo_interface__}
+              'properties': {'b': 1},
+              'geometry': p2.__geo_interface__}
 
         p3 = Point(3, 3)
         f3 = {'type': 'Feature',
-                'properties': {'a': 2},
-                'geometry': p3.__geo_interface__}
+              'properties': {'a': 2},
+              'geometry': p3.__geo_interface__}
 
         df = GeoDataFrame.from_features([f1, f2, f3])
 
@@ -416,26 +424,46 @@ class TestDataFrame(unittest.TestCase):
                                            {'a': 2, 'b': np.nan}])
         assert_frame_equal(expected, result)
 
+    def test_from_feature_collection(self):
+        data = {'name': ['a', 'b', 'c'],
+                'lat': [45, 46, 47.5],
+                'lon': [-120, -121.2, -122.9]}
+
+        df = pd.DataFrame(data)
+        geometry = [Point(xy) for xy in zip(df['lon'], df['lat'])]
+        gdf = GeoDataFrame(df, geometry=geometry)
+        # from_features returns sorted columns
+        expected = gdf[['geometry', 'lat', 'lon', 'name']]
+
+        # test FeatureCollection
+        res = GeoDataFrame.from_features(gdf.__geo_interface__)
+        assert_frame_equal(res, expected)
+
+        # test list of Features
+        res = GeoDataFrame.from_features(gdf.__geo_interface__['features'])
+        assert_frame_equal(res, expected)
+
+        # test __geo_interface__ attribute (a GeoDataFrame has one)
+        res = GeoDataFrame.from_features(gdf)
+        assert_frame_equal(res, expected)
+
     def test_from_postgis_default(self):
         con = connect('test_geopandas')
         if con is None or not create_db(self.df):
-            raise unittest.case.SkipTest()
+            raise pytest.skip()
 
         try:
             sql = "SELECT * FROM nybb;"
             df = GeoDataFrame.from_postgis(sql, con)
         finally:
-            if PANDAS_NEW_SQL_API:
-                # It's not really a connection, it's an engine
-                con = con.connect()
             con.close()
 
-        validate_boro_df(self, df)
+        validate_boro_df(df)
 
     def test_from_postgis_custom_geom_col(self):
         con = connect('test_geopandas')
         if con is None or not create_db(self.df):
-            raise unittest.case.SkipTest()
+            raise pytest.skip()
 
         try:
             sql = """SELECT
@@ -444,61 +472,57 @@ class TestDataFrame(unittest.TestCase):
                      FROM nybb;"""
             df = GeoDataFrame.from_postgis(sql, con, geom_col='__geometry__')
         finally:
-            if PANDAS_NEW_SQL_API:
-                # It's not really a connection, it's an engine
-                con = con.connect()
             con.close()
 
-        validate_boro_df(self, df)
+        validate_boro_df(df)
 
     def test_dataframe_to_geodataframe(self):
         df = pd.DataFrame({"A": range(len(self.df)), "location":
                            list(self.df.geometry)}, index=self.df.index)
         gf = df.set_geometry('location', crs=self.df.crs)
-        self.assertIsInstance(df, pd.DataFrame)
-        self.assertIsInstance(gf, GeoDataFrame)
+        assert isinstance(df, pd.DataFrame)
+        assert isinstance(gf, GeoDataFrame)
         assert_geoseries_equal(gf.geometry, self.df.geometry)
-        self.assertEqual(gf.geometry.name, 'location')
-        self.assert_('geometry' not in gf)
+        assert gf.geometry.name == 'location'
+        assert 'geometry' not in gf
 
         gf2 = df.set_geometry('location', crs=self.df.crs, drop=True)
-        self.assertIsInstance(df, pd.DataFrame)
-        self.assertIsInstance(gf2, GeoDataFrame)
-        self.assertEqual(gf2.geometry.name, 'geometry')
-        self.assert_('geometry' in gf2)
-        self.assert_('location' not in gf2)
-        self.assert_('location' in df)
+        assert isinstance(df, pd.DataFrame)
+        assert isinstance(gf2, GeoDataFrame)
+        assert gf2.geometry.name == 'geometry'
+        assert 'geometry' in gf2
+        assert 'location' not in gf2
+        assert 'location' in df
 
         # should be a copy
         df.ix[0, "A"] = 100
-        self.assertEqual(gf.ix[0, "A"], 0)
-        self.assertEqual(gf2.ix[0, "A"], 0)
+        assert gf.ix[0, "A"] == 0
+        assert gf2.ix[0, "A"] == 0
 
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             df.set_geometry('location', inplace=True)
 
     def test_geodataframe_geointerface(self):
-        self.assertEqual(self.df.__geo_interface__['type'], 'FeatureCollection')
-        self.assertEqual(len(self.df.__geo_interface__['features']),
-                         self.df.shape[0])
+        assert self.df.__geo_interface__['type'] == 'FeatureCollection'
+        assert len(self.df.__geo_interface__['features']) == self.df.shape[0]
 
     def test_geodataframe_geojson_no_bbox(self):
         geo = self.df._to_geo(na="null", show_bbox=False)
-        self.assertFalse('bbox' in geo.keys())
+        assert 'bbox' not in geo.keys()
         for feature in geo['features']:
-            self.assertFalse('bbox' in feature.keys())
+            assert 'bbox' not in feature.keys()
 
     def test_geodataframe_geojson_bbox(self):
         geo = self.df._to_geo(na="null", show_bbox=True)
-        self.assertTrue('bbox' in geo.keys())
-        self.assertEqual(len(geo['bbox']), 4)
-        self.assertTrue(isinstance(geo['bbox'], tuple))
+        assert 'bbox' in geo.keys()
+        assert len(geo['bbox']) == 4
+        assert isinstance(geo['bbox'], tuple)
         for feature in geo['features']:
-            self.assertTrue('bbox' in feature.keys())
+            assert 'bbox' in feature.keys()
 
     def test_pickle(self):
         filename = os.path.join(self.tempdir, 'df.pkl')
         self.df.to_pickle(filename)
         unpickled = pd.read_pickle(filename)
         assert_frame_equal(self.df, unpickled)
-        self.assertEqual(self.df.crs, unpickled.crs)
+        assert self.df.crs == unpickled.crs
diff --git a/geopandas/tests/test_geom_methods.py b/geopandas/tests/test_geom_methods.py
index 0bccec3..b4c0623 100644
--- a/geopandas/tests/test_geom_methods.py
+++ b/geopandas/tests/test_geom_methods.py
@@ -3,31 +3,32 @@ from __future__ import absolute_import
 import string
 
 import numpy as np
-from numpy.testing import assert_array_equal
-from pandas.util.testing import assert_series_equal, assert_frame_equal
 from pandas import Series, DataFrame, MultiIndex
 from shapely.geometry import (
-    Point, LinearRing, LineString, Polygon, MultiPoint
-)
+    Point, LinearRing, LineString, Polygon, MultiPoint)
 from shapely.geometry.collection import GeometryCollection
 from shapely.ops import unary_union
 
 from geopandas import GeoSeries, GeoDataFrame
 from geopandas.base import GeoPandasBase
+
 from geopandas.tests.util import (
-    unittest, geom_equals, geom_almost_equals, assert_geoseries_equal
-)
+    geom_equals, geom_almost_equals, assert_geoseries_equal)
+
+import pytest
+from numpy.testing import assert_array_equal
+from pandas.util.testing import assert_series_equal, assert_frame_equal
 
 
-class TestGeomMethods(unittest.TestCase):
+class TestGeomMethods:
 
-    def setUp(self):
+    def setup_method(self):
         self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])
         self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])
         self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])
         self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
         self.inner_sq = Polygon([(0.25, 0.25), (0.75, 0.25), (0.75, 0.75),
-                            (0.25, 0.75)])
+                                 (0.25, 0.75)])
         self.nested_squares = Polygon(self.sq.boundary,
                                       [self.inner_sq.boundary])
         self.p0 = Point(5, 5)
@@ -60,13 +61,12 @@ class TestGeomMethods(unittest.TestCase):
 
         # Placeholder for testing, will just drop in different geometries
         # when needed
-        self.gdf1 = GeoDataFrame({'geometry' : self.g1,
-                                  'col0' : [1.0, 2.0],
-                                  'col1' : ['geo', 'pandas']})
-        self.gdf2 = GeoDataFrame({'geometry' : self.g1,
-                                  'col3' : [4, 5],
-                                  'col4' : ['rand', 'string']})
-
+        self.gdf1 = GeoDataFrame({'geometry': self.g1,
+                                  'col0': [1.0, 2.0],
+                                  'col1': ['geo', 'pandas']})
+        self.gdf2 = GeoDataFrame({'geometry': self.g1,
+                                  'col3': [4, 5],
+                                  'col4': ['rand', 'string']})
 
     def _test_unary_real(self, op, expected, a):
         """ Tests for 'area', 'length', 'is_valid', etc. """
@@ -77,7 +77,7 @@ class TestGeomMethods(unittest.TestCase):
         if isinstance(expected, GeoPandasBase):
             fcmp = assert_geoseries_equal
         else:
-            fcmp = lambda a, b: self.assert_(a.equals(b))
+            def fcmp(a, b): assert a.equals(b)
         self._test_unary(op, expected, a, fcmp)
 
     def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):
@@ -85,19 +85,20 @@ class TestGeomMethods(unittest.TestCase):
         if isinstance(expected, GeoPandasBase):
             fcmp = assert_geoseries_equal
         else:
-            fcmp = lambda a, b: self.assert_(geom_equals(a, b))
+            def fcmp(a, b): assert geom_equals(a, b)
 
         if isinstance(b, GeoPandasBase):
             right_df = True
         else:
             right_df = False
 
-        self._binary_op_test(op, expected, a, b, fcmp, True, right_df, 
-                        *args, **kwargs)
+        self._binary_op_test(op, expected, a, b, fcmp, True, right_df,
+                             *args, **kwargs)
 
     def _test_binary_real(self, op, expected, a, b, *args, **kwargs):
         fcmp = assert_series_equal
-        self._binary_op_test(op, expected, a, b, fcmp, True, False, *args, **kwargs)
+        self._binary_op_test(op, expected, a, b, fcmp, True, False,
+                             *args, **kwargs)
 
     def _test_binary_operator(self, op, expected, a, b):
         """
@@ -108,7 +109,7 @@ class TestGeomMethods(unittest.TestCase):
         if isinstance(expected, GeoPandasBase):
             fcmp = assert_geoseries_equal
         else:
-            fcmp = lambda a, b: self.assert_(geom_equals(a, b))
+            def fcmp(a, b): assert geom_equals(a, b)
 
         if isinstance(b, GeoPandasBase):
             right_df = True
@@ -118,7 +119,7 @@ class TestGeomMethods(unittest.TestCase):
         self._binary_op_test(op, expected, a, b, fcmp, False, right_df)
 
     def _binary_op_test(self, op, expected, left, right, fcmp, left_df,
-                        right_df, 
+                        right_df,
                         *args, **kwargs):
         """
         This is a helper to call a function on GeoSeries and GeoDataFrame
@@ -129,12 +130,12 @@ class TestGeomMethods(unittest.TestCase):
 
         Parameters
         ----------
-        
+
         expected : str
             The operation to be tested. e.g., 'intersection'
         left: GeoSeries
         right: GeoSeries
-        fcmp: function 
+        fcmp: function
             Called with the result of the operation and expected. It should
             assert if the result is incorrect
         left_df: bool
@@ -148,16 +149,16 @@ class TestGeomMethods(unittest.TestCase):
             n = len(s)
             col1 = string.ascii_lowercase[:n]
             col2 = range(n)
-            
-            return GeoDataFrame({'geometry': s.values, 
-                                 'col1' : col1, 
-                                 'col2' : col2},
-                                 index=s.index, crs=s.crs)
+
+            return GeoDataFrame({'geometry': s.values,
+                                 'col1': col1,
+                                 'col2': col2},
+                                index=s.index, crs=s.crs)
 
         # Test GeoSeries.op(GeoSeries)
         result = getattr(left, op)(right, *args, **kwargs)
         fcmp(result, expected)
-        
+
         if left_df:
             # Test GeoDataFrame.op(GeoSeries)
             gdf_left = _make_gdf(left)
@@ -186,7 +187,7 @@ class TestGeomMethods(unittest.TestCase):
         fcmp(result, expected)
 
     def test_intersection(self):
-        self._test_binary_topological('intersection', self.t1, 
+        self._test_binary_topological('intersection', self.t1,
                                       self.g1, self.g2)
 
     def test_union_series(self):
@@ -232,8 +233,8 @@ class TestGeomMethods(unittest.TestCase):
         # Set columns to get the order right
         expected = DataFrame({'minx': [0.0, 0.0], 'miny': [0.0, 0.0],
                               'maxx': [1.0, 1.0], 'maxy': [1.0, 1.0]},
-                              index=self.g1.index,
-                              columns=['minx', 'miny', 'maxx', 'maxy'])
+                             index=self.g1.index,
+                             columns=['minx', 'miny', 'maxx', 'maxy'])
 
         result = self.g1.bounds
         assert_frame_equal(expected, result)
@@ -275,10 +276,8 @@ class TestGeomMethods(unittest.TestCase):
         assert_array_equal(expected, self.g0.disjoint(self.t1))
 
     def test_distance(self):
-        expected = Series(np.array([
-                          np.sqrt((5 - 1)**2 + (5 - 1)**2),
-                          np.nan]),
-                    self.na_none.index)
+        expected = Series(np.array([np.sqrt((5 - 1)**2 + (5 - 1)**2), np.nan]),
+                          self.na_none.index)
         assert_array_equal(expected, self.na_none.distance(self.p0))
 
         expected = Series(np.array([np.sqrt(4**2 + 4**2), np.nan]),
@@ -326,6 +325,21 @@ class TestGeomMethods(unittest.TestCase):
         expected = Series(np.array([True] * len(self.g1)), self.g1.index)
         self._test_unary_real('is_simple', expected, self.g1)
 
+    def test_xy_points(self):
+        expected_x = [-73.9847, -74.0446]
+        expected_y = [40.7484, 40.6893]
+
+        assert_array_equal(expected_x, self.landmarks.geometry.x)
+        assert_array_equal(expected_y, self.landmarks.geometry.y)
+
+    def test_xy_polygons(self):
+        # accessing x attribute in polygon geoseries should raise an error
+        with pytest.raises(ValueError):
+            _ = self.gdf1.geometry.x
+        # and same for accessing y attribute in polygon geoseries
+        with pytest.raises(ValueError):
+            _ = self.gdf1.geometry.y
+
     def test_exterior(self):
         exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3])
         for expected, computed in zip(exp_exterior, self.g3.exterior):
@@ -337,7 +351,6 @@ class TestGeomMethods(unittest.TestCase):
         for expected, computed in zip(exp_interiors, square_series.interiors):
             assert computed[0].equals(expected)
 
-
     def test_interpolate(self):
         expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)])
         self._test_binary_topological('interpolate', expected, self.g5,
@@ -358,22 +371,21 @@ class TestGeomMethods(unittest.TestCase):
 
     def test_translate_tuple(self):
         trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y
-        self.assert_(self.landmarks.translate(*trans)[0].equals(self.sol))
+        assert self.landmarks.translate(*trans)[0].equals(self.sol)
 
         res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0]
-        self.assert_(res.equals(self.sol))
+        assert res.equals(self.sol)
 
     def test_rotate(self):
         angle = 98
         expected = self.g4
 
-        o = Point(0,0)
+        o = Point(0, 0)
         res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o)
-        self.assert_(geom_almost_equals(self.g4, res))
+        assert geom_almost_equals(self.g4, res)
 
-        res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0,0))
-        self.assert_(geom_almost_equals(expected,
-                                        res.rotate(-angle, origin=o)))
+        res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0))
+        assert geom_almost_equals(expected, res.rotate(-angle, origin=o))
 
     def test_scale(self):
         expected = self.g4
@@ -381,57 +393,59 @@ class TestGeomMethods(unittest.TestCase):
         scale = 2., 1.
         inv = tuple(1./i for i in scale)
 
-        o = Point(0,0)
+        o = Point(0, 0)
         res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o)
-        self.assertTrue(geom_almost_equals(expected, res))
+        assert geom_almost_equals(expected, res)
 
         res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o)
         res = res.scale(*inv, origin=o)
-        self.assert_(geom_almost_equals(expected, res))
+        assert geom_almost_equals(expected, res)
 
     def test_skew(self):
         expected = self.g4
 
         skew = 45.
-        o = Point(0,0)
+        o = Point(0, 0)
 
         # Test xs
         res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o)
-        self.assert_(geom_almost_equals(expected, res))
+        assert geom_almost_equals(expected, res)
 
         res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o)
         res = res.skew(xs=-skew, origin=o)
-        self.assert_(geom_almost_equals(expected, res))
+        assert geom_almost_equals(expected, res)
 
         # Test ys
         res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o)
-        self.assert_(geom_almost_equals(expected, res))
+        assert geom_almost_equals(expected, res)
 
         res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o)
         res = res.skew(ys=-skew, origin=o)
-        self.assert_(geom_almost_equals(expected, res))
+        assert geom_almost_equals(expected, res)
 
     def test_envelope(self):
         e = self.g3.envelope
-        self.assertTrue(np.alltrue(e.geom_equals(self.sq)))
-        self.assertIsInstance(e, GeoSeries)
-        self.assertEqual(self.g3.crs, e.crs)
+        assert np.all(e.geom_equals(self.sq))
+        assert isinstance(e, GeoSeries)
+        assert self.g3.crs == e.crs
 
     def test_total_bounds(self):
         bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y
-        self.assert_(self.landmarks.total_bounds, bbox)
+        assert isinstance(self.landmarks.total_bounds, np.ndarray)
+        assert tuple(self.landmarks.total_bounds) == bbox
 
         df = GeoDataFrame({'geometry': self.landmarks,
                            'col1': range(len(self.landmarks))})
-        self.assert_(df.total_bounds, bbox)
+        assert tuple(df.total_bounds) == bbox
 
     def test_explode(self):
-        s = GeoSeries([MultiPoint([(0,0), (1,1)]),
-                      MultiPoint([(2,2), (3,3), (4,4)])])
+        s = GeoSeries([MultiPoint([(0, 0), (1, 1)]),
+                       MultiPoint([(2, 2), (3, 3), (4, 4)])])
 
         index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)]
-        expected = GeoSeries([Point(0,0), Point(1,1), Point(2,2), Point(3,3),
-                              Point(4,4)], index=MultiIndex.from_tuples(index))
+        expected = GeoSeries([Point(0, 0), Point(1, 1), Point(2, 2),
+                              Point(3, 3), Point(4, 4)],
+                             index=MultiIndex.from_tuples(index))
 
         assert_geoseries_equal(expected, s.explode())
 
@@ -455,10 +469,10 @@ class TestGeomMethods(unittest.TestCase):
     def test_symmetric_difference_operator(self):
         self._test_binary_operator('__xor__', self.sq, self.g3, self.g4)
 
-    def test_difference_series(self):
+    def test_difference_series2(self):
         expected = GeoSeries([GeometryCollection(), self.t2])
         self._test_binary_operator('__sub__', expected, self.g1, self.g2)
 
-    def test_difference_poly(self):
+    def test_difference_poly2(self):
         expected = GeoSeries([self.t1, self.t1])
         self._test_binary_operator('__sub__', expected, self.g1, self.t2)
diff --git a/geopandas/tests/test_geoseries.py b/geopandas/tests/test_geoseries.py
index 42b585b..ec96775 100644
--- a/geopandas/tests/test_geoseries.py
+++ b/geopandas/tests/test_geoseries.py
@@ -4,18 +4,24 @@ import os
 import json
 import shutil
 import tempfile
+
 import numpy as np
-from numpy.testing import assert_array_equal
+import pandas as pd
 from shapely.geometry import (Polygon, Point, LineString,
                               MultiPoint, MultiLineString, MultiPolygon)
 from shapely.geometry.base import BaseGeometry
+
 from geopandas import GeoSeries
-from geopandas.tests.util import unittest, geom_equals
+
+import pytest
+from geopandas.tests.util import geom_equals
+from numpy.testing import assert_array_equal
+from pandas.util.testing import assert_series_equal
 
 
-class TestSeries(unittest.TestCase):
+class TestSeries:
 
-    def setUp(self):
+    def setup_method(self):
         self.tempdir = tempfile.mkdtemp()
         self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])
         self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])
@@ -39,14 +45,14 @@ class TestSeries(unittest.TestCase):
         self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])
         self.g5 = GeoSeries([self.l1, self.l2])
 
-    def tearDown(self):
+    def teardown_method(self):
         shutil.rmtree(self.tempdir)
 
     def test_single_geom_constructor(self):
-        p = Point(1,2)
+        p = Point(1, 2)
         line = LineString([(2, 3), (4, 5), (5, 6)])
         poly = Polygon([(0, 0), (1, 0), (1, 1)],
-                          [[(.1, .1), (.9, .1), (.9, .9)]])
+                       [[(.1, .1), (.9, .1), (.9, .9)]])
         mp = MultiPoint([(1, 2), (3, 4), (5, 6)])
         mline = MultiLineString([[(1, 2), (3, 4), (5, 6)], [(7, 8), (9, 10)]])
 
@@ -59,53 +65,59 @@ class TestSeries(unittest.TestCase):
 
         for g in geoms:
             gs = GeoSeries(g)
-            self.assert_(len(gs) == 1)
-            self.assert_(gs.iloc[0] is g)
+            assert len(gs) == 1
+            assert gs.iloc[0] is g
 
             gs = GeoSeries(g, index=index)
-            self.assert_(len(gs) == len(index))
+            assert len(gs) == len(index)
             for x in gs:
-                self.assert_(x is g)
+                assert x is g
 
     def test_copy(self):
         gc = self.g3.copy()
-        self.assertTrue(type(gc) is GeoSeries)
-        self.assertEqual(self.g3.name, gc.name)
-        self.assertEqual(self.g3.crs, gc.crs)
+        assert type(gc) is GeoSeries
+        assert self.g3.name == gc.name
+        assert self.g3.crs == gc.crs
 
     def test_in(self):
-        self.assertTrue(self.t1 in self.g1)
-        self.assertTrue(self.sq in self.g1)
-        self.assertTrue(self.t1 in self.a1)
-        self.assertTrue(self.t2 in self.g3)
-        self.assertTrue(self.sq not in self.g3)
-        self.assertTrue(5 not in self.g3)
+        assert self.t1 in self.g1
+        assert self.sq in self.g1
+        assert self.t1 in self.a1
+        assert self.t2 in self.g3
+        assert self.sq not in self.g3
+        assert 5 not in self.g3
 
     def test_geom_equals(self):
-        self.assertTrue(np.alltrue(self.g1.geom_equals(self.g1)))
+        assert np.all(self.g1.geom_equals(self.g1))
         assert_array_equal(self.g1.geom_equals(self.sq), [False, True])
 
     def test_geom_equals_align(self):
         a = self.a1.geom_equals(self.a2)
-        self.assertFalse(a['A'])
-        self.assertTrue(a['B'])
-        self.assertFalse(a['C'])
+        exp = pd.Series([False, True, False], index=['A', 'B', 'C'])
+        assert_series_equal(a, exp)
 
     def test_align(self):
         a1, a2 = self.a1.align(self.a2)
-        self.assertTrue(a2['A'].is_empty)
-        self.assertTrue(a1['B'].equals(a2['B']))
-        self.assertTrue(a1['C'].is_empty)
+        assert a2['A'].is_empty
+        assert a1['B'].equals(a2['B'])
+        assert a1['C'].is_empty
 
     def test_geom_almost_equals(self):
         # TODO: test decimal parameter
-        self.assertTrue(np.alltrue(self.g1.geom_almost_equals(self.g1)))
+        assert np.all(self.g1.geom_almost_equals(self.g1))
         assert_array_equal(self.g1.geom_almost_equals(self.sq), [False, True])
 
     def test_geom_equals_exact(self):
         # TODO: test tolerance parameter
-        self.assertTrue(np.alltrue(self.g1.geom_equals_exact(self.g1, 0.001)))
-        assert_array_equal(self.g1.geom_equals_exact(self.sq, 0.001), [False, True])
+        assert np.all(self.g1.geom_equals_exact(self.g1, 0.001))
+        assert_array_equal(self.g1.geom_equals_exact(self.sq, 0.001),
+                           [False, True])
+
+    def test_equal_comp_op(self):
+        s = GeoSeries([Point(x, x) for x in range(3)])
+        res = s == Point(1, 1)
+        exp = pd.Series([False, True, False])
+        assert_series_equal(res, exp)
 
     def test_to_file(self):
         """ Test to_file and from_file """
@@ -113,72 +125,82 @@ class TestSeries(unittest.TestCase):
         self.g3.to_file(tempfilename)
         # Read layer back in?
         s = GeoSeries.from_file(tempfilename)
-        self.assertTrue(all(self.g3.geom_equals(s)))
+        assert all(self.g3.geom_equals(s))
         # TODO: compare crs
 
     def test_to_json(self):
-        """Test whether GeoSeries.to_json works and returns an actual json file."""
+        """
+        Test whether GeoSeries.to_json works and returns an actual json file.
+        """
         json_str = self.g3.to_json()
         json_dict = json.loads(json_str)
         # TODO : verify the output is a valid GeoJSON.
 
     def test_representative_point(self):
-        self.assertTrue(np.alltrue(self.g1.contains(self.g1.representative_point())))
-        self.assertTrue(np.alltrue(self.g2.contains(self.g2.representative_point())))
-        self.assertTrue(np.alltrue(self.g3.contains(self.g3.representative_point())))
-        self.assertTrue(np.alltrue(self.g4.contains(self.g4.representative_point())))
+        assert np.all(self.g1.contains(self.g1.representative_point()))
+        assert np.all(self.g2.contains(self.g2.representative_point()))
+        assert np.all(self.g3.contains(self.g3.representative_point()))
+        assert np.all(self.g4.contains(self.g4.representative_point()))
 
     def test_transform(self):
         utm18n = self.landmarks.to_crs(epsg=26918)
         lonlat = utm18n.to_crs(epsg=4326)
-        self.assertTrue(np.alltrue(self.landmarks.geom_almost_equals(lonlat)))
-        with self.assertRaises(ValueError):
+        assert np.all(self.landmarks.geom_almost_equals(lonlat))
+        with pytest.raises(ValueError):
             self.g1.to_crs(epsg=4326)
-        with self.assertRaises(TypeError):
+        with pytest.raises(TypeError):
             self.landmarks.to_crs(crs=None, epsg=None)
 
     def test_fillna(self):
-        na = self.na_none.fillna(Point())
-        self.assertTrue(isinstance(na[2], BaseGeometry))
-        self.assertTrue(na[2].is_empty)
-        self.assertTrue(geom_equals(self.na_none[:2], na[:2]))
+        # default is to fill with empty geometry
+        na = self.na_none.fillna()
+        assert isinstance(na[2], BaseGeometry)
+        assert na[2].is_empty
+        assert geom_equals(self.na_none[:2], na[:2])
         # XXX: method works inconsistently for different pandas versions
-        #self.na_none.fillna(method='backfill')
+        # self.na_none.fillna(method='backfill')
 
     def test_coord_slice(self):
         """ Test CoordinateSlicer """
         # need some better test cases
-        self.assertTrue(geom_equals(self.g3, self.g3.cx[:, :]))
-        self.assertTrue(geom_equals(self.g3[[True, False]], self.g3.cx[0.9:, :0.1]))
-        self.assertTrue(geom_equals(self.g3[[False, True]], self.g3.cx[0:0.1, 0.9:1.0]))
+        assert geom_equals(self.g3, self.g3.cx[:, :])
+        assert geom_equals(self.g3[[True, False]], self.g3.cx[0.9:, :0.1])
+        assert geom_equals(self.g3[[False, True]], self.g3.cx[0:0.1, 0.9:1.0])
+
+    def test_coord_slice_with_zero(self):
+        # Test that CoordinateSlice correctly handles zero slice (#GH477).
+
+        gs = GeoSeries([Point(x, x) for x in range(-3, 4)])
+        assert geom_equals(gs.cx[:0, :0], gs.loc[:3])
+        assert geom_equals(gs.cx[:, :0], gs.loc[:3])
+        assert geom_equals(gs.cx[:0, :], gs.loc[:3])
+        assert geom_equals(gs.cx[0:, 0:], gs.loc[3:])
+        assert geom_equals(gs.cx[0:, :], gs.loc[3:])
+        assert geom_equals(gs.cx[:, 0:], gs.loc[3:])
 
     def test_geoseries_geointerface(self):
-        self.assertEqual(self.g1.__geo_interface__['type'], 'FeatureCollection')
-        self.assertEqual(len(self.g1.__geo_interface__['features']),
-                         self.g1.shape[0])
+        assert self.g1.__geo_interface__['type'] == 'FeatureCollection'
+        assert len(self.g1.__geo_interface__['features']) == self.g1.shape[0]
 
     def test_proj4strings(self):
         # As string
         reprojected = self.g3.to_crs('+proj=utm +zone=30N')
         reprojected_back = reprojected.to_crs(epsg=4326)
-        self.assertTrue(np.alltrue(self.g3.geom_almost_equals(reprojected_back)))
+        assert np.all(self.g3.geom_almost_equals(reprojected_back))
 
         # As dict
         reprojected = self.g3.to_crs({'proj': 'utm', 'zone': '30N'})
         reprojected_back = reprojected.to_crs(epsg=4326)
-        self.assertTrue(np.alltrue(self.g3.geom_almost_equals(reprojected_back)))
+        assert np.all(self.g3.geom_almost_equals(reprojected_back))
 
         # Set to equivalent string, convert, compare to original
         copy = self.g3.copy()
         copy.crs = '+init=epsg:4326'
         reprojected = copy.to_crs({'proj': 'utm', 'zone': '30N'})
         reprojected_back = reprojected.to_crs(epsg=4326)
-        self.assertTrue(np.alltrue(self.g3.geom_almost_equals(reprojected_back)))
+        assert np.all(self.g3.geom_almost_equals(reprojected_back))
 
         # Conversions by different format
         reprojected_string = self.g3.to_crs('+proj=utm +zone=30N')
         reprojected_dict = self.g3.to_crs({'proj': 'utm', 'zone': '30N'})
-        self.assertTrue(np.alltrue(reprojected_string.geom_almost_equals(reprojected_dict)))
-
-if __name__ == '__main__':
-    unittest.main()
+        assert np.all(reprojected_string.geom_almost_equals(reprojected_dict))
diff --git a/geopandas/tests/test_merge.py b/geopandas/tests/test_merge.py
index 17cd238..5e29edf 100644
--- a/geopandas/tests/test_merge.py
+++ b/geopandas/tests/test_merge.py
@@ -4,12 +4,11 @@ import pandas as pd
 from shapely.geometry import Point
 
 from geopandas import GeoDataFrame, GeoSeries
-from geopandas.tests.util import unittest
 
 
-class TestMerging(unittest.TestCase):
+class TestMerging:
 
-    def setUp(self):
+    def setup_method(self):
 
         self.gseries = GeoSeries([Point(i, i) for i in range(3)])
         self.series = pd.Series([1, 2, 3])
@@ -18,18 +17,18 @@ class TestMerging(unittest.TestCase):
 
     def _check_metadata(self, gdf, geometry_column_name='geometry', crs=None):
 
-        self.assertEqual(gdf._geometry_column_name, geometry_column_name)
-        self.assertEqual(gdf.crs, crs)
+        assert gdf._geometry_column_name == geometry_column_name
+        assert gdf.crs == crs
 
     def test_merge(self):
 
         res = self.gdf.merge(self.df, left_on='values', right_on='col1')
 
         # check result is a GeoDataFrame
-        self.assert_(isinstance(res, GeoDataFrame))
+        assert isinstance(res, GeoDataFrame)
 
         # check geometry property gives GeoSeries
-        self.assert_(isinstance(res.geometry, GeoSeries))
+        assert isinstance(res.geometry, GeoSeries)
 
         # check metadata
         self._check_metadata(res)
@@ -39,24 +38,24 @@ class TestMerging(unittest.TestCase):
         self.gdf = (self.gdf.rename(columns={'geometry': 'points'})
                             .set_geometry('points'))
         res = self.gdf.merge(self.df, left_on='values', right_on='col1')
-        self.assert_(isinstance(res, GeoDataFrame))
-        self.assert_(isinstance(res.geometry, GeoSeries))
+        assert isinstance(res, GeoDataFrame)
+        assert isinstance(res.geometry, GeoSeries)
         self._check_metadata(res, 'points', self.gdf.crs)
 
     def test_concat_axis0(self):
 
         res = pd.concat([self.gdf, self.gdf])
 
-        self.assertEqual(res.shape, (6, 2))
-        self.assert_(isinstance(res, GeoDataFrame))
-        self.assert_(isinstance(res.geometry, GeoSeries))
+        assert res.shape == (6, 2)
+        assert isinstance(res, GeoDataFrame)
+        assert isinstance(res.geometry, GeoSeries)
         self._check_metadata(res)
 
     def test_concat_axis1(self):
 
         res = pd.concat([self.gdf, self.df], axis=1)
 
-        self.assertEqual(res.shape, (3, 4))
-        self.assert_(isinstance(res, GeoDataFrame))
-        self.assert_(isinstance(res.geometry, GeoSeries))
+        assert res.shape == (3, 4)
+        assert isinstance(res, GeoDataFrame)
+        assert isinstance(res.geometry, GeoSeries)
         self._check_metadata(res)
diff --git a/geopandas/tests/test_overlay.py b/geopandas/tests/test_overlay.py
index 051618a..d928224 100644
--- a/geopandas/tests/test_overlay.py
+++ b/geopandas/tests/test_overlay.py
@@ -1,34 +1,34 @@
 from __future__ import absolute_import
 
-import tempfile
-import shutil
-
 from shapely.geometry import Point
 
-from geopandas import GeoDataFrame, read_file
-from geopandas.tests.util import unittest, download_nybb
-from geopandas import overlay
+import geopandas
+from geopandas import GeoDataFrame, read_file, overlay
+
+import pytest
 
 
-class TestDataFrame(unittest.TestCase):
+class TestDataFrame:
 
-    def setUp(self):
+    def setup_method(self):
         N = 10
 
-        nybb_filename, nybb_zip_path = download_nybb()
+        nybb_filename = geopandas.datasets.get_path('nybb')
 
-        self.polydf = read_file(nybb_zip_path, vfs='zip://' + nybb_filename)
-        self.tempdir = tempfile.mkdtemp()
+        self.polydf = read_file(nybb_filename)
         self.crs = {'init': 'epsg:4326'}
         b = [int(x) for x in self.polydf.total_bounds]
-        self.polydf2 = GeoDataFrame([
-            {'geometry' : Point(x, y).buffer(10000), 'value1': x + y, 'value2': x - y}
-            for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
-                            range(b[1], b[3], int((b[3]-b[1])/N)))], crs=self.crs)
-        self.pointdf = GeoDataFrame([
-            {'geometry' : Point(x, y), 'value1': x + y, 'value2': x - y}
-            for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
-                            range(b[1], b[3], int((b[3]-b[1])/N)))], crs=self.crs)
+        self.polydf2 = GeoDataFrame(
+            [{'geometry': Point(x, y).buffer(10000), 'value1': x + y,
+              'value2': x - y}
+             for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
+                             range(b[1], b[3], int((b[3]-b[1])/N)))],
+            crs=self.crs)
+        self.pointdf = GeoDataFrame(
+            [{'geometry': Point(x, y), 'value1': x + y, 'value2': x - y}
+             for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
+                             range(b[1], b[3], int((b[3]-b[1])/N)))],
+            crs=self.crs)
 
         # TODO this appears to be necessary;
         # why is the sindex not generated automatically?
@@ -36,79 +36,69 @@ class TestDataFrame(unittest.TestCase):
 
         self.union_shape = (180, 7)
 
-    def tearDown(self):
-        shutil.rmtree(self.tempdir)
-
     def test_union(self):
         df = overlay(self.polydf, self.polydf2, how="union")
-        self.assertTrue(type(df) is GeoDataFrame)
-        self.assertEquals(df.shape, self.union_shape)
-        self.assertTrue('value1' in df.columns and 'Shape_Area' in df.columns)
+        assert type(df) is GeoDataFrame
+        assert df.shape == self.union_shape
+        assert 'value1' in df.columns and 'Shape_Area' in df.columns
 
     def test_union_no_index(self):
         # explicitly ignore indicies
         dfB = overlay(self.polydf, self.polydf2, how="union", use_sindex=False)
-        self.assertEquals(dfB.shape, self.union_shape)
+        assert dfB.shape == self.union_shape
 
         # remove indicies from df
         self.polydf._sindex = None
         self.polydf2._sindex = None
         dfC = overlay(self.polydf, self.polydf2, how="union")
-        self.assertEquals(dfC.shape, self.union_shape)
+        assert dfC.shape == self.union_shape
 
     def test_intersection(self):
         df = overlay(self.polydf, self.polydf2, how="intersection")
-        self.assertIsNotNone(df['BoroName'][0])
-        self.assertEquals(df.shape, (68, 7))
+        assert df['BoroName'][0] is not None
+        assert df.shape == (68, 7)
 
     def test_identity(self):
         df = overlay(self.polydf, self.polydf2, how="identity")
-        self.assertEquals(df.shape, (154, 7))
+        assert df.shape == (154, 7)
 
     def test_symmetric_difference(self):
         df = overlay(self.polydf, self.polydf2, how="symmetric_difference")
-        self.assertEquals(df.shape, (122, 7))
+        assert df.shape == (122, 7)
 
     def test_difference(self):
         df = overlay(self.polydf, self.polydf2, how="difference")
-        self.assertEquals(df.shape, (86, 7))
+        assert df.shape == (86, 7)
 
     def test_bad_how(self):
-        self.assertRaises(ValueError,
-                          overlay, self.polydf, self.polydf, how="spandex")
+        with pytest.raises(ValueError):
+            overlay(self.polydf, self.polydf, how="spandex")
 
     def test_nonpoly(self):
-        self.assertRaises(TypeError,
-                          overlay, self.pointdf, self.polydf, how="union")
+        with pytest.raises(TypeError):
+            overlay(self.pointdf, self.polydf, how="union")
 
     def test_duplicate_column_name(self):
         polydf2r = self.polydf2.rename(columns={'value2': 'Shape_Area'})
         df = overlay(self.polydf, polydf2r, how="union")
-        self.assertTrue('Shape_Area_2' in df.columns and 'Shape_Area' in df.columns)
+        assert 'Shape_Area_2' in df.columns and 'Shape_Area' in df.columns
 
     def test_geometry_not_named_geometry(self):
         # Issue #306
         # Add points and flip names
         polydf3 = self.polydf.copy()
-        polydf3 = polydf3.rename(columns={'geometry':'polygons'})
+        polydf3 = polydf3.rename(columns={'geometry': 'polygons'})
         polydf3 = polydf3.set_geometry('polygons')
         polydf3['geometry'] = self.pointdf.geometry.loc[0:4]
-        self.assertTrue(polydf3.geometry.name == 'polygons')
+        assert polydf3.geometry.name == 'polygons'
 
         df = overlay(polydf3, self.polydf2, how="union")
-        self.assertTrue(type(df) is GeoDataFrame)
+        assert type(df) is GeoDataFrame
         
         df2 = overlay(self.polydf, self.polydf2, how="union")
-        self.assertTrue(df.geom_almost_equals(df2).all())
+        assert df.geom_almost_equals(df2).all()
 
     def test_geoseries_warning(self):
         # Issue #305
-
-        def f():
+        with pytest.raises(NotImplementedError):
             overlay(self.polydf, self.polydf2.geometry, how="union")
-        self.assertRaises(NotImplementedError, f)
-
-
-
-
-
diff --git a/geopandas/tests/test_pandas_methods.py b/geopandas/tests/test_pandas_methods.py
new file mode 100644
index 0000000..38d653b
--- /dev/null
+++ b/geopandas/tests/test_pandas_methods.py
@@ -0,0 +1,262 @@
+from __future__ import absolute_import
+
+from distutils.version import LooseVersion
+
+from six import PY3
+
+import numpy as np
+import pandas as pd
+import shapely
+from shapely.geometry import Point, Polygon
+
+from geopandas import GeoDataFrame, GeoSeries
+from geopandas.tests.util import assert_geoseries_equal
+
+import pytest
+from numpy.testing import assert_array_equal
+from pandas.util.testing import assert_frame_equal, assert_series_equal
+
+
+@pytest.fixture
+def s():
+    return GeoSeries([Point(x, y) for x, y in zip(range(3), range(3))])
+
+
+@pytest.fixture
+def df():
+    return GeoDataFrame({'geometry': [Point(x, x) for x in range(3)],
+                         'value1': np.arange(3, dtype='int64'),
+                         'value2': np.array([1, 2, 1], dtype='int64')})
+
+
+def test_repr(s, df):
+    assert 'POINT' in repr(s)
+    assert 'POINT' in repr(df)
+
+
+def test_indexing(s, df):
+
+    # accessing scalar from the geometry (column)
+    exp = Point(1, 1)
+    assert s[1] == exp
+    assert s.loc[1] == exp
+    assert s.iloc[1] == exp
+    assert df.loc[1, 'geometry'] == exp
+    assert df.iloc[1, 0] == exp
+
+    # multiple values
+    exp = GeoSeries([Point(2, 2), Point(0, 0)], index=[2, 0])
+    assert_geoseries_equal(s.loc[[2, 0]], exp)
+    assert_geoseries_equal(s.iloc[[2, 0]], exp)
+    assert_geoseries_equal(s.reindex([2, 0]), exp)
+    assert_geoseries_equal(df.loc[[2, 0], 'geometry'], exp)
+    # TODO here iloc does not return a GeoSeries
+    assert_series_equal(df.iloc[[2, 0], 0], exp, check_series_type=False,
+                        check_names=False)
+
+    # boolean indexing
+    exp = GeoSeries([Point(0, 0), Point(2, 2)], index=[0, 2])
+    mask = np.array([True, False, True])
+    assert_geoseries_equal(s[mask], exp)
+    assert_geoseries_equal(s.loc[mask], exp)
+    assert_geoseries_equal(df[mask]['geometry'], exp)
+    assert_geoseries_equal(df.loc[mask, 'geometry'], exp)
+
+
+def test_assignment(s, df):
+    exp = GeoSeries([Point(10, 10), Point(1, 1), Point(2, 2)])
+
+    s2 = s.copy()
+    s2[0] = Point(10, 10)
+    assert_geoseries_equal(s2, exp)
+
+    s2 = s.copy()
+    s2.loc[0] = Point(10, 10)
+    assert_geoseries_equal(s2, exp)
+
+    s2 = s.copy()
+    s2.iloc[0] = Point(10, 10)
+    assert_geoseries_equal(s2, exp)
+
+    df2 = df.copy()
+    df2.loc[0, 'geometry'] = Point(10, 10)
+    assert_geoseries_equal(df2['geometry'], exp)
+
+    df2 = df.copy()
+    df2.iloc[0, 0] = Point(10, 10)
+    assert_geoseries_equal(df2['geometry'], exp)
+
+
+def test_assign(df):
+    res = df.assign(new=1)
+    exp = df.copy()
+    exp['new'] = 1
+    assert isinstance(res, GeoDataFrame)
+    assert_frame_equal(res, exp, )
+
+
+def test_astype(s):
+
+    with pytest.raises(TypeError):
+        s.astype(int)
+
+    assert s.astype(str)[0] == 'POINT (0 0)'
+
+
+def test_to_csv(df):
+
+    exp = ('geometry,value1,value2\nPOINT (0 0),0,1\nPOINT (1 1),1,2\n'
+           'POINT (2 2),2,1\n')
+    assert df.to_csv(index=False) == exp
+
+
+@pytest.mark.skipif(str(pd.__version__) < LooseVersion('0.17'),
+                    reason="s.max() does not raise on 0.16")
+def test_numerical_operations(s, df):
+
+    # df methods ignore the geometry column
+    exp = pd.Series([3, 4], index=['value1', 'value2'])
+    assert_series_equal(df.sum(), exp)
+
+    # series methods raise error
+    with pytest.raises(TypeError):
+        s.sum()
+
+    if PY3:
+        # in python 2, objects are still orderable
+        with pytest.raises(TypeError):
+            s.max()
+
+    with pytest.raises(TypeError):
+        s.idxmax()
+
+    # numerical ops raise an error
+    with pytest.raises(TypeError):
+        df + 1
+
+    with pytest.raises(TypeError):
+        s + 1
+
+    # boolean comparisons work
+    res = df == 100
+    exp = pd.DataFrame(False, index=df.index, columns=df.columns)
+    assert_frame_equal(res, exp)
+
+
+def test_where(s):
+    res = s.where(np.array([True, False, True]))
+    exp = s.copy()
+    exp[1] = np.nan
+    assert_series_equal(res, exp)
+
+
+def test_select_dtypes(df):
+    res = df.select_dtypes(include=[np.number])
+    exp = df[['value1', 'value2']]
+    assert_frame_equal(res, exp)
+
+# Missing values
+
+
+@pytest.mark.xfail
+def test_fillna():
+    # this currently does not work (it seems to fill in the second coordinate
+    # of the point)
+    s2 = GeoSeries([Point(0, 0), None, Point(2, 2)])
+    res = s2.fillna(Point(1, 1))
+    assert_geoseries_equal(res, s)
+
+
+@pytest.mark.xfail
+def test_dropna():
+    # this currently does not work (doesn't drop)
+    s2 = GeoSeries([Point(0, 0), None, Point(2, 2)])
+    res = s2.dropna()
+    exp = s2.loc[[0, 2]]
+    assert_geoseries_equal(res, exp)
+
+
+@pytest.mark.parametrize("NA", [None, np.nan, Point(), Polygon()])
+def test_isna(NA):
+    s2 = GeoSeries([Point(0, 0), NA, Point(2, 2)])
+    exp = pd.Series([False, True, False])
+    res = s2.isnull()
+    assert_series_equal(res, exp)
+    res = s2.isna()
+    assert_series_equal(res, exp)
+    res = s2.notnull()
+    assert_series_equal(res, ~exp)
+    res = s2.notna()
+    assert_series_equal(res, ~exp)
+
+
+# Groupby / algos
+
+
+@pytest.mark.xfail
+def test_unique():
+    # this currently raises a TypeError
+    s = GeoSeries([Point(0, 0), Point(0, 0), Point(2, 2)])
+    exp = np.array([Point(0, 0), Point(2, 2)])
+    assert_array_equal(s.unique(), exp)
+
+
+@pytest.mark.xfail
+def test_value_counts():
+    # each object is considered unique
+    s = GeoSeries([Point(0, 0), Point(1, 1), Point(0, 0)])
+    res = s.value_counts()
+    exp = pd.Series([2, 1], index=[Point(0, 0), Point(1, 1)])
+    assert_series_equal(res, exp)
+
+
+@pytest.mark.xfail
+def test_drop_duplicates_series():
+    # currently, geoseries with identical values are not recognized as
+    # duplicates
+    dups = GeoSeries([Point(0, 0), Point(0, 0)])
+    dropped = dups.drop_duplicates()
+    assert len(dropped) == 1
+
+
+@pytest.mark.xfail
+def test_drop_duplicates_frame():
+    # currently, dropping duplicates in a geodataframe produces a TypeError
+    # better behavior would be dropping the duplicated points
+    gdf_len = 3
+    dup_gdf = GeoDataFrame({'geometry': [Point(0, 0) for _ in range(gdf_len)],
+                            'value1': range(gdf_len)})
+    dropped_geometry = dup_gdf.drop_duplicates(subset="geometry")
+    assert len(dropped_geometry) == 1
+    dropped_all = dup_gdf.drop_duplicates()
+    assert len(dropped_all) == gdf_len
+
+
+def test_groupby(df):
+
+    # counts work fine
+    res = df.groupby('value2').count()
+    exp = pd.DataFrame({'geometry': [2, 1], 'value1': [2, 1],
+                        'value2': [1, 2]}).set_index('value2')
+    assert_frame_equal(res, exp)
+
+    # reductions ignore geometry column
+    res = df.groupby('value2').sum()
+    exp = pd.DataFrame({'value1': [2, 1],
+                        'value2': [1, 2]}, dtype='int64').set_index('value2')
+    assert_frame_equal(res, exp)
+
+    # applying on the geometry column
+    res = df.groupby('value2')['geometry'].apply(lambda x: x.cascaded_union)
+    exp = pd.Series([shapely.geometry.MultiPoint([(0, 0), (2, 2)]),
+                     Point(1, 1)],
+                    index=pd.Index([1, 2], name='value2'), name='geometry')
+    assert_series_equal(res, exp)
+
+
+def test_groupby_groups(df):
+    g = df.groupby('value2')
+    res = g.get_group(1)
+    assert isinstance(res, GeoDataFrame)
+    exp = df.loc[[0, 2]]
+    assert_frame_equal(res, exp)
diff --git a/geopandas/tests/test_plotting.py b/geopandas/tests/test_plotting.py
index ac6a468..b3f98b7 100644
--- a/geopandas/tests/test_plotting.py
+++ b/geopandas/tests/test_plotting.py
@@ -1,311 +1,632 @@
 from __future__ import absolute_import, division
 
-import numpy as np
-import os
-import shutil
-import tempfile
 from distutils.version import LooseVersion
+import itertools
+import warnings
 
+import numpy as np
 import matplotlib
-matplotlib.use('Agg', warn=False)
-from matplotlib.pyplot import Artist, savefig, clf, cm, get_cmap
-from matplotlib.testing.noseclasses import ImageComparisonFailure
-from matplotlib.testing.compare import compare_images
-from numpy import cos, sin, pi
-from shapely.geometry import Polygon, LineString, Point
-from six.moves import xrange
-from .util import unittest
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from shapely.affinity import rotate
+from shapely.geometry import MultiPolygon, Polygon, LineString, Point
 
 from geopandas import GeoSeries, GeoDataFrame, read_file
 
+import pytest
 
-# If set to True, generate images rather than perform tests (all tests will pass!)
-GENERATE_BASELINE = False
 
-BASELINE_DIR = os.path.join(os.path.dirname(__file__), 'baseline_images', 'test_plotting')
+@pytest.fixture(autouse=True)
+def close_figures(request):
+    yield
+    plt.close('all')
 
-TRAVIS = bool(os.environ.get('TRAVIS', False))
-MPL_DEV = matplotlib.__version__ > LooseVersion('1.5.1')
 
-class TestImageComparisons(unittest.TestCase):
+try:
+    cycle = matplotlib.rcParams['axes.prop_cycle'].by_key()
+    MPL_DFT_COLOR = cycle['color'][0]
+except KeyError:
+    MPL_DFT_COLOR = matplotlib.rcParams['axes.color_cycle'][0]
 
-    @classmethod
-    def setUpClass(cls):
-        cls.tempdir = tempfile.mkdtemp()
-        return
 
-    @classmethod
-    def tearDownClass(cls):
-        shutil.rmtree(cls.tempdir)
-        return
-
-    def _compare_images(self, ax, filename, tol=10):
-        """ Helper method to do the comparisons """
-        assert isinstance(ax, Artist)
-        if GENERATE_BASELINE:
-            savefig(os.path.join(BASELINE_DIR, filename))
-        savefig(os.path.join(self.tempdir, filename))
-        err = compare_images(os.path.join(BASELINE_DIR, filename),
-                             os.path.join(self.tempdir, filename),
-                             tol, in_decorator=True)
-        if err:
-            raise ImageComparisonFailure('images not close: %(actual)s '
-                                         'vs. %(expected)s '
-                                         '(RMS %(rms).3f)' % err)
-
-    @unittest.skipIf(MPL_DEV, 'Skip for development version of matplotlib')
-    def test_poly_plot(self):
-        """ Test plotting a simple series of polygons """
-        clf()
-        filename = 'poly_plot.png'
-        t1 = Polygon([(0, 0), (1, 0), (1, 1)])
-        t2 = Polygon([(1, 0), (2, 0), (2, 1)])
-        polys = GeoSeries([t1, t2])
-        ax = polys.plot()
-        self._compare_images(ax=ax, filename=filename)
-
-    @unittest.skipIf(MPL_DEV, 'Skip for development version of matplotlib')
-    def test_point_plot(self):
-        """ Test plotting a simple series of points """
-        clf()
-        filename = 'points_plot.png'
-        N = 10
-        points = GeoSeries(Point(i, i) for i in xrange(N))
-        ax = points.plot()
-        self._compare_images(ax=ax, filename=filename)
-
-    @unittest.skipIf(MPL_DEV, 'Skip for development version of matplotlib')
-    def test_line_plot(self):
-        """ Test plotting a simple series of lines """
-        clf()
-        filename = 'lines_plot.png'
-        N = 10
-        lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(N)])
-        ax = lines.plot()
-        self._compare_images(ax=ax, filename=filename)
-
-    @unittest.skipIf(TRAVIS, 'Skip on Travis (fails even though it passes locally)')
-    def test_plot_GeoDataFrame_with_kwargs(self):
-        """
-        Test plotting a simple GeoDataFrame consisting of a series of polygons
-        with increasing values using various extra kwargs.
-        """
-        clf()
-        filename = 'poly_plot_with_kwargs.png'
-        ts = np.linspace(0, 2*pi, 10, endpoint=False)
-
-        # Build GeoDataFrame from a series of triangles wrapping around in a ring
-        # and a second column containing a list of increasing values.
-        r1 = 1.0  # radius of inner ring boundary
-        r2 = 1.5  # radius of outer ring boundary
-
-        def make_triangle(t0, t1):
-            return Polygon([(r1*cos(t0), r1*sin(t0)),
-                            (r2*cos(t0), r2*sin(t0)),
-                            (r1*cos(t1), r1*sin(t1))])
-
-        polys = GeoSeries([make_triangle(t0, t1) for t0, t1 in zip(ts, ts[1:])])
-        values = np.arange(len(polys))
-        df = GeoDataFrame({'geometry': polys, 'values': values})
-
-        # Plot the GeoDataFrame using various keyword arguments to see if they are honoured
-        ax = df.plot(column='values', cmap=cm.RdBu, vmin=+2, vmax=None, figsize=(8, 4))
-        self._compare_images(ax=ax, filename=filename)
-
-
-
-class TestPointPlotting(unittest.TestCase):
-
-    def setUp(self):
+class TestPointPlotting:
 
+    def setup_method(self):
         self.N = 10
         self.points = GeoSeries(Point(i, i) for i in range(self.N))
         values = np.arange(self.N)
         self.df = GeoDataFrame({'geometry': self.points, 'values': values})
 
-    @unittest.skipIf(MPL_DEV, 'Skip for development version of matplotlib')
+    def test_figsize(self):
+
+        ax = self.points.plot(figsize=(1, 1))
+        np.testing.assert_array_equal(ax.figure.get_size_inches(), (1, 1))
+
+        ax = self.df.plot(figsize=(1, 1))
+        np.testing.assert_array_equal(ax.figure.get_size_inches(), (1, 1))
+
     def test_default_colors(self):
 
-        ## without specifying values -> max 9 different colors
+        # # without specifying values -> uniform color
 
         # GeoSeries
         ax = self.points.plot()
-        cmap = get_cmap('Set1', 9)
-        expected_colors = cmap(list(range(9))*2)
-        _check_colors(ax.get_lines(), expected_colors)
+        _check_colors(self.N, ax.collections[0].get_facecolors(),
+                      [MPL_DFT_COLOR] * self.N)
 
-        # GeoDataFrame -> uses 'jet' instead of 'Set1'
+        # GeoDataFrame
         ax = self.df.plot()
-        cmap = get_cmap('jet', 9)
-        expected_colors = cmap(list(range(9))*2)
-        _check_colors(ax.get_lines(), expected_colors)
-
-        ## with specifying values
+        _check_colors(self.N, ax.collections[0].get_facecolors(),
+                      [MPL_DFT_COLOR] * self.N)
 
+        # # with specifying values -> different colors for all 10 values
         ax = self.df.plot(column='values')
-        cmap = get_cmap('jet')
+        cmap = plt.get_cmap()
         expected_colors = cmap(np.arange(self.N)/(self.N-1))
-
-        _check_colors(ax.get_lines(), expected_colors)
+        _check_colors(self.N, ax.collections[0].get_facecolors(),
+                      expected_colors)
 
     def test_colormap(self):
 
-        ## without specifying values -> max 9 different colors
+        # without specifying values but cmap specified -> no uniform color
+        # but different colors for all points
 
         # GeoSeries
         ax = self.points.plot(cmap='RdYlGn')
-        cmap = get_cmap('RdYlGn', 9)
-        expected_colors = cmap(list(range(9))*2)
-        _check_colors(ax.get_lines(), expected_colors)
+        cmap = plt.get_cmap('RdYlGn')
+        exp_colors = cmap(np.arange(self.N) / (self.N - 1))
+        _check_colors(self.N, ax.collections[0].get_facecolors(), exp_colors)
 
-        # GeoDataFrame -> same as GeoSeries in this case
         ax = self.df.plot(cmap='RdYlGn')
-        _check_colors(ax.get_lines(), expected_colors)
-
-        ## with specifying values
+        _check_colors(self.N, ax.collections[0].get_facecolors(), exp_colors)
 
+        # # with specifying values -> different colors for all 10 values
         ax = self.df.plot(column='values', cmap='RdYlGn')
-        cmap = get_cmap('RdYlGn')
-        expected_colors = cmap(np.arange(self.N)/(self.N-1))
-        _check_colors(ax.get_lines(), expected_colors)
+        cmap = plt.get_cmap('RdYlGn')
+        _check_colors(self.N, ax.collections[0].get_facecolors(), exp_colors)
+
+        # when using a cmap with specified lut -> limited number of different
+        # colors
+        ax = self.points.plot(cmap=plt.get_cmap('Set1', lut=5))
+        cmap = plt.get_cmap('Set1', lut=5)
+        exp_colors = cmap(list(range(5))*3)
+        _check_colors(self.N, ax.collections[0].get_facecolors(), exp_colors)
 
     def test_single_color(self):
 
         ax = self.points.plot(color='green')
-        _check_colors(ax.get_lines(), ['green']*self.N)
+        _check_colors(self.N, ax.collections[0].get_facecolors(), ['green']*self.N)
 
         ax = self.df.plot(color='green')
-        _check_colors(ax.get_lines(), ['green']*self.N)
+        _check_colors(self.N, ax.collections[0].get_facecolors(), ['green']*self.N)
 
-        ax = self.df.plot(column='values', color='green')
-        _check_colors(ax.get_lines(), ['green']*self.N)
+        with warnings.catch_warnings(record=True) as _:  # don't print warning
+            # 'color' overrides 'column'
+            ax = self.df.plot(column='values', color='green')
+            _check_colors(self.N, ax.collections[0].get_facecolors(), ['green']*self.N)
 
     def test_style_kwargs(self):
 
-        # markersize
         ax = self.points.plot(markersize=10)
-        ms = [l.get_markersize() for l in ax.get_lines()]
-        assert ms == [10] * self.N
+        assert ax.collections[0].get_sizes() == [10]
 
         ax = self.df.plot(markersize=10)
-        ms = [l.get_markersize() for l in ax.get_lines()]
-        assert ms == [10] * self.N
+        assert ax.collections[0].get_sizes() == [10]
 
         ax = self.df.plot(column='values', markersize=10)
-        ms = [l.get_markersize() for l in ax.get_lines()]
-        assert ms == [10] * self.N
+        assert ax.collections[0].get_sizes() == [10]
 
+    def test_legend(self):
+        with warnings.catch_warnings(record=True) as _:  # don't print warning
+            # legend ignored if color is given.
+            ax = self.df.plot(column='values', color='green', legend=True)
+            assert len(ax.get_figure().axes) == 1  # no separate legend axis
+
+        # legend ignored if no column is given.
+        ax = self.df.plot(legend=True)
+        assert len(ax.get_figure().axes) == 1  # no separate legend axis
+
+        # # Continuous legend
+        # the colorbar matches the Point colors
+        ax = self.df.plot(column='values', cmap='RdYlGn', legend=True)
+        point_colors = ax.collections[0].get_facecolors()
+        cbar_colors = ax.get_figure().axes[1].collections[0].get_facecolors()
+        # first point == bottom of colorbar
+        np.testing.assert_array_equal(point_colors[0], cbar_colors[0])
+        # last point == top of colorbar
+        np.testing.assert_array_equal(point_colors[-1], cbar_colors[-1])
+
+        # # Categorical legend
+        # the colorbar matches the Point colors
+        ax = self.df.plot(column='values', categorical=True, legend=True)
+        point_colors = ax.collections[0].get_facecolors()
+        cbar_colors = ax.get_legend().axes.collections[0].get_facecolors()
+        # first point == bottom of colorbar
+        np.testing.assert_array_equal(point_colors[0], cbar_colors[0])
+        # last point == top of colorbar
+        np.testing.assert_array_equal(point_colors[-1], cbar_colors[-1])
+
+
+class TestPointZPlotting:
+
+    def setup_method(self):
+        self.N = 10
+        self.points = GeoSeries(Point(i, i, i) for i in range(self.N))
+        values = np.arange(self.N)
+        self.df = GeoDataFrame({'geometry': self.points, 'values': values})
 
-class TestLineStringPlotting(unittest.TestCase):
+    def test_plot(self):
+        # basic test that points with z coords don't break plotting
+        self.df.plot()
 
-    def setUp(self):
 
+class TestLineStringPlotting:
+
+    def setup_method(self):
         self.N = 10
         values = np.arange(self.N)
-        self.lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(self.N)])
+        self.lines = GeoSeries([LineString([(0, i), (4, i+0.5), (9, i)])
+                                for i in range(self.N)],
+                               index=list('ABCDEFGHIJ'))
         self.df = GeoDataFrame({'geometry': self.lines, 'values': values})
 
     def test_single_color(self):
 
         ax = self.lines.plot(color='green')
-        _check_colors(ax.get_lines(), ['green']*self.N)
+        _check_colors(self.N, ax.collections[0].get_colors(), ['green']*self.N)
 
         ax = self.df.plot(color='green')
-        _check_colors(ax.get_lines(), ['green']*self.N)
+        _check_colors(self.N, ax.collections[0].get_colors(), ['green']*self.N)
 
-        ax = self.df.plot(column='values', color='green')
-        _check_colors(ax.get_lines(), ['green']*self.N)
+        with warnings.catch_warnings(record=True) as _:  # don't print warning
+            # 'color' overrides 'column'
+            ax = self.df.plot(column='values', color='green')
+            _check_colors(self.N, ax.collections[0].get_colors(), ['green']*self.N)
 
     def test_style_kwargs(self):
 
-        # linestyle
-        ax = self.lines.plot(linestyle='dashed')
-        ls = [l.get_linestyle() for l in ax.get_lines()]
-        assert ls == ['--'] * self.N
+        # linestyle (style patterns depend on linewidth, therefore pin to 1)
+        linestyle = 'dashed'
+        ax = self.lines.plot(linestyle=linestyle, linewidth=1)
+        exp_ls = _style_to_linestring_onoffseq(linestyle)
+        for ls in ax.collections[0].get_linestyles():
+            assert ls[0] == exp_ls[0]
+            assert tuple(ls[1]) == exp_ls[1]
 
-        ax = self.df.plot(linestyle='dashed')
-        ls = [l.get_linestyle() for l in ax.get_lines()]
-        assert ls == ['--'] * self.N
+        ax = self.df.plot(linestyle=linestyle, linewidth=1)
+        for ls in ax.collections[0].get_linestyles():
+            assert ls[0] == exp_ls[0]
+            assert tuple(ls[1]) == exp_ls[1]
 
-        ax = self.df.plot(column='values', linestyle='dashed')
-        ls = [l.get_linestyle() for l in ax.get_lines()]
-        assert ls == ['--'] * self.N
+        ax = self.df.plot(column='values', linestyle=linestyle, linewidth=1)
+        for ls in ax.collections[0].get_linestyles():
+            assert ls[0] == exp_ls[0]
+            assert tuple(ls[1]) == exp_ls[1]
 
 
-class TestPolygonPlotting(unittest.TestCase):
+class TestPolygonPlotting:
 
-    def setUp(self):
+    def setup_method(self):
 
         t1 = Polygon([(0, 0), (1, 0), (1, 1)])
         t2 = Polygon([(1, 0), (2, 0), (2, 1)])
-        self.polys = GeoSeries([t1, t2])
+        self.polys = GeoSeries([t1, t2], index=list('AB'))
         self.df = GeoDataFrame({'geometry': self.polys, 'values': [0, 1]})
-        return
+
+        multipoly1 = MultiPolygon([t1, t2])
+        multipoly2 = rotate(multipoly1, 180)
+        self.df2 = GeoDataFrame({'geometry': [multipoly1, multipoly2],
+                                 'values': [0, 1]})
 
     def test_single_color(self):
 
         ax = self.polys.plot(color='green')
-        _check_colors(ax.patches, ['green']*2, alpha=0.5)
+        _check_colors(2, ax.collections[0].get_facecolors(), ['green']*2)
+        # color only sets facecolor
+        _check_colors(2, ax.collections[0].get_edgecolors(), ['k'] * 2)
 
         ax = self.df.plot(color='green')
-        _check_colors(ax.patches, ['green']*2, alpha=0.5)
+        _check_colors(2, ax.collections[0].get_facecolors(), ['green']*2)
+        _check_colors(2, ax.collections[0].get_edgecolors(), ['k'] * 2)
 
-        ax = self.df.plot(column='values', color='green')
-        _check_colors(ax.patches, ['green']*2, alpha=0.5)
+        with warnings.catch_warnings(record=True) as _:  # don't print warning
+            # 'color' overrides 'values'
+            ax = self.df.plot(column='values', color='green')
+            _check_colors(2, ax.collections[0].get_facecolors(), ['green']*2)
 
     def test_vmin_vmax(self):
-
         # when vmin == vmax, all polygons should be the same color
+
+        # non-categorical
+        ax = self.df.plot(column='values', categorical=False, vmin=0, vmax=0)
+        actual_colors = ax.collections[0].get_facecolors()
+        np.testing.assert_array_equal(actual_colors[0], actual_colors[1])
+
+        # categorical
         ax = self.df.plot(column='values', categorical=True, vmin=0, vmax=0)
-        cmap = get_cmap('Set1', 2)
-        self.assertEqual(ax.patches[0].get_facecolor(), ax.patches[1].get_facecolor())
+        actual_colors = ax.collections[0].get_facecolors()
+        np.testing.assert_array_equal(actual_colors[0], actual_colors[1])
 
-    def test_facecolor(self):
-        t1 = Polygon([(0, 0), (1, 0), (1, 1)])
-        t2 = Polygon([(1, 0), (2, 0), (2, 1)])
-        polys = GeoSeries([t1, t2])
-        df = GeoDataFrame({'geometry': polys, 'values': [0, 1]})
+    def test_style_kwargs(self):
+
+        # facecolor overrides default cmap when color is not set
+        ax = self.polys.plot(facecolor='k')
+        _check_colors(2, ax.collections[0].get_facecolors(), ['k']*2)
+
+        # facecolor overrides more general-purpose color when both are set
+        ax = self.polys.plot(color='red', facecolor='k')
+        # TODO with new implementation, color overrides facecolor
+        # _check_colors(2, ax.collections[0], ['k']*2, alpha=0.5)
 
-        ax = polys.plot(facecolor='k')
-        _check_colors(ax.patches, ['k']*2, alpha=0.5)
+        # edgecolor
+        ax = self.polys.plot(edgecolor='red')
+        np.testing.assert_array_equal([(1, 0, 0, 1)],
+                                      ax.collections[0].get_edgecolors())
 
+        ax = self.df.plot('values', edgecolor='red')
+        np.testing.assert_array_equal([(1, 0, 0, 1)],
+                                      ax.collections[0].get_edgecolors())
 
-class TestPySALPlotting(unittest.TestCase):
+        # alpha sets both edge and face
+        ax = self.polys.plot(facecolor='g', edgecolor='r', alpha=0.4)
+        _check_colors(2, ax.collections[0].get_facecolors(), ['g'] * 2, alpha=0.4)
+        _check_colors(2, ax.collections[0].get_edgecolors(), ['r'] * 2, alpha=0.4)
+
+    def test_multipolygons(self):
+
+        # MultiPolygons
+        ax = self.df2.plot()
+        assert len(ax.collections[0].get_paths()) == 4
+        _check_colors(4, ax.collections[0].get_facecolors(), [MPL_DFT_COLOR]*4)
+
+        ax = self.df2.plot('values')
+        cmap = plt.get_cmap(lut=2)
+        # colors are repeated for all components within a MultiPolygon
+        expected_colors = [cmap(0), cmap(0), cmap(1), cmap(1)]
+        _check_colors(4, ax.collections[0].get_facecolors(), expected_colors)
+
+
+class TestPolygonZPlotting:
+
+    def setup_method(self):
+
+        t1 = Polygon([(0, 0, 0), (1, 0, 0), (1, 1, 1)])
+        t2 = Polygon([(1, 0, 0), (2, 0, 0), (2, 1, 1)])
+        self.polys = GeoSeries([t1, t2], index=list('AB'))
+        self.df = GeoDataFrame({'geometry': self.polys, 'values': [0, 1]})
+
+        multipoly1 = MultiPolygon([t1, t2])
+        multipoly2 = rotate(multipoly1, 180)
+        self.df2 = GeoDataFrame({'geometry': [multipoly1, multipoly2],
+                                 'values': [0, 1]})
+
+    def test_plot(self):
+        # basic test that points with z coords don't break plotting
+        self.df.plot()
+
+
+class TestNonuniformGeometryPlotting:
+
+    def setup_method(self):
+        pytest.importorskip('matplotlib', '1.5.0')
+
+        poly = Polygon([(1, 0), (2, 0), (2, 1)])
+        line = LineString([(0.5, 0.5), (1, 1), (1, 0.5), (1.5, 1)])
+        point = Point(0.75, 0.25)
+        self.series = GeoSeries([poly, line, point])
+        self.df = GeoDataFrame({'geometry': self.series, 'values': [1, 2, 3]})
+
+    def test_colors(self):
+        # default uniform color
+        ax = self.series.plot()
+        _check_colors(1, ax.collections[0].get_facecolors(), [MPL_DFT_COLOR])
+        _check_colors(1, ax.collections[1].get_edgecolors(), [MPL_DFT_COLOR])
+        _check_colors(1, ax.collections[2].get_facecolors(), [MPL_DFT_COLOR])
+
+        # colormap: different colors
+        ax = self.series.plot(cmap='RdYlGn')
+        cmap = plt.get_cmap('RdYlGn')
+        exp_colors = cmap(np.arange(3) / (3 - 1))
+        _check_colors(1, ax.collections[0].get_facecolors(), [exp_colors[0]])
+        _check_colors(1, ax.collections[1].get_edgecolors(), [exp_colors[1]])
+        _check_colors(1, ax.collections[2].get_facecolors(), [exp_colors[2]])
+
+    def test_style_kwargs(self):
+        ax = self.series.plot(markersize=10)
+        assert ax.collections[2].get_sizes() == [10]
+        ax = self.df.plot(markersize=10)
+        assert ax.collections[2].get_sizes() == [10]
+
+
+class TestPySALPlotting:
 
     @classmethod
-    def setUpClass(cls):
+    def setup_class(cls):
         try:
             import pysal as ps
         except ImportError:
-            raise unittest.SkipTest("PySAL is not installed")
+            raise pytest.skip("PySAL is not installed")
 
         pth = ps.examples.get_path("columbus.shp")
-        cls.tracts = read_file(pth)
+        cls.df = read_file(pth)
+        cls.df['NEGATIVES'] = np.linspace(-10, 10, len(cls.df.index))
 
     def test_legend(self):
-        ax = self.tracts.plot(column='CRIME', scheme='QUANTILES', k=3,
-                         cmap='OrRd', legend=True)
+        ax = self.df.plot(column='CRIME', scheme='QUANTILES', k=3,
+                          cmap='OrRd', legend=True)
+        labels = [t.get_text() for t in ax.get_legend().get_texts()]
+        expected = [u'0.18 - 26.07', u'26.07 - 41.97', u'41.97 - 68.89']
+        assert labels == expected
 
+    def test_negative_legend(self):
+        ax = self.df.plot(column='NEGATIVES', scheme='FISHER_JENKS', k=3,
+                          cmap='OrRd', legend=True)
         labels = [t.get_text() for t in ax.get_legend().get_texts()]
-        expected = [u'0.00 - 26.07', u'26.07 - 41.97', u'41.97 - 68.89']
-        self.assertEqual(labels, expected)
+        expected = [u'-10.00 - -3.33', u'-3.33 - 3.33', u'3.33 - 10.00']
+        assert labels == expected
+
+    def test_invalid_scheme(self):
+        with pytest.raises(ValueError):
+            scheme = 'invalid_scheme_*#&)(*#'
+            self.df.plot(column='CRIME', scheme=scheme, k=3,
+                         cmap='OrRd', legend=True)
 
 
-def _check_colors(collection, expected_colors, alpha=None):
+class TestPlotCollections:
 
-    from matplotlib.lines import Line2D
+    def setup_method(self):
+        self.N = 3
+        self.values = np.arange(self.N)
+        self.points = GeoSeries(Point(i, i) for i in range(self.N))
+        self.lines = GeoSeries([LineString([(0, i), (4, i + 0.5), (9, i)])
+                                for i in range(self.N)])
+        self.polygons = GeoSeries([Polygon([(0, i), (4, i + 0.5), (9, i)])
+                                   for i in range(self.N)])
+
+    def test_points(self):
+        # failing with matplotlib 1.4.3 (edge stays black even when specified)
+        pytest.importorskip('matplotlib', '1.5.0')
+
+        from geopandas.plotting import plot_point_collection
+        from matplotlib.collections import PathCollection
+
+        fig, ax = plt.subplots()
+        coll = plot_point_collection(ax, self.points)
+        assert isinstance(coll, PathCollection)
+        ax.cla()
+
+        # default: single default matplotlib color
+        coll = plot_point_collection(ax, self.points)
+        _check_colors(self.N, coll.get_facecolors(), [MPL_DFT_COLOR] * self.N)
+        # edgecolor depends on matplotlib version
+        # _check_colors(self.N, coll.get_edgecolors(), [MPL_DFT_COLOR]*self.N)
+        ax.cla()
+
+        # specify single other color
+        coll = plot_point_collection(ax, self.points, color='g')
+        _check_colors(self.N, coll.get_facecolors(), ['g'] * self.N)
+        _check_colors(self.N, coll.get_edgecolors(), ['g'] * self.N)
+        ax.cla()
+
+        # specify edgecolor/facecolor
+        coll = plot_point_collection(ax, self.points, facecolor='g',
+                                     edgecolor='r')
+        _check_colors(self.N, coll.get_facecolors(), ['g'] * self.N)
+        _check_colors(self.N, coll.get_edgecolors(), ['r'] * self.N)
+        ax.cla()
+
+        # list of colors
+        coll = plot_point_collection(ax, self.points, color=['r', 'g', 'b'])
+        _check_colors(self.N, coll.get_facecolors(), ['r', 'g', 'b'])
+        _check_colors(self.N, coll.get_edgecolors(), ['r', 'g', 'b'])
+        ax.cla()
+
+    def test_points_values(self):
+        from geopandas.plotting import plot_point_collection
+
+        # default colormap
+        fig, ax = plt.subplots()
+        coll = plot_point_collection(ax, self.points, self.values)
+        fig.canvas.draw_idle()
+        cmap = plt.get_cmap()
+        expected_colors = cmap(np.arange(self.N) / (self.N - 1))
+        _check_colors(self.N, coll.get_facecolors(), expected_colors)
+        # edgecolor depends on matplotlib version
+        # _check_colors(self.N, coll.get_edgecolors(), expected_colors)
+
+    def test_linestrings(self):
+        from geopandas.plotting import plot_linestring_collection
+        from matplotlib.collections import LineCollection
+
+        fig, ax = plt.subplots()
+        coll = plot_linestring_collection(ax, self.lines)
+        assert isinstance(coll, LineCollection)
+        ax.cla()
+
+        # default: single default matplotlib color
+        coll = plot_linestring_collection(ax, self.lines)
+        _check_colors(self.N, coll.get_color(), [MPL_DFT_COLOR] * self.N)
+        ax.cla()
+
+        # specify single other color
+        coll = plot_linestring_collection(ax, self.lines, color='g')
+        _check_colors(self.N, coll.get_colors(), ['g'] * self.N)
+        ax.cla()
+
+        # specify edgecolor / facecolor
+        coll = plot_linestring_collection(ax, self.lines, facecolor='g',
+                                          edgecolor='r')
+        _check_colors(self.N, coll.get_facecolors(), ['g'] * self.N)
+        _check_colors(self.N, coll.get_edgecolors(), ['r'] * self.N)
+        ax.cla()
+
+        # list of colors
+        coll = plot_linestring_collection(ax, self.lines,
+                                          color=['r', 'g', 'b'])
+        _check_colors(self.N, coll.get_colors(), ['r', 'g', 'b'])
+        ax.cla()
+
+        # pass through of kwargs
+        coll = plot_linestring_collection(ax, self.lines, linestyle='--',
+                                          linewidth=1)
+        exp_ls = _style_to_linestring_onoffseq('dashed')
+        res_ls = coll.get_linestyle()[0]
+        assert res_ls[0] == exp_ls[0]
+        assert tuple(res_ls[1]) == exp_ls[1]
+        ax.cla()
+
+    def test_linestrings_values(self):
+        from geopandas.plotting import plot_linestring_collection
+
+        fig, ax = plt.subplots()
+
+        # default colormap
+        coll = plot_linestring_collection(ax, self.lines, self.values)
+        fig.canvas.draw_idle()
+        cmap = plt.get_cmap()
+        expected_colors = cmap(np.arange(self.N) / (self.N - 1))
+        _check_colors(self.N, coll.get_color(), expected_colors)
+        ax.cla()
+
+        # specify colormap
+        coll = plot_linestring_collection(ax, self.lines, self.values,
+                                          cmap='RdBu')
+        fig.canvas.draw_idle()
+        cmap = plt.get_cmap('RdBu')
+        expected_colors = cmap(np.arange(self.N) / (self.N - 1))
+        _check_colors(self.N, coll.get_color(), expected_colors)
+        ax.cla()
+
+        # specify vmin/vmax
+        coll = plot_linestring_collection(ax, self.lines, self.values,
+                                          vmin=3, vmax=5)
+        fig.canvas.draw_idle()
+        cmap = plt.get_cmap()
+        expected_colors = cmap([0])
+        _check_colors(self.N, coll.get_color(), expected_colors)
+        ax.cla()
+
+    def test_polygons(self):
+        from geopandas.plotting import plot_polygon_collection
+        from matplotlib.collections import PatchCollection
+
+        fig, ax = plt.subplots()
+        coll = plot_polygon_collection(ax, self.polygons)
+        assert isinstance(coll, PatchCollection)
+        ax.cla()
+
+        # default: single default matplotlib color
+        coll = plot_polygon_collection(ax, self.polygons)
+        _check_colors(self.N, coll.get_facecolor(), [MPL_DFT_COLOR] * self.N)
+        _check_colors(self.N, coll.get_edgecolor(), ['k'] * self.N)
+        ax.cla()
+
+        # default: color sets both facecolor and edgecolor
+        coll = plot_polygon_collection(ax, self.polygons, color='g')
+        _check_colors(self.N, coll.get_facecolor(), ['g'] * self.N)
+        _check_colors(self.N, coll.get_edgecolor(), ['g'] * self.N)
+        ax.cla()
+
+        # only setting facecolor keeps default for edgecolor
+        coll = plot_polygon_collection(ax, self.polygons, facecolor='g')
+        _check_colors(self.N, coll.get_facecolor(), ['g'] * self.N)
+        _check_colors(self.N, coll.get_edgecolor(), ['k'] * self.N)
+        ax.cla()
+
+        # custom facecolor and edgecolor
+        coll = plot_polygon_collection(ax, self.polygons, facecolor='g',
+                                       edgecolor='r')
+        _check_colors(self.N, coll.get_facecolor(), ['g'] * self.N)
+        _check_colors(self.N, coll.get_edgecolor(), ['r'] * self.N)
+        ax.cla()
+
+    def test_polygons_values(self):
+        from geopandas.plotting import plot_polygon_collection
+
+        fig, ax = plt.subplots()
+
+        # default colormap, edge is still black by default
+        coll = plot_polygon_collection(ax, self.polygons, self.values)
+        fig.canvas.draw_idle()
+        cmap = plt.get_cmap()
+        exp_colors = cmap(np.arange(self.N) / (self.N - 1))
+        _check_colors(self.N, coll.get_facecolor(), exp_colors)
+        # edgecolor depends on matplotlib version
+        #_check_colors(self.N, coll.get_edgecolor(), ['k'] * self.N)
+        ax.cla()
+
+        # specify colormap
+        coll = plot_polygon_collection(ax, self.polygons, self.values,
+                                       cmap='RdBu')
+        fig.canvas.draw_idle()
+        cmap = plt.get_cmap('RdBu')
+        exp_colors = cmap(np.arange(self.N) / (self.N - 1))
+        _check_colors(self.N, coll.get_facecolor(), exp_colors)
+        ax.cla()
+
+        # specify vmin/vmax
+        coll = plot_polygon_collection(ax, self.polygons, self.values,
+                                       vmin=3, vmax=5)
+        fig.canvas.draw_idle()
+        cmap = plt.get_cmap()
+        exp_colors = cmap([0])
+        _check_colors(self.N, coll.get_facecolor(), exp_colors)
+        ax.cla()
+
+        # override edgecolor
+        coll = plot_polygon_collection(ax, self.polygons, self.values,
+                                       edgecolor='g')
+        fig.canvas.draw_idle()
+        cmap = plt.get_cmap()
+        exp_colors = cmap(np.arange(self.N) / (self.N - 1))
+        _check_colors(self.N, coll.get_facecolor(), exp_colors)
+        _check_colors(self.N, coll.get_edgecolor(), ['g'] * self.N)
+        ax.cla()
+
+
+def _check_colors(N, actual_colors, expected_colors, alpha=None):
+    """
+    Asserts that the members of `collection` match the `expected_colors`
+    (in order)
+
+    Parameters
+    ----------
+    N : int
+        The number of geometries believed to be in collection.
+        matplotlib.collection is implemented such that the number of geoms in
+        `collection` doesn't have to match the number of colors assignments in
+        the collection: the colors will cycle to meet the needs of the geoms.
+        `N` helps us resolve this.
+    collection : matplotlib.collections.Collection
+        The colors of this collection's patches are read from
+        `collection.get_facecolors()`
+    expected_colors : sequence of RGBA tuples
+    alpha : float (optional)
+        If set, this alpha transparency will be applied to the
+        `expected_colors`. (Any transparency on the `collecton` is assumed
+        to be set in its own facecolor RGBA tuples.)
+    """
     import matplotlib.colors as colors
     conv = colors.colorConverter
 
-    for patch, color in zip(collection, expected_colors):
-        if isinstance(patch, Line2D):
-            # points/lines
-            result = patch.get_color()
-        else:
-            # polygons
-            result = patch.get_facecolor()
-        assert conv.to_rgba(result) == conv.to_rgba(color, alpha=alpha)
-
-
-if __name__ == '__main__':
-    unittest.main()
+    # Convert 2D numpy array to a list of RGBA tuples.
+    actual_colors = map(tuple, actual_colors)
+    all_actual_colors = list(itertools.islice(
+        itertools.cycle(actual_colors), N))
+
+    for actual, expected in zip(all_actual_colors, expected_colors):
+        assert actual == conv.to_rgba(expected, alpha=alpha), \
+            '{} != {}'.format(actual, conv.to_rgba(expected, alpha=alpha))
+
+
+def _style_to_linestring_onoffseq(linestyle):
+    """ Converts a linestyle string representation, namely one of:
+            ['dashed',  'dotted', 'dashdot', 'solid'],
+        documented in `Collections.set_linestyle`,
+        to the form `onoffseq`.
+    """
+    if LooseVersion(matplotlib.__version__) >= '2.0':
+        return matplotlib.lines._get_dash_pattern(linestyle)
+    else:
+        from matplotlib.backend_bases import GraphicsContextBase
+        return GraphicsContextBase.dashd[linestyle]
diff --git a/geopandas/tests/test_sindex.py b/geopandas/tests/test_sindex.py
index d978d20..265d642 100644
--- a/geopandas/tests/test_sindex.py
+++ b/geopandas/tests/test_sindex.py
@@ -1,117 +1,124 @@
+import sys
+
 from shapely.geometry import Polygon, Point
 
+import geopandas
 from geopandas import GeoSeries, GeoDataFrame, base, read_file
-from geopandas.tests.util import unittest, download_nybb
+
+import pytest
 
 
-@unittest.skipIf(not base.HAS_SINDEX, 'Rtree absent, skipping')
-class TestSeriesSindex(unittest.TestCase):
+@pytest.mark.skipif(sys.platform.startswith("win"), reason="fails on AppVeyor")
+@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
+class TestSeriesSindex:
 
     def test_empty_index(self):
-        self.assert_(GeoSeries().sindex is None)
+        assert GeoSeries().sindex is None
 
     def test_point(self):
         s = GeoSeries([Point(0, 0)])
-        self.assertEqual(s.sindex.size, 1)
+        assert s.sindex.size == 1
         hits = s.sindex.intersection((-1, -1, 1, 1))
-        self.assertEqual(len(list(hits)), 1)
+        assert len(list(hits)) == 1
         hits = s.sindex.intersection((-2, -2, -1, -1))
-        self.assertEqual(len(list(hits)), 0)
+        assert len(list(hits)) == 0
 
     def test_empty_point(self):
         s = GeoSeries([Point()])
-        self.assert_(GeoSeries().sindex is None)
+        assert s.sindex is None
+        assert s._sindex_generated is True
+
+    def test_empty_geo_series(self):
+        assert GeoSeries().sindex is None
 
     def test_polygons(self):
         t1 = Polygon([(0, 0), (1, 0), (1, 1)])
         t2 = Polygon([(0, 0), (1, 1), (0, 1)])
         sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
         s = GeoSeries([t1, t2, sq])
-        self.assertEqual(s.sindex.size, 3)
+        assert s.sindex.size == 3
 
     def test_polygons_append(self):
         t1 = Polygon([(0, 0), (1, 0), (1, 1)])
         t2 = Polygon([(0, 0), (1, 1), (0, 1)])
         sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
         s = GeoSeries([t1, t2, sq])
-        t = GeoSeries([t1, t2, sq], [3,4,5])
+        t = GeoSeries([t1, t2, sq], [3, 4, 5])
         s = s.append(t)
-        self.assertEqual(len(s), 6)
-        self.assertEqual(s.sindex.size, 6)
+        assert len(s) == 6
+        assert s.sindex.size == 6
 
     def test_lazy_build(self):
         s = GeoSeries([Point(0, 0)])
-        self.assert_(s._sindex is None)
-        self.assertEqual(s.sindex.size, 1)
-        self.assert_(s._sindex is not None)
+        assert s._sindex is None
+        assert s.sindex.size == 1
+        assert s._sindex is not None
 
 
-@unittest.skipIf(not base.HAS_SINDEX, 'Rtree absent, skipping')
-class TestFrameSindex(unittest.TestCase):
-    def setUp(self):
+@pytest.mark.skipif(sys.platform.startswith("win"), reason="fails on AppVeyor")
+@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
+class TestFrameSindex:
+    def setup_method(self):
         data = {"A": range(5), "B": range(-5, 0),
                 "location": [Point(x, y) for x, y in zip(range(5), range(5))]}
         self.df = GeoDataFrame(data, geometry='location')
 
     def test_sindex(self):
         self.df.crs = {'init': 'epsg:4326'}
-        self.assertEqual(self.df.sindex.size, 5)
-        hits = list(self.df.sindex.intersection((2.5, 2.5, 4, 4), objects=True))
-        self.assertEqual(len(hits), 2)
-        self.assertEqual(hits[0].object, 3)
+        assert self.df.sindex.size == 5
+        hits = list(self.df.sindex.intersection((2.5, 2.5, 4, 4),
+                                                objects=True))
+        assert len(hits) == 2
+        assert hits[0].object == 3
 
     def test_lazy_build(self):
-        self.assert_(self.df._sindex is None)
-        self.assertEqual(self.df.sindex.size, 5)
-        self.assert_(self.df._sindex is not None)
+        assert self.df._sindex is None
+        assert self.df.sindex.size == 5
+        assert self.df._sindex is not None
 
     def test_sindex_rebuild_on_set_geometry(self):
         # First build the sindex
-        self.assert_(self.df.sindex is not None)
+        assert self.df.sindex is not None
         self.df.set_geometry(
             [Point(x, y) for x, y in zip(range(5, 10), range(5, 10))],
             inplace=True)
-        self.assert_(self.df._sindex_valid == False)
+        assert self.df._sindex_generated is False
 
 
 # Skip to accommodate Shapely geometries being unhashable
-@unittest.skip
-class TestJoinSindex(unittest.TestCase):
+@pytest.mark.skip
+class TestJoinSindex:
 
-    def setUp(self):
-        nybb_filename, nybb_zip_path = download_nybb()
-        self.boros = read_file(nybb_zip_path, vfs='zip://' + nybb_filename)
+    def setup_method(self):
+        nybb_filename = geopandas.datasets.get_path('nybb')
+        self.boros = read_file(nybb_filename)
 
     def test_merge_geo(self):
         # First check that we gets hits from the boros frame.
         tree = self.boros.sindex
         hits = tree.intersection((1012821.80, 229228.26), objects=True)
-        self.assertEqual(
-            [self.boros.ix[hit.object]['BoroName'] for hit in hits],
-            ['Bronx', 'Queens'])
+        res = [self.boros.ix[hit.object]['BoroName'] for hit in hits]
+        assert res == ['Bronx', 'Queens']
 
         # Check that we only get the Bronx from this view.
         first = self.boros[self.boros['BoroCode'] < 3]
         tree = first.sindex
         hits = tree.intersection((1012821.80, 229228.26), objects=True)
-        self.assertEqual(
-            [first.ix[hit.object]['BoroName'] for hit in hits],
-            ['Bronx'])
+        res = [first.ix[hit.object]['BoroName'] for hit in hits]
+        assert res == ['Bronx']
 
         # Check that we only get Queens from this view.
         second = self.boros[self.boros['BoroCode'] >= 3]
         tree = second.sindex
         hits = tree.intersection((1012821.80, 229228.26), objects=True)
-        self.assertEqual(
-            [second.ix[hit.object]['BoroName'] for hit in hits],
-            ['Queens'])
+        res = [second.ix[hit.object]['BoroName'] for hit in hits]
+        assert res == ['Queens']
 
         # Get both the Bronx and Queens again.
         merged = first.merge(second, how='outer')
-        self.assertEqual(len(merged), 5)
-        self.assertEqual(merged.sindex.size, 5)
+        assert len(merged) == 5
+        assert merged.sindex.size == 5
         tree = merged.sindex
         hits = tree.intersection((1012821.80, 229228.26), objects=True)
-        self.assertEqual(
-            [merged.ix[hit.object]['BoroName'] for hit in hits],
-            ['Bronx', 'Queens'])
+        res = [merged.ix[hit.object]['BoroName'] for hit in hits]
+        assert res == ['Bronx', 'Queens']
diff --git a/geopandas/tests/test_types.py b/geopandas/tests/test_types.py
index f5f1d59..2729dee 100644
--- a/geopandas/tests/test_types.py
+++ b/geopandas/tests/test_types.py
@@ -1,18 +1,14 @@
 from __future__ import absolute_import
 
-import numpy as np
-from shapely.geometry import Point
 from pandas import Series, DataFrame
+from shapely.geometry import Point
 
 from geopandas import GeoSeries, GeoDataFrame
-from geopandas.tests.util import unittest
-
-OLD_PANDAS = issubclass(Series, np.ndarray)
 
 
-class TestSeries(unittest.TestCase):
+class TestSeries:
 
-    def setUp(self):
+    def setup_method(self):
         N = self.N = 10
         r = 0.5
         self.pts = GeoSeries([Point(x, y) for x, y in zip(range(N), range(N))])
@@ -48,25 +44,24 @@ class TestSeries(unittest.TestCase):
     def test_select(self):
         assert type(self.pts.select(lambda x: x % 2 == 0)) is GeoSeries
 
-    @unittest.skipIf(OLD_PANDAS, 'Groupby not supported on pandas <= 0.12')
     def test_groupby(self):
         for f, s in self.pts.groupby(lambda x: x % 2):
             assert type(s) is GeoSeries
 
 
-class TestDataFrame(unittest.TestCase):
+class TestDataFrame:
 
-    def setUp(self):
+    def setup_method(self):
         N = 10
         self.df = GeoDataFrame([
-            {'geometry' : Point(x, y), 'value1': x + y, 'value2': x*y}
+            {'geometry': Point(x, y), 'value1': x + y, 'value2': x*y}
             for x, y in zip(range(N), range(N))])
 
     def test_geometry(self):
         assert type(self.df.geometry) is GeoSeries
         # still GeoSeries if different name
-        df2 = GeoDataFrame({"coords": [Point(x,y) for x, y in zip(range(5),
-                                                                  range(5))],
+        df2 = GeoDataFrame({"coords": [Point(x, y) for x, y in zip(range(5),
+                                                                   range(5))],
                             "nums": range(5)}, geometry="coords")
         assert type(df2.geometry) is GeoSeries
         assert type(df2['coords']) is GeoSeries
diff --git a/geopandas/tests/util.py b/geopandas/tests/util.py
index 51f2f94..311659a 100644
--- a/geopandas/tests/util.py
+++ b/geopandas/tests/util.py
@@ -1,24 +1,11 @@
-import io
 import os.path
-import sys
-import zipfile
-
-from six.moves.urllib.request import urlopen
-from pandas.util.testing import assert_isinstance
 
 from geopandas import GeoDataFrame, GeoSeries
 
+
 HERE = os.path.abspath(os.path.dirname(__file__))
 PACKAGE_DIR = os.path.dirname(os.path.dirname(HERE))
 
-# Compatibility layer for Python 2.6: try loading unittest2
-if sys.version_info[:2] == (2, 6):
-    try:
-        import unittest2 as unittest
-    except ImportError:
-        import unittest
-else:
-    import unittest
 
 try:
     import psycopg2
@@ -32,47 +19,17 @@ try:
 except ImportError:
     import mock
 
-try:
-    from pandas import read_sql_table
-except ImportError:
-    PANDAS_NEW_SQL_API = False
-else:
-    PANDAS_NEW_SQL_API = True
-
-
-def download_nybb():
-    """ Returns the path to the NYC boroughs file. Downloads if necessary.
 
-    returns tuple (zip file name, shapefile's name and path within zip file)"""
-    # Data from http://www.nyc.gov/html/dcp/download/bytes/nybb_14aav.zip
-    # saved as geopandas/examples/nybb_14aav.zip.
-    filename = 'nybb_16a.zip'
-    full_path_name = os.path.join(PACKAGE_DIR, 'examples', filename)
-    if not os.path.exists(full_path_name):
-        with io.open(full_path_name, 'wb') as f:
-            response = urlopen('http://www1.nyc.gov/assets/planning/download/zip/data-maps/open-data/{0}'.format(filename))
-            f.write(response.read())
-
-    shp_zip_path = None
-    zf = zipfile.ZipFile(full_path_name, 'r')
-    # finds path name in zip file
-    for zip_filename_path in zf.namelist():
-        if zip_filename_path.endswith('nybb.shp'):
-            break
-
-    return full_path_name, ('/' + zip_filename_path)
-
-
-def validate_boro_df(test, df):
+def validate_boro_df(df):
     """ Tests a GeoDataFrame that has been read in from the nybb dataset."""
-    test.assertTrue(isinstance(df, GeoDataFrame))
+    assert isinstance(df, GeoDataFrame)
     # Make sure all the columns are there and the geometries
     # were properly loaded as MultiPolygons
-    test.assertEqual(len(df), 5)
+    assert len(df) == 5
     columns = ('borocode', 'boroname', 'shape_leng', 'shape_area')
     for col in columns:
-        test.assertTrue(col in df.columns, 'Column {0} missing'.format(col))
-    test.assertTrue(all(df.geometry.type == 'MultiPolygon'))
+        assert col in df.columns
+    assert all(df.geometry.type == 'MultiPolygon')
 
 
 def connect(dbname):
@@ -132,9 +89,12 @@ def create_db(df):
 
 
 def assert_seq_equal(left, right):
-    """Poor man's version of assert_almost_equal which isn't working with Shapely
-    objects right now"""
-    assert len(left) == len(right), "Mismatched lengths: %d != %d" % (len(left), len(right))
+    """
+    Poor man's version of assert_almost_equal which isn't working with Shapely
+    objects right now
+    """
+    assert len(left) == len(right), \
+        "Mismatched lengths: %d != %d" % (len(left), len(right))
 
     for elem_left, elem_right in zip(left, right):
         assert elem_left == elem_right, "%r != %r" % (left, right)
@@ -195,7 +155,7 @@ def assert_geoseries_equal(left, right, check_dtype=False,
     assert len(left) == len(right), "%d != %d" % (len(left), len(right))
 
     if check_index_type:
-        assert_isinstance(left.index, type(right.index))
+        assert isinstance(left.index, type(right.index))
 
     if check_dtype:
         assert left.dtype == right.dtype, "dtype: %s != %s" % (left.dtype,
@@ -203,7 +163,7 @@ def assert_geoseries_equal(left, right, check_dtype=False,
 
     if check_series_type:
         assert isinstance(left, GeoSeries)
-        assert_isinstance(left, type(right))
+        assert isinstance(left, type(right))
 
         if check_crs:
             assert(left.crs == right.crs)
diff --git a/geopandas/tools/overlay.py b/geopandas/tools/overlay.py
index 8abbca7..1695cca 100644
--- a/geopandas/tools/overlay.py
+++ b/geopandas/tools/overlay.py
@@ -1,6 +1,7 @@
+import pandas as pd
 from shapely.ops import unary_union, polygonize
 from shapely.geometry import MultiLineString
-import pandas as pd
+
 from geopandas import GeoDataFrame, GeoSeries
 
 
diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py
index 206f4cb..c11ac45 100644
--- a/geopandas/tools/sjoin.py
+++ b/geopandas/tools/sjoin.py
@@ -38,88 +38,119 @@ def sjoin(left_df, right_df, how='inner', op='intersects',
         raise ValueError("`op` was \"%s\" but is expected to be in %s" % \
             (op, allowed_ops))
 
-    if op == "within":
-        # within implemented as the inverse of contains; swap names
-        left_df, right_df = right_df, left_df
-
     if left_df.crs != right_df.crs:
         print('Warning: CRS does not match!')
 
-    tree_idx = rtree.index.Index()
-    right_df_bounds = right_df['geometry'].apply(lambda x: x.bounds)
-    for i in right_df_bounds.index:
-        tree_idx.insert(i, right_df_bounds[i])
-
-    idxmatch = (left_df['geometry'].apply(lambda x: x.bounds)
-                .apply(lambda x: list(tree_idx.intersection(x))))
-    idxmatch = idxmatch[idxmatch.apply(len) > 0]
-
-    r_idx = np.concatenate(idxmatch.values)
-    l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
+    index_left = 'index_%s' % lsuffix
+    index_right = 'index_%s' % rsuffix
 
-    # Vectorize predicate operations
-    def find_intersects(a1, a2):
-        return a1.intersects(a2)
+    # due to GH 352
+    if (any(left_df.columns.isin([index_left, index_right]))
+            or any(right_df.columns.isin([index_left, index_right]))):
+        raise ValueError("'{0}' and '{1}' cannot be names in the frames being"
+                         " joined".format(index_left, index_right))
 
-    def find_contains(a1, a2):
-        return a1.contains(a2)
+    # the rtree spatial index only allows limited (numeric) index types, but an
+    # index in geopandas may be any arbitrary dtype. so reset both indices now
+    # and store references to the original indices, to be reaffixed later.
+    # GH 352
+    left_df.index.name = index_left
+    right_df.index.name = index_right
+    left_df = left_df.reset_index()
+    right_df = right_df.reset_index()
 
-    predicate_d = {'intersects': find_intersects,
-                   'contains': find_contains,
-                   'within': find_contains}
+    if op == "within":
+        # within implemented as the inverse of contains; swap names
+        left_df, right_df = right_df, left_df
 
-    check_predicates = np.vectorize(predicate_d[op])
+    # insert the bounds in the rtree spatial index
+    right_df_bounds = right_df.geometry.apply(lambda x: x.bounds)
+    stream = ((i, b, None) for i, b in enumerate(right_df_bounds))
+    tree_idx = rtree.index.Index(stream)
 
-    result = (
-              pd.DataFrame(
-                  np.column_stack(
-                      [l_idx,
-                       r_idx,
-                       check_predicates(
-                           left_df['geometry']
-                           .apply(lambda x: prepared.prep(x))[l_idx],
-                           right_df['geometry'][r_idx])
-                       ]))
-               )
+    idxmatch = (left_df.geometry.apply(lambda x: x.bounds)
+                .apply(lambda x: list(tree_idx.intersection(x))))
+    idxmatch = idxmatch[idxmatch.apply(len) > 0]
 
-    result.columns = ['index_%s' % lsuffix, 'index_%s' % rsuffix, 'match_bool']
-    result = (
-              pd.DataFrame(result[result['match_bool']==1])
-              .drop('match_bool', axis=1)
-              )
+    if idxmatch.shape[0] > 0:
+        # if output from join has overlapping geometries
+        r_idx = np.concatenate(idxmatch.values)
+        l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
+
+        # Vectorize predicate operations
+        def find_intersects(a1, a2):
+            return a1.intersects(a2)
+
+        def find_contains(a1, a2):
+            return a1.contains(a2)
+
+        predicate_d = {'intersects': find_intersects,
+                       'contains': find_contains,
+                       'within': find_contains}
+
+        check_predicates = np.vectorize(predicate_d[op])
+
+        result = (
+                  pd.DataFrame(
+                      np.column_stack(
+                          [l_idx,
+                           r_idx,
+                           check_predicates(
+                               left_df.geometry
+                               .apply(lambda x: prepared.prep(x))[l_idx],
+                               right_df[right_df.geometry.name][r_idx])
+                           ]))
+                   )
+
+        result.columns = ['_key_left', '_key_right', 'match_bool']
+        result = (
+                  pd.DataFrame(result[result['match_bool']==1])
+                  .drop('match_bool', axis=1)
+                  )
+
+    else:
+        # when output from the join has no overlapping geometries
+        result = pd.DataFrame(columns=['_key_left', '_key_right'])
 
     if op == "within":
         # within implemented as the inverse of contains; swap names
         left_df, right_df = right_df, left_df
-        result = result.rename(columns={
-                    'index_%s' % (lsuffix): 'index_%s' % (rsuffix),
-                    'index_%s' % (rsuffix): 'index_%s' % (lsuffix)})
+        result = result.rename(columns={'_key_left': '_key_right',
+                                        '_key_right': '_key_left'})
+
 
     if how == 'inner':
-        result = result.set_index('index_%s' % lsuffix)
-        return (
-                left_df
-                .merge(result, left_index=True, right_index=True)
-                .merge(right_df.drop('geometry', axis=1),
-                    left_on='index_%s' % rsuffix, right_index=True,
-                    suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
-                )
+        result = result.set_index('_key_left')
+        joined = (
+                  left_df
+                  .merge(result, left_index=True, right_index=True)
+                  .merge(right_df.drop(right_df.geometry.name, axis=1),
+                      left_on='_key_right', right_index=True,
+                      suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
+                 )
+        joined = joined.set_index(index_left).drop(['_key_right'], axis=1)
+        joined.index.name = None
     elif how == 'left':
-        result = result.set_index('index_%s' % lsuffix)
-        return (
-                left_df
-                .merge(result, left_index=True, right_index=True, how='left')
-                .merge(right_df.drop('geometry', axis=1),
-                    how='left', left_on='index_%s' % rsuffix, right_index=True,
-                    suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
-                )
-    elif how == 'right':
-        return (
-                left_df
-                .drop('geometry', axis=1)
-                .merge(result.merge(right_df,
-                    left_on='index_%s' % rsuffix, right_index=True,
-                    how='right'), left_index=True,
-                    right_on='index_%s' % lsuffix, how='right')
-                .set_index('index_%s' % rsuffix)
-                )
+        result = result.set_index('_key_left')
+        joined = (
+                  left_df
+                  .merge(result, left_index=True, right_index=True, how='left')
+                  .merge(right_df.drop(right_df.geometry.name, axis=1),
+                      how='left', left_on='_key_right', right_index=True,
+                      suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
+                 )
+        joined = joined.set_index(index_left).drop(['_key_right'], axis=1)
+        joined.index.name = None
+    else:  # how == 'right':
+        joined = (
+                  left_df
+                  .drop(left_df.geometry.name, axis=1)
+                  .merge(result.merge(right_df,
+                      left_on='_key_right', right_index=True,
+                      how='right'), left_index=True,
+                      right_on='_key_left', how='right')
+                  .set_index(index_right)
+                 )
+        joined = joined.drop(['_key_left', '_key_right'], axis=1)
+
+    return joined
diff --git a/geopandas/tools/tests/test_sjoin.py b/geopandas/tools/tests/test_sjoin.py
index cfaaf37..39cd991 100644
--- a/geopandas/tools/tests/test_sjoin.py
+++ b/geopandas/tools/tests/test_sjoin.py
@@ -1,88 +1,267 @@
 from __future__ import absolute_import
 
-import tempfile
-import shutil
+from distutils.version import LooseVersion
 
 import numpy as np
-from shapely.geometry import Point
+import pandas as pd
+from shapely.geometry import Point, Polygon
 
-from geopandas import GeoDataFrame, read_file, base
-from geopandas.tests.util import unittest, download_nybb
+import geopandas
+from geopandas import GeoDataFrame, GeoSeries, read_file, base
 from geopandas import sjoin
 
+import pytest
+from pandas.util.testing import assert_frame_equal
 
-@unittest.skipIf(not base.HAS_SINDEX, 'Rtree absent, skipping')
-class TestSpatialJoin(unittest.TestCase):
 
-    def setUp(self):
-        nybb_filename, nybb_zip_path = download_nybb()
-        self.polydf = read_file(nybb_zip_path, vfs='zip://' + nybb_filename)
-        self.tempdir = tempfile.mkdtemp()
-        self.crs = {'init': 'epsg:4326'}
+pandas_0_18_problem = 'fails under pandas < 0.19 due to pandas issue 15692,'\
+                        'not problem with sjoin.'
+
+
+@pytest.fixture()
+def dfs(request):
+    polys1 = GeoSeries(
+        [Polygon([(0, 0), (5, 0), (5, 5), (0, 5)]),
+         Polygon([(5, 5), (6, 5), (6, 6), (5, 6)]),
+         Polygon([(6, 0), (9, 0), (9, 3), (6, 3)])])
+
+    polys2 = GeoSeries(
+        [Polygon([(1, 1), (4, 1), (4, 4), (1, 4)]),
+         Polygon([(4, 4), (7, 4), (7, 7), (4, 7)]),
+         Polygon([(7, 7), (10, 7), (10, 10), (7, 10)])])
+
+    df1 = GeoDataFrame({'geometry': polys1, 'df1': [0, 1, 2]})
+    df2 = GeoDataFrame({'geometry': polys2, 'df2': [3, 4, 5]})
+    if request.param == 'string-index':
+        df1.index = ['a', 'b', 'c']
+        df2.index = ['d', 'e', 'f']
+
+    # construction expected frames
+    expected = {}
+
+    part1 = df1.copy().reset_index().rename(
+        columns={'index': 'index_left'})
+    part2 = df2.copy().iloc[[0, 1, 1, 2]].reset_index().rename(
+        columns={'index': 'index_right'})
+    part1['_merge'] = [0, 1, 2]
+    part2['_merge'] = [0, 0, 1, 3]
+    exp = pd.merge(part1, part2, on='_merge', how='outer')
+    expected['intersects'] = exp.drop('_merge', axis=1).copy()
+
+    part1 = df1.copy().reset_index().rename(
+        columns={'index': 'index_left'})
+    part2 = df2.copy().reset_index().rename(
+        columns={'index': 'index_right'})
+    part1['_merge'] = [0, 1, 2]
+    part2['_merge'] = [0, 3, 3]
+    exp = pd.merge(part1, part2, on='_merge', how='outer')
+    expected['contains'] = exp.drop('_merge', axis=1).copy()
+
+    part1['_merge'] = [0, 1, 2]
+    part2['_merge'] = [3, 1, 3]
+    exp = pd.merge(part1, part2, on='_merge', how='outer')
+    expected['within'] = exp.drop('_merge', axis=1).copy()
+
+    return [request.param, df1, df2, expected]
+
+
+@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
+class TestSpatialJoin:
+
+    @pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
+                             indirect=True)
+    @pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
+    def test_inner(self, op, dfs):
+        index, df1, df2, expected = dfs
+
+        res = sjoin(df1, df2, how='inner', op=op)
+
+        exp = expected[op].dropna().copy()
+        exp = exp.drop('geometry_y', axis=1).rename(
+            columns={'geometry_x': 'geometry'})
+        exp[['df1', 'df2']] = exp[['df1', 'df2']].astype('int64')
+        if index == 'default-index':
+            exp[['index_left', 'index_right']] = \
+                exp[['index_left', 'index_right']].astype('int64')
+        exp = exp.set_index('index_left')
+        exp.index.name = None
+
+        assert_frame_equal(res, exp)
+
+    @pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
+                             indirect=True)
+    @pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
+    def test_left(self, op, dfs):
+        index, df1, df2, expected = dfs
+
+        res = sjoin(df1, df2, how='left', op=op)
+
+        exp = expected[op].dropna(subset=['index_left']).copy()
+        exp = exp.drop('geometry_y', axis=1).rename(
+            columns={'geometry_x': 'geometry'})
+        exp['df1'] = exp['df1'].astype('int64')
+        if index == 'default-index':
+            exp['index_left'] = exp['index_left'].astype('int64')
+            # TODO: in result the dtype is object
+            res['index_right'] = res['index_right'].astype(float)
+        exp = exp.set_index('index_left')
+        exp.index.name = None
+
+        assert_frame_equal(res, exp)
+
+    @pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
+                             indirect=True)
+    @pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
+    def test_right(self, op, dfs):
+        index, df1, df2, expected = dfs
+
+        res = sjoin(df1, df2, how='right', op=op)
+
+        exp = expected[op].dropna(subset=['index_right']).copy()
+        exp = exp.drop('geometry_x', axis=1).rename(
+            columns={'geometry_y': 'geometry'})
+        exp['df2'] = exp['df2'].astype('int64')
+        if index == 'default-index':
+            exp['index_right'] = exp['index_right'].astype('int64')
+            res['index_left'] = res['index_left'].astype(float)
+        exp = exp.set_index('index_right')
+        exp = exp.reindex(columns=res.columns)
+
+        assert_frame_equal(res, exp, check_index_type=False)
+
+
+@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
+class TestSpatialJoinNYBB:
+
+    def setup_method(self):
+        nybb_filename = geopandas.datasets.get_path('nybb')
+        self.polydf = read_file(nybb_filename)
+        self.crs = self.polydf.crs
         N = 20
         b = [int(x) for x in self.polydf.total_bounds]
-        self.pointdf = GeoDataFrame([
-            {'geometry' : Point(x, y), 'pointattr1': x + y, 'pointattr2': x - y}
-            for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
-                            range(b[1], b[3], int((b[3]-b[1])/N)))], crs=self.crs)
+        self.pointdf = GeoDataFrame(
+            [{'geometry': Point(x, y),
+              'pointattr1': x + y, 'pointattr2': x - y}
+             for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
+                             range(b[1], b[3], int((b[3]-b[1])/N)))],
+            crs=self.crs)
 
-    def tearDown(self):
-        shutil.rmtree(self.tempdir)
+    def test_geometry_name(self):
+        # test sjoin is working with other geometry name
+        polydf_original_geom_name = self.polydf.geometry.name
+        self.polydf = (self.polydf.rename(columns={'geometry': 'new_geom'})
+                                  .set_geometry('new_geom'))
+        assert polydf_original_geom_name != self.polydf.geometry.name
+        res = sjoin(self.polydf, self.pointdf, how="left")
+        assert self.polydf.geometry.name == res.geometry.name
 
     def test_sjoin_left(self):
         df = sjoin(self.pointdf, self.polydf, how='left')
-        self.assertEquals(df.shape, (21,8))
+        assert df.shape == (21, 8)
         for i, row in df.iterrows():
-            self.assertEquals(row.geometry.type, 'Point')
-        self.assertTrue('pointattr1' in df.columns)
-        self.assertTrue('BoroCode' in df.columns)
+            assert row.geometry.type == 'Point'
+        assert 'pointattr1' in df.columns
+        assert 'BoroCode' in df.columns
 
     def test_sjoin_right(self):
         # the inverse of left
         df = sjoin(self.pointdf, self.polydf, how="right")
         df2 = sjoin(self.polydf, self.pointdf, how="left")
-        self.assertEquals(df.shape, (12, 8))
-        self.assertEquals(df.shape, df2.shape)
+        assert df.shape == (12, 8)
+        assert df.shape == df2.shape
         for i, row in df.iterrows():
-            self.assertEquals(row.geometry.type, 'MultiPolygon')
+            assert row.geometry.type == 'MultiPolygon'
         for i, row in df2.iterrows():
-            self.assertEquals(row.geometry.type, 'MultiPolygon')
+            assert row.geometry.type == 'MultiPolygon'
 
     def test_sjoin_inner(self):
         df = sjoin(self.pointdf, self.polydf, how="inner")
-        self.assertEquals(df.shape, (11, 8))
+        assert df.shape == (11, 8)
 
     def test_sjoin_op(self):
         # points within polygons
         df = sjoin(self.pointdf, self.polydf, how="left", op="within")
-        self.assertEquals(df.shape, (21,8))
-        self.assertEquals(df.ix[1]['BoroName'], 'Staten Island')
+        assert df.shape == (21, 8)
+        assert df.ix[1]['BoroName'] == 'Staten Island'
 
         # points contain polygons? never happens so we should have nulls
         df = sjoin(self.pointdf, self.polydf, how="left", op="contains")
-        self.assertEquals(df.shape, (21, 8))
-        self.assertTrue(np.isnan(df.ix[1]['Shape_Area']))
+        assert df.shape == (21, 8)
+        assert np.isnan(df.ix[1]['Shape_Area'])
 
     def test_sjoin_bad_op(self):
         # AttributeError: 'Point' object has no attribute 'spandex'
-        self.assertRaises(ValueError, sjoin,
-            self.pointdf, self.polydf, how="left", op="spandex")
+        with pytest.raises(ValueError):
+            sjoin(self.pointdf, self.polydf, how="left", op="spandex")
 
     def test_sjoin_duplicate_column_name(self):
         pointdf2 = self.pointdf.rename(columns={'pointattr1': 'Shape_Area'})
         df = sjoin(pointdf2, self.polydf, how="left")
-        self.assertTrue('Shape_Area_left' in df.columns)
-        self.assertTrue('Shape_Area_right' in df.columns)
+        assert 'Shape_Area_left' in df.columns
+        assert 'Shape_Area_right' in df.columns
 
     def test_sjoin_values(self):
         # GH190
         self.polydf.index = [1, 3, 4, 5, 6]
         df = sjoin(self.pointdf, self.polydf, how='left')
-        self.assertEquals(df.shape, (21,8))
+        assert df.shape == (21, 8)
         df = sjoin(self.polydf, self.pointdf, how='left')
-        self.assertEquals(df.shape, (12,8))
+        assert df.shape == (12, 8)
+
+    @pytest.mark.skipif(str(pd.__version__) < LooseVersion('0.19'),
+                        reason=pandas_0_18_problem)
+    @pytest.mark.xfail
+    def test_no_overlapping_geometry(self):
+        # Note: these tests are for correctly returning GeoDataFrame
+        # when result of the join is empty
+
+        df_inner = sjoin(self.pointdf.iloc[17:], self.polydf, how='inner')
+        df_left = sjoin(self.pointdf.iloc[17:], self.polydf, how='left')
+        df_right = sjoin(self.pointdf.iloc[17:], self.polydf, how='right')
+
+        # Recent Pandas development has introduced a new way of handling merges
+        # this change has altered the output when no overlapping geometries
+        if str(pd.__version__) > LooseVersion('0.18.1'):
+            right_idxs = pd.Series(range(0, 5), name='index_right',
+                                   dtype='int64')
+        else:
+            right_idxs = pd.Series(name='index_right', dtype='int64')
+
+        expected_inner_df = pd.concat(
+            [self.pointdf.iloc[:0],
+             pd.Series(name='index_right', dtype='int64'),
+             self.polydf.drop('geometry', axis=1).iloc[:0]],
+            axis=1)
+
+        expected_inner = GeoDataFrame(
+            expected_inner_df, crs={'init': 'epsg:4326', 'no_defs': True})
+
+        expected_right_df = pd.concat(
+            [self.pointdf.drop('geometry', axis=1).iloc[:0],
+             pd.concat([pd.Series(name='index_left', dtype='int64'),
+                        right_idxs],
+                       axis=1),
+             self.polydf],
+            axis=1)
+
+        expected_right = GeoDataFrame(
+            expected_right_df, crs={'init': 'epsg:4326', 'no_defs': True})\
+            .set_index('index_right')
+
+        expected_left_df = pd.concat(
+            [self.pointdf.iloc[17:],
+             pd.Series(name='index_right', dtype='int64'),
+             self.polydf.iloc[:0].drop('geometry', axis=1)],
+            axis=1)
+
+        expected_left = GeoDataFrame(
+            expected_left_df, crs={'init': 'epsg:4326', 'no_defs': True})
+
+        assert expected_inner.equals(df_inner)
+        assert expected_right.equals(df_right)
+        assert expected_left.equals(df_left)
 
-    @unittest.skip("Not implemented")
+    @pytest.mark.skip("Not implemented")
     def test_sjoin_outer(self):
         df = sjoin(self.pointdf, self.polydf, how="outer")
-        self.assertEquals(df.shape, (21,8))
+        assert df.shape == (21, 8)
diff --git a/geopandas/tools/tests/test_tools.py b/geopandas/tools/tests/test_tools.py
index 6c73bf9..87b7c43 100644
--- a/geopandas/tools/tests/test_tools.py
+++ b/geopandas/tools/tests/test_tools.py
@@ -1,13 +1,15 @@
 from __future__ import absolute_import
 
 from shapely.geometry import Point, MultiPoint, LineString
+
 from geopandas import GeoSeries
 from geopandas.tools import collect
-from geopandas.tests.util import unittest
+
+import pytest
 
 
-class TestTools(unittest.TestCase):
-    def setUp(self):
+class TestTools:
+    def setup_method(self):
         self.p1 = Point(0, 0)
         self.p2 = Point(1, 1)
         self.p3 = Point(2, 2)
@@ -18,34 +20,34 @@ class TestTools(unittest.TestCase):
 
     def test_collect_single(self):
         result = collect(self.p1)
-        self.assert_(self.p1.equals(result))
+        assert self.p1.equals(result)
 
     def test_collect_single_force_multi(self):
         result = collect(self.p1, multi=True)
         expected = MultiPoint([self.p1])
-        self.assert_(expected.equals(result))
+        assert expected.equals(result)
 
     def test_collect_multi(self):
         result = collect(self.mp1)
-        self.assert_(self.mp1.equals(result))
+        assert self.mp1.equals(result)
 
     def test_collect_multi_force_multi(self):
         result = collect(self.mp1)
-        self.assert_(self.mp1.equals(result))
+        assert self.mp1.equals(result)
 
     def test_collect_list(self):
         result = collect([self.p1, self.p2, self.p3])
-        self.assert_(self.mpc.equals(result))
+        assert self.mpc.equals(result)
 
     def test_collect_GeoSeries(self):
         s = GeoSeries([self.p1, self.p2, self.p3])
         result = collect(s)
-        self.assert_(self.mpc.equals(result))
+        assert self.mpc.equals(result)
 
     def test_collect_mixed_types(self):
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             collect([self.p1, self.line1])
 
     def test_collect_mixed_multi(self):
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             collect([self.mpc, self.mp1])
diff --git a/geopandas/tools/util.py b/geopandas/tools/util.py
index c057eba..7d528e4 100644
--- a/geopandas/tools/util.py
+++ b/geopandas/tools/util.py
@@ -1,13 +1,5 @@
 import pandas as pd
-import geopandas as gpd
-from shapely.geometry import (
-    Point,
-    LineString,
-    Polygon,
-    MultiPoint,
-    MultiLineString,
-    MultiPolygon
-)
+from shapely.geometry import MultiPoint, MultiLineString, MultiPolygon
 from shapely.geometry.base import BaseGeometry
 
 _multi_type_map = {
@@ -23,7 +15,7 @@ def collect(x, multi=False):
     Parameters
     ----------
     x : an iterable or Series of Shapely geometries, a GeoSeries, or
-        a single Shapely geometry        
+        a single Shapely geometry
     multi : boolean, default False
         if True, force returned geometries to be Multi* even if they
         only have one component.
diff --git a/requirements.test.txt b/requirements.test.txt
index 02991bd..bf7a8c0 100644
--- a/requirements.test.txt
+++ b/requirements.test.txt
@@ -4,6 +4,8 @@ geopy==1.10.0
 matplotlib>=1.2.1
 descartes>=1.0
 mock>=1.0.1  # technically not need for python >= 3.3
+pytest>=3.1.0
 pytest-cov
-coveralls
+codecov
 rtree>=0.8
+pysal
diff --git a/requirements.txt b/requirements.txt
index 7c19f99..3f2efce 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,6 +3,3 @@ shapely>=1.2.18
 fiona>=1.0.1
 pyproj>=1.9.3
 six>=1.3.0
-# for Python 2.6 (environment marker support as of pip 6.0)
-unittest2 ; python_version < '2.7'
-ordereddict ; python_version < '2.7'
diff --git a/setup.py b/setup.py
index 20b4b83..6149dd5 100644
--- a/setup.py
+++ b/setup.py
@@ -35,9 +35,12 @@ else:
 data_files = []
 
 for item in os.listdir("geopandas/datasets"):
-    if os.path.isdir(os.path.join("geopandas/datasets/", item)) \
-            and not item.startswith('__'):
-        data_files.append(os.path.join("datasets", item, '*'))
+    if not item.startswith('__'):
+        if os.path.isdir(os.path.join("geopandas/datasets/", item)):
+            data_files.append(os.path.join("datasets", item, '*'))
+        elif item.endswith('.zip'):
+            data_files.append(os.path.join("datasets", item))
+
 
 setup(name='geopandas',
       version=versioneer.get_version(),
@@ -48,7 +51,8 @@ setup(name='geopandas',
       url='http://geopandas.org',
       long_description=LONG_DESCRIPTION,
       packages=['geopandas', 'geopandas.io', 'geopandas.tools',
-                'geopandas.datasets'],
+                'geopandas.datasets',
+                'geopandas.tests', 'geopandas.tools.tests'],
       package_data={'geopandas': data_files},
       install_requires=INSTALL_REQUIRES,
       cmdclass=versioneer.get_cmdclass())

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-grass/python-geopandas.git



More information about the Pkg-grass-devel mailing list