[pdal] 01/09: Imported Upstream version 1.5.0~rc1

Bas Couwenberg sebastic at debian.org
Mon Apr 3 19:25:26 UTC 2017


This is an automated email from the git hooks/post-receive script.

sebastic pushed a commit to branch master
in repository pdal.

commit 9ae234c83c66fb2e7a3733a4c5db59544745e0fc
Author: Bas Couwenberg <sebastic at xs4all.nl>
Date:   Mon Apr 3 20:29:42 2017 +0200

    Imported Upstream version 1.5.0~rc1
---
 .travis.yml                                        |    2 +-
 CMakeLists.txt                                     |    5 +-
 HOWTORELEASE.txt                                   |   43 +-
 LICENSE.txt                                        |    4 +-
 Vagrantfile                                        |  140 ---
 apps/pdal-config                                   |    4 +-
 apps/pdal.cpp                                      |    7 +-
 cmake/curl.cmake                                   |    2 +
 cmake/laszip.cmake                                 |    3 +
 cmake/modules/FindLASzip.cmake                     |   10 +-
 cmake/modules/FindMBSystem.cmake                   |   57 +
 cmake/options.cmake                                |    9 +-
 dimbuilder/DimBuilder.cpp                          |    4 +-
 doc/{workshop/pdal-introduction.rst => about.rst}  |  207 ++--
 doc/api/index.rst                                  |    3 +-
 doc/apps/delta.rst                                 |   10 +-
 doc/apps/density.rst                               |   16 +-
 doc/apps/diff.rst                                  |    8 +-
 doc/apps/ground.rst                                |    4 -
 doc/apps/index.rst                                 |   16 +-
 doc/apps/info.rst                                  |   35 +-
 doc/apps/merge.rst                                 |    5 +-
 doc/apps/pcl.rst                                   |   10 +-
 doc/apps/pipeline.rst                              |   69 +-
 doc/apps/random.rst                                |   22 +-
 doc/apps/sort.rst                                  |    8 +-
 doc/apps/split.rst                                 |   11 +-
 doc/apps/tindex.rst                                |   48 +-
 doc/apps/translate.rst                             |   42 +-
 doc/community.rst                                  |   14 +-
 doc/development/compilation/dependencies.rst       |   14 +-
 doc/development/compilation/index.rst              |    1 -
 doc/development/compilation/python.rst             |   35 -
 doc/development/docker.rst                         |   85 ++
 doc/development/index.rst                          |    3 +-
 doc/development/integration.rst                    |    3 +-
 doc/development/metadata.rst                       |  281 ++---
 doc/{tutorial => development}/overview.rst         |   57 +-
 doc/{tutorial => development}/pipeline.png         |  Bin
 doc/download.rst                                   |   12 +-
 doc/faq.rst                                        |    4 +
 doc/images/docker-maintenance-branch.png           |  Bin 0 -> 21212 bytes
 doc/images/docker-master-branch.png                |  Bin 0 -> 21749 bytes
 doc/images/foss4g-2017.png                         |  Bin 0 -> 21420 bytes
 doc/images/las-reproject-pgpointcloud.png          |  Bin 0 -> 71518 bytes
 doc/images/python-pdal-pipeline.png                |  Bin 0 -> 97244 bytes
 doc/images/reproject-merge-pipeline.png            |  Bin 72049 -> 165082 bytes
 doc/index.rst                                      |   43 +-
 doc/pipeline.rst                                   |   85 +-
 doc/python.rst                                     |  128 ++
 doc/quickstart.rst                                 |   31 +-
 doc/stages/filters.assign.rst                      |   41 +
 doc/stages/filters.cluster.rst                     |   41 +
 doc/stages/filters.crop.rst                        |    6 +-
 doc/stages/filters.groupby.rst                     |   29 +
 doc/stages/filters.locate.rst                      |   38 +
 .../{filters.attribute.rst => filters.overlay.rst} |   53 +-
 doc/stages/filters.pmf.rst                         |   31 +
 doc/stages/filters.predicate.rst                   |    8 +
 doc/stages/filters.programmable.rst                |  129 +-
 doc/stages/filters.range.rst                       |   60 +
 doc/stages/filters.reprojection.rst                |   46 +-
 doc/stages/filters.smrf.rst                        |   35 +-
 doc/stages/filters.sort.rst                        |   16 +-
 doc/stages/ranges.rst                              |   57 -
 doc/stages/readers.faux.rst                        |   28 +-
 doc/stages/readers.gdal.rst                        |   19 +-
 doc/stages/readers.las.rst                         |   17 +-
 doc/stages/readers.mbio.rst                        |   53 +
 doc/stages/readers.nitf.rst                        |    8 +
 doc/stages/readers.ply.rst                         |   10 +-
 doc/stages/readers.pts.rst                         |    3 +-
 doc/stages/readers.tindex.rst                      |   11 +-
 doc/stages/writers.derivative.rst                  |   81 --
 doc/stages/writers.gdal.rst                        |   13 +-
 doc/stages/writers.las.rst                         |   88 +-
 doc/stages/writers.oci.rst                         |    4 +
 doc/stages/writers.p2g.rst                         |   84 --
 doc/stages/writers.pgpointcloud.rst                |    2 +-
 doc/stages/writers.rialto.rst                      |   48 -
 doc/stages/writers.rst                             |    2 +-
 doc/tutorial/clipping-with-shapefile.rst           |   16 +-
 doc/tutorial/index.rst                             |    2 +-
 doc/tutorial/las.rst                               |  608 ++++++++++
 doc/tutorial/pcl_ground.rst                        |   17 +-
 doc/tutorial/pcl_spec.rst                          |   16 +-
 doc/workshop/agenda.rst                            |    2 +-
 .../exercises/analysis/boundary/boundary.rst       |    2 +-
 .../exercises/analysis/clipping/clipping.json      |    2 +-
 .../exercises/analysis/clipping/clipping.rst       |    9 +-
 .../exercises/analysis/dtm/dtm-run-command.txt     |    2 +-
 doc/workshop/exercises/analysis/dtm/dtm.rst        |    8 +-
 .../exercises/analysis/dtm/{p2g.json => gdal.json} |    2 +-
 doc/workshop/exercises/info/metadata.rst           |    4 +-
 doc/workshop/includes/substitutions.rst            |    7 +-
 doc/workshop/index.rst                             |    1 -
 doc/workshop/slides/source/clipping.rst            |    4 +-
 doc/workshop/slides/source/dtm.rst                 |    6 +-
 doc/workshop/slides/source/pdal_intro.rst          |    2 +-
 filters/ApproximateCoplanarFilter.cpp              |    9 +-
 filters/AssignFilter.cpp                           |  155 +++
 io/PlyWriter.hpp => filters/AssignFilter.hpp       |   39 +-
 filters/{OutlierFilter.hpp => ClusterFilter.cpp}   |   72 +-
 filters/{OutlierFilter.hpp => ClusterFilter.hpp}   |   39 +-
 filters/ColorinterpFilter.cpp                      |   70 +-
 filters/ColorizationFilter.cpp                     |   87 +-
 filters/ComputeRangeFilter.cpp                     |    9 +-
 filters/CropFilter.cpp                             |  168 +--
 filters/CropFilter.hpp                             |   36 +-
 filters/DividerFilter.cpp                          |   33 +-
 filters/EigenvaluesFilter.cpp                      |    2 +-
 filters/FerryFilter.cpp                            |   24 +-
 filters/{OutlierFilter.hpp => GroupByFilter.cpp}   |   84 +-
 filters/{OutlierFilter.hpp => GroupByFilter.hpp}   |   37 +-
 filters/HAGFilter.cpp                              |    7 +-
 filters/IQRFilter.cpp                              |   26 +-
 .../{MortonOrderFilter.cpp => LocateFilter.cpp}    |  106 +-
 filters/{OutlierFilter.hpp => LocateFilter.hpp}    |   36 +-
 filters/MADFilter.cpp                              |   14 +-
 filters/MongusFilter.cpp                           |   24 +-
 filters/MortonOrderFilter.cpp                      |    3 +-
 filters/NormalFilter.cpp                           |   12 +-
 filters/OutlierFilter.cpp                          |   92 +-
 filters/OutlierFilter.hpp                          |   16 +-
 filters/{AttributeFilter.cpp => OverlayFilter.cpp} |  171 ++-
 filters/{AttributeFilter.hpp => OverlayFilter.hpp} |   36 +-
 filters/PMFFilter.cpp                              |  312 +++--
 filters/PMFFilter.hpp                              |   82 +-
 filters/RangeFilter.cpp                            |  131 +-
 filters/RangeFilter.hpp                            |   38 +-
 filters/ReprojectionFilter.cpp                     |   52 +-
 filters/SMRFilter.cpp                              | 1264 +++++++-------------
 filters/SMRFilter.hpp                              |  132 +-
 filters/SampleFilter.cpp                           |    2 +-
 filters/SortFilter.cpp                             |   52 +
 filters/SortFilter.hpp                             |   39 +-
 filters/StatsFilter.cpp                            |   15 +-
 filters/TransformationFilter.cpp                   |    4 +-
 filters/private/DimRange.cpp                       |  168 +++
 .../{OutlierFilter.hpp => private/DimRange.hpp}    |   75 +-
 filters/private/crop/Point.cpp                     |   16 +-
 filters/private/crop/Point.hpp                     |    7 +-
 io/BpfCompressor.cpp                               |    6 +-
 io/BpfCompressor.hpp                               |    9 +-
 io/BpfHeader.cpp                                   |    7 +-
 io/BpfHeader.hpp                                   |    6 +
 io/BpfReader.cpp                                   |   43 +-
 io/BpfWriter.cpp                                   |   79 +-
 io/DerivativeWriter.cpp                            |  191 ---
 io/DerivativeWriter.hpp                            |  103 --
 io/FauxReader.cpp                                  |   84 +-
 io/FauxReader.hpp                                  |    9 +-
 io/GDALGrid.cpp                                    |   67 +-
 io/GDALGrid.hpp                                    |    9 +
 io/GDALReader.cpp                                  |    4 +-
 io/GDALWriter.cpp                                  |  131 +-
 io/GDALWriter.hpp                                  |   24 +-
 io/GeotiffSupport.cpp                              |  250 ++--
 io/GeotiffSupport.hpp                              |   56 +-
 io/Ilvis2MetadataReader.cpp                        |   71 +-
 io/Ilvis2MetadataReader.hpp                        |    8 +-
 io/Ilvis2Reader.cpp                                |  104 +-
 io/Ilvis2Reader.hpp                                |    7 +-
 io/LasHeader.cpp                                   |   74 +-
 io/LasHeader.hpp                                   |    6 +
 io/LasReader.cpp                                   |   97 +-
 io/LasSummaryData.cpp                              |    2 +-
 io/LasSummaryData.hpp                              |    6 +
 io/LasUtils.cpp                                    |   22 +-
 io/LasUtils.hpp                                    |    6 +
 io/LasWriter.cpp                                   |  241 ++--
 io/LasWriter.hpp                                   |   10 +-
 io/LasZipPoint.cpp                                 |   10 +-
 io/LasZipPoint.hpp                                 |   14 +-
 io/OptechCommon.hpp                                |   11 -
 io/OptechReader.cpp                                |   21 +-
 io/PlyReader.cpp                                   |  205 ++--
 io/PlyReader.hpp                                   |    6 +
 io/PlyWriter.cpp                                   |   71 +-
 io/PlyWriter.hpp                                   |    8 +-
 io/PtsReader.cpp                                   |   14 +-
 io/QfitReader.cpp                                  |   26 +-
 io/QfitReader.hpp                                  |    9 -
 io/SbetReader.cpp                                  |    2 +-
 io/TIndexReader.cpp                                |   54 +-
 io/TerrasolidReader.cpp                            |   11 +-
 io/TerrasolidReader.hpp                            |    9 -
 io/TextReader.cpp                                  |  133 +-
 io/TextReader.hpp                                  |   20 +-
 io/TextWriter.cpp                                  |   14 +-
 java/README.md                                     |   63 +-
 java/build.sbt                                     |   10 +-
 java/project/{Environment.scala => Commands.scala} |   21 +-
 java/project/Environment.scala                     |    4 +-
 java/scripts/publish-212.sh                        |   30 +-
 java/scripts/publish-all.sh                        |    6 +-
 java/scripts/publish-javastyle.sh                  |   30 +-
 java/scripts/publish-local-212.sh                  |   19 +
 java/scripts/publish-local.sh                      |   16 +
 java/scripts/publish.sh                            |   30 +-
 kernels/GroundKernel.cpp                           |    8 +-
 kernels/InfoKernel.cpp                             |  112 +-
 kernels/PipelineKernel.cpp                         |   34 +-
 kernels/PipelineKernel.hpp                         |    3 +
 kernels/RandomKernel.cpp                           |    5 +-
 kernels/SortKernel.cpp                             |   10 +-
 kernels/TIndexKernel.cpp                           |   16 +-
 kernels/TranslateKernel.cpp                        |   97 +-
 kernels/TranslateKernel.hpp                        |    2 +-
 pdal/EigenUtils.cpp                                |  118 +-
 pdal/EigenUtils.hpp                                |  188 ++-
 pdal/FlexWriter.hpp                                |   11 +-
 pdal/GDALUtils.cpp                                 |   32 +-
 pdal/GDALUtils.hpp                                 |    2 +
 pdal/Geometry.cpp                                  |   53 +-
 pdal/Geometry.hpp                                  |    4 +-
 pdal/KDIndex.hpp                                   |   40 +-
 pdal/Kernel.cpp                                    |  132 +-
 pdal/Kernel.hpp                                    |    9 +-
 pdal/Log.cpp                                       |   34 +-
 pdal/Log.hpp                                       |    5 +-
 pdal/Options.cpp                                   |   24 +-
 pdal/Options.hpp                                   |    9 +-
 pdal/PDALUtils.hpp                                 |    1 -
 pdal/PipelineExecutor.cpp                          |    5 +-
 pdal/PipelineManager.cpp                           |  207 +++-
 pdal/PipelineManager.hpp                           |   30 +-
 pdal/PipelineReaderJSON.cpp                        |   15 +-
 pdal/PipelineReaderXML.cpp                         |   53 +-
 pdal/PipelineWriter.cpp                            |   27 +-
 pdal/PluginManager.cpp                             |    2 +-
 pdal/PointLayout.cpp                               |   19 +
 pdal/PointLayout.hpp                               |    4 +
 pdal/PointTable.cpp                                |   28 +-
 pdal/PointTable.hpp                                |   10 +-
 pdal/PointView.hpp                                 |    4 +-
 pdal/PointViewIter.hpp                             |    4 +-
 pdal/Polygon.cpp                                   |   14 +-
 pdal/Polygon.hpp                                   |    5 +-
 pdal/Segmentation.cpp                              |  146 +++
 .../cpd/kernel/Cpd.hpp => pdal/Segmentation.hpp    |   61 +-
 pdal/SpatialReference.cpp                          |   27 +-
 pdal/Stage.cpp                                     |   97 +-
 pdal/Stage.hpp                                     |   35 +-
 pdal/StageFactory.cpp                              |   21 +-
 pdal/gitsha.cpp                                    |    2 +-
 pdal/pdal_config.cpp                               |    2 +-
 pdal/pdal_types.hpp                                |    5 +-
 pdal/plang/BufferedInvocation.cpp                  |  121 --
 pdal/plang/CMakeLists.txt                          |    1 -
 pdal/plang/Environment.cpp                         |   63 +-
 pdal/plang/Invocation.cpp                          |  185 ++-
 pdal/plang/Invocation.hpp                          |   14 +-
 pdal/private/PipelineReaderXML.hpp                 |    4 +-
 pdal/util/Bounds.cpp                               |   16 +-
 pdal/util/Bounds.hpp                               |   23 +-
 .../crop/Point.hpp => pdal/util/NullOStream.hpp    |   46 +-
 pdal/util/ProgramArgs.hpp                          |  128 +-
 pdal/util/Utils.hpp                                |   66 +-
 plugins/CMakeLists.txt                             |    8 +-
 plugins/cpd/CMakeLists.txt                         |   22 +-
 plugins/cpd/kernel/{Cpd.cpp => CpdKernel.cpp}      |  185 ++-
 plugins/cpd/kernel/{Cpd.hpp => CpdKernel.hpp}      |   24 +-
 plugins/cpd/test/CpdKernelTest.cpp                 |  118 --
 plugins/greyhound/CMakeLists.txt                   |    4 +-
 plugins/greyhound/io/GreyhoundReader.cpp           |   78 +-
 plugins/greyhound/io/GreyhoundReader.hpp           |    2 -
 plugins/greyhound/test/GreyhoundReaderTest.cpp     |   10 +-
 plugins/hexbin/CMakeLists.txt                      |    5 +-
 plugins/hexbin/kernel/DensityKernel.cpp            |   19 +-
 plugins/hexbin/kernel/DensityKernel.hpp            |    5 +
 plugins/icebridge/io/Hdf5Handler.cpp               |   18 +-
 plugins/icebridge/io/Hdf5Handler.hpp               |    6 +
 plugins/icebridge/io/IcebridgeReader.cpp           |   19 +-
 plugins/matlab/io/MatlabWriter.cpp                 |   58 +-
 plugins/mbio/CMakeLists.txt                        |   27 +
 .../mbio/io/MbError.cpp                            |  103 +-
 .../LogTest.cpp => plugins/mbio/io/MbError.hpp     |   24 +-
 plugins/mbio/io/MbFormat.cpp                       |  194 +++
 .../mbio/io/MbFormat.hpp                           |   26 +-
 plugins/mbio/io/MbReader.cpp                       |  238 ++++
 .../mbio/io/MbReader.hpp                           |   89 +-
 .../mbio/test/MBSystemTest.cpp                     |   35 +-
 plugins/mrsid/io/MrsidReader.cpp                   |    2 +-
 plugins/nitf/CMakeLists.txt                        |    6 +-
 plugins/nitf/io/MetadataReader.cpp                 |   74 +-
 plugins/nitf/io/MetadataReader.hpp                 |    6 +
 plugins/nitf/io/NitfFileReader.cpp                 |   38 +-
 plugins/nitf/io/NitfFileReader.hpp                 |    6 +
 plugins/nitf/io/NitfFileWriter.cpp                 |   40 +-
 plugins/nitf/io/NitfFileWriter.hpp                 |   10 +-
 plugins/nitf/io/NitfReader.cpp                     |   15 +-
 plugins/nitf/io/NitfWriter.cpp                     |   24 +-
 plugins/oci/io/OciCommon.cpp                       |    3 +-
 plugins/oci/io/OciCommon.hpp                       |   18 -
 plugins/oci/io/OciReader.cpp                       |   28 +-
 plugins/oci/io/OciWriter.cpp                       |  188 +--
 plugins/oci/io/OciWriter.hpp                       |    1 +
 plugins/p2g/CMakeLists.txt                         |   14 -
 plugins/p2g/io/P2gWriter.cpp                       |  203 ----
 plugins/p2g/io/P2gWriter.hpp                       |   92 --
 plugins/pcl/filters/PCLBlock.cpp                   |    4 +-
 plugins/pcl/io/PcdReader.cpp                       |    2 +-
 plugins/pcl/kernel/PCLKernel.cpp                   |   11 +-
 plugins/pcl/kernel/SmoothKernel.cpp                |   10 +-
 plugins/pgpointcloud/io/PgReader.cpp               |   13 +-
 plugins/pgpointcloud/io/PgWriter.cpp               |   35 +-
 .../pgpointcloud/test/PgpointcloudWriterTest.cpp   |   17 +
 plugins/python/CMakeLists.txt                      |    4 +-
 plugins/python/filters/CMakeLists.txt              |    4 +-
 plugins/python/filters/PredicateFilter.cpp         |   14 +-
 plugins/python/filters/PredicateFilter.hpp         |    7 +-
 plugins/python/filters/ProgrammableFilter.cpp      |   10 +-
 plugins/python/filters/ProgrammableFilter.hpp      |    7 +-
 plugins/python/test/ProgrammableFilterTest.cpp     |   61 +-
 plugins/rxp/CMakeLists.txt                         |    4 +-
 plugins/rxp/test/RxpReaderTest.cpp                 |    1 -
 plugins/sqlite/io/SQLiteReader.cpp                 |   24 +-
 plugins/sqlite/io/SQLiteWriter.cpp                 |   25 +-
 python/README.rst                                  |    4 +-
 python/VERSION.txt                                 |    2 +-
 python/pdal/__init__.py                            |    2 +-
 python/test/test_pipeline.py                       |   14 +-
 scripts/appveyor/config.cmd                        |    1 -
 scripts/ci/script.sh                               |    3 -
 scripts/docker/Dockerfile                          |   25 +-
 scripts/docker/dependencies/Dockerfile             |  207 ++--
 scripts/docker/docbuild/Dockerfile                 |    2 -
 scripts/linux-install-scripts/pdal.sh              |    1 -
 test/data/gdal/grid2.txt                           |    8 +
 test/data/las/spec_3.las                           |  Bin 0 -> 898 bytes
 test/data/logs/logtest_1.txt                       |    1 -
 test/data/logs/logtest_123.txt                     |    5 -
 test/data/logs/logtest_2.txt                       |    2 -
 test/data/logs/logtest_3.txt                       |    2 -
 test/data/logs/t1                                  |    2 +
 test/data/mbio/mbf_em300raw.mb56                   |  Bin 0 -> 127066 bytes
 test/data/pipeline/assign.json.in                  |   14 +
 test/data/pipeline/options.json.in                 |   15 +
 .../{attribute.json.in => overlay.json.in}         |    9 +-
 test/data/pts/autzen.pts                           |   11 +
 test/data/text/crlf_test.txt                       |   11 +
 test/temp/SbetWriterTest.sbet                      |  Bin 272 -> 0 bytes
 test/temp/colorized.las                            |  Bin 36687 -> 0 bytes
 test/temp/crop-wkt-2d-classification.las           |  Bin 1825 -> 0 bytes
 test/temp/foo.las                                  |  Bin 27257 -> 0 bytes
 test/temp/issue895.sqlite                          |  Bin 3072 -> 0 bytes
 test/temp/meta.json                                |   91 --
 test/temp/mylog_three.txt                          |    1 -
 test/temp/out.las                                  |  Bin 3740744 -> 0 bytes
 test/temp/out.ply                                  |  Bin 21176 -> 0 bytes
 test/temp/out2.las                                 |  Bin 27353 -> 0 bytes
 test/temp/outfile.txt                              |    3 -
 test/temp/simple.las                               |  Bin 68425 -> 0 bytes
 test/temp/spat.sqlite                              |  Bin 5808128 -> 0 bytes
 test/temp/spver.sqlite                             |    0
 .../temp-SqliteWriterTest_test_simple_las.sqlite   |  Bin 5824512 -> 0 bytes
 test/temp/temp_nitf.ntf                            |  Bin 37941 -> 0 bytes
 test/temp/test.bpf                                 |  Bin 21756 -> 0 bytes
 test/temp/test_1.bpf                               |  Bin 16412 -> 0 bytes
 test/temp/test_1.las                               |  Bin 12297 -> 0 bytes
 test/temp/test_1.ntf                               |  Bin 2955 -> 0 bytes
 test/temp/test_2.bpf                               |  Bin 16412 -> 0 bytes
 test/temp/test_2.las                               |  Bin 12297 -> 0 bytes
 test/temp/test_2.ntf                               |  Bin 2955 -> 0 bytes
 test/temp/test_3.bpf                               |  Bin 16412 -> 0 bytes
 test/temp/test_3.las                               |  Bin 12297 -> 0 bytes
 test/temp/test_3.ntf                               |  Bin 2955 -> 0 bytes
 test/temp/test_flex.bpf                            |  Bin 47652 -> 0 bytes
 test/temp/test_flex.las                            |  Bin 36437 -> 0 bytes
 test/temp/test_flex.ntf                            |  Bin 5335 -> 0 bytes
 test/temp/tmp.bpf                                  |  Bin 47768 -> 0 bytes
 test/temp/tmp.las                                  |  Bin 82155 -> 0 bytes
 test/temp/tmp.tif                                  |  Bin 808 -> 0 bytes
 test/temp/trimtest.las                             |  Bin 3740744 -> 0 bytes
 test/temp/triple.las                               |  Bin 1497 -> 0 bytes
 test/temp/utm17.txt                                |   11 -
 test/unit/CMakeLists.txt                           |   14 +-
 test/unit/EigenTest.cpp                            |  139 ++-
 test/unit/KernelTest.cpp                           |   22 +-
 test/unit/LogTest.cpp                              |   57 +-
 test/unit/OldPCLBlockTest.cpp                      |  274 +++--
 test/unit/PluginManagerTest.cpp                    |    9 +-
 test/unit/PointTableTest.cpp                       |   28 +-
 test/unit/SegmentationTest.cpp                     |   97 ++
 test/unit/StageFactoryTest.cpp                     |    5 +
 test/unit/apps/AppTest.cpp                         |    6 +-
 test/unit/apps/RandomTest.cpp                      |   12 +-
 test/unit/apps/TranslateTest.cpp                   |  126 +-
 test/unit/apps/pcpipelineTest.cpp                  |  299 -----
 test/unit/apps/pcpipelineTestJSON.cpp              |  102 +-
 test/unit/filters/AssignFilterTest.cpp             |  129 ++
 test/unit/filters/CropFilterTest.cpp               |  110 +-
 .../{LogTest.cpp => filters/GroupByFilterTest.cpp} |   40 +-
 .../unit/filters/LocateFilterTest.cpp              |   87 +-
 ...tributeFilterTest.cpp => OverlayFilterTest.cpp} |   87 +-
 test/unit/filters/SortFilterTest.cpp               |   29 +-
 test/unit/io/FauxReaderTest.cpp                    |   70 ++
 test/unit/io/GDALWriterTest.cpp                    |   94 ++
 test/unit/io/Ilvis2ReaderWithMDReaderTest.cpp      |    3 +-
 test/unit/io/LasWriterTest.cpp                     |  174 ++-
 test/unit/io/TextReaderTest.cpp                    |  113 +-
 tools/lasdump/Dumper.cpp                           |    4 +-
 tools/nitfwrap/NitfWrap.cpp                        |    5 +-
 vendor/arbiter/arbiter.cpp                         | 1251 ++++++++++++-------
 vendor/arbiter/arbiter.hpp                         |  627 +++++++---
 vendor/nanoflann/nanoflann.hpp                     |  353 +++---
 407 files changed, 11232 insertions(+), 8124 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index c62cf12..a2b2451 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -24,7 +24,7 @@ script:
 
 after_success:
   - echo "secure travis:" "$TRAVIS_SECURE_ENV_VARS"
-  - sh -c 'if test "$TRAVIS_SECURE_ENV_VARS" = "true" -a "$TRAVIS_BRANCH" = "1.4-maintenance" -a "$PDAL_OPTIONAL_COMPONENTS" = "all"; then echo "publish website"; ./scripts/ci/build_docs.sh; ./scripts/ci/add_deploy_key.sh; ./scripts/ci/deploy_website.sh $TRAVIS_BUILD_DIR/doc/build /tmp; fi'
+  - sh -c 'if test "$TRAVIS_SECURE_ENV_VARS" = "true" -a "$TRAVIS_BRANCH" = "1.5-maintenance" -a "$PDAL_OPTIONAL_COMPONENTS" = "all"; then echo "publish website"; ./scripts/ci/build_docs.sh; ./scripts/ci/add_deploy_key.sh; ./scripts/ci/deploy_website.sh $TRAVIS_BUILD_DIR/doc/build /tmp; fi'
 
 notifications:
   on_success: always
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 63bc7ef..a91cee1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -28,14 +28,14 @@ mark_as_advanced(CMAKE_VERBOSE_MAKEFILE)
 
 # the next line is the ONLY place in the entire pdal system where
 # the version info is hard-coded
-set(PDAL_VERSION_STRING "1.4.0" CACHE STRING "PDAL version" FORCE)
+set(PDAL_VERSION_STRING "1.5.0" CACHE STRING "PDAL version" FORCE)
 
 DISSECT_VERSION()
 GET_OS_INFO()
 SET_INSTALL_DIRS()
 
 set(PDAL_API_VERSION "4")
-set(PDAL_BUILD_VERSION "5.0.0")
+set(PDAL_BUILD_VERSION "5.1.0")
 
 # Name of C++ library
 
@@ -231,6 +231,7 @@ target_include_directories(${PDAL_BASE_LIB_NAME}
         ${LIBXML2_INCLUDE_DIR}
     INTERFACE
         ${GDAL_INCLUDE_DIR}
+        ${LASZIP_INCLUDE_DIR}
 )
 target_link_libraries(${PDAL_BASE_LIB_NAME}
     PUBLIC
diff --git a/HOWTORELEASE.txt b/HOWTORELEASE.txt
index e237866..77ae096 100644
--- a/HOWTORELEASE.txt
+++ b/HOWTORELEASE.txt
@@ -4,7 +4,7 @@ Steps for Making a PDAL Release
 
 :Author: Howard Butler
 :Contact: howard at hobu.co
-:Date: 09/11/2015
+:Date: 03/14/2017
 
 This document describes the process for releasing a new version of PDAL.
 
@@ -35,6 +35,8 @@ Release Process
 
     "$TRAVIS_BRANCH" = "1.2-maintenance"
 
+  - Make a DockerHub build entry for the new release branch.
+
 
 2) Update README to include any relevant info about the release that
    might have changed.
@@ -93,15 +95,42 @@ Release Process
 11) Upload Python extension to PyPI
 
 12) Publish JNI Bindings
-    What you need: 
+    What you need:
         - an account on sonatype (https://issues.sonatype.org/secure/Signup!default.jspa)
-        - ~/.sbt/0.13/sonatype.sbt file with the following content: 
+        - ~/.sbt/0.13/sonatype.sbt file with the following content:
             credentials += Credentials("Sonatype Nexus Repository Manager",
                            "oss.sonatype.org",
                            "<your username>",
                            "<your password>")
 
-    Sonatype publish:
-    ::        
-          export PDAL_VERSION_SUFFIX="" # -SNAPSHOT by default
-          cd ./java; ./scripts/publish-all.sh
+    The Sonatype publishing process is described below (all commands are run from the java directory: cd PDAL/java):
+        - Publishing snapshots:
+            Snapshots can be published without a PGP signature; they go to a snapshot repo and allow immediate snapshot updates.
+            To publish everything to a local repo, use:
+              - ./scripts/publish-local.sh (publishes the Scala 2.11 version)
+              - ./scripts/publish-local-212.sh (publishes the Scala 2.12 version)
+            To publish everything to the Sonatype snapshot repo, use:
+              - ./scripts/publish-all.sh
+            Summary:
+              - Run ./scripts/publish-all.sh and everything is available in the snapshot repository
+        - Publishing releases:
+            To publish everything to the Sonatype staging repo, use:
+              - ./scripts/publish-all.sh --suffix="" --signed
+               `suffix` defines the version suffix (for example `--suffix="-RC1"`)
+               `signed` means the jars are uploaded to the Sonatype staging repo with a PGP signature
+            A staging repo is a special repository holding artifacts in a pre-release state.
+              - Go to the staging repositories panel: https://oss.sonatype.org/#stagingRepositories (log in with your Sonatype username/password)
+              - Filter by package name (pdal in our case) and select the staging repo
+              - Press the Close button at the top of the repository table. This runs package
+                validation and closes the staging repo if validation succeeds
+              - After a successful close, press the Release button. The artifacts are immediately published to the
+                Sonatype releases repo, synced with Maven Central in ~10 minutes, and indexed in ~2 hours here:
+                http://search.maven.org/#search%7Cga%7C1%7Cio.pdal
+            Full official guide: http://central.sonatype.org/pages/ossrh-guide.html
+            Official sbt doc on deploying to Sonatype: http://www.scala-sbt.org/release/docs/Using-Sonatype.html
+            The official Sonatype guide includes screenshots of https://oss.sonatype.org/#stagingRepositories and explains
+            what to do after jars have been published to a staging repo (in our case, after the ./scripts/publish-all.sh --suffix="" --signed step)
+            Summary:
+              - Run ./scripts/publish-all.sh --suffix="" --signed to publish everything to the staging repo
+              - Go to the Sonatype panel https://oss.sonatype.org/#stagingRepositories and release the jars
+              - Wait ~10 minutes for the jars to be published to Maven Central
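
Condensed, the release flow above is a short shell sequence. This is a sketch only; it assumes
the ~/.sbt/0.13/sonatype.sbt credentials file described in step 12 is already in place and that
a PGP key is configured for signing:

    # from the root of the PDAL source tree
    cd java
    # stage signed release jars on Sonatype (empty suffix = final release)
    ./scripts/publish-all.sh --suffix="" --signed
    # then close and release the staging repo in the web UI:
    #   https://oss.sonatype.org/#stagingRepositories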
diff --git a/LICENSE.txt b/LICENSE.txt
index a72ac4c..78b62f8 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,6 +1,6 @@
 Unless otherwise indicated, all files in the PDAL distribution are
 
-  Copyright (c) 2015, Hobu, Inc. (howard at hobu.co)
+  Copyright (c) 2017, Hobu, Inc. (howard at hobu.co)
 
 and are released under the terms of the BSD open source license.
 
@@ -10,7 +10,7 @@ This file contains the license terms of all files within PDAL.
 Overall PDAL license (BSD)
 ===========================
 
- Copyright (c) 2015, Hobu, Inc. (howard at hobu.co)
+ Copyright (c) 2017, Hobu, Inc. (howard at hobu.co)
 
  All rights reserved.
 
diff --git a/Vagrantfile b/Vagrantfile
deleted file mode 100644
index 001b915..0000000
--- a/Vagrantfile
+++ /dev/null
@@ -1,140 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-require 'socket'
-require 'ipaddr'
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-
-# Evaluate a block passing one argument, an integer plucked from an environment
-# variable. If that integer is zero, or the environment variable evaluates to
-# zero with String#to_i, then don't evaluate the block.
-def with_nonzero_integer_envvar(envvar, default = 0)
-  integer = ENV[envvar] ? ENV[envvar].to_i : default
-  if integer == 0
-    # noop
-  else
-    yield integer
-  end
-end
-
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-  config.vm.box = "trusty64"
-
-  config.vm.hostname = "pdal-vagrant"
-  config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box"
-  config.vm.host_name = "pdal-vagrant"
-
-  # Set the bash environment variable PDAL_VAGRANT_SSH_FORWARD_AGENT to any
-  # value to turn on ssh forwarding. This allows you to use your host machine's
-  # ssh credentials inside your guest box, for example when interacting with
-  # private github repositories.
-  #
-  # To confirm that ssh fowarding is working, run the following from inside the
-  # guest machine:
-  #
-  #   ssh -T git at github.com
-  #
-  # You should see something like "Hi <your name here>! You've successfully
-  # authenticated, but GitHub does not provide shell access."
-  #
-  # You may need to run `ssh-add` on your host machine to add your private key
-  # identities to the authentication agent.
-  if ENV['PDAL_VAGRANT_SSH_FORWARD_AGENT']
-    config.ssh.forward_agent = true
-  end
-
-  # Set PDAL_VAGRANT_PORT_80_FORWARD to customize the target port
-  # for the guest port 80. To disable guest port 80 forwarding, set
-  # PDAL_VAGRANT_PORT_80_FORWARD to any value that cannot be parsed to
-  # an integer with ruby's String#to_i method (e.g. 'false').
-  with_nonzero_integer_envvar('PDAL_VAGRANT_PORT_80_FORWARD', 8080) do |host_port|
-    config.vm.network :forwarded_port, guest: 80, host: host_port
-  end
-
-  config.vm.provider :virtualbox do |vb|
-    # Set PDAL_VAGRANT_VIRTUALBOX_MEMORY to customize the virtualbox vm memory
-    with_nonzero_integer_envvar('PDAL_VAGRANT_VIRTUALBOX_MEMORY', 4096) do |memory|
-      vb.customize ["modifyvm", :id, "--memory", memory]
-    end
-    # Set PDAL_VAGRANT_VIRTUALBOX_CPUS to customize the virtualbox vm cpus
-    with_nonzero_integer_envvar('PDAL_VAGRANT_VIRTUALBOX_CPUS', 2) do |cpus|
-      vb.customize ["modifyvm", :id, "--cpus", cpus]
-    end
-    # Set PDAL_VAGRANT_VIRTUALBOX_IOAPIC to customize the virtualbox vm ioapic
-    vb.customize ["modifyvm", :id, "--ioapic", ENV['PDAL_VAGRANT_VIRTUALBOX_IOAPIC'] || "on"]
-    vb.name = "pdal-vagrant"
-
-    # Set PDAL_VAGRANT_VIRTUALBOX_ENABLE_GUI to turn on the gui
-    if ENV['PDAL_VAGRANT_VIRTUALBOX_ENABLE_GUI']
-      vb.gui = true
-    end
-  end
-
-
-  if RUBY_PLATFORM.include? "darwin"
-    # If on a Mac, set PDAL_VAGRANT_PRIVATE_NETWORK_IP to customize
-    # the private network's IP. Set to a non-IP value to disable private networking.
-    if ENV['PDAL_VAGRANT_PRIVATE_NETWORK_IP']
-      begin
-        ipaddr = IPAddr.new ENV['PDAL_VAGRANT_PRIVATE_NETWORK_IP']
-      rescue ArgumentError
-        # noop
-      else
-        config.vm.network "private_network", ip: ipaddr
-      end
-    else
-      config.vm.network "private_network", ip: "192.168.10.4"
-    end
-
-    # If on a Mac, set PDAL_VAGRANT_DISABLE_NFS to false to disable nfs mounting
-    use_nfs = !ENV['PDAL_VAGRANT_DISABLE_NFS']
-    config.vm.synced_folder ".", "/vagrant", nfs: use_nfs
-
-    if Socket.gethostname.include? "pyro" # Howard's machine
-      config.vm.synced_folder "/Users/hobu/dev/git/pointcloud", "/pointcloud", nfs: use_nfs
-    end
-  end
-
-  if RUBY_PLATFORM.include? "win32"
-    config.vm.synced_folder ".", "/vagrant", type: "smb"
-  end
-
-  ppaRepos = [
-    "ppa:ubuntugis/ubuntugis-unstable",
-  ]
-
-	  pkg_cmd = ""
-
-	  pkg_cmd << "apt-get update -qq; apt-get install -q -y python-software-properties; "
-
-	  if ppaRepos.length > 0
-		  ppaRepos.each { |repo| pkg_cmd << "add-apt-repository -y " << repo << " ; " }
-		  pkg_cmd << "apt-get update -qq; "
-	  end
-
-	  config.vm.provision :shell, :inline => pkg_cmd
-      scripts_path = "scripts/linux-install-scripts/"
-      config.vm.provision :shell, :path => scripts_path << "packages.sh"
-      pkg_cmd = ""
-
-	  # install packages we need we need
-    scripts = [
-      "startup.sh",
-      "libgeotiff.sh",
-      "nitro.sh",
-      "hexer.sh",
-      "lazperf.sh",
-      "p2g.sh",
-      "laszip.sh",
-      "pcl.sh",
-      "websocketpp.sh",
-      "geowave.sh",
-      "pdal.sh",
-      "pgpointcloud.sh"
-    ];
-    scripts.each { |script| config.vm.provision :shell, :path => "scripts/linux-install-scripts/" << script }
-end
diff --git a/apps/pdal-config b/apps/pdal-config
index 39649c1..bb890ca 100644
--- a/apps/pdal-config
+++ b/apps/pdal-config
@@ -46,7 +46,7 @@ case $1 in
     ;;
 
   --includes)
-    echo -I/usr/include -I/usr/include/gdal -I/usr/include/libxml2 -I/usr/include -I/usr/include
+    echo -I/usr/include -I/usr/include/gdal -I/usr/include/libxml2 -I/usr/include -I/usr/include/laszip
     ;;
 
   --cflags)
@@ -58,7 +58,7 @@ case $1 in
     ;;
 
   --version)
-    echo 1.4.0
+    echo 1.5.0
     ;;
 
   --python-version)
diff --git a/apps/pdal.cpp b/apps/pdal.cpp
index ad6e78e..3193196 100644
--- a/apps/pdal.cpp
+++ b/apps/pdal.cpp
@@ -86,6 +86,7 @@ private:
     bool m_showVersion;
     std::string m_showOptions;
     bool m_showJSON;
+    std::string m_log;
 };
 
 
@@ -206,7 +207,8 @@ void App::outputOptions(std::string const& stageName, std::ostream& strm)
 
     if (!m_showJSON)
     {
-        strm  << stageName << " -- " << PluginManager::link(stageName) << std::endl;
+        strm  << stageName << " -- " << PluginManager::link(stageName) <<
+            std::endl;
         strm  << headline << std::endl;
 
         args.dump2(strm , 2, 6, headline.size());
@@ -279,6 +281,8 @@ void App::addArgs(ProgramArgs& args)
     args.add("version", "Show program version", m_showVersion);
     args.add("options", "Show options for specified driver (or 'all')",
         m_showOptions);
+    args.add("log", "Log filename (accepts stderr, stdout, stdlog, devnull"
+        " as special cases)", m_log, "stderr");
     Arg& json = args.add("showjson", "List options or drivers as JSON output",
         m_showJSON);
     json.setHidden();
@@ -344,6 +348,7 @@ int App::execute(StringList& cmdArgs, LogPtr& log)
         return -1;
     }
 
+    log.reset(new Log("PDAL", m_log));
     if (m_logLevel != LogLevel::None)
         log->setLevel(m_logLevel);
     else if (m_debug)
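
With the new `log` argument added to apps/pdal.cpp above, the log destination can be chosen on
the command line. A minimal usage sketch, assuming a built pdal binary on PATH and a placeholder
input.las; the option name follows from the args.add("log", ...) call in this hunk:

    # write PDAL's log to a file instead of the default stderr
    pdal info --log=pdal.log input.las
    # the special names stderr, stdout, stdlog, and devnull are also accepted
    pdal info --log=devnull input.las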
diff --git a/cmake/curl.cmake b/cmake/curl.cmake
index 5d9bfef..d2d8217 100644
--- a/cmake/curl.cmake
+++ b/cmake/curl.cmake
@@ -8,6 +8,7 @@ if (CURL_FOUND)
     find_package(Threads REQUIRED)
     include_directories(${CURL_INCLUDE_DIR})
     set(PDAL_ARBITER_ENABLED 1)
+    add_definitions("-DARBITER_CURL")
 
     if (WIN32)
         add_definitions("-DWINDOWS")
@@ -15,3 +16,4 @@ if (CURL_FOUND)
         add_definitions("-DUNIX")
     endif()
 endif()
+
diff --git a/cmake/laszip.cmake b/cmake/laszip.cmake
index 403aeb5..5418411 100644
--- a/cmake/laszip.cmake
+++ b/cmake/laszip.cmake
@@ -22,4 +22,7 @@ if(WITH_LASZIP)
         set(LASZIP_LIBRARY "")
         set(WITH_LASZIP FALSE)
     endif()
+else()
+        set(LASZIP_LIBRARY "")
+        set(WITH_LASZIP FALSE)
 endif()
diff --git a/cmake/modules/FindLASzip.cmake b/cmake/modules/FindLASzip.cmake
index 38d32c1..dc2129f 100644
--- a/cmake/modules/FindLASzip.cmake
+++ b/cmake/modules/FindLASzip.cmake
@@ -35,13 +35,14 @@ IF(WIN32)
   ENDIF()
 ENDIF()
 
-
 FIND_PATH(LASZIP_INCLUDE_DIR
-  laszip/laszip.hpp
+  laszip.hpp
   PATHS
+  /usr/include/laszip
+  /usr/local/include/laszip
+  ${OSGEO4W_ROOT_DIR}/include/laszip
   /usr/include
   /usr/local/include
-  /tmp/lasjunk/include
   ${OSGEO4W_ROOT_DIR}/include)
 
 SET(LASZIP_NAMES ${OSGEO4W_IMPORT_LIBRARY} laszip)
@@ -51,13 +52,12 @@ FIND_LIBRARY(LASZIP_LIBRARY
   PATHS
   /usr/lib
   /usr/local/lib
-  /tmp/lasjunk/lib
   ${OSGEO4W_ROOT_DIR}/lib)
 
 IF(LASZIP_INCLUDE_DIR)
   SET(LASZIP_VERSION 0)
 
-  SET(LASZIP_VERSION_H "${LASZIP_INCLUDE_DIR}/laszip/laszip.hpp")
+  SET(LASZIP_VERSION_H "${LASZIP_INCLUDE_DIR}/laszip.hpp")
   FILE(READ ${LASZIP_VERSION_H} LASZIP_VERSION_H_CONTENTS)
 
   IF (DEFINED LASZIP_VERSION_H_CONTENTS)
diff --git a/cmake/modules/FindMBSystem.cmake b/cmake/modules/FindMBSystem.cmake
new file mode 100644
index 0000000..bf546f4
--- /dev/null
+++ b/cmake/modules/FindMBSystem.cmake
@@ -0,0 +1,57 @@
+# - try to find MBSYSTEM library
+#
+# Cache Variables: (probably not for direct use in your scripts)
+#  MBSYSTEM_INCLUDE_DIR
+#  MBSYSTEM_LIBRARY
+#
+# Non-cache variables you might use in your CMakeLists.txt:
+#  MBSYSTEM_FOUND
+#  MBSYSTEM_INCLUDE_DIRS
+#  MBSYSTEM_LIBRARIES
+#
+# Requires these CMake modules:
+#  FindPackageHandleStandardArgs (known included with CMake >=2.6.2)
+#
+# Author:
+# 2011 Philippe Crassous (ENSAM ParisTech / Institut Image) p.crassous _at_ free.fr
+#
+# Adapted from the Virtual Reality Peripheral Network library.
+# https://github.com/rpavlik/vrpn/blob/master/README.Legal
+#
+
+set(MBSYSTEM_ROOT_DIR
+	"${MBSYSTEM_ROOT_DIR}"
+	CACHE
+	PATH
+	"Directory to search for MBSYSTEM")
+
+find_library(MBSYSTEM_LIBRARY
+	NAMES
+	mbio
+	PATHS
+	"${MBSYSTEM_ROOT_DIR}/libs"
+	/usr/lib/${CMAKE_LIBRARY_ARCHITECTURE})
+
+find_path(MBSYSTEM_INCLUDE_DIR
+	NAMES
+	mb_io.h
+	PATHS
+	"${MBSYSTEM_ROOT_DIR}"
+	/usr/include/mbsystem
+	PATH_SUFFIXES
+	include)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(MBSYSTEM
+	DEFAULT_MSG
+	MBSYSTEM_LIBRARY
+	MBSYSTEM_INCLUDE_DIR)
+
+if(MBSYSTEM_FOUND)
+	set(MBSYSTEM_LIBRARIES "${MBSYSTEM_LIBRARY}")
+	set(MBSYSTEM_INCLUDE_DIRS "${MBSYSTEM_INCLUDE_DIR}")
+	mark_as_advanced(MBSYSTEM_ROOT_DIR)
+endif()
+
+mark_as_advanced(MBSYSTEM_INCLUDE_DIR MBSYSTEM_LIBRARY)
+
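
This find module backs the new BUILD_PLUGIN_MBIO option added in cmake/options.cmake below. A
configure sketch, assuming MB-System is installed under the hypothetical prefix /opt/mbsystem
(the MBSYSTEM_ROOT_DIR hint is only needed for non-standard install locations):

    # enable the MBIO plugin when configuring PDAL
    cmake -DBUILD_PLUGIN_MBIO=ON \
          -DMBSYSTEM_ROOT_DIR=/opt/mbsystem \
          /path/to/PDAL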
diff --git a/cmake/options.cmake b/cmake/options.cmake
index e8fa854..c30f4bf 100644
--- a/cmake/options.cmake
+++ b/cmake/options.cmake
@@ -55,8 +55,6 @@ endif(DEFINED ENV{ORACLE_HOME})
 option(BUILD_PLUGIN_OCI
     "Choose if OCI support should be built" ${DEFINED_ORACLE_HOME})
 
-option(BUILD_PLUGIN_P2G "Choose if Points2Grid support should be built" FALSE)
-
 option(BUILD_PLUGIN_PCL "Choose if PCL support should be built" FALSE)
 add_feature_info("PCL plugin" BUILD_PLUGIN_PCL
     "provides PCL-based readers, writers, filters, and kernels")
@@ -83,6 +81,11 @@ option(BUILD_PLUGIN_PYTHON
 add_feature_info("Python plugin" BUILD_PLUGIN_PYTHON
     "add features that depend on python")
 
+option(BUILD_PLUGIN_MBIO
+    "Choose if MBIO support should be built" FALSE)
+add_feature_info("MBIO plugin" BUILD_PLUGIN_MBIO
+    "add features that depend on MBIO")
+
 option(BUILD_TOOLS_NITFWRAP "Choose if nitfwrap tool should be built" FALSE)
 
 option(WITH_TESTS
@@ -114,4 +117,4 @@ cmake_dependent_option(BUILD_RIVLIB_TESTS
     ON "BUILD_PLUGIN_RIVLIB; WITH_TESTS" OFF)
 cmake_dependent_option(BUILD_PIPELINE_TESTS
     "Choose if pipeline tests should be built"
-    OFF "WITH_APPS; WITH_TESTS" OFF)
+    OFF "WITH_TESTS" OFF)
diff --git a/dimbuilder/DimBuilder.cpp b/dimbuilder/DimBuilder.cpp
index 7146980..6b14129 100644
--- a/dimbuilder/DimBuilder.cpp
+++ b/dimbuilder/DimBuilder.cpp
@@ -437,10 +437,10 @@ void DimBuilder::writeTypes(std::ostream& out)
     }
     out << "    case Id::Unknown:\n";
     out << "        throw pdal_error(\"No type found for undefined "
-        "dimension ID.\");\n";
+        "dimension.\");\n";
     out << "    }\n";
     out << "    throw pdal_error(\"No type found for undefined "
-        "dimension ID.\");\n";
+        "dimension.\");\n";
     out << "}\n";
 }
 
diff --git a/doc/workshop/pdal-introduction.rst b/doc/about.rst
similarity index 67%
rename from doc/workshop/pdal-introduction.rst
rename to doc/about.rst
index b3fded7..9aa062b 100644
--- a/doc/workshop/pdal-introduction.rst
+++ b/doc/about.rst
@@ -1,35 +1,45 @@
-.. _pdal-introduction:
+.. _about:
 
-Introduction to PDAL
+About
 ================================================================================
 
-.. include:: ./includes/substitutions.rst
+.. include:: ./workshop/includes/substitutions.rst
 
 What is PDAL?
 --------------------------------------------------------------------------------
 
-|PDAL| is Point Data Abstraction Library, and it is an open source software for
-translating and processing point cloud data. It is not limited to just |LiDAR|
-data, although the focus and impetus for many of the tools have their origins
-in LiDAR.
+|PDAL| is Point Data Abstraction Library.  It is a C/C++ open source library
+and applications for translating and processing `point cloud data`_. It is not
+limited to |LiDAR| data, although the focus and impetus for many of the
+tools in the library have their origins in LiDAR.
+
+.. _`point cloud data`: https://en.wikipedia.org/wiki/Point_cloud
 
 What is its big idea?
+--------------------------------------------------------------------------------
+
+PDAL allows you to compose :ref:`operations <filters>` on point clouds into
+:ref:`pipelines <pipeline>` of :ref:`stages <stage_index>`. These pipelines can
+be written in a declarative JSON syntax or constructed using the available API.
+
+Why would you want to do that?
 ................................................................................
 
-Say you wanted to load some `ASPRS LAS`_ (the most common LiDAR binary format)
+A task might be to load some `ASPRS LAS`_ (the most common LiDAR binary format)
 data into a database, but you wanted to transform it into a common coordinate
-system along the way. One option would be to write a specialized program that
-reads LAS data, reprojects it as necessary, and then handles the necessary
-operations to insert the data in the appropriate format in the database.
-
-This approach has a distinct disadvantage. It is a kind of one-off, and it
-could quickly spiral out of control as you look to add new little tweaks and
-features to the operation. It ends up being very specific, and it
+system along the way.
+
+One option would be to write a specialized monolithic
+program that reads LAS data, reprojects it as necessary, and then handles the
+necessary operations to insert the data in the appropriate format in the
+database.  This approach has a distinct disadvantage in that without careful
+planning it could quickly spiral out of control as you add new little tweaks
+and features to the operation. It ends up being very specific, and it
 does not allow you to easily reuse the component that reads the LAS data
 separately from the component that transforms the data.
 
-Little programs that encapsulate specific functionality that can be composed
-together provide a more streamlined approach to the problem. They allow for
+The PDAL approach is to chain together a set of components,
+each of which encapsulates specific functionality.  The components allow for
 reuse, composition, and separation of concerns.  PDAL views point cloud
 processing operations as a pipeline composed as a series of stages.  You might
 have a simple pipeline composed of a :ref:`LAS Reader <readers.las>` stage, a
@@ -38,14 +48,40 @@ have a simple pipeline composed of a :ref:`LAS Reader <readers.las>` stage, a
 specialized program to perform this operation, you can dynamically compose it
 as a sequence of steps or operations.
 
-.. figure:: ./images/intro-pdal-simple-pipeline.png
+.. figure:: ./images/las-reproject-pgpointcloud.png
 
     A simple PDAL pipeline composed of a reader, filter, and writer
     stages.
 
-PDAL can compose intermediate stages, for operations such as filtering,
+A PDAL JSON :ref:`pipeline` that composes this operation to reproject
+and load the data into PostgreSQL might look something like the following:
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 4, 8, 12
+
+    {
+      "pipeline":[
+        {
+          "type":"readers.las",
+          "filename":"input.las"
+        },
+        {
+          "type":"filters.reprojection",
+          "out_srs":"EPSG:3857"
+        },
+        {
+          "type":"writers.pgpointcloud",
+          "connection":"host='localhost' dbname='lidar' user='hobu'",
+          "table":"output",
+          "srid":"3857"
+        }
+      ]
+    }
+
+PDAL can compose intermediate stages for operations such as filtering,
 clipping, tiling, transforming into a processing pipeline and reuse as
-necessary. It allows you to define these pipelines as `JSON`_ or `XML`_, and it
+necessary. It allows you to define these pipelines as `JSON`_, and it
 provides a command, :ref:`pipeline_command`, to allow you to execute them.
 
 .. note::
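
The JSON pipeline added in this hunk is run with the :ref:`pipeline_command` the text mentions.
A usage sketch, assuming the JSON has been saved to a hypothetical reproject.json and the target
PostgreSQL pointcloud database already exists:

    # execute the reproject-and-load pipeline
    pdal pipeline reproject.json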
@@ -56,41 +92,48 @@ provides a command, :ref:`pipeline_command`, to allow you to execute them.
 
 .. _`Virtual Raster Format`: http://www.gdal.org/gdal_vrttut.html
 .. _`JSON`: https://en.wikipedia.org/wiki/JSON
-.. _`XML`: https://en.wikipedia.org/wiki/XML
 
 
 How is it different than other tools?
-................................................................................
+--------------------------------------------------------------------------------
 
 LAStools
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+................................................................................
 
 .. index:: LAStools
 
-One of the most common open source processing tool suites available is `LAStools`_
-from `Martin Isenburg`_. PDAL is different in philosophy in a number of important
-ways:
+One of the most common open source processing tool suites available for LiDAR
+processing is `LAStools`_ from `Martin Isenburg`_. PDAL is different in
+philosophy in a number of important ways:
 
-1. All components of PDAL are released as open source software under an `OSI`_-approved
-   license.
+1. All components of PDAL are released as open source software under an
+   `OSI`_-approved license.
 2. PDAL allows application developers to provide proprietary extensions that
-   act as stages in processing pipelines. These might be things like custom format
+   act as stages in processing pipelines. These might be things like custom
+   format
    readers, specialized exploitation algorithms, or entire processing pipelines.
-3. PDAL must be able to generically operate on point cloud data of any format --
-   not just `ASPRS LAS`_. `LAStools`_ can read and write formats other than LAS, but
-   its view of formats it understands is within the context of the dimension
-   types provided by the LAS format.
+3. PDAL can operate on point cloud data of any format
+   -- not just `ASPRS LAS`_. `LAStools`_ can read and write formats other than
+   LAS, but relates all data to its internal handling of LAS data, limiting
+   it to :ref:`dimension <dimensions>` types provided by the LAS format.
+4. PDAL is coordinated by users through its declarative :ref:`JSON <pipeline>`
+   syntax. LAStools is coordinated by linking lots of small, specialized
+   command line utilities together with intricate arguments.
+5. PDAL is an open source project, with all of its development activities
+   available online at https://github.com/PDAL/PDAL
+
+.. _about_pcl:
 
 PCL
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+................................................................................
 
 .. index:: PCL
 
 `PCL`_ is a complementary, rather than substitute, open source software
 processing suite for point cloud data. The developer community of the PCL
 library is focused on algorithm development, robotic and computer vision, and
-real-time laser scanner processing. PDAL links and uses PCL, and PDAL provides a
-convenient pipeline mechanism to orchestrate PCL operations.
+real-time laser scanner processing. PDAL links and uses PCL, and PDAL provides
+a convenient pipeline mechanism to orchestrate PCL operations.
 
 .. note::
 
@@ -98,29 +141,33 @@ convenient pipeline mechanism to orchestrate PCL operations.
     PCL capabilities within PDAL operations.
 
 Greyhound and Entwine
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+................................................................................
 
 .. index:: Greyhound, Entwine
 
-`Greyhound`_ is an open source software from Hobu, Inc. that allows clients
-over the internet to query and stream progressive point cloud data.  `Entwine`_
-is an open source software from Hobu, Inc. that organizes massive point cloud
+`Greyhound`_ is open source software from `Hobu, Inc.`_ that allows clients
+to query and stream progressive point cloud data over the network.  `Entwine`_
+is open source software from Hobu, Inc. that organizes massive point cloud
 collections into `Greyhound`_-streamable data services. These two software
 projects allow province-scale LiDAR collections to be organized and served
-via HTTP clients over the internet.
+via HTTP clients over the internet. PDAL provides :ref:`readers.greyhound` to
+allow users to read data into PDAL processes from that server.
 
+.. _`Hobu, Inc.`: https://hobu.co
 
-.. _`Entwine`: http://github.com/connormanning/entwine
-.. _`Greyhound`: http://github.com/hobu/greyhound
+.. _`Entwine`: https://entwine.io
+.. _`Greyhound`: http://greyhound.io
 
 plas.io and Potree
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+................................................................................
 
 `plas.io`_ is a `WebGL`_ HTML5 point cloud renderer that speaks `ASPRS LAS`_ and
-`LASzip`_ compressed LAS.
+`LASzip`_ compressed LAS. You can find the software for it at plasiojs.io and
+https://github.com/hobu/plasio-ui
 
 `Potree`_ is a `WebGL`_ HTML5 point cloud renderer that speaks `ASPRS LAS`_ and
-`LASzip`_ compressed LAS.
+`LASzip`_ compressed LAS. You can find the software at
+https://github.com/potree/potree/
 
 .. note::
 
@@ -133,14 +180,14 @@ plas.io and Potree
 .. _`LASzip`: http://laszip.org
 
 Others
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+................................................................................
 
 .. index:: OrfeoToolbox, libLAS, CloudCompare, Fusion
 
-Other open source point cloud softwares tend to be GUI, rather than library,
-focused.  They include some processing operations, and sometimes they even
-embed tools such as PDAL. We're obviously biased toward PDAL, but you might
-find useful bits of functionality in them. These other tools include:
+Other open source point cloud software tends to be desktop GUI, rather than
+library, focused.  It includes some processing operations, and sometimes it
+even embeds tools such as PDAL. We're obviously biased toward PDAL, but you
+might find useful bits of functionality there. These other tools include:
 
 * `libLAS`_
 * `CloudCompare`_
@@ -169,7 +216,7 @@ find useful bits of functionality in them. These other tools include:
 .. _`Martin Isenburg`: https://www.cs.unc.edu/~isenburg/
 
 Where did PDAL come from?
-................................................................................
+--------------------------------------------------------------------------------
 
 PDAL takes its cue from another very popular open source project -- |GDAL|.
 GDAL is Geospatial Data Abstraction Library, and it is used throughout the geospatial
@@ -207,7 +254,8 @@ characteristics demand a library oriented toward these approaches and PDAL
 achieves it.
 
 What tasks are PDAL good at?
-................................................................................
+--------------------------------------------------------------------------------
+
 
 PDAL is great at point cloud data translation work flows. It allows users to
 apply algorithms to data by providing an abstract API to the content -- freeing
@@ -225,16 +273,16 @@ features make it attractive to software developers, data managers, and
 scientists.
 
 What are PDAL's weak points?
-................................................................................
+--------------------------------------------------------------------------------
 
 PDAL doesn't provide a friendly GUI interface; it expects that you have the
-confidence to dig into a command-line interface, and it sometimes forgets that
-you don't always want to read source code to figure out what exactly is
-happening.  PDAL is an open source project in active development, and because
-of that, we're always working to improve it. Please visit :ref:`community` to
-find out how you can participate if you are interested. The project is always
-looking for contribution, and the mailing list is the place to ask for help if
-you are stuck.
+confidence to dig into the options of :ref:`filters`, :ref:`readers`, and
+:ref:`writers`. We sometimes forget that you don't always want to read source
+code to figure out how things work. PDAL is an open source project in active
+development, and because of that, we're always working to improve it. Please
+visit :ref:`community` to find out how you can participate if you are
+interested. The project is always looking for contributions, and the mailing
+list is the place to ask for help if you are stuck.
 
 High Level Overview
 --------------------------------------------------------------------------------
@@ -251,8 +299,11 @@ Core C++ Software Library
 ................................................................................
 
 PDAL provides a :ref:`C++ API <api>` software developers can use to provide
-point cloud processing capabilities in their own software. PDAL is cross-platform
-C++, and it can compile and run on Linux, OS X, and Windows.
+point cloud processing capabilities in their own software. PDAL is
+cross-platform C++, and it can compile and run on Linux, OS X, and Windows. The
+best place to learn how to use PDAL's C++ API is the :ref:`test suite
+<pdal_test>` and its `source code
+<https://github.com/PDAL/PDAL/tree/master/test/unit>`__.
 
 .. seealso::
 
@@ -271,7 +322,8 @@ coordinate and construct point cloud processing work flows. Some key tasks
 users can achieve with these applications include:
 
 * Print :ref:`info <info_command>` about a data set
-* Data :ref:`translation <translate_command>` from one point cloud format to another
+* Data :ref:`translation <translate_command>` from one point cloud format to
+  another
 * Application of exploitation algorithms
 
   * Generate a DTM
@@ -289,34 +341,11 @@ Python API
 
 PDAL supports both embedding |Python| and extending with |Python|. These
 allow you to dynamically interact with point cloud data in a more
-comfortable and familiar language environment for geospatial practitioners
-
-Embed
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. index:: Embed, Python
-
-By embedding Python, PDAL allows you to interact with point cloud data using
-typical `Numpy`_ features. PDAL embeds |Python| scripts in your processing work
-flows with the :ref:`filters.programmable` and :ref:`filters.predicate`
-filters. Your Python scripts can process and interact with point cloud data
-during the execution of a :ref:`PDAL pipeline <pipeline>`, and you are free to
-dynamically do whatever you want in your scripts.
-
-Extension
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. index:: Extension
-
-PDAL also provides a Python extension for software developers who simply
-want to use data as a mechanism to abstract data formats. This approach
-works really well in algorithm work bench scenarios, simple data testing
-and validation challenges, or situations where full C++ applications would be
-too much effort or complexity.
+comfortable and familiar language environment for geospatial practitioners.
 
 .. seealso::
 
-    The :ref:`python_installation` document contains information on how to
+    The :ref:`python` document contains information on how to
     install and use the PDAL Python extension.
 
 .. _`Numpy`: http://www.numpy.org/
diff --git a/doc/api/index.rst b/doc/api/index.rst
index 35a8662..e3b492a 100644
--- a/doc/api/index.rst
+++ b/doc/api/index.rst
@@ -5,8 +5,7 @@ API
 ******************************************************************************
 
 PDAL is a C++ library, and its primary API is in that language. There is also a
-`Python API <https://pypi.python.org/pypi/PDAL>`__ that allows reading of data
-and interaction with `Numpy`_.
+:ref:`python` API that allows reading of data and interaction with `Numpy`_.
 
 .. toctree::
    :maxdepth: 2
diff --git a/doc/apps/delta.rst b/doc/apps/delta.rst
index b356a82..79ff35a 100644
--- a/doc/apps/delta.rst
+++ b/doc/apps/delta.rst
@@ -16,10 +16,12 @@ Standard out is used if no output file is specified.
 
 ::
 
-    --source arg     Non-positional option for specifying source filename
-    --candidate arg  Non-positional option for specifying candidate filename
-    --output arg     Non-positional option for specifying output filename [/dev/stdout]
-    --2d             only 2D comparisons/indexing
+    --source           source file name
+    --candidate        candidate file name
+    --output           output file name
+    --2d               only 2D comparisons/indexing
+    --detail           Output deltas per-point
+    --alldims          Compute diffs for all dimensions (not just X,Y,Z)
 
 Example 1:
 --------------------------------------------------------------------------------
diff --git a/doc/apps/density.rst b/doc/apps/density.rst
index 35b6156..b3f983c 100644
--- a/doc/apps/density.rst
+++ b/doc/apps/density.rst
@@ -9,11 +9,21 @@ output of :ref:`filters.hexbin`.
 
 .. note::
 
-    The ``density`` command is only available when PDAL is linked with Hexer.
+    The ``density`` command is only available when PDAL is linked with Hexer
+    (BUILD_PLUGIN_HEXBIN=ON in CMakeCache.txt).
 
 ::
 
-    --input, -i        input point cloud file name
-    --output, -o       output vector data source
+    $ pdal density <input> <output>
+
+::
+
+    --input, -i        Input point cloud file name
+    --output, -o       Output vector data source
     --lyr_name         OGR layer name to write into datasource
     --ogrdriver, -f    OGR driver name to use
+    --sample_size      Sample size for automatic edge length calculation. [5000]
+    --threshold        Required cell density [15]
+    --hole_cull_tolerance_area
+                       Tolerance area to apply to holes before cull
+    --smooth           Smooth boundary output
diff --git a/doc/apps/diff.rst b/doc/apps/diff.rst
index 2942824..7749221 100644
--- a/doc/apps/diff.rst
+++ b/doc/apps/diff.rst
@@ -13,8 +13,12 @@ between two sources.
 
 ::
 
-    --source arg     Non-positional option for specifying filename of source file.
-    --candidate arg  Non-positional option for specifying filename to test against source.
+  --source       source file name
+  --candidate    candidate file name
+  --output       output file name
+  --2d           only 2D comparisons/indexing
+  --detail       Output deltas per-point
+  --alldims      Compute diffs for all dimensions (not just X,Y,Z)
 
 The command returns 0 and produces no output if the files describe the same
 point data in the same format, otherwise 1 is returned and a JSON-formatted
diff --git a/doc/apps/ground.rst b/doc/apps/ground.rst
index f1c8295..d5e1799 100644
--- a/doc/apps/ground.rst
+++ b/doc/apps/ground.rst
@@ -13,10 +13,6 @@ versus non-ground returns.
 
 ::
 
-    --developer-debug   Enable developer debug (don't trap exceptions)
-    --label             A string to label the process with
-    --visualize         Visualize result
-    --driver            Override reader driver
     --input, -i         Input filename
     --output, -o        Output filename
     --max_window_size   Max window size
diff --git a/doc/apps/index.rst b/doc/apps/index.rst
index 97afe9c..8bd8e24 100644
--- a/doc/apps/index.rst
+++ b/doc/apps/index.rst
@@ -4,8 +4,8 @@
 Applications
 ******************************************************************************
 
-PDAL contains consists of a single application, called ``pdal``.  Applications
-are run by invoking the ``pdal`` application along with the command name:
+PDAL consists of a single application, called ``pdal``.  Operations
+are run by invoking the ``pdal`` application along with a command name:
 
 ::
 
@@ -23,6 +23,15 @@ drivers and their options:
     $ pdal translate --drivers
     $ pdal pipeline --options writers.las
 
+
+All commands support the following options:
+
+::
+
+    --developer-debug   Enable developer debug (don't trap exceptions).
+    --label             A string to use as a process label.
+    --driver            Name of driver to use to override that inferred from file type.
+
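+For example (a sketch; the file name is a placeholder), ``--driver`` can
+force a reader on a file whose type can't be inferred from its extension:
+
+::
+
+    $ pdal info mystery-file --driver readers.las
+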
 Additional driver-specific options may be specified by using a
 namespace-prefixed option name. For example, it is possible to set the LAS day
 of year at translation time with the following option:
@@ -36,8 +45,7 @@ of year at translation time with the following option:
 
 .. note::
 
-    Driver specific options can be identified using the ``pdal info --options``
-    invocation.
+    Driver-specific options can be identified using the ``pdal <command> --help`` invocation.
 
 .. toctree::
    :maxdepth: 2
diff --git a/doc/apps/info.rst b/doc/apps/info.rst
index 5868005..8a18379 100644
--- a/doc/apps/info.rst
+++ b/doc/apps/info.rst
@@ -4,7 +4,7 @@
 info
 ********************************************************************************
 
-Dumps information about a point cloud file, such as:
+Displays information about a point cloud file, such as:
 
 * basic properties (extents, number of points, point format)
 * coordinate reference system
@@ -19,26 +19,19 @@ Dumps information about a point cloud file, such as:
 
 ::
 
-    --input arg       Non-positional argument to specify input filename.
-    --point [-p] arg  Display points for particular points.  Points can be specified in
-                      a range or list: 4-10, 15, 255-300.
-    --query arg       Add a listing of points based on the distance from the provided
-                      location.  The number of points returned can be limited by
-                      providing an optional count.
-                      --query "25.34,35.123/3" or --query "11532.23 -10e23 1.234/10"
-    --stats           Display the minimum, maximum, average and count of each
-                      dimension.
-    --boundary        Compute a hexagonal boundary that contains all points.
-    --dimensions arg  Use with --stats to limit the dimensions on which statistics
-                      should be computed.
-                      --dimensions "X, Y,Red"
-    --schema          Dump the schema of the internal point storage.
-    --pipeline-serialization
-                      Create a JSON representation of the pipeline used to generate
-                      the output.
-    --summary         Dump the point count, spatial reference, extrema and dimension
-                      names.
-    --metadata        Dump the metadata associated with the input file.
+  --input, -i               Input file name
+  --all                     Dump statistics, schema and metadata
+  --point, -p               Point to dump --point="1-5,10,100-200" (0 indexed)
+  --query                   Return points in order of distance from the
+      specified location (2D or 3D) --query Xcoord,Ycoord[,Zcoord][/count]
+  --stats                   Dump stats on all points (reads entire dataset)
+  --boundary                Compute a hexagonal hull/boundary of dataset
+  --dimensions              Dimensions on which to compute statistics
+  --schema                  Dump the schema
+  --pipeline-serialization  Output filename for pipeline serialization
+  --summary                 Dump summary of the info
+  --metadata                Dump file metadata info
+  --stdin, -s               Read a pipeline file from standard input
 
 If no options are provided, ``--stats`` is assumed.
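
 For example, a quick summary of a file (a sketch; ``input.las`` is a
 placeholder name)::

     $ pdal info input.las --summary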
 
diff --git a/doc/apps/merge.rst b/doc/apps/merge.rst
index f3ac1a5..9dfca63 100644
--- a/doc/apps/merge.rst
+++ b/doc/apps/merge.rst
@@ -6,15 +6,14 @@ merge
 
 The ``merge`` command will combine input files into a single output file.
 
-
 ::
 
     $ pdal merge <input> ... <output>
 
 ::
 
-    --files [-f] arg  Non-positional argument to specify filenames.  The last
-      file listed is taken to be the output file.
+    --files, -f    List of filenames.  The last file listed is taken to be
+        the output file.
 
 This command provides simple merging of files.  It offers no facility for
 filtering, reprojection, etc.  The file type of the input files may be
diff --git a/doc/apps/pcl.rst b/doc/apps/pcl.rst
index e394cec..a0b5da1 100644
--- a/doc/apps/pcl.rst
+++ b/doc/apps/pcl.rst
@@ -17,10 +17,10 @@ The ``pcl`` command is used to invoke a PCL JSON pipeline. See
 
 ::
 
-    --input [-i] arg   Non-positional argument to specify input file name.
-    --output [-o] arg  Non-positional argument to specify output file name.
-    --pcl [-p] arg     Non-positional argument to specify pcl file name.
-    --compress [-z]    Compress output data (if supported by output format)
-    --metadata [-m]    Forward metadata from previous stages.
+    --input, -i        Input filename
+    --output, -o       Output filename
+    --pcl, -p          PCL filename
+    --compress, -z     Compress output data (if supported by output format)
+    --metadata, -m     Forward metadata (VLRs, header entries, etc) from previous stages
 
 
diff --git a/doc/apps/pipeline.rst b/doc/apps/pipeline.rst
index 3635bf7..d4b3438 100644
--- a/doc/apps/pipeline.rst
+++ b/doc/apps/pipeline.rst
@@ -13,29 +13,58 @@ The ``pipeline`` command is used to execute :ref:`pipeline` JSON. See
 
 ::
 
-    --developer-debug         Enable developer debug (don't trap exceptions)
-    --label                   A string to label the process with
-    --driver                  Override reader driver
-    --input, -i               input file name
-    --pipeline-serialization  Output file for pipeline serialization
-    --validate                Validate the pipeline (including serialization), but do not write
-                              points
-    --progress                Name of file or FIFO to which stages should write progress
-                              information. The file/FIFO must exist. PDAL will not create the progress file.
-    --stdin, -s               Read pipeline from standard input
+  --input, -i               Input filename
+  --pipeline-serialization  Output file for pipeline serialization
+  --validate                Validate the pipeline (including serialization),
+      but do not write points
+  --progress                Name of file or FIFO to which stages should write
+      progress information. The file/FIFO must exist. PDAL will not create the
+      progress file.
+  --stdin, -s               Read pipeline from standard input
+  --stream                  Attempt to run pipeline in streaming mode.
+  --metadata                Metadata filename
 
-.. note::
 
-    The ``pipeline`` command can accept option substitutions, and they replace
-    existing options that are specified in the input JSON pipeline.  If
-    multiple stages of the same name exist in the pipeline, `all` stages would
-    be overridden. For example, to set the output and input LAS files for a
-    pipeline that does a translation, the ``filename`` for the reader and the
-    writer can be overridden:
+Substitutions
+................................................................................
 
-    ::
+The ``pipeline`` command can accept command-line option substitutions, which
+replace existing options that are specified in the input JSON pipeline.  If
+multiple stages of the same name exist in the pipeline, `all` such stages
+will be overridden. For example, to set the output and input LAS files for a
+pipeline that does a translation, the ``filename`` for the reader and the
+writer can be overridden:
 
-        $ pdal pipeline translate.json --writers.las.filename=output.laz \
-            --readers.las.filename=input.las
+::
+
+    $ pdal pipeline translate.json --writers.las.filename=output.laz \
+        --readers.las.filename=input.las
+
+Option substitution can also refer to the tag of an individual stage.
+This can be done by using the syntax ``--stage.<tagname>.<option>``.  This
+allows options to be set on individual stages, even if there are multiple
+stages of the same type.  For example, if a pipeline contained two LAS
+readers with tags ``las1`` and ``las2`` respectively, the following
+command would allow assignment of different filenames to each stage:
+
+::
+
+    {
+        "pipeline" : [
+            {
+                "tag" : "las1",
+                "type" : "readers.las"
+            },
+            {
+                "tag" : "las2",
+                "type" : "readers.las"
+            },
+            "placeholder.laz"
+        ]
+    }
 
+    $ pdal pipeline translate.json --writers.las.filename=output.laz \
+        --stage.las1.filename=file1.las --stage.las2.filename=file2.las
 
+Options specified by tag names override options specified by stage types.
diff --git a/doc/apps/random.rst b/doc/apps/random.rst
index 81e2fcf..bdd1a75 100644
--- a/doc/apps/random.rst
+++ b/doc/apps/random.rst
@@ -17,18 +17,14 @@ each of the x, y, and z dimensions.
 
 ::
 
-    --output [-o] arg   Non-positional argument to specify output file name.
-    --compress [-z]     Compress output data (if supported by output format)
-    --count arg         Number of points in created point cloud [0].
-    --bounds arg        Extent (in XYZ to clip output to):
-                        --bounds "([xmin,xmax],[ymin,ymax],[zmin,zmax])"
-    --mean arg          List of means (for --distribution normal)
-                        --mean 0.0,0.0,0.0
-                        --mean "0.0 0.0 0.0"
-    --stdev arg         List of standard deviations (for --distribution normal)
-                        --stdev 0.0,0.0,0.0
-                        --stdev "0.0 0.0 0.0"
-    --distribution arg  Distribution type (uniform or normal) [uniform]
-
+  --output, -o       Output file name
+  --compress, -z     Compress output data (if supported by output format)
+  --count            How many points should we write?
+  --bounds           Extent (in XYZ) to clip output to
+  --mean             A comma-separated or quoted, space-separated list of means
+      (normal mode): --mean 0.0,0.0,0.0 --mean "0.0 0.0 0.0"
+  --stdev            A comma-separated or quoted, space-separated list of
+      standard deviations (normal mode): --stdev 0.0,0.0,0.0 --stdev "0.0 0.0 0.0"
+  --distribution     Distribution (uniform / normal)
 
 
diff --git a/doc/apps/sort.rst b/doc/apps/sort.rst
index 897efea..41ef130 100644
--- a/doc/apps/sort.rst
+++ b/doc/apps/sort.rst
@@ -12,5 +12,9 @@ The ``sort`` command uses :ref:`filters.mortonorder` to sort data by XY values.
 
 ::
 
-    --input [-i] arg   Non-positional argument to specify input file name.
-    --output [-o] arg  Non-positional argument to specify output file name.
+    --input, -i        Input filename
+    --output, -o       Output filename
+    --compress, -z     Compress output data (if supported by output format)
+    --metadata, -m     Forward metadata (VLRs, header entries, etc) from previous stages
+
+
diff --git a/doc/apps/split.rst b/doc/apps/split.rst
index 5e194b3..eabba12 100644
--- a/doc/apps/split.rst
+++ b/doc/apps/split.rst
@@ -14,10 +14,13 @@ template) or output directory specification.
 
 ::
 
-    --input [-i] arg   Non-positional option for specifying input file name
-    --output [-o] arg  Non-positional option for specifying output file/directory name
-    --length arg       Edge length for splitter cells.  See :ref:`filters.splitter`.
-    --capacity arg     Point capacity for chipper cells.  See :ref:`filters.chipper`.
+
+    --input, -i     Input filename
+    --output, -o    Output filename
+    --length        Edge length for splitter cells
+    --capacity      Point capacity of chipper cells
+    --origin_x      Origin in X axis for splitter cells
+    --origin_y      Origin in Y axis for splitter cells
 
 If neither the ``--length`` nor ``--capacity`` arguments are specified, an
 implicit argument of capacity with a value of 100000 is added.
diff --git a/doc/apps/tindex.rst b/doc/apps/tindex.rst
index 9a5f557..7be9b7a 100644
--- a/doc/apps/tindex.rst
+++ b/doc/apps/tindex.rst
@@ -19,6 +19,21 @@ tindex Creation Mode
 
     $ pdal tindex <tindex> <filespec>
 
+::
+
+    --tindex               OGR-readable/writeable tile index output
+    --filespec             Build: Pattern of files to index. Merge: Output filename
+    --fast_boundary        Use extent instead of exact boundary
+    --lyr_name             OGR layer name to write into datasource
+    --tindex_name          Tile index column name
+    --ogrdriver, -f        OGR driver name to use
+    --t_srs                Target SRS of tile index
+    --a_srs                Assign SRS of tile with no SRS to this value
+    --write_absolute_path  Write absolute rather than relative file paths
+    --merge                Whether we're merging the entries in a tindex file.
+    --stdin, -s            Read filespec pattern from standard input
+
+
 This command will index the files referred to by ``filespec`` and place the
 result in ``tindex``.  The ``tindex`` is a vector file or database that
 will be created by ``pdal`` as necessary to store the file index.
@@ -33,22 +48,7 @@ feature in a layer in the index file. The ``filespec`` is a `glob pattern
 <http://man7.org/linux/man-pages/man7/glob.7.html>`_ and normally needs to be
 quoted to prevent shell expansion of wildcard characters.
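
 For example (a sketch; ``index.shp`` and the directory are placeholder
 names)::

     $ pdal tindex index.shp "las_files/*.las"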
 
-::
 
-    --tindex                   Non-positional option for specifying the index file name.
-    --filespec                 Non-positional option for specifying pattern of files to
-                               be indexed.
-    --lyr_name                 Name of layer in which to store the features. Defaults to
-                               the base name of the first file indexed.
-    --tindex_name              Name of the field in the feature in which to store the
-                               indexed file name. ["location"]
-    --ogrdriver                OGR driver name. ["ESRI Shapefile"]
-    --t_srs                    Spatial reference system in which to store index vector
-                               data. ["EPSG:4326"]
-    --a_srs                    Spatial reference assumed to be the reference for the
-                               source data.  If the source data includes spatial reference
-                               information, this value is IGNORED. ["EPSG:4326"]
-    --write_absolute_path arg  Write absolute rather than relative file paths [false]
 
 tindex Merge Mode
 --------------------------------------------------------------------------------
@@ -65,15 +65,15 @@ extension.
 
 ::
 
-    --tindex    Non-positional option for specifying the index filename.
-    --filespec  Non-positional option for specifying the merge output filename.
-    --polygon   Well-known text representation of geometric filter.  Only
-                points inside the object will be written to the output file.
-    --bounds    Bounding box for clipping points.  Only points inside the box
-                will be written to the output file.
-                --bounds "([xmin,xmax],[ymin,ymax],[zmin,zmax])"
-    --t_srs     Spatial reference system in which the output data should be
-                represented. ["EPSG:4326"]
+    --tindex         OGR-readable/writeable tile index output
+    --filespec       Build: Pattern of files to index. Merge: Output filename
+    --lyr_name       OGR layer name to write into datasource
+    --tindex_name    Tile index column name
+    --ogrdriver, -f  OGR driver name to use
+    --t_srs          Target SRS of tile index
+    --bounds         Extent (in XYZ) to clip output to
+    --polygon        Well-known text of polygon to clip output
+
 
 Example 1:
 --------------------------------------------------------------------------------
diff --git a/doc/apps/translate.rst b/doc/apps/translate.rst
index 629261c..281d83c 100644
--- a/doc/apps/translate.rst
+++ b/doc/apps/translate.rst
@@ -17,7 +17,7 @@ from the command-line.
     --input, -i        Input filename
     --output, -o       Output filename
     --filter, -f       Filter type
-    --json             JSON array of filters
+    --json             PDAL pipeline from which to extract filters.
     --pipeline, -p     Pipeline output
     --metadata, -m     Dump metadata output to the specified file
     --reader, -r       Reader type
@@ -25,16 +25,19 @@ from the command-line.
 
 The ``--input`` and ``--output`` file names are required options.
 
-The ``--pipeline`` file name is optional. If given, the pipeline constructed
-from the command-line arguments will be written to disk for reuse in the
-:ref:`pipeline_command`.
-
-The ``--json`` flag can use used to specify a JSON array of filters
-as if they were being specified in a :ref:`pipeline_command`.  If a filename
-follows the flag, the file is opened and it is assumed that the file
-contains a valid JSON array of filter specifications.  If the flag value
-is not a filename, the value is taken to be a literal JSON string that is
-the array of filters.  The flag
+If provided, the ``--pipeline`` option will write the pipeline constructed
+from the command-line arguments to the specified file.  The translate
+command will not actually run when this argument is given.
+
+The ``--json`` flag can be used to specify a PDAL pipeline from which
+filters will be extracted.  If a reader or writer exists in the pipeline,
+it will be removed and replaced with the input and output provided on
+the command line.  If a replaced reader/writer stage was tagged in the
+provided pipeline, the overriding file will assume that tag.  If the
+argument to the ``--json`` option references an existing file, it is assumed
+that the file contains the pipeline to be processed.  If the argument value
+is not a filename, it is taken to be a literal JSON string that is
+the pipeline.  The flag
 can't be used if filters are listed on the command line or explicitly
 with the ``--filter`` option.
 
@@ -52,7 +55,8 @@ the correct drivers from the input and output file name extensions respectively.
 Example 1:
 --------------------------------------------------------------------------------
 
-The ``translate`` command can be augmented by specifying full-path options at
+The ``translate`` command can be augmented by specifying fully qualified
+options at
 the command-line invocation. For example, the following invocation will
 translate ``1.2-with-color.las`` to ``output.laz`` while doing the following:
 
@@ -97,4 +101,16 @@ output (including the output from the stats filter) is written to the file
 ::
 
     $ pdal translate myfile output.las --metadata=meta.json -r readers.text \
-        --json="[ { \"type\":\"filters.stats\" } ]"
+        --json="{ \"pipeline\": [ { \"type\":\"filters.stats\" } ] }"
+
+Example 4:
+--------------------------------------------------------------------------------
+
+This command reprojects the points in the file "input.las" to another spatial
+reference system and writes the result to the file "output.las".
+
+::
+
+    $ pdal translate input.las output.las -f filters.reprojection \
+      --filters.reprojection.out_srs="EPSG:4326"
+
diff --git a/doc/community.rst b/doc/community.rst
index 7e87a2b..4bad636 100644
--- a/doc/community.rst
+++ b/doc/community.rst
@@ -4,7 +4,7 @@
 Community
 ******************************************************************************
 
-PDAL's community interacts through `Mailing List`_, `GitHub`_, and `IRC`_.
+PDAL's community interacts through `Mailing List`_, `GitHub`_, `Gitter`_, and `IRC`_.
 Please feel welcome to ask questions and participate in all of the venues.
 The `Mailing List`_ communication channel is for general questions, development
 discussion, and feedback. The `GitHub`_ communication channel is for development
@@ -41,14 +41,22 @@ take contributions in all forms, and we welcome those who are willing to roll
 up their sleeves and start filing tickets, pushing code, generating builds, and
 answering questions.
 
-There is also a public Gitter chat room integrated with the `GitHub`_ repository
-and available at https://gitter.im/PDAL/PDAL or Gitter client.
 
 .. seealso::
 
     :ref:`development_index` provides more information on how the PDAL software
     development activities operate.
 
+Gitter
+..............................................................................
+
+Some PDAL developers are active on `Gitter`_, and you can use that channel
+to ask questions and interact with the developers in a mode that is
+similar to `IRC`_. Gitter uses your `GitHub`_ credentials for access, so
+you will need an account to get started.
+
+.. _`Gitter`: https://gitter.im/PDAL/PDAL
+
 IRC
 ..............................................................................
 
diff --git a/doc/development/compilation/dependencies.rst b/doc/development/compilation/dependencies.rst
index bac0c95..fd3f11a 100644
--- a/doc/development/compilation/dependencies.rst
+++ b/doc/development/compilation/dependencies.rst
@@ -37,9 +37,6 @@ and Windows has the `OSGeo4W`_ platform.
     If you are using `OSGeo4W`_ as your provider of GDAL, you must make sure
     to use the GDAL 1.9 package.
 
-Optional Dependencies
-------------------------------------------------------------------------------
-
 GeoTIFF
 ..............................................................................
 
@@ -71,6 +68,9 @@ Proj.4_ is the projection engine that PDAL uses for the
     transformation support. Otherwise, older versions should be
     sufficient.
 
+Optional Dependencies
+------------------------------------------------------------------------------
+
 libxml2
 ..............................................................................
 
@@ -107,14 +107,6 @@ Point Cloud read/write support.
 .. note::
     MSVC should only require the oci.lib and oci.dll library and dlls.
 
-Points2Grid
-..............................................................................
-
-`Points2Grid`_ is a library with a simple `CMake`-based build system that
-provides simple, out-of-process interpolation of large point sets using
-Boost_. It can be obtained via github.com at https://github.com/CRREL/points2grid
-It is used by :ref:`writers.p2g` to output point cloud interpolation.
-
 Hexer
 ..............................................................................
 
diff --git a/doc/development/compilation/index.rst b/doc/development/compilation/index.rst
index 185b50e..5598e66 100644
--- a/doc/development/compilation/index.rst
+++ b/doc/development/compilation/index.rst
@@ -25,5 +25,4 @@ Contents:
    unix
    windows
    dependencies
-   python
 
diff --git a/doc/development/compilation/python.rst b/doc/development/compilation/python.rst
deleted file mode 100644
index e1645fc..0000000
--- a/doc/development/compilation/python.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-.. _python_installation:
-
-********************************************************************
-Python installation
-********************************************************************
-
-Beginning in PDAL 1.0.0, a Python extension to execute pipelines
-and read its output as a numpy array is available.
-
-To install it need to compile and install :ref:`PDAL <building>` and
-then install the PDAL Python extension
-
-Install from local
--------------------------
-In the source code of PDAL there is a ``python`` folder, you have to enter
-there and run ::
-
-    python setup.py build
-    # this should be run as administrator/super user
-    python setup.py install
-
-Install from repository
---------------------------
-The second method to install the PDAL Python extension is to use `pip`_
-or `easy_install`_, you have to run the command as administrator. ::
-
-    pip install PDAL
-
-.. note::
-
-    To install pip please read
-    `here <https://pip.pypa.io/en/stable/installing/>`_
-
-.. _`pip`: https://pip.pypa.io/en/stable/
-.. _`easy_install`: https://pypi.python.org/pypi/setuptools
diff --git a/doc/development/docker.rst b/doc/development/docker.rst
new file mode 100644
index 0000000..f4f9574
--- /dev/null
+++ b/doc/development/docker.rst
@@ -0,0 +1,85 @@
+.. _development_docker:
+
+================================================================================
+Building Docker Containers for PDAL
+================================================================================
+
+
+PDAL's :ref:`repository <source>` is linked to `DockerHub`_ for automatic building
+of `Docker`_ containers. PDAL keeps three Docker containers current.
+
+* ``pdal/dependencies:latest`` -- PDAL's dependencies
+* ``pdal/pdal:latest`` -- PDAL master, manually specified
+* ``pdal/pdal:1.4`` -- PDAL maintenance, automatically generated
+
+.. _`Docker`: https://www.docker.com/
+.. _`DockerHub`: https://hub.docker.com/r/pdal/pdal/
+
+.. figure:: ../images/docker-master-branch.png
+
+.. note::
+
+    Containers are built upon the `Dependencies`_ container, but the
+    `Dependencies`_ container is not pinned to specific Xenial or PDAL
+    releases. It corresponds to wherever the ``dependencies`` tag of
+    the PDAL source tree at https://github.com/PDAL/PDAL resides.
+
+
+Dependencies
+================================================================================
+
+The PDAL dependencies Docker container is used by both the latest and release
+branch Docker containers. The dependencies container is also used during
+:ref:`integration` testing by Travis. It is built using the
+Dockerfile at https://github.com/PDAL/PDAL/blob/master/scripts/docker/dependencies/Dockerfile
+
+The ``pdal/pdal:dependencies`` image is generated by force-pushing the
+``dependencies`` tag at the SHA you wish `DockerHub`_ to build:
+
+::
+
+    git tag -f dependencies
+    git push origin refs/tags/dependencies -f
+
+.. note::
+
+    The dependencies container is currently built upon
+    `Ubuntu Xenial`_. When the next Ubuntu LTS is released,
+    the PDAL project will likely move to it.
+
+.. _`Ubuntu Xenial`: http://releases.ubuntu.com/16.04/
+
+Maintenance
+================================================================================
+
+A PDAL container corresponding to the last major release is automatically created
+and maintained with every commit to the active release branch. For example, the
+``1.4-maintenance`` branch will have a corresponding ``pdal/pdal:1.4`` container
+built on `DockerHub`_ with every commit. Users are encouraged to use these
+containers for testing, bug confirmation, and deployment.
+
+.. figure:: ../images/docker-maintenance-branch.png
+
+    Docker containers on maintenance branch correspond to
+    major PDAL releases.
+
+
+Latest (or master)
+================================================================================
+
+A PDAL container corresponding to a developer-selected release point is
+made available at ``pdal/pdal:latest`` and corresponds to the manual push
+of a ``docker-master`` tag by PDAL developers. This container is typically
+used for testing and verification of fixes, and it is recommended that users
+looking to depend on PDAL's Docker containers always use known release
+versions from the last stable release branch.
+
+::
+
+    git tag -f docker-master
+
+    git push origin refs/tags/docker-master -f
diff --git a/doc/development/index.rst b/doc/development/index.rst
index 5e310fc..b3022df 100644
--- a/doc/development/index.rst
+++ b/doc/development/index.rst
@@ -12,14 +12,15 @@ developing new code can be found in this section.
 .. toctree::
    :maxdepth: 2
 
+   overview
    compilation/index
    conventions
    contributors
    docs
+   docker
    errorhandling
    metadata
    goals
    testing
    integration
 
-
diff --git a/doc/development/integration.rst b/doc/development/integration.rst
index 11202b8..0b72658 100644
--- a/doc/development/integration.rst
+++ b/doc/development/integration.rst
@@ -35,7 +35,8 @@ It uses the ``pdal/dependencies`` :ref:`docker` image found at
 https://hub.docker.com/r/pdal/dependencies as a base platform for providing
 prerequisite software and running the test suite. If you want to add new test
 functionality based on a dependency, you will need to update that Docker image
-to do so.
+to do so. See :ref:`development_docker` for more detail on how PDAL builds
+and uses Docker containers for testing.
 
 
 .. _appveyor:
diff --git a/doc/development/metadata.rst b/doc/development/metadata.rst
index e231b9e..8e4325f 100644
--- a/doc/development/metadata.rst
+++ b/doc/development/metadata.rst
@@ -4,220 +4,95 @@
 Metadata
 ******************************************************************************
 
-Metadata is an important component of any data processing story. PDAL attempts
-to allow users to operate with metadata in a relatively free-form way, with
-its main Metadata utility, :cpp:class:`pdal::Metadata`.
-
-The basic structure of a :cpp:class:`pdal::Metadata` instance is the following
-tree structure:
-
-.. code-block:: javascript
-
-    {
-        "type": "blank",
-        "value": "",
-        "name": "name",
-        "metadata": {}
-    }
-
-.. note::
-
-    Metadata instances can contain other Metadata instances by adding them
-    with the :cpp:func:`pdal::Metadata::addMetadata` method.  They will be
-    added to the `metadata` sub-tree of the internal property_tree.
-
-Metadata Types
+In addition to point data, PDAL stores metadata during the processing of
+a pipeline.  Metadata is stored internally as strings, though the API
+accepts a variety of types that are automatically converted as necessary.
+Each item of metadata consists of a name, a description (optional), a value
+and a type.  In addition, each item of metadata can have a list of child
+metadata values.
+
+Metadata is made available to users of PDAL through a JSON tree.  Commands
+such as :ref:`pdal pipeline <pipeline_command>` and
+:ref:`pdal translate <translate_command>` provide options to allow
+the JSON-formatted metadata created by PDAL to be written to a file.
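+
+For example (a sketch; file names are placeholders), the translate
+command's ``--metadata`` option writes the metadata from a run to a file:
+
+::
+
+    $ pdal translate input.las output.las --metadata=meta.json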
+
+Metadata Nodes
 ------------------------------------------------------------------------------
 
-:cpp:class:`pdal::Metadata` instances require that any classes that are added
-to them be copy-constructable and have an ostream<< operator for them.  While
-these constraints mean they are all serializable to strings,
-:cpp:class:`pdal::Metadata` also keeps an explicit type variable, `type` for
-each instance. This property allows us to say something extra about the
-Metadata entry, and allows them to go in and out of :cpp:class:`pdal::Stage` and
-:cpp:class:`pdal::PointView` with type fidelity.
-
-The metadata `type` variable roughly maps to the `XSD type names`_.  The following
-types are valid for current PDAL versions, though more may be added.
-
-.. csv-table:: PDAL :cpp:class:`pdal::Metadata` types
-
-    double, float, integer
-    nonNegativeInteger, boolean, string
-    base64Binary, uuid, bounds
-    spatialreference, blank
-
-.. _`XSD type names`:  http://infohost.nmt.edu/tcc/help/pubs/rnc/xsd.html
-
-.. warning::
-
-    Explicitly-sized types are not supported. Assume that `integer` or
-    `nonNegativeInteger` map to the typical 4-byte signed and unsigned types.
-    You might be required to adjust the value based on an explicit
-    interpretation and cast it into these larger types.
-
-`JSON`_ representation
+Each item of metadata is stored in an object known as a MetadataNode.
+Metadata nodes are reference types that can be copied cheaply.  Metadata nodes
+are annotated with the original data type to allow better interpretation of
+the data.
+For example, when binary data is stored in a base 64-encoded
+format, knowing that the data doesn't ultimately represent a string can allow
+algorithms to convert it back to its binary representation when desired.
+Similarly, knowing that data is numeric allows it
+to be written as a JSON numeric type rather than as a string.
+
+The name of a metadata node is immutable.  If you wish to add a copy of
+metadata (and its subchildren) to some node under a different name, you need
+to call the provided clone() function.
+
+A metadata node is added as a child to another node using add().  Usually
+the type of the data assigned to the metadata node is determined through
+overloading, but there are instances where this is impossible and the
+programmer must call a specific function to set the type of the metadata node.
+Binary data that has been converted to a string by base 64 encoding can
+be tagged as such by calling addEncoded().  Programmers can specify the
+type of a node explicitly by calling addWithType().  Currently supported
+types are: "boolean", "string", "float", "double", "bounds",
+"nonNegativeInteger", "integer", "uuid" and "base64Binary".
+
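+A small sketch (``buf`` and ``size`` stand for a binary buffer and its
+length; the exact signatures here are assumptions based on the functions
+named above):
+
+.. code-block:: c++
+
+    MetadataNode vlr("vlr");
+    // Tag base 64-encoded binary data so it can be decoded later.
+    vlr.addEncoded("data", buf, size, "Raw VLR data");
+    // Set the node's type explicitly rather than through overloading.
+    vlr.addWithType("count", "42", "nonNegativeInteger", "A counter");
+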
+Metadata nodes can be presented as lists when transformed to JSON.  If
+multiple nodes with the same name are added to a parent node, those
+subnodes will automatically be tagged as list nodes and will be enclosed in
+square brackets.  Single nodes can be forced to be treated as JSON lists
+by calling addList() instead of add() on a parent node.
+
+
+Metadata and Stages
 ------------------------------------------------------------------------------
 
-A more interesting metadata tree might come from the
-:cpp:class:`pdal::drivers::las::Reader`.  Interesting things to note include
+Stages in PDAL each have a base metadata node.  You can retrieve a stage's
+metadata node by calling getMetadata().  When a PDAL pipeline is run, its
+metadata is organized as a list of stage nodes to which subnodes have been
+added.  From within the implementation of a stage, metadata is typically
+added similarly to the following:
 
+.. code-block:: c++
 
+    MetadataNode root = getMetadata();
+    root.add("nodename", "Some string data");
+    root.add("intlist", 45);
+    root.add("intlist", 55);
+    Uuid nullUuid;
+    MetadataNode pnode("parent");
+    root.add(pnode);
+    pnode.add("nulluuidnode", nullUuid);
+    pnode.addList("num_in_list", 66);
 
-.. _`JSON`: http://www.json.org/
+If the above code were part of a stage "writers.test", a transformation to JSON
+would produce the following output:
 
-.. code-block:: javascript
+.. code-block:: json
 
     {
-        "name": "readers.las",
-        "type": "blank",
-        "value": "",
-        "metadata":
+      "writers.test":
+      {
+        "intlist":
+        [
+          45,
+          55
+        ],
+        "nodename": "Some string data",
+        "parent":
         {
-            "compressed":
-            {
-                "name": "compressed",
-                "description": "true if this LAS file is compressed",
-                "type": "boolean",
-                "value": "false"
-            },
-            "dataformatid":
-            {
-                "name": "dataformatid",
-                "description": "The Point Format ID as specified in the LAS specification",
-                "type": "nonNegativeInteger",
-                "value": "3"
-            },
-            ...
-            "project_id":
-            {
-                "name": "project_id",
-                "description": "Project ID (GUID data): The four fields that comprise a complete Globally Unique Identifier (GUID) are now reserved for use as a Project Identifier (Project ID). The field remains optional. The time of assignment of the Project ID is at the discretion of processing software. The Project ID should be the same for all files that are associated with a unique project. By assigning a Project ID and using a File Source ID (defined above) every file within a proj [...]
-                "type": "uuid",
-                "value": "00000000-0000-0000-0000-000000000000"
-            },
-            "system_id":
-            {
-                "name": "system_id",
-                "description": "",
-                "type": "string",
-                "value": "HOBU-SYSTEMID"
-            },
-            ...
-            "vlr_0":
-            {
-                "name": "vlr_0",
-                "description": "A Polygon WKT entry",
-                "type": "base64Binary",
-                "value": "UE9MWUdPTigoNiAxNSwgMTAgMTAsIDIwIDEwLCAyNSAxNSwgMjUgMzUsIDE5IDQwLCAxMSA0MCwgNiAyNSwgNiAxNSkpCg==",
-                "metadata":
-                {
-                    "reserved":
-                    {
-                        "name": "reserved",
-                        "description": "Two bytes of padded, unused space. Some softwares expect the values of these bytes to be 0xAABB as specified in the 1.0 version of the LAS specification",
-                        "type": "nonNegativeInteger",
-                        "value": "43707"
-                    },
-                    "user_id":
-                    {
-                        "name": "user_id",
-                        "description": "The User ID field is ASCII character data that identifies the user which created the variable length record. It is possible to have many Variable Length Records from different sources with different User IDs. If the character data is less than 16 characters, the remaining data must be null. The User ID must be registered with the LAS specification managing body. The management of these User IDs ensures that no two individuals accidentally use the s [...]
-                        "type": "string",
-                        "value": "hobu"
-                    },
-                    "record_id":
-                    {
-                        "name": "record_id",
-                        "description": "The Record ID is dependent upon the User ID. There can be 0 to 65535 Record IDs for every User ID. The LAS specification manages its own Record IDs (User IDs owned by the specification), otherwise Record IDs will be managed by the owner of the given User ID. Thus each User ID is allowed to assign 0 to 65535 Record IDs in any manner they desire. Publicizing the meaning of a given Record ID is left to the owner of the given User ID. Unknown User ID\/ [...]
-                        "type": "nonNegativeInteger",
-                        "value": "1234"
-                    },
-                    "description":
-                    {
-                        "name": "description",
-                        "description": "",
-                        "type": "string",
-                        "value": "A Polygon WKT entry"
-                    }
-                }
-            },
-            ...
+          "nulluuidnode": "00000000-0000-0000-0000-000000000000",
+          "num_in_list":
+          [
+            66
+          ]
         }
+      }
     }
 
-.. _metadatajson:
-
-:ref:`Pipeline` XML representation
-------------------------------------------------------------------------------
-
-The :ref:`Pipeline` representation of the :cpp:class:`pdal::Metadata` is a
-little bit flatter...
-
-
-::
-
-    <?xml version="1.0" encoding="utf-8"?>
-    <Reader type="readers.las">
-      <Option name="debug">false</Option>
-      <Option name="filename">test/data/interesting.las</Option>
-      <Option name="verbose">0</Option>
-      <Metadata name="writers.las" type="blank">
-        <Metadata name="compressed" type="boolean">false</Metadata>
-        <Metadata name="dataformatid" type="nonNegativeInteger">3</Metadata>
-        <Metadata name="version_major" type="nonNegativeInteger">1</Metadata>
-        <Metadata name="version_minor" type="nonNegativeInteger">2</Metadata>
-        <Metadata name="filesource_id" type="nonNegativeInteger">0</Metadata>
-        <Metadata name="reserved" type="nonNegativeInteger">0</Metadata>
-        <Metadata name="project_id" type="uuid">00000000-0000-0000-0000-000000000000</Metadata>
-        <Metadata name="system_id" type="string">HOBU-SYSTEMID</Metadata>
-        <Metadata name="software_id" type="string">HOBU-GENERATING</Metadata>
-        <Metadata name="creation_doy" type="nonNegativeInteger">145</Metadata>
-        <Metadata name="creation_year" type="nonNegativeInteger">2012</Metadata>
-        <Metadata name="header_size" type="nonNegativeInteger">227</Metadata>
-        <Metadata name="dataoffset" type="nonNegativeInteger">1488</Metadata>
-        <Metadata name="scale_x" type="double">0.01</Metadata>
-        <Metadata name="scale_y" type="double">0.01</Metadata>
-        <Metadata name="scale_z" type="double">0.01</Metadata>
-        <Metadata name="offset_x" type="double">-0</Metadata>
-        <Metadata name="offset_y" type="double">-0</Metadata>
-        <Metadata name="offset_z" type="double">-0</Metadata>
-        <Metadata name="minx" type="double">635619.85</Metadata>
-        <Metadata name="miny" type="double">848899.7000000001</Metadata>
-        <Metadata name="minz" type="double">406.59</Metadata>
-        <Metadata name="maxx" type="double">638982.55</Metadata>
-        <Metadata name="maxy" type="double">853535.4300000001</Metadata>
-        <Metadata name="maxz" type="double">586.38</Metadata>
-        <Metadata name="count" type="nonNegativeInteger">1065</Metadata>
-        <Metadata name="vlr_0" type="base64Binary">UE9MWUdPTigoNiAxNSwgMTAgMTAsIDIwIDEwLCAyNSAxNSwgMjUgMzUsIDE5IDQwLCAxMSA0MCwgNiAyNSwgNiAxNSkpCg==
-            <Metadata name="reserved" type="nonNegativeInteger">43707</Metadata>
-            <Metadata name="user_id" type="string">hobu</Metadata>
-            <Metadata name="record_id" type="nonNegativeInteger">1234</Metadata>
-            <Metadata name="description" type="string">A Polygon WKT entry</Metadata>
-        </Metadata>
-        <Metadata name="vlr_1" type="base64Binary">AQABAAAAFQAABAAAAQABAAEEAAABAAEAAgSxhywAAAAACAAAAQD/fwEIsYdqACwAAggAAAEA/38GCAAAAQCOIwgIAAABAP9/CQiwhwEABgALCLCHAQAHAA0IsIcBAAgAAAwAAAEA/38CDAAAAQD/fwMMAAABAAgABAwAAAEAKiMGDLCHAQACAAcMsIcBAAMADAywhwEAAQANDLCHAQAAAA4MsIcBAAQADwywhwEABQAAAAAAAAAAAA==
-            <Metadata name="reserved" type="nonNegativeInteger">43707</Metadata>
-            <Metadata name="user_id" type="string">LASF_Projection</Metadata>
-            <Metadata name="record_id" type="nonNegativeInteger">34735</Metadata>
-            <Metadata name="description" type="string">GeoTIFF GeoKeyDirectoryTag</Metadata>
-        </Metadata>
-        <Metadata name="vlr_2" type="base64Binary">AAAAAADgREAAAAAAACBewAAAAAAAgEVAAAAAAADARkD//////2kYQQAAAAAAAAAAAAAAQKZUWEGo+euUHaRyQAAAAAAAAAAA
-            <Metadata name="reserved" type="nonNegativeInteger">43707</Metadata>
-            <Metadata name="user_id" type="string">LASF_Projection</Metadata>
-            <Metadata name="record_id" type="nonNegativeInteger">34736</Metadata>
-            <Metadata name="description" type="string">GeoTIFF GeoDoubleParamsTag</Metadata>
-        </Metadata>
-        <Metadata name="vlr_3" type="base64Binary">TkFEXzE5ODNfT3JlZ29uX1N0YXRld2lkZV9MYW1iZXJ0X0ZlZXRfSW50bHxHQ1MgTmFtZSA9IEdDU19Ob3J0aF9BbWVyaWNhbl8xOTgzfERhdHVtID0gRF9Ob3J0aF9BbWVyaWNhbl8xOTgzfEVsbGlwc29pZCA9IEdSU18xOTgwfFByaW1lbSA9IEdyZWVud2ljaHx8AA==
-            <Metadata name="reserved" type="nonNegativeInteger">43707</Metadata>
-            <Metadata name="user_id" type="string">LASF_Projection</Metadata>
-            <Metadata name="record_id" type="nonNegativeInteger">34737</Metadata>
-            <Metadata name="description" type="string">GeoTIFF GeoAsciiParamsTag</Metadata>
-        </Metadata>
-        <Metadata name="vlr_4" type="base64Binary">UFJPSkNTWyJOQURfMTk4M19PcmVnb25fU3RhdGV3aWRlX0xhbWJlcnRfRmVldF9JbnRsIixHRU9HQ1NbIkdDU19Ob3J0aF9BbWVyaWNhbl8xOTgzIixEQVRVTVsiRF9Ob3J0aF9BbWVyaWNhbl8xOTgzIixTUEhFUk9JRFsiR1JTXzE5ODAiLDYzNzgxMzcuMCwyOTguMjU3MjIyMTAxXV0sUFJJTUVNWyJHcmVlbndpY2giLDAuMF0sVU5JVFsiRGVncmVlIiwwLjAxNzQ1MzI5MjUxOTk0MzI5NV1dLFBST0pFQ1RJT05bIkxhbWJlcnRfQ29uZm9ybWFsX0NvbmljXzJTUCJdLFBBUkFNRVRFUlsiRmFsc2VfRWFzdGluZyIsMTMxMjMzNS45NTgwMDUyNDldLFBBUkFNRVRFUlsiRmFsc2VfTm9yd [...]
-            <Metadata name="reserved" type="nonNegativeInteger">43707</Metadata>
-            <Metadata name="user_id" type="string">liblas</Metadata>
-            <Metadata name="record_id" type="nonNegativeInteger">2112</Metadata>
-            <Metadata name="description" type="string">OGR variant of OpenGIS WKT SRS</Metadata>
-        </Metadata>
-      </Metadata>
-    </Reader>
diff --git a/doc/tutorial/overview.rst b/doc/development/overview.rst
similarity index 92%
rename from doc/tutorial/overview.rst
rename to doc/development/overview.rst
index 7752e98..661733a 100644
--- a/doc/tutorial/overview.rst
+++ b/doc/development/overview.rst
@@ -35,7 +35,7 @@ writers, all of which are known as stages.  Any merge operation or filter may be
 placed after any reader.  Output filters are distinct from other filters only in
 that they may create more than one set of points to be further filtered or
 written.  The arrangement of readers, filters and writers is called a PDAL
-pipeline.  Pipelines can be specified using XML as detailed later.
+pipeline.  Pipelines can be specified using JSON as detailed later.
 
 Extending PDAL
 ................................................................................
@@ -56,13 +56,8 @@ formats provide this information in a header or preamble.  PDAL calls each of
 the elements that make up a point a dimension.  PDAL predefines the dimensions
 that are in common use by the formats that it currently supports.  Readers may
 register their use of a predefined dimension or may have PDAL create a
-<<<<<<< Updated upstream
-dimension with a name and type as requested.  Dimensions are described by the
-enumeration pdal::Dimension::Id and associated functions in Dimension.hpp.
-=======
 dimension with a name and type as requested.  Dimensions are described in a
 JSON file, Dimension.json.
->>>>>>> Stashed changes
 
 PDAL has a default type (Double, Float, Signed32, etc.) for each of its
 predefined dimensions which is believed to be sufficient to accurately
@@ -87,7 +82,7 @@ requested type is a 16 bit unsigned integer (Unsigned16), PDAL will use a
 Point Layout
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-PDAL stores the set of dimension information in a point layout structure
+PDAL stores the dimension information in a point layout structure
 (PointLayout object).  It stores information about the physical layout of
 data of each point in memory and also stores the type and name of each
 dimension.
@@ -99,7 +94,7 @@ PDAL stores points in what is called a point table (PointTable object).  Each
 point table has an associated point layout describing its format.  All
 points in a single point table have the same dimensions and all operations on
 a PDAL pipeline make use of a single point table.  In addition to storing
-points, a point table also stores pipeline metadata that may get created as
+points, a point table also stores pipeline metadata that may be created as
 pipeline stages are executed.  Most functions receive a PointTableRef object,
 which refers to the active point table.  A PointTableRef can be stored
 or copied cheaply.
@@ -147,7 +142,7 @@ Making a Stage (Reader, Filter or Writer):
 All stages (Stage object) share a common interface, though readers, filters and
 writers each have a simplified interface if the generic stage interface is more
 complex than necessary.  One should create a new stage by creating a subclass of
-reader (Reader object), filter (Filter or MultiFilter object) or writer (Writer
+reader (Reader object), filter (Filter object) or writer (Writer
 object).  When a pipeline is made, each stage is created using its default
 constructor.
 
@@ -163,17 +158,12 @@ functions calls, none of which need to be implemented in a stage unless desired.
 Each stage is guaranteed to be prepared after all stages that precede it in the
 pipeline.
 
-1) void processOptions(const Options& options)
+1) void addArgs(ProgramArgs& args)
 
-    PDAL allows users to specify various options at the command line and in
-    pipeline files.  Those options relevant to a stage are passed to the stage
-    during preparation through this method.  This method should extract any
-    necessary data from the options and set data in member variables or perform
-    other configuration as necessary.  It is not recommended that options passed
-    into this function be copied, as they may become non-copyable in a future
-    version of the library.  Handling all option processing at this point also
-    allows an exception to be thrown in the case of an invalid option that can
-    be properly interpreted by the pipeline.
+    Stages can accept various options to control processing.  These options
+    can be declared and bound to variables in this function.  When arguments
+    are added, the stage also provides a description and optionally a default
+    value for the argument.
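+
+    For example, a stage with a single numeric option might bind it as
+    follows (a minimal sketch; the stage name and the m_length member are
+    assumed)::
+
+        void MyFilter::addArgs(ProgramArgs& args)
+        {
+            // Bind the "length" option to m_length with a default of 10.0
+            // and provide a description of the argument.
+            args.add("length", "Length of segment to consider", m_length, 10.0);
+        }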
 
 2) void initialize() OR void initialize(PointTableRef)
 
@@ -185,7 +175,7 @@ pipeline.
     implement the no-argument version.  Whether to place initialization code
     at this step or in prepared() or ready() (see below) is a judgement call,
     but detection of errors earlier in the process allows faster termination of
-    a pipeline.
+    a command.
 
 3) void addDimensions(PointLayoutPtr layout)
 
@@ -193,8 +183,7 @@ pipeline.
     that it would like as part of the record of each point.  Usually, only
     readers add dimensions to a point table, but there is no prohibition on
     filters or writers from adding dimensions if necessary.  Dimensions should
-    not be added to the layout of a pipeline’s point layout except in this
-    method.
+    not be added to the layout outside of this method.
 
 4) void prepared(PointTableRef)
 
@@ -246,9 +235,9 @@ during processing.  There are some situations that may make this undesirable.
 As an alternative, PDAL allows execution of data with a point table that
 contains a fixed number of points (StreamPointTable).  When a StreamPointTable
 is passed to the execute() function, the private run() function detailed above
-isn't called, and instead processOne() is called.  If a StreamPointTable is
-passed to execute() but a pipeline stage doesn't implement processOne(),
-an exception is thrown.
+isn't called, and instead processOne() is called for each point.  If a
+StreamPointTable is passed to execute() but a pipeline stage doesn't
+implement processOne(), an exception is thrown.
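+
+In client code, streaming execution looks roughly like this (a minimal
+sketch; FixedPointTable is the stock fixed-size StreamPointTable and
+``stage`` stands for the final stage of a prepared pipeline)::
+
+    // Stream points through the pipeline 10000 at a time.
+    FixedPointTable table(10000);
+    stage.prepare(table);
+    stage.execute(table);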
 
 bool processOne(PointRef& ref)
 
@@ -258,7 +247,7 @@ bool processOne(PointRef& ref)
     When a filter returns 'false' from this function, it indicates
     that the point just processed should be filtered out and not passed
     to subsequent stages for processing.
-    
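+
+    For example, a hypothetical filter that drops points below a Z cutoff
+    might implement it as follows (a sketch; m_cutoff is an assumed member
+    bound in addArgs())::
+
+        bool ZCutoffFilter::processOne(PointRef& point)
+        {
+            // Keep the point only when Z is at or above the cutoff.
+            return point.getFieldAs<double>(Dimension::Id::Z) >= m_cutoff;
+        }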
+
 Implementing a Reader
 ................................................................................
 
@@ -300,15 +289,14 @@ precision floating point.
         for (auto di = header.names.begin(); di != header.names.end(); ++di)
         {
             std::string dimName = *di;
-            Dimension::Id id = layout->registerOrAssignDim(
-                dimName,
+            Dimension::Id id = layout->registerOrAssignDim(dimName,
                 Dimension::Type::Double);
         }
     }
 
 If a reader implements initialize() and opens a source file during the function,
 the file should be closed again before exiting the function to ensure that
-filehandles aren't exhausted when processing a large number of files.
+file handles aren't exhausted when processing a large number of files.
 
 Readers should use the ready() function to reset the input data to a state
 where the first point can be read from the source.  The done() function
@@ -398,7 +386,7 @@ point_count_t read(PointViewPtr view, point_count_t count)
             while (remaining--)
             {
                 PointRef point(view->point(nextId));
-               
+
                 processOne(point);
                 nextId++;
             }
@@ -501,9 +489,12 @@ individual points in write() and close the file in done().
 
 Like a filter, a writer may receive multiple point views during processing
 of a pipeline.  This will result in the write() function being called once
-for each of the input point views.  Some current writers do not produce
-correct output when provided with multiple point views.  Users should
-use a merge filter immediately prior to such writers to avoid errors.
+for each of the input point views.  A writer may produce a separate output
+file for each input point view or a single output file for all of them; the
+documentation for each writer should clearly state which.  Placing a merge
+filter in front of a writer in the pipeline ensures that a single point
+view is passed to the writer.
+
 As new writers are created, developers should try to make sure
 that they behave reasonably if passed multiple point views -- they
 correctly handle write() being called multiple times after a single
diff --git a/doc/tutorial/pipeline.png b/doc/development/pipeline.png
similarity index 100%
rename from doc/tutorial/pipeline.png
rename to doc/development/pipeline.png
diff --git a/doc/download.rst b/doc/download.rst
index 30599a6..b55d2e7 100644
--- a/doc/download.rst
+++ b/doc/download.rst
@@ -43,7 +43,7 @@ The main repository for PDAL is located on github at https://github.com/PDAL/PDA
 
 You can obtain a copy of the active source code by issuing the following command::
 
-    git clone git at github.com:PDAL/PDAL.git pdal
+    git clone https://github.com/PDAL/PDAL.git pdal
 
 
 
@@ -58,20 +58,20 @@ tutorial at :ref:`docker` for more information.
 
 ::
 
-    docker pull pdal/pdal:1.2
+    docker pull pdal/pdal:1.5
 
 
 Windows
 ................................................................................
 
-A 1.1.0 release of PDAL is available via `OSGeo4W`_. It is only 64-bit at this
-time. Use the :ref:`docker` builds if you want to use the PDAL :ref:`apps`, otherwise,
-a call for help with building current Windows PDAL builds is at https://lists.osgeo.org/pipermail/pdal/2016-November/001089.html
+Windows users are asked to use the :ref:`docker` builds for :ref:`apps` access;
+otherwise, see the call for help with building current Windows PDAL builds at
+https://lists.osgeo.org/pipermail/pdal/2016-November/001089.html
 
 RPMs
 ................................................................................
 
-RPMs for PDAL are available at http://pdal.s3-website-us-east-1.amazonaws.com/rpms/
+RPMs for PDAL are available at https://copr.fedorainfracloud.org/coprs/neteler/pdal/
 
 Debian
 ................................................................................
diff --git a/doc/faq.rst b/doc/faq.rst
index bfe1969..e5d3356 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -17,6 +17,10 @@ FAQ
   declarative pipeline syntax for orchestrating translation operations.
   PDAL can also use PCL through the :ref:`filters.pclblock` mechanism.
 
+  .. seealso::
+
+        :ref:`about_pcl` describes PDAL and PCL's relationship.
+
 * What is PDAL's relationship to libLAS?
 
   The idea behind libLAS was limited to LIDAR data and basic
diff --git a/doc/images/docker-maintenance-branch.png b/doc/images/docker-maintenance-branch.png
new file mode 100644
index 0000000..fc1a344
Binary files /dev/null and b/doc/images/docker-maintenance-branch.png differ
diff --git a/doc/images/docker-master-branch.png b/doc/images/docker-master-branch.png
new file mode 100644
index 0000000..6e4712c
Binary files /dev/null and b/doc/images/docker-master-branch.png differ
diff --git a/doc/images/foss4g-2017.png b/doc/images/foss4g-2017.png
new file mode 100644
index 0000000..92ad213
Binary files /dev/null and b/doc/images/foss4g-2017.png differ
diff --git a/doc/images/las-reproject-pgpointcloud.png b/doc/images/las-reproject-pgpointcloud.png
new file mode 100644
index 0000000..e432968
Binary files /dev/null and b/doc/images/las-reproject-pgpointcloud.png differ
diff --git a/doc/images/python-pdal-pipeline.png b/doc/images/python-pdal-pipeline.png
new file mode 100644
index 0000000..a8adb58
Binary files /dev/null and b/doc/images/python-pdal-pipeline.png differ
diff --git a/doc/images/reproject-merge-pipeline.png b/doc/images/reproject-merge-pipeline.png
index b3bc695..ff8657a 100644
Binary files a/doc/images/reproject-merge-pipeline.png and b/doc/images/reproject-merge-pipeline.png differ
diff --git a/doc/index.rst b/doc/index.rst
index 52dbaeb..d597d55 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -10,25 +10,35 @@ PDAL - Point Data Abstraction Library
 
 PDAL is a C++ `BSD`_ library for translating and manipulating `point cloud data`_.
 It is very much like the `GDAL`_ library which handles raster and vector data.
-See :ref:`readers` and :ref:`writers` for data formats PDAL supports, and see
-:ref:`filters` for filtering operations that you can apply with PDAL.
+See :ref:`readers` and :ref:`writers` for data formats PDAL supports, see
+:ref:`filters` for filtering operations that you can apply with PDAL, and
+visit :ref:`about` for a high level overview of the library and its philosophy.
 
 In addition to the library code, PDAL provides a suite of command-line
 applications that users can conveniently use to process, filter, translate, and
-query point cloud data.  See :ref:`apps` for more information.
+query point cloud data.  :ref:`apps` provides more information on that topic.
+
+Finally, PDAL speaks Python. Visit :ref:`python` to find out how you can
+use PDAL with Python to process point cloud data.
 
 The entire website is available as a single PDF at http://pdal.io/PDAL.pdf
 
 News
 --------------------------------------------------------------------------------
 
-**08-29-2016**
+**12-15-2016**
 ................................................................................
 
-PDAL 1.3.0 has been released. Visit :ref:`download` to obtain a copy of the
+.. image:: ./images/foss4g-2017.png
+    :scale: 40%
+    :align: right
+    :target: http://2017.foss4g.org
+
+PDAL 1.4.0 has been released. Visit :ref:`download` to obtain a copy of the
 source code, or follow the :ref:`quickstart` to get going in a hurry with
 `Docker`_.
 
+
 .. _`Docker`: https://www.docker.com/
 
 .. _`Howard Butler`: http://github.com/hobu
@@ -37,6 +47,14 @@ source code, or follow the :ref:`quickstart` to get going in a hurry with
 .. _`Point cloud web services with Greyhound, Entwine, and PDAL`: https://2016.foss4g-na.org/session/point-cloud-web-services-greyhound-entwine-and-pdal
 .. _`Filtering point clouds with PDAL and PCL`: https://2016.foss4g-na.org/session/filtering-point-clouds-pdal-and-pcl
 
+About
+--------------------------------------------------------------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   about
+
 
 Download
 --------------------------------------------------------------------------------
@@ -81,8 +99,23 @@ Drivers
    stages/readers
    stages/writers
    stages/filters
+
+Dimensions
+--------------------------------------------------------------------------------
+
+.. toctree::
+   :maxdepth: 2
+
    dimensions
 
+Python
+--------------------------------------------------------------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   python
+
 Tutorials
 --------------------------------------------------------------------------------
 
diff --git a/doc/pipeline.rst b/doc/pipeline.rst
index 27da6de..7a85d99 100644
--- a/doc/pipeline.rst
+++ b/doc/pipeline.rst
@@ -4,10 +4,11 @@
 Pipeline
 ******************************************************************************
 
-Pipelines are the operative construct in PDAL. PDAL constructs a pipeline to
+Pipelines are the operative construct in PDAL; they describe how data flow
+from reading through processing to writing. PDAL internally constructs a
+pipeline to
 perform data translation operations using :ref:`translate_command`, for
 example. While specific :ref:`applications <apps>` are useful in many contexts,
-a pipeline provides some useful advantages for more complex things:
+a pipeline provides useful advantages for more complex things:
 
 1. You have a record of the operation(s) applied to the data
 2. You can construct a skeleton of an operation and substitute specific
@@ -23,9 +24,9 @@ a pipeline provides some useful advantages for more complex things:
 
 .. warning::
 
-    As of PDAL 1.2, `JSON`_ is now the preferred specification language
-    for PDAL pipelines. XML read support is still available at 1.2, but
-    JSON is preferred. XML support will be dropped in a future release.
+    As of PDAL 1.2, `JSON`_ is the preferred specification language
+    for PDAL pipelines. XML read support is still available at 1.5, but
+    XML support will be dropped at the 1.6 release.
 
 .. _`JSON`: http://www.json.org/
 
@@ -34,16 +35,11 @@ Introduction
 --------------------------------------------------------------------------------
 
 
-A PDAL JSON object represents a processing pipeline.
+A JSON object represents a PDAL processing pipeline.  The structure is always
+a JSON object whose primary member, ``pipeline``, is an array of inferred or
+explicit PDAL :ref:`stage_object` representations.
 
-A complete PDAL JSON data structure is always an object (in JSON terms). In PDAL
-JSON, an object consists of a collection of name/value pairs -- also called
-members. For each member, the name is always a string. Member values are either
-a string, number, object, array or one of the literals: "true", "false", and
-"null". An array consists of elements where each element is a value as
-described above.
-
-Examples
+Simple Example
 ................................................................................
 
 A simple PDAL pipeline, inferring the appropriate drivers for the reader and
@@ -67,9 +63,12 @@ writer from filenames, and able to be specified as a set of sequential steps:
     A simple pipeline to convert :ref:`LAS <readers.las>` to :ref:`BPF <readers.bpf>`
     while only keeping points inside the box :math:`[0 \leq x \leq 100, 0 \leq y \leq 100]`.
 
-A more complex PDAL pipeline, that reprojects the stage tagged ``A1``, merges
-the result with ``B``, and writes the merged output with the :ref:`writers.p2g`
-plugin.:
+Reprojection Example
+................................................................................
+
+A more complex PDAL pipeline reprojects the stage tagged ``A1``, merges
+the result with ``B``, and writes the merged output to a GeoTIFF file
+with the :ref:`writers.gdal` writer:
 
 .. code-block:: json
 
@@ -98,7 +97,7 @@ plugin.:
               ]
           },
           {
-              "type":"writers.p2g",
+              "type":"writers.gdal",
               "filename":"output.tif"
           }
       ]
@@ -113,17 +112,6 @@ plugin.:
 .. _`UTM`: http://spatialreference.org/ref/epsg/nad83-utm-zone-16n/
 .. _`Geographic`: http://spatialreference.org/ref/epsg/4326/
 
-Definitions
-................................................................................
-
-* JavaScript Object Notation (JSON), and the terms object, name, value, array,
-  and number, are defined in IETF RTC 4627, at
-  http://www.ietf.org/rfc/rfc4627.txt.
-
-* The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
-  "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this documention are to
-  be interpreted as described in IETF RFC 2119, at
-  http://www.ietf.org/rfc/rfc2119.txt.
 
 Pipeline Objects
 --------------------------------------------------------------------------------
@@ -185,6 +173,9 @@ For more on PDAL stages and their options, check the PDAL documentation on
   stage-specific option names and their respective values. Values provided as
   JSON objects or arrays will be stringified and parsed within the stage.
 
+* Applications can place a ``user_data`` node on any stage object and it will be
+  carried through to any serialized pipeline output.
+
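+A stage carrying such a node might look like the following (a sketch;
+``filters.decimation`` is just an example stage, and the ``user_data``
+value is entirely application-defined):
+
+.. code-block:: json
+
+    {
+      "type":"filters.decimation",
+      "step": 10,
+      "user_data": "application-specific note"
+    }
+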
 Filename Globbing
 ................................................................................
 
@@ -227,7 +218,8 @@ reading the input file, the ferry filter is used to copy the Z dimension into a
 new height above ground (HAG) dimension. Next, the :ref:`filters.programmable`
 is used with a Python script to compute height above ground values by comparing
 the Z values to a surface model. These height above ground values are then
-written back into the Z dimension for further analysis.
+written back into the Z dimension for further analysis. See the Python
+code at `hag.py`_.
 
 .. seealso::
 
@@ -253,13 +245,15 @@ written back into the Z dimension for further analysis.
       ]
   }
 
+.. _`hag.py`: https://raw.githubusercontent.com/PDAL/PDAL/master/test/data/autzen/hag.py.in
+
 DTM
 ................................................................................
 
 A common task is to create a digital terrain model (DTM) from the input point
 cloud. This pipeline infers the reader type, applies an approximate ground
 segmentation filter using :ref:`filters.pmf`, and then creates the DTM using
-the :ref:`writers.p2g` with only the ground returns.
+the :ref:`writers.gdal` with only the ground returns.
 
 .. code-block:: json
 
@@ -278,7 +272,7 @@ the :ref:`writers.p2g` with only the ground returns.
               "classify":false
           },
           {
-              "type":"writers.p2g",
+              "type":"writers.gdal",
               "filename":"autzen-surface.tif",
               "output_type":"min",
               "output_format":"tif",
@@ -467,30 +461,3 @@ PDAL. Readers follow the pattern of :ref:`readers.las` or
     Issuing the command ``pdal info --options`` will list all available
     stages and their options. See :ref:`info_command` for more.
 
-Options
-..............................................................................
-
-Options are the mechanism that PDAL uses to inform :cpp:class:`pdal::Stage`
-entities how to process data. The following example sorts the data using a
-`Morton ordering`_ using :ref:`filters.mortonorder` and writes out a `LASzip`_
-file as the result. We use options to define the ``compression`` function
-for the :ref:`writers.las` :cpp:class:`pdal::Stage`.
-
-.. _`LASzip`: http://www.laszip.org
-.. _`Morton ordering`: http://en.wikipedia.org/wiki/Z-order_curve
-
-.. code-block:: json
-
-    {
-      "pipeline":[
-        "uncompressed.las",
-        {
-          "type":"filters.mortonorder"
-        }
-        {
-          "type":"writers.las",
-          "filename":"compressed.laz",
-          "compression":"true"
-        }
-      ]
-    }
diff --git a/doc/python.rst b/doc/python.rst
new file mode 100644
index 0000000..0660e7e
--- /dev/null
+++ b/doc/python.rst
@@ -0,0 +1,128 @@
+.. _python:
+
+********************************************************************
+Python
+********************************************************************
+
+.. index:: Numpy, Python
+
+
+PDAL provides Python support in two significant ways. First it `embeds`_ Python
+to allow you to write Python programs that interact with data using
+:ref:`filters.programmable` and :ref:`filters.predicate` filters. Second,
+it `extends`_ Python by providing an extension that Python programmers
+can use to leverage PDAL capabilities in their own applications.
+
+.. _`embeds`: https://docs.python.org/3/extending/embedding.html
+.. _`extends`: https://docs.python.org/3/extending/extending.html
+
+.. note::
+
+    PDAL's Python story always revolves around `Numpy`_ support. PDAL's
+    data is provided to both the filters and the extension as
+    Numpy arrays.
+
+.. _NumPy: http://www.numpy.org/
+
+Versions
+--------------------------------------------------------------------------------
+
+PDAL supports both Python 2.7 and Python 3.4+. :ref:`integration` tests Python
+2.7 on both Linux and Windows. Python 3 is used by a number of developers
+for ad hoc development and testing.
+
+Embed
+--------------------------------------------------------------------------------
+
+.. index:: Embed, Python
+
+PDAL allows users to embed Python functions inline with other :ref:`pipeline`
+processing operations. The purpose of this capability is to allow users to
+write small programs that implement interesting actions without requiring the
+full C++ development effort of building a PDAL stage. A
+Python filter is an opportunity to interactively and iteratively prototype a
+data operation without strong considerations of performance or generality.  If
+something works well enough, maybe one takes on the effort to formalize it, but
+that isn't necessary. PDAL's embedding of Python allows you to be as grimy as
+you need to get the job done.
+
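+A function of the kind :ref:`filters.programmable` accepts takes ``ins``
+and ``outs`` Numpy arrays and returns ``True`` on success; for example (a
+minimal sketch mirroring the example in :ref:`filters.programmable`):
+
+.. code-block:: python
+
+    def multiply_z(ins, outs):
+        # Scale the Z dimension by 10 and hand the result back to PDAL.
+        outs['Z'] = ins['Z'] * 10.0
+        return True
+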
+.. figure:: ./images/python-pdal-pipeline.png
+
+    Embedding a Python function to take Z values read from a
+    :ref:`readers.las` and then output them to a :ref:`writers.bpf`.
+
+Extend
+--------------------------------------------------------------------------------
+
+.. index:: Extension, Python
+
+PDAL provides a Python extension that gives users access to executing
+:ref:`pipeline` instantiations and capturing the results as `Numpy`_ arrays.
+This mode of operation is useful if you are looking to have PDAL simply act as
+your data format and processing handler.
+
+Python extension users are expected to construct their own JSON :ref:`pipeline`
+using Python's ``json`` library, or whatever other library they wish to use
+to manipulate JSON. They then feed it into the extension and get back the
+results as `Numpy`_ arrays:
+
+.. code-block:: python
+
+
+    json = """
+    {
+      "pipeline": [
+        "1.2-with-color.las",
+        {
+            "type": "filters.sort",
+            "dimension": "X"
+        }
+      ]
+    }"""
+
+    import pdal
+    pipeline = pdal.Pipeline(json)
+    pipeline.validate() # check if our JSON and options were good
+    pipeline.loglevel = 8 #really noisy
+    count = pipeline.execute()
+    arrays = pipeline.arrays
+    metadata = pipeline.metadata
+    log = pipeline.log
+
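+The ``arrays`` attribute holds one structured `Numpy`_ array per point view
+produced by the pipeline. For example (assuming the file provides a ``Z``
+dimension):
+
+.. code-block:: python
+
+    # Each array is a structured Numpy array keyed by dimension name.
+    print(len(arrays))
+    print(arrays[0]['Z'].mean())
+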
+Installation
+................................................................................
+
+PDAL Python bindings require a working PDAL install (:ref:`PDAL <building>`)
+and then installation of the Python extension. The extension lives on `PyPI`_
+at https://pypi.python.org/pypi/PDAL and you should use that version as your
+canonical Python extension install.
+
+.. _`PyPI`: https://pypi.python.org/pypi/PDAL
+
+Install from local
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The PDAL source tree contains a ``python`` folder. Change into it and run ::
+
+    python setup.py build
+    # this should be run as administrator/super user
+    python setup.py install
+
+Install from repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. index:: Install, Python
+
+The second method to install the PDAL Python extension is to use `pip`_
+or `easy_install`_; you must run the command as administrator. ::
+
+    pip install PDAL
+
+.. note::
+
+    To install pip, please read the instructions
+    `here <https://pip.pypa.io/en/stable/installing/>`_.
+
+.. _`pip`: https://pip.pypa.io/en/stable/
+.. _`easy_install`: https://pypi.python.org/pypi/setuptools
diff --git a/doc/quickstart.rst b/doc/quickstart.rst
index 5df5745..188f8f5 100644
--- a/doc/quickstart.rst
+++ b/doc/quickstart.rst
@@ -21,6 +21,24 @@ It will utilize the PDAL :ref:`command line application <apps>` to inspect the
 file.
 
 
+.. note::
+
+    While Docker is convenient, it is not for everyone, and it is not
+    required to use PDAL. Packages are available on Linux (Debian, RPM) and
+    OSX (`Homebrew`_); see :ref:`download` to obtain them, or :ref:`building`
+    if you need to compile your own copy of PDAL.
+
+    If you are a developer looking to leverage PDAL, you will need access
+    to the library in your environment, but this quick start document is for
+    those looking to quickly interact with data using PDAL's :ref:`command line
+    applications <apps>` and :ref:`pipeline`.
+
+.. _`Homebrew`: http://brew.sh
+
 .. _docker:
 
 Install Docker
@@ -69,14 +87,15 @@ Obtain PDAL Image
 ................................................................................
 
 A PDAL image based on the latest release, including all recent patches, is
-pushed to `Docker Hub`_ with every code change on the PDAL maintenance branch.
+pushed to `Docker Hub`_ with every code change on the PDAL maintenance branch
+(find out more about that :ref:`here <development_docker>`).
 We need to pull it locally so we can use it to run PDAL commands. Once it is
 pulled, we don't have to pull it again unless we want to refresh it for
 whatever reason.
 
 ::
 
-    docker pull pdal/pdal:1.4
+    docker pull pdal/pdal:1.5
 
 
 .. image:: ./images/docker-quickstart-pull.png
@@ -84,7 +103,7 @@ whatever reason.
 .. note::
 
     Other PDAL versions are provided at the same `Docker Hub`_ location,
-    with an expected tag name (ie ``pdal/pdal:1.4``, or ``pdal/pdal:1.x``) for
+    with an expected tag name (i.e. ``pdal/pdal:1.5`` or ``pdal/pdal:1.x``) for
     major PDAL versions. The PDAL Docker hub location at
     https://hub.docker.com/u/pdal/ has images and more information
     on this topic.
@@ -114,7 +133,7 @@ Print the first point
 
 ::
 
-    docker run -v /c/Users/hobu:/data pdal/pdal:1.4 pdal info /data/autzen.laz -p 0
+    docker run -v /c/Users/hobu:/data pdal/pdal:1.5 pdal info /data/autzen.laz -p 0
 
 Here's a summary of what's going on with that command invocation
 
@@ -132,7 +151,7 @@ Here's a summary of what's going on with that command invocation
        The `Docker Volume <https://docs.docker.com/engine/userguide/dockervolumes/>`__
        document describes mounting volumes in more detail.
 
-4. ``pdal/pdal:1.4``: This is the Docker image we are going to run. We fetched it
+4. ``pdal/pdal:1.5``: This is the Docker image we are going to run. We fetched it
    with the command above. If it were not already fetched, Docker would attempt
    to fetch it when we run this command.
 
@@ -162,6 +181,8 @@ What's next?
 * :ref:`The PDAL workshop <workshop>` contains numerous hands-on examples with screenshots and
   example data of how to use PDAL :ref:`apps` to tackle point cloud data
   processing tasks.
+* :ref:`python` describes how PDAL embeds and extends Python and
+  how you can leverage these capabilities in your own programs.
 
 .. seealso::
 
diff --git a/doc/stages/filters.assign.rst b/doc/stages/filters.assign.rst
new file mode 100644
index 0000000..189aa6c
--- /dev/null
+++ b/doc/stages/filters.assign.rst
@@ -0,0 +1,41 @@
+.. _filters.assign:
+
+filters.assign
+===================
+
+The assign filter allows you to set the value of a dimension to a provided
+value for all points that pass a range filter.
+
+
+Example 1
+---------
+
+This pipeline resets the Classification of all points with classifications
+2 or 3 to 0, and of all points with classification 5 to 4.
+
+.. code-block:: json
+
+    {
+      "pipeline":[
+        "autzen-dd.las",
+        {
+          "type":"filters.assign",
+          "assignment" : "Classification[2:3]=0",
+          "assignment" : "Classification[5:5]=4"
+        },
+        {
+          "filename":"attributed.las",
+          "scale_x":0.0000001,
+          "scale_y":0.0000001
+        }
+      ]
+    }
+
+
+Options
+-------
+
+assignment
+  A :ref:`range <ranges>` followed by an assignment of a value (see example).
+  Can be specified multiple times.  The assignments are applied sequentially
+  to the dimension value as set when the filter began processing.
diff --git a/doc/stages/filters.cluster.rst b/doc/stages/filters.cluster.rst
new file mode 100644
index 0000000..87f23fb
--- /dev/null
+++ b/doc/stages/filters.cluster.rst
@@ -0,0 +1,41 @@
+.. _filters.cluster:
+
+===============================================================================
+filters.cluster
+===============================================================================
+
+The Cluster filter first performs Euclidean Cluster Extraction on the input
+``PointView`` and then labels each point with its associated cluster ID.
+
+Example
+-------
+
+.. code-block:: json
+
+    {
+      "pipeline":[
+        "input.las",
+        {
+          "type":"filters.cluster"
+        },
+        {
+          "type":"writers.bpf",
+          "filename":"output.bpf",
+          "output_dims":"X,Y,Z,ClusterID"
+        }
+      ]
+    }
+
+Options
+-------
+
+min_points
+  Minimum number of points to be considered a cluster. [Default: **1**]
+
+max_points
+  Maximum number of points to be considered a cluster. [Default: **UINT64_MAX**]
+
+tolerance
+  Cluster tolerance - maximum Euclidean distance for a point to be added to the
+  cluster. [Default: **1.0**]
diff --git a/doc/stages/filters.crop.rst b/doc/stages/filters.crop.rst
index add6e4e..fbe9a10 100644
--- a/doc/stages/filters.crop.rst
+++ b/doc/stages/filters.crop.rst
@@ -4,7 +4,7 @@ filters.crop
 ============
 
 The crop filter removes points that fall outside or inside a cropping bounding
-box (2D), polygon, or point+radius.  If more than one bounding region is
+box (2D), polygon, or point+distance.  If more than one bounding region is
 specified, the filter will pass all input points through each bounding region,
 creating an output point set for each input crop region.
 
@@ -42,7 +42,7 @@ outside
   Invert the cropping logic and only take points **outside** the cropping bounds or polygon. [Default: **false**]
 
 point
-  An array of WKT or GeoJSON 2D or 3D points. Requires ``radius``.
+  An array of WKT or GeoJSON 2D or 3D points. Requires ``distance``.
 
-radius
+distance
   Distance in units of common X, Y, and Z :ref:`dimensions` to crop circle or sphere in combination with ``point``.
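+
+For example, a crop stage keeping only points within 500 units of a location
+might look like the following (a sketch; the point coordinates and distance
+are placeholders):
+
+.. code-block:: json
+
+    {
+      "type":"filters.crop",
+      "point":"POINT(0 0 0)",
+      "distance": 500
+    }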
diff --git a/doc/stages/filters.groupby.rst b/doc/stages/filters.groupby.rst
new file mode 100644
index 0000000..c13b1f8
--- /dev/null
+++ b/doc/stages/filters.groupby.rst
@@ -0,0 +1,29 @@
+.. _filters.groupby:
+
+filters.groupby
+===============================================================================
+
+The groupby filter takes a single PointView as its input and creates a PointView
+for each category in the named ``dimension`` as its output.
+
+Example
+-------
+
+.. code-block:: json
+
+    {
+      "pipeline":[
+        "input.las",
+        {
+          "type":"filters.groupby",
+          "dimension":"Classification"
+        },
+        "output_#.las"
+      ]
+    }
+
+Options
+-------
+
+dimension
+  The dimension containing data to be grouped.
diff --git a/doc/stages/filters.locate.rst b/doc/stages/filters.locate.rst
new file mode 100644
index 0000000..9165050
--- /dev/null
+++ b/doc/stages/filters.locate.rst
@@ -0,0 +1,38 @@
+.. _filters.locate:
+
+===============================================================================
+filters.locate
+===============================================================================
+
+The Locate filter searches the specified ``dimension`` for the minimum or
+maximum value and returns a single point at this location. If multiple points
+share the min/max value, the first will be returned. All dimensions of the input
+``PointView`` will be output, subject to any overriding writer options.
+
+Example
+-------
+
+This example returns the point at the highest elevation.
+
+.. code-block:: json
+
+    {
+      "pipeline":[
+        "input.las",
+        {
+          "type":"filters.locate",
+          "dimension":"Z",
+          "minmax":"max"
+        },
+        "output.las"
+      ]
+    }
+
+Options
+-------
+
+dimension
+  Name of the dimension in which to search for min/max value.
+
+minmax
+  Whether to return the minimum or maximum value in the dimension.
diff --git a/doc/stages/filters.attribute.rst b/doc/stages/filters.overlay.rst
similarity index 66%
rename from doc/stages/filters.attribute.rst
rename to doc/stages/filters.overlay.rst
index b70cf61..fb9f5e2 100644
--- a/doc/stages/filters.attribute.rst
+++ b/doc/stages/filters.overlay.rst
@@ -1,16 +1,11 @@
-.. _filters.attribute:
+.. _filters.overlay:
 
-filters.attribute
+filters.overlay
 ===================
 
-The attribute filter allows you to set the values of a
-selected dimension. Two scenarios are supported:
 
-* Set the value of a dimension of all points to single value
-  (use option 'value')
-
-* Set points inside an OGR-readable Polygon or MultiPolygon
-  (use option 'datasource')
+The overlay filter allows you to set the values of a selected dimension
+based on an OGR-readable polygon or multi-polygon.
 
 OGR SQL support
 ----------------
@@ -43,7 +38,7 @@ feature.
       "pipeline":[
         "autzen-dd.las",
         {
-          "type":"filters.attribute",
+          "type":"filters.overlay",
           "dimension":"Classification",
           "datasource":"attributes.shp",
           "layer":"attributes",
@@ -57,33 +52,8 @@ feature.
       ]
     }
 
-Example 2
----------
-
-This pipeline sets the PointSourceId of all points from 'autzen-dd.las'
-to the value '26'.
-
-.. code-block:: json
 
-    {
-      "pipeline":[
-        "autzen-dd.las",
-        {
-          "type":"filters.attribute",
-          "dimension":"PointSourceId",
-          "value":26
-        },
-        {
-          "filename":"attributed.las",
-          "scale_x":0.0000001,
-          "scale_y":0.0000001
-        }
-      ]
-    }
-
-
-
-Example 3
+Example 2
 --------------------------------------------------------------------------------
 
 This example sets the Intensity attribute to ``CLS`` values read from the
@@ -97,7 +67,7 @@ This example sets the Intensity attribute to ``CLS`` values read from the
       "pipeline":[
         "autzen-dd.las",
         {
-          "type":"filters.attribute",
+          "type":"filters.overlay",
           "dimension":"Intensity",
           "datasource":"attributes.shp",
           "query":"SELECT CLS FROM attributes where cls!=6",
@@ -113,13 +83,10 @@ Options
 -------
 
 dimension
-  Name of the dimension whose value should be altered.  [Default: none]
-
-value
-  Value to apply to the dimension.  [Default: none]
+  Name of the dimension whose value should be altered.  [Required]
 
 datasource
-  OGR-readable datasource for Polygon or MultiPolygon data.  [Default: none]
+  OGR-readable datasource for Polygon or MultiPolygon data.  [Required]
 
 column
   The OGR datasource column from which to read the attribute.
@@ -127,7 +94,7 @@ column
 
 query
   OGR SQL query to execute on the datasource to fetch geometry and attributes.
-  [Default: none]
+  The entire layer is fetched if no query is provided.  [Default: none]
 
 layer
   The data source's layer to use. [Default: first layer]
diff --git a/doc/stages/filters.pmf.rst b/doc/stages/filters.pmf.rst
index 7b2f17e..ab93fe8 100644
--- a/doc/stages/filters.pmf.rst
+++ b/doc/stages/filters.pmf.rst
@@ -26,6 +26,37 @@ Example
       ]
     }
 
+Notes
+-------------------------------------------------------------------------------
+
+* ``slope`` controls the height threshold at each iteration. A slope of ``1.0``
+  represents a 1:1 slope, or 45º.
+
+* ``initial_distance`` is *intended* to be set to account for Z noise: for a
+  flat surface with a vertical uncertainty of around 15 cm, set
+  ``initial_distance`` large enough that such points are not excluded from
+  the ground.
+
+* For a given iteration, the height threshold is determined by multiplying
+  ``slope`` by ``cell_size`` by the difference in window size between the
+  current and last iteration, plus the ``initial_distance``; it is capped at
+  the ``max_distance`` value (see the formula after this list). The height
+  threshold is constant across all cells. If the difference in elevation
+  between a point and its “opened” value (from the morphological operator)
+  exceeds the height threshold, the point is treated as non-ground. A bigger
+  slope thus leads to bigger height thresholds, and these grow with each
+  iteration (not to exceed the cap). With flat terrain, keep ``slope`` low:
+  the thresholds stay small and points are more aggressively classified as
+  non-ground. In rugged terrain, open things up a little, but then you can
+  start missing buildings, vegetation, etc.
+
+* Very large ``max_window_size`` values will result in many potentially
+  unnecessary iterations, and can have a strongly negative impact on
+  computation performance.
+
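+Expressed as a formula (a direct transcription of the description above,
+with :math:`s` = ``slope``, :math:`c` = ``cell_size``, :math:`w_t` = the
+window size at iteration :math:`t`, :math:`dh_0` = ``initial_distance``, and
+:math:`dh_{max}` = ``max_distance``), the height threshold at iteration
+:math:`t` is:
+
+.. math::
+
+    dh_t = \min\left(s \cdot c \cdot (w_t - w_{t-1}) + dh_0,\; dh_{max}\right)
+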
+.. note::
+    [Zhang2003]_ describes the consequences and relationships of the
+    parameters in more detail and is the canonical resource on the
+    topic.
+
 Options
 -------------------------------------------------------------------------------
 
diff --git a/doc/stages/filters.predicate.rst b/doc/stages/filters.predicate.rst
index a6a9345..809e5a5 100644
--- a/doc/stages/filters.predicate.rst
+++ b/doc/stages/filters.predicate.rst
@@ -8,6 +8,11 @@ Like the :ref:`filters.programmable` filter, the predicate filter applies a
 the stream by setting true/false values into a special "Mask" dimension in the
 output point array.
 
+.. note::
+
+    See :ref:`filters.programmable` for documentation about how to access
+    the ``metadata``, ``spatialreference``, and ``schema`` variables.
+
 .. code-block:: python
 
   import numpy as np
@@ -87,6 +92,9 @@ module
 function
   The function to call.
 
+pdalargs
+  A JSON dictionary of items you wish to pass into the module's globals as the
+  ``pdalargs`` object.
 
 
 .. _Python: http://python.org
diff --git a/doc/stages/filters.programmable.rst b/doc/stages/filters.programmable.rst
index e58fb62..8f4f8a4 100644
--- a/doc/stages/filters.programmable.rst
+++ b/doc/stages/filters.programmable.rst
@@ -3,15 +3,20 @@
 filters.programmable
 ====================
 
-The programmable filter takes a stream of points and applies a `Python`_
-function to each point in the stream.
+The programmable filter allows `Python`_ software to be embedded in a
+:ref:`pipeline`. The Python function interacts with the data as a `NumPy`_
+array and can modify those points. Additionally, some global :ref:`metadata`
+is also available for Python functions to interact with.
 
-The function must have two `NumPy`_ arrays as arguments, `ins` and `outs`. The
-`ins` array represents input points, the `outs` array represents output points.
-Each array contains all the dimensions of the point schema, for a number of
-points (depending on how large a point buffer the pipeline is processing at the
-time, a run-time consideration). Individual arrays for each dimension can be
-read from the input point and written to the output point.
+The function must have two `NumPy`_ arrays as arguments, ``ins`` and ``outs``.
+The ``ins`` array represents the points before the ``filters.programmable``
+filter and the ``outs`` array represents the points after filtering.
+
+.. warning::
+
+    Each array contains all the :ref:`dimensions` of the incoming ``ins``
+    point schema. Each array written to ``outs`` must match its corresponding
+    ``ins`` array in shape and type.
 
 
 .. code-block:: python
@@ -24,14 +29,20 @@ read from the input point and written to the output point.
       outs['Z'] = Z
       return True
 
-Note that the function always returns `True`. If the function returned `False`,
-an error would be thrown and the translation shut down.
 
-If you want to write a dimension that might not be available, use can use one
-or more `add_dimension` options.
 
-To filter points based on a `Python`_ function, use the
-:ref:`filters.predicate` filter.
+1) The function must always return `True` upon success. If the function
+   returns `False`, an error is thrown and the :ref:`pipeline` exits.
+
+2) If you want to write a dimension that might not be available, you can use
+   one or more ``add_dimension`` options.
+
+.. note::
+
+    To filter points based on a `Python`_ function, use the
+    :ref:`filters.predicate` filter.
 
 Example
 -------
@@ -71,9 +82,93 @@ which scales up the Z coordinate by a factor of 10.
       outs['Z'] = Z
       return True
 
+Module Globals
+--------------------------------------------------------------------------------
+
+Three global variables are added to the Python module as it is run to allow
+you to get :ref:`dimensions`, :ref:`metadata`, and coordinate system information.
+Additionally, the ``metadata`` object can be set by the function to modify metadata
+for the in-scope :ref:`filters.programmable` :cpp:class:`pdal::Stage`.
+
+.. code-block:: python
+
+   def myfunc(ins,outs):
+       print ('schema: ', schema)
+       print ('srs: ', spatialreference)
+       print ('metadata: ', metadata)
+       outs = ins
+       return True
+
+Updating metadata
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The filter can update the global ``metadata`` dictionary as needed. Declare it
+as a **global** Python variable in the function's scope, and the updates will
+be reflected back into the pipeline from that stage forward.
+
+.. code-block:: python
+
+   def myfunc(ins,outs):
+     global metadata
+     metadata = {
+         'name': 'root',
+         'value': 'a string',
+         'type': 'string',
+         'description': 'a description',
+         'children': [
+             {'name': 'filters.programmable', 'value': 52, 'type': 'integer',
+              'description': 'a filter description', 'children': []},
+             {'name': 'readers.faux', 'value': 'another string',
+              'type': 'string', 'description': 'a reader description',
+              'children': []}
+         ]
+     }
+     return True
+
+Passing Python objects
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As of PDAL 1.5, it is possible to pass :ref:`filters.programmable` and
+:ref:`filters.predicate` a JSON option representing a Python dictionary of
+objects you want to use in your function. This feature is useful in situations
+where you wish to call :ref:`pipeline_command` with substitutions.
+
+If we needed to provide the Z scaling factor of `Example`_ as an argument, we
+could place it in a dictionary and pass it to the filter as a separate option.
+This feature allows us to easily reuse the same basic Python function while
+substituting values as necessary.
+
+.. code-block:: json
+
+    {
+      "pipeline":[
+        "input.las",
+        {
+          "type":"filters.programmable",
+          "module":"anything",
+          "function":"filter",
+          "source":"arguments.py"
+          "pdalargs":"{\"factor\":0.3048,\"an_argument\":42, \"another\": \"a string\"}"
+        },
+        "output.las"
+      ]
+    }
+
+With that option set, you can now fetch the ``pdalargs`` dictionary in your
+Python script and use it:
+
+.. code-block:: python
+
+  import numpy as np
+
+  def multiply_z(ins,outs):
+      Z = ins['Z']
+      Z = Z * float(pdalargs['factor'])
+      outs['Z'] = Z
+      return True
+
+
+Standard output and error
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A ``redirector`` module is available for scripts to output to PDAL's log stream
+explicitly. The module handles redirecting ``sys.stderr`` and ``sys.stdout`` for you
+transparently, but it can be used directly by scripts. See the PDAL source
+code for more details.
+
 
 Options
--------
+--------------------------------------------------------------------------------
 
 script
   When reading a function from a separate `Python`_ file, the file name to read
@@ -91,5 +186,9 @@ source
 add_dimension
   The name of a dimension to add to the pipeline that does not already exist.
 
+pdalargs
+  A JSON dictionary of items you wish to pass into the module's globals as the
+  ``pdalargs`` object.
+
 .. _Python: http://python.org/
 .. _NumPy: http://www.numpy.org/
diff --git a/doc/stages/filters.range.rst b/doc/stages/filters.range.rst
index 5f7fcdb..6b4044e 100644
--- a/doc/stages/filters.range.rst
+++ b/doc/stages/filters.range.rst
@@ -3,6 +3,8 @@
 filters.range
 ======================
 
+.. contents::
+
 The range filter applies rudimentary filtering to the input point cloud
 based on a set of criteria on the given dimensions.
 
@@ -58,3 +60,61 @@ limits
   1, 2, 6 or 7 and have a blue value or 25-75 and have a red value of
   1-50 or 75-255.  In this case, all values are inclusive.
 
+
+.. _ranges:
+
+Ranges
+--------------------------------------------------------------------------------
+
+A range specification is a dimension name, followed by an optional negation
+character ('!'), and a starting and ending value separated by a colon,
+surrounded by parentheses or square brackets.  Either the starting or ending
+values can be omitted.  Parentheses indicate an open endpoint that doesn't
+include the adjacent value.  Square brackets indicate a closed endpoint
+that includes the adjacent value.
+
+Example 1:
+................................................................................
+
+::
+
+  Z[10:]
+
+Selects all points with a Z value greater than or equal to 10.
+
+Example 2:
+................................................................................
+
+::
+
+  Classification[2:2]
+
+Selects all points with a classification of 2.
+
+Example 3:
+................................................................................
+
+::
+
+  Red!(20:40]
+
+Selects all points with red values less than or equal to 20 and those with
+values greater than 40.
+
+Example 4:
+................................................................................
+
+::
+
+  Blue[:255)
+
+Selects all points with a blue value less than 255.
+
+Example 5:
+................................................................................
+
+::
+
+  Intensity![25:25]
+
+Selects all points with an intensity not equal to 25.
diff --git a/doc/stages/filters.reprojection.rst b/doc/stages/filters.reprojection.rst
index 5ce61ca..02ddfbb 100644
--- a/doc/stages/filters.reprojection.rst
+++ b/doc/stages/filters.reprojection.rst
@@ -21,8 +21,13 @@ if you want to preserve the old coordinates for future processing, use a
     for storing the data.
 
 
-Example
--------
+Example 1
+--------------------------------------------------------------------------------
+
+This pipeline reprojects terrain points with Z-values between 0 and 100 by
+first applying a range filter and then specifying both the input and output
+spatial reference as EPSG codes. The X and Y dimensions are scaled to allow
+enough precision in the output coordinates.
 
 .. code-block:: json
 
@@ -55,15 +60,48 @@ Example
       ]
     }
 
+Example 2
+--------------------------------------------------------------------------------
+
+In some cases it is not possible to use an EPSG code as a spatial reference.
+Instead `Proj.4 <http://proj4.org>`_ parameters can be used to define a spatial
+reference.  In this example the vertical component of points in a laz file is
+converted from geometric (ellipsoidal) heights to orthometric heights by using
+the ``geoidgrids`` parameter from Proj.4.  Here we change the vertical datum
+from the GRS80 ellipsoid to DVR90, the vertical datum in Denmark. In the
+writing stage of the pipeline the spatial reference of the file is set to
+EPSG:7416. The last step is needed since PDAL will otherwise reference the
+vertical datum as "Unnamed Vertical Datum" in the spatial reference VLR.
+
+
+.. code-block:: json
+    :linenos:
+
+    {
+      "pipeline":[
+        "./1km_6135_632.laz",
+        {
+            "type":"filters.reprojection",
+            "in_srs":"EPSG:25832",
+            "out_srs":"+init=epsg:25832 +geoidgrids=C:/data/geoids/dvr90.gtx"
+        },
+        {
+          "type":"writers.las",
+          "a_srs":"EPSG:7416",
+          "filename":"1km_6135_632_DVR90.laz"
+        }
+      ]
+    }
+
 Options
 -------
 
 in_srs
   Spatial reference system of the input data. Express as an EPSG string (eg
-  "EPSG:4326" for WGS86 geographic) or a well-known text string. [Required if
+  "EPSG:4326" for WGS84 geographic), Proj.4 string or a well-known text string. [Required if
   input reader does not supply SRS information]
 
 out_srs
   Spatial reference system of the output data. Express as an EPSG string (eg
-  "EPSG:4326" for WGS86 geographic) or a well-known text string. [Required]
+  "EPSG:4326" for WGS84 geographic), Proj.4 string or a well-known text string. [Required]
 
diff --git a/doc/stages/filters.smrf.rst b/doc/stages/filters.smrf.rst
index a9a59db..d36f267 100644
--- a/doc/stages/filters.smrf.rst
+++ b/doc/stages/filters.smrf.rst
@@ -6,20 +6,6 @@ filters.smrf
 Filter ground returns using the Simple Morphological Filter (SMRF) approach
 outlined in [Pingel2013]_.
 
-.. note::
-  
-  Our implmentation of SMRF is in an alpha state. We'd love to have you kick
-  the tires and provide feedback, but do not plan on using this in production.
-  
-The current implementation of ``filters.smrf`` differs slightly from the
-original paper. We weren't too happy with the performance of (our implementation
-of) the inpainting routine, so we started exploring some other methods.
-
-Some warts about the current implementation:
-
-* It writes a bunch of intermediate/debugging outputs to the current directory
-  while processing. This should be made optional and then eventually go away.
-  
 .. [Pingel2013] Pingel, T.J., Clarke, K.C., McBride, W.A., 2013. An improved simple morphological filter for the terrain classification of airborne LIDAR data. ISPRS J. Photogramm. Remote Sens. 77, 21–30.
 
 Example
@@ -34,9 +20,12 @@ returns, writing only the ground returns to the output file.
       "pipeline":[
         "input.las",
         {
-          "type":"filters.smrf",
-          "extract":true
+          "type":"filters.smrf"
         },
+        {
+          "type":"filters.range",
+          "limits":"Classification[2:2]"
+        },
         "output.laz"
       ]
     }
@@ -47,20 +36,20 @@ Options
 cell
   Cell size. [Default: **1.0**]
 
-classify
-  Apply classification labels (i.e., ground = 2)? [Default: **true**]
-
 cut
   Cut net size (``cut=0`` skips the net cutting step). [Default: **0.0**]
   
-extract
-  Extract ground returns (non-ground returns are cropped)? [Default: **false**]
+outdir
+  Optional output directory for debugging intermediate rasters.
+  
+scalar
+  Elevation scalar. [Default: **1.25**]
   
 slope
   Slope (rise over run). [Default: **0.15**]
   
 threshold
-  Elevation threshold. [Default: **0.15**]
+  Elevation threshold. [Default: **0.5**]
   
 window
-  Max window size. [Default: **21.0**]
+  Max window size. [Default: **18.0**]
diff --git a/doc/stages/filters.sort.rst b/doc/stages/filters.sort.rst
index 0d9ce6b..5325b12 100644
--- a/doc/stages/filters.sort.rst
+++ b/doc/stages/filters.sort.rst
@@ -3,9 +3,8 @@
 filters.sort
 ============
 
-The sort filter orders a point view based on the values of a dimension.
-The current filter only supports sorting based on a single dimension in
-increasing order.
+The sort filter orders a point view based on the values of a dimension. The
+sorting can be done in increasing (ascending) or decreasing (descending) order.
 
 Example
 -------
@@ -22,6 +21,9 @@ Example
         <Option name="dimension">
           X
         </Option>
+        <Option name="order">
+          ASC
+        </Option>
         <Reader type="readers.las">
           <Option name="filename">
             unsorted.las
@@ -38,9 +40,5 @@ Options
 dimension
   The dimension on which to sort the points.
 
-Notes
------
-
-The sorting algorithm used is not stable, meaning that one cannot chain
-multiple Sort filters to sort order point buffer heirarchically (say,
-primarily by the dimension X and secondairly by the dimension Y).
+order
+  The order in which to sort, ASC or DESC. [Default: **ASC**]
diff --git a/doc/stages/ranges.rst b/doc/stages/ranges.rst
deleted file mode 100644
index 70bccb1..0000000
--- a/doc/stages/ranges.rst
+++ /dev/null
@@ -1,57 +0,0 @@
-.. _ranges:
-
-Ranges
-======
-
-A range specification is a dimension name, followed by an optional negation
-character ('!'), and a starting and ending value separated by a colon, 
-surrounded by parentheses or square brackets.  Either the starting or ending
-values can be omitted.  Parentheses indicate an open endpoint that doesn't
-include the adjacent value.  Square brackets indicate a closed endpoint
-that includes the adjacent value.
-
-Example 1:
-----------
-
-::
-
-  Z[10:]
-
-Selects all points with a Z value greater than or equal to 10.
-  
-Example 2:
-----------
-
-::
-
-  Classification[2:2]
-
-Selects all points with a classification of 2.
-
-Example 3:
-----------
-
-::
-
-  Red!(20:40]
-
-Selects all points with red values less than or equal to 20 and those with
-values greater than 40
-
-Example 4:
-----------
-
-::
-
-  Blue[:255)
-  
-Selects all points with a blue value less than 255.
-
-Example 5:
-----------
-
-::
-
-  Intesity![25:25]
-
-Selects all points with an intensity not equal to 25.
diff --git a/doc/stages/readers.faux.rst b/doc/stages/readers.faux.rst
index 61b5920..1c4dcc6 100644
--- a/doc/stages/readers.faux.rst
+++ b/doc/stages/readers.faux.rst
@@ -3,9 +3,29 @@
 readers.faux
 ============
 
-The "**faux reader**" is used for testing pipelines. It does not read from a
+The faux reader is used for testing pipelines. It does not read from a
 file or database, but generates synthetic data to feed into the pipeline.
 
+The faux reader requires a mode argument to define the method in which points
+should be generated.  Valid modes are as follows:
+
+constant
+    The values provided as the minimums to the bounds argument are
+    used for the X, Y and Z value, respectively, for every point.
+random
+    Random values are chosen within the provided bounds.
+ramp
+    Values increase uniformly from the minimum values to the maximum values.
+uniform
+    Random values of each dimension are uniformly distributed in the
+    provided ranges.
+normal
+    Random values of each dimension are normally distributed in the
+    provided ranges.
+grid
+    Creates points with integer-valued coordinates in the range provided
+    (excluding the upper bound).
+
 Example
 -------
 
@@ -46,9 +66,5 @@ stdev_x|y|z
   only) [Default: 1]
 
 mode
-  How to generate synthetic points. One of "constant" (repeat single value),
-  "random" (random values within bounds), "ramp" (steadily increasing values
-  within the bounds), "uniform" (uniformly distributed within bounds), or
-  "normal" (normal distribution with given mean and standard deviation).
-  [Required]
+  "constant", "random", "ramp", "uniform", "normal" or "grid" [Required]
 
diff --git a/doc/stages/readers.gdal.rst b/doc/stages/readers.gdal.rst
index 6325d0a..eebab52 100644
--- a/doc/stages/readers.gdal.rst
+++ b/doc/stages/readers.gdal.rst
@@ -11,20 +11,23 @@ The `GDAL`_ reader reads `GDAL readable raster`_ data sources as point clouds.
 Each pixel is given an X and Y coordinate (and corresponding PDAL dimensions)
 that correspond to the pixel's center, and each band is represented by
 "band-1", "band-2", or
 "band-n". The user must know what the bands correspond to, and use
-:ref:`filters.ferry` to copy data into known dimensions as needed.
+:ref:`filters.ferry` to copy data into known :ref:`dimensions` as needed.
 
 
 .. note::
 
-    :ref:`filters.ferry` is needed because raster data do not map to
-    typical dimension names. For output to formats such as :ref:`LAS <writers.las>`,
-    this mapping is required.
+    :ref:`filters.ferry` is needed to map GDAL output to typical :ref:`dimensions`
+    names. For output to formats such as :ref:`LAS <writers.las>`, this mapping
+    is required.
 
 
 Basic Example
 --------------------------------------------------------------------------------
 
+Simply writing every pixel of a JPEG to a text file is not very useful.
+
 .. code-block:: json
+    :linenos:
 
     {
       "pipeline":[
@@ -44,11 +47,13 @@ Basic Example
 LAS Example
 --------------------------------------------------------------------------------
 
-The following example writes a JPG as an `ASPRS LAS`_ file.
+The following example assigns the bands from a JPG to the
+RGB values of an `ASPRS LAS`_ file using :ref:`writers.las`.
 
 .. _`ASPRS LAS`: http://www.asprs.org/Committee-General/LASer-LAS-File-Format-Exchange-Activities.html
 
 .. code-block:: json
+    :linenos:
 
     {
       "pipeline":[
@@ -73,6 +78,8 @@ Options
 --------------------------------------------------------------------------------
 
 filename
-  GDALOpen'able raster file to read [Required]
+  `GDALOpen`_-able raster file to read [Required]
+
+.. _`GDALOpen`: http://www.gdal.org/gdal_8h.html#a6836f0f810396c5e45622c8ef94624d4
 
 
diff --git a/doc/stages/readers.las.rst b/doc/stages/readers.las.rst
index 565a3b0..a2dd96f 100644
--- a/doc/stages/readers.las.rst
+++ b/doc/stages/readers.las.rst
@@ -22,7 +22,7 @@ the two supported decompressors, `LASzip`_ or `LAZperf`_.  See the
   input LAS file to an output LAS file will frequently want to use the same
   scale factors and offsets in the output file as existed in the input
   file in order to
-  maintain the precision of the data.  Use the 'forward' option on the
+  maintain the precision of the data.  Use the `forward` option on the
   :ref:`writers.las` to facilitate transfer of header information from
   source to destination LAS/LAZ files.
 
@@ -40,7 +40,8 @@ the two supported decompressors, `LASzip`_ or `LAZperf`_.  See the
   on the extra bytes VLR in the `LAS Specification`_ for more information
   on the extra bytes VLR and array datatypes.
 
-.. note::
+.. warning::
+
   LAS 1.4 files that use the extra bytes VLR and datatype 0 will be accepted,
   but the data associated with a dimension of datatype 0 will be ignored
   (no PDAL dimension will be created).
@@ -49,6 +50,7 @@ Example
 -------
 
 .. code-block:: json
+    :linenos:
 
     {
       "pipeline":[
@@ -73,10 +75,13 @@ _`extra_dims`
   Extra dimensions to be read as part of each point beyond those specified by
   the LAS point format.  The format of the option is
   <dimension_name>=<type>, ... where type is one of:
-  int8, int16, int32, int64, uint8, uint16, uint32, uint64, float, double
-  '_t' may be added to any of the type names as well (e.g., uint32_t).  NOTE:
-  the presence of an extra bytes VLR causes when reading a version 1.4 LAS
-  file causes this option to be ignored.
+  int8, int16, int32, int64, uint8, uint16, uint32, uint64, float, double.
+  `_t` may be added to any of the type names as well (e.g., uint32_t).
+
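+  For example, to read two extra per-point attributes beyond the point
+  format (hypothetical dimension names)::
+
+      "extra_dims": "Flatness=double, Roughness=float"
+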
+  .. note::
+
+      The presence of an extra bytes VLR when reading a version 1.4 LAS file
+      causes this option to be ignored.
 
 .. _LAS format: http://asprs.org/Committee-General/LASer-LAS-File-Format-Exchange-Activities.html
 .. _LAS Specification: http://www.asprs.org/a/society/committees/standards/LAS_1_4_r13.pdf
diff --git a/doc/stages/readers.mbio.rst b/doc/stages/readers.mbio.rst
new file mode 100644
index 0000000..2a9c0da
--- /dev/null
+++ b/doc/stages/readers.mbio.rst
@@ -0,0 +1,53 @@
+.. _readers.mbio:
+
+readers.mbio
+============
+
+The mbio reader allows sonar bathymetry data to be read into PDAL and
+treated as data collected using LIDAR sources.  PDAL uses the `MB-System`_
+library to read the data and therefore supports `all formats`_ supported by
+that library.  Some common sonar systems are NOT supported by MB-System,
+notably Kongsberg, Reson and Norbit.  The mbio reader reads each "beam"
+of data after averaging and processing by the MB-System software and stores
+the values for the dimensions 'X', 'Y', 'Z' and 'Amplitude'.  X and Y use
+longitude and latitude for units and the Z values are in meters (negative,
+being below the surface).  Units for 'Amplitude' is not specified and may
+vary.
+
+
+Example
+-------
+
+This reads beams from a sonar data file and writes points to a LAS file.
+
+.. code-block:: json
+
+    {
+      "pipeline":[
+        {
+          "type" : "readers.mbio",
+          "filename" : "shipdata.m57",
+          "format" : "MBF_EM3000RAW"
+        },
+        {
+          "type":"writers.las",
+          "filename":"outputfile.las"
+        }
+      ]
+    }
+
+
+Options
+-------
+
+filename
+  Filename to read from [Required]
+
+format
+  Name or number of the format of the file being read.  See the MB-System
+  documentation for a list of `all formats`_. [Required]
+
+
+.. _MB-System: http://www.ldeo.columbia.edu/res/pi/MB-System/
+
+.. _all formats: https://www.ldeo.columbia.edu/res/pi/MB-System/html/mbio.html#lbAI
diff --git a/doc/stages/readers.nitf.rst b/doc/stages/readers.nitf.rst
index c30b936..2385ba7 100644
--- a/doc/stages/readers.nitf.rst
+++ b/doc/stages/readers.nitf.rst
@@ -22,10 +22,18 @@ and types for convenience in file format transformation.
     the :ref:`readers.las` and :ref:`writers.las` :ref:`stages <stage_index>`
     to actually read and write the data.
 
+.. note::
+
+    PDAL uses a fork of the `NITF Nitro`_ library available at
+    https://github.com/hobu/nitro for NITF read and write support.
+
+.. _`NITF Nitro`: http://nitro-nitf.sourceforge.net/wikka.php?wakka=HomePage
+
 Example
 -------
 
 .. code-block:: json
+    :linenos:
 
     {
       "pipeline":[
diff --git a/doc/stages/readers.ply.rst b/doc/stages/readers.ply.rst
index bac3a48..30d0c37 100644
--- a/doc/stages/readers.ply.rst
+++ b/doc/stages/readers.ply.rst
@@ -3,10 +3,14 @@
 readers.ply
 ===========
 
-The **ply reader** reads the `polygon file format`_, a common file format for storing three dimensional models.
-The `rply library`_ is included with the PDAL source, so there are no external dependencies.
+The **ply reader** reads points and vertices from the `polygon file format`_, a
+common file format for storing three dimensional models.  The `rply library`_
+is included with the PDAL source, so there are no external dependencies.
 
-The ply reader can read ASCII and binary ply files.
+
+.. note::
+
+    The ply reader can read ASCII and binary ply files.
 
 
 Example
diff --git a/doc/stages/readers.pts.rst b/doc/stages/readers.pts.rst
index b6a4fbb..b84c7ae 100644
--- a/doc/stages/readers.pts.rst
+++ b/doc/stages/readers.pts.rst
@@ -3,7 +3,8 @@
 readers.pts
 ============
 
-The **PTS reader** reads data from PTS files.
+The **PTS reader** reads data from Leica Cyclone PTS files. It is
+not very sophisticated.
 
 
 Example Pipeline
diff --git a/doc/stages/readers.tindex.rst b/doc/stages/readers.tindex.rst
index 906f3fe..32f64e5 100644
--- a/doc/stages/readers.tindex.rst
+++ b/doc/stages/readers.tindex.rst
@@ -42,12 +42,9 @@ merge the data.
       "pipeline":[
         {
           "type":"readers.tindex",
-          "sql":"SELECT * from pdal",
           "filter_srs":"+proj=lcc +lat_1=43 +lat_2=45.5 +lat_0=41.75 +lon_0=-120.5 +x_0=399999.9999999999 +y_0=0 +ellps=GRS80 +units=ft +no_defs",
-          "merge":"true",
           "filename":"index.sqlite",
           "where":"location LIKE \'%nteresting.las%\'",
-          "boundary":"([635629.85, 638982.55], [848999.70 , 853535.43])",
           "polygon":"POLYGON ((635629.85000000 848999.70000000, 635629.85000000 853535.43000000, 638982.55000000 853535.43000000, 638982.55000000 848999.70000000, 635629.85000000 848999.70000000))"
         },
         {
@@ -82,14 +79,14 @@ tindex_name
 sql
   `OGR SQL`_ to use to define the tile index layer.
 
+bounds
+  A 2D box to pre-filter the tile index. If it is set,
+  it will override any ``wkt`` option.
+
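+  For example, a box matching the polygon in the example above::
+
+      "bounds": "([635629.85, 638982.55], [848999.70, 853535.43])"
+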
 wkt
   A geometry to pre-filter the tile index using
   OGR
 
-boundary
-  A 2D box to pre-filter the tile index. If it is set,
-  it will override any ``wkt`` option.
-
 t_srs
   Reproject the layer SRS, otherwise default to the
   tile index layer's SRS.
diff --git a/doc/stages/writers.derivative.rst b/doc/stages/writers.derivative.rst
deleted file mode 100644
index 3625fb6..0000000
--- a/doc/stages/writers.derivative.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-.. _writers.derivative:
-
-writers.derivative
-==================
-
-The **Derivative Writer** supports writing of primary topographic attributes.
-
-
-.. note::
-    This driver uses `GDAL`_ to write the data. Only the `GeoTIFF`_ driver
-    is supported at this time.
-
-.. _`GDAL`: http://gdal.org
-.. _`GeoTiff`: http://www.gdal.org/frmt_gtiff.html
-
-Example #1
-----------
-
-Create a single GeoTIFF with slope values calculated using the D8 method.
-
-.. code-block:: json
-
-    {
-      "pipeline":[
-        "inputfile.las",
-        {
-          "type":"writers.derivative",
-          "filename":"outputfile.tiff",
-          "primitive_type":"slope_d8"
-        }
-      ]
-    }
-    
-Example #2
-----------
-
-Create multiple GeoTIFFs containing slope, hillshade, and contour curvature
-values.
-
-.. code-block:: json
-
-    {
-      "pipeline":[
-        "inputfile.las",
-        {
-          "type":"writers.derivative",
-          "filename":"outputfile_#.tiff",
-          "primitive_type":"slope_d8,hillshade,contour_curvature"
-        }
-      ]
-    }
-
-
-Options
--------
-
-filename
-  `GeoTiff`_ file to write.  [Required]
-
-primitive_type
-  Topographic attribute to compute.  [Default: slope_d8]
-
-  * slope_d8
-  * slope_fd
-  * aspect_d8
-  * aspect_fd
-  * contour_curvature
-  * profile_curvature
-  * tangential_curvature
-  * total_curvature
-  * hillshade
-
-edge_length
-  Size of grid cell in X and Y dimensions using native units of the input point
-  cloud.  [Default: 15.0]
-
-altitude
-  Illumination altitude in degrees (hillshade only). [Default: 45.0]
-
-azimuth
-  Illumination azimuth in degrees (hillshade only). [Default: 315.0]
diff --git a/doc/stages/writers.gdal.rst b/doc/stages/writers.gdal.rst
index 6afcb7c..0ff2baa 100644
--- a/doc/stages/writers.gdal.rst
+++ b/doc/stages/writers.gdal.rst
@@ -20,6 +20,13 @@ potentially contributes to the raster's value.
     it is possible that some points will not be considered at all, including
     those that may be within the bounds of the raster cell.
 
+.. note::
+    If no radius_ is provided, it is set to the product of the resolution_ and
+    the square root of two. This is consistent with the original Points2Grid_
+    application from which this algorithm has its roots.
+
+.. _Points2Grid: http://www.opentopography.org/otsoftware/points2grid
+
 The GDAL writer creates rasters using the data specified in the ``dimension``
 option (defaults to ``Z``). The writer will create up to six rasters based on
 different statistics in the output dataset.  The order of the layers in the
@@ -89,12 +96,16 @@ Options
 filename
     Name of output file. [Required]
 
+.. _resolution:
+
 resolution
     Length of raster cell edges in X/Y units.  [Required]
 
+.. _radius:
+
 radius
     Radius about cell center bounding points to use to calculate a cell value.
-    [Required]
+    [Default: ``resolution`` * sqrt(2)]
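+
+    For example, with only a resolution given (a minimal sketch; filenames
+    are placeholders), the effective radius is 2.0 * sqrt(2)::
+
+        {
+          "pipeline":[
+            "input.las",
+            {
+              "type":"writers.gdal",
+              "resolution":2.0,
+              "filename":"raster.tif"
+            }
+          ]
+        }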
 
 gdaldriver
     Name of the GDAL driver to use to write the output. [Default: "GTiff"]
diff --git a/doc/stages/writers.las.rst b/doc/stages/writers.las.rst
index 028dd66..777d318 100644
--- a/doc/stages/writers.las.rst
+++ b/doc/stages/writers.las.rst
@@ -9,7 +9,45 @@ interchange file format for LIDAR data.
 .. warning::
 
     Scale/offset are not preserved from an input LAS file.  See below for
-    information on the scale/offset options and the 'forward' option.
+    information on the scale/offset options and the `forward` option.
+
+VLRs
+-------
+
+VLRs can be created by providing a JSON node called `vlrs` with objects
+containing `user_id`, `record_id`, `description` and `data` items.
+
+.. code-block:: json
+
+    {
+      "pipeline":[
+        {
+          "type":"readers.las",
+          "filename":"inputfile.las"
+        },
+        {
+          "type":"writers.las",
+          "vlrs": [{
+                    "description": "A description under 32 bytes",
+                    "record_id": 42,
+                    "user_id": "hobu",
+                    "data": "dGhpcyBpcyBzb21lIHRleHQ="
+                   },
+                   {
+                    "description": "A description under 32 bytes",
+                    "record_id": 43,
+                    "user_id": "hobu",
+                    "data": "dGhpcyBpcyBzb21lIG1vcmUgdGV4dA=="
+                    }
+                  ],
+          "filename":"outputfile.las"
+        }
+      ]
+    }
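+
+.. note::
+
+    The ``data`` items must be `base64`_-encoded strings. The data is
+    converted to binary and stored in the VLR when the file is written.
+
+.. _`base64`: https://en.wikipedia.org/wiki/Base64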
+
 
 Example
 -------
@@ -35,7 +73,7 @@ Options
 
 filename
   LAS file to write. The writer will accept a filename containing
-  a single placeholder character ('#').  If input to the writer consists
+  a single placeholder character (`#`).  If input to the writer consists
   of multiple PointViews, each will be written to a separate file, where
   the placeholder will be replaced with an incrementing integer.  If no
   placeholder is found, all PointViews provided to the writer are
@@ -49,24 +87,26 @@ forward
   LAS file.  The
   option can be specified multiple times, which has the same effect as
   listing values separated by a comma.  The following values are valid:
-  'major_version', 'minor_version', 'dataformat_id', 'filesource_id',
-  'global_encoding', 'project_id', 'system_id', 'software_id', 'creation_doy',
-  'creation_year', 'scale_x', 'scale_y', 'scale_z', 'offset_x', 'offset_y',
-  'offset_z'.  In addition, the special value 'header' can be specified,
+  ``major_version``, ``minor_version``, ``dataformat_id``, ``filesource_id``,
+  ``global_encoding``, ``project_id``, ``system_id``, ``software_id``, ``creation_doy``,
+  ``creation_year``, ``scale_x``, ``scale_y``, ``scale_z``, ``offset_x``, ``offset_y``,
+  ``offset_z``.  In addition, the special value ``header`` can be specified,
   which is equivalent to specifying all the values EXCEPT the scale and
   offset values.  Scale and offset values can be forwarded as a group by
-  using the special values 'scale' and 'offset' respectively.  The special
-  value 'all' is equivalent to specifying 'header', 'scale', 'offset' and
-  'vlr' (see below).
+  using the special values ``scale`` and ``offset`` respectively.  The special
+  value ``all`` is equivalent to specifying ``header``, ``scale``, ``offset`` and
+  ``vlr`` (see below).
   If a header option is specified explicitly, it will override any forwarded
   header value.
   If a LAS file is the result of multiple LAS input files, the header values
   to be forwarded must match or they will be ignored and a default will
   be used instead.
 
-  VLRs can be forwarded by using the special value 'vlr'.  VLRs containing
-  the following User IDs are NOT forwarded: 'LASF_Projection', 'LASF_Spec',
-  'liblas', 'laszip encoded'.  These VLRs are known to contain information
+  VLRs can be forwarded by using the special value ``vlr``.  VLRs containing
+  the following User IDs are NOT forwarded: ``LASF_Projection``,
+  ``liblas``, ``laszip encoded``.  VLRs with the User ID ``LASF_Spec`` and
+  a record ID other than 0 or 3 are also not forwarded.  These VLRs are known
+  to contain information
   regarding the formatting of the data and will be rebuilt properly in the
   output file as necessary.  Unlike header values, VLRs from multiple input
   files are accumulated and each is written to the output file.  Forwarded
@@ -107,7 +147,8 @@ system_id
   String identifying the system that created this LAS file. [Default: "PDAL"]
 
 a_srs
-  The spatial reference system of the file to be written. Can be an EPSG string (e.g. "EPSG:268910") or a WKT string. [Default: Not set]
+  The spatial reference system of the file to be written. Can be an EPSG string
+  (e.g. "EPSG:268910") or a WKT string. [Default: Not set]
 
 global_encoding
   Various indicators to describe the data.  See the LAS documentation.  Note
@@ -125,7 +166,7 @@ compression
 
 scale_x, scale_y, scale_z
   Scale to be divided from the X, Y and Z nominal values, respectively, after
-  the offset has been applied.  The special value "auto" can be specified,
+  the offset has been applied.  The special value ``auto`` can be specified,
   which causes the writer to select a scale to set the stored values of the
   dimensions to range from [0, 2147483647].  [Default: .01]
 
@@ -133,7 +174,7 @@ scale_x, scale_y, scale_z
 
 offset_x, offset_y, offset_z
    Offset to be subtracted from the X, Y and Z nominal values, respectively,
-   before the value is scaled.  The special value "auto" can be specified,
+   before the value is scaled.  The special value ``auto`` can be specified,
    which causes the writer to set the offset to the minimum value of the
    dimension.  [Default: 0]
 
@@ -153,15 +194,22 @@ extra_dims
   by the LAS point format.  The format of the option is
   <dimension_name>=<type>, ... where type is one of:
   int8, int16, int32, int64, uint8, uint16, uint32, uint64, float, double
-  '_t' may be added to any of the type names as well (e.g., uint32_t).  When
+  ``_t`` may be added to any of the type names as well (e.g., uint32_t).  When
   the version of the output file is specified as 1.4 or greater, an extra
   bytes VLR (User ID: LASF_Spec, Record ID: 4), is created that describes the
   extra dimensions specified by this option.
 
-  The special value 'all' can be used in place of a dimension/type list
-  to request
-  that all dimensions that can't be stored in the predefined LAS point
-  record get added as extra data at the end of each point record.
+  The special value ``all`` can be used in place of a dimension/type list
+  to request that all dimensions that can't be stored in the predefined
+  LAS point record get added as extra data at the end of each point record.
+
+  Setting --verbose=Info will provide output on the names, types and order
+  of dimensions being written as part of the LAS extra bytes.
+
+pdal_metadata
+  Write two VLRs containing `JSON`_ output with both the :ref:`metadata` and
+  :ref:`pipeline` serialization. [Default: **false**]
 
+.. _`JSON`: http://www.json.org/
 .. _LAS format: http://asprs.org/Committee-General/LASer-LAS-File-Format-Exchange-Activities.html
 
diff --git a/doc/stages/writers.oci.rst b/doc/stages/writers.oci.rst
index ac34089..fb15dae 100644
--- a/doc/stages/writers.oci.rst
+++ b/doc/stages/writers.oci.rst
@@ -128,5 +128,9 @@ output_dims
   If specified, limits the dimensions written for each point.  Dimensions
   are listed by name and separated by commas.
 
+tolerance
+  Oracle geometry tolerance. X, Y, and Z dimensions are all
+  currently specified as a single value [Default: **0.05**]
+
 .. _Oracle point cloud: http://docs.oracle.com/cd/B28359_01/appdev.111/b28400/sdo_pc_pkg_ref.htm
 
diff --git a/doc/stages/writers.p2g.rst b/doc/stages/writers.p2g.rst
deleted file mode 100644
index ee1e86a..0000000
--- a/doc/stages/writers.p2g.rst
+++ /dev/null
@@ -1,84 +0,0 @@
-.. _writers.p2g:
-
-writers.p2g
-===========
-
-The **points to grid writer** takes in a stream of point data and writes out
-gridded summaries of the stream. Each cell in the output grids can give one of
-the: minimum value, maximum value, average value, average value, inverse
-distance weighted interpolation (for sparse points), or density. The points to
-grid writer supports creating multiple output grids simultaneously, so it is
-possible to generate all grid variants in one pass.
-
-
-.. warning::
-
-    :ref:`writers.gdal` is a replacement for `writers.p2g`. It doesn't require
-    an externally installed library, it supports
-    more GDAL output formats and options, and it supports the ability to write
-    all output to a single GeoTIFF.
-
-.. note::
-
-    A project called `lidar2dems`_ by `Applied GeoSolutions`_ integrates the P2G
-    writer and other PDAL components into a series of scripts and utilities that
-    make it more convenient to do DEM production with PDAL.
-
-.. _`lidar2dems`: https://github.com/Applied-GeoSolutions/lidar2dems
-.. _`Applied GeoSolutions`: http://www.appliedgeosolutions.com/
-
-Example
--------
-
-.. code-block:: json
-
-    {
-      "pipeline":[
-        {
-          "type":"readers.las",
-          "filename":"inputfile.las"
-        },
-        {
-          "type":"writers.p2g",
-          "grid_dist_x":"6.0",
-          "grid_dist_y":"6.0",
-          "radius":"8.4852813742385713",
-          "filename":"autzen_grid",
-          "output_type":"min",
-          "output_type":"max",
-          "output_type":"mean",
-          "output_type":"idw",
-          "output_type":"den",
-          "output_format":"asc",
-        }
-      ]
-    }
-
-Options
--------
-
-grid_dist_x
-  Size of grid cell in x dimension [Default: **6**]
-
-grid_dist_y
-  Size of grid cell in y dimension. [Default: **6**]
-
-radius
-  ??? [Default: **8.48528**]
-
-filename
-  Base file name for output files. [Required]
-
-output_type
-  One or many options, specifying "min", "max", "mean", "idw" (inverse distance weighted), "den" (density), or "all" to get all variants with just one option. [Default: **all**]
-
-output_format
-  File output format to use, one of "grid", "tif", or "asc". [Default: **grid**]
-
-z
-  Name of the 'z' dimension to use. [Default: 'Z']
-
-bounds
-  Custom bounds for output raster(s).
-  If not provided, bounds will be calculated from the bounds of the input data.
-  [Default: **none**]
diff --git a/doc/stages/writers.pgpointcloud.rst b/doc/stages/writers.pgpointcloud.rst
index 9c39f5f..42262fb 100644
--- a/doc/stages/writers.pgpointcloud.rst
+++ b/doc/stages/writers.pgpointcloud.rst
@@ -59,7 +59,7 @@ compression
   * **ght** applies a "geohash tree" compression by sorting the points into a prefix tree
 
 overwrite
-  To drop the table before writing set to 'true'. To append to the table set to 'false'. [Default: **true**]
+  To drop the table before writing set to 'true'. To append to the table set to 'false'. [Default: **false**]
 
 srid
   Spatial reference ID (relative to the `spatial_ref_sys` table in PostGIS) to store with the point cloud schema. [Default: **4326**]
diff --git a/doc/stages/writers.rialto.rst b/doc/stages/writers.rialto.rst
deleted file mode 100644
index 5b6ad9e..0000000
--- a/doc/stages/writers.rialto.rst
+++ /dev/null
@@ -1,48 +0,0 @@
-.. _writers.rialto:
-
-writers.rialto
-==============
-
-The **RialtoWriter** supports writing to `Rialto-formatted
-tiles <http://lists.osgeo.org/pipermail/pointdown/2015-February/000001.html>`__.
-
-Example
--------
-
-.. code-block:: json
-
-    {
-      "pipeline":[
-        {
-          "type":"readers.las",
-          "filename":"inputfile.las"
-        },
-        {
-          "type":"filters.reprojection",
-          "out_srs":"EPSG:4326"
-        }
-        {
-          "type":"writers.rialto",
-          "max_levels":"18",
-          "overwrite":"true",
-          "filename":"outputfile.ria"
-        }
-      ]
-    }
-
-Options
--------
-
-filename
-  The **directory** to stage the Rialto tiles in. An exception will be thrown
-  if the directory exists, unless the overwrite option is set to true (see
-  below). [Required]
-
-max_levels
-  The maximum number of levels in the quadtree. Each rectangular node at level
-  L reduces to 4 equally-sized nodes at level L+1. Each tile at level N-1
-  contains 1/4 of the points contained in the level N nodes. [Default: 16]
-
-overwrite
-  Delete the target directory prior to writing results? [Default: false]
-
diff --git a/doc/stages/writers.rst b/doc/stages/writers.rst
index 4147c2c..e52bc5d 100644
--- a/doc/stages/writers.rst
+++ b/doc/stages/writers.rst
@@ -9,7 +9,7 @@ dimension type, while others only understand fixed dimension names.
 .. note::
 
     PDAL predefined dimension names can be found in the dimension registry:
-    https://github.com/PDAL/PDAL/blob/master/src/Dimension.json
+    :ref:`dimensions`
 
 .. toctree::
    :maxdepth: 1
diff --git a/doc/tutorial/clipping-with-shapefile.rst b/doc/tutorial/clipping-with-shapefile.rst
index 29e6943..0fefb2d 100644
--- a/doc/tutorial/clipping-with-shapefile.rst
+++ b/doc/tutorial/clipping-with-shapefile.rst
@@ -41,10 +41,10 @@ Stage Operations
 -------------------------------------------------------------------------------
 
 This operation depends on two :ref:`stages <stage_index>` PDAL provides.
-The first is the :ref:`filters.attribute` stage, which allows you to assign
-point values based on polygons read from `OGR`_. The second is the :ref:`filters.range`,
-which allows you to keep or reject points from the set that match given
-criteria.
+The first is the :ref:`filters.overlay` stage, which allows you to assign
+point values based on polygons read from `OGR`_. The second is the
+:ref:`filters.range`, which allows you to keep or reject points from the
+set that match given criteria.
 
 .. seealso::
 
@@ -118,7 +118,7 @@ as they are read, filtered, and written.
     "pipeline":[
       "autzen.laz",
       {
-        "type":"filters.attribute",
+        "type":"filters.overlay",
         "dimension":"Classification",
         "datasource":"attributes.vrt",
         "layer":"OGRGeoJSON",
@@ -134,7 +134,7 @@ as they are read, filtered, and written.
 
 * :ref:`readers.las`: Define a reader that can read `ASPRS LAS`_ or `LASzip`_
   data.
-* :ref:`filters.attribute`: Using the VRT we defined in `Data Preparation`_,
+* :ref:`filters.overlay`: Using the VRT we defined in `Data Preparation`_,
   read attribute polygons out of the data source and assign the values from the
   ``CLS`` column to the ``Classification`` field.
 * :ref:`filters.range`: Given that we have set the ``Classification`` values
@@ -146,7 +146,7 @@ as they are read, filtered, and written.
 .. note::
 
     You don't have to use only ``Classification`` to set the attributes
-    with :ref:`filters.attribute`. Any valid dimension name could work, but
+    with :ref:`filters.overlay`. Any valid dimension name could work, but
     most LiDAR software will display categorical coloring for the
     ``Classification`` field, and we can leverage that behavior in this
     scenario.
@@ -173,7 +173,7 @@ Conclusion
 -------------------------------------------------------------------------------
 
 PDAL allows the composition of point cloud operations. This tutorial demonstrated
-how to use the :ref:`filters.attribute` and :ref:`filters.range` stages to clip
+how to use the :ref:`filters.overlay` and :ref:`filters.range` stages to clip
 points with shapefiles.
 
 .. _`CloudCompare`: http://www.danielgm.net/cc/
diff --git a/doc/tutorial/index.rst b/doc/tutorial/index.rst
index 16b8986..5e5c354 100644
--- a/doc/tutorial/index.rst
+++ b/doc/tutorial/index.rst
@@ -14,7 +14,6 @@ Getting Started
    :maxdepth: 1
 
    using
-   overview
 
 Using PDAL
 ----------
@@ -24,6 +23,7 @@ Using PDAL
 
    reading
    writing
+   las
    pcl_block_tutorial
    pcl_ground
    clipping-with-shapefile
diff --git a/doc/tutorial/las.rst b/doc/tutorial/las.rst
new file mode 100644
index 0000000..9b5e3b3
--- /dev/null
+++ b/doc/tutorial/las.rst
@@ -0,0 +1,608 @@
+.. las_tutorial:
+
+================================================================================
+LAS Reading and Writing with PDAL
+================================================================================
+
+.. include:: ../workshop/includes/substitutions.rst
+
+:Author: Howard Butler
+:Contact: howard at hobu.co
+:Date: 3/27/2017
+
+.. contents:: Table of Contents
+   :depth: 2
+
+
+This tutorial will describe reading and writing |ASPRSLAS| data with PDAL and
+discuss the capabilities that PDAL :ref:`readers.las` and :ref:`writers.las`
+can provide for this format.
+
+Introduction
+-------------------------------------------------------------------------------
+
+|ASPRSLAS| is probably the most commonly used |LiDAR| format, and PDAL's support
+of LAS is important for many users of the library. This tutorial describes and
+demonstrates some of the capabilities the drivers provide, points out items to
+be aware of when using the drivers, and hopefully provides some examples you
+can use to get what you need out of the LAS drivers.
+
+
+LAS Versions
+-------------------------------------------------------------------------------
+
+There are five LAS versions -- 1.0 to 1.4. Each iteration added some
+complexity to the format in terms of capabilities it supports, possible data
+types it stores, and metadata. Users of LAS must balance the features they need
+with the use of the data by downstream applications. While LAS support in some
+form is quite widespread throughout the industry, most applications do not
+support each and every feature of the format. PDAL works to provide many of
+these features, but it doesn't support everything either.
+
+Version Example
+................................................................................
+
+We can use the ``minor_version`` option of :ref:`writers.las` to adjust which
+version PDAL should output. The following example will output a 1.1 version LAS
+file. Depending on the features you need, this may or may not be what you want.
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 9
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "minor_version": 1,
+                "filename" : "output.las"
+            }
+        ]
+    }
+
+.. note::
+
+    PDAL defaults to writing a LAS 1.2 version if no ``minor_version`` is specified
+    or the ``forward`` option of :ref:`writers.las` is not used to carry along
+    a version from a previously read file.
+
+
+Spatial Reference System
+--------------------------------------------------------------------------------
+
+LAS 1.0 to 1.3 used |GeoTIFF| keys for storing coordinate system information,
+while LAS 1.4 uses |WellKnownText|. GeoTIFF is well supported by most software
+that reads LAS, but it is not possible to express some coordinate system
+specifics with GeoTIFF. WKT is more expressive, but also presents some software
+challenges.
+
+
+Assignment Example
+................................................................................
+
+The PDAL :ref:`writers.las` allows you to override or assign the coordinate
+system to an explicit value if you need to. The coordinate system defined by
+a file is often incorrect or non-existent, and you can set it with PDAL.
+
+The following example sets the ``a_srs`` option of the :ref:`writers.las` to
+``EPSG:4326``.
+
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 9
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "out_srs": "EPSG:4326",
+                "filename" : "output.las"
+            }
+        ]
+    }
+
+.. note::
+
+    Remember to set your ``offset_x``, ``offset_y``, ``scale_x``, and
+    ``scale_y`` values to something appropriate if you are storing decimal
+    degree data in LAS files. The special value ``auto`` can be used for the
+    offset values, but you should set an explicit value for the scale values
+    to prevent overdriving the precision of the data and disrupting
+    `Compression`_ with |LASzip|.
+
+
+Vertical Datum Example
+................................................................................
+
+Vertical coordinate control is important in |LiDAR|, and PDAL supports assignment
+and reprojection/transform of vertical coordinates using |Proj.4| and |GDAL|.
+The coordinate system description magic happens in GDAL, and you assign a compound
+coordinate system (both vertical and horizontal definitions) using the following
+syntax:
+
+::
+
+    EPSG:4326+3855
+
+This assignment states a typical 4326 horizontal coordinate system plus a vertical one that
+represents `EGM08`_. In |WellKnownText|, this coordinate system is described by:
+
+::
+
+    $ gdalsrsinfo "EPSG:4326+3855"
+
+::
+
+    COMPD_CS["WGS 84 + EGM2008 geoid height",
+        GEOGCS["WGS 84",
+            DATUM["WGS_1984",
+                SPHEROID["WGS 84",6378137,298.257223563,
+                    AUTHORITY["EPSG","7030"]],
+                AUTHORITY["EPSG","6326"]],
+            PRIMEM["Greenwich",0,
+                AUTHORITY["EPSG","8901"]],
+            UNIT["degree",0.0174532925199433,
+                AUTHORITY["EPSG","9122"]],
+            AUTHORITY["EPSG","4326"]],
+        VERT_CS["EGM2008 geoid height",
+            VERT_DATUM["EGM2008 geoid",2005,
+                AUTHORITY["EPSG","1027"],
+                EXTENSION["PROJ4_GRIDS","egm08_25.gtx"]],
+            UNIT["metre",1,
+                AUTHORITY["EPSG","9001"]],
+            AXIS["Up",UP],
+            AUTHORITY["EPSG","3855"]]
+
+.. _`EGM08`: http://earth-info.nga.mil/GandG/wgs84/gravitymod/egm2008/egm08_wgs84.html
+
+As in `Assignment Example`_, it is common to need to reassign the coordinate
+system. The following example defines both the horizontal and vertical
+coordinate systems for a file, using `UTM Zone 15N NAD83`_ for the horizontal and
+`NAVD88`_ for the vertical.
+
+.. _`UTM Zone 15N NAD83`: http://epsg.io/26915
+.. _`NAVD88`: http://epsg.io/5703
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 9
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "out_srs": "EPSG:26915+5703",
+                "filename" : "output.las"
+            }
+        ]
+    }
+
+
+.. note::
+
+    Any coordinate system description format supported by GDAL's `SetFromUserInput`_
+    method can be used to assign or set the coordinate system in PDAL.
+    This includes WKT, |Proj.4| definitions, or OGC URNs. It is your responsibility
+    to escape or massage any input data to make it valid JSON, however.
+
+.. _`SetFromUserInput`: http://www.gdal.org/ogr__srs__api_8h.html#a927749db01cec3af8aa5e577d032956bk
+
+
+Reprojection Example
+................................................................................
+
+A common desire is to transform the coordinates of an |ASPRSLAS| file
+from one coordinate system to another. The mechanism to do that with
+PDAL is :ref:`filters.reprojection`.
+
+
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 9
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type":"filters.reprojection",
+                "out_srs":"EPSG:26915"
+            },
+            {
+                "type" : "writers.las",
+                "filename" : "output.las"
+            }
+        ]
+    }
+
+.. warning::
+
+    When you are transforming coordinates, you might need to set the
+    ``scale_x``, ``scale_y``, ``offset_x``, and ``offset_y`` values to
+    something reasonable for your output coordinate system.
+
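+A sketch combining the two, with scale and offset values that are
+illustrative for decimal degree output rather than prescriptive:
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 13-16
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type":"filters.reprojection",
+                "out_srs":"EPSG:4326"
+            },
+            {
+                "type" : "writers.las",
+                "scale_x": "0.0000001",
+                "scale_y": "0.0000001",
+                "offset_x": "auto",
+                "offset_y": "auto",
+                "filename" : "output.las"
+            }
+        ]
+    }
+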
+.. note::
+
+    The ``in_srs`` option of :ref:`filters.reprojection` can come from the data
+    itself (in our case it would have been set in the ``input.las`` file) or
+    you can explicitly override it.
+
+
+
+Point Formats
+--------------------------------------------------------------------------------
+
+As each revision was released, more point formats were added. A Point Format is
+the fixed set of :ref:`dimensions` that a LAS file must present and store in
+the file. Their definition, size, and composition all change in minor LAS
+versions (i.e., 1.2 point formats vs. those in 1.4). It is generally true,
+however, that point formats between minor revisions have similar semantic
+meanings, and in many cases their storage size is the same. For example, a 1.0
+file with points of type "Point Format 0" represents essentially the same data
+as a 1.4 file with points of type "Point Format 0".
+
+Point Format Example
+................................................................................
+
+Point format or `dataformat_id` is an integer that defines the set of fixed
+:ref:`dimensions` a LAS file must contain. All LAS files have at minimum a fixed
+set of the following dimensions:
+
+.. csv-table:: Base LAS :ref:`dimensions`
+    :widths: auto
+
+    "X", "Y", "Z"
+    "Intensity", "ReturnNumber", "NumberOfReturns"
+    "ScanDirectionFlag", "EdgeOfFlightLine", "Classification"
+    "ScanAngleRank", "UserData", "PointSourceId"
+
+These fixed formats have known field sizes and explicit meanings. Because LAS
+data are bulky, adding 14 bytes per point for both the GPSTime and
+Red/Green/Blue fields can waste a lot of storage if there isn't any actual data
+in them or you would like to disregard them.
+
+Setting the ``dataformat_id`` option to 0, along with the ``forward`` option
+to carry along all of the rest of the fields, removes both the time and
+color fields:
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 10
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "forward": "all",
+                "dataformat_id": 0,
+                "filename" : "output.las"
+            }
+        ]
+    }
+
+.. note::
+
+    The |LASzip| storage of GPSTime and Red/Green/Blue fields with no
+    data is perfectly efficient.
+
+Extra Dimensions
+--------------------------------------------------------------------------------
+
+A LAS Point Format ID defines the fixed set of :ref:`dimensions` a file must
+store, but software is allowed to store extra data beyond that fixed set.
+This feature of the format was regularized in LAS 1.4 as something called
+"extra bytes" or "extra dims", but files with versions earlier than LAS 1.4
+can also store these extra per-point attributes.
+
+Extra Dimension Example
+................................................................................
+
+The following example will write a LAS 1.4 file with all non-LAS dimensions
+written into the file along with a description of those dimensions in the
+Extra Bytes VLR in the 1.4 file:
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 9
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "extra_dims": "all",
+                "minor_version" : "1.4",
+                "filename" : "output.las"
+            }
+        ]
+    }
+
+Required Header Fields
+--------------------------------------------------------------------------------
+
+Readers of the ASPRS LAS Specification will see there are many fields that
+software is required to write, with their content mandated by various options
+and configurations in the format. PDAL does not assume responsibility for
+writing these fields and coercing meaning from the content to fit the
+specification.  It is the PDAL users' responsibility to do so. Fields where
+this might matter include:
+
+* `project_id`
+* `global_encoding`
+* `system_id`
+* `software_id`
+* `filesource_id`
+
+
+Header Fields Example
+................................................................................
+
+The ``forward`` option of :ref:`writers.las` is the easiest way to get most of
+what you might want in terms of header settings copied from an input to an
+output file upon processing. Imagine the scenario of zeroing out the
+classification values for a LAS file in preparation for using
+:ref:`filters.pmf` to reassign them. During this scenario, we'd like to keep
+all of the other LAS header information, such as `Variable Length Records`_,
+extent information, and format settings.
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 19
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "filters.assign",
+                "assignment" : "Classification[0:32]=0"
+            },
+            {
+                "type" : "filters.pmf",
+                "cell_size" : 2.5,
+                "approximate" : false,
+                "max_distance" : 25
+            },
+            {
+                "type" : "writers.las",
+                "forward": "all",
+                "filename" : "output.las"
+            }
+        ]
+    }
+
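+A header option given explicitly always overrides a forwarded value. As a
+sketch (the ``system_id`` string here is purely illustrative), the following
+forwards everything while stamping its own system identifier:
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 10
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "forward": "all",
+                "system_id": "MY_SYSTEM",
+                "filename" : "output.las"
+            }
+        ]
+    }
+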
+Coordinate Scaling
+--------------------------------------------------------------------------------
+
+LAS stores coordinates as 32-bit integers. It is the user's responsibility to
+ensure that the coordinate domain required by the data in the file fits within
+the 32-bit integer domain. Users should scale the data in relation to the
+measurement scale of the data, and they should not use the full 32-bit integer
+range of precision available to them to store it.
+
+It is usual to preserve the coordinate scale of data when translating LAS data,
+but in situations that change the coordinate precision, such as reprojecting
+from UTM to decimal degrees, it is not always possible to do so. Overdriven
+coordinate scale also hurts `Compression`_ with |LASzip| and disrupts
+communication of realistic accuracy.
+
+Auto Offset Example
+................................................................................
+
+Users can allow PDAL to select scale and offset values for data with the ``auto``
+option.  This can have some detrimental effects on downstream processing.
+``auto`` for scale values will have the effect of storing the data evenly
+across the 32-bit integer domain. This maximizes the precision available to
+store the data, but this will have a detrimental effect on |LASzip| storage
+efficiency.  ``auto`` for offset calculation is just fine, however. When given
+the option, choose to store |ASPRSLAS| data with an explicit scale for the X,
+Y, and Z dimensions that represents actual expected data precision, not
+artificial storage precision or maximal storage precision.
+
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 9-14
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "scale_x":"0.0000001",
+                "scale_y":"0.0000001",
+                "scale_z":"0.01",
+                "offset_x":"auto",
+                "offset_y":"auto",
+                "offset_z":"auto",
+                "filename" : "output.las"
+            }
+        ]
+    }
+
+Compression
+--------------------------------------------------------------------------------
+
+|LASzip| is an open source, lossless compression technique for |ASPRSLAS| data.
+It is supported by two different software libraries, and it can be used in both
+the C/C++ and the JavaScript execution environments.  LAZ support is provided
+by both :ref:`readers.las` and :ref:`writers.las` as an option, the
+``compression`` one, and t is a flag to determine whether or not the data
+should be compressed.  LAZ efficiency is hurt by over-specified coordinate
+precision, and it is not fully compatible with LAS 1.4 as of March 2017. A
+revision with 1.4 support is expected.
+
+Compression Example
+................................................................................
+
+Compressing LAS data with |LASzip| is a simple option on the :ref:`writers.las`:
+
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 9
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "compression":"laszip",
+                "filename" : "output.laz"
+            }
+        ]
+    }
+
+.. note::
+
+    If the filename ends in the extension ``.laz`` but no ``compression`` option
+    is given, the :ref:`writers.las` will set the compression to ``laszip`` for
+    you and write out a |LASzip|-compressed file.
+
+
+Variable Length Records
+................................................................................
+
+Variable Length Records, or VLRs, are blobs of binary data that the LAS format
+supports to allow applications to store their own data. Coordinate system
+information is one type of data stored in VLRs, and many different LAS-using
+applications store data and metadata with this format capability. PDAL allows
+users to access VLR information, forward it along to newly written files, and
+create VLRs that store processing history information.
+
+
+Common VLR data include:
+
+* Coordinate system
+* Metadata
+* Processing history
+* Indexing
+
+.. note::
+
+    There are VLRs that are defined by the specification, and they
+    always have a VLR ``user_id`` of ``LASF_Spec`` or ``LASF_Projection``.
+    The most important ones are used for describing the coordinate system
+    of the data.
+
+VLRs are inserted into the file as a header that stores a length along with a
+binary block of data. For LAS 1.0-1.3, the VLR length could be no larger than
+65535 bytes. For EVLRs, stored at the end of the file in LAS 1.4, this limit
+was increased to 4 GB.
+
+VLR Example
+................................................................................
+
+You can add your own VLRs to files to store processing information or anything
+else you want by providing a JSON block via the :ref:`writers.las` ``vlrs``
+option that defines the ``user_id`` and ``data`` items for the VLR. The
+``data`` item must be a `base64`_-encoded string. The data will be converted
+to binary and stored in the VLR when the file is written.
+
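+For example, the ``data`` strings in the pipeline below are base64 encodings
+of short text strings; one way to produce such a value (a sketch using the
+common ``base64`` utility) is::
+
+    $ echo -n "this is some text" | base64
+    dGhpcyBpcyBzb21lIHRleHQ=
+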
+.. code-block:: json
+    :linenos:
+
+    {
+      "pipeline":[
+         "input.las",
+         {
+           "type":"writers.las",
+           "filename":"output.las",
+           "vlrs": [   {
+                        "description": "A description under 32 bytes",
+                        "record_id": 42,
+                        "user_id": "hobu",
+                        "data": "dGhpcyBpcyBzb21lIHRleHQ="
+                       },
+                       {
+                        "description": "A description under 32 bytes",
+                        "record_id": 43,
+                        "user_id": "hobu",
+                        "data": "dGhpcyBpcyBzb21lIG1vcmUgdGV4dA=="
+                        }
+                      ]
+          }
+       ]
+    }
+
+.. _`base64`: https://en.wikipedia.org/wiki/Base64
+
+
+
+PDAL Metadata
+-------------------------------------------------------------------------------
+
+The :ref:`writers.las` driver supports an option, ``pdal_metadata``, that writes
+two `PDAL` VLRs to LAS files. The first is the equivalent of :ref:`info_command`'s
+``--metadata`` output. The second is a dump of the ``--pipeline`` serialization,
+including all stages and their options that wrote the file. These two VLRs
+may be useful in tracking down the processing history of data: they allow you
+to determine which version of PDAL wrote a file and what filter options were
+set when it was written, and they give you the ability to store metadata and
+other information via pipeline ``user_data`` from your own applications.
+
+Metadata Example
+................................................................................
+
+The :ref:`pipeline` used to construct the file and all of its :ref:`metadata` can
+be written into VLRs in |ASPRSLAS| files under the `PDAL` `VLR key`_.
+
+
+.. _`VLR key`: http://www.asprs.org/misc/las-key-list.html
+
+.. code-block:: json
+    :linenos:
+    :emphasize-lines: 9
+
+    {
+        "pipeline": [
+            {
+                "type" : "readers.las",
+                "filename" : "input.las"
+            },
+            {
+                "type" : "writers.las",
+                "pdal_metadata":"true",
+                "filename" : "output.laz"
+            }
+        ]
+    }
+
+.. warning::
+
+    VLRs only support storing 64K of information, and it is possible, though
+    improbable, that the metadata or pipeline stored in the VLRs will not fit
+    in that space. For LAS 1.4 data, an EVLR would be used, but versions less
+    than 1.4 do not have that option.
+
+
diff --git a/doc/tutorial/pcl_ground.rst b/doc/tutorial/pcl_ground.rst
index bdb2ca7..5e84d06 100644
--- a/doc/tutorial/pcl_ground.rst
+++ b/doc/tutorial/pcl_ground.rst
@@ -13,7 +13,7 @@ Implements the Progressive Morphological Filter for segmentation of ground
 points.
 
 .. note::
-  
+
   ``filters.ground`` required PCL and has since been replaced by
   :ref:`filters.pmf`, which is a native PDAL filter. :ref:`ground_command` has
   been retained, but now calls :ref:`filters.pmf` under the hood as opposed to
@@ -56,14 +56,7 @@ Let's start by running ``pdal ground`` with the default parameters.
 
 ::
 
-    $ pdal ground -i CSite1_orig-utm.laz -o CSite1_orig-utm-ground.laz --visualize
-
-.. note::
-
-   In this tutorial, we use ``--visualize`` to visualize results, but this is
-   only available if PCL is built with VTK and visualization support. If your
-   install does not support VTK/visualization, simply drop ``--visualize`` and
-   visualize the result with the viewer of your choice.
+    $ pdal ground -i CSite1_orig-utm.laz -o CSite1_orig-utm-ground.laz
 
 To get an idea of what's happening during each iteration, you can optionally
 increase the verbosity of the output. We'll try ``-v4``.  Here we see a summary
@@ -72,7 +65,7 @@ remaining ground points.
 
 ::
 
-    $ pdal ground -i CSite1_orig-utm.laz -o CSite1_orig-utm-ground.laz --visualize -v4
+    $ pdal ground -i CSite1_orig-utm.laz -o CSite1_orig-utm-ground.laz -v4
 
     --------------------------------------------------------------------------------
     NAME:    ()
@@ -118,7 +111,7 @@ specified with ``-p``.
 
 ::
 
-    $ pdal pcl -i CSite1_orig-utm.laz -o CSite1_orig-utm-ground.laz -p sor-pmf.json --visualize -v4
+    $ pdal pcl -i CSite1_orig-utm.laz -o CSite1_orig-utm-ground.laz -p sor-pmf.json -v4
 
     --------------------------------------------------------------------------------
     NAME:   Progressive Morphological Filter with Outlier Removal (1.0)
@@ -165,7 +158,7 @@ increasing the cell size, we can do a better job of removing such features.
 
 ::
 
-    $ pdal pcl -i CSite1_orig-utm.laz -o CSite1_orig-utm-ground.laz -p sor-pmf2.json --visualize -v4
+    $ pdal pcl -i CSite1_orig-utm.laz -o CSite1_orig-utm-ground.laz -p sor-pmf2.json -v4
 
     --------------------------------------------------------------------------------
     NAME:   Progressive Morphological Filter with Outlier Removal (1.0)
diff --git a/doc/tutorial/pcl_spec.rst b/doc/tutorial/pcl_spec.rst
index 9eaa335..2296dde 100644
--- a/doc/tutorial/pcl_spec.rst
+++ b/doc/tutorial/pcl_spec.rst
@@ -133,7 +133,7 @@ PCL JSON always consists of a single array of PCL JSON objects. This array
 
 
 Filters
-.......
+--------------------------------------------------------------------------------
 
 A filter is any of the PCL filters that has been exposed through the PCL
 pipeline class.
@@ -146,7 +146,7 @@ Any JSON keys not recognized by the spec are blissfully ignored.
 
 
 ApproximateProgressiveMorphologicalFilter (APMF)
-````````````````````````````````````````````````
+................................................................................
 
 .. seealso:
 
@@ -212,7 +212,7 @@ setNegative: bool
 
 
 GridMinimum
-```````````
+................................................................................
 
 This filter assembles a local 2D grid over a given PointCloud, then downsamples
 the data.
@@ -238,7 +238,7 @@ setResolution: float
 
 
 PassThrough
-```````````
+................................................................................
 
 **Description**
 
@@ -284,7 +284,7 @@ setFilterLimits: object `{"min": float, "max": float}`
 
 
 ProgressiveMorphologicalFilter (PMF)
-````````````````````````````````````
+................................................................................
 
 
 .. seealso::
@@ -351,7 +351,7 @@ setNegative: bool
 
 
 RadiusOutlierRemoval
-````````````````````
+................................................................................
 
 .. seealso::
 
@@ -391,7 +391,7 @@ setRadiusSearch: float
 
 
 StatisticalOutlierRemoval
-`````````````````````````
+................................................................................
 
 .. seealso::
 
@@ -429,7 +429,7 @@ setStddevMulThresh: float
 
 
 VoxelGrid
-`````````
+................................................................................
 
 .. seealso::
 
diff --git a/doc/workshop/agenda.rst b/doc/workshop/agenda.rst
index 50d9f6f..11db4c8 100644
--- a/doc/workshop/agenda.rst
+++ b/doc/workshop/agenda.rst
@@ -5,7 +5,7 @@ Introduction
 
 1. :ref:`lidar-introduction`
 
-2. :ref:`pdal-introduction`
+2. :ref:`Introduction to PDAL <about>`
 
 3. :ref:`software`
 
diff --git a/doc/workshop/exercises/analysis/boundary/boundary.rst b/doc/workshop/exercises/analysis/boundary/boundary.rst
index da93f94..1132720 100644
--- a/doc/workshop/exercises/analysis/boundary/boundary.rst
+++ b/doc/workshop/exercises/analysis/boundary/boundary.rst
@@ -11,7 +11,7 @@ Finding the boundary
 This exercise uses PDAL to find a tight-fitting boundary of an aerial scan.
 Printing the coordinates of the boundary for the file is quite simple using a
 single ``pdal info`` call, but visualizing the boundary is more complicated. To
-complete this exercise, we are going to use :ref:`qgis` to visualize the
+complete this exercise, we are going to use :ref:`qgis` to view the
 boundary, which means we must first install it on our system.
 
 Exercise
diff --git a/doc/workshop/exercises/analysis/clipping/clipping.json b/doc/workshop/exercises/analysis/clipping/clipping.json
index ca9b29f..d5b35c1 100644
--- a/doc/workshop/exercises/analysis/clipping/clipping.json
+++ b/doc/workshop/exercises/analysis/clipping/clipping.json
@@ -6,7 +6,7 @@
             "datasource": "/data/exercises/analysis/clipping/attributes.vrt",
             "dimension": "Classification",
             "layer": "OGRGeoJSON",
-            "type": "filters.attribute"
+            "type": "filters.overlay"
         },
         {
             "limits": "Classification[6:6]",
diff --git a/doc/workshop/exercises/analysis/clipping/clipping.rst b/doc/workshop/exercises/analysis/clipping/clipping.rst
index 1ac557c..f977553 100644
--- a/doc/workshop/exercises/analysis/clipping/clipping.rst
+++ b/doc/workshop/exercises/analysis/clipping/clipping.rst
@@ -85,11 +85,12 @@ Pipeline breakdown
 
 ``autzen.laz`` is the `LASzip`_ file we will clip.
 
-2. :ref:`filters.attribute`
+2. :ref:`filters.overlay`
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The :ref:`filters.attribute` filter allows you to assign values for coincident
-polygons. Using the VRT we defined in `Data preparation`_, :ref:`filters.attribute` will
+The :ref:`filters.overlay` filter allows you to assign values for coincident
+polygons. Using the VRT we defined in `Data preparation`_,
+:ref:`filters.overlay` will
 assign the values from the ``CLS`` column to the ``Classification`` field.
 
 
@@ -132,7 +133,7 @@ website.
 Notes
 --------------------------------------------------------------------------------
 
-1. :ref:`filters.attribute` does point-in-polygon checks against every point
+1. :ref:`filters.overlay` does point-in-polygon checks against every point
    that is read.
 
 2. Points that are *on* the boundary are included.
diff --git a/doc/workshop/exercises/analysis/dtm/dtm-run-command.txt b/doc/workshop/exercises/analysis/dtm/dtm-run-command.txt
index 9b4a19c..ea4654e 100644
--- a/doc/workshop/exercises/analysis/dtm/dtm-run-command.txt
+++ b/doc/workshop/exercises/analysis/dtm/dtm-run-command.txt
@@ -1,3 +1,3 @@
 docker run -v /c/Users/Howard/PDAL:/data -t pdal/pdal \
        pdal pipeline \
-       /data/exercises/analysis/dtm/p2g.json
+       /data/exercises/analysis/dtm/gdal.json
diff --git a/doc/workshop/exercises/analysis/dtm/dtm.rst b/doc/workshop/exercises/analysis/dtm/dtm.rst
index 1aa33e8..d387764 100644
--- a/doc/workshop/exercises/analysis/dtm/dtm.rst
+++ b/doc/workshop/exercises/analysis/dtm/dtm.rst
@@ -8,7 +8,7 @@ Generating a DTM
 .. index:: elevation model, DTM, DSM
 
 This exercise uses PDAL to generate an elevation model surface using the
-output from the :ref:`ground` exercise, PDAL's :ref:`writers.p2g` operation,
+output from the :ref:`ground` exercise, PDAL's :ref:`writers.gdal` operation,
 and |GDAL| to generate an elevation and hillshade surface from point cloud
 data.
 
@@ -36,7 +36,7 @@ Command
 Invoke the following command, substituting accordingly, in your `Docker
 Quickstart Terminal`:
 
-PDAL capability to generate rasterized output is provided by the :ref:`writers.p2g`
+PDAL's capability to generate rasterized output is provided by the :ref:`writers.gdal`
 stage. There is no :ref:`application <apps>` to drive this stage, and we
 must use a pipeline.
 
@@ -44,7 +44,7 @@ Pipeline breakdown
 ................................................................................
 
 
-.. include:: ./p2g.json
+.. include:: ./gdal.json
     :literal:
 
 .. note::
@@ -60,7 +60,7 @@ Pipeline breakdown
 created this output as part of the :ref:`ground` exercise.
 
 
-2. :ref:`writers.p2g`
+2. :ref:`writers.gdal`
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The `Points2grid`_ writer that bins the point cloud data into an elevation
+The :ref:`writers.gdal` writer bins the point cloud data into an elevation
diff --git a/doc/workshop/exercises/analysis/dtm/p2g.json b/doc/workshop/exercises/analysis/dtm/gdal.json
similarity index 89%
rename from doc/workshop/exercises/analysis/dtm/p2g.json
rename to doc/workshop/exercises/analysis/dtm/gdal.json
index dd55869..068810f 100644
--- a/doc/workshop/exercises/analysis/dtm/p2g.json
+++ b/doc/workshop/exercises/analysis/dtm/gdal.json
@@ -7,7 +7,7 @@
             "output_type":"all",
             "grid_dist_x":"2.0",
             "grid_dist_y":"2.0",
-            "type": "writers.p2g"
+            "type": "writers.gdal"
         }
     ]
 }
diff --git a/doc/workshop/exercises/info/metadata.rst b/doc/workshop/exercises/info/metadata.rst
index 14f23fc..732440a 100644
--- a/doc/workshop/exercises/info/metadata.rst
+++ b/doc/workshop/exercises/info/metadata.rst
@@ -14,7 +14,7 @@ This exercise uses PDAL to print metadata information. Issue the
 following command in your `Docker Quickstart Terminal`.
 
 
-literalinclude:: ./metadata-command.txt
+.. literalinclude:: ./metadata-command.txt
     :linenos:
 
 
@@ -33,10 +33,12 @@ literalinclude:: ./metadata-command.txt
 
     * `Python JSON library`_
     * `jsawk`_ (like ``awk`` but for JSON data)
+    * `jq`_ (command line processor for JSON)
     * `Ruby JSON library`_
 
 .. _`Python JSON library`: https://docs.python.org/2/library/json.html
 .. _`jsawk`: https://github.com/micha/jsawk
+.. _`jq`: https://stedolan.github.io/jq/
 .. _`Ruby JSON library`: http://ruby-doc.org/stdlib-2.0.0/libdoc/json/rdoc/JSON.html
 
 
diff --git a/doc/workshop/includes/substitutions.rst b/doc/workshop/includes/substitutions.rst
index 4b41811..74ba5ad 100644
--- a/doc/workshop/includes/substitutions.rst
+++ b/doc/workshop/includes/substitutions.rst
@@ -2,7 +2,9 @@
 .. |GRID| replace:: `GRiD <http://lidar.io/>`__
 .. |ASPRSLAS| replace:: `ASPRS LAS <http://www.asprs.org/Committee-General/LASer-LAS-File-Format-Exchange-Activities.html>`__
 .. |PCL| replace:: `PCL <http://pointclouds.org>`__
-.. |PDAL| replace:: `PDAL <http://pdal.io/>`__
+.. |PDAL| replace:: `PDAL <https://pdal.io/>`__
+.. |Proj.4| replace:: `Proj.4 <http://proj4.org>`__
+.. |GeoTIFF| replace:: `GeoTIFF <https://trac.osgeo.org/geotiff/>`__
 .. |libLAS| replace:: `libLAS <http://liblas.org/>`__
 .. |QGIS| replace:: `QGIS <http://qgis.org>`__
 .. |GDAL| replace:: `GDAL <http://gdal.org/>`__
@@ -12,9 +14,10 @@
 .. |Docker| replace:: `Docker <https://www.docker.com/>`__
 .. |LiDAR| replace:: `LiDAR <https://en.wikipedia.org/wiki/Lidar>`__
 .. |CRREL| replace:: `CRREL <http://www.erdc.usace.army.mil/Locations/ColdRegionsResearchandEngineeringLaboratory.aspx>`__
-.. |Hobu| replace:: `Hobu <http://hobu.co/>`__
+.. |Hobu| replace:: `Hobu <https://hobu.co/>`__
 .. |Optech| replace:: `Optech <http://www.teledyneoptech.com/>`__
 .. |Riegl| replace:: `Riegl <http://riegl.com/>`__
 .. |NCALM| replace:: `NCALM <http://ncalm.cive.uh.edu/>`__
 .. |UTM| replace:: `UTM <https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system>`__
 .. |WGS84| replace:: `WGS84 <https://en.wikipedia.org/wiki/Geodetic_datum>`__
+.. |WellKnownText| replace:: `Well Known Text <https://en.wikipedia.org/wiki/Well-known_text#Coordinate_reference_systems>`__
diff --git a/doc/workshop/index.rst b/doc/workshop/index.rst
index cdf035d..3a0556b 100644
--- a/doc/workshop/index.rst
+++ b/doc/workshop/index.rst
@@ -17,6 +17,5 @@ Point Cloud Processing and Analysis with PDAL
 
    agenda
    lidar-introduction
-   pdal-introduction
    software
    exercises/index
diff --git a/doc/workshop/slides/source/clipping.rst b/doc/workshop/slides/source/clipping.rst
index ee81bd6..432e670 100644
--- a/doc/workshop/slides/source/clipping.rst
+++ b/doc/workshop/slides/source/clipping.rst
@@ -59,7 +59,7 @@ Pipeline
 ================================================================================
 
 1. :ref:`readers.las`
-2. :ref:`filters.attribute`
+2. :ref:`filters.overlay`
 3. :ref:`filters.range`
 4. :ref:`writers.las`
 
@@ -79,7 +79,7 @@ Range Filter
 Pipeline Strategy
 ================================================================================
 
-1. Assign with `filters.attribute`
+1. Assign with `filters.overlay`
 2. Filter with `filters.range`
 
 
diff --git a/doc/workshop/slides/source/dtm.rst b/doc/workshop/slides/source/dtm.rst
index 74f0de5..f1cb8e8 100644
--- a/doc/workshop/slides/source/dtm.rst
+++ b/doc/workshop/slides/source/dtm.rst
@@ -12,10 +12,10 @@ Purpose:
 :ref:`DTM Workshop Materials <unavco:dtm>`
 
 
-writers.p2g
+writers.gdal
 ================================================================================
 
-* :ref:`writers.p2g`
+* :ref:`writers.gdal`
 * Generated using points2grid (OpenTopography)
 * Write TIFF/ASCII raster
 * Control pixel size
@@ -23,7 +23,7 @@ writers.p2g
 DTM (pipeline)
 ================================================================================
 
-.. literalinclude:: ../../exercises/analysis/dtm/p2g.json
+.. literalinclude:: ../../exercises/analysis/dtm/gdal.json
     :linenos:
 
 DTM (execution)
diff --git a/doc/workshop/slides/source/pdal_intro.rst b/doc/workshop/slides/source/pdal_intro.rst
index 52758ad..336d305 100644
--- a/doc/workshop/slides/source/pdal_intro.rst
+++ b/doc/workshop/slides/source/pdal_intro.rst
@@ -117,7 +117,7 @@ Writers (database)
 Filters
 ================================================================================
 
-* :ref:`Attribute assignment<filters.attribute>`
+* :ref:`Geographic attribute assignment <filters.overlay>`
 * :ref:`Polygon clipping <filters.crop>`
 * :ref:`Splitting by volume <filters.chipper>`
 * :ref:`Splitting by geometry <filters.divider>`
diff --git a/filters/ApproximateCoplanarFilter.cpp b/filters/ApproximateCoplanarFilter.cpp
index 600cbb9..47502b6 100644
--- a/filters/ApproximateCoplanarFilter.cpp
+++ b/filters/ApproximateCoplanarFilter.cpp
@@ -48,7 +48,7 @@ namespace pdal
 {
 
 static PluginInfo const s_info =
-    PluginInfo("filters.approximatecoplanar", "ApproximateCoplanar Filter", 
+    PluginInfo("filters.approximatecoplanar", "ApproximateCoplanar Filter",
                "http://pdal.io/stages/filters.approximatecoplanar.html");
 
 CREATE_STATIC_PLUGIN(1, 0, ApproximateCoplanarFilter, Filter, s_info)
@@ -69,7 +69,8 @@ void ApproximateCoplanarFilter::addArgs(ProgramArgs& args)
 
 void ApproximateCoplanarFilter::addDimensions(PointLayoutPtr layout)
 {
-    m_coplanar = layout->registerOrAssignDim("Coplanar", Dimension::Type::Unsigned8);
+    m_coplanar = layout->registerOrAssignDim("Coplanar",
+        Dimension::Type::Unsigned8);
 }
 
 void ApproximateCoplanarFilter::filter(PointView& view)
@@ -90,9 +91,9 @@ void ApproximateCoplanarFilter::filter(PointView& view)
         // perform the eigen decomposition
         SelfAdjointEigenSolver<Matrix3f> solver(B);
         if (solver.info() != Success)
-            throw pdal_error("Cannot perform eigen decomposition.");
+            throwError("Cannot perform eigen decomposition.");
         auto ev = solver.eigenvalues();
-        
+
         // test eigenvalues to label points that are approximately coplanar
         if ((ev[1] > m_thresh1 * ev[0]) && (m_thresh2 * ev[1] > ev[2]))
             view.setField(m_coplanar, i, 1u);
diff --git a/filters/AssignFilter.cpp b/filters/AssignFilter.cpp
new file mode 100644
index 0000000..8269aed
--- /dev/null
+++ b/filters/AssignFilter.cpp
@@ -0,0 +1,155 @@
+/******************************************************************************
+* Copyright (c) 2017, Hobu Inc., info at hobu.co
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following
+* conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in
+*       the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+*       names of its contributors may be used to endorse or promote
+*       products derived from this software without specific prior
+*       written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+* OF SUCH DAMAGE.
+****************************************************************************/
+
+#include "AssignFilter.hpp"
+
+#include <pdal/pdal_macros.hpp>
+#include <pdal/StageFactory.hpp>
+#include <pdal/util/ProgramArgs.hpp>
+
+#include "private/DimRange.hpp"
+
+namespace pdal
+{
+
+static PluginInfo const s_info = PluginInfo(
+    "filters.assign",
+    "Assign values for a dimension using a specified value.",
+    "http://pdal.io/stages/filters.assign.html" );
+
+CREATE_STATIC_PLUGIN(1, 0, AssignFilter, Filter, s_info)
+
+struct AssignRange : public DimRange
+{
+    void parse(const std::string& r);
+    double m_value;
+};
+
+void AssignRange::parse(const std::string& r)
+{
+    std::string::size_type pos, count;
+    const char *start;
+    char *end;
+
+    pos = subParse(r);
+    count = Utils::extract(r, pos, (int(*)(int))std::isspace);
+    pos += count;
+
+    if (r[pos] != '=')
+        throw error("Missing '=' assignment separator.");
+    pos++;
+
+    count = Utils::extract(r, pos, (int(*)(int))std::isspace);
+    pos += count;
+
+    // Extract value
+    start = r.data() + pos;
+    m_value = std::strtod(start, &end);
+    if (start == end)
+        throw error("Missing value to assign following '='.");
+    pos += (end - start);
+
+    if (pos != r.size())
+        throw error("Invalid characters following valid range.");
+}
+
+
+std::istream& operator>>(std::istream& in, AssignRange& r)
+{
+    std::string s;
+
+    std::getline(in, s);
+    r.parse(s);
+    return in;
+}
+
+
+std::ostream& operator<<(std::ostream& out, const AssignRange& r)
+{
+    out << (const DimRange&)r;
+    out << "=" << r.m_name;
+    return out;
+}
+
+
+AssignFilter::AssignFilter()
+{}
+
+
+AssignFilter::~AssignFilter()
+{}
+
+
+void AssignFilter::addArgs(ProgramArgs& args)
+{
+    args.add("assignment", "Values to assign to dimensions based on range.",
+        m_assignments);
+}
+
+
+void AssignFilter::prepared(PointTableRef table)
+{
+    PointLayoutPtr layout(table.layout());
+
+    for (auto& r : m_assignments)
+    {
+        r.m_id = layout->findDim(r.m_name);
+        if (r.m_id == Dimension::Id::Unknown)
+            throwError("Invalid dimension name in 'values' option: '" +
+                r.m_name + "'.");
+    }
+}
+
+
+bool AssignFilter::processOne(PointRef& point)
+{
+    for (AssignRange& r : m_assignments)
+        if (r.valuePasses(point.getFieldAs<double>(r.m_id)))
+            point.setField(r.m_id, r.m_value);
+    return true;
+}
+
+
+void AssignFilter::filter(PointView& view)
+{
+    PointRef point(view, 0);
+    for (PointId id = 0; id < view.size(); ++id)
+    {
+        point.setPointId(id);
+        processOne(point);
+    }
+}
+
+} // namespace pdal
+
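
A usage sketch for the new stage, assuming the ``Dimension[start:end]=value``
spelling implied by the ``DimRange``/``AssignRange`` parser above (filenames
are placeholders; ``[:]`` standing for the full range is likewise an
assumption):

    {
        "pipeline": [
            "input.las",
            {
                "type": "filters.assign",
                "assignment": "Classification[:]=0"
            },
            "output.las"
        ]
    }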
diff --git a/io/PlyWriter.hpp b/filters/AssignFilter.hpp
similarity index 74%
copy from io/PlyWriter.hpp
copy to filters/AssignFilter.hpp
index f492510..4fdd4ea 100644
--- a/io/PlyWriter.hpp
+++ b/filters/AssignFilter.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2015, Peter J. Gadomski <pete.gadomski at gmail.com>
+* Copyright (c) 2017, Hobu Inc. <hobu.inc at gmail.com>
 *
 * All rights reserved.
 *
@@ -32,40 +32,39 @@
 * OF SUCH DAMAGE.
 ****************************************************************************/
 
-#include <rply/rply.h>
+#pragma once
 
-#include <pdal/PointView.hpp>
-#include <pdal/Writer.hpp>
 #include <pdal/plugin.hpp>
+#include <pdal/Filter.hpp>
 
-extern "C" int32_t PlyWriter_ExitFunc();
-extern "C" PF_ExitFunc PlyWriter_InitPlugin();
+extern "C" int32_t AssignFilter_ExitFunc();
+extern "C" PF_ExitFunc AssignFilter_InitPlugin();
 
 namespace pdal
 {
 
-class PDAL_DLL PlyWriter : public Writer
+struct AssignRange;
+
+class PDAL_DLL AssignFilter : public Filter
 {
 public:
+    AssignFilter();
+    ~AssignFilter();
+
     static void * create();
     static int32_t destroy(void *);
-    std::string getName() const;
-
-    PlyWriter();
+    std::string getName() const { return "filters.assign"; }
 
 private:
     virtual void addArgs(ProgramArgs& args);
-    virtual void initialize();
-    virtual void ready(PointTableRef table);
-    virtual void write(const PointViewPtr data);
-    virtual void done(PointTableRef table);
+    virtual void prepared(PointTableRef table);
+    virtual bool processOne(PointRef& point);
+    virtual void filter(PointView& view);
 
-    std::string m_filename;
-    p_ply m_ply;
-    PointViewPtr m_pointCollector;
-    std::string m_storageModeSpec;
-    e_ply_storage_mode m_storageMode;
+    AssignFilter& operator=(const AssignFilter&) = delete;
+    AssignFilter(const AssignFilter&) = delete;
 
+    std::vector<AssignRange> m_assignments;
 };
 
-}
+} // namespace pdal
diff --git a/filters/OutlierFilter.hpp b/filters/ClusterFilter.cpp
similarity index 61%
copy from filters/OutlierFilter.hpp
copy to filters/ClusterFilter.cpp
index 7f42a7f..ffe1aa8 100644
--- a/filters/OutlierFilter.hpp
+++ b/filters/ClusterFilter.cpp
@@ -32,56 +32,54 @@
  * OF SUCH DAMAGE.
  ****************************************************************************/
 
-#pragma once
+#include "ClusterFilter.hpp"
 
-#include <pdal/Filter.hpp>
-#include <pdal/plugin.hpp>
+#include <pdal/pdal_macros.hpp>
+#include <pdal/Segmentation.hpp>
 
-#include <memory>
-#include <map>
 #include <string>
 
-extern "C" int32_t OutlierFilter_ExitFunc();
-extern "C" PF_ExitFunc OutlierFilter_InitPlugin();
-
 namespace pdal
 {
 
-class Options;
+static PluginInfo const s_info =
+    PluginInfo("filters.cluster", "Label clusters",
+               "http://pdal.io/stages/filters.cluster.html");
 
-struct Indices
-{
-    std::vector<PointId> inliers;
-    std::vector<PointId> outliers;
-};
+CREATE_STATIC_PLUGIN(1, 0, ClusterFilter, Filter, s_info)
 
-class PDAL_DLL OutlierFilter : public pdal::Filter
+std::string ClusterFilter::getName() const
 {
-public:
-    OutlierFilter() : Filter()
-    {}
+    return s_info.name;
+}
 
-    static void * create();
-    static int32_t destroy(void *);
-    std::string getName() const;
+void ClusterFilter::addArgs(ProgramArgs& args)
+{
+    args.add("min_points", "Min points per cluster", m_minPoints,
+        static_cast<uint64_t>(1));
+    args.add("max_points", "Max points per cluster", m_maxPoints,
+        std::numeric_limits<uint64_t>::max());
+    args.add("tolerance", "Radius", m_tolerance, 1.0);
+}
 
-private:
-    std::string m_method;
-    int m_minK;
-    double m_radius;
-    int m_meanK;
-    double m_multiplier;
-    bool m_classify;
-    bool m_extract;
+void ClusterFilter::addDimensions(PointLayoutPtr layout)
+{
+    using namespace Dimension;
+    m_cluster = layout->registerOrAssignDim("ClusterID", Type::Unsigned64);
+}
 
-    virtual void addDimensions(PointLayoutPtr layout);
-    virtual void addArgs(ProgramArgs& args);
-    Indices processRadius(PointViewPtr inView);
-    Indices processStatistical(PointViewPtr inView);
-    virtual PointViewSet run(PointViewPtr view);
+void ClusterFilter::filter(PointView& view)
+{
+    auto clusters = Segmentation::extractClusters(view, m_minPoints,
+        m_maxPoints, m_tolerance);
 
-    OutlierFilter& operator=(const OutlierFilter&); // not implemented
-    OutlierFilter(const OutlierFilter&); // not implemented
-};
+    uint64_t id = 0;
+    for (auto const& c : clusters)
+    {
+        for (auto const& i : c)
+            view.setField(m_cluster, i, id);
+        id++;
+    }
+}
 
 } // namespace pdal
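
The stage registers a 64-bit ``ClusterID`` dimension and exposes the three
options added in ``addArgs``. A pipeline sketch (filenames assumed; note that
LAS output would need the writer's ``extra_dims`` support to keep
``ClusterID``):

    {
        "pipeline": [
            "input.las",
            {
                "type": "filters.cluster",
                "min_points": 10,
                "tolerance": 2.0
            },
            "clustered.las"
        ]
    }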
diff --git a/filters/OutlierFilter.hpp b/filters/ClusterFilter.hpp
similarity index 74%
copy from filters/OutlierFilter.hpp
copy to filters/ClusterFilter.hpp
index 7f42a7f..e3d8397 100644
--- a/filters/OutlierFilter.hpp
+++ b/filters/ClusterFilter.hpp
@@ -37,28 +37,20 @@
 #include <pdal/Filter.hpp>
 #include <pdal/plugin.hpp>
 
-#include <memory>
-#include <map>
 #include <string>
 
-extern "C" int32_t OutlierFilter_ExitFunc();
-extern "C" PF_ExitFunc OutlierFilter_InitPlugin();
+extern "C" int32_t ClusterFilter_ExitFunc();
+extern "C" PF_ExitFunc ClusterFilter_InitPlugin();
 
 namespace pdal
 {
 
-class Options;
+class ProgramArgs;
 
-struct Indices
-{
-    std::vector<PointId> inliers;
-    std::vector<PointId> outliers;
-};
-
-class PDAL_DLL OutlierFilter : public pdal::Filter
+class PDAL_DLL ClusterFilter : public Filter
 {
 public:
-    OutlierFilter() : Filter()
+    ClusterFilter() : Filter()
     {}
 
     static void * create();
@@ -66,22 +58,17 @@ public:
     std::string getName() const;
 
 private:
-    std::string m_method;
-    int m_minK;
-    double m_radius;
-    int m_meanK;
-    double m_multiplier;
-    bool m_classify;
-    bool m_extract;
+    uint64_t m_minPoints;
+    uint64_t m_maxPoints;
+    double m_tolerance;
+    Dimension::Id m_cluster;
 
-    virtual void addDimensions(PointLayoutPtr layout);
     virtual void addArgs(ProgramArgs& args);
-    Indices processRadius(PointViewPtr inView);
-    Indices processStatistical(PointViewPtr inView);
-    virtual PointViewSet run(PointViewPtr view);
+    virtual void addDimensions(PointLayoutPtr layout);
+    virtual void filter(PointView& view);
 
-    OutlierFilter& operator=(const OutlierFilter&); // not implemented
-    OutlierFilter(const OutlierFilter&); // not implemented
+    ClusterFilter& operator=(const ClusterFilter&); // not implemented
+    ClusterFilter(const ClusterFilter&); // not implemented
 };
 
 } // namespace pdal
diff --git a/filters/ColorinterpFilter.cpp b/filters/ColorinterpFilter.cpp
index 028528c..470ec90 100644
--- a/filters/ColorinterpFilter.cpp
+++ b/filters/ColorinterpFilter.cpp
@@ -52,7 +52,8 @@
 namespace pdal
 {
 
-static std::vector<std::string> ramps = {"awesome_green", "black_orange", "blue_hue", "blue_red", "heat_map", "pestel_shades", "blue_orange"};
+static std::vector<std::string> ramps = {"awesome_green", "black_orange",
+    "blue_hue", "blue_red", "heat_map", "pestel_shades", "blue_orange"};
 
 static PluginInfo const s_info = PluginInfo(
     "filters.colorinterp",
@@ -77,9 +78,9 @@ std::string ColorinterpFilter::getName() const { return s_info.name; }
         (void)VSIFileFromMemBuffer(rampFilename.c_str(), location, size, FALSE); \
     }
 //
-std::shared_ptr<pdal::gdal::Raster> openRamp(std::string& rampFilename)
+std::shared_ptr<gdal::Raster> openRamp(std::string& rampFilename)
 {
-    // If the user] selected a default ramp name, it will be opened by
+    // If the user selected a default ramp name, it will be opened by
     // one of these macros if it matches. Otherwise, we just open with the
     // GDALOpen'able the user gave us
 
@@ -94,7 +95,8 @@ std::shared_ptr<pdal::gdal::Raster> openRamp(std::string& rampFilename)
     GETRAMP(heat_map);
     GETRAMP(pestel_shades);
 
-    std::shared_ptr<pdal::gdal::Raster> output (new pdal::gdal::Raster(rampFilename.c_str()));
+    std::shared_ptr<gdal::Raster>
+        output(new gdal::Raster(rampFilename.c_str()));
     return output;
 }
 
@@ -103,16 +105,21 @@ void ColorinterpFilter::addArgs(ProgramArgs& args)
     args.add("dimension", "Dimension to interpolate", m_interpDimString, "Z");
     args.add("minimum", "Minimum value to use for scaling", m_min);
     args.add("maximum", "Maximum value to use for scaling", m_max);
-    args.add("ramp", "GDAL-readable color ramp image to use", m_colorramp, "pestel_shades");
+    args.add("ramp", "GDAL-readable color ramp image to use", m_colorramp,
+        "pestel_shades");
     args.add("invert", "Invert the ramp direction", m_invertRamp, false);
-    args.add("mad", "Use Median Absolute Deviation to compute ramp bounds in combination with 'k' ", m_useMAD, false);
-    args.add("mad_multiplier", "MAD threshold multiplier", m_madMultiplier, 1.4862);
-    args.add("k", "Number of deviations to compute minimum/maximum ", m_stdDevThreshold, 0.0);
+    args.add("mad", "Use Median Absolute Deviation to compute ramp bounds "
+        "in combination with 'k' ", m_useMAD, false);
+    args.add("mad_multiplier", "MAD threshold multiplier",
+        m_madMultiplier, 1.4862);
+    args.add("k", "Number of deviations to compute minimum/maximum ",
+        m_stdDevThreshold, 0.0);
 }
 
 void ColorinterpFilter::addDimensions(PointLayoutPtr layout)
 {
-    layout->registerDims({Dimension::Id::Red, Dimension::Id::Green, Dimension::Id::Blue});
+    layout->registerDims({Dimension::Id::Red,
+        Dimension::Id::Green, Dimension::Id::Blue});
 }
 
 void ColorinterpFilter::initialize()
@@ -122,14 +129,15 @@ void ColorinterpFilter::initialize()
     m_raster = openRamp(m_colorramp);
     m_raster->open();
 
-    log()->get(LogLevel::Debug) << getName() << " raster connection: "
-                                             << m_raster->filename() << std::endl;
+    log()->get(LogLevel::Debug) << getName() << " raster connection: " <<
+        m_raster->filename() << std::endl;
 
     m_interpDim = Dimension::id(m_interpDimString);
     if (m_interpDim == Dimension::Id::Unknown)
-        throw pdal_error("Dimension name is not known!");
+        throwError("provided dimension name is not known.");
 }
 
+
 void ColorinterpFilter::filter(PointView& view)
 {
     double median(0.0);
@@ -142,7 +150,8 @@ void ColorinterpFilter::filter(PointView& view)
     {
         std::vector<double> values(view.size());
 
-        pdal::stats::Summary summary(pdal::Dimension::name(m_interpDim), pdal::stats::Summary::NoEnum);
+        stats::Summary summary(Dimension::name(m_interpDim),
+            stats::Summary::NoEnum);
         for (PointId idx = 0; idx < view.size(); ++idx)
         {
             double v = view.getFieldAs<double>(m_interpDim, idx);
@@ -152,9 +161,10 @@ void ColorinterpFilter::filter(PointView& view)
 
         auto compute_median = [](std::vector<double> vals)
         {
-            std::nth_element(vals.begin(), vals.begin()+vals.size()/2, vals.end());
+            std::nth_element(vals.begin(), vals.begin() + vals.size() / 2,
+                vals.end());
 
-            return *(vals.begin()+vals.size()/2);
+            return *(vals.begin() + vals.size() / 2);
         };
 
         median = compute_median(values);
@@ -169,10 +179,14 @@ void ColorinterpFilter::filter(PointView& view)
              m_min = median - threshold;
              m_max = median + threshold;
 
-             log()->get(LogLevel::Debug) << getName() << " mad " << mad << std::endl;
-             log()->get(LogLevel::Debug) << getName() << " median " << median << std::endl;
-             log()->get(LogLevel::Debug) << getName() << " minimum " << m_min << std::endl;
-             log()->get(LogLevel::Debug) << getName() << " maximum " << m_max << std::endl;
+             log()->get(LogLevel::Debug) << getName() << " mad " <<
+                mad << std::endl;
+             log()->get(LogLevel::Debug) << getName() << " median " <<
+                median << std::endl;
+             log()->get(LogLevel::Debug) << getName() << " minimum " <<
+                m_min << std::endl;
+             log()->get(LogLevel::Debug) << getName() << " maximum " <<
+                m_max << std::endl;
         }
         else
         {
@@ -180,10 +194,14 @@ void ColorinterpFilter::filter(PointView& view)
              m_min = median - threshold;
              m_max = median + threshold;
 
-             log()->get(LogLevel::Debug) << getName() << " stddev threshold " << threshold << std::endl;
-             log()->get(LogLevel::Debug) << getName() << " median " << median << std::endl;
-             log()->get(LogLevel::Debug) << getName() << " minimum " << m_min << std::endl;
-             log()->get(LogLevel::Debug) << getName() << " maximum " << m_max << std::endl;
+             log()->get(LogLevel::Debug) << getName() <<
+                " stddev threshold " << threshold << std::endl;
+             log()->get(LogLevel::Debug) << getName() << " median " <<
+                median << std::endl;
+             log()->get(LogLevel::Debug) << getName() << " minimum " <<
+                m_min << std::endl;
+             log()->get(LogLevel::Debug) << getName() << " maximum " <<
+                m_max << std::endl;
         }
 
     }
@@ -192,7 +210,8 @@ void ColorinterpFilter::filter(PointView& view)
     // compute them.
     else if ((m_min == 0.0 && m_max == 0.0) )
     {
-        pdal::stats::Summary summary(pdal::Dimension::name(m_interpDim), pdal::stats::Summary::NoEnum);
+        stats::Summary summary(Dimension::name(m_interpDim),
+            stats::Summary::NoEnum);
         for (PointId idx = 0; idx < view.size(); ++idx)
         {
             double v = view.getFieldAs<double>(m_interpDim, idx);
@@ -207,8 +226,6 @@ void ColorinterpFilter::filter(PointView& view)
     m_raster->readBand(m_greenBand, 2 );
     m_raster->readBand(m_blueBand, 3);
 
-
-
     for (PointId idx = 0; idx < view.size(); ++idx)
     {
 
@@ -236,7 +253,6 @@ void ColorinterpFilter::filter(PointView& view)
         view.setField(Dimension::Id::Red, idx, red);
         view.setField(Dimension::Id::Green, idx, green);
         view.setField(Dimension::Id::Blue, idx, blue);
-
     }
 }
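
A sketch of driving the MAD-based scaling added above (filenames assumed;
``blue_red`` is one of the built-in ramps listed at the top of the file):

    {
        "pipeline": [
            "input.las",
            {
                "type": "filters.colorinterp",
                "ramp": "blue_red",
                "mad": true,
                "k": 1.8
            },
            "colored.las"
        ]
    }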
 
diff --git a/filters/ColorizationFilter.cpp b/filters/ColorizationFilter.cpp
index 30e6ced..1cefaf9 100644
--- a/filters/ColorizationFilter.cpp
+++ b/filters/ColorizationFilter.cpp
@@ -68,18 +68,28 @@ ColorizationFilter::BandInfo parseDim(const std::string& dim,
     uint32_t band = defaultBand;
     double scale = 1.0;
 
-    try
-    {
-        pos = 0;
-        // Skip leading whitespace.
-        count = Utils::extract(dim, pos, (int(*)(int))std::isspace);
-        pos += count;
+    pos = 0;
+    // Skip leading whitespace.
+    count = Utils::extract(dim, pos, (int(*)(int))std::isspace);
+    pos += count;
 
-        count = Dimension::extractName(dim, pos);
-        if (count == 0)
-           throw std::string("No dimension name provided.");
-        name = dim.substr(pos, count);
-        pos += count;
+    count = Dimension::extractName(dim, pos);
+    if (count == 0)
+        throw std::string("No dimension name provided.");
+    name = dim.substr(pos, count);
+    pos += count;
+
+    count = Utils::extract(dim, pos, (int(*)(int))std::isspace);
+    pos += count;
+
+    if (pos < dim.size() && dim[pos] == ':')
+    {
+        pos++;
+        start = dim.data() + pos;
+        band = std::strtoul(start, &end, 10);
+        if (start == end)
+            band = defaultBand;
+        pos += (end - start);
 
         count = Utils::extract(dim, pos, (int(*)(int))std::isspace);
         pos += count;
@@ -88,43 +98,22 @@ ColorizationFilter::BandInfo parseDim(const std::string& dim,
         {
             pos++;
             start = dim.data() + pos;
-            band = std::strtoul(start, &end, 10);
+            scale = std::strtod(start, &end);
             if (start == end)
-                band = defaultBand;
+                scale = 1.0;
             pos += (end - start);
-
-            count = Utils::extract(dim, pos, (int(*)(int))std::isspace);
-            pos += count;
-
-            if (pos < dim.size() && dim[pos] == ':')
-            {
-                pos++;
-                start = dim.data() + pos;
-                scale = std::strtod(start, &end);
-                if (start == end)
-                    scale = 1.0;
-                pos += (end - start);
-            }
         }
+    }
 
-        count = Utils::extract(dim, pos, (int(*)(int))std::isspace);
-        pos += count;
-
-        if (pos != dim.size())
-        {
-            std::ostringstream oss;
+    count = Utils::extract(dim, pos, (int(*)(int))std::isspace);
+    pos += count;
 
-            oss << "Invalid character '" << dim[pos] <<
-                "' following dimension specification.";
-            throw pdal_error(oss.str());
-        }
-    }
-    catch (std::string s)
+    if (pos != dim.size())
     {
         std::ostringstream oss;
-        oss << "filters.colorization: invalid --dimensions option: '" << dim <<
-            "': " << s;
-        throw pdal_error(oss.str());
+        oss << "Invalid character '" << dim[pos] <<
+            "' following dimension specification.";
+        throw oss.str();
     }
     return ColorizationFilter::BandInfo(name, band, scale);
 }
@@ -146,9 +135,16 @@ void ColorizationFilter::initialize()
     uint32_t defaultBand = 1;
     for (std::string& dim : m_dimSpec)
     {
-        BandInfo bi = parseDim(dim, defaultBand);
-        defaultBand = bi.m_band + 1;
-        m_bands.push_back(bi);
+        try
+        {
+            BandInfo bi = parseDim(dim, defaultBand);
+            defaultBand = bi.m_band + 1;
+            m_bands.push_back(bi);
+        }
+        catch(const std::string& what)
+        {
+            throwError("invalid --dimensions option: '" + dim + "': " + what);
+        }
     }
 
     gdal::registerDrivers();
@@ -180,7 +176,7 @@ void ColorizationFilter::ready(PointTableRef table)
         }
         else
         {
-            throw pdal_error(getName() + ": " + m_raster->errorMsg());
+            throwError(m_raster->errorMsg());
         }
     }
 }
@@ -207,6 +203,7 @@ bool ColorizationFilter::processOne(PointRef& point)
     return false;
 }
 
+
 void ColorizationFilter::filter(PointView& view)
 {
     PointRef point = view.point(0);
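
For reference, the grammar that the restructured ``parseDim`` accepts is
``name[:band[:scale]]``, and ``--dimensions`` is the option it feeds (per the
error message above). A sketch of a matching specification (the ``raster``
option name and filenames are assumptions):

    {
        "pipeline": [
            "input.las",
            {
                "type": "filters.colorization",
                "raster": "ortho.tif",
                "dimensions": "Red:1:256, Green:2:256, Blue:3:256"
            },
            "colorized.las"
        ]
    }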
diff --git a/filters/ComputeRangeFilter.cpp b/filters/ComputeRangeFilter.cpp
index 80d6300..ab0120b 100644
--- a/filters/ComputeRangeFilter.cpp
+++ b/filters/ComputeRangeFilter.cpp
@@ -52,12 +52,13 @@ std::string ComputeRangeFilter::getName() const
     return s_info.name;
 }
 
+
 void ComputeRangeFilter::addDimensions(PointLayoutPtr layout)
 {
-    using namespace Dimension;
-    m_range = layout->registerOrAssignDim("Range", Type::Double);
+    m_range = layout->registerOrAssignDim("Range", Dimension::Type::Double);
 }
 
+
 void ComputeRangeFilter::prepared(PointTableRef table)
 {
     using namespace Dimension;
@@ -66,11 +67,11 @@ void ComputeRangeFilter::prepared(PointTableRef table)
 
     m_frameNumber = layout->findDim("Frame Number");
     if (m_frameNumber == Id::Unknown)
-        throw pdal_error("ComputeRangeFilter: missing Frame Number dimension in input PointView");
+        throwError("missing Frame Number dimension in input PointView");
 
     m_pixelNumber = layout->findDim("Pixel Number");
     if (m_pixelNumber == Id::Unknown)
-        throw pdal_error("ComputeRangeFilter: missing Pixel Number dimension in input PointView");
+        throwError("missing Pixel Number dimension in input PointView");
 }
 
 void ComputeRangeFilter::filter(PointView& view)
diff --git a/filters/CropFilter.cpp b/filters/CropFilter.cpp
index 8ee1282..825ffb1 100644
--- a/filters/CropFilter.cpp
+++ b/filters/CropFilter.cpp
@@ -34,14 +34,13 @@
 
 #include "CropFilter.hpp"
 
-#include <iomanip>
-
+#include <pdal/GDALUtils.hpp>
 #include <pdal/PointView.hpp>
 #include <pdal/StageFactory.hpp>
 #include <pdal/Polygon.hpp>
 #include <pdal/pdal_macros.hpp>
 #include <pdal/util/ProgramArgs.hpp>
-#include <pdal/KDIndex.hpp>
+#include <filters/private/crop/Point.hpp>
 
 #include <sstream>
 #include <cstdarg>
@@ -58,11 +57,11 @@ CREATE_STATIC_PLUGIN(1, 0, CropFilter, Filter, s_info)
 
 std::string CropFilter::getName() const { return s_info.name; }
 
-CropFilter::CropFilter() : pdal::Filter()
-{
-    m_cropOutside = false;
-}
+CropFilter::CropFilter() : m_cropOutside(false)
+{}
 
+CropFilter::~CropFilter()
+{}
 
 void CropFilter::addArgs(ProgramArgs& args)
 {
@@ -70,10 +69,12 @@ void CropFilter::addArgs(ProgramArgs& args)
         "bounding region", m_cropOutside);
     args.add("a_srs", "Spatial reference for bounding region", m_assignedSrs);
     args.add("bounds", "Point box for cropped points", m_bounds);
-    args.add("point", "Crop within 'distance' from a 2D or 3D point", m_points).
-        setErrorText("Invalid point specification must be in the form \"(1.00, 1.00)\""
-                "or \"(1.00, 1.00, 1.00)\"");
-    args.add("distance", "Crop with this distance from 2D or 3D 'point'", m_distance);
+    args.add("point", "Center of circular/spherical crop region.  Use with "
+        "'distance'.", m_centers).setErrorText("Invalid point specification.  "
+            "Must be valid GeoJSON/WKT. "
+            "Ex: \"(1.00, 1.00)\" or \"(1.00, 1.00, 1.00)\"");
+    args.add("distance", "Crop with this distance from 2D or 3D 'point'",
+        m_distance);
     args.add("polygon", "Bounding polying for cropped points", m_polys).
         setErrorText("Invalid polygon specification.  "
             "Must be valid GeoJSON/WKT");
@@ -82,35 +83,28 @@ void CropFilter::addArgs(ProgramArgs& args)
 
 void CropFilter::initialize()
 {
-
     // Set geometry from polygons.
     if (m_polys.size())
     {
         m_geoms.clear();
         for (Polygon& poly : m_polys)
         {
-            GeomPkg g;
-
             // Throws if invalid.
             poly.valid();
-            if (!m_assignedSrs.empty())
-                poly.setSpatialReference(m_assignedSrs);
-            g.m_geom = poly;
-            m_geoms.push_back(g);
+            m_geoms.push_back(poly);
         }
     }
-
+    m_distance2 = m_distance * m_distance;
 }
 
 
 void CropFilter::ready(PointTableRef table)
 {
+    // If the user didn't provide an SRS, take one from the table.
+    if (m_assignedSrs.empty())
+        m_assignedSrs = table.anySpatialReference();
     for (auto& geom : m_geoms)
-    {
-        // If we already overrode the SRS, use that instead
-        if (m_assignedSrs.empty())
-            geom.m_geom.setSpatialReference(table.anySpatialReference());
-    }
+        geom.setSpatialReference(m_assignedSrs);
 }
 
 
@@ -124,29 +118,67 @@ bool CropFilter::processOne(PointRef& point)
         if (!crop(point, box.to2d()))
             return false;
 
+    for (auto& center: m_centers)
+        if (!crop(point, center))
+            return false;
+
     return true;
 }
 
 
-PointViewSet CropFilter::run(PointViewPtr view)
+void CropFilter::spatialReferenceChanged(const SpatialReference& srs)
 {
-    PointViewSet viewSet;
-    SpatialReference srs = view->spatialReference();
+    transform(srs);
+}
+
 
+void CropFilter::transform(const SpatialReference& srs)
+{
+    // If we don't have any SRS, do nothing.
     for (auto& geom : m_geoms)
     {
-        // If this is the first time through or the SRS has changed,
-        // prepare the crop polygon.
-        if (srs != m_lastSrs)
+        try
+        {
+            geom = geom.transform(srs);
+        }
+        catch (pdal_error& err)
         {
-            geom.m_geom = geom.m_geom.transform(srs);
+            throwError(err.what());
         }
+    }
+
+    if (srs.empty() && m_assignedSrs.empty())
+        return;
+    if (srs.empty() || m_assignedSrs.empty())
+        throwError("Unable to transform crop geometry to point "
+            "coordinate system.");
+
+    for (auto& box : m_bounds)
+    {
+        BOX3D b3d = box.to3d();
+        gdal::reprojectBounds(b3d, m_assignedSrs.getWKT(), srs.getWKT());
+        box = b3d;
+    }
+    for (auto& point : m_centers)
+    {
+        gdal::reprojectPoint(point.x, point.y, point.z,
+            m_assignedSrs.getWKT(), srs.getWKT());
+    }
+    m_assignedSrs = srs;
+}
+
 
+PointViewSet CropFilter::run(PointViewPtr view)
+{
+    PointViewSet viewSet;
+
+    transform(view->spatialReference());
+    for (auto& geom : m_geoms)
+    {
         PointViewPtr outView = view->makeNew();
         crop(geom, *view, *outView);
         viewSet.insert(outView);
     }
-    m_lastSrs = srs;
 
     for (auto& box : m_bounds)
     {
@@ -155,10 +187,10 @@ PointViewSet CropFilter::run(PointViewPtr view)
         viewSet.insert(outView);
     }
 
-    for (auto& point: m_points)
+    for (auto& point: m_centers)
     {
         PointViewPtr outView = view->makeNew();
-        crop(point, m_distance, *view, *outView);
+        crop(point, *view, *outView);
         viewSet.insert(outView);
     }
 
@@ -166,7 +198,7 @@ PointViewSet CropFilter::run(PointViewPtr view)
 }
 
 
-bool CropFilter::crop(PointRef& point, const BOX2D& box)
+bool CropFilter::crop(const PointRef& point, const BOX2D& box)
 {
     double x = point.getFieldAs<double>(Dimension::Id::X);
     double y = point.getFieldAs<double>(Dimension::Id::Y);
@@ -182,62 +214,64 @@ void CropFilter::crop(const BOX2D& box, PointView& input, PointView& output)
     for (PointId idx = 0; idx < input.size(); ++idx)
     {
         point.setPointId(idx);
-        if (crop(point, box))
+        if (m_cropOutside != crop(point, box))
             output.appendPoint(input, idx);
     }
 }
 
-bool CropFilter::crop(PointRef& point, const GeomPkg& g)
+
+bool CropFilter::crop(const PointRef& point, const Polygon& g)
 {
-    bool covers = g.m_geom.covers(point);
-    bool keep = (m_cropOutside != covers);
-    return keep;
+    return (m_cropOutside != g.covers(point));
 }
 
-void CropFilter::crop(const GeomPkg& g, PointView& input, PointView& output)
+
+void CropFilter::crop(const Polygon& g, PointView& input, PointView& output)
 {
     PointRef point = input.point(0);
     for (PointId idx = 0; idx < input.size(); ++idx)
     {
         point.setPointId(idx);
-        bool covers = g.m_geom.covers(point);
-        bool keep = (m_cropOutside != covers);
-        if (keep)
+        if (crop(point, g))
             output.appendPoint(input, idx);
     }
 }
 
-void CropFilter::crop(const cropfilter::Point& point, double distance, PointView& input, PointView& output)
-{
 
-    bool bIs3D = point.is3d();
+bool CropFilter::crop(const PointRef& point, const cropfilter::Point& center)
+{
+    double x = point.getFieldAs<double>(Dimension::Id::X);
+    double y = point.getFieldAs<double>(Dimension::Id::Y);
+    x -= center.x;
+    y -= center.y;
+    if (x > m_distance || y > m_distance)
+        return (m_cropOutside);
 
-    if (bIs3D)
+    bool inside;
+    if (center.is3d())
     {
-        KD3Index index(input);
-        index.build();
-        std::vector<PointId> points = index.radius(point.x, point.y, point.z, m_distance);
-        for (PointId idx = 0; idx < points.size(); ++idx)
-        {
-            if (!m_cropOutside)
-                output.appendPoint(input, idx);
-        }
+        double z = point.getFieldAs<double>(Dimension::Id::Z);
+        z -= center.z;
+        if (z > m_distance)
+            return (m_cropOutside);
+        inside = (x * x + y * y + z * z < m_distance2);
     }
-
     else
-    {
-        KD2Index index(input);
-        index.build();
-        std::vector<PointId> points = index.radius(point.x, point.y, m_distance);
+        inside = (x * x + y * y < m_distance2);
+    return (m_cropOutside != inside);
+}
 
-        for (PointId idx = 0; idx < points.size(); ++idx)
-        {
-            if (!m_cropOutside)
-                output.appendPoint(input, idx);
-        }
 
+void CropFilter::crop(const cropfilter::Point& center, PointView& input,
+    PointView& output)
+{
+    PointRef point = input.point(0);
+    for (PointId idx = 0; idx < input.size(); ++idx)
+    {
+        point.setPointId(idx);
+        if (crop(point, center))
+            output.appendPoint(input, idx);
     }
 }
 
-
 } // namespace pdal
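
A sketch of the new circular crop (center/distance semantics follow the
``crop(point, center)`` overload above; the WKT ``POINT`` spelling follows the
GeoJSON/WKT error text and is otherwise an assumption, as are the filenames):

    {
        "pipeline": [
            "input.las",
            {
                "type": "filters.crop",
                "point": "POINT (625000 4840000)",
                "distance": 500
            },
            "cropped.las"
        ]
    }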
diff --git a/filters/CropFilter.hpp b/filters/CropFilter.hpp
index b9614e8..197eb9e 100644
--- a/filters/CropFilter.hpp
+++ b/filters/CropFilter.hpp
@@ -37,13 +37,17 @@
 #include <pdal/Filter.hpp>
 #include <pdal/Polygon.hpp>
 #include <pdal/plugin.hpp>
-#include "filters/private/crop/Point.hpp"
 
 extern "C" int32_t CropFilter_ExitFunc();
 extern "C" PF_ExitFunc CropFilter_InitPlugin();
 
 namespace pdal
 {
+namespace cropfilter
+{
+    class Point;
+}
+
 
 class ProgramArgs;
 
@@ -53,7 +57,7 @@ class PDAL_DLL CropFilter : public Filter
 {
 public:
     CropFilter();
-
+    ~CropFilter();
     static void * create();
     static int32_t destroy(void *);
     std::string getName() const;
@@ -63,31 +67,25 @@ private:
     bool m_cropOutside;
     std::vector<Polygon> m_polys;
     SpatialReference m_assignedSrs;
-    SpatialReference m_lastSrs;
     double m_distance;
-    std::vector<cropfilter::Point> m_points;
-
-    struct GeomPkg
-    {
-        GeomPkg()
-        {}
-
-        Polygon m_geom;
-        Polygon m_geomXform;
-    };
-
-    std::vector<GeomPkg> m_geoms;
+    double m_distance2;
+    std::vector<cropfilter::Point> m_centers;
+    std::vector<Polygon> m_geoms;
 
     void addArgs(ProgramArgs& args);
     virtual void initialize();
     virtual void ready(PointTableRef table);
+    virtual void spatialReferenceChanged(const SpatialReference& srs);
     virtual bool processOne(PointRef& point);
     virtual PointViewSet run(PointViewPtr view);
-    bool crop(PointRef& point, const BOX2D& box);
+    bool crop(const PointRef& point, const BOX2D& box);
     void crop(const BOX2D& box, PointView& input, PointView& output);
-    bool crop(PointRef& point, const GeomPkg& g);
-    void crop(const GeomPkg& g, PointView& input, PointView& output);
-    void crop(const cropfilter::Point& point, double distance, PointView& input, PointView& output);
+    bool crop(const PointRef& point, const Polygon& g);
+    void crop(const Polygon& g, PointView& input, PointView& output);
+    bool crop(const PointRef& point, const cropfilter::Point& center);
+    void crop(const cropfilter::Point& center, PointView& input,
+        PointView& output);
+    void transform(const SpatialReference& srs);
 
     CropFilter& operator=(const CropFilter&); // not implemented
     CropFilter(const CropFilter&); // not implemented
diff --git a/filters/DividerFilter.cpp b/filters/DividerFilter.cpp
index 25ef9cd..5f3f29a 100644
--- a/filters/DividerFilter.cpp
+++ b/filters/DividerFilter.cpp
@@ -51,19 +51,15 @@ std::istream& operator>>(std::istream& in, DividerFilter::Mode& mode)
 {
     std::string s;
     in >> s;
-    
+
     s = Utils::tolower(s);
     if (s == "round_robin")
         mode = DividerFilter::Mode::RoundRobin;
     else if (s == "partition")
         mode = DividerFilter::Mode::Partition;
     else
-    {
-        std::ostringstream oss;
-        oss << "filters.divider: Invalid 'mode' option '" << s << "'. "
-            "Valid options are 'partition' and 'round_robin'";
-        throw pdal_error(oss.str());
-    }
+        throw pdal_error("filters.divider: Invalid 'mode' option '" + s + "'. "
+            "Valid options are 'partition' and 'round_robin'");
     return in;
 }
 
@@ -77,7 +73,7 @@ std::ostream& operator<<(std::ostream& out, const DividerFilter::Mode& mode)
     case DividerFilter::Mode::Partition:
         out << "partition";
     }
-    return out;    
+    return out;
 }
 
 
@@ -96,29 +92,14 @@ void DividerFilter::addArgs(ProgramArgs& args)
 void DividerFilter::initialize()
 {
     if (m_cntArg->set() && m_capArg->set())
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Can't specify both option 'count' and "
-            "option 'capacity.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Can't specify both option 'count' and option 'capacity.");
     if (!m_cntArg->set() && !m_capArg->set())
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Must specify either option 'count' or "
-            "option 'capacity'.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Must specify either option 'count' or option 'capacity'.");
     if (m_cntArg->set())
     {
         m_sizeMode = SizeMode::Count;
         if (m_size < 2 || m_size > 1000)
-        {
-            std::ostringstream oss;
-            oss << getName() << ": Option 'count' must be in the range "
-                "[2, 1000].";
-            throw pdal_error(oss.str());
-        }
+            throwError("Option 'count' must be in the range [2, 1000].");
     }
     if (m_capArg->set())
         m_sizeMode = SizeMode::Capacity;
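
With the tightened option checking, exactly one of ``count`` or ``capacity``
must be given. A round-robin sketch (filenames assumed; ``#`` in the writer
filename typically expands per output view):

    {
        "pipeline": [
            "input.las",
            {
                "type": "filters.divider",
                "mode": "round_robin",
                "count": 4
            },
            "split_#.las"
        ]
    }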
diff --git a/filters/EigenvaluesFilter.cpp b/filters/EigenvaluesFilter.cpp
index aea169b..7b74230 100644
--- a/filters/EigenvaluesFilter.cpp
+++ b/filters/EigenvaluesFilter.cpp
@@ -90,7 +90,7 @@ void EigenvaluesFilter::filter(PointView& view)
         // perform the eigen decomposition
         SelfAdjointEigenSolver<Matrix3f> solver(B);
         if (solver.info() != Success)
-            throw pdal_error("Cannot perform eigen decomposition.");
+            throwError("Cannot perform eigen decomposition.");
         auto ev = solver.eigenvalues();
 
         view.setField(m_e0, i, ev[0]);
diff --git a/filters/FerryFilter.cpp b/filters/FerryFilter.cpp
index 3316e2f..7c982b0 100644
--- a/filters/FerryFilter.cpp
+++ b/filters/FerryFilter.cpp
@@ -63,21 +63,13 @@ void FerryFilter::initialize()
     {
         StringList s = Utils::split2(dim, '=');
         if (s.size() != 2)
-        {
-            std::ostringstream oss;
-            oss << "Invalid dimension specified '" << dim <<
-                "'.  Need <from dimension>=<to dimension>.  See "
-                "documentation for details.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Invalid dimension specified '" + dim + "'.  Need "
+                "<from dimension>=<to dimension>.  See documentation for "
+                "details.");
         Utils::trim(s[0]);
         Utils::trim(s[1]);
         if (s[0] == s[1])
-        {
-            std::ostringstream oss;
-            oss << "Can't ferry dimension '" << s[0] << "' to itself.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Can't ferry dimension '" + s[0] + "' to itself.");
         m_name_map[s[0]] = s[1];
     }
 }
@@ -96,12 +88,8 @@ void FerryFilter::prepared(PointTableRef table)
 {
     for (const auto& dims : m_name_map)
         if (table.layout()->findDim(dims.first) == Dimension::Id::Unknown)
-        {
-            std::ostringstream oss;
-            oss << "Can't ferry dimension '" << dims.first << "'. "
-                "Dimension doesn't exist.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Can't ferry dimension '" + dims.first + "'. "
+                "Dimension doesn't exist.");
 }
 
 void FerryFilter::ready(PointTableRef table)
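
The spec format enforced above is ``<from dimension>=<to dimension>``, one
entry per mapping. A sketch (the ``dimensions`` option name is an assumption,
as it is not shown in this hunk; filenames and target dimension names are
placeholders):

    {
        "pipeline": [
            "input.las",
            {
                "type": "filters.ferry",
                "dimensions": "X=StartX, Y=StartY"
            },
            "output.las"
        ]
    }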
diff --git a/filters/OutlierFilter.hpp b/filters/GroupByFilter.cpp
similarity index 54%
copy from filters/OutlierFilter.hpp
copy to filters/GroupByFilter.cpp
index 7f42a7f..dffe358 100644
--- a/filters/OutlierFilter.hpp
+++ b/filters/GroupByFilter.cpp
@@ -32,56 +32,62 @@
  * OF SUCH DAMAGE.
  ****************************************************************************/
 
-#pragma once
+#include "GroupByFilter.hpp"
 
-#include <pdal/Filter.hpp>
-#include <pdal/plugin.hpp>
-
-#include <memory>
-#include <map>
-#include <string>
-
-extern "C" int32_t OutlierFilter_ExitFunc();
-extern "C" PF_ExitFunc OutlierFilter_InitPlugin();
+#include <pdal/pdal_macros.hpp>
+#include <pdal/util/ProgramArgs.hpp>
 
 namespace pdal
 {
 
-class Options;
+static PluginInfo const s_info =
+    PluginInfo("filters.groupby", "Split data categorically by dimension.",
+               "http://pdal.io/stages/filters.groupby.html");
+
+CREATE_STATIC_PLUGIN(1, 0, GroupByFilter, Filter, s_info)
 
-struct Indices
+GroupByFilter::GroupByFilter() : m_viewMap()
+{}
+
+std::string GroupByFilter::getName() const
 {
-    std::vector<PointId> inliers;
-    std::vector<PointId> outliers;
-};
+    return s_info.name;
+}
 
-class PDAL_DLL OutlierFilter : public pdal::Filter
+void GroupByFilter::addArgs(ProgramArgs& args)
 {
-public:
-    OutlierFilter() : Filter()
-    {}
+    args.add("dimension", "Dimension containing data to be grouped", m_dimName);
+}
 
-    static void * create();
-    static int32_t destroy(void *);
-    std::string getName() const;
+void GroupByFilter::prepared(PointTableRef table)
+{
+    PointLayoutPtr layout(table.layout());
+    m_dimId = layout->findDim(m_dimName);
+    if (m_dimId == Dimension::Id::Unknown)
+        throwError("Invalid dimension name '" + m_dimName + "'.");
+    // also need to check that we have a dimension with discrete values
+}
 
-private:
-    std::string m_method;
-    int m_minK;
-    double m_radius;
-    int m_meanK;
-    double m_multiplier;
-    bool m_classify;
-    bool m_extract;
+PointViewSet GroupByFilter::run(PointViewPtr inView)
+{
+    PointViewSet viewSet;
+    if (!inView->size())
+        return viewSet;
 
-    virtual void addDimensions(PointLayoutPtr layout);
-    virtual void addArgs(ProgramArgs& args);
-    Indices processRadius(PointViewPtr inView);
-    Indices processStatistical(PointViewPtr inView);
-    virtual PointViewSet run(PointViewPtr view);
+    for (PointId idx = 0; idx < inView->size(); idx++)
+    {
+        uint64_t val = inView->getFieldAs<uint64_t>(m_dimId, idx);
+        PointViewPtr& outView = m_viewMap[val];
+        if (!outView)
+            outView = inView->makeNew();
+        outView->appendPoint(*inView.get(), idx);
+    }
 
-    OutlierFilter& operator=(const OutlierFilter&); // not implemented
-    OutlierFilter(const OutlierFilter&); // not implemented
-};
+    // Pull the buffers out of the map and stick them in the standard
+    // output set.
+    for (auto bi = m_viewMap.begin(); bi != m_viewMap.end(); ++bi)
+        viewSet.insert(bi->second);
+    return viewSet;
+}
 
-} // namespace pdal
+} // namespace pdal
diff --git a/filters/OutlierFilter.hpp b/filters/GroupByFilter.hpp
similarity index 74%
copy from filters/OutlierFilter.hpp
copy to filters/GroupByFilter.hpp
index 7f42a7f..d381b84 100644
--- a/filters/OutlierFilter.hpp
+++ b/filters/GroupByFilter.hpp
@@ -37,51 +37,38 @@
 #include <pdal/Filter.hpp>
 #include <pdal/plugin.hpp>
 
-#include <memory>
 #include <map>
 #include <string>
 
-extern "C" int32_t OutlierFilter_ExitFunc();
-extern "C" PF_ExitFunc OutlierFilter_InitPlugin();
+extern "C" int32_t GroupByFilter_ExitFunc();
+extern "C" PF_ExitFunc GroupByFilter_InitPlugin();
 
 namespace pdal
 {
 
-class Options;
+class PointView;
+class ProgramArgs;
 
-struct Indices
-{
-    std::vector<PointId> inliers;
-    std::vector<PointId> outliers;
-};
-
-class PDAL_DLL OutlierFilter : public pdal::Filter
+class PDAL_DLL GroupByFilter : public Filter
 {
 public:
-    OutlierFilter() : Filter()
-    {}
+    GroupByFilter();
 
     static void * create();
     static int32_t destroy(void *);
     std::string getName() const;
 
 private:
-    std::string m_method;
-    int m_minK;
-    double m_radius;
-    int m_meanK;
-    double m_multiplier;
-    bool m_classify;
-    bool m_extract;
+    std::map<uint64_t, PointViewPtr> m_viewMap;
+    std::string m_dimName;
+    Dimension::Id m_dimId;
 
-    virtual void addDimensions(PointLayoutPtr layout);
     virtual void addArgs(ProgramArgs& args);
-    Indices processRadius(PointViewPtr inView);
-    Indices processStatistical(PointViewPtr inView);
+    virtual void prepared(PointTableRef table);
     virtual PointViewSet run(PointViewPtr view);
 
-    OutlierFilter& operator=(const OutlierFilter&); // not implemented
-    OutlierFilter(const OutlierFilter&); // not implemented
+    GroupByFilter& operator=(const GroupByFilter&); // not implemented
+    GroupByFilter(const GroupByFilter&); // not implemented
 };
 
 } // namespace pdal
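
Each distinct value of the grouping dimension yields its own PointView. A
sketch grouping on ``Classification`` (filenames assumed, with ``#`` in the
writer name expected to expand per output view):

    {
        "pipeline": [
            "input.las",
            {
                "type": "filters.groupby",
                "dimension": "Classification"
            },
            "class_#.las"
        ]
    }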
diff --git a/filters/HAGFilter.cpp b/filters/HAGFilter.cpp
index 64cf7fa..b2200b9 100644
--- a/filters/HAGFilter.cpp
+++ b/filters/HAGFilter.cpp
@@ -63,7 +63,7 @@ void HAGFilter::prepared(PointTableRef table)
 {
     const PointLayoutPtr layout(table.layout());
     if (!layout->hasDim(Dimension::Id::Classification))
-        throw pdal_error("HAGFilter: missing Classification dimension in input PointView");
+        throwError("Missing Classification dimension in input PointView.");
 }
 
 void HAGFilter::filter(PointView& view)
@@ -90,13 +90,14 @@ void HAGFilter::filter(PointView& view)
 
     // Bail if there weren't any points classified as ground.
     if (gView->size() == 0)
-        throw pdal_error("HAGFilter: the input PointView does not appear to have any points classified as ground");
+        throwError("Input PointView does not have any points classified "
+            "as ground");
 
     // Build the 2D KD-tree.
     KD2Index kdi(*gView);
     kdi.build();
 
-    // Second pass: Find Z difference between non-ground points and the nearest 
+    // Second pass: Find Z difference between non-ground points and the nearest
     // neighbor (2D) in the ground view.
     for (PointId i = 0; i < ngView->size(); ++i)
     {
diff --git a/filters/IQRFilter.cpp b/filters/IQRFilter.cpp
index de47a24..8b05f78 100644
--- a/filters/IQRFilter.cpp
+++ b/filters/IQRFilter.cpp
@@ -65,45 +65,40 @@ void IQRFilter::prepared(PointTableRef table)
     PointLayoutPtr layout(table.layout());
     m_dimId = layout->findDim(m_dimName);
     if (m_dimId == Dimension::Id::Unknown)
-    {
-        std::ostringstream oss;
-        oss << "Invalid dimension name in filters.iqr 'dimension' "
-            "option: '" << m_dimName << "'.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Dimension '" + m_dimName + "' does not exist.");
 }
 
 PointViewSet IQRFilter::run(PointViewPtr view)
 {
     using namespace Dimension;
 
-    PointViewSet viewSet;
     PointViewPtr output = view->makeNew();
 
     auto quartile = [](std::vector<double> vals, double percent)
     {
-        std::nth_element(vals.begin(), vals.begin()+int(vals.size()*percent), vals.end());
+        std::nth_element(vals.begin(),
+            vals.begin() + int(vals.size() * percent), vals.end());
 
-        return *(vals.begin()+int(vals.size()*percent));
+        return *(vals.begin() + int(vals.size() * percent));
     };
 
     std::vector<double> z(view->size());
     for (PointId j = 0; j < view->size(); ++j)
         z[j] = view->getFieldAs<double>(m_dimId, j);
-    
-    
+
+
     double pc25 = quartile(z, 0.25);
     log()->get(LogLevel::Debug) << "25th percentile: " << pc25 << std::endl;
 
     double pc75 = quartile(z, 0.75);
     log()->get(LogLevel::Debug) << "75th percentile: " << pc75 << std::endl;
-    
+
     double iqr = pc75-pc25;
     log()->get(LogLevel::Debug) << "IQR: " << iqr << std::endl;
-    
+
     double low_fence = pc25 - m_multiplier * iqr;
     double hi_fence = pc75 + m_multiplier * iqr;
-    
+
     for (PointId j = 0; j < view->size(); ++j)
     {
         double val = view->getFieldAs<double>(m_dimId, j);
@@ -114,9 +109,8 @@ PointViewSet IQRFilter::run(PointViewPtr view)
                                 << " in the range (" << low_fence
                                 << "," << hi_fence << ")" << std::endl;
 
-    viewSet.erase(view);
+    PointViewSet viewSet;
     viewSet.insert(output);
-
     return viewSet;
 }
 
diff --git a/filters/MortonOrderFilter.cpp b/filters/LocateFilter.cpp
similarity index 50%
copy from filters/MortonOrderFilter.cpp
copy to filters/LocateFilter.cpp
index 0702f0f..852a1fd 100644
--- a/filters/MortonOrderFilter.cpp
+++ b/filters/LocateFilter.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * Copyright (c) 2014, Bradley J Chambers (brad.chambers at gmail.com)
+ * Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
  *
  * All rights reserved.
  *
@@ -32,91 +32,75 @@
  * OF SUCH DAMAGE.
  ****************************************************************************/
 
-#include "MortonOrderFilter.hpp"
-#include <pdal/pdal_macros.hpp>
+#include "LocateFilter.hpp"
 
-#include <climits>
-#include <iostream>
-#include <limits>
-#include <map>
+#include <pdal/pdal_macros.hpp>
+#include <pdal/util/ProgramArgs.hpp>
+#include <pdal/util/Utils.hpp>
 
 namespace pdal
 {
 
-static PluginInfo const s_info = PluginInfo(
-    "filters.mortonorder",
-    "Morton or z-order sorting of points. See http://en.wikipedia.org/wiki/Z-order_curve for more detail.",
-    "http://pdal.io/stages/filters.mortonorder.html" );
-
-CREATE_STATIC_PLUGIN(1, 0, MortonOrderFilter, Filter, s_info)
+static PluginInfo const s_info =
+    PluginInfo("filters.locate",
+               "Return a single point with min/max value in the named dimension.",
+               "http://pdal.io/stages/filters.locate.html");
 
-std::string MortonOrderFilter::getName() const { return s_info.name; }
+CREATE_STATIC_PLUGIN(1, 0, LocateFilter, Filter, s_info)
 
-//This used to be a lambda, but the VS compiler exploded, I guess.
-typedef std::pair<double, double> Coord;
-namespace
+std::string LocateFilter::getName() const
 {
-bool less_msb(const int& x, const int& y)
-{
-    return x < y && x < (x ^ y);
-};
+    return s_info.name;
+}
 
-class CmpZOrder
+void LocateFilter::addArgs(ProgramArgs& args)
 {
-public:
-    bool operator()(const Coord& c1, const Coord& c2) const
-    {
-        int a[2] = {(int)(c1.first * INT_MAX), (int)(c1.second * INT_MAX)};
-        int b[2] = {(int)(c2.first * INT_MAX), (int)(c2.second * INT_MAX)};
-
-        int j = 0;
-        int x = 0;
+    args.add("dimension", "Dimension in which to locate max", m_dimName);
+    args.add("minmax", "Whether to search for the minimum or maximum value",
+        m_minmax, "max");
+}
 
-        for (int k = 0; k < 2; k++)
-        {
-            int y = a[k] ^ b[k];
-            if (less_msb(x, y))
-            {
-                j = k;
-                x = y;
-            }
-        }
-        return (a[j] - b[j]) < 0;
-    };
-};
+void LocateFilter::prepared(PointTableRef table)
+{
+    PointLayoutPtr layout(table.layout());
+    m_dimId = layout->findDim(m_dimName);
+    if (m_dimId == Dimension::Id::Unknown)
+        throwError("Invalid dimension '" + m_dimName + "'.");
 }
 
-PointViewSet MortonOrderFilter::run(PointViewPtr inView)
+PointViewSet LocateFilter::run(PointViewPtr inView)
 {
     PointViewSet viewSet;
     if (!inView->size())
         return viewSet;
-    CmpZOrder compare;
-    std::multimap<Coord, PointId, CmpZOrder> sorted(compare);
 
-    BOX2D buffer_bounds;
-    inView->calculateBounds(buffer_bounds);
-    double xrange = buffer_bounds.maxx - buffer_bounds.minx;
-    double yrange = buffer_bounds.maxy - buffer_bounds.miny;
+    PointId minidx(0), maxidx(0);
+    double minval = std::numeric_limits<double>::max();
+    double maxval = std::numeric_limits<double>::lowest();
 
     for (PointId idx = 0; idx < inView->size(); idx++)
     {
-        double xpos = (inView->getFieldAs<double>(Dimension::Id::X, idx) -
-            buffer_bounds.minx) / xrange;
-        double ypos = (inView->getFieldAs<double>(Dimension::Id::Y, idx) -
-            buffer_bounds.miny) / yrange;
-        Coord loc(xpos, ypos);
-        sorted.insert(std::make_pair(loc, idx));
+        double val = inView->getFieldAs<double>(m_dimId, idx);
+        if (val > maxval)
+        {
+            maxval = val;
+            maxidx = idx;
+        }
+        if (val < minval)
+        {
+            minval = val;
+            minidx = idx;
+        }
     }
 
     PointViewPtr outView = inView->makeNew();
-    std::multimap<Coord, PointId, CmpZOrder>::iterator pos;
-    for (pos = sorted.begin(); pos != sorted.end(); ++pos)
-    {
-        outView->appendPoint(*inView, pos->second);
-    }
-    viewSet.insert(outView);
 
+    if (Utils::iequals("min", m_minmax))
+        outView->appendPoint(*inView, minidx);
+    if (Utils::iequals("max", m_minmax))
+        outView->appendPoint(*inView, maxidx);
+
+    viewSet.insert(outView);
     return viewSet;
 }
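
The run() loop above is a single pass that tracks both extrema and their
point ids. It is equivalent to std::minmax_element over the extracted
dimension values; a standalone sketch, with a plain vector standing in for
the PointView:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Values read from the located dimension; a vector stands in for
        // the PointView here.
        std::vector<double> vals{ 3.2, -1.0, 7.5, 0.4 };

        // One pass returns iterators to both extrema; their offsets play
        // the role of minidx/maxidx in the filter.
        auto mm = std::minmax_element(vals.begin(), vals.end());
        std::cout << "minidx=" << (mm.first - vals.begin())
                  << " maxidx=" << (mm.second - vals.begin()) << "\n";
        return 0;
    }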
 
diff --git a/filters/OutlierFilter.hpp b/filters/LocateFilter.hpp
similarity index 74%
copy from filters/OutlierFilter.hpp
copy to filters/LocateFilter.hpp
index 7f42a7f..265ed48 100644
--- a/filters/OutlierFilter.hpp
+++ b/filters/LocateFilter.hpp
@@ -37,28 +37,22 @@
 #include <pdal/Filter.hpp>
 #include <pdal/plugin.hpp>
 
-#include <memory>
 #include <map>
 #include <string>
 
-extern "C" int32_t OutlierFilter_ExitFunc();
-extern "C" PF_ExitFunc OutlierFilter_InitPlugin();
+extern "C" int32_t LocateFilter_ExitFunc();
+extern "C" PF_ExitFunc LocateFilter_InitPlugin();
 
 namespace pdal
 {
 
-class Options;
+class PointView;
+class ProgramArgs;
 
-struct Indices
-{
-    std::vector<PointId> inliers;
-    std::vector<PointId> outliers;
-};
-
-class PDAL_DLL OutlierFilter : public pdal::Filter
+class PDAL_DLL LocateFilter : public Filter
 {
 public:
-    OutlierFilter() : Filter()
+    LocateFilter() : Filter()
     {}
 
     static void * create();
@@ -66,22 +60,16 @@ public:
     std::string getName() const;
 
 private:
-    std::string m_method;
-    int m_minK;
-    double m_radius;
-    int m_meanK;
-    double m_multiplier;
-    bool m_classify;
-    bool m_extract;
+    std::string m_dimName;
+    Dimension::Id m_dimId;
+    std::string m_minmax;
 
-    virtual void addDimensions(PointLayoutPtr layout);
     virtual void addArgs(ProgramArgs& args);
-    Indices processRadius(PointViewPtr inView);
-    Indices processStatistical(PointViewPtr inView);
+    virtual void prepared(PointTableRef table);
     virtual PointViewSet run(PointViewPtr view);
 
-    OutlierFilter& operator=(const OutlierFilter&); // not implemented
-    OutlierFilter(const OutlierFilter&); // not implemented
+    LocateFilter& operator=(const LocateFilter&); // not implemented
+    LocateFilter(const LocateFilter&); // not implemented
 };
 
 } // namespace pdal
diff --git a/filters/MADFilter.cpp b/filters/MADFilter.cpp
index 8cc6fb4..90a7e0f 100644
--- a/filters/MADFilter.cpp
+++ b/filters/MADFilter.cpp
@@ -66,19 +66,13 @@ void MADFilter::prepared(PointTableRef table)
     PointLayoutPtr layout(table.layout());
     m_dimId = layout->findDim(m_dimName);
     if (m_dimId == Dimension::Id::Unknown)
-    {
-        std::ostringstream oss;
-        oss << "Invalid dimension name in filters.mad 'dimension' "
-            "option: '" << m_dimName << "'.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Dimension '" + m_dimName + "' does not exist.");
 }
 
 PointViewSet MADFilter::run(PointViewPtr view)
 {
     using namespace Dimension;
 
-    PointViewSet viewSet;
     PointViewPtr output = view->makeNew();
 
     auto estimate_median = [](std::vector<double> vals)
@@ -92,7 +86,8 @@ PointViewSet MADFilter::run(PointViewPtr view)
         z[j] = view->getFieldAs<double>(m_dimId, j);
 
     double median = estimate_median(z);
-    log()->get(LogLevel::Debug) << getName() << " estimated median value: " << median << std::endl;
+    log()->get(LogLevel::Debug) << getName() <<
+        " estimated median value: " << median << std::endl;
 
     std::transform(z.begin(), z.end(), z.begin(),
        [median](double v) { return std::fabs(v - median); });
@@ -112,9 +107,8 @@ PointViewSet MADFilter::run(PointViewPtr view)
                                 << " in the range (" << low_fence
                                 << "," << hi_fence << ")" << std::endl;
 
-    viewSet.erase(view);
+    PointViewSet viewSet;
     viewSet.insert(output);
-
     return viewSet;
 }
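
The MAD filter follows the same recipe in both hunks: estimate the median,
replace each value by its absolute deviation from that median, then take the
median of the deviations. A minimal sketch of that two-step estimate; the
even-size averaging in median() is an assumption, since estimate_median's
body is not shown in this hunk:

    #include <algorithm>
    #include <cmath>
    #include <iostream>
    #include <vector>

    // Median via nth_element; averaging the two middle elements for even
    // sizes is assumed behavior, not quoted from the filter.
    static double median(std::vector<double> v)
    {
        auto mid = v.begin() + v.size() / 2;
        std::nth_element(v.begin(), mid, v.end());
        double m = *mid;
        if (v.size() % 2 == 0)
            m = 0.5 * (m + *std::max_element(v.begin(), mid));
        return m;
    }

    int main()
    {
        std::vector<double> z{ 1.0, 1.5, 2.0, 2.5, 100.0 };
        double med = median(z);
        std::vector<double> dev(z);
        for (double& v : dev)            // absolute deviations, as in the
            v = std::fabs(v - med);      // std::transform call above
        std::cout << "median=" << med << " MAD=" << median(dev) << "\n";
        return 0;
    }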
 
diff --git a/filters/MongusFilter.cpp b/filters/MongusFilter.cpp
index e64f900..5f84fc9 100644
--- a/filters/MongusFilter.cpp
+++ b/filters/MongusFilter.cpp
@@ -180,7 +180,8 @@ std::vector<PointId> MongusFilter::processGround(PointViewPtr view)
         if ((mc(i) - cz(i)) >= 1.0)
             cz(i) = mc(i);
     }
-    // cz is still at native resolution, with low points replaced by morphological operators
+    // cz is still at native resolution, with low points replaced by
+    // morphological operators
     writeControl(cx, cy, cz, "grid_mins_adjusted.laz");
 
     // downsample control at max_level
@@ -193,7 +194,8 @@ std::vector<PointId> MongusFilter::processGround(PointViewPtr view)
     // Top-level control samples are assumed to be ground points, no filtering
     // is applied.
     downsampleMin(&cx, &cy, &cz, &x_prev, &y_prev, &z_prev, cur_cell_size);
-    // x|y|z_prev are control points downsampled to coarsest resolution for the hierarchy, e.g., for 512x512, this would be 2x2
+    // x|y|z_prev are control points downsampled to coarsest resolution for
+    // the hierarchy, e.g., for 512x512, this would be 2x2
     writeControl(x_prev, y_prev, z_prev, "control_init.laz");
 
     // Point-filtering is performed iteratively at each level of the
@@ -216,7 +218,8 @@ std::vector<PointId> MongusFilter::processGround(PointViewPtr view)
         downsampleMin(&cx, &cy, &cz, &x_samp, &y_samp, &z_samp, cur_cell_size);
         // 4x4, 8x8, 16x16, 32x32, 64x64, 128x128, 256x256
 
-        MatrixXd surface = eigen::computeSpline(x_prev, y_prev, z_prev, x_samp, y_samp);
+        MatrixXd surface = eigen::computeSpline(x_prev, y_prev, z_prev,
+            x_samp, y_samp);
 
         // if (l == 3)
         // {
@@ -279,7 +282,8 @@ std::vector<PointId> MongusFilter::processGround(PointViewPtr view)
             std::cerr << "median troubleshooting\n";
             std::cerr << vals.size() << "\t" << cp.size() << std::endl;
             std::cerr << cp.size() % 2 << std::endl;
-            std::cerr << cp[cp.size()/2-1] << "\t" << cp[cp.size()/2] << std::endl;
+            std::cerr << cp[cp.size()/2-1] << "\t" <<
+                cp[cp.size()/2] << std::endl;
             if (l == 7)
             {
                 for (auto const& v : cp)
@@ -510,7 +514,8 @@ PointViewSet MongusFilter::run(PointViewPtr view)
 
         if (m_classify)
         {
-            log()->get(LogLevel::Debug2) << "Labeled " << idx.size() << " ground returns!\n";
+            log()->get(LogLevel::Debug2) << "Labeled " << idx.size() <<
+                " ground returns!\n";
 
             // set the classification label of ground returns as 2
             // (corresponding to ASPRS LAS specification)
@@ -524,7 +529,8 @@ PointViewSet MongusFilter::run(PointViewPtr view)
 
         if (m_extract)
         {
-            log()->get(LogLevel::Debug2) << "Extracted " << idx.size() << " ground returns!\n";
+            log()->get(LogLevel::Debug2) << "Extracted " << idx.size() <<
+                " ground returns!\n";
 
             // create new PointView containing only ground returns
             PointViewPtr output = view->makeNew();
@@ -540,10 +546,12 @@ PointViewSet MongusFilter::run(PointViewPtr view)
     else
     {
         if (idx.empty())
-            log()->get(LogLevel::Debug2) << "Filtered cloud has no ground returns!\n";
+            log()->get(LogLevel::Debug2) << "Filtered cloud has no "
+                "ground returns!\n";
 
         if (!(m_classify || m_extract))
-            log()->get(LogLevel::Debug2) << "Must choose --classify or --extract\n";
+            log()->get(LogLevel::Debug2) << "Must choose --classify or "
+                "--extract\n";
 
         // return the view buffer unchanged
         viewSet.insert(view);
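
The hunks above are line-wrapping only, but the downsampling comment is easy
to make concrete: each level halves the control grid, so a 512x512 grid
reaches the 2x2 coarsest level after eight halvings. A tiny sketch, assuming
power-of-two grid dimensions:

    #include <iostream>

    int main()
    {
        // Halve a square control grid to the coarsest level, mirroring the
        // 512x512 -> 2x2 example in the comment above.
        for (int n = 512, level = 0; n >= 2; n /= 2, ++level)
            std::cout << "level " << level << ": " << n << "x" << n << "\n";
        return 0;
    }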
diff --git a/filters/MortonOrderFilter.cpp b/filters/MortonOrderFilter.cpp
index 0702f0f..8331cf6 100644
--- a/filters/MortonOrderFilter.cpp
+++ b/filters/MortonOrderFilter.cpp
@@ -45,7 +45,8 @@ namespace pdal
 
 static PluginInfo const s_info = PluginInfo(
     "filters.mortonorder",
-    "Morton or z-order sorting of points. See http://en.wikipedia.org/wiki/Z-order_curve for more detail.",
+    "Morton or z-order sorting of points. See "
+        "http://en.wikipedia.org/wiki/Z-order_curve for more detail.",
     "http://pdal.io/stages/filters.mortonorder.html" );
 
 CREATE_STATIC_PLUGIN(1, 0, MortonOrderFilter, Filter, s_info)
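 
The rewrapped description is a good excuse to show the trick this filter is
built on (visible in the lines the LocateFilter copy removed earlier): two
points compare in Morton order by finding which coordinate's XOR has the
higher most-significant bit. A standalone sketch of the same comparator on
small unsigned coordinates:

    #include <iostream>
    #include <utility>

    // True when the highest set bit of x is below that of y -- the same
    // less_msb test the filter uses.
    static bool less_msb(unsigned x, unsigned y)
    {
        return x < y && x < (x ^ y);
    }

    // Compare two 2-D points in z-order without building the interleaved
    // Morton key: the dimension whose XOR dominates decides the order.
    static bool zorderLess(std::pair<unsigned, unsigned> a,
                           std::pair<unsigned, unsigned> b)
    {
        unsigned p[2] = { a.first, a.second };
        unsigned q[2] = { b.first, b.second };
        int j = 0;
        unsigned x = 0;
        for (int k = 0; k < 2; k++)
        {
            unsigned y = p[k] ^ q[k];
            if (less_msb(x, y))
            {
                j = k;
                x = y;
            }
        }
        return p[j] < q[j];
    }

    int main()
    {
        std::cout << zorderLess({ 1, 2 }, { 2, 1 }) << "\n";
        return 0;
    }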
diff --git a/filters/NormalFilter.cpp b/filters/NormalFilter.cpp
index e31459f..4cc4770 100644
--- a/filters/NormalFilter.cpp
+++ b/filters/NormalFilter.cpp
@@ -48,7 +48,7 @@ namespace pdal
 {
 
 static PluginInfo const s_info =
-    PluginInfo("filters.normal", "Normal Filter", 
+    PluginInfo("filters.normal", "Normal Filter",
                "http://pdal.io/stages/filters.normal.html");
 
 CREATE_STATIC_PLUGIN(1, 0, NormalFilter, Filter, s_info)
@@ -70,7 +70,8 @@ void NormalFilter::addDimensions(PointLayoutPtr layout)
     m_nx = layout->registerOrAssignDim("NormalX", Dimension::Type::Double);
     m_ny = layout->registerOrAssignDim("NormalY", Dimension::Type::Double);
     m_nz = layout->registerOrAssignDim("NormalZ", Dimension::Type::Double);
-    m_curvature = layout->registerOrAssignDim("Curvature", Dimension::Type::Double);
+    m_curvature = layout->registerOrAssignDim("Curvature",
+        Dimension::Type::Double);
 }
 
 void NormalFilter::filter(PointView& view)
@@ -91,7 +92,7 @@ void NormalFilter::filter(PointView& view)
         // perform the eigen decomposition
         SelfAdjointEigenSolver<Matrix3f> solver(B);
         if (solver.info() != Success)
-            throw pdal_error("Cannot perform eigen decomposition.");
+            throwError("Cannot perform eigen decomposition.");
         auto eval = solver.eigenvalues();
         auto evec = solver.eigenvectors().col(0);
 
@@ -100,10 +101,7 @@ void NormalFilter::filter(PointView& view)
         view.setField(m_nz, i, evec[2]);
 
         double sum = eval[0] + eval[1] + eval[2];
-        if (sum != 0)
-            view.setField(m_curvature, i, std::fabs(eval[0]/sum));
-        else
-            view.setField(m_curvature, i, 0);
+        view.setField(m_curvature, i, sum ? std::fabs(eval[0] / sum) : 0);
     }
 }
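
The curvature assignment above folds the old if/else into one expression:
smallest eigenvalue over the eigenvalue sum, guarded against a zero sum. A
minimal sketch of that decomposition step, assuming Eigen is available as it
is for the filter (the covariance values here are stand-in data):

    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    int main()
    {
        using namespace Eigen;

        // Symmetric 3x3 covariance of a point neighborhood (stand-in data).
        Matrix3f B;
        B << 2.0f, 0.1f, 0.0f,
             0.1f, 1.0f, 0.0f,
             0.0f, 0.0f, 0.1f;

        SelfAdjointEigenSolver<Matrix3f> solver(B);
        if (solver.info() != Success)
            return 1;  // the filter calls throwError() here instead

        // Eigenvalues come back ascending, so index 0 is the smallest and
        // its eigenvector is the normal estimate.
        auto eval = solver.eigenvalues();
        float sum = eval[0] + eval[1] + eval[2];
        std::cout << (sum ? std::fabs(eval[0] / sum) : 0) << "\n";
        return 0;
    }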
 
diff --git a/filters/OutlierFilter.cpp b/filters/OutlierFilter.cpp
index f99757c..394789c 100644
--- a/filters/OutlierFilter.cpp
+++ b/filters/OutlierFilter.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
+ * Copyright (c) 2016-2017, Bradley J Chambers (brad.chambers at gmail.com)
  *
  * All rights reserved.
  *
@@ -35,9 +35,9 @@
 #include "OutlierFilter.hpp"
 
 #include <pdal/KDIndex.hpp>
-#include <pdal/util/Utils.hpp>
 #include <pdal/pdal_macros.hpp>
 #include <pdal/util/ProgramArgs.hpp>
+#include <pdal/util/Utils.hpp>
 
 #include <string>
 #include <vector>
@@ -56,26 +56,22 @@ std::string OutlierFilter::getName() const
     return s_info.name;
 }
 
-
 void OutlierFilter::addArgs(ProgramArgs& args)
 {
     args.add("method", "Method [default: statistical]", m_method,
-        "statistical");
+             "statistical");
     args.add("min_k", "Minimum number of neighbors in radius", m_minK, 2);
     args.add("radius", "Radius", m_radius, 1.0);
     args.add("mean_k", "Mean number of neighbors", m_meanK, 8);
     args.add("multiplier", "Standard deviation threshold", m_multiplier, 2.0);
-    args.add("classify", "Apply classification labels?", m_classify, true);
-    args.add("extract", "Extract ground returns?", m_extract);
+    args.add("class", "Class to use for noise points", m_class, uint8_t(7));
 }
 
-
 void OutlierFilter::addDimensions(PointLayoutPtr layout)
 {
     layout->registerDim(Dimension::Id::Classification);
 }
 
-
 Indices OutlierFilter::processRadius(PointViewPtr inView)
 {
     KD3Index index(*inView);
@@ -97,7 +93,6 @@ Indices OutlierFilter::processRadius(PointViewPtr inView)
     return Indices{inliers, outliers};
 }
 
-
 Indices OutlierFilter::processStatistical(PointViewPtr inView)
 {
     KD3Index index(*inView);
@@ -107,7 +102,7 @@ Indices OutlierFilter::processStatistical(PointViewPtr inView)
 
     std::vector<PointId> inliers, outliers;
 
-    std::vector<double> distances(np);
+    std::vector<double> distances(np, 0.0);
     for (PointId i = 0; i < np; ++i)
     {
         // we increase the count by one because the query point itself will
@@ -118,21 +113,29 @@ Indices OutlierFilter::processStatistical(PointViewPtr inView)
         std::vector<double> sqr_dists(count);
         index.knnSearch(i, count, &indices, &sqr_dists);
 
-        double dist_sum = 0.0;
-        for (auto const& d : sqr_dists)
-            dist_sum += sqrt(d);
-        distances[i] = dist_sum / m_meanK;
+        for (size_t j = 1; j < count; ++j)
+        {
+            double delta = std::sqrt(sqr_dists[j]) - distances[i];
+            distances[i] += (delta / j);
+        }
     }
 
-    double sum = 0.0, sq_sum = 0.0;
+    size_t n(0);
+    double M1(0.0);
+    double M2(0.0);
     for (auto const& d : distances)
     {
-        sum += d;
-        sq_sum += d * d;
+        size_t n1(n);
+        n++;
+        double delta = d - M1;
+        double delta_n = delta / n;
+        M1 += delta_n;
+        M2 += delta * delta_n * n1;
     }
-    double mean = sum / np;
-    double variance = (sq_sum - sum * sum / np) / (np - 1);
-    double stdev = sqrt(variance);
+    double mean = M1;
+    double variance = M2 / (n - 1.0);
+    double stdev = std::sqrt(variance);
+
     double threshold = mean + m_multiplier * stdev;
 
     for (PointId i = 0; i < np; ++i)
@@ -146,7 +149,6 @@ Indices OutlierFilter::processStatistical(PointViewPtr inView)
     return Indices{inliers, outliers};
 }
 
-
 PointViewSet OutlierFilter::run(PointViewPtr inView)
 {
     PointViewSet viewSet;
@@ -165,7 +167,8 @@ PointViewSet OutlierFilter::run(PointViewPtr inView)
     else
     {
         log()->get(LogLevel::Warning) << "Requested method is unrecognized. "
-                                      << "Please choose from \"statistical\" " << "or \"radius\".\n";
+                                         "Please choose from \"statistical\" "
+                                         "or \"radius\".\n";
         viewSet.insert(inView);
         return viewSet;
     }
@@ -173,51 +176,28 @@ PointViewSet OutlierFilter::run(PointViewPtr inView)
     if (indices.inliers.empty())
     {
         log()->get(LogLevel::Warning) << "Requested filter would remove all "
-                                      << "points. Try a larger radius/smaller " << "minimum neighbors.\n";
+                                         "points. Try a larger radius/smaller "
+                                         "minimum neighbors.\n";
         viewSet.insert(inView);
         return viewSet;
     }
 
-    if (!indices.outliers.empty() && (m_classify || m_extract))
+    if (!indices.outliers.empty())
     {
-        if (m_classify)
-        {
-            log()->get(LogLevel::Debug2) << "Labeled "
-                                         << indices.outliers.size()
-                                         << " outliers as noise!\n";
-
-            // set the classification label of outlier returns as 18
-            // (corresponding to ASPRS LAS specification for high noise)
-            for (const auto& i : indices.outliers)
-                inView->setField(Dimension::Id::Classification, i, 18);
+        log()->get(LogLevel::Debug2)
+            << "Labeled " << indices.outliers.size() << " outliers as noise!\n";
 
-            viewSet.insert(inView);
-        }
-
-        if (m_extract)
-        {
-            log()->get(LogLevel::Debug2) << "Extracted "
-                                         << indices.inliers.size()
-                                         << " inliers!\n";
+        // set the classification label of outlier returns
+        for (const auto& i : indices.outliers)
+            inView->setField(Dimension::Id::Classification, i, m_class);
 
-            // create new PointView containing only outliers
-            PointViewPtr output = inView->makeNew();
-            for (const auto& i : indices.inliers)
-                output->appendPoint(*inView, i);
-
-            viewSet.erase(inView);
-            viewSet.insert(output);
-        }
+        viewSet.insert(inView);
     }
     else
     {
         if (indices.outliers.empty())
-            log()->get(LogLevel::Warning) << "Filtered cloud has no "
-                                          << "outliers!\n";
-
-        if (!(m_classify || m_extract))
-            log()->get(LogLevel::Warning) << "Must choose --classify or "
-                                          << "--extract\n";
+            log()->get(LogLevel::Warning)
+                << "Filtered cloud has no outliers!\n";
 
         // return the input buffer unchanged
         viewSet.insert(inView);
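
The mean/variance rewrite in processStatistical() is Welford's one-pass
update: M1 tracks the running mean and M2 the running sum of squared
deviations, which avoids the cancellation the old sum/sq_sum form suffers
when the mean is large relative to the spread. A standalone sketch of the
same accumulation:

    #include <cmath>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<double> distances{ 1.0, 2.0, 3.0, 4.0 };

        // Welford's update, as in the hunk above: M1 is the running mean,
        // M2 the running sum of squared deviations from it.
        size_t n = 0;
        double M1 = 0.0, M2 = 0.0;
        for (double d : distances)
        {
            size_t n1 = n++;
            double delta = d - M1;
            double delta_n = delta / n;
            M1 += delta_n;
            M2 += delta * delta_n * n1;
        }

        double mean = M1;
        double stdev = std::sqrt(M2 / (n - 1.0));  // sample std deviation
        std::cout << "threshold = " << mean + 2.0 * stdev << "\n";
        return 0;
    }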
diff --git a/filters/OutlierFilter.hpp b/filters/OutlierFilter.hpp
index 7f42a7f..c76f05a 100644
--- a/filters/OutlierFilter.hpp
+++ b/filters/OutlierFilter.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
+ * Copyright (c) 2016-2017, Bradley J Chambers (brad.chambers at gmail.com)
  *
  * All rights reserved.
  *
@@ -37,8 +37,8 @@
 #include <pdal/Filter.hpp>
 #include <pdal/plugin.hpp>
 
-#include <memory>
 #include <map>
+#include <memory>
 #include <string>
 
 extern "C" int32_t OutlierFilter_ExitFunc();
@@ -59,10 +59,11 @@ class PDAL_DLL OutlierFilter : public pdal::Filter
 {
 public:
     OutlierFilter() : Filter()
-    {}
+    {
+    }
 
-    static void * create();
-    static int32_t destroy(void *);
+    static void* create();
+    static int32_t destroy(void*);
     std::string getName() const;
 
 private:
@@ -71,8 +72,7 @@ private:
     double m_radius;
     int m_meanK;
     double m_multiplier;
-    bool m_classify;
-    bool m_extract;
+    uint8_t m_class;
 
     virtual void addDimensions(PointLayoutPtr layout);
     virtual void addArgs(ProgramArgs& args);
@@ -81,7 +81,7 @@ private:
     virtual PointViewSet run(PointViewPtr view);
 
     OutlierFilter& operator=(const OutlierFilter&); // not implemented
-    OutlierFilter(const OutlierFilter&); // not implemented
+    OutlierFilter(const OutlierFilter&);            // not implemented
 };
 
 } // namespace pdal
diff --git a/filters/AttributeFilter.cpp b/filters/OverlayFilter.cpp
similarity index 51%
rename from filters/AttributeFilter.cpp
rename to filters/OverlayFilter.cpp
index 9268a5c..ab7d8f5 100644
--- a/filters/AttributeFilter.cpp
+++ b/filters/OverlayFilter.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2014, Howard Butler, howard at hobu.co
+* Copyright (c) 2017, Hobu Inc., info at hobu.co
 *
 * All rights reserved.
 *
@@ -32,28 +32,25 @@
 * OF SUCH DAMAGE.
 ****************************************************************************/
 
-#include "AttributeFilter.hpp"
+#include "OverlayFilter.hpp"
 
-#include <memory>
 #include <vector>
 
 #include <pdal/GDALUtils.hpp>
-#include <pdal/pdal_macros.hpp>
-#include <pdal/Polygon.hpp>
 #include <pdal/QuadIndex.hpp>
-#include <pdal/StageFactory.hpp>
 #include <pdal/util/ProgramArgs.hpp>
+#include <pdal/pdal_macros.hpp>
 
 namespace pdal
 {
 
 static PluginInfo const s_info = PluginInfo(
-    "filters.attribute",
-    "Assign values for a dimension using a specified value, \n" \
-        "an OGR-readable data source, or an OGR SQL query.",
-    "http://pdal.io/stages/filters.attribute.html" );
+    "filters.overlay",
+    "Assign values to a dimension based on the extent of an OGR-readable data "
+    " source or an OGR SQL query.",
+    "http://pdal.io/stages/filters.overlay.html" );
 
-CREATE_STATIC_PLUGIN(1, 0, AttributeFilter, Filter, s_info)
+CREATE_STATIC_PLUGIN(1, 0, OverlayFilter, Filter, s_info)
 
 struct OGRDataSourceDeleter
 {
@@ -76,87 +73,40 @@ struct OGRFeatureDeleter
 };
 
 
-void AttributeFilter::addArgs(ProgramArgs& args)
+void OverlayFilter::addArgs(ProgramArgs& args)
 {
     args.add("dimension", "Dimension on which to filter", m_dimName).
         setPositional();
-    m_valArg = &args.add("value", "Value to set on matching points", m_value,
-        std::numeric_limits<double>::quiet_NaN());
-    m_dsArg = &args.add("datasource", "OGR-readable datasource for Polygon or "
-        "Multipolygon data", m_datasource);
-    m_colArg = &args.add("column", "OGR datasource column from which to "
+    args.add("datasource", "OGR-readable datasource for Polygon or "
+        "Multipolygon data", m_datasource).setPositional();
+    args.add("column", "OGR datasource column from which to "
         "read the attribute.", m_column);
-    m_queryArg = &args.add("query", "OGR SQL query to execute on the "
+    args.add("query", "OGR SQL query to execute on the "
         "datasource to fetch geometry and attributes", m_query);
-    m_layerArg = &args.add("layer", "Datasource layer to use", m_layer);
+    args.add("layer", "Datasource layer to use", m_layer);
 }
 
 
-void AttributeFilter::initialize()
+void OverlayFilter::initialize()
 {
-    if (m_valArg->set() && m_dsArg->set())
-    {
-        std::ostringstream oss;
-        oss << getName() << ": options 'value' and 'datasource' mutually "
-            "exclusive.";
-        throw pdal_error(oss.str());
-    }
-
-    if (!m_valArg->set() && !m_dsArg->set())
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Either option 'value' or 'datasource' must "
-            "be specified.";
-        throw pdal_error(oss.str());
-    }
-
-    Arg *args[] = { m_colArg, m_queryArg, m_layerArg };
-    for (auto& a : args)
-    {
-        if (m_valArg->set() && a->set())
-        {
-            std::ostringstream oss;
-            oss << getName() << ": option '" << a->longname() << "' invalid "
-                "with option 'value'.";
-            throw pdal_error(oss.str());
-        }
-    }
     gdal::registerDrivers();
 }
 
 
-void AttributeFilter::prepared(PointTableRef table)
+void OverlayFilter::prepared(PointTableRef table)
 {
     m_dim = table.layout()->findDim(m_dimName);
     if (m_dim == Dimension::Id::Unknown)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Dimension '" << m_dimName << "' not found.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Dimension '" + m_dimName + "' not found.");
 }
 
 
-void AttributeFilter::ready(PointTableRef table)
+void OverlayFilter::ready(PointTableRef table)
 {
-    if (m_value != m_value)
-    {
-        m_ds = OGRDSPtr(OGROpen(m_datasource.c_str(), 0, 0),
+    m_ds = OGRDSPtr(OGROpen(m_datasource.c_str(), 0, 0),
             OGRDataSourceDeleter());
-        if (!m_ds)
-        {
-            std::ostringstream oss;
-            oss << getName() << ": Unable to open data source '" <<
-                    m_datasource << "'";
-            throw pdal_error(oss.str());
-        }
-    }
-}
-
-
-void AttributeFilter::UpdateGEOSBuffer(PointView& view)
-{
-    QuadIndex idx(view);
+    if (!m_ds)
+        throwError("Unable to open data source '" + m_datasource + "'");
 
     if (m_layer.size())
         m_lyr = OGR_DS_GetLayerByName(m_ds.get(), m_layer.c_str());
@@ -166,11 +116,7 @@ void AttributeFilter::UpdateGEOSBuffer(PointView& view)
         m_lyr = OGR_DS_GetLayer(m_ds.get(), 0);
 
     if (!m_lyr)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Unable to select layer '" << m_layer << "'";
-        throw pdal_error(oss.str());
-    }
+        throwError("Unable to select layer '" + m_layer + "'");
 
     OGRFeaturePtr feature = OGRFeaturePtr(OGR_L_GetNextFeature(m_lyr),
         OGRFeatureDeleter());
@@ -180,15 +126,10 @@ void AttributeFilter::UpdateGEOSBuffer(PointView& view)
     {
         field_index = OGR_F_GetFieldIndex(feature.get(), m_column.c_str());
         if (field_index == -1)
-        {
-            std::ostringstream oss;
-            oss << getName() << ": No column name '" << m_column <<
-                "' was found.";
-            throw pdal_error(oss.str());
-        }
+            throwError("No column name '" + m_column + "' was found.");
     }
 
-    while (feature)
+    do
     {
         OGRGeometryH geom = OGR_F_GetGeometryRef(feature.get());
         OGRwkbGeometryType t = OGR_G_GetGeometryType(geom);
@@ -199,39 +140,61 @@ void AttributeFilter::UpdateGEOSBuffer(PointView& view)
             t == wkbPolygon25D ||
             t == wkbMultiPolygon25D))
         {
-            std::ostringstream oss;
-            oss << getName() << ": Geometry is not Polygon or MultiPolygon!";
-            throw pdal::pdal_error(oss.str());
+            throwError("Geometry is not Polygon or MultiPolygon!");
         }
 
-        pdal::Polygon p(geom, view.spatialReference());
+        // Don't think Polygon meets criteria for implicit move ctor.
+        m_polygons.push_back(
+            { Polygon(geom, table.anySpatialReference()), fieldVal });
 
-        // Compute a total bounds for the geometry. Query the QuadTree to
-        // find out the points that are inside the bbox. Then test each
-        // point in the bbox against the prepared geometry.
-        BOX3D box = p.bounds();
-        std::vector<PointId> ids = idx.getPoints(box);
+        feature = OGRFeaturePtr(OGR_L_GetNextFeature(m_lyr),
+            OGRFeatureDeleter());
+    }
+    while (feature);
+}
 
 
-        for (const auto& i : ids)
+void OverlayFilter::spatialReferenceChanged(const SpatialReference& srs)
+{
+    for (auto& poly : m_polygons)
+    {
+        try
         {
-            PointRef ref(view, i);
-            if (p.covers(ref))
-                view.setField(m_dim, i, fieldVal);
+            poly.geom = poly.geom.transform(srs);
+        }
+        catch (pdal_error& err)
+        {
+            throwError(err.what());
         }
-        feature = OGRFeaturePtr(OGR_L_GetNextFeature(m_lyr),
-            OGRFeatureDeleter());
     }
 }
 
 
-void AttributeFilter::filter(PointView& view)
+bool OverlayFilter::processOne(PointRef& point)
 {
-    if (m_value == m_value)
-        for (PointId i = 0; i < view.size(); ++i)
-            view.setField(m_dim, i, m_value);
-    else
-        UpdateGEOSBuffer(view);
+    for (const auto& poly : m_polygons)
+        if (poly.geom.covers(point))
+            point.setField(m_dim, poly.val);
+    return true;
+}
+
+
+void OverlayFilter::filter(PointView& view)
+{
+    QuadIndex idx(view);
+
+    for (const auto& poly : m_polygons)
+    {
+        std::vector<PointId> ids = idx.getPoints(poly.geom.bounds());
+
+        PointRef point(view, 0);
+        for (PointId id : ids)
+        {
+            point.setPointId(id);
+            if (poly.geom.covers(point))
+                point.setField(m_dim, poly.val);
+        }
+    }
 }
 
 } // namespace pdal
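
The rework caches every (geometry, value) pair at ready() time so that
processOne() can tag points one at a time in streaming mode, while filter()
keeps the QuadIndex fast path for whole views. A schematic sketch of the
streaming loop using hypothetical stand-in types (an axis-aligned box plays
the role of pdal::Polygon, a plain struct the role of PointRef):

    #include <iostream>
    #include <vector>

    // Hypothetical stand-ins for pdal::Polygon and pdal::PointRef.
    struct Box
    {
        double minx, miny, maxx, maxy;
        bool covers(double x, double y) const
        { return x >= minx && x <= maxx && y >= miny && y <= maxy; }
    };
    struct Pt { double x; double y; int dim; };
    struct PolyVal { Box geom; int val; };

    // Mirrors OverlayFilter::processOne: every covering polygon assigns,
    // so the last match in the polygon list wins.
    static bool processOne(Pt& point, const std::vector<PolyVal>& polys)
    {
        for (const auto& poly : polys)
            if (poly.geom.covers(point.x, point.y))
                point.dim = poly.val;
        return true;
    }

    int main()
    {
        std::vector<PolyVal> polys{ { { 0, 0, 10, 10 }, 2 },
                                    { { 5, 5, 15, 15 }, 6 } };
        Pt p{ 7, 7, 0 };
        processOne(p, polys);
        std::cout << p.dim << "\n";  // 6: both cover, last assignment wins
        return 0;
    }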
diff --git a/filters/AttributeFilter.hpp b/filters/OverlayFilter.hpp
similarity index 80%
rename from filters/AttributeFilter.hpp
rename to filters/OverlayFilter.hpp
index 7198958..67b6bd5 100644
--- a/filters/AttributeFilter.hpp
+++ b/filters/OverlayFilter.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2014, Howard Butler <hobu.inc at gmail.com>
+* Copyright (c) 2017, Hobu Inc. <hobu.inc at gmail.com>
 *
 * All rights reserved.
 *
@@ -36,19 +36,19 @@
 
 #include <pdal/plugin.hpp>
 #include <pdal/Filter.hpp>
+#include <pdal/Polygon.hpp>
 
 #include <map>
 #include <memory>
 #include <string>
 
-extern "C" int32_t AttributeFilter_ExitFunc();
-extern "C" PF_ExitFunc AttributeFilter_InitPlugin();
+extern "C" int32_t OverlayFilter_ExitFunc();
+extern "C" PF_ExitFunc OverlayFilter_InitPlugin();
 
 typedef struct GEOSContextHandle_HS *GEOSContextHandle_t;
 
 typedef void *OGRLayerH;
 
-
 namespace pdal
 {
 
@@ -63,45 +63,45 @@ typedef std::shared_ptr<void> OGRGeometryPtr;
 
 class Arg;
 
-class PDAL_DLL AttributeFilter : public Filter
+class PDAL_DLL OverlayFilter : public Filter
 {
+    struct PolyVal
+    {
+        Polygon geom;
+        int32_t val;
+    };
+
 public:
-    AttributeFilter() : m_ds(0), m_lyr(0)
+    OverlayFilter() : m_ds(0), m_lyr(0)
     {}
 
     static void * create();
     static int32_t destroy(void *);
-    std::string getName() const { return "filters.attribute"; }
+    std::string getName() const { return "filters.overlay"; }
 
 private:
     virtual void addArgs(ProgramArgs& args);
+    virtual void spatialReferenceChanged(const SpatialReference& srs);
+    virtual bool processOne(PointRef& point);
     virtual void initialize();
     virtual void prepared(PointTableRef table);
     virtual void ready(PointTableRef table);
     virtual void filter(PointView& view);
 
-    AttributeFilter& operator=(const AttributeFilter&) = delete;
-    AttributeFilter(const AttributeFilter&) = delete;
+    OverlayFilter& operator=(const OverlayFilter&) = delete;
+    OverlayFilter(const OverlayFilter&) = delete;
 
     typedef std::shared_ptr<void> OGRDSPtr;
 
     OGRDSPtr m_ds;
     OGRLayerH m_lyr;
     std::string m_dimName;
-    double m_value;
-    Arg *m_valArg;
-    Arg *m_dsArg;
-    Arg *m_colArg;
-    Arg *m_queryArg;
-    Arg *m_layerArg;
     std::string m_datasource;
     std::string m_column;
     std::string m_query;
     std::string m_layer;
     Dimension::Id m_dim;
-
-    void UpdateGEOSBuffer(PointView& view);
-
+    std::vector<PolyVal> m_polygons;
 };
 
 } // namespace pdal
diff --git a/filters/PMFFilter.cpp b/filters/PMFFilter.cpp
index 3b25383..373181d 100644
--- a/filters/PMFFilter.cpp
+++ b/filters/PMFFilter.cpp
@@ -1,45 +1,48 @@
 /******************************************************************************
-* Copyright (c) 2015, Bradley J Chambers (brad.chambers at gmail.com)
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
+ * Copyright (c) 2015-2017, Bradley J Chambers (brad.chambers at gmail.com)
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+ *       names of its contributors may be used to endorse or promote
+ *       products derived from this software without specific prior
+ *       written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ ****************************************************************************/
 
 #include "PMFFilter.hpp"
 
 #include <pdal/EigenUtils.hpp>
-#include <pdal/pdal_macros.hpp>
+#include <pdal/KDIndex.hpp>
 #include <pdal/QuadIndex.hpp>
+#include <pdal/Segmentation.hpp>
+#include <pdal/pdal_macros.hpp>
 #include <pdal/util/ProgramArgs.hpp>
+#include <pdal/util/Utils.hpp>
 
-#include <Eigen/Dense>
+#include "private/DimRange.hpp"
 
 namespace pdal
 {
@@ -55,7 +58,6 @@ std::string PMFFilter::getName() const
     return s_info.name;
 }
 
-
 void PMFFilter::addArgs(ProgramArgs& args)
 {
     args.add("max_window_size", "Maximum window size", m_maxWindowSize, 33.0);
@@ -63,17 +65,36 @@ void PMFFilter::addArgs(ProgramArgs& args)
     args.add("max_distance", "Maximum distance", m_maxDistance, 2.5);
     args.add("initial_distance", "Initial distance", m_initialDistance, 0.15);
     args.add("cell_size", "Cell size", m_cellSize, 1.0);
-    args.add("classify", "Apply classification labels?", m_classify, true);
-    args.add("extract", "Extract ground returns?", m_extract);
     args.add("approximate", "Use approximate algorithm?", m_approximate);
+    args.add("ignore", "Ignore values", m_ignored);
+    args.add("last", "Consider last returns only?", m_lastOnly, true);
 }
 
-
 void PMFFilter::addDimensions(PointLayoutPtr layout)
 {
     layout->registerDim(Dimension::Id::Classification);
 }
 
+void PMFFilter::prepared(PointTableRef table)
+{
+    const PointLayoutPtr layout(table.layout());
+
+    m_ignored.m_id = layout->findDim(m_ignored.m_name);
+
+    if (m_lastOnly)
+    {
+        if (!layout->hasDim(Dimension::Id::ReturnNumber) ||
+            !layout->hasDim(Dimension::Id::NumberOfReturns))
+        {
+            log()->get(LogLevel::Warning) << "Could not find ReturnNumber and "
+                                             "NumberOfReturns. Skipping "
+                                             "segmentation of last returns and "
+                                             "proceeding with all returns.\n";
+            m_lastOnly = false;
+        }
+    }
+}
+
 std::vector<double> PMFFilter::morphOpen(PointViewPtr view, float radius)
 {
     point_count_t np(view->size());
@@ -88,7 +109,8 @@ std::vector<double> PMFFilter::morphOpen(PointViewPtr view, float radius)
         double x = view->getFieldAs<double>(Dimension::Id::X, i);
         double y = view->getFieldAs<double>(Dimension::Id::Y, i);
 
-        std::vector<PointId> ids = idx.getPoints(x-radius, y-radius, x+radius, y+radius);
+        std::vector<PointId> ids =
+            idx.getPoints(x - radius, y - radius, x + radius, y + radius);
 
         double localMin(std::numeric_limits<double>::max());
         for (auto const& j : ids)
@@ -106,7 +128,8 @@ std::vector<double> PMFFilter::morphOpen(PointViewPtr view, float radius)
         double x = view->getFieldAs<double>(Dimension::Id::X, i);
         double y = view->getFieldAs<double>(Dimension::Id::Y, i);
 
-        std::vector<PointId> ids = idx.getPoints(x-radius, y-radius, x+radius, y+radius);
+        std::vector<PointId> ids =
+            idx.getPoints(x - radius, y - radius, x + radius, y + radius);
 
         double localMax(std::numeric_limits<double>::lowest());
         for (auto const& j : ids)
@@ -136,13 +159,14 @@ std::vector<PointId> PMFFilter::processGround(PointViewPtr view)
         if (1) // exponential
             ws = m_cellSize * (2.0f * std::pow(2, iter) + 1.0f);
         else
-            ws = m_cellSize * (2.0f * (iter+1) * 2 + 1.0f);
+            ws = m_cellSize * (2.0f * (iter + 1) * 2 + 1.0f);
 
         // Calculate the height threshold to be used in the next iteration.
         if (iter == 0)
             ht = m_initialDistance;
         else
-            ht = m_slope * (ws - wsvec[iter-1]) * m_cellSize + m_initialDistance;
+            ht = m_slope * (ws - wsvec[iter - 1]) * m_cellSize +
+                 m_initialDistance;
 
         // Enforce max distance on height threshold
         if (ht > m_maxDistance)
@@ -166,14 +190,13 @@ std::vector<PointId> PMFFilter::processGround(PointViewPtr view)
         for (auto const& i : groundIdx)
             ground->appendPoint(*view, i);
 
-        log()->get(LogLevel::Debug) <<  "Iteration " << j
-                                    << " (height threshold = " << htvec[j]
-                                    << ", window size = " << wsvec[j]
-                                    << ")...\n";
+        log()->get(LogLevel::Debug)
+            << "Iteration " << j << " (height threshold = " << htvec[j]
+            << ", window size = " << wsvec[j] << ")...\n";
 
         // Create new cloud to hold the filtered results. Apply the
         // morphological opening operation at the current window size.
-        auto maxZ = morphOpen(ground, wsvec[j]*0.5);
+        auto maxZ = morphOpen(ground, wsvec[j] * 0.5);
 
         // Find indices of the points whose difference between the source and
         // filtered point clouds is less than the current height threshold.
@@ -188,25 +211,87 @@ std::vector<PointId> PMFFilter::processGround(PointViewPtr view)
 
         groundIdx.swap(groundNewIdx);
 
-        log()->get(LogLevel::Debug) << "Ground now has " << groundIdx.size()
-                                    << " points.\n";
+        log()->get(LogLevel::Debug)
+            << "Ground now has " << groundIdx.size() << " points.\n";
     }
 
     return groundIdx;
 }
 
-std::vector<PointId> PMFFilter::processGroundApprox(PointViewPtr view)
+std::vector<double> PMFFilter::fillNearest(PointViewPtr view, size_t rows,
+                                           size_t cols, double cell_size,
+                                           BOX2D bounds)
 {
-    using namespace Eigen;
+    using namespace Dimension;
+
+    std::vector<double> ZImin(rows * cols,
+                              std::numeric_limits<double>::quiet_NaN());
 
+    for (PointId i = 0; i < view->size(); ++i)
+    {
+        double x = view->getFieldAs<double>(Id::X, i);
+        double y = view->getFieldAs<double>(Id::Y, i);
+        double z = view->getFieldAs<double>(Id::Z, i);
+
+        int c = static_cast<int>(floor((x - bounds.minx) / cell_size));
+        int r = static_cast<int>(floor((y - bounds.miny) / cell_size));
+
+        if (z < ZImin[c * rows + r] || std::isnan(ZImin[c * rows + r]))
+            ZImin[c * rows + r] = z;
+    }
+
+    // convert cz into PointView
+    PointViewPtr temp = view->makeNew();
+    PointId i(0);
+    for (size_t c = 0; c < cols; ++c)
+    {
+        for (size_t r = 0; r < rows; ++r)
+        {
+            if (std::isnan(ZImin[c * rows + r]))
+                continue;
+
+            temp->setField(Id::X, i, bounds.minx + (c + 0.5) * cell_size);
+            temp->setField(Id::Y, i, bounds.miny + (r + 0.5) * cell_size);
+            temp->setField(Id::Z, i, ZImin[c * rows + r]);
+            i++;
+        }
+    }
+
+    // make a 2D KDIndex
+    KD2Index kdi(*temp);
+    kdi.build();
+
+    std::vector<double> out = ZImin;
+    for (size_t c = 0; c < cols; ++c)
+    {
+        for (size_t r = 0; r < rows; ++r)
+        {
+            if (!std::isnan(out[c * rows + r]))
+                continue;
+
+            // find k nearest points
+            double x = bounds.minx + (c + 0.5) * cell_size;
+            double y = bounds.miny + (r + 0.5) * cell_size;
+            int k = 1;
+            std::vector<PointId> neighbors(k);
+            std::vector<double> sqr_dists(k);
+            kdi.knnSearch(x, y, k, &neighbors, &sqr_dists);
+
+            out[c * rows + r] =
+                temp->getFieldAs<double>(Dimension::Id::Z, neighbors[0]);
+        }
+    }
+
+    return out;
+}
+
+std::vector<PointId> PMFFilter::processGroundApprox(PointViewPtr view)
+{
     BOX2D bounds;
     view->calculateBounds(bounds);
 
-    double extent_x = floor(bounds.maxx) - ceil(bounds.minx);
-    double extent_y = floor(bounds.maxy) - ceil(bounds.miny);
-
-    int cols = static_cast<int>(ceil(extent_x/m_cellSize)) + 1;
-    int rows = static_cast<int>(ceil(extent_y/m_cellSize)) + 1;
+    size_t cols = ((bounds.maxx - bounds.minx) / m_cellSize) + 1;
+    size_t rows = ((bounds.maxy - bounds.miny) / m_cellSize) + 1;
 
     // Compute the series of window sizes and height thresholds
     std::vector<float> htvec;
@@ -221,13 +306,14 @@ std::vector<PointId> PMFFilter::processGroundApprox(PointViewPtr view)
         if (1) // exponential
             ws = m_cellSize * (2.0f * std::pow(2, iter) + 1.0f);
         else
-            ws = m_cellSize * (2.0f * (iter+1) * 2 + 1.0f);
+            ws = m_cellSize * (2.0f * (iter + 1) * 2 + 1.0f);
 
         // Calculate the height threshold to be used in the next iteration.
         if (iter == 0)
             ht = m_initialDistance;
         else
-            ht = m_slope * (ws - wsvec[iter-1]) * m_cellSize + m_initialDistance;
+            ht = m_slope * (ws - wsvec[iter - 1]) * m_cellSize +
+                 m_initialDistance;
 
         // Enforce max distance on height threshold
         if (ht > m_maxDistance)
@@ -243,18 +329,20 @@ std::vector<PointId> PMFFilter::processGroundApprox(PointViewPtr view)
     for (PointId i = 0; i < view->size(); ++i)
         groundIdx.push_back(i);
 
-    MatrixXd ZImin = eigen::createMinMatrix(*view.get(), rows, cols, m_cellSize,
-                                            bounds);
+    std::vector<double> ZImin =
+        fillNearest(view, rows, cols, m_cellSize, bounds);
 
     // Progressively filter ground returns using morphological open
     for (size_t j = 0; j < wsvec.size(); ++j)
     {
-        log()->get(LogLevel::Debug) <<  "Iteration " << j
-                                    << " (height threshold = " << htvec[j]
-                                    << ", window size = " << wsvec[j]
-                                    << ")...\n";
+        log()->get(LogLevel::Debug)
+            << "Iteration " << j << " (height threshold = " << htvec[j]
+            << ", window size = " << wsvec[j] << ")...\n";
 
-        MatrixXd mo = eigen::matrixOpen(ZImin, 0.5*(wsvec[j]-1));
+        std::vector<double> me =
+            eigen::erodeDiamond(ZImin, rows, cols, 0.5 * (wsvec[j] - 1));
+        std::vector<double> mo =
+            eigen::dilateDiamond(me, rows, cols, 0.5 * (wsvec[j] - 1));
 
         std::vector<PointId> groundNewIdx;
         for (auto p_idx : groundIdx)
@@ -263,19 +351,18 @@ std::vector<PointId> PMFFilter::processGroundApprox(PointViewPtr view)
             double y = view->getFieldAs<double>(Dimension::Id::Y, p_idx);
             double z = view->getFieldAs<double>(Dimension::Id::Z, p_idx);
 
-            int r = static_cast<int>(std::floor((y-bounds.miny) / m_cellSize));
-            int c = static_cast<int>(std::floor((x-bounds.minx) / m_cellSize));
+            int c = static_cast<int>(floor((x - bounds.minx) / m_cellSize));
+            int r = static_cast<int>(floor((y - bounds.miny) / m_cellSize));
 
-            float diff = z - mo(r, c);
-            if (diff < htvec[j])
+            if ((z - mo[c * rows + r]) < htvec[j])
                 groundNewIdx.push_back(p_idx);
         }
 
         ZImin.swap(mo);
         groundIdx.swap(groundNewIdx);
 
-        log()->get(LogLevel::Debug) << "Ground now has " << groundIdx.size()
-                                    << " points.\n";
+        log()->get(LogLevel::Debug)
+            << "Ground now has " << groundIdx.size() << " points.\n";
     }
 
     return groundIdx;
@@ -283,63 +370,66 @@ std::vector<PointId> PMFFilter::processGroundApprox(PointViewPtr view)
 
 PointViewSet PMFFilter::run(PointViewPtr input)
 {
-    bool logOutput = log()->getLevel() > LogLevel::Debug1;
-    if (logOutput)
-        log()->floatPrecision(8);
-    log()->get(LogLevel::Debug2) << "Process PMFFilter...\n";
+    PointViewSet viewSet;
+    if (!input->size())
+        return viewSet;
+
+    // Segment input view into ignored/kept views.
+    PointViewPtr ignoredView = input->makeNew();
+    PointViewPtr keptView = input->makeNew();
+    if (m_ignored.m_id == Dimension::Id::Unknown)
+        keptView->append(*input);
+    else
+        Segmentation::ignoreDimRange(m_ignored, input, keptView, ignoredView);
+
+    // Segment kept view into last/other-than-last return views.
+    PointViewPtr lastView = keptView->makeNew();
+    PointViewPtr nonlastView = keptView->makeNew();
+    if (m_lastOnly)
+        Segmentation::segmentLastReturns(keptView, lastView, nonlastView);
+    else
+        lastView->append(*keptView);
+
+    for (PointId i = 0; i < nonlastView->size(); ++i)
+        nonlastView->setField(Dimension::Id::Classification, i, 1);
+
+    for (PointId i = 0; i < lastView->size(); ++i)
+        lastView->setField(Dimension::Id::Classification, i, 1);
 
     std::vector<PointId> idx;
     if (m_approximate)
-        idx = processGroundApprox(input);
+        idx = processGroundApprox(lastView);
     else
-        idx = processGround(input);
+        idx = processGround(lastView);
 
-    PointViewSet viewSet;
-    if (!idx.empty() && (m_classify || m_extract))
+    PointViewPtr outView = input->makeNew();
+    if (!idx.empty())
     {
 
-        if (m_classify)
-        {
-            log()->get(LogLevel::Debug2) << "Labeled " << idx.size()
-                                         << " ground returns!\n";
-
-            // set the classification label of ground returns as 2
-            // (corresponding to ASPRS LAS specification)
-            for (const auto& i : idx)
-            {
-                input->setField(Dimension::Id::Classification, i, 2);
-            }
-
-            viewSet.insert(input);
-        }
+        log()->get(LogLevel::Debug2)
+            << "Labeled " << idx.size() << " ground returns!\n";
 
-        if (m_extract)
+        // set the classification label of ground returns as 2
+        // (corresponding to ASPRS LAS specification)
+        for (const auto& i : idx)
         {
-            log()->get(LogLevel::Debug2) << "Extracted " << idx.size()
-                                         << " ground returns!\n";
-
-            // create new PointView containing only ground returns
-            PointViewPtr output = input->makeNew();
-            for (const auto& i : idx)
-            {
-                output->appendPoint(*input, i);
-            }
-
-            viewSet.erase(input);
-            viewSet.insert(output);
+            lastView->setField(Dimension::Id::Classification, i, 2);
         }
+
+        outView->append(*ignoredView);
+        outView->append(*nonlastView);
+        outView->append(*lastView);
     }
     else
     {
         if (idx.empty())
-            log()->get(LogLevel::Debug2) << "Filtered cloud has no ground returns!\n";
-
-        if (!(m_classify || m_extract))
-            log()->get(LogLevel::Debug2) << "Must choose --classify or --extract\n";
+            log()->get(LogLevel::Debug2) << "Filtered cloud has no "
+                                            "ground returns!\n";
 
         // return the input buffer unchanged
-        viewSet.insert(input);
+        outView->append(*input);
     }
+    viewSet.insert(outView);
 
     return viewSet;
 }
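
Both processGround variants precompute the same two series: window sizes
that grow exponentially (the "if (1) // exponential" branch) and height
thresholds that scale with the growth between successive windows, clamped
at max_distance. A standalone sketch of that series using the defaults
visible in addArgs; slope = 1.0 and the loop bound are assumptions, since
neither appears in the quoted hunks:

    #include <cmath>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Defaults from addArgs above; slope is an assumption.
        const double maxWindowSize = 33.0, maxDistance = 2.5;
        const double initialDistance = 0.15, cellSize = 1.0, slope = 1.0;

        std::vector<float> wsvec, htvec;
        for (int iter = 0; ; ++iter)
        {
            // Exponentially growing window, as in the filter's loop.
            float ws = cellSize * (2.0f * std::pow(2, iter) + 1.0f);
            if (ws > maxWindowSize)
                break;

            // Height threshold for this iteration, clamped at max_distance.
            float ht = (iter == 0)
                ? float(initialDistance)
                : float(slope * (ws - wsvec[iter - 1]) * cellSize +
                        initialDistance);
            if (ht > maxDistance)
                ht = float(maxDistance);

            wsvec.push_back(ws);
            htvec.push_back(ht);
            std::cout << "iter " << iter << ": ws=" << ws
                      << " ht=" << ht << "\n";
        }
        return 0;
    }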
diff --git a/filters/PMFFilter.hpp b/filters/PMFFilter.hpp
index 6d71767..d2655d4 100644
--- a/filters/PMFFilter.hpp
+++ b/filters/PMFFilter.hpp
@@ -1,42 +1,44 @@
 /******************************************************************************
-* Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
+ * Copyright (c) 2016-2017, Bradley J Chambers (brad.chambers at gmail.com)
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+ *       names of its contributors may be used to endorse or promote
+ *       products derived from this software without specific prior
+ *       written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ ****************************************************************************/
 
 #pragma once
 
 #include <pdal/Filter.hpp>
 #include <pdal/plugin.hpp>
 
+#include "private/DimRange.hpp"
+
 #include <memory>
 
 extern "C" int32_t PMFFilter_ExitFunc();
@@ -54,10 +56,11 @@ class PDAL_DLL PMFFilter : public Filter
 {
 public:
     PMFFilter() : Filter()
-    {}
+    {
+    }
 
-    static void * create();
-    static int32_t destroy(void *);
+    static void* create();
+    static int32_t destroy(void*);
     std::string getName() const;
 
 private:
@@ -66,19 +69,22 @@ private:
     double m_maxDistance;
     double m_initialDistance;
     double m_cellSize;
-    bool m_classify;
-    bool m_extract;
     bool m_approximate;
+    DimRange m_ignored;
+    bool m_lastOnly;
 
     virtual void addDimensions(PointLayoutPtr layout);
     virtual void addArgs(ProgramArgs& args);
+    std::vector<double> fillNearest(PointViewPtr view, size_t rows, size_t cols,
+                                    double cell_size, BOX2D bounds);
     std::vector<double> morphOpen(PointViewPtr view, float radius);
+    virtual void prepared(PointTableRef table);
     std::vector<PointId> processGround(PointViewPtr view);
     std::vector<PointId> processGroundApprox(PointViewPtr view);
     virtual PointViewSet run(PointViewPtr view);
 
     PMFFilter& operator=(const PMFFilter&); // not implemented
-    PMFFilter(const PMFFilter&); // not implemented
+    PMFFilter(const PMFFilter&);            // not implemented
 };
 
 } // namespace pdal
diff --git a/filters/RangeFilter.cpp b/filters/RangeFilter.cpp
index 55c758b..d99432e 100644
--- a/filters/RangeFilter.cpp
+++ b/filters/RangeFilter.cpp
@@ -38,6 +38,8 @@
 #include <pdal/util/ProgramArgs.hpp>
 #include <pdal/util/Utils.hpp>
 
+#include "private/DimRange.hpp"
+
 #include <cctype>
 #include <limits>
 #include <map>
@@ -58,98 +60,13 @@ std::string RangeFilter::getName() const
     return s_info.name;
 }
 
-namespace
-{
 
-RangeFilter::Range parseRange(const std::string& r)
-{
-    std::string::size_type pos, count;
-    bool ilb = true;
-    bool iub = true;
-    bool negate = false;
-    const char *start;
-    char *end;
-    std::string name;
-    double ub, lb;
-
-    try
-    {
-        pos = 0;
-        // Skip leading whitespace.
-        count = Utils::extract(r, pos, (int(*)(int))std::isspace);
-        pos += count;
-
-        count = Utils::extract(r, pos, (int(*)(int))std::isalpha);
-        if (count == 0)
-           throw std::string("No dimension name.");
-        name = r.substr(pos, count);
-        pos += count;
-
-        if (r[pos] == '!')
-        {
-            negate = true;
-            pos++;
-        }
+RangeFilter::RangeFilter()
+{}
 
-        if (r[pos] == '(')
-            ilb = false;
-        else if (r[pos] != '[')
-            throw std::string("Missing '(' or '['.");
-        pos++;
-
-        // Extract lower bound.
-        start = r.data() + pos;
-        lb = std::strtod(start, &end);
-        if (start == end)
-            lb = std::numeric_limits<double>::min();
-        pos += (end - start);
-
-        count = Utils::extract(r, pos, (int(*)(int))std::isspace);
-        pos += count;
-
-        if (r[pos] != ':')
-            throw std::string("Missing ':' limit separator.");
-        pos++;
-
-        start = r.data() + pos;
-        ub = std::strtod(start, &end);
-        if (start == end)
-            ub = std::numeric_limits<double>::max();
-        pos += (end - start);
-
-        count = Utils::extract(r, pos, (int(*)(int))std::isspace);
-        pos += count;
-
-        if (r[pos] == ')')
-            iub = false;
-        else if (r[pos] != ']')
-            throw std::string("Missing ')' or ']'.");
-        pos++;
-
-        count = Utils::extract(r, pos, (int(*)(int))std::isspace);
-        pos += count;
-
-        if (pos != r.size())
-            throw std::string("Invalid characters following valid range.");
-    }
-    catch (std::string s)
-    {
-        std::ostringstream oss;
-        oss << "filters.range: invalid 'limits' option: '" << r << "': " << s;
-        throw pdal_error(oss.str());
-    }
-    return RangeFilter::Range(name, lb, ub, ilb, iub, negate);
-}
 
-} // unnamed namespace
-
-
-bool operator < (const RangeFilter::Range& r1, const RangeFilter::Range& r2)
-{
-    return (r1.m_name < r2.m_name ? true :
-        r1.m_name > r2.m_name ? false :
-        &r1 < &r2);
-}
+RangeFilter::~RangeFilter()
+{}
 
 
 void RangeFilter::addArgs(ProgramArgs& args)
@@ -162,7 +79,18 @@ void RangeFilter::initialize()
 {
     // Would be better to have the range know how to read from an input stream.
     for (auto const& r : m_rangeSpec)
-        m_range_list.push_back(parseRange(r));
+    {
+        try
+        {
+            DimRange range;
+            range.parse(r);
+            m_range_list.push_back(range);
+        }
+        catch (const DimRange::error& err)
+        {
+            throwError("Invalid 'limits' option: '" + r + "': " + err.what());
+        }
+    }
 }
 
 
@@ -174,29 +102,13 @@ void RangeFilter::prepared(PointTableRef table)
     {
         r.m_id = layout->findDim(r.m_name);
         if (r.m_id == Dimension::Id::Unknown)
-        {
-            std::ostringstream oss;
-            oss << "Invalid dimension name in filters.range 'limits' "
-                "option: '" << r.m_name << "'.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Invalid dimension name in 'limits' option: '" +
+                r.m_name + "'.");
     }
     std::sort(m_range_list.begin(), m_range_list.end());
 }
 
 
-// Determine if a point passes a single range.
-bool RangeFilter::dimensionPasses(double v, const Range& r) const
-{
-    bool fail = ((r.m_inclusive_lower_bound && v < r.m_lower_bound) ||
-        (!r.m_inclusive_lower_bound && v <= r.m_lower_bound) ||
-        (r.m_inclusive_upper_bound && v > r.m_upper_bound) ||
-        (!r.m_inclusive_upper_bound && v >= r.m_upper_bound));
-    if (r.m_negate)
-        fail = !fail;
-    return !fail;
-}
-
 // The range list is sorted by dimension, so the logic here should work
 // as ORs between ranges of the same dimension and ANDs between ranges
 // of different dimensions.  This is simple logic, but is probably the most
@@ -221,7 +133,7 @@ bool RangeFilter::processOne(PointRef& point)
         // a new dimension.
         else if (passes)
             continue;
-        passes = dimensionPasses(point.getFieldAs<double>(r.m_id), r);
+        passes = r.valuePasses(point.getFieldAs<double>(r.m_id));
     }
     return passes;
 }
@@ -247,4 +159,3 @@ PointViewSet RangeFilter::run(PointViewPtr inView)
 }
 
 } // namespace pdal
-
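
The parseRange() code removed above fully specifies the limit syntax that the new
DimRange class encapsulates: a dimension name, an optional '!' to negate, '[' or
'(' for an inclusive or exclusive lower bound, 'lb:ub', and ']' or ')' for the
upper bound; e.g. Z[1.5:3) passes 1.5 but not 3. (A missing bound defaulted to
std::numeric_limits<double>::min()/max() in the removed code; note min() is the
smallest positive double, not the most negative.) Below is a self-contained sketch
of the bound test, taken from the removed dimensionPasses(), plus the
OR-within-a-dimension / AND-across-dimensions combination described in the
processOne() comment. SimpleRange and passesAll are illustrative names, not PDAL
API:

    #include <map>
    #include <string>
    #include <vector>

    // Illustrative stand-in for the removed RangeFilter::Range plus
    // dimensionPasses(); in PDAL this role now belongs to DimRange.
    struct SimpleRange
    {
        std::string name;
        double lb, ub;     // lower/upper bound
        bool ilb, iub;     // inclusive lower/upper bound?
        bool negate;

        bool valuePasses(double v) const
        {
            bool fail = ((ilb && v < lb) || (!ilb && v <= lb) ||
                         (iub && v > ub) || (!iub && v >= ub));
            return negate ? fail : !fail;
        }
    };

    // Ranges sorted by dimension name combine as ORs within a dimension
    // and ANDs across dimensions, mirroring processOne() above.
    bool passesAll(const std::vector<SimpleRange>& sorted,
                   const std::map<std::string, double>& values)
    {
        for (size_t i = 0; i < sorted.size(); )
        {
            const std::string& dim = sorted[i].name;
            bool any = false;
            for (; i < sorted.size() && sorted[i].name == dim; ++i)
                any = any || sorted[i].valuePasses(values.at(dim));
            if (!any)
                return false;
        }
        return true;
    }

Under these semantics, Classification[2:2], Classification[9:9], Z[0:100] keeps a
point whose Classification is 2 or 9 and whose Z lies in [0, 100].
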
diff --git a/filters/RangeFilter.hpp b/filters/RangeFilter.hpp
index efa0758..7b78f35 100644
--- a/filters/RangeFilter.hpp
+++ b/filters/RangeFilter.hpp
@@ -47,42 +47,13 @@ extern "C" PF_ExitFunc RangeFilter_InitPlugin();
 namespace pdal
 {
 
-class Options;
+struct DimRange;
 
 class PDAL_DLL RangeFilter : public pdal::Filter
 {
 public:
-
-    struct Range
-    {
-        Range(const std::string name,
-              double lower_bound,
-              double upper_bound,
-              bool inclusive_lower_bound,
-              bool inclusive_upper_bound,
-              bool negate) :
-            m_name(name), m_id(Dimension::Id::Unknown),
-            m_lower_bound(lower_bound), m_upper_bound(upper_bound),
-            m_inclusive_lower_bound(inclusive_lower_bound),
-            m_inclusive_upper_bound(inclusive_upper_bound),
-            m_negate(negate)
-        {}
-
-        Range()
-            {}
-
-        std::string m_name;
-        Dimension::Id m_id;
-        double m_lower_bound;
-        double m_upper_bound;
-        bool m_inclusive_lower_bound;
-        bool m_inclusive_upper_bound;
-        bool m_negate;
-    };
-
-
-    RangeFilter() : Filter()
-    {}
+    RangeFilter();
+    ~RangeFilter();
 
     static void * create();
     static int32_t destroy(void *);
@@ -90,14 +61,13 @@ public:
 
 private:
     StringList m_rangeSpec;
-    std::vector<Range> m_range_list;
+    std::vector<DimRange> m_range_list;
 
     virtual void addArgs(ProgramArgs& args);
     virtual void initialize();
     virtual void prepared(PointTableRef table);
     virtual bool processOne(PointRef& point);
     virtual PointViewSet run(PointViewPtr view);
-    bool dimensionPasses(double v, const Range& r) const;
 
     RangeFilter& operator=(const RangeFilter&) = delete;
     RangeFilter(const RangeFilter&) = delete;
diff --git a/filters/ReprojectionFilter.cpp b/filters/ReprojectionFilter.cpp
index db7d1c6..dd71032 100644
--- a/filters/ReprojectionFilter.cpp
+++ b/filters/ReprojectionFilter.cpp
@@ -64,6 +64,7 @@ ReprojectionFilter::ReprojectionFilter()
     , m_errorHandler(new gdal::ErrorHandler())
 {}
 
+
 ReprojectionFilter::~ReprojectionFilter()
 {
     if (m_transform_ptr)
@@ -86,20 +87,11 @@ void ReprojectionFilter::initialize()
 {
     m_inferInputSRS = m_inSRS.empty();
 
-    m_out_ref_ptr = OSRNewSpatialReference(0);
+    m_out_ref_ptr = OSRNewSpatialReference(m_outSRS.getWKT().c_str());
     if (!m_out_ref_ptr)
-        throw pdal::pdal_error("Unable to allocate new OSR SpatialReference "
-            "in initialize()!");
-
-    int result = OSRSetFromUserInput(m_out_ref_ptr, m_outSRS.getWKT().c_str());
-    if (result != OGRERR_NONE)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Invalid output spatial reference '" <<
-            m_outSRS.getWKT() << "'.  This is usually caused by a "
-            "bad value for the 'out_srs' option.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Invalid output spatial reference '" + m_outSRS.getWKT() +
+            "'.  This is usually caused by a bad value for the 'out_srs' "
+            "option.");
 }
 
 
@@ -116,43 +108,27 @@ void ReprojectionFilter::createTransform(const SpatialReference& srsSRS)
     {
         m_inSRS = srsSRS;
         if (m_inSRS.empty())
-        {
-            std::ostringstream oss;
-            oss << getName() << ": source data has no spatial reference and "
-                "none is specified with the 'in_srs' option.";
-            throw pdal_error(oss.str());
-        }
+            throwError("source data has no spatial reference and "
+                "none is specified with the 'in_srs' option.");
     }
 
     if (m_in_ref_ptr)
         OSRDestroySpatialReference(m_in_ref_ptr);
-    m_in_ref_ptr = OSRNewSpatialReference(0);
+    m_in_ref_ptr = OSRNewSpatialReference(m_inSRS.getWKT().c_str());
     if (!m_in_ref_ptr)
-        throw pdal::pdal_error("Unable to allocate new OSR SpatialReference for input coordinate system in createTransform()!");
-
-    int result = OSRSetFromUserInput(m_in_ref_ptr, m_inSRS.getWKT().c_str());
-    if (result != OGRERR_NONE)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Invalid input spatial reference '" <<
-            m_inSRS.getWKT() << "'.  This is usually caused by "
-            "a bad value for the 'in_srs' option or an invalid "
-            "spatial reference in the source file.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Invalid input spatial reference '" + m_inSRS.getWKT() +
+            "'.  This is usually caused by a bad value for the 'in_srs' "
+            "option or an invalid spatial reference in the source file.");
     if (m_transform_ptr)
         OCTDestroyCoordinateTransformation(m_transform_ptr);
     m_transform_ptr = OCTNewCoordinateTransformation(m_in_ref_ptr,
         m_out_ref_ptr);
     if (!m_transform_ptr)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Could not construct coordinate "
-            "transformation object in createTransform";
-        throw pdal_error(oss.str());
-    }
+        throwError("Could not construct coordinate transformation object "
+            "in createTransform");
 }
 
+
 PointViewSet ReprojectionFilter::run(PointViewPtr view)
 {
     PointViewSet viewSet;
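
The change above replaces the two-step OSRNewSpatialReference(0) +
OSRSetFromUserInput sequence with a single OSRNewSpatialReference(wkt) call and
routes failures through throwError(). For reference, a minimal, self-contained
sketch of the underlying GDAL C-API pattern the filter drives; the WKT inputs and
the error handling here are placeholders, not PDAL code, and the sketch leaks the
handles on the error paths for brevity:

    #include <ogr_srs_api.h>  // GDAL's OSR/OCT C API

    #include <stdexcept>

    // Reproject one coordinate from an input SRS to an output SRS using
    // the same GDAL entry points ReprojectionFilter uses. inWkt/outWkt
    // are assumed to hold valid WKT definitions.
    void reprojectOne(const char* inWkt, const char* outWkt,
                      double& x, double& y, double& z)
    {
        OGRSpatialReferenceH in = OSRNewSpatialReference(inWkt);
        OGRSpatialReferenceH out = OSRNewSpatialReference(outWkt);
        if (!in || !out)
            throw std::runtime_error("invalid spatial reference");

        OGRCoordinateTransformationH xform =
            OCTNewCoordinateTransformation(in, out);
        if (!xform)
            throw std::runtime_error("could not construct transformation");

        // OCTTransform converts arrays of coordinates in place.
        if (!OCTTransform(xform, 1, &x, &y, &z))
            throw std::runtime_error("transformation failed");

        OCTDestroyCoordinateTransformation(xform);
        OSRDestroySpatialReference(in);
        OSRDestroySpatialReference(out);
    }
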
diff --git a/filters/SMRFilter.cpp b/filters/SMRFilter.cpp
index b523b1f..20c4a08 100644
--- a/filters/SMRFilter.cpp
+++ b/filters/SMRFilter.cpp
@@ -1,75 +1,76 @@
 /******************************************************************************
-* Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
+ * Copyright (c) 2016-2017, Bradley J Chambers (brad.chambers at gmail.com)
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+ *       names of its contributors may be used to endorse or promote
+ *       products derived from this software without specific prior
+ *       written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ ****************************************************************************/
+
+// PDAL implementation of T. J. Pingel, K. C. Clarke, and W. A. McBride, “An
+// improved simple morphological filter for the terrain classification of
+// airborne LIDAR data,” ISPRS J. Photogramm. Remote Sens., vol. 77, pp. 21–30,
+// 2013.
 
 #include "SMRFilter.hpp"
 
 #include <pdal/EigenUtils.hpp>
+#include <pdal/KDIndex.hpp>
+#include <pdal/Segmentation.hpp>
 #include <pdal/pdal_macros.hpp>
-#include <pdal/PipelineManager.hpp>
-#include <pdal/SpatialReference.hpp>
 #include <pdal/util/FileUtils.hpp>
 #include <pdal/util/ProgramArgs.hpp>
-#include <pdal/util/Utils.hpp>
-#include <io/BufferReader.hpp>
+
+#include "private/DimRange.hpp"
 
 #include <Eigen/Dense>
-#include <Eigen/Sparse>
+
+#include <algorithm>
+#include <cmath>
+#include <iterator>
+#include <limits>
+#include <numeric>
+#include <string>
+#include <vector>
 
 namespace pdal
 {
+
+using namespace Dimension;
 using namespace Eigen;
+using namespace eigen;
 
-static PluginInfo const s_info =
-    PluginInfo("filters.smrf", "Pingel et al. (2013)",
-               "http://pdal.io/stages/filters.smrf.html");
+static PluginInfo const s_info = PluginInfo(
+    "filters.smrf", "Simple Morphological Filter (Pingel et al., 2013)",
+    "http://pdal.io/stages/filters.smrf.html");
 
 CREATE_STATIC_PLUGIN(1, 0, SMRFilter, Filter, s_info)
 
-struct distElev
-{
-    double dist;
-    double elev;
-};
-
-struct by_dist
-{
-    bool operator()(distElev const& a, distElev const& b)
-    {
-        return a.dist < b.dist;
-    }
-};
-
 std::string SMRFilter::getName() const
 {
     return s_info.name;
@@ -77,874 +78,521 @@ std::string SMRFilter::getName() const
 
 void SMRFilter::addArgs(ProgramArgs& args)
 {
-    args.add("classify", "Apply classification labels?", m_classify, true);
-    args.add("extract", "Extract ground returns?", m_extract);
-    args.add("cell", "Cell size?", m_cellSize, 1.0);
-    args.add("slope", "Slope?", m_percentSlope, 0.15);
-    args.add("window", "Max window size?", m_maxWindow, 18.0);
+    args.add("cell", "Cell size?", m_cell, 1.0);
+    args.add("slope", "Percent slope?", m_slope, 0.15);
+    args.add("window", "Max window size?", m_window, 18.0);
     args.add("scalar", "Elevation scalar?", m_scalar, 1.25);
     args.add("threshold", "Elevation threshold?", m_threshold, 0.5);
-    args.add("cut", "Cut net size?", m_cutNet, 0.0);
-    args.add("outdir", "Optional output directory for debugging", m_outDir);
+    args.add("cut", "Cut net size?", m_cut, 0.0);
+    args.add("dir", "Optional output directory for debugging", m_dir);
+    args.add("ignore", "Ignore values", m_ignored);
+    args.add("last", "Consider last returns only?", m_lastOnly, true);
 }
 
 void SMRFilter::addDimensions(PointLayoutPtr layout)
 {
-    layout->registerDim(Dimension::Id::Classification);
+    layout->registerDim(Id::Classification);
 }
 
-void SMRFilter::ready(PointTableRef table)
+void SMRFilter::prepared(PointTableRef table)
 {
-    if (m_outDir.empty())
-        return;
-        
-    if (!FileUtils::directoryExists(m_outDir))
-        throw pdal_error("Output directory does not exist");
-}
+    const PointLayoutPtr layout(table.layout());
 
-MatrixXd SMRFilter::inpaintKnn(MatrixXd cx, MatrixXd cy, MatrixXd cz)
-{
-    MatrixXd out = cz;
+    m_ignored.m_id = layout->findDim(m_ignored.m_name);
 
-    for (auto c = 0; c < m_numCols; ++c)
+    if (m_lastOnly)
     {
-        for (auto r = 0; r < m_numRows; ++r)
+        if (!layout->hasDim(Dimension::Id::ReturnNumber) ||
+            !layout->hasDim(Dimension::Id::NumberOfReturns))
         {
-            if (!std::isnan(cz(r, c)))
-                continue;
+            log()->get(LogLevel::Warning) << "Could not find ReturnNumber and "
+                                             "NumberOfReturns. Skipping "
+                                             "segmentation of last returns and "
+                                             "proceeding with all returns.\n";
+            m_lastOnly = false;
+        }
+    }
+}
 
-            int radius = 1;
-            bool enough = false;
+void SMRFilter::ready(PointTableRef table)
+{
+    if (m_dir.empty())
+        return;
 
-            while (!enough)
-            {
-                // log()->get(LogLevel::Debug) << r << "\t" << c << "\t" << radius << std::endl;
-                int cs = Utils::clamp(c-radius, 0, m_numCols-1);
-                int ce = Utils::clamp(c+radius, 0, m_numCols-1);
-                int col_size = ce - cs + 1;
-                int rs = Utils::clamp(r-radius, 0, m_numRows-1);
-                int re = Utils::clamp(r+radius, 0, m_numRows-1);
-                int row_size = re - rs + 1;
-
-                // MatrixXd Xn = cx.block(rs, cs, row_size, col_size);
-                // MatrixXd Yn = cy.block(rs, cs, row_size, col_size);
-                MatrixXd Zn = cz.block(rs, cs, row_size, col_size);
-
-                auto notNaN = [](double x)
-                {
-                    return !std::isnan(x);
-                };
+    if (!FileUtils::directoryExists(m_dir))
+        throwError("Output directory '" + m_dir + "' does not exist");
+}
 
-                enough = Zn.unaryExpr(notNaN).count() >= 8;
-                if (!enough)
-                {
-                    ++radius;
-                    continue;
-                }
+PointViewSet SMRFilter::run(PointViewPtr view)
+{
+    PointViewSet viewSet;
+    if (!view->size())
+        return viewSet;
+
+    // Segment input view into ignored/kept views.
+    PointViewPtr ignoredView = view->makeNew();
+    PointViewPtr keptView = view->makeNew();
+    if (m_ignored.m_id == Dimension::Id::Unknown)
+        keptView->append(*view);
+    else
+        Segmentation::ignoreDimRange(m_ignored, view, keptView, ignoredView);
 
-                // auto zNotNaN = [](double x)
-                // {
-                //     if (!std::isnan(x))
-                //         return x;
-                //     else
-                //         return 0.0;
-                // };
-                //
-                // // proceed to find 8 nearest neighbors and average the z values
-                // // std::cerr << Zn.unaryExpr(zNotNaN).sum() << "\t" << Zn.size() << "\t" << Zn.unaryExpr(zNotNaN).sum() / Zn.size() << std::endl;
-                // out(r, c) = Zn.unaryExpr(zNotNaN).sum() / Zn.size();
-
-                std::vector<distElev> de;
-
-                for (auto cc = cs; cc <= ce; ++cc)
-                {
-                    for (auto rr = rs; rr <= re; ++rr)
-                    {
-                        if (std::isnan(cz(rr, cc)))
-                            continue;
-
-                        // compute distance to !isnan neighbor
-                        double dx = cx(rr, cc) - cx(r, c);
-                        double dy = cy(rr, cc) - cy(r, c);
-                        double sqrdist = dx * dx + dy * dy;
-                        de.push_back(distElev{sqrdist, cz(rr, cc)});
-                    }
-                }
-                // sort dists
-                std::sort(de.begin(), de.end(), by_dist());
+    // Segment kept view into last/other-than-last return views.
+    PointViewPtr lastView = keptView->makeNew();
+    PointViewPtr nonlastView = keptView->makeNew();
+    if (m_lastOnly)
+        Segmentation::segmentLastReturns(keptView, lastView, nonlastView);
+    else
+        lastView->append(*keptView);
 
-                // average elevation of lowest eight dists
-                double sum = 0.0;
-                for (auto i = 0; i < 8; ++i)
-                {
-                    sum += de[i].elev;
-                }
-                sum /= 8.0;
+    for (PointId i = 0; i < nonlastView->size(); ++i)
+        nonlastView->setField(Dimension::Id::Classification, i, 1);
 
-                out(r, c) = sum;
-            }
-        }
-    }
+    m_srs = lastView->spatialReference();
 
-    return out;
-}
+    lastView->calculateBounds(m_bounds);
+    m_cols = ((m_bounds.maxx - m_bounds.minx) / m_cell) + 1;
+    m_rows = ((m_bounds.maxy - m_bounds.miny) / m_cell) + 1;
 
-std::vector<PointId> SMRFilter::processGround(PointViewPtr view)
-{
-    log()->get(LogLevel::Info) << "processGround: Running SMRF...\n";
-
-    // The algorithm consists of four conceptually distinct stages. The first is
-    // the creation of the minimum surface (ZImin). The second is the processing
-    // of the minimum surface, in which grid cells from the raster are
-    // identified as either containing bare earth (BE) or objects (OBJ). This
-    // second stage represents the heart of the algorithm. The third step is the
-    // creation of a DEM from these gridded points. The fourth step is the
-    // identification of the original LIDAR points as either BE or OBJ based on
-    // their relationship to the interpolated DEM.
-
-    std::vector<PointId> groundIdx;
-    
-    BOX2D bounds;
-    view->calculateBounds(bounds);
-    SpatialReference srs(view->spatialReference());
-
-    // Determine the number of rows and columns at the given cell size.
-    m_numCols = ((bounds.maxx - bounds.minx) / m_cellSize) + 1;
-    m_numRows = ((bounds.maxy - bounds.miny) / m_cellSize) + 1;
-
-    MatrixXd cx(m_numRows, m_numCols);
-    MatrixXd cy(m_numRows, m_numCols);
-    for (auto c = 0; c < m_numCols; ++c)
-    {
-        for (auto r = 0; r < m_numRows; ++r)
-        {
-            cx(r, c) = bounds.minx + (c + 0.5) * m_cellSize;
-            cy(r, c) = bounds.miny + (r + 0.5) * m_cellSize;
-        }
-    }
+    // Create raster of minimum Z values per element.
+    std::vector<double> ZImin = createZImin(lastView);
 
-    // STEP 1:
+    // Create raster mask of pixels containing low outlier points.
+    std::vector<int> Low = createLowMask(ZImin);
 
-    // As with many other ground filtering algorithms, the first step is
-    // generation of ZImin from the cell size parameter and the extent of the
-    // data. The two vectors corresponding to [min:cellSize:max] for each
-    // coordinate – xi and yi – may be supplied by the user or may be easily and
-    // automatically calculated from the data. Without supplied ranges, the SMRF
-    // algorithm creates a raster from the ceiling of the minimum to the floor
-    // of the maximum values for each of the (x,y) dimensions. If the supplied
-    // cell size parameter is not an integer, the same general rule applies to
-    // values evenly divisible by the cell size. For example, if cell size is
-    // equal to 0.5 m, and the x values range from 52345.6 to 52545.4, the range
-    // would be [52346 52545].
-
-    // The minimum surface grid ZImin defined by vectors (xi,yi) is filled with
-    // the nearest, lowest elevation from the original point cloud (x,y,z)
-    // values, provided that the distance to the nearest point does not exceed
-    // the supplied cell size parameter. This provision means that some grid
-    // points of ZImin will go unfilled. To fill these values, we rely on
-    // computationally inexpensive image inpainting techniques. Image inpainting
-    // involves the replacement of the empty cells in an image (or matrix) with
-    // values calculated from other nearby values. It is a type of interpolation
-    // technique derived from artistic replacement of damaged portions of
-    // photographs and paintings, where preservation of texture is an important
-    // concern (Bertalmio et al., 2000). When empty values are spread through
-    // the image, and the ratio of filled to empty pixels is quite high, most
-    // methods of inpainting will produce satisfactory results. In an evaluation
-    // of inpainting methods on ground identification from the final terrain
-    // model, we found that Laplacian techniques produced error rates nearly
-    // three times higher than either an average of the eight nearest neighbors
-    // or D’Errico’s spring-metaphor inpainting technique (D’Errico, 2004). The
-    // spring-metaphor technique imagines springs connecting each cell with its
-    // eight adjacent neighbors, where the inpainted value corresponds to the
-    // lowest energy state of the set, and where the entire (sparse) set of
-    // linear equations is solved using partial differential equations. Both of
-    // these latter techniques were nearly the same with regards to total error,
-    // with the spring technique performing slightly better than the k-nearest
-    // neighbor (KNN) approach.
-
-    MatrixXd ZImin = eigen::createMinMatrix(*view.get(), m_numRows, m_numCols,
-                                            m_cellSize, bounds);
-    
-    // MatrixXd ZImin_painted = inpaintKnn(cx, cy, ZImin);
-    // MatrixXd ZImin_painted = TPS(cx, cy, ZImin);
-    MatrixXd ZImin_painted = expandingTPS(cx, cy, ZImin);
-    
-    if (!m_outDir.empty())
-    {
-        std::string filename = FileUtils::toAbsolutePath("zimin.tif", m_outDir);
-        eigen::writeMatrix(ZImin, filename, "GTiff", m_cellSize, bounds, srs);
-        
-        filename = FileUtils::toAbsolutePath("zimin_painted.tif", m_outDir);
-        eigen::writeMatrix(ZImin_painted, filename, "GTiff", m_cellSize, bounds, srs);
-    }
+    // Create raster mask of net cuts. Net cutting is used when a scene
+    // contains large buildings in highly differentiated terrain.
+    std::vector<int> isNetCell = createNetMask();
 
-    ZImin = ZImin_painted;
+    // Apply net cutting to minimum Z raster.
+    std::vector<double> ZInet = createZInet(ZImin, isNetCell);
 
-    // STEP 2:
+    // Create raster mask of pixels containing object points. Note that we use
+    // ZInet, the result of net cutting, to identify object pixels.
+    std::vector<int> Obj = createObjMask(ZInet);
 
-    // The second stage of the ground identification algorithm involves the
-    // application of a progressive morphological filter to the minimum surface
-    // grid (ZImin). At the first iteration, the filter applies an image opening
-    // operation to the minimum surface. An opening operation consists of an
-    // application of an erosion filter followed by a dilation filter. The
-    // erosion acts to snap relative high values to relative lows, where a
-    // supplied window radius and shape (or structuring element) defines the
-    // search neighborhood. The dilation uses the same window radius and
-    // structuring element, acting to outwardly expand relative highs. Fig. 2
-    // illustrates an opening operation on a cross section of a transect from
-    // Sample 1–1 in the ISPRS LIDAR reference dataset (Sithole and Vosselman,
-    // 2003), following Zhang et al. (2003).
-
-    // paper has low point happening later, i guess it doesn't matter too much, this is where he does it in matlab code
-    MatrixXi Low = progressiveFilter(-ZImin, m_cellSize, 5.0, 1.0);
-
-    // matlab code has net cutting occurring here
-    MatrixXd ZInet = ZImin;
-    MatrixXi isNetCell = MatrixXi::Zero(m_numRows, m_numCols);
-    if (m_cutNet > 0.0)
-    {
-        MatrixXd bigOpen = eigen::matrixOpen(ZImin, 2*std::ceil(m_cutNet / m_cellSize));
-        for (auto c = 0; c < m_numCols; c += std::ceil(m_cutNet/m_cellSize))
-        {
-            for (auto r = 0; r < m_numRows; ++r)
-            {
-                isNetCell(r, c) = 1;
-            }
-        }
-        for (auto c = 0; c < m_numCols; ++c)
-        {
-            for (auto r = 0; r < m_numRows; r += std::ceil(m_cutNet/m_cellSize))
-            {
-                isNetCell(r, c) = 1;
-            }
-        }
-        for (auto c = 0; c < m_numCols; ++c)
-        {
-            for (auto r = 0; r < m_numRows; ++r)
-            {
-                if (isNetCell(r, c)==1)
-                    ZInet(r, c) = bigOpen(r, c);
-            }
-        }
-    }
+    // Create raster representing the provisional DEM. Note that we use the
+    // original ZImin (not ZInet), however the net cut mask will still force
+    // interpolation at these pixels.
+    std::vector<double> ZIpro =
+        createZIpro(lastView, ZImin, Low, isNetCell, Obj);
 
-    // and finally object detection
-    MatrixXi Obj = progressiveFilter(ZInet, m_cellSize, m_percentSlope, m_maxWindow);
+    // Classify ground returns by comparing elevation values to the provisional
+    // DEM.
+    classifyGround(lastView, ZIpro);
 
-    // STEP 3:
+    PointViewPtr outView = view->makeNew();
+    outView->append(*ignoredView);
+    outView->append(*nonlastView);
+    outView->append(*lastView);
+    viewSet.insert(outView);
 
-    // The end result of the iteration process described above is a binary grid
-    // where each cell is classified as being either bare earth (BE) or object
-    // (OBJ). The algorithm then applies this mask to the starting minimum
-    // surface to eliminate nonground cells. These cells are then inpainted
-    // according to the same process described previously, producing a
-    // provisional DEM (ZIpro).
+    return viewSet;
+}
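
run() above delegates the last-return split to Segmentation::segmentLastReturns.
Under the usual ASPRS LAS convention, a point is a last return when its
ReturnNumber equals its NumberOfReturns -- the same pair of dimensions prepared()
checks for. A sketch of that split under this assumption; this mirrors what
SMRFilter needs from the helper, not PDAL's actual implementation:

    #include <pdal/PointView.hpp>

    #include <cstdint>

    // Split a view into last returns and everything else, assuming the
    // LAS convention ReturnNumber == NumberOfReturns marks a last return.
    void splitLastReturns(pdal::PointViewPtr in, pdal::PointViewPtr last,
                          pdal::PointViewPtr other)
    {
        using namespace pdal;
        using namespace pdal::Dimension;
        for (PointId i = 0; i < in->size(); ++i)
        {
            uint8_t rn = in->getFieldAs<uint8_t>(Id::ReturnNumber, i);
            uint8_t nr = in->getFieldAs<uint8_t>(Id::NumberOfReturns, i);
            if (rn == nr)
                last->appendPoint(*in, i);
            else
                other->appendPoint(*in, i);
        }
    }
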
 
-    // we currently aren't checking for net cells or empty cells (haven't i already marked empty cells as NaNs?)
-    MatrixXd ZIpro = ZImin;
-    for (int i = 0; i < Obj.size(); ++i)
+void SMRFilter::classifyGround(PointViewPtr view, std::vector<double>& ZIpro)
+{
+    // "While many authors use a single value for the elevation threshold, we
+    // suggest that a second parameter be used to increase the threshold on
+    // steep slopes, transforming the threshold to a slope-dependent value. The
+    // total permissible distance is then equal to a fixed elevation threshold
+    // plus the scaling value multiplied by the slope of the DEM at each LIDAR
+    // point. The rationale behind this approach is that small horizontal and
+    // vertical displacements yield larger errors on steep slopes, and as a
+    // result the BE/OBJ threshold distance should be more permissive at these
+    // points."
+    MatrixXd gsurfs(m_rows, m_cols);
+    MatrixXd thresh(m_rows, m_cols);
     {
-        if (Obj(i) == 1 || Low(i) == 1 || isNetCell(i) == 1)
-            ZIpro(i) = std::numeric_limits<double>::quiet_NaN();
-    }
+        MatrixXd ZIproM = Map<MatrixXd>(ZIpro.data(), m_rows, m_cols);
+        MatrixXd scaled = ZIproM / m_cell;
+
+        MatrixXd gx = gradX(scaled);
+        MatrixXd gy = gradY(scaled);
+        gsurfs = (gx.cwiseProduct(gx) + gy.cwiseProduct(gy)).cwiseSqrt();
+        std::vector<double> gsurfsV(gsurfs.data(),
+                                    gsurfs.data() + gsurfs.size());
+        std::vector<double> gsurfs_fillV = knnfill(view, gsurfsV);
+        gsurfs = Map<MatrixXd>(gsurfs_fillV.data(), m_rows, m_cols);
+        thresh = (m_threshold + m_scalar * gsurfs.array()).matrix();
+
+        if (!m_dir.empty())
+        {
+            std::string fname = FileUtils::toAbsolutePath("gx.tif", m_dir);
+            writeMatrix(gx, fname, "GTiff", m_cell, m_bounds, m_srs);
 
-    // MatrixXd ZIpro_painted = inpaintKnn(cx, cy, ZIpro);
-    // MatrixXd ZIpro_painted = TPS(cx, cy, ZIpro);
-    MatrixXd ZIpro_painted = expandingTPS(cx, cy, ZIpro);
-    
-    if (!m_outDir.empty())
-    {
-        std::string filename = FileUtils::toAbsolutePath("zilow.tif", m_outDir);
-        eigen::writeMatrix(Low.cast<double>(), filename, "GTiff", m_cellSize, bounds, srs);
-        
-        filename = FileUtils::toAbsolutePath("zinet.tif", m_outDir);
-        eigen::writeMatrix(ZInet, filename, "GTiff", m_cellSize, bounds, srs);
-        
-        filename = FileUtils::toAbsolutePath("ziobj.tif", m_outDir);
-        eigen::writeMatrix(Obj.cast<double>(), filename, "GTiff", m_cellSize, bounds, srs);
-        
-        filename = FileUtils::toAbsolutePath("zipro.tif", m_outDir);
-        eigen::writeMatrix(ZIpro, filename, "GTiff", m_cellSize, bounds, srs);
-        
-        filename = FileUtils::toAbsolutePath("zipro_painted.tif", m_outDir);
-        eigen::writeMatrix(ZIpro_painted, filename, "GTiff", m_cellSize, bounds, srs);
-    }
+            fname = FileUtils::toAbsolutePath("gy.tif", m_dir);
+            writeMatrix(gy, fname, "GTiff", m_cell, m_bounds, m_srs);
 
-    ZIpro = ZIpro_painted;
-
-    // STEP 4:
-
-    // The final step of the algorithm is the identification of ground/object
-    // LIDAR points. This is accomplished by measuring the vertical distance
-    // between each LIDAR point and the provisional DEM, and applying a
-    // threshold calculation. While many authors use a single value for the
-    // elevation threshold, we suggest that a second parameter be used to
-    // increase the threshold on steep slopes, transforming the threshold to a
-    // slope-dependent value. The total permissible distance is then equal to a
-    // fixed elevation threshold plus the scaling value multiplied by the slope
-    // of the DEM at each LIDAR point. The rationale behind this approach is
-    // that small horizontal and vertical displacements yield larger errors on
-    // steep slopes, and as a result the BE/OBJ threshold distance should be
-    // more permissive at these points.
-
-    // The calculation requires that both elevation and slope are interpolated
-    // from the provisional DEM. There are any number of interpolation
-    // techniques that might be used, and even nearest neighbor approaches work
-    // quite well, so long as the cell size of the DEM nearly corresponds to the
-    // resolution of the LIDAR data. A comparison of how well these different
-    // methods of interpolation perform is given in the next section. Based on
-    // these results, we find that a splined cubic interpolation provides the
-    // best results.
-
-    // It is common in LIDAR point clouds to have a small number of outliers
-    // which may be either above or below the terrain surface. While
-    // above-ground outliers (e.g., a random return from a bird in flight) are
-    // filtered during the normal algorithm routine, the below-ground outliers
-    // (e.g., those caused by a reflection) require a separate approach. Early
-    // in the routine and along a separate processing fork, the minimum surface
-    // is checked for low outliers by inverting the point cloud in the z-axis
-    // and applying the filter with parameters (slope = 500%, maxWindowSize =
-    // 1). The resulting mask is used to flag low outlier cells as OBJ before
-    // the inpainting of the provisional DEM. This outlier identification
-    // methodology is functionally the same as that of Zhang et al. (2003).
-
-    // The provisional DEM (ZIpro), created by removing OBJ cells from the
-    // original minimum surface (ZImin) and then inpainting, tends to be less
-    // smooth than one might wish, especially when the surfaces are to be used
-    // to create visual products like immersive geographic virtual environments.
-    // As a result, it is often worthwhile to reinterpolate a final DEM from
-    // the identified ground points of the original LIDAR data (ZIfin). Surfaces
-    // created from these data tend to be smoother and more visually satisfying
-    // than those derived from the provisional DEM.
-
-    // Very large (>40m in length) buildings can sometimes prove troublesome to
-    // remove on highly differentiated terrain. To accommodate the removal of
-    // such objects, we implemented a feature in the published SMRF algorithm
-    // which is helpful in removing such features. We accomplish this by
-    // introducing into the initial minimum surface a "net" of minimum values
-    // at a spacing equal to the maximum window diameter, where these minimum
-    // values are found by applying a morphological open operation with a disk
-    // shaped structuring element of radius (2*wkmax). Since only one example in
-    // this dataset had features this large (Sample 4–2, a trainyard) we did not
-    // include this portion of the algorithm in the formal testing procedure,
-    // though we provide a brief analysis of the effect of using this net filter
-    // in the next section.
-    
-    MatrixXd scaled = ZIpro / m_cellSize;
-
-    MatrixXd gx = eigen::gradX(scaled);
-    MatrixXd gy = eigen::gradY(scaled);
-    MatrixXd gsurfs = (gx.cwiseProduct(gx) + gy.cwiseProduct(gy)).cwiseSqrt();
-
-    // MatrixXd gsurfs_painted = inpaintKnn(cx, cy, gsurfs);
-    // MatrixXd gsurfs_painted = TPS(cx, cy, gsurfs);
-    MatrixXd gsurfs_painted = expandingTPS(cx, cy, gsurfs);
-    
-    if (!m_outDir.empty())
-    {
-        std::string filename = FileUtils::toAbsolutePath("gx.tif", m_outDir);
-        eigen::writeMatrix(gx, filename, "GTiff", m_cellSize, bounds, srs);
-        
-        filename = FileUtils::toAbsolutePath("gy.tif", m_outDir);
-        eigen::writeMatrix(gy, filename, "GTiff", m_cellSize, bounds, srs);
-        
-        filename = FileUtils::toAbsolutePath("gsurfs.tif", m_outDir);
-        eigen::writeMatrix(gsurfs, filename, "GTiff", m_cellSize, bounds, srs);
-        
-        filename = FileUtils::toAbsolutePath("gsurfs_painted.tif", m_outDir);
-        eigen::writeMatrix(gsurfs_painted, filename, "GTiff", m_cellSize, bounds, srs);
-    }
+            fname = FileUtils::toAbsolutePath("gsurfs.tif", m_dir);
+            writeMatrix(gsurfs, fname, "GTiff", m_cell, m_bounds, m_srs);
 
-    gsurfs = gsurfs_painted;
+            fname = FileUtils::toAbsolutePath("gsurfs_fill.tif", m_dir);
+            MatrixXd gsurfs_fill =
+                Map<MatrixXd>(gsurfs_fillV.data(), m_rows, m_cols);
+            writeMatrix(gsurfs_fill, fname, "GTiff", m_cell, m_bounds, m_srs);
 
-    MatrixXd thresh = (m_threshold + m_scalar * gsurfs.array()).matrix();
-    
-    if (!m_outDir.empty())
-    {
-        std::string filename = FileUtils::toAbsolutePath("thresh.tif", m_outDir);
-        eigen::writeMatrix(thresh, filename, "GTiff", m_cellSize, bounds, srs);
+            fname = FileUtils::toAbsolutePath("thresh.tif", m_dir);
+            writeMatrix(thresh, fname, "GTiff", m_cell, m_bounds, m_srs);
+        }
     }
 
     for (PointId i = 0; i < view->size(); ++i)
     {
-        using namespace Dimension;
         double x = view->getFieldAs<double>(Id::X, i);
         double y = view->getFieldAs<double>(Id::Y, i);
         double z = view->getFieldAs<double>(Id::Z, i);
 
-        int c = Utils::clamp(static_cast<int>(floor(x - bounds.minx) / m_cellSize), 0, m_numCols-1);
-        int r = Utils::clamp(static_cast<int>(floor(y - bounds.miny) / m_cellSize), 0, m_numRows-1);
-
-        // author uses spline interpolation to get value from ZIpro and gsurfs
-
-        if (std::isnan(ZIpro(r, c)))
+        size_t c = static_cast<size_t>(std::floor((x - m_bounds.minx) / m_cell));
+        size_t r = static_cast<size_t>(std::floor((y - m_bounds.miny) / m_cell));
+
+        // TODO(chambbj): We don't quite do this by the book and yet it seems to
+        // work reasonably well:
+        // "The calculation requires that both elevation and slope are
+        // interpolated from the provisional DEM. There are any number of
+        // interpolation techniques that might be used, and even nearest
+        // neighbor approaches work quite well, so long as the cell size of the
+        // DEM nearly corresponds to the resolution of the LIDAR data. Based on
+        // these results, we find that a splined cubic interpolation provides
+        // the best results."
+        if (std::isnan(ZIpro[c * m_rows + r]))
             continue;
 
-        // not sure i should just brush this under the rug...
         if (std::isnan(gsurfs(r, c)))
             continue;
 
-        double ez = ZIpro(r, c);
-        // double ez = interp2(r, c, cx, cy, ZIpro);
-        // double si = gsurfs(r, c);
-        // double si = interp2(r, c, cx, cy, gsurfs);
-        // double reqVal = m_threshold + 1.2 * si;
-
-        if (std::abs(ez - z) > thresh(r, c))
-            continue;
-
-        // if (std::abs(ZIpro(r, c) - z) > m_threshold)
-        //     continue;
-
-        groundIdx.push_back(i);
+        // "The final step of the algorithm is the identification of
+        // ground/object LIDAR points. This is accomplished by measuring the
+        // vertical distance between each LIDAR point and the provisional
+        // DEM, and applying a threshold calculation."
+        if (std::fabs(ZIpro[c * m_rows + r] - z) > thresh(r, c))
+            view->setField(Id::Classification, i, 1);
+        else
+            view->setField(Id::Classification, i, 2);
     }
-
-    return groundIdx;
 }
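
A note on the indexing used throughout the new code: rasters are stored as flat
std::vectors and viewed through Eigen's Map<MatrixXd>, which is column-major by
default, so element (r, c) of an m_rows x m_cols raster lives at flat index
c * m_rows + r. A quick self-contained check of that equivalence:

    #include <Eigen/Dense>

    #include <cassert>
    #include <vector>

    int main()
    {
        const int rows = 3, cols = 4;
        std::vector<double> flat(rows * cols);
        for (int i = 0; i < rows * cols; ++i)
            flat[i] = i;

        // Eigen's Map is column-major by default, matching the
        // flat[c * rows + r] indexing used in SMRFilter.
        Eigen::Map<Eigen::MatrixXd> M(flat.data(), rows, cols);
        for (int c = 0; c < cols; ++c)
            for (int r = 0; r < rows; ++r)
                assert(M(r, c) == flat[c * rows + r]);
        return 0;
    }
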
 
-MatrixXi SMRFilter::progressiveFilter(MatrixXd const& ZImin, double cell_size,
-                                      double slope, double max_window)
+std::vector<int> SMRFilter::createLowMask(std::vector<double> const& ZImin)
 {
-    log()->get(LogLevel::Info) << "progressiveFilter: Progressive filtering...\n";
-
-    MatrixXi Obj(m_numRows, m_numCols);
-    Obj.setZero();
-
-    // In this case, we selected a disk-shaped structuring element, and the
-    // radius of the element at each step was increased by one pixel from a
-    // starting value of one pixel to the pixel equivalent of the maximum value
-    // (wkmax). The maximum window radius is supplied as a distance metric
-    // (e.g., 21 m), but is internally converted to a pixel equivalent by
-    // dividing it by the cell size and rounding the result toward positive
-    // infinity (i.e., taking the ceiling value). For example, for a supplied
-    // maximum window radius of 21 m, and a cell size of 2m per pixel, the
-    // result would be a maximum window radius of 11 pixels. While this
-    // represents a relatively slow progression in the expansion of the window
-    // radius, we believe that the high efficiency associated with the opening
-    // operation mitigates the potential for computational waste. The
-    // improvements in classification accuracy using slow, linear progressions
-    // are documented in the next section.
-    int max_radius = ceil(max_window/cell_size);
-    MatrixXd ZIlocal = ZImin;
-    for (int radius = 1; radius <= max_radius; ++radius)
+    // "[The] minimum surface is checked for low outliers by inverting the point
+    // cloud in the z-axis and applying the filter with parameters (slope =
+    // 500%, maxWindowSize = 1). The resulting mask is used to flag low outlier
+    // cells as OBJ before the inpainting of the provisional DEM."
+
+    // Negate ZImin so low outliers become relative highs for the filter.
+    std::vector<double> negZImin;
+    std::transform(ZImin.begin(), ZImin.end(), std::back_inserter(negZImin),
+                   [](double v) { return -v; });
+    std::vector<int> LowV = progressiveFilter(negZImin, 5.0, 1.0);
+
+    if (!m_dir.empty())
     {
-        // On the first iteration, the minimum surface (ZImin) is opened using a
-        // disk-shaped structuring element with a radius of one pixel.
-        MatrixXd mo = eigen::matrixOpen(ZIlocal, radius);
-
-        // An elevation threshold is then calculated, where the value is equal
-        // to the supplied slope tolerance parameter multiplied by the product
-        // of the window radius and the cell size. For example, if the user
-        // supplied a slope tolerance parameter of 15%, a cell size of 2m per
-        // pixel, the elevation threshold would be 0.3m at a window of one pixel
-        // (0.15 * 1 * 2).
-        double threshold = slope * cell_size * radius;
-
-        // This elevation threshold is applied to the difference of the minimum
-        // and the opened surfaces.
-        MatrixXd diff = ZIlocal - mo;
-
-        // Any grid cell with a difference value exceeding the calculated
-        // elevation threshold for the iteration is then flagged as an OBJ cell.
-        for (int i = 0; i < diff.size(); ++i)
-        {
-            if (diff(i) > threshold)
-                Obj(i) = 1;
-        }
-        // eigen::writeMatrix(Obj, "obj.tif", "GTiff", m_cellSize, bounds, srs);
-
-        // The algorithm then proceeds to the next window radius (up to the
-        // maximum), and proceeds as above with the last opened surface acting
-        // as the "minimum surface" for the next difference calculation.
-        ZIlocal = mo;
-
-        log()->get(LogLevel::Info) << "progressiveFilter: Radius = " << radius
-                                   << ", " << Obj.sum() << " object pixels\n";
+        std::string fname = FileUtils::toAbsolutePath("zilow.tif", m_dir);
+        MatrixXi Low = Map<MatrixXi>(LowV.data(), m_rows, m_cols);
+        writeMatrix(Low.cast<double>(), fname, "GTiff", m_cell, m_bounds,
+                    m_srs);
     }
 
-    return Obj;
+    return LowV;
 }
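
The removed progressiveFilter() above documents the core loop that the new
vector-based overload still performs: open the surface with a growing window,
flag cells whose drop under opening exceeds slope * cell * radius, and feed the
opened surface into the next iteration. A condensed sketch of that loop; the
square structuring element stands in for the paper's disk, and
progressiveFilterSketch is an illustrative name, not PDAL API:

    #include <Eigen/Dense>

    #include <algorithm>
    #include <cmath>

    using Eigen::MatrixXd;
    using Eigen::MatrixXi;

    // Windowed min (erode) or max (dilate) over a square window.
    static MatrixXd morph(const MatrixXd& m, int radius, bool erode)
    {
        int rows = (int)m.rows(), cols = (int)m.cols();
        MatrixXd out(rows, cols);
        for (int c = 0; c < cols; ++c)
            for (int r = 0; r < rows; ++r)
            {
                int cs = std::max(0, c - radius), ce = std::min(cols - 1, c + radius);
                int rs = std::max(0, r - radius), re = std::min(rows - 1, r + radius);
                auto window = m.block(rs, cs, re - rs + 1, ce - cs + 1);
                out(r, c) = erode ? window.minCoeff() : window.maxCoeff();
            }
        return out;
    }

    // Opening = erosion followed by dilation (cf. eigen::matrixOpen in
    // the removed code).
    static MatrixXd morphOpen(const MatrixXd& m, int radius)
    {
        return morph(morph(m, radius, true), radius, false);
    }

    // The loop documented in the removed progressiveFilter(): grow the
    // window, flag cells whose drop exceeds slope * cell * radius, then
    // reuse the opened surface for the next iteration.
    MatrixXi progressiveFilterSketch(MatrixXd surface, double cell,
                                     double slope, double maxWindow)
    {
        MatrixXi obj = MatrixXi::Zero(surface.rows(), surface.cols());
        int maxRadius = (int)std::ceil(maxWindow / cell);
        for (int radius = 1; radius <= maxRadius; ++radius)
        {
            MatrixXd opened = morphOpen(surface, radius);
            double threshold = slope * cell * radius;
            for (int i = 0; i < (int)surface.size(); ++i)
                if (surface(i) - opened(i) > threshold)
                    obj(i) = 1;
            surface = opened;
        }
        return obj;
    }
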
 
-PointViewSet SMRFilter::run(PointViewPtr view)
+std::vector<int> SMRFilter::createNetMask()
 {
-    log()->get(LogLevel::Info) << "run: Process SMRFilter...\n";
-
-    std::vector<PointId> idx = processGround(view);
-
-    PointViewSet viewSet;
-
-    if (!idx.empty() && (m_classify || m_extract))
+    // "To accommodate the removal of [very large buildings on highly
+    // differentiated terrain], we implemented a feature in the published SMRF
+    // algorithm which is helpful in removing such features. We accomplish this
+    // by introducing into the initial minimum surface a "net" of minimum values
+    // at a spacing equal to the maximum window diameter, where these minimum
+    // values are found by applying a morphological open operation with a disk
+    // shaped structuring element of radius (2*wkmax)."
+    std::vector<int> isNetCell(m_rows * m_cols, 0);
+    if (m_cut > 0.0)
     {
+        int v = std::ceil(m_cut / m_cell);
 
-        if (m_classify)
+        for (auto c = 0; c < m_cols; c += v)
         {
-            log()->get(LogLevel::Info) << "run: Labeled " << idx.size() << " ground returns!\n";
-
-            // set the classification label of ground returns as 2
-            // (corresponding to ASPRS LAS specification)
-            for (const auto& i : idx)
+            for (auto r = 0; r < m_rows; ++r)
             {
-                view->setField(Dimension::Id::Classification, i, 2);
+                isNetCell[c * m_rows + r] = 1;
             }
-
-            viewSet.insert(view);
         }
-
-        if (m_extract)
+        for (auto c = 0; c < m_cols; ++c)
         {
-            log()->get(LogLevel::Info) << "run: Extracted " << idx.size() << " ground returns!\n";
-
-            // create new PointView containing only ground returns
-            PointViewPtr output = view->makeNew();
-            for (const auto& i : idx)
+            for (auto r = 0; r < m_rows; r += v)
             {
-                output->appendPoint(*view, i);
+                isNetCell[c * m_rows + r] = 1;
             }
-
-            viewSet.erase(view);
-            viewSet.insert(output);
         }
     }
-    else
-    {
-        if (idx.empty())
-            log()->get(LogLevel::Info) << "run: Filtered cloud has no ground returns!\n";
 
-        if (!(m_classify || m_extract))
-            log()->get(LogLevel::Info) << "run: Must choose --classify or --extract\n";
+    return isNetCell;
+}
 
-        // return the view buffer unchanged
-        viewSet.insert(view);
+std::vector<int> SMRFilter::createObjMask(std::vector<double> const& ZImin)
+{
+    // "The second stage of the ground identification algorithm involves the
+    // application of a progressive morphological filter to the minimum surface
+    // grid (ZImin)."
+    std::vector<int> ObjV = progressiveFilter(ZImin, m_slope, m_window);
+
+    if (!m_dir.empty())
+    {
+        std::string fname = FileUtils::toAbsolutePath("ziobj.tif", m_dir);
+        MatrixXi Obj = Map<MatrixXi>(ObjV.data(), m_rows, m_cols);
+        writeMatrix(Obj.cast<double>(), fname, "GTiff", m_cell, m_bounds,
+                    m_srs);
     }
 
-    return viewSet;
+    return ObjV;
 }
 
-MatrixXd SMRFilter::TPS(MatrixXd cx, MatrixXd cy, MatrixXd cz)
+std::vector<double> SMRFilter::createZImin(PointViewPtr view)
 {
-    log()->get(LogLevel::Info) << "TPS: Reticulating splines...\n";
-
-    MatrixXd S = cz;
+    using namespace Dimension;
 
-    int num_nan_detect(0);
-    int num_nan_replace(0);
+    // "As with many other ground filtering algorithms, the first step is
+    // generation of ZImin from the cell size parameter and the extent of the
+    // data."
+    std::vector<double> ZIminV(m_rows * m_cols,
+                               std::numeric_limits<double>::quiet_NaN());
 
-    for (auto outer_col = 0; outer_col < m_numCols; ++outer_col)
+    for (PointId i = 0; i < view->size(); ++i)
     {
-        for (auto outer_row = 0; outer_row < m_numRows; ++outer_row)
-        {
-            if (!std::isnan(S(outer_row, outer_col)))
-                continue;
+        double x = view->getFieldAs<double>(Id::X, i);
+        double y = view->getFieldAs<double>(Id::Y, i);
+        double z = view->getFieldAs<double>(Id::Z, i);
 
-            num_nan_detect++;
+        int c = static_cast<int>(std::floor((x - m_bounds.minx) / m_cell));
+        int r = static_cast<int>(std::floor((y - m_bounds.miny) / m_cell));
 
-            // Further optimizations are achieved by estimating only the
-            // interpolated surface within a local neighbourhood (e.g. a 7 x 7
-            // neighbourhood is used in our case) of the cell being filtered.
-            int radius = 3;
+        if (z < ZIminV[c * m_rows + r] || std::isnan(ZIminV[c * m_rows + r]))
+            ZIminV[c * m_rows + r] = z;
+    }
 
-            int cs = Utils::clamp(outer_col-radius, 0, m_numCols-1);
-            int ce = Utils::clamp(outer_col+radius, 0, m_numCols-1);
-            int col_size = ce - cs + 1;
-            int rs = Utils::clamp(outer_row-radius, 0, m_numRows-1);
-            int re = Utils::clamp(outer_row+radius, 0, m_numRows-1);
-            int row_size = re - rs + 1;
+    // "...some grid points of ZImin will go unfilled. To fill these values, we
+    // rely on computationally inexpensive image inpainting techniques. Image
+    // inpainting involves the replacement of the empty cells in an image (or
+    // matrix) with values calculated from other nearby values."
+    std::vector<double> ZImin_fillV = knnfill(view, ZIminV);
 
-            MatrixXd Xn = cx.block(rs, cs, row_size, col_size);
-            MatrixXd Yn = cy.block(rs, cs, row_size, col_size);
-            MatrixXd Hn = cz.block(rs, cs, row_size, col_size);
+    if (!m_dir.empty())
+    {
+        std::string fname = FileUtils::toAbsolutePath("zimin.tif", m_dir);
+        MatrixXd ZImin = Map<MatrixXd>(ZIminV.data(), m_rows, m_cols);
+        writeMatrix(ZImin, fname, "GTiff", m_cell, m_bounds, m_srs);
 
-            int nsize = Hn.size();
-            VectorXd T = VectorXd::Zero(nsize);
-            MatrixXd P = MatrixXd::Zero(nsize, 3);
-            MatrixXd K = MatrixXd::Zero(nsize, nsize);
+        fname = FileUtils::toAbsolutePath("zimin_fill.tif", m_dir);
+        MatrixXd ZImin_fill = Map<MatrixXd>(ZImin_fillV.data(), m_rows, m_cols);
+        writeMatrix(ZImin_fill, fname, "GTiff", m_cell, m_bounds, m_srs);
+    }
 
-            int numK(0);
-            for (auto id = 0; id < Hn.size(); ++id)
+    return ZImin_fillV;
+}
+
+std::vector<double> SMRFilter::createZInet(std::vector<double> const& ZImin,
+                                           std::vector<int> const& isNetCell)
+{
+    // "To accommodate the removal of [very large buildings on highly
+    // differentiated terrain], we implemented a feature in the published SMRF
+    // algorithm which is helpful in removing such features. We accomplish this
+    // by introducing into the initial minimum surface a "net" of minimum values
+    // at a spacing equal to the maximum window diameter, where these minimum
+    // values are found by applying a morphological open operation with a disk
+    // shaped structuring element of radius (2*wkmax)."
+    std::vector<double> ZInetV = ZImin;
+    if (m_cut > 0.0)
+    {
+        int v = std::ceil(m_cut / m_cell);
+        std::vector<double> bigErode =
+            erodeDiamond(ZImin, m_rows, m_cols, 2 * v);
+        std::vector<double> bigOpen =
+            dilateDiamond(bigErode, m_rows, m_cols, 2 * v);
+        for (auto c = 0; c < m_cols; ++c)
+        {
+            for (auto r = 0; r < m_rows; ++r)
             {
-                double xj = Xn(id);
-                double yj = Yn(id);
-                double zj = Hn(id);
-                if (std::isnan(zj))
-                    continue;
-                numK++;
-                T(id) = zj;
-                P.row(id) << 1, xj, yj;
-                for (auto id2 = 0; id2 < Hn.size(); ++id2)
+                if (isNetCell[c * m_rows + r] == 1)
                 {
-                    if (id == id2)
-                        continue;
-                    double xk = Xn(id2);
-                    double yk = Yn(id2);
-                    double rsqr = (xj - xk) * (xj - xk) + (yj - yk) * (yj - yk);
-                    if (rsqr == 0.0)
-                        continue;
-                    K(id, id2) = rsqr * std::log10(std::sqrt(rsqr));
+                    ZInetV[c * m_rows + r] = bigOpen[c * m_rows + r];
                 }
             }
+        }
+    }
 
-            if (numK < 20)
-                continue;
-
-            MatrixXd A = MatrixXd::Zero(nsize+3, nsize+3);
-            A.block(0,0,nsize,nsize) = K;
-            A.block(0,nsize,nsize,3) = P;
-            A.block(nsize,0,3,nsize) = P.transpose();
+    if (!m_dir.empty())
+    {
+        std::string fname = FileUtils::toAbsolutePath("zinet.tif", m_dir);
+        MatrixXd ZInet = Map<MatrixXd>(ZInetV.data(), m_rows, m_cols);
+        writeMatrix(ZInet, fname, "GTiff", m_cell, m_bounds, m_srs);
+    }
 
-            VectorXd b = VectorXd::Zero(nsize+3);
-            b.head(nsize) = T;
+    return ZInetV;
+}
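
createZInet() replaces net cells with a heavily opened surface (erodeDiamond
followed by dilateDiamond at radius 2*v). Opening flattens highs narrower than
the window while leaving the surrounding level intact, which is why it suppresses
building-sized features; a one-dimensional trace, with illustrative names, makes
this visible:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // 1-D grayscale erosion (windowed min) and dilation (windowed max),
    // enough to demonstrate why opening removes narrow highs.
    std::vector<double> morph1d(const std::vector<double>& v, int radius,
                                bool erode)
    {
        std::vector<double> out(v.size());
        for (int i = 0; i < (int)v.size(); ++i)
        {
            int s = std::max(0, i - radius);
            int e = std::min((int)v.size() - 1, i + radius);
            double best = v[s];
            for (int j = s + 1; j <= e; ++j)
                best = erode ? std::min(best, v[j]) : std::max(best, v[j]);
            out[i] = best;
        }
        return out;
    }

    int main()
    {
        // Flat terrain at 0 with a 2-cell-wide "building" at height 10.
        std::vector<double> z{0, 0, 0, 10, 10, 0, 0, 0};
        auto opened = morph1d(morph1d(z, 2, true), 2, false);
        for (double v : opened)
            std::printf("%g ", v);  // prints all zeros: the high is gone
        std::printf("\n");
        return 0;
    }
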
 
-            VectorXd x = A.fullPivHouseholderQr().solve(b);
+std::vector<double> SMRFilter::createZIpro(PointViewPtr view,
+                                           std::vector<double> const& ZImin,
+                                           std::vector<int> const& Low,
+                                           std::vector<int> const& isNetCell,
+                                           std::vector<int> const& Obj)
+{
+    // "The end result of the iteration process described above is a binary grid
+    // where each cell is classified as being either bare earth (BE) or object
+    // (OBJ). The algorithm then applies this mask to the starting minimum
+    // surface to eliminate nonground cells."
+    std::vector<double> ZIproV = ZImin;
+    for (size_t i = 0; i < Obj.size(); ++i)
+    {
+        if (Obj[i] == 1 || Low[i] == 1 || isNetCell[i] == 1)
+            ZIproV[i] = std::numeric_limits<double>::quiet_NaN();
+    }
 
-            Vector3d a = x.tail(3);
-            VectorXd w = x.head(nsize);
+    // "These cells are then inpainted according to the same process described
+    // previously, producing a provisional DEM (ZIpro)."
+    std::vector<double> ZIpro_fillV = knnfill(view, ZIproV);
 
-            double sum = 0.0;
-            double xi2 = cx(outer_row, outer_col);
-            double yi2 = cy(outer_row, outer_col);
-            for (auto j = 0; j < nsize; ++j)
-            {
-                double xj = Xn(j);
-                double yj = Yn(j);
-                double rsqr = (xj - xi2) * (xj - xi2) + (yj - yi2) * (yj - yi2);
-                if (rsqr == 0.0)
-                    continue;
-                sum += w(j) * rsqr * std::log10(std::sqrt(rsqr));
-            }
+    if (!m_dir.empty())
+    {
+        std::string fname = FileUtils::toAbsolutePath("zipro.tif", m_dir);
+        MatrixXd ZIpro = Map<MatrixXd>(ZIproV.data(), m_rows, m_cols);
+        writeMatrix(ZIpro, fname, "GTiff", m_cell, m_bounds, m_srs);
 
-            S(outer_row, outer_col) = a(0) + a(1)*xi2 + a(2)*yi2 + sum;
-
-            if (!std::isnan(S(outer_row, outer_col)))
-                num_nan_replace++;
-
-            // std::cerr << std::fixed;
-            // std::cerr << std::setprecision(3)
-            //           << std::left
-            //           << "S(" << outer_row << "," << outer_col << "): "
-            //           << std::setw(10)
-            //           << S(outer_row, outer_col)
-            //           // << std::setw(3)
-            //           // << "\tz: "
-            //           // << std::setw(10)
-            //           // << zi
-            //           // << std::setw(7)
-            //           // << "\tzdiff: "
-            //           // << std::setw(5)
-            //           // << zi - S(outer_row, outer_col)
-            //           // << std::setw(7)
-            //           // << "\txdiff: "
-            //           // << std::setw(5)
-            //           // << xi2 - xi
-            //           // << std::setw(7)
-            //           // << "\tydiff: "
-            //           // << std::setw(5)
-            //           // << yi2 - yi
-            //           << std::setw(7)
-            //           << "\t# pts: "
-            //           << std::setw(3)
-            //           << nsize
-            //           << std::setw(5)
-            //           << "\tsum: "
-            //           << std::setw(10)
-            //           << sum
-            //           << std::setw(9)
-            //           << "\tw.sum(): "
-            //           << std::setw(5)
-            //           << w.sum()
-            //           << std::setw(6)
-            //           << "\txsum: "
-            //           << std::setw(5)
-            //           << w.dot(P.col(1))
-            //           << std::setw(6)
-            //           << "\tysum: "
-            //           << std::setw(5)
-            //           << w.dot(P.col(2))
-            //           << std::setw(3)
-            //           << "\ta: "
-            //           << std::setw(8)
-            //           << a.transpose()
-            //           << std::endl;
-        }
+        fname = FileUtils::toAbsolutePath("zipro_fill.tif", m_dir);
+        MatrixXd ZIpro_fill = Map<MatrixXd>(ZIpro_fillV.data(), m_rows, m_cols);
+        writeMatrix(ZIpro_fill, fname, "GTiff", m_cell, m_bounds, m_srs);
     }
 
-    double frac = static_cast<double>(num_nan_replace);
-    frac /= static_cast<double>(num_nan_detect);
-    log()->get(LogLevel::Info) << "TPS: Filled " << num_nan_replace << " of "
-                               << num_nan_detect << " holes ("
-                               << frac * 100.0 << "%)\n";
-
-    return S;
+    return ZIpro_fillV;
 }
 
-MatrixXd SMRFilter::expandingTPS(MatrixXd cx, MatrixXd cy, MatrixXd cz)
+// Fill voids with the average of eight nearest neighbors.
+std::vector<double> SMRFilter::knnfill(PointViewPtr view,
+                                       std::vector<double> const& cz)
 {
-    log()->get(LogLevel::Info) << "TPS: Reticulating splines...\n";
+    // Create a temporary PointView that encodes our raster values so that we
+    // can construct a 2D KDIndex and perform nearest neighbor searches.
+    PointViewPtr temp = view->makeNew();
+    PointId i(0);
+    for (int c = 0; c < m_cols; ++c)
+    {
+        for (int r = 0; r < m_rows; ++r)
+        {
+            if (std::isnan(cz[c * m_rows + r]))
+                continue;
 
-    MatrixXd S = cz;
+            temp->setField(Id::X, i, m_bounds.minx + (c + 0.5) * m_cell);
+            temp->setField(Id::Y, i, m_bounds.miny + (r + 0.5) * m_cell);
+            temp->setField(Id::Z, i, cz[c * m_rows + r]);
+            i++;
+        }
+    }
 
-    int num_nan_detect(0);
-    int num_nan_replace(0);
+    KD2Index kdi(*temp);
+    kdi.build();
 
-    for (auto outer_col = 0; outer_col < m_numCols; ++outer_col)
+    // Where the raster has voids (i.e., NaN), we search for that cell's eight
+    // nearest neighbors, and fill the void with the average value of the
+    // neighbors.
+    std::vector<double> out = cz;
+    for (int c = 0; c < m_cols; ++c)
     {
-        for (auto outer_row = 0; outer_row < m_numRows; ++outer_row)
+        for (int r = 0; r < m_rows; ++r)
         {
-            if (!std::isnan(S(outer_row, outer_col)))
+            if (!std::isnan(out[c * m_rows + r]))
                 continue;
 
-            num_nan_detect++;
+            double x = m_bounds.minx + (c + 0.5) * m_cell;
+            double y = m_bounds.miny + (r + 0.5) * m_cell;
+            int k = 8;
+            std::vector<PointId> neighbors(k);
+            std::vector<double> sqr_dists(k);
+            kdi.knnSearch(x, y, k, &neighbors, &sqr_dists);
 
-            // Further optimizations are achieved by estimating only the
-            // interpolated surface within a local neighbourhood (e.g. a 7 x 7
-            // neighbourhood is used in our case) of the cell being filtered.
-            int radius = 3;
-            bool solution = false;
-
-            while (!solution)
+            double M1(0.0);
+            size_t j(0);
+            for (auto const& n : neighbors)
             {
-                // std::cerr << radius;
-                int cs = Utils::clamp(outer_col-radius, 0, m_numCols-1);
-                int ce = Utils::clamp(outer_col+radius, 0, m_numCols-1);
-                int col_size = ce - cs + 1;
-                int rs = Utils::clamp(outer_row-radius, 0, m_numRows-1);
-                int re = Utils::clamp(outer_row+radius, 0, m_numRows-1);
-                int row_size = re - rs + 1;
-
-                MatrixXd Xn = cx.block(rs, cs, row_size, col_size);
-                MatrixXd Yn = cy.block(rs, cs, row_size, col_size);
-                MatrixXd Hn = cz.block(rs, cs, row_size, col_size);
-
-                int nsize = Hn.size();
-                VectorXd T = VectorXd::Zero(nsize);
-                MatrixXd P = MatrixXd::Zero(nsize, 3);
-                MatrixXd K = MatrixXd::Zero(nsize, nsize);
-
-                int numK(0);
-                for (auto id = 0; id < Hn.size(); ++id)
-                {
-                    double xj = Xn(id);
-                    double yj = Yn(id);
-                    double zj = Hn(id);
-                    if (std::isnan(zj))
-                        continue;
-                    numK++;
-                    T(id) = zj;
-                    P.row(id) << 1, xj, yj;
-                    for (auto id2 = 0; id2 < Hn.size(); ++id2)
-                    {
-                        if (id == id2)
-                            continue;
-                        double xk = Xn(id2);
-                        double yk = Yn(id2);
-                        double rsqr = (xj - xk) * (xj - xk) + (yj - yk) * (yj - yk);
-                        if (rsqr == 0.0)
-                            continue;
-                        K(id, id2) = rsqr * std::log10(std::sqrt(rsqr));
-                    }
-                }
-
-                // if (numK < 20)
-                //     continue;
-
-                MatrixXd A = MatrixXd::Zero(nsize+3, nsize+3);
-                A.block(0,0,nsize,nsize) = K;
-                A.block(0,nsize,nsize,3) = P;
-                A.block(nsize,0,3,nsize) = P.transpose();
-
-                VectorXd b = VectorXd::Zero(nsize+3);
-                b.head(nsize) = T;
-
-                VectorXd x = A.fullPivHouseholderQr().solve(b);
-
-                Vector3d a = x.tail(3);
-                VectorXd w = x.head(nsize);
-
-                double sum = 0.0;
-                double xi2 = cx(outer_row, outer_col);
-                double yi2 = cy(outer_row, outer_col);
-                for (auto j = 0; j < nsize; ++j)
-                {
-                    double xj = Xn(j);
-                    double yj = Yn(j);
-                    double rsqr = (xj - xi2) * (xj - xi2) + (yj - yi2) * (yj - yi2);
-                    if (rsqr == 0.0)
-                        continue;
-                    sum += w(j) * rsqr * std::log10(std::sqrt(rsqr));
-                }
-
-                double val = a(0) + a(1)*xi2 + a(2)*yi2 + sum;
-                solution = !std::isnan(val);
-
-                if (!solution)
-                {
-                    std::cerr << "..." << radius << std::endl;;
-                    ++radius;
-                    continue;
-                }
-
-                S(outer_row, outer_col) = val;
-                num_nan_replace++;
-
-                // std::cerr << std::endl;
-
-                // std::cerr << std::fixed;
-                // std::cerr << std::setprecision(3)
-                //           << std::left
-                //           << "S(" << outer_row << "," << outer_col << "): "
-                //           << std::setw(10)
-                //           << S(outer_row, outer_col)
-                //           // << std::setw(3)
-                //           // << "\tz: "
-                //           // << std::setw(10)
-                //           // << zi
-                //           // << std::setw(7)
-                //           // << "\tzdiff: "
-                //           // << std::setw(5)
-                //           // << zi - S(outer_row, outer_col)
-                //           // << std::setw(7)
-                //           // << "\txdiff: "
-                //           // << std::setw(5)
-                //           // << xi2 - xi
-                //           // << std::setw(7)
-                //           // << "\tydiff: "
-                //           // << std::setw(5)
-                //           // << yi2 - yi
-                //           << std::setw(7)
-                //           << "\t# pts: "
-                //           << std::setw(3)
-                //           << nsize
-                //           << std::setw(5)
-                //           << "\tsum: "
-                //           << std::setw(10)
-                //           << sum
-                //           << std::setw(9)
-                //           << "\tw.sum(): "
-                //           << std::setw(5)
-                //           << w.sum()
-                //           << std::setw(6)
-                //           << "\txsum: "
-                //           << std::setw(5)
-                //           << w.dot(P.col(1))
-                //           << std::setw(6)
-                //           << "\tysum: "
-                //           << std::setw(5)
-                //           << w.dot(P.col(2))
-                //           << std::setw(3)
-                //           << "\ta: "
-                //           << std::setw(8)
-                //           << a.transpose()
-                //           << std::endl;
+                j++;
+                double delta = temp->getFieldAs<double>(Id::Z, n) - M1;
+                M1 += (delta / j);
             }
+
+            out[c * m_rows + r] = M1;
         }
     }
 
-    double frac = static_cast<double>(num_nan_replace);
-    frac /= static_cast<double>(num_nan_detect);
-    log()->get(LogLevel::Info) << "TPS: Filled " << num_nan_replace << " of "
-                               << num_nan_detect << " holes ("
-                               << frac * 100.0 << "%)\n";
+    return out;
+}
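+
+// [Editor's illustrative sketch.] The neighbor loop above maintains a
+// running mean: after k values, M1 = M1_prev + (x_k - M1_prev) / k holds
+// the arithmetic mean of the first k values without a separate sum.
+static double incrementalMeanSketch(std::vector<double> const& values)
+{
+    double M1(0.0);
+    size_t k(0);
+    for (double v : values)
+    {
+        ++k;
+        M1 += (v - M1) / k;
+    }
+    return M1; // e.g., {1.0, 2.0, 6.0} yields 3.0
+}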
 
-    return S;
+// Iteratively open the estimated surface. progressiveFilter can be used to
+// identify both low points and object (i.e., non-ground) points, depending on
+// the inputs.
+std::vector<int> SMRFilter::progressiveFilter(std::vector<double> const& ZImin,
+                                              double slope, double max_window)
+{
+    // "The maximum window radius is supplied as a distance metric (e.g., 21 m),
+    // but is internally converted to a pixel equivalent by dividing it by the
+    // cell size and rounding the result toward positive infinity (i.e., taking
+    // the ceiling value)."
+    int max_radius = std::ceil(max_window / m_cell);
+    std::vector<double> prevSurface = ZImin;
+    std::vector<double> prevErosion = ZImin;
+
+    // "...the radius of the element at each step [is] increased by one pixel
+    // from a starting value of one pixel to the pixel equivalent of the maximum
+    // value."
+    std::vector<int> Obj(m_rows * m_cols, 0);
+    for (int radius = 1; radius <= max_radius; ++radius)
+    {
+        // "On the first iteration, the minimum surface (ZImin) is opened using
+        // a disk-shaped structuring element with a radius of one pixel."
+        std::vector<double> curErosion =
+            erodeDiamond(prevErosion, m_rows, m_cols, 1);
+        std::vector<double> curOpening =
+            dilateDiamond(curErosion, m_rows, m_cols, radius);
+        prevErosion = curErosion;
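+
+        // Because diamond elements compose (a radius-1 erosion of the
+        // previous erosion equals eroding ZImin by the full radius),
+        // curOpening is the morphological opening of ZImin at this radius;
+        // the diamond stands in for the paper's disk.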
+
+        // "An elevation threshold is then calculated, where the value is equal
+        // to the supplied slope tolerance parameter multiplied by the product
+        // of the window radius and the cell size."
+        double threshold = slope * m_cell * radius;
+
+        // "This elevation threshold is applied to the difference of the minimum
+        // and the opened surfaces."
+
+        // Compute the cell-by-cell absolute difference of the two surfaces.
+        std::vector<double> diff;
+        std::transform(prevSurface.begin(), prevSurface.end(),
+                       curOpening.begin(), std::back_inserter(diff),
+                       [](double l, double r) { return std::fabs(l - r); });
+
+        // "Any grid cell with a difference value exceeding the calculated
+        // elevation threshold for the iteration is then flagged as an OBJ
+        // cell."
+        std::vector<int> exceeds;
+        std::transform(diff.begin(), diff.end(), std::back_inserter(exceeds),
+                       [threshold](double x) {
+                           return (x > threshold) ? int(1) : int(0);
+                       });
+        std::transform(Obj.begin(), Obj.end(), exceeds.begin(), Obj.begin(),
+                       [](int a, int b) { return std::max(a, b); });
+
+        // "The algorithm then proceeds to the next window radius (up to the
+        // maximum), and proceeds as above with the last opened surface acting
+        // as the minimum surface for the next difference calculation."
+        prevSurface = curOpening;
+
+        size_t ng = std::count(Obj.begin(), Obj.end(), 1);
+        size_t g(Obj.size() - ng);
+        double p(100.0 * double(ng) / double(Obj.size()));
+        log()->floatPrecision(2);
+        log()->get(LogLevel::Debug) << "progressiveFilter: radius = " << radius
+                                    << "\t" << g << " ground"
+                                    << "\t" << ng << " non-ground"
+                                    << "\t(" << p << "%)\n";
+    }
+
+    return Obj;
 }
 
 } // namespace pdal
diff --git a/filters/SMRFilter.hpp b/filters/SMRFilter.hpp
index 23d27b6..68beda9 100644
--- a/filters/SMRFilter.hpp
+++ b/filters/SMRFilter.hpp
@@ -1,46 +1,45 @@
 /******************************************************************************
-* Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
+ * Copyright (c) 2016-2017, Bradley J Chambers (brad.chambers at gmail.com)
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+ *       names of its contributors may be used to endorse or promote
+ *       products derived from this software without specific prior
+ *       written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ ****************************************************************************/
 
 #pragma once
 
 #include <pdal/Filter.hpp>
 #include <pdal/plugin.hpp>
 
-#include <Eigen/Dense>
+#include "private/DimRange.hpp"
 
-#include <memory>
-#include <unordered_map>
+#include <string>
 
 extern "C" int32_t SMRFilter_ExitFunc();
 extern "C" PF_ExitFunc SMRFilter_InitPlugin();
@@ -48,56 +47,55 @@ extern "C" PF_ExitFunc SMRFilter_InitPlugin();
 namespace pdal
 {
 
-using namespace Eigen;
-
-class PointLayout;
-class PointView;
-
 class PDAL_DLL SMRFilter : public Filter
 {
 public:
     SMRFilter() : Filter()
-    {}
+    {
+    }
 
-    static void * create();
-    static int32_t destroy(void *);
+    static void* create();
+    static int32_t destroy(void*);
     std::string getName() const;
 
 private:
-    bool m_classify;
-    bool m_extract;
-    int m_numRows;
-    int m_numCols;
-    double m_cellSize;
-    double m_cutNet;
-    double m_percentSlope;
-    double m_maxWindow;
+    int m_rows;
+    int m_cols;
+    double m_cell;
+    double m_cut;
+    double m_slope;
+    double m_window;
     double m_scalar;
     double m_threshold;
-    std::string m_outDir;
+    std::string m_dir;
+    DimRange m_ignored;
+    bool m_lastOnly;
+    BOX2D m_bounds;
+    SpatialReference m_srs;
 
     virtual void addArgs(ProgramArgs& args);
     virtual void addDimensions(PointLayoutPtr layout);
+    virtual void prepared(PointTableRef table);
     virtual void ready(PointTableRef table);
-
-    MatrixXd inpaintKnn(MatrixXd cx, MatrixXd cy, MatrixXd cz);
-
-    // processGround implements the SMRF algorithm, returning a vector
-    // of ground indices.
-    std::vector<PointId> processGround(PointViewPtr view);
-
-    // progressiveFilter is the core of the SMRF algorithm.
-    MatrixXi progressiveFilter(MatrixXd const& ZImin, double cell_size,
-                               double slope, double max_window);
-
     virtual PointViewSet run(PointViewPtr view);
 
-    // TPS returns an interpolated matrix using thin plate splines.
-    MatrixXd TPS(MatrixXd cx, MatrixXd cy, MatrixXd cz);
-    MatrixXd expandingTPS(MatrixXd cx, MatrixXd cy, MatrixXd cz);
+    void classifyGround(PointViewPtr, std::vector<double>&);
+    std::vector<int> createLowMask(std::vector<double> const&);
+    std::vector<int> createNetMask();
+    std::vector<int> createObjMask(std::vector<double> const&);
+    std::vector<double> createZImin(PointViewPtr view);
+    std::vector<double> createZInet(std::vector<double> const&,
+                                    std::vector<int> const&);
+    std::vector<double> createZIpro(PointViewPtr, std::vector<double> const&,
+                                    std::vector<int> const&,
+                                    std::vector<int> const&,
+                                    std::vector<int> const&);
+    std::vector<double> knnfill(PointViewPtr, std::vector<double> const&);
+    std::vector<int> progressiveFilter(std::vector<double> const&, double,
+                                       double);
 
     SMRFilter& operator=(const SMRFilter&); // not implemented
-    SMRFilter(const SMRFilter&); // not implemented
+    SMRFilter(const SMRFilter&);            // not implemented
 };
 
 } // namespace pdal
diff --git a/filters/SampleFilter.cpp b/filters/SampleFilter.cpp
index e6a5a47..d1f6beb 100644
--- a/filters/SampleFilter.cpp
+++ b/filters/SampleFilter.cpp
@@ -95,7 +95,7 @@ PointViewSet SampleFilter::run(PointViewPtr inView)
     // neighbors within the user-specified radius, their value is changed to 0.
     std::vector<int> keep(np, 1);
 
-    // We are able to subsample in a single pass over the shufflled indices.
+    // We are able to subsample in a single pass over the shuffled indices.
     for (auto const& i : indices)
     {
         // If a point is masked, it is forever masked, and cannot be part of the
diff --git a/filters/SortFilter.cpp b/filters/SortFilter.cpp
index 95408b7..2f3e642 100644
--- a/filters/SortFilter.cpp
+++ b/filters/SortFilter.cpp
@@ -47,5 +47,57 @@ CREATE_STATIC_PLUGIN(1, 0, SortFilter, Filter, s_info)
 
 std::string SortFilter::getName() const { return s_info.name; }
 
+void SortFilter::addArgs(ProgramArgs& args)
+{
+    args.add("dimension", "Dimension on which to sort", m_dimName).
+        setPositional();
+    args.add("order", "Sort order ASC(ending) or DESC(ending)", m_order, SortOrder::ASC);
+}
+
+void SortFilter::prepared(PointTableRef table)
+{
+    m_dim = table.layout()->findDim(m_dimName);
+    if (m_dim == Dimension::Id::Unknown)
+        throwError("Dimension '" + m_dimName + "' not found.");
+}
+
+void SortFilter::filter(PointView& view)
+{
+    auto cmp = [this](const PointIdxRef& p1, const PointIdxRef& p2)
+    {
+        // For descending order, swap the operands rather than negating the
+        // result; negation would return true for equal values and violate
+        // the strict weak ordering std::stable_sort requires.
+        if (m_order == SortOrder::ASC)
+            return p1.compare(m_dim, p2);
+        return p2.compare(m_dim, p1);
+    };
+
+    std::stable_sort(view.begin(), view.end(), cmp);
+}
+
+std::istream& operator >> (std::istream& in, SortOrder& order)
+{
+    std::string s;
+
+    in >> s;
+    s = Utils::toupper(s);
+    if (s == "ASC")
+        order = SortOrder::ASC;
+    else if (s == "DESC")
+        order = SortOrder::DESC;
+    else
+        in.setstate(std::ios::failbit);
+    return in;
+}
+
+std::ostream& operator<<(std::ostream& out, const SortOrder& order)
+{
+    switch (order)
+    {
+    case SortOrder::ASC:
+        out << "ASC";
+        break;
+    case SortOrder::DESC:
+        out << "DESC";
+        break;
+    }
+    return out;
+}
+
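+// [Editor's illustrative sketch.] Round trip through the operators above;
+// assumes <sstream> is available in this translation unit.
+static SortOrder parseSortOrderSketch(const std::string& s)
+{
+    SortOrder order(SortOrder::ASC);
+    std::istringstream iss(s);
+    iss >> order; // "asc"/"desc" in any case; failbit set otherwise
+    return order;
+}
+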
 } // namespace pdal
 
diff --git a/filters/SortFilter.hpp b/filters/SortFilter.hpp
index ad3649c..bb237bb 100644
--- a/filters/SortFilter.hpp
+++ b/filters/SortFilter.hpp
@@ -45,6 +45,16 @@ extern "C" PF_ExitFunc SortFilter_InitPlugin();
 namespace pdal
 {
 
+enum class SortOrder
+{
+    ASC, // ascending
+    DESC // descending
+};
+
+std::istream& operator >> (std::istream& in, SortOrder& order);
+std::ostream& operator << (std::ostream& out, const SortOrder& order);
+
+
 class PDAL_DLL SortFilter : public Filter
 {
 public:
@@ -61,31 +71,12 @@ private:
     // Dimension name.
     std::string m_dimName;
 
-    virtual void addArgs(ProgramArgs& args)
-    {
-        args.add("dimension", "Dimension on which to sort", m_dimName).
-            setPositional();
-    }
-
-    virtual void prepared(PointTableRef table)
-    {
-        m_dim = table.layout()->findDim(m_dimName);
-        if (m_dim == Dimension::Id::Unknown)
-        {
-            std::ostringstream oss;
-            oss << getName() << ": Invalid sort dimension '" << m_dimName <<
-                "'.";
-            throw oss.str();
-        }
-    }
-
-    virtual void filter(PointView& view)
-    {
-        auto cmp = [this](const PointIdxRef& p1, const PointIdxRef& p2)
-            { return p1.compare(m_dim, p2); };
+    // Sort order.
+    SortOrder m_order;
 
-        std::sort(view.begin(), view.end(), cmp);
-    }
+    virtual void addArgs(ProgramArgs& args);
+    virtual void prepared(PointTableRef table);
+    virtual void filter(PointView& view);
 
     SortFilter& operator=(const SortFilter&) = delete;
     SortFilter(const SortFilter&) = delete;
diff --git a/filters/StatsFilter.cpp b/filters/StatsFilter.cpp
index 67839c4..4ddf740 100644
--- a/filters/StatsFilter.cpp
+++ b/filters/StatsFilter.cpp
@@ -261,8 +261,9 @@ void StatsFilter::extractMetadata(PointTableRef table)
         zs != m_stats.end() &&
         bNoPoints)
     {
-        BOX3D box(xs->second.minimum(), ys->second.minimum(), zs->second.minimum(),
-                  xs->second.maximum(), ys->second.maximum(), zs->second.maximum());
+        BOX3D box(xs->second.minimum(), ys->second.minimum(),
+            zs->second.minimum(), xs->second.maximum(), ys->second.maximum(),
+            zs->second.maximum());
         pdal::Polygon p(box);
 
         MetadataNode mbox = Utils::toMetadata(box);
@@ -273,7 +274,8 @@ void StatsFilter::extractMetadata(PointTableRef table)
         Json::Value json;
         jsonReader.parse(p.json(), json);
 
-        MetadataNode boundary = metadata.addWithType("boundary", json.toStyledString(), "json", "GeoJSON boundary");
+        MetadataNode boundary = metadata.addWithType("boundary",
+            json.toStyledString(), "json", "GeoJSON boundary");
         MetadataNode bbox = metadata.add(mbox);
         SpatialReference ref = table.anySpatialReference();
         // if we don't get an SRS from the PointTableRef,
@@ -292,9 +294,8 @@ void StatsFilter::extractMetadata(PointTableRef table)
             Json::Value json;
             jsonReader.parse(pdd.json(), json);
 
-            MetadataNode ddboundary = dddbox.addWithType("boundary", json.toStyledString(), "json", "GeoJSON boundary");
-
-
+            MetadataNode ddboundary = dddbox.addWithType("boundary",
+                json.toStyledString(), "json", "GeoJSON boundary");
         }
     }
 }
@@ -308,7 +309,7 @@ const Summary& StatsFilter::getStats(Dimension::Id dim) const
         if (d == dim)
             return di->second;
     }
-    throw pdal_error("Dimension not found");
+    throw pdal_error("filters.stats: Dimension not found.");
 }
 
 } // namespace pdal
diff --git a/filters/TransformationFilter.cpp b/filters/TransformationFilter.cpp
index eb135ce..3269a5b 100644
--- a/filters/TransformationFilter.cpp
+++ b/filters/TransformationFilter.cpp
@@ -64,7 +64,7 @@ TransformationMatrix transformationMatrixFromString(const std::string& s)
             std::stringstream msg;
             msg << "Too many entries in transformation matrix, should be "
                 << matrix.size();
-            throw pdal_error(msg.str());
+            throw pdal_error("filters.transformation: " + msg.str());
         }
         matrix[i++] = entry;
     }
@@ -78,7 +78,7 @@ TransformationMatrix transformationMatrixFromString(const std::string& s)
             << matrix.size()
             << ")";
 
-        throw pdal_error(msg.str());
+        throw pdal_error("filters.transformation: " + msg.str());
     }
 
     return matrix;
diff --git a/filters/private/DimRange.cpp b/filters/private/DimRange.cpp
new file mode 100644
index 0000000..83fddd0
--- /dev/null
+++ b/filters/private/DimRange.cpp
@@ -0,0 +1,168 @@
+/******************************************************************************
+ * Copyright (c) 2015, Brad Chambers (brad.chambers at gmail.com)
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+ *       names of its contributors may be used to endorse or promote
+ *       products derived from this software without specific prior
+ *       written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ ****************************************************************************/
+
+#include "DimRange.hpp"
+
+#include <pdal/util/Utils.hpp>
+
+namespace pdal
+{
+
+std::string::size_type DimRange::subParse(const std::string& r)
+{
+    bool& ilb(m_inclusive_lower_bound);
+    bool& iub(m_inclusive_upper_bound);
+    bool& negate(m_negate);
+    double& ub(m_upper_bound);
+    double& lb(m_lower_bound);
+    std::string& name(m_name);
+
+    std::string::size_type pos, count;
+    const char *start;
+    char *end;
+
+    ilb = true;
+    iub = true;
+    negate = false;
+    pos = 0;
+    // Skip leading whitespace.
+    count = Utils::extract(r, pos, (int(*)(int))std::isspace);
+    pos += count;
+
+    count = Utils::extract(r, pos, (int(*)(int))std::isalpha);
+    if (count == 0)
+        throw error("No dimension name.");
+    name = r.substr(pos, count);
+    pos += count;
+
+    if (r[pos] == '!')
+    {
+        negate = true;
+        pos++;
+    }
+
+    if (r[pos] == '(')
+        ilb = false;
+    else if (r[pos] != '[')
+        throw error("Missing '(' or '['.");
+    pos++;
+
+    // Extract lower bound.
+    start = r.data() + pos;
+    lb = std::strtod(start, &end);
+    if (start == end)
+        lb = std::numeric_limits<double>::lowest();
+    pos += (end - start);
+
+    count = Utils::extract(r, pos, (int(*)(int))std::isspace);
+    pos += count;
+
+    if (r[pos] != ':')
+        throw error("Missing ':' limit separator.");
+    pos++;
+
+    start = r.data() + pos;
+    ub = std::strtod(start, &end);
+    if (start == end)
+        ub = std::numeric_limits<double>::max();
+    pos += (end - start);
+
+    count = Utils::extract(r, pos, (int(*)(int))std::isspace);
+    pos += count;
+
+    if (r[pos] == ')')
+        iub = false;
+    else if (r[pos] != ']')
+        throw error("Missing ')' or ']'.");
+    pos++;
+
+    count = Utils::extract(r, pos, (int(*)(int))std::isspace);
+    pos += count;
+    return pos;
+}
+
+
+bool DimRange::valuePasses(double v) const
+{
+    // Determine if a point passes a range.
+    bool fail = ((m_inclusive_lower_bound && v < m_lower_bound) ||
+        (!m_inclusive_lower_bound && v <= m_lower_bound) ||
+        (m_inclusive_upper_bound && v > m_upper_bound) ||
+        (!m_inclusive_upper_bound && v >= m_upper_bound));
+    if (m_negate)
+        fail = !fail;
+    return !fail;
+}
+
+
+void DimRange::parse(const std::string& r)
+{
+    std::string::size_type pos = subParse(r);
+    if (pos != r.size())
+        throw error("Invalid characters following valid range.");
+}
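+
+// [Editor's illustrative sketch.] Examples of the range grammar accepted
+// by subParse/parse above:
+//   "Classification[2:7]"  passes 2 <= v <= 7
+//   "Z(1.5:]"              passes v > 1.5 (no upper bound given)
+//   "Intensity![0:0]"      negated: passes everything except v == 0
+static bool dimRangeSketch()
+{
+    DimRange r;
+    r.parse("Z[2:4)");
+    // 2.0 and 3.9 pass; 4.0 fails the exclusive upper bound.
+    return r.valuePasses(2.0) && r.valuePasses(3.9) && !r.valuePasses(4.0);
+}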
+
+
+bool operator < (const DimRange& r1, const DimRange& r2)
+{
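+    // Order primarily by dimension name, falling back to object address
+    // only as an arbitrary tiebreaker for distinct ranges with equal names.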
+    return (r1.m_name < r2.m_name ? true :
+        r1.m_name > r2.m_name ? false :
+        &r1 < &r2);
+}
+
+
+std::istream& operator>>(std::istream& in, DimRange& r)
+{
+    std::string s;
+
+    std::getline(in, s);
+    r.parse(s);
+    return in;
+}
+
+
+std::ostream& operator<<(std::ostream& out, const DimRange& r)
+{
+    out << (r.m_inclusive_lower_bound ? '[' : '(');
+    if (r.m_lower_bound != std::numeric_limits<double>::lowest())
+        out << r.m_lower_bound;
+    out << ':';
+    if (r.m_upper_bound != std::numeric_limits<double>::max())
+        out << r.m_upper_bound;
+    out << (r.m_inclusive_upper_bound ? ']' : ')');
+    return out;
+}
+
+} // namespace pdal
+
diff --git a/filters/OutlierFilter.hpp b/filters/private/DimRange.hpp
similarity index 57%
copy from filters/OutlierFilter.hpp
copy to filters/private/DimRange.hpp
index 7f42a7f..ca67a67 100644
--- a/filters/OutlierFilter.hpp
+++ b/filters/private/DimRange.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
+ * Copyright (c) 2015, Bradley J Chambers (brad.chambers at gmail.com)
  *
  * All rights reserved.
  *
@@ -34,54 +34,57 @@
 
 #pragma once
 
-#include <pdal/Filter.hpp>
-#include <pdal/plugin.hpp>
-
-#include <memory>
-#include <map>
 #include <string>
 
-extern "C" int32_t OutlierFilter_ExitFunc();
-extern "C" PF_ExitFunc OutlierFilter_InitPlugin();
+#include <pdal/Dimension.hpp>
 
 namespace pdal
 {
 
-class Options;
-
-struct Indices
+struct DimRange
 {
-    std::vector<PointId> inliers;
-    std::vector<PointId> outliers;
-};
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
 
-class PDAL_DLL OutlierFilter : public pdal::Filter
-{
-public:
-    OutlierFilter() : Filter()
+    DimRange(const std::string name,
+        double lower_bound,
+        double upper_bound,
+        bool inclusive_lower_bound,
+        bool inclusive_upper_bound,
+        bool negate) :
+    m_name(name), m_id(Dimension::Id::Unknown),
+    m_lower_bound(lower_bound), m_upper_bound(upper_bound),
+    m_inclusive_lower_bound(inclusive_lower_bound),
+    m_inclusive_upper_bound(inclusive_upper_bound),
+    m_negate(negate)
     {}
 
-    static void * create();
-    static int32_t destroy(void *);
-    std::string getName() const;
+    DimRange() : m_id(Dimension::Id::Unknown), m_lower_bound(0),
+        m_upper_bound(0), m_inclusive_lower_bound(true),
+        m_inclusive_upper_bound(true), m_negate(false)
+    {}
 
-private:
-    std::string m_method;
-    int m_minK;
-    double m_radius;
-    int m_meanK;
-    double m_multiplier;
-    bool m_classify;
-    bool m_extract;
+    void parse(const std::string& s);
+    bool valuePasses(double d) const;
 
-    virtual void addDimensions(PointLayoutPtr layout);
-    virtual void addArgs(ProgramArgs& args);
-    Indices processRadius(PointViewPtr inView);
-    Indices processStatistical(PointViewPtr inView);
-    virtual PointViewSet run(PointViewPtr view);
+    std::string m_name;
+    Dimension::Id m_id;
+    double m_lower_bound;
+    double m_upper_bound;
+    bool m_inclusive_lower_bound;
+    bool m_inclusive_upper_bound;
+    bool m_negate;
 
-    OutlierFilter& operator=(const OutlierFilter&); // not implemented
-    OutlierFilter(const OutlierFilter&); // not implemented
+protected:
+    std::string::size_type subParse(const std::string& r);
 };
 
+bool operator < (const DimRange& r1, const DimRange& r2);
+std::istream& operator>>(std::istream& in, DimRange& r);
+std::ostream& operator<<(std::ostream& out, const DimRange& r);
+
+
 } // namespace pdal
diff --git a/filters/private/crop/Point.cpp b/filters/private/crop/Point.cpp
index d1f476e..072f5e9 100644
--- a/filters/private/crop/Point.cpp
+++ b/filters/private/crop/Point.cpp
@@ -53,20 +53,17 @@ Point::Point()
     , x(LOWEST)
     , y(LOWEST)
     , z(LOWEST)
-{
+{}
 
-};
 
 Point::Point(const std::string& wkt_or_json, SpatialReference ref)
-    : Geometry(wkt_or_json, ref)
-{
+    : Geometry(wkt_or_json, ref), x(LOWEST), y(LOWEST), z(LOWEST)
+{}
 
-}
 
-void Point::update(const std::string& wkt_or_json, SpatialReference ref)
+void Point::update(const std::string& wkt_or_json)
 {
-
-    Geometry::update(wkt_or_json, ref);
+    Geometry::update(wkt_or_json);
 
     int t = GEOSGeomTypeId_r(m_geoserr.ctx(), m_geom.get());
     if (t == -1)
@@ -97,7 +94,6 @@ void Point::update(const std::string& wkt_or_json, SpatialReference ref)
         if (numInputDims > 2)
             GEOSCoordSeq_getOrdinate_r(m_geoserr.ctx(), coords, i, 2, &z);
     }
-
 }
 
 
@@ -115,7 +111,7 @@ bool Point::empty() const
 
 bool Point::is3d() const
 {
-    return (z != LOWEST );
+    return (z != LOWEST);
 }
 
 } //namespace cropfilter
diff --git a/filters/private/crop/Point.hpp b/filters/private/crop/Point.hpp
index 6115a42..792fa3e 100644
--- a/filters/private/crop/Point.hpp
+++ b/filters/private/crop/Point.hpp
@@ -39,14 +39,12 @@
 namespace pdal
 {
 
-
 namespace cropfilter
 {
 
 class PDAL_DLL Point : public Geometry
 {
 public:
-
     Point();
     Point(const std::string& wkt_or_json,
            SpatialReference ref);
@@ -54,14 +52,11 @@ public:
     bool empty() const;
     void clear();
 
-
-    virtual void update(const std::string& wkt_or_json,
-        SpatialReference ref = SpatialReference());
+    virtual void update(const std::string& wkt_or_json);
 
     double x;
     double y;
     double z;
-
 };
 } // namespace cropfilter
 } // namespace pdal
diff --git a/io/BpfCompressor.cpp b/io/BpfCompressor.cpp
index df2eadd..166fa75 100644
--- a/io/BpfCompressor.cpp
+++ b/io/BpfCompressor.cpp
@@ -46,7 +46,7 @@ void BpfCompressor::startBlock()
     m_strm.zfree = Z_NULL;
     m_strm.opaque = Z_NULL;
     if (deflateInit(&m_strm, Z_DEFAULT_COMPRESSION) != Z_OK)
-        throw pdal_error("Could not initialize BPF compressor.");
+        throw error("Could not initialize BPF compressor.");
 
     m_rawSize = 0;
     m_compressedSize = 0;
@@ -115,7 +115,7 @@ void BpfCompressor::finish()
         m_strm.next_out = m_tmpbuf;
     }
     if (ret != Z_STREAM_END)
-        throw pdal_error("Couldn't close BPF compression stream.");
+        throw error("Couldn't close BPF compression stream.");
     deflateEnd(&m_strm);
 
     // Mark our position so that we can get back here.
@@ -128,5 +128,5 @@ void BpfCompressor::finish()
     // Set the position back to the end of the block.
     blockEnd.rewind();
 }
-   
+
 } // namespace pdal
diff --git a/io/BpfCompressor.hpp b/io/BpfCompressor.hpp
index 4852293..13dfc99 100644
--- a/io/BpfCompressor.hpp
+++ b/io/BpfCompressor.hpp
@@ -34,6 +34,7 @@
 
 #pragma once
 
+#include <stdexcept>
 #include <ostream>
 #include <zlib.h>
 
@@ -46,6 +47,12 @@ namespace pdal
 class BpfCompressor
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     BpfCompressor(OLeStream& out, size_t maxSize) :
         m_out(out), m_inbuf(maxSize), m_blockStart(out), m_rawSize(0),
         m_compressedSize(0)
@@ -53,7 +60,7 @@ public:
     void startBlock();
     void finish();
     void compress();
-   
+
 private:
     static const int CHUNKSIZE = 1000000;
 
diff --git a/io/BpfHeader.cpp b/io/BpfHeader.cpp
index 8094eaf..2654e10 100644
--- a/io/BpfHeader.cpp
+++ b/io/BpfHeader.cpp
@@ -141,7 +141,7 @@ bool BpfHeader::readV3(ILeStream& stream)
         m_pointFormat = BpfFormat::ByteMajor;
         break;
     default:
-        throw "Invalid BPF file: unknown interleave type.";
+        throw error("Invalid BPF file: unknown interleave type.");
     }
     return (bool)stream;
 }
@@ -192,7 +192,7 @@ bool BpfHeader::write(OLeStream& stream)
     uint8_t numDim;
 
     if (!Utils::numericCast(m_numDim, numDim))
-        throw pdal_error("Can't write a BPF file of more than 255 dimensions.");
+        throw error("Can't write a BPF file of more than 255 dimensions.");
 
     stream.put("BPF!");
     stream.put("0003");
@@ -246,8 +246,7 @@ bool BpfHeader::readDimensions(ILeStream& stream, BpfDimensionList& dims)
             z = true;
     }
     if (!x || !y || !z)
-        throw pdal_error("BPF file missing at least one of X, Y or Z "
-            "dimensions.");
+        throw error("BPF file missing at least one of X, Y or Z dimensions.");
     return true;
 }
 
diff --git a/io/BpfHeader.hpp b/io/BpfHeader.hpp
index 6c9c289..a4a828f 100644
--- a/io/BpfHeader.hpp
+++ b/io/BpfHeader.hpp
@@ -139,6 +139,12 @@ typedef std::vector<BpfDimension> BpfDimensionList;
 
 struct BpfHeader
 {
+    struct error : std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     BpfHeader() : m_version(0), m_len(176), m_numDim(0),
         m_compression(Utils::toNative(BpfCompression::None)), m_numPts(0),
         m_coordType(Utils::toNative(BpfCoordType::Cartesian)), m_coordId(0),
diff --git a/io/BpfReader.cpp b/io/BpfReader.cpp
index 4dbb492..27277dd 100644
--- a/io/BpfReader.cpp
+++ b/io/BpfReader.cpp
@@ -94,7 +94,7 @@ QuickInfo BpfReader::inspect()
 void BpfReader::initialize()
 {
     if (m_filename.empty())
-        throw pdal_error("Can't read BPF file without filename.");
+        throwError("Can't read BPF file without filename.");
 
     // Logfile doesn't get set until options are processed.
     m_header.setLog(log());
@@ -104,13 +104,19 @@ void BpfReader::initialize()
     // Resets the stream position in case it was already open.
     m_stream.seek(0);
     // In order to know the dimensions we must read the file header.
-    if (!m_header.read(m_stream))
-        return;
-
-    if (!m_header.readDimensions(m_stream, m_dims))
-        return;
+    try
+    {
+        if (!m_header.read(m_stream))
+            return;
+        if (!m_header.readDimensions(m_stream, m_dims))
+            return;
+    }
+    catch (const BpfHeader::error& err)
+    {
+        throwError(err.what());
+    }
 
-    std::string code("");
+    std::string code;
     if (m_header.m_coordType == static_cast<int>(BpfCoordType::Cartesian))
        code = std::string("EPSG:4326");
     else if (m_header.m_coordType == static_cast<int>(BpfCoordType::UTM))
@@ -118,17 +124,21 @@ void BpfReader::initialize()
        uint32_t zone(abs(m_header.m_coordId));
 
        if (m_header.m_coordId > 0 && m_header.m_coordId <= 60)
-          code = std::string("EPSG:326") + (zone < 10 ? "0" : "") + Utils::toString(zone);
+          code = std::string("EPSG:326");
        else if (m_header.m_coordId < 0 && m_header.m_coordId >= -60)
-          code = std::string("EPSG:327") + (zone < 10 ? "0" : "") + Utils::toString(zone);
+          code = std::string("EPSG:327");
        else
-          throw pdal_error("BPF file contains an invalid UTM zone");
+          throwError("BPF file contains an invalid UTM zone" +
+            Utils::toString(zone));
+       code += (zone < 10 ? "0" : "") + Utils::toString(zone);
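+       // e.g., zone 9 north yields "EPSG:32609"; zone 33 south, "EPSG:32733".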
     }
     else
     {
-       //BPF supports something called Terrestrial Centered Rotational (BpfCoordType::TCR) and East North Up (BpfCoordType::ENU)
-       //which we can figure out when we run into a file with these coordinate systems.
-       throw pdal_error("BPF file contains unsupported coordinate system");
+       // BPF supports something called Terrestrial Centered Rotational
+       // (BpfCoordType::TCR) and East North Up (BpfCoordType::ENU),
+       // which we can figure out when we run into a file with these
+       // coordinate systems.
+       throwError("BPF file contains unsupported coordinate system");
     }
     SpatialReference srs(code);
     setSpatialReference(srs);
@@ -160,12 +170,7 @@ void BpfReader::initialize()
     // Fast forward file to end of header as reported by base header.
     std::streampos pos = m_stream.position();
     if (pos > m_header.m_len)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": BPF Header length exceeded that reported by "
-            "file.";
-        throw pdal_error(oss.str());
-    }
+        throwError("BPF Header length exceeded that reported by file.");
     m_stream.close();
 }
 
diff --git a/io/BpfWriter.cpp b/io/BpfWriter.cpp
index 43b206a..2564c52 100644
--- a/io/BpfWriter.cpp
+++ b/io/BpfWriter.cpp
@@ -95,29 +95,16 @@ void BpfWriter::initialize()
     for (auto file : m_bundledFilesSpec)
     {
         if (!FileUtils::fileExists(file))
-        {
-            std::ostringstream oss;
-
-            oss << getName() << ": bundledfile '" << file << "' doesn't exist.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Bundledfile '" + file + "' doesn't exist.");
 
         size_t size = FileUtils::fileSize(file);
         if (size > (std::numeric_limits<uint32_t>::max)())
-        {
-            std::ostringstream oss;
-            oss << getName() << ": bundledfile '" << file << "' too large.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Bundledfile '" + file + "' too large.");
 
         BpfUlemFile ulemFile(size, FileUtils::getFilename(file), file);
         if (ulemFile.m_filename.length() > 32)
-        {
-            std::ostringstream oss;
-            oss << getName() << ": bundledfile '" << file << "' name "
-                "exceeds maximum length of 32.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Bundledfile '" + file + "' name exceeds "
+                "maximum length of 32.");
         m_bundledFiles.push_back(ulemFile);
     }
 
@@ -152,7 +139,14 @@ void BpfWriter::readyFile(const std::string& filename, const SpatialReference&)
 
     // We will re-write the header and dimensions to account for the point
     // count and dimension min/max.
-    m_header.write(m_stream);
+    try
+    {
+        m_header.write(m_stream);
+    }
+    catch (const BpfHeader::error& err)
+    {
+        throwError(err.what());
+    }
     m_header.writeDimensions(m_stream, m_dims);
     for (auto& file : m_bundledFiles)
         file.write(m_stream);
@@ -176,13 +170,9 @@ void BpfWriter::loadBpfDimensions(PointLayoutPtr layout)
        {
            Dimension::Id id = layout->findDim(s);
            if (id == Dimension::Id::Unknown)
-           {
-               std::ostringstream oss;
-               oss << "Invalid dimension '" << s << "' specified for "
-                   "'output_dims' option.";
-               throw pdal_error(oss.str());
-            }
-            dims.push_back(id);
+               throwError("Invalid dimension '" + s + "' specified for "
+                   "'output_dims' option.");
+           dims.push_back(id);
        }
     }
     else
@@ -194,8 +184,7 @@ void BpfWriter::loadBpfDimensions(PointLayoutPtr layout)
     if (dims.size() < 3 || dims[0] != Dimension::Id::X ||
         dims[1] != Dimension::Id::Y || dims[2] != Dimension::Id::Z)
     {
-        throw pdal_error("Missing one of dimensions X, Y or Z.  "
-            "Can't write BPF.");
+        throwError("Missing one of dimensions X, Y or Z.  Can't write BPF.");
     }
 
     for (auto id : dims)
@@ -220,17 +209,24 @@ void BpfWriter::writeView(const PointViewPtr dataShared)
     m_dims[1].m_offset = m_scaling.m_yXform.m_offset.m_val;
     m_dims[2].m_offset = m_scaling.m_zXform.m_offset.m_val;
 
-    switch (m_header.m_pointFormat)
+    try
+    {
+        switch (m_header.m_pointFormat)
+        {
+            case BpfFormat::PointMajor:
+                writePointMajor(data);
+                break;
+            case BpfFormat::DimMajor:
+                writeDimMajor(data);
+                break;
+            case BpfFormat::ByteMajor:
+                writeByteMajor(data);
+                break;
+        }
+    }
+    catch (const BpfCompressor::error& err)
     {
-    case BpfFormat::PointMajor:
-        writePointMajor(data);
-        break;
-    case BpfFormat::DimMajor:
-        writeDimMajor(data);
-        break;
-    case BpfFormat::ByteMajor:
-        writeByteMajor(data);
-        break;
+        throwError(err.what());
     }
     m_header.m_numPts += data->size();
 }
@@ -350,7 +346,14 @@ void BpfWriter::doneFile()
     // Rewrite the header to update the the correct number of points and
     // statistics.
     m_stream.seek(0);
-    m_header.write(m_stream);
+    try
+    {
+        m_header.write(m_stream);
+    }
+    catch (const BpfHeader::error& err)
+    {
+        throwError(err.what());
+    }
     m_header.writeDimensions(m_stream, m_dims);
     m_stream.close();
     getMetadata().addList("filename", m_curFilename);
diff --git a/io/DerivativeWriter.cpp b/io/DerivativeWriter.cpp
deleted file mode 100644
index 2821e69..0000000
--- a/io/DerivativeWriter.cpp
+++ /dev/null
@@ -1,191 +0,0 @@
-/******************************************************************************
-* Copyright (c) 2015-2016, Bradley J Chambers, brad.chambers at gmail.com
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
-
-#include "DerivativeWriter.hpp"
-
-#include <pdal/EigenUtils.hpp>
-#include <pdal/PointView.hpp>
-#include <pdal/util/Utils.hpp>
-#include <pdal/pdal_macros.hpp>
-
-namespace pdal
-{
-static PluginInfo const s_info =
-    PluginInfo("writers.derivative", "Derivative writer",
-               "http://pdal.io/stages/writers.derivative.html");
-
-CREATE_STATIC_PLUGIN(1, 0, DerivativeWriter, Writer, s_info)
-
-std::string DerivativeWriter::getName() const
-{
-    return s_info.name;
-}
-
-void DerivativeWriter::addArgs(ProgramArgs& args)
-{
-    args.add("filename", "Output filename", m_filename).setPositional();
-    args.add("edge_length", "Edge length", m_edgeLength, 15.0);
-    args.add("primitive_type", "Primitive type", m_primTypesSpec, {"slope_d8"});
-    args.add("altitude", "Illumination altitude (degrees)", m_illumAltDeg,
-             45.0);
-    args.add("azimuth", "Illumination azimuth (degrees)", m_illumAzDeg, 315.0);
-    args.add("driver", "GDAL format driver", m_driver, "GTiff");
-}
-
-
-void DerivativeWriter::initialize()
-{
-    static std::map<std::string, PrimitiveType> primtypes =
-    {
-        {"slope_d8", SLOPE_D8},
-        {"slope_fd", SLOPE_FD},
-        {"aspect_d8", ASPECT_D8},
-        {"aspect_fd", ASPECT_FD},
-        {"hillshade", HILLSHADE},
-        {"contour_curvature", CONTOUR_CURVATURE},
-        {"profile_curvature", PROFILE_CURVATURE},
-        {"tangential_curvature", TANGENTIAL_CURVATURE},
-        {"total_curvature", TOTAL_CURVATURE}
-    };
-
-    auto hashPos = handleFilenameTemplate(m_filename);
-    if (hashPos == std::string::npos && m_primTypesSpec.size() > 1)
-    {
-        std::ostringstream oss;
-
-        oss << getName() << ": No template placeholder ('#') found in "
-            "filename '" << m_filename << "' when one is required with "
-            "multiple primitive types.";
-        throw pdal_error(oss.str());
-    }
-
-    for (std::string os : m_primTypesSpec)
-    {
-        std::string s = Utils::tolower(os);
-        auto pi = primtypes.find(s);
-        if (pi == primtypes.end())
-        {
-            std::ostringstream oss;
-            oss << getName() << ": Unrecognized primitive type '" << os <<
-                "'.";
-            throw pdal_error(oss.str());
-        }
-        TypeOutput to;
-        to.m_type = pi->second;
-        to.m_filename = generateFilename(pi->first, hashPos);
-        m_primitiveTypes.push_back(to);
-    }
-}
-
-
-std::string DerivativeWriter::generateFilename(const std::string& primName,
-        std::string::size_type hashPos) const
-{
-    std::string filename = m_filename;
-    if (hashPos != std::string::npos)
-        filename.replace(hashPos, 1, primName);
-    return filename;
-}
-
-
-void DerivativeWriter::write(const PointViewPtr data)
-{
-    using namespace eigen;
-    using namespace Eigen;
-
-    // Bounds are required for computing number of rows and columns, and for
-    // later indexing individual points into the appropriate raster cells.
-    BOX2D bounds;
-    data->calculateBounds(bounds);
-    SpatialReference srs = data->spatialReference();
-
-    // Determine the number of rows and columns at the given cell size.
-    size_t cols = ((bounds.maxx - bounds.minx) / m_edgeLength) + 1;
-    size_t rows = ((bounds.maxy - bounds.miny) / m_edgeLength) + 1;
-
-    // Begin by creating a DSM of max elevations per XY cell.
-    MatrixXd DSM = createMaxMatrix(*data.get(), rows, cols, m_edgeLength,
-                                   bounds);
-
-    // Continue by cleaning the DSM.
-    MatrixXd cleanedDSM = cleanDSM(DSM);
-
-    // We will pad the edges by 1 cell, though according to some texts we should
-    // simply compute forward- or backward-difference as opposed to centered
-    // difference at these points.
-    MatrixXd paddedDSM = padMatrix(cleanedDSM, 1);
-
-    // Prepare the out matrix.
-    MatrixXd out(cleanedDSM.rows(), cleanedDSM.cols());
-    out.setConstant(std::numeric_limits<double>::quiet_NaN());
-
-    for (TypeOutput& to : m_primitiveTypes)
-    {
-        for (int r = 1; r < paddedDSM.rows()-1; ++r)
-        {
-            for (int c = 1; c < paddedDSM.cols()-1; ++c)
-            {
-                double val = paddedDSM(r, c);
-                if (std::isnan(val))
-                    continue;
-                Matrix3d block = paddedDSM.block(r-1, c-1, 3, 3);
-                if (to.m_type == SLOPE_D8)
-                    out(r-1, c-1) = computeSlopeD8(block, m_edgeLength);
-                if (to.m_type == SLOPE_FD)
-                    out(r-1, c-1) = computeSlopeFD(block, m_edgeLength);
-                if (to.m_type == ASPECT_D8)
-                    out(r-1, c-1) = computeAspectD8(block, m_edgeLength);
-                if (to.m_type == ASPECT_FD)
-                    out(r-1, c-1) = computeAspectFD(block, m_edgeLength);
-                if (to.m_type == HILLSHADE)
-                    out(r-1, c-1) = computeHillshade(block, m_edgeLength,
-                                                     m_illumAltDeg,
-                                                     m_illumAzDeg);
-                if (to.m_type == CONTOUR_CURVATURE)
-                    out(r-1, c-1) = computeContour(block, m_edgeLength);
-                if (to.m_type == PROFILE_CURVATURE)
-                    out(r-1, c-1) = computeProfile(block, m_edgeLength);
-                if (to.m_type == TANGENTIAL_CURVATURE)
-                    out(r-1, c-1) = computeTangential(block, m_edgeLength);
-                if (to.m_type == TOTAL_CURVATURE)
-                    out(r-1, c-1) = computeTotal(block, m_edgeLength);
-            }
-        }
-
-        // Finally, write our Matrix as a GDAL raster (specifically GTiff).
-        writeMatrix(out, to.m_filename, m_driver, m_edgeLength, bounds, srs);
-    }
-}
-
-} // namespace pdal
diff --git a/io/DerivativeWriter.hpp b/io/DerivativeWriter.hpp
deleted file mode 100644
index 975ff58..0000000
--- a/io/DerivativeWriter.hpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/******************************************************************************
-* Copyright (c) 2015-2016, Bradley J Chambers, brad.chambers at gmail.com
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
-
-#pragma once
-
-#include <pdal/Writer.hpp>
-#include <pdal/plugin.hpp>
-#include <pdal/util/ProgramArgs.hpp>
-
-#include <Eigen/Core>
-
-#include <string>
-#include <vector>
-
-extern "C" int32_t DerivativeWriter_ExitFunc();
-extern "C" PF_ExitFunc DerivativeWriter_InitPlugin();
-
-namespace pdal
-{
-
-class BOX2D;
-
-class PDAL_DLL DerivativeWriter : public Writer
-{
-    enum PrimitiveType
-    {
-        SLOPE_D8,
-        SLOPE_FD,
-        ASPECT_D8,
-        ASPECT_FD,
-        HILLSHADE,
-        CONTOUR_CURVATURE,
-        PROFILE_CURVATURE,
-        TANGENTIAL_CURVATURE,
-        TOTAL_CURVATURE
-    };
-
-    struct TypeOutput
-    {
-        PrimitiveType m_type;
-        std::string m_filename;
-    };
-
-public:
-    static void * create();
-    static int32_t destroy(void *);
-    std::string getName() const;
-
-    DerivativeWriter()
-    {}
-
-private:
-    virtual void addArgs(ProgramArgs& args);
-    virtual void initialize();
-    virtual void write(const PointViewPtr view);
-
-    std::string generateFilename(const std::string& primName,
-                                 std::string::size_type hashPos) const;
-
-    std::string m_filename;
-    std::string m_driver;
-    double m_edgeLength;
-    double m_illumAltDeg;
-    double m_illumAzDeg;
-    StringList m_primTypesSpec;
-    std::vector<TypeOutput> m_primitiveTypes;
-
-    DerivativeWriter& operator=(const DerivativeWriter&); // not implemented
-    DerivativeWriter(const DerivativeWriter&); // not implemented
-};
-
-} // namespace pdal
diff --git a/io/FauxReader.cpp b/io/FauxReader.cpp
index c82aaec..a20e086 100644
--- a/io/FauxReader.cpp
+++ b/io/FauxReader.cpp
@@ -70,23 +70,55 @@ void FauxReader::addArgs(ProgramArgs& args)
 void FauxReader::initialize()
 {
     if (m_numReturns > 10)
+        throwError("Option 'number_of_returns' must be in the range [0,10].");
+    if (m_mode == Mode::Grid)
     {
-        std::ostringstream oss;
-        oss << getName() << ": Option 'number_of_returns' must be in the range "
-            "[0,10].";
-        throw pdal_error(oss.str());
-    }
-    if (m_count > 1)
-    {
-        m_delX = (m_bounds.maxx - m_bounds.minx) / (m_count - 1);
-        m_delY = (m_bounds.maxy - m_bounds.miny) / (m_count - 1);
-        m_delZ = (m_bounds.maxz - m_bounds.minz) / (m_count - 1);
+        m_bounds.minx = ceil(m_bounds.minx);
+        m_bounds.maxx = ceil(m_bounds.maxx);
+        m_bounds.miny = ceil(m_bounds.miny);
+        m_bounds.maxy = ceil(m_bounds.maxy);
+        m_bounds.minz = ceil(m_bounds.minz);
+        m_bounds.maxz = ceil(m_bounds.maxz);
+        // Here delX/Y/Z represent the number of points in each direction.
+        m_count = 1;
+        if (m_bounds.maxx <= m_bounds.minx)
+            m_delX = 0;
+        else
+        {
+            m_delX = m_bounds.maxx - m_bounds.minx;
+            m_count *= m_delX;
+        }
+        if (m_bounds.maxy <= m_bounds.miny)
+            m_delY = 0;
+        else
+        {
+            m_delY = m_bounds.maxy - m_bounds.miny;
+            m_count *= m_delY;
+        }
+        if (m_bounds.maxz <= m_bounds.minz)
+            m_delZ = 0;
+        else
+        {
+            m_delZ = m_bounds.maxz - m_bounds.minz;
+            m_count *= m_delZ;
+        }
+        if (!m_delX && !m_delY && !m_delZ)
+            m_count = 0;
     }
     else
     {
-        m_delX = 0;
-        m_delY = 0;
-        m_delZ = 0;
+        if (m_count > 1)
+        {
+            m_delX = (m_bounds.maxx - m_bounds.minx) / (m_count - 1);
+            m_delY = (m_bounds.maxy - m_bounds.miny) / (m_count - 1);
+            m_delZ = (m_bounds.maxz - m_bounds.minz) / (m_count - 1);
+        }
+        else
+        {
+            m_delX = 0;
+            m_delY = 0;
+            m_delZ = 0;
+        }
     }
 }
 
@@ -137,7 +169,7 @@ bool FauxReader::processOne(PointRef& point)
         break;
     case Mode::Ramp:
         x = m_bounds.minx + m_delX * m_index;
-        y = m_bounds.miny+ m_delY * m_index;
+        y = m_bounds.miny + m_delY * m_index;
         z = m_bounds.minz + m_delZ * m_index;
         break;
     case Mode::Uniform:
@@ -150,6 +182,30 @@ bool FauxReader::processOne(PointRef& point)
         y = Utils::normal(m_mean_y, m_stdev_y, m_seed++);
         z = Utils::normal(m_mean_z, m_stdev_z, m_seed++);
         break;
+    case Mode::Grid:
+    {
+        if (m_delX)
+            x = m_index % (point_count_t)m_delX;
+
+        if (m_delY)
+        {
+            if (m_delX)
+                y = (m_index / (point_count_t)m_delX) % (point_count_t)m_delY;
+            else
+                y = m_index % (point_count_t)m_delY;
+        }
+
+        if (m_delZ)
+        {
+            if (m_delX && m_delY)
+                z = m_index / (point_count_t)(m_delX * m_delY);
+            else if (m_delX)
+                z = m_index / (point_count_t)m_delX;
+            else if (m_delY)
+                z = m_index / (point_count_t)m_delY;
+        }
+        break;
+    }
     }
 
     point.setField(Dimension::Id::X, x);
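
The Grid mode added here decodes the running point index into X/Y/Z cell
coordinates in row-major order, with X varying fastest.  A minimal
standalone sketch of the same decomposition, with made-up extents standing
in for m_delX/m_delY/m_delZ:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        // Hypothetical grid extents (points per direction).
        const uint64_t delX = 4, delY = 3, delZ = 2;

        for (uint64_t index = 0; index < delX * delY * delZ; ++index)
        {
            uint64_t x = index % delX;           // varies fastest
            uint64_t y = (index / delX) % delY;
            uint64_t z = index / (delX * delY);  // varies slowest
            std::cout << index << " -> (" << x << ", " << y << ", " <<
                z << ")\n";
        }
        return 0;
    }
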
diff --git a/io/FauxReader.hpp b/io/FauxReader.hpp
index 4bad5bc..dd97d1c 100644
--- a/io/FauxReader.hpp
+++ b/io/FauxReader.hpp
@@ -49,7 +49,8 @@ enum class Mode
     Random,
     Ramp,
     Uniform,
-    Normal
+    Normal,
+    Grid
 };
 
 inline std::istream& operator>>(std::istream& in, Mode& m)
@@ -68,6 +69,8 @@ inline std::istream& operator>>(std::istream& in, Mode& m)
         m = Mode::Uniform;
     else if (s == "normal")
         m = Mode::Normal;
+    else if (s == "grid")
+        m = Mode::Grid;
     else
         in.setstate(std::ios::failbit);
     return in;
@@ -84,9 +87,11 @@ inline std::ostream& operator<<(std::ostream& out, const Mode& m)
     case Mode::Ramp:
         out << "Ramp";
     case Mode::Uniform:
-        out << "Uniform";            
+        out << "Uniform";
     case Mode::Normal:
         out << "Normal";
+    case Mode::Grid:
+        out << "Grid";
     }
     return out;
 }
diff --git a/io/GDALGrid.cpp b/io/GDALGrid.cpp
index 5329904..af38bcd 100644
--- a/io/GDALGrid.cpp
+++ b/io/GDALGrid.cpp
@@ -38,6 +38,7 @@
 #include <cmath>
 #include <limits>
 #include <iostream>
+#include <pdal/pdal_types.hpp>
 
 namespace pdal
 {
@@ -67,6 +68,66 @@ GDALGrid::GDALGrid(size_t width, size_t height, double edgeLength,
 }
 
 
+/**
+  Expand the grid to a new size, shifting the existing data within the
+  expanded grid.
+
+  \param width  Width of the expanded grid.
+  \param height  Height of the expanded grid.
+  \param xshift  Cells to shift the existing data in X.
+  \param yshift  Cells to shift the existing data in Y.
+*/
+void GDALGrid::expand(size_t width, size_t height, size_t xshift, size_t yshift)
+{
+    if (width < m_width)
+        throw error("Expanded grid must have width at least as large "
+            "as existing grid.");
+    if (height < m_height)
+        throw error("Expanded grid must have height at least as large "
+            "as existing grid.");
+    if (m_width + xshift > width || m_height + yshift > height)
+        throw error("Can't shift existing grid outside of new grid "
+            "during expansion.");
+    if (width == m_width && height == m_height)
+        return;
+
+    // Raster rows run top-down, opposite standard X/Y, so flip yshift.
+    yshift = height - (m_height + yshift);
+    auto moveVec = [=](DataPtr& src, double initializer = 0)
+    {
+        // Compute an index in the destination given source index coords.
+        auto dstIndex = [width, xshift, yshift](size_t i, size_t j)
+        {
+            return ((yshift + j) * width) + i + xshift;
+        };
+
+        size_t size(width * height);
+        DataPtr dst(new DataVec(size, initializer));
+        for (size_t j = 0; j < m_height; ++j)
+        {
+            size_t srcPos = index(0, j);
+            size_t dstPos = dstIndex(0, j);
+            std::copy(src->begin() + srcPos, src->begin() + srcPos + m_width,
+                dst->begin() + dstPos);
+        }
+        src = std::move(dst);
+    };
+
+    moveVec(m_count);
+    if (m_outputTypes & statMin)
+        moveVec(m_min, std::numeric_limits<double>::max());
+    if (m_outputTypes & statMax)
+        moveVec(m_max, std::numeric_limits<double>::lowest());
+    if (m_outputTypes & statIdw)
+    {
+        moveVec(m_idw);
+        moveVec(m_idwDist);
+    }
+    if ((m_outputTypes & statMean) || (m_outputTypes & statStdDev))
+        moveVec(m_mean);
+    if (m_outputTypes & statStdDev)
+        moveVec(m_stdDev);
+    m_width = width;
+    m_height = height;
+}
+
+
 int GDALGrid::numBands() const
 {
     int num = 0;
@@ -138,7 +199,7 @@ void GDALGrid::addPoint(double x, double y, double z)
     //          ^ | -->
     //        ^ | | --->
     //      ^ | | | ---->
-    //   <------- X ------> 
+    //   <------- X ------>
     //    <------ | | | v
     //     <----- | | v
     //       <--- | v
@@ -234,7 +295,7 @@ void GDALGrid::addPoint(double x, double y, double z)
 void GDALGrid::update(int i, int j, double val, double dist)
 {
     // Once we determine that a point is close enough to a cell to count it,
-    // this function does the actual math.  We use the value of the 
+    // this function does the actual math.  We use the value of the
     // point (val) and its distance from the cell center (dist).  There's
     // a little math that needs to be done once all points are added.  See
     // finalize() for that.
@@ -285,7 +346,7 @@ void GDALGrid::update(int i, int j, double val, double dist)
             if (dist == 0)
             {
                 idw = val;
-                idwDist = std::numeric_limits<double>::quiet_NaN();   
+                idwDist = std::numeric_limits<double>::quiet_NaN();
             }
             else
             {
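
The row-by-row copy in the new GDALGrid::expand() hinges on the destination
index being offset by the shift amounts (after yshift is flipped, since
raster rows run top-down).  A self-contained sketch of the same index math
on a flat row-major vector, with illustrative sizes:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Old 2x2 grid stored row-major in a flat vector.
        const size_t oldW = 2, oldH = 2;
        std::vector<double> src { 1, 2, 3, 4 };

        // Expand to 4x3, shifting the old data by (1, 1) cells.
        const size_t width = 4, height = 3, xshift = 1, yshift = 1;
        std::vector<double> dst(width * height, 0);

        // Same form as the dstIndex lambda in expand().
        auto dstIndex = [=](size_t i, size_t j)
            { return (yshift + j) * width + i + xshift; };

        for (size_t j = 0; j < oldH; ++j)
            std::copy(src.begin() + j * oldW, src.begin() + (j + 1) * oldW,
                dst.begin() + dstIndex(0, j));

        // Prints the old grid embedded at offset (1, 1) in the new one.
        for (size_t j = 0; j < height; ++j)
        {
            for (size_t i = 0; i < width; ++i)
                std::cout << dst[j * width + i] << ' ';
            std::cout << '\n';
        }
        return 0;
    }
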
diff --git a/io/GDALGrid.hpp b/io/GDALGrid.hpp
index da3719c..e6e0ad8 100644
--- a/io/GDALGrid.hpp
+++ b/io/GDALGrid.hpp
@@ -36,6 +36,7 @@
 #include <memory>
 #include <string>
 #include <vector>
+#include <stdexcept>
 
 namespace pdal
 {
@@ -50,9 +51,17 @@ public:
     static const int statStdDev = 16;
     static const int statIdw = 32;
 
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     GDALGrid(size_t width, size_t height, double edgeLength, double radius,
         double noData, int outputTypes, size_t windowSize);
 
+    void expand(size_t width, size_t height, size_t xshift, size_t yshift);
+
     // Get the number of bands represented by this grid.
     int numBands() const;
 
diff --git a/io/GDALReader.cpp b/io/GDALReader.cpp
index feed83d..010e8cd 100644
--- a/io/GDALReader.cpp
+++ b/io/GDALReader.cpp
@@ -93,7 +93,7 @@ QuickInfo GDALReader::inspect()
 
     m_raster = std::unique_ptr<gdal::Raster>(new gdal::Raster(m_filename));
     if (m_raster->open() == gdal::GDALError::CantOpen)
-        throw pdal_error("Couldn't open raster file '" + m_filename + "'.");
+        throwError("Couldn't open raster file '" + m_filename + "'.");
 
     qi.m_pointCount = m_raster->width() * m_raster->height();
     // qi.m_bounds = ???;
@@ -121,7 +121,7 @@ void GDALReader::ready(PointTableRef table)
 {
     m_index = 0;
     if (m_raster->open() == gdal::GDALError::CantOpen)
-        throw pdal_error("Couldn't open raster file '" + m_filename + "'.");
+        throwError("Couldn't open raster file '" + m_filename + "'.");
 }
 
 
diff --git a/io/GDALWriter.cpp b/io/GDALWriter.cpp
index f51859d..f0249fb 100644
--- a/io/GDALWriter.cpp
+++ b/io/GDALWriter.cpp
@@ -61,8 +61,8 @@ void GDALWriter::addArgs(ProgramArgs& args)
     args.add("filename", "Output filename", m_filename).setPositional();
     args.add("resolution", "Cell edge size, in units of X/Y",
         m_edgeLength).setPositional();
-    args.add("radius", "Radius from cell center to use to locate influencing "
-        "points", m_radius).setPositional();
+    m_radiusArg = &args.add("radius", "Radius from cell center to use to locate"
+        " influencing points", m_radius);
     args.add("gdaldriver", "GDAL writer driver name", m_drivername, "GTiff");
     args.add("gdalopts", "GDAL driver options (name=value,name=value...)",
         m_options);
@@ -72,6 +72,8 @@ void GDALWriter::addArgs(ProgramArgs& args)
         m_windowSize);
     args.add("nodata", "No data value", m_noData, -9999.0);
     args.add("dimension", "Dimension to use", m_interpDimString, "Z");
+    args.add("bounds", "Bounds of data.  Required in streaming mode.",
+        m_bounds);
 }
 
 
@@ -98,11 +100,7 @@ void GDALWriter::initialize()
         else if (ts == "stdev")
             m_outputTypes |= GDALGrid::statStdDev;
         else
-        {
-            std::ostringstream oss;
-            oss << "Invalid writers.gdal output type: '" << ts << "'.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Invalid output type: '" + ts + "'.");
     }
 
     gdal::registerDrivers();
@@ -113,69 +111,119 @@ void GDALWriter::prepared(PointTableRef table)
 {
     m_interpDim = table.layout()->findDim(m_interpDimString);
     if (m_interpDim == Dimension::Id::Unknown)
-    {
-        std::ostringstream oss;
-
-        oss << getName() << ": specified dimension '" << m_interpDimString <<
-            "' does not exist.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Specified dimension '" + m_interpDimString +
+            "' does not exist.");
+    if (!m_radiusArg->set())
+        m_radius = m_edgeLength * sqrt(2.0);
 }
 
 
-void GDALWriter::ready(PointTableRef table)
+void GDALWriter::readyTable(PointTableRef table)
 {
-    if (!table.spatialReferenceUnique())
-    {
-        std::ostringstream oss;
+    if (m_bounds.to2d().empty() && !table.supportsView())
+        throwError("Option 'bounds' required in streaming mode.");
+}
 
-        oss << getName() << ": Can't write output with multiple spatial "
-            "references.";
-        throw pdal_error(oss.str());
-    }
+
+void GDALWriter::readyFile(const std::string& filename,
+    const SpatialReference& srs)
+{
+    m_outputFilename = filename;
+    m_srs = srs;
+    if (m_bounds.to2d().valid())
+        createGrid(m_bounds.to2d());
 }
 
 
-void GDALWriter::write(const PointViewPtr view)
+void GDALWriter::createGrid(BOX2D bounds)
 {
-    view->calculateBounds(m_bounds);
-    size_t width = ((m_bounds.maxx - m_bounds.minx) / m_edgeLength) + 1;
-    size_t height = ((m_bounds.maxy - m_bounds.miny) / m_edgeLength) + 1;
+    m_curBounds = bounds;
+    size_t width = ((m_curBounds.maxx - m_curBounds.minx) / m_edgeLength) + 1;
+    size_t height = ((m_curBounds.maxy - m_curBounds.miny) / m_edgeLength) + 1;
     m_grid.reset(new GDALGrid(width, height, m_edgeLength, m_radius, m_noData,
         m_outputTypes, m_windowSize));
+}
+
+
+void GDALWriter::expandGrid(BOX2D bounds)
+{
+    if (bounds == m_curBounds)
+        return;
+
+    bounds.grow(m_curBounds);
+    size_t xshift = ceil((m_curBounds.minx - bounds.minx) / m_edgeLength);
+    bounds.minx = m_curBounds.minx - (xshift * m_edgeLength);
+    size_t yshift = ceil((m_curBounds.miny - bounds.miny) / m_edgeLength);
+    bounds.miny = m_curBounds.miny - (yshift * m_edgeLength);
+
+    size_t width = ((bounds.maxx - bounds.minx) / m_edgeLength) + 1;
+    size_t height = ((bounds.maxy - bounds.miny) / m_edgeLength) + 1;
+    try
+    {
+        m_grid->expand(width, height, xshift, yshift);
+    }
+    catch (const GDALGrid::error& err)
+    {
+        throwError(err.what()); // Add the stage name onto the error text.
+    }
+    m_curBounds = bounds;
+}
+
 
+void GDALWriter::writeView(const PointViewPtr view)
+{
+    BOX2D bounds;
+    if (m_bounds.to2d().valid())
+        bounds = m_bounds.to2d();
+    else
+        view->calculateBounds(bounds);
+
+    if (!m_grid)
+        createGrid(bounds);
+    else
+        expandGrid(bounds);
+
+    PointRef point(*view, 0);
     for (PointId idx = 0; idx < view->size(); ++idx)
     {
-        double x = view->getFieldAs<double>(Dimension::Id::X, idx) -
-            m_bounds.minx;
-        double y = view->getFieldAs<double>(Dimension::Id::Y, idx) -
-            m_bounds.miny;
-        double z = view->getFieldAs<double>(m_interpDim, idx);
-
-        m_grid->addPoint(x, y, z);
-   }
+        point.setPointId(idx);
+        processOne(point);
+    }
 }
 
 
-void GDALWriter::done(PointTableRef table)
+bool GDALWriter::processOne(PointRef& point)
+{
+    double x = point.getFieldAs<double>(Dimension::Id::X) -
+        m_curBounds.minx;
+    double y = point.getFieldAs<double>(Dimension::Id::Y) -
+        m_curBounds.miny;
+    double z = point.getFieldAs<double>(m_interpDim);
+
+    m_grid->addPoint(x, y, z);
+    return true;
+}
+
+
+void GDALWriter::doneFile()
 {
     std::array<double, 6> pixelToPos;
 
-    pixelToPos[0] = m_bounds.minx;
+    pixelToPos[0] = m_curBounds.minx;
     pixelToPos[1] = m_edgeLength;
     pixelToPos[2] = 0;
-    pixelToPos[3] = m_bounds.miny + (m_edgeLength * m_grid->height());
+    pixelToPos[3] = m_curBounds.miny + (m_edgeLength * m_grid->height());
     pixelToPos[4] = 0;
     pixelToPos[5] = -m_edgeLength;
-    gdal::Raster raster(m_filename, m_drivername, table.spatialReference(),
-        pixelToPos);
+    gdal::Raster raster(m_outputFilename, m_drivername, m_srs, pixelToPos);
 
     m_grid->finalize();
 
     gdal::GDALError err = raster.open(m_grid->width(), m_grid->height(),
-        m_grid->numBands(), Dimension::Type::Double, m_grid->noData());
+        m_grid->numBands(), Dimension::Type::Double, m_grid->noData(),
+        m_options);
     if (err != gdal::GDALError::None)
-        throw pdal_error(raster.errorMsg());
+        throwError(raster.errorMsg());
     int bandNum = 1;
     uint8_t *buf;
     buf = m_grid->data("min");
@@ -201,4 +249,3 @@ void GDALWriter::done(PointTableRef table)
 }
 
 } // namespace pdal
-
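
The new expandGrid() keeps cell alignment when the bounds grow: the shift is
rounded up to a whole number of cells and the new minimum is then snapped
back onto the old cell boundaries.  A worked sketch of that arithmetic with
made-up values:

    #include <cmath>
    #include <cstddef>
    #include <iostream>

    int main()
    {
        const double edgeLength = 10.0;
        const double curMinX = 100.0;  // existing grid origin
        const double newMinX = 73.0;   // grown bounds reach further left

        // Whole-cell shift, rounded up: ceil((100 - 73) / 10) == 3.
        size_t xshift = (size_t)std::ceil((curMinX - newMinX) / edgeLength);

        // Snap the new origin onto the old cell edges: 100 - 3 * 10 == 70.
        double snappedMinX = curMinX - xshift * edgeLength;

        std::cout << "xshift = " << xshift <<
            ", snapped minx = " << snappedMinX << '\n';
        return 0;
    }
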
diff --git a/io/GDALWriter.hpp b/io/GDALWriter.hpp
index 2266c3f..3a8236b 100644
--- a/io/GDALWriter.hpp
+++ b/io/GDALWriter.hpp
@@ -35,8 +35,9 @@
 #include <algorithm>
 
 #include <pdal/PointView.hpp>
-#include <pdal/Writer.hpp>
+#include <pdal/FlexWriter.hpp>
 #include <pdal/plugin.hpp>
+#include <pdal/util/ProgramArgs.hpp>
 
 #include "GDALGrid.hpp"
 
@@ -46,7 +47,7 @@ extern "C" PF_ExitFunc GDALWriter_InitPlugin();
 namespace pdal
 {
 
-class PDAL_DLL GDALWriter : public Writer
+class PDAL_DLL GDALWriter : public FlexWriter
 {
 public:
     static void * create();
@@ -60,14 +61,22 @@ private:
     virtual void addArgs(ProgramArgs& args);
     virtual void initialize();
     virtual void prepared(PointTableRef table);
-    virtual void ready(PointTableRef table);
-    virtual void write(const PointViewPtr data);
-    virtual void done(PointTableRef table);
+    virtual void readyTable(PointTableRef table);
+    virtual void readyFile(const std::string& filename,
+        const SpatialReference& srs);
+    virtual void writeView(const PointViewPtr view);
+    virtual bool processOne(PointRef& point);
+    virtual void doneFile();
+    void createGrid(BOX2D bounds);
+    void expandGrid(BOX2D bounds);
 
-    std::string m_filename;
+    std::string m_outputFilename;
     std::string m_drivername;
-    BOX2D m_bounds;
+    SpatialReference m_srs;
+    Bounds m_bounds;
+    BOX2D m_curBounds;
     double m_edgeLength;
+    Arg *m_radiusArg;
     double m_radius;
     StringList m_options;
     StringList m_outputTypeString;
@@ -77,7 +86,6 @@ private:
     double m_noData;
     Dimension::Id m_interpDim;
     std::string m_interpDimString;
-
 };
 
 }
diff --git a/io/GeotiffSupport.cpp b/io/GeotiffSupport.cpp
index 931a832..bf117d3 100644
--- a/io/GeotiffSupport.cpp
+++ b/io/GeotiffSupport.cpp
@@ -36,15 +36,9 @@
 
 #include <sstream>
 
-// GDAL
 #include <geo_normalize.h>
 #include <ogr_spatialref.h>
-
-// See http://lists.osgeo.org/pipermail/gdal-dev/2013-November/037429.html
-#define CPL_SERV_H_INCLUDED
-
 #include <geo_simpletags.h>
-#include <cpl_conv.h>
 
 PDAL_C_START
 
@@ -55,54 +49,44 @@ int CPL_DLL GTIFSetFromOGISDefn(GTIF*, const char*);
 
 PDAL_C_END
 
-#include <pdal/GDALUtils.hpp>
-
-struct StTiff : public ST_TIFF
-{};
+#include <io/LasVLR.hpp>
 
 namespace pdal
 {
 
-GeotiffSupport::~GeotiffSupport()
+namespace
 {
-    if (m_gtiff != 0)
-    {
-        GTIFFree(m_gtiff);
-        m_gtiff = 0;
-    }
-    if (m_tiff != 0)
-    {
-        ST_Destroy(m_tiff);
-        m_tiff = 0;
-    }
-}
-
 
-void GeotiffSupport::resetTags()
+struct GeotiffCtx
 {
-    // If we already have m_gtiff and m_tiff, that is because we have
-    // already called GetGTIF once before.  VLRs ultimately drive how the
-    // SpatialReference is defined, not the GeoTIFF keys.
-    if (m_tiff != 0)
+public:
+    GeotiffCtx() : gtiff(nullptr)
     {
-        ST_Destroy(m_tiff);
-        m_tiff = 0;
+        tiff = ST_Create();
     }
 
-    if (m_gtiff != 0)
+    ~GeotiffCtx()
     {
-        GTIFFree(m_gtiff);
-        m_gtiff = 0;
+        if (gtiff)
+            GTIFFree(gtiff);
+        ST_Destroy(tiff);
     }
 
-    m_tiff = (StTiff *)ST_Create();
+    ST_TIFF *tiff;
+    GTIF *gtiff;
+};
 
-    return;
 }
 
-
-bool GeotiffSupport::setShortKeys(int tag, void *data, int size)
+GeotiffSrs::GeotiffSrs(const std::vector<uint8_t>& directoryRec,
+    const std::vector<uint8_t>& doublesRec,
+    const std::vector<uint8_t>& asciiRec)
 {
+    GeotiffCtx ctx;
+
+    if (directoryRec.empty())
+        return;
+
     // Make sure struct is 16 bytes.
 #pragma pack(push)
 #pragma pack(1)
@@ -115,166 +99,82 @@ bool GeotiffSupport::setShortKeys(int tag, void *data, int size)
     };
 #pragma pack(pop)
 
-    ShortKeyHeader *header = (ShortKeyHeader *)data;
-    int declaredSize = (header->numKeys + 1) * 4;
-    if (size < declaredSize)
-        return false;
-    ST_SetKey(m_tiff, tag, (1 + header->numKeys) * 4, STT_SHORT, data);
-    return true;
-}
-
-
-bool GeotiffSupport::setDoubleKeys(int tag, void *data, int size)
-{
-    ST_SetKey(m_tiff, tag, size / sizeof(double), STT_DOUBLE, data);
-    return true;
-}
-
-
-bool GeotiffSupport::setAsciiKeys(int tag, void *data, int size)
-{
-    ST_SetKey(m_tiff, tag, size, STT_ASCII, data);
-    return true;
-}
-
-
-/// Get the geotiff data associated with a tag.
-/// \param tag - geotiff tag.
-/// \param count - Number of items fetched.
-/// \param data_ptr - Pointer to fill with address of filled data.
-/// \return  Size of data referred to by \c data_ptr
-size_t GeotiffSupport::getKey(int tag, int *count, void **data_ptr) const
-{
-    int st_type;
-
-    if (m_tiff == 0)
-        return 0;
-
-    if (!ST_GetKey(m_tiff, tag, count, &st_type, data_ptr))
-        return 0;
-
-    if (st_type == STT_ASCII)
-        return *count;
-    else if (st_type == STT_SHORT)
-        return 2 * *count;
-    else if (st_type == STT_DOUBLE)
-        return 8 * *count;
-    return 8 * *count;
-}
+    ShortKeyHeader *header = (ShortKeyHeader *)directoryRec.data();
+    size_t declaredSize = (header->numKeys + 1) * 4;
+    if (directoryRec.size() < declaredSize)
+        return;
+    ST_SetKey(ctx.tiff, GEOTIFF_DIRECTORY_RECORD_ID,
+        (1 + header->numKeys) * 4, STT_SHORT, (void *)directoryRec.data());
 
+    if (doublesRec.size())
+        ST_SetKey(ctx.tiff, GEOTIFF_DOUBLES_RECORD_ID,
+            doublesRec.size() / sizeof(double), STT_DOUBLE,
+            (void *)doublesRec.data());
 
-void GeotiffSupport::setTags()
-{
-    m_gtiff = GTIFNewSimpleTags(m_tiff);
-    if (!m_gtiff)
-        throw std::runtime_error("The geotiff keys could not be read "
-            "from VLR records");
-}
+    if (asciiRec.size())
+        ST_SetKey(ctx.tiff, GEOTIFF_ASCII_RECORD_ID,
+            asciiRec.size(), STT_ASCII, (void *)asciiRec.data());
 
+    ctx.gtiff = GTIFNewSimpleTags(ctx.tiff);
 
-SpatialReference GeotiffSupport::srs() const
-{
     GTIFDefn sGTIFDefn;
-    SpatialReference srs;
-
-    if (m_gtiff && GTIFGetDefn(m_gtiff, &sGTIFDefn))
+    if (GTIFGetDefn(ctx.gtiff, &sGTIFDefn))
     {
-        char *pszWKT = GTIFGetOGISDefn(m_gtiff, &sGTIFDefn);
-        if (pszWKT)
-            srs.set(pszWKT);
+        char *wkt = GTIFGetOGISDefn(ctx.gtiff, &sGTIFDefn);
+        if (wkt)
+            m_srs.set(wkt);
     }
-    return srs;
 }
 
 
-void GeotiffSupport::rebuildGTIF()
+GeotiffTags::GeotiffTags(const SpatialReference& srs)
 {
-    // If we already have m_gtiff and m_tiff, that is because we have
-    // already called GetGTIF once before.  VLRs ultimately drive how the
-    // SpatialReference is defined, not the GeoTIFF keys.
-    if (m_tiff != 0)
-    {
-        ST_Destroy(m_tiff);
-        m_tiff = 0;
-    }
-
-    if (m_gtiff != 0)
-    {
-        GTIFFree(m_gtiff);
-        m_gtiff = 0;
-    }
-
-    m_tiff = (StTiff *)ST_Create();
-
-    // (here it used to read in the VLRs)
-
-    m_gtiff = GTIFNewSimpleTags(m_tiff);
-    if (!m_gtiff)
-        throw std::runtime_error("The geotiff keys could not be read from "
-            "VLR records");
-}
-
-
-void GeotiffSupport::setWkt(const std::string& v)
-{
-    if (!m_gtiff)
-        rebuildGTIF();
-
-    if (v.empty())
+    if (srs.empty())
         return;
 
-    if (!GTIFSetFromOGISDefn(m_gtiff, v.c_str()))
-        throw pdal_error("Could not set m_gtiff from WKT");
+    GeotiffCtx ctx;
+    ctx.gtiff = GTIFNewSimpleTags(ctx.tiff);
 
-    if (!GTIFWriteKeys(m_gtiff))
-        throw pdal_error("Unable to write SRS as Geotiff keys.");
-}
+    // Set tiff tags from WKT
+    if (!GTIFSetFromOGISDefn(ctx.gtiff, srs.getWKT().c_str()))
+        throw error("Could not set m_gtiff from WKT");
+    GTIFWriteKeys(ctx.gtiff);
 
+    auto sizeFromType = [](int type, int count) -> size_t
+    {
+        if (type == STT_ASCII)
+            return count;
+        else if (type == STT_SHORT)
+            return 2 * count;
+        else if (type == STT_DOUBLE)
+            return 8 * count;
+        return 8 * count;
+    };
 
-// Utility functor with accompanying to print GeoTIFF directory.
-struct geotiff_dir_printer
-{
-    geotiff_dir_printer() {}
-
-    std::string output() const
+    int count;
+    int st_type;
+    uint8_t *data;
+    if (ST_GetKey(ctx.tiff, GEOTIFF_DIRECTORY_RECORD_ID,
+        &count, &st_type, (void **)&data))
     {
-        return m_oss.str();
+        size_t size = sizeFromType(st_type, count);
+        m_directoryRec.resize(size);
+        std::copy(data, data + size, m_directoryRec.begin());
     }
-    std::string::size_type size() const
+    if (ST_GetKey(ctx.tiff, GEOTIFF_DOUBLES_RECORD_ID,
+        &count, &st_type, (void **)&data))
     {
-        return m_oss.str().size();
+        size_t size = sizeFromType(st_type, count);
+        m_doublesRec.resize(size);
+        std::copy(data, data + size, m_doublesRec.begin());
     }
-
-    void operator()(char* data, void* /*aux*/)
+    if (ST_GetKey(ctx.tiff, GEOTIFF_ASCII_RECORD_ID,
+        &count, &st_type, (void **)&data))
     {
-        if (0 != data)
-        {
-            m_oss << data;
-        }
+        size_t size = sizeFromType(st_type, count);
+        m_asciiRec.resize(size);
+        std::copy(data, data + size, m_asciiRec.begin());
     }
-
-private:
-    std::ostringstream m_oss;
-};
-
-
-static int pdalGeoTIFFPrint(char* data, void* aux)
-{
-    geotiff_dir_printer* printer = reinterpret_cast<geotiff_dir_printer*>(aux);
-    (*printer)(data, 0);
-    return static_cast<int>(printer->size());
-}
-
-
-std::string GeotiffSupport::getText() const
-{
-    if (m_gtiff == NULL)
-        return std::string("");
-
-    geotiff_dir_printer geotiff_printer;
-    GTIFPrint(m_gtiff, pdalGeoTIFFPrint, &geotiff_printer);
-    const std::string s = geotiff_printer.output();
-    return s;
 }
 
 } // namespace pdal
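
The two classes above replace the stateful GeotiffSupport with one-shot
conversions, one per direction.  A sketch of the round trip they enable,
using only the interface declared in GeotiffSupport.hpp below; the function
itself is illustrative, not part of the commit:

    #include <io/GeotiffSupport.hpp>
    #include <pdal/SpatialReference.hpp>

    #include <cassert>

    using namespace pdal;

    void roundTrip(const SpatialReference& srs)
    {
        // SRS -> GeoTIFF key records (the bytes stored as LAS VLRs).
        GeotiffTags tags(srs);

        // GeoTIFF key records -> SRS.
        GeotiffSrs geotiff(tags.directoryData(), tags.doublesData(),
            tags.asciiData());
        assert(!geotiff.srs().empty());
    }
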
diff --git a/io/GeotiffSupport.hpp b/io/GeotiffSupport.hpp
index 09ff0a7..8ae6653 100644
--- a/io/GeotiffSupport.hpp
+++ b/io/GeotiffSupport.hpp
@@ -36,45 +36,43 @@
 
 #include <pdal/SpatialReference.hpp>
 
-// GDAL
-#include <geo_normalize.h>
-#include <ogr_spatialref.h>
-
-// See http://lists.osgeo.org/pipermail/gdal-dev/2013-November/037429.html
-#define CPL_SERV_H_INCLUDED
-
-#include <string>
-#include <stdexcept>
-
-struct StTiff;
-
 namespace pdal
 {
 
-class PDAL_DLL GeotiffSupport
+class GeotiffSrs
 {
 public:
-    GeotiffSupport() : m_gtiff(0), m_tiff(0)
-    {}
-    ~GeotiffSupport();
+    GeotiffSrs(const std::vector<uint8_t>& directoryRec,
+        const std::vector<uint8_t>& doublesRec,
+        const std::vector<uint8_t>& asciiRec);
+    SpatialReference srs() const
+        { return m_srs; }
+private:
+    SpatialReference m_srs;
+};
 
-    void resetTags();
-    bool setShortKeys(int tag, void *data, int size);
-    bool setDoubleKeys(int tag, void *data, int size);
-    bool setAsciiKeys(int tag, void *data, int size);
-    size_t getKey(int tag, int *count, void **data_ptr) const;
-    void setTags();
+class GeotiffTags
+{
+public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
 
-    SpatialReference srs() const;
-    void setWkt(const std::string&);
+    GeotiffTags(const SpatialReference& srs);
 
-    std::string getText() const;
+    std::vector<uint8_t>& directoryData()
+        { return m_directoryRec; }
+    std::vector<uint8_t>& doublesData()
+        { return m_doublesRec; }
+    std::vector<uint8_t>& asciiData()
+        { return m_asciiRec; }
 
 private:
-    void rebuildGTIF();
-
-    GTIF *m_gtiff;
-    StTiff *m_tiff;
+    std::vector<uint8_t> m_directoryRec;
+    std::vector<uint8_t> m_doublesRec;
+    std::vector<uint8_t> m_asciiRec;
 };
 
 } // namespace pdal
diff --git a/io/Ilvis2MetadataReader.cpp b/io/Ilvis2MetadataReader.cpp
index 88a0f87..876b62d 100644
--- a/io/Ilvis2MetadataReader.cpp
+++ b/io/Ilvis2MetadataReader.cpp
@@ -37,7 +37,8 @@
 namespace pdal
 {
 
-void Ilvis2MetadataReader::readMetadataFile(std::string filename, MetadataNode* m)
+void Ilvis2MetadataReader::readMetadataFile(std::string filename,
+    MetadataNode* m)
 {
     xmlDocPtr doc;
     xmlNodePtr node;
@@ -57,7 +58,8 @@ void Ilvis2MetadataReader::readMetadataFile(std::string filename, MetadataNode*
 }
 
 
-void Ilvis2MetadataReader::parseGranuleMetaDataFile(xmlNodePtr node, MetadataNode* m)
+void Ilvis2MetadataReader::parseGranuleMetaDataFile(xmlNodePtr node,
+    MetadataNode* m)
 {
     assertElementIs(node, "GranuleMetaDataFile");
 
@@ -77,7 +79,9 @@ void Ilvis2MetadataReader::parseGranuleMetaDataFile(xmlNodePtr node, MetadataNod
     assertEndOfElements(child);
 }
 
-void Ilvis2MetadataReader::parseGranuleURMetaData(xmlNodePtr node, MetadataNode* m)
+
+void Ilvis2MetadataReader::parseGranuleURMetaData(xmlNodePtr node,
+    MetadataNode* m)
 {
     assertElementIs(node, "GranuleURMetaData");
 
@@ -177,7 +181,8 @@ void Ilvis2MetadataReader::parseGranuleURMetaData(xmlNodePtr node, MetadataNode*
 }
 
 
-void Ilvis2MetadataReader::parseCollectionMetaData(xmlNodePtr node, MetadataNode * m)
+void Ilvis2MetadataReader::parseCollectionMetaData(xmlNodePtr node,
+    MetadataNode * m)
 {
     assertElementIs(node, "CollectionMetaData");
 
@@ -212,7 +217,8 @@ void Ilvis2MetadataReader::parseDataFiles(xmlNodePtr node, MetadataNode * m)
 }
 
 
-void Ilvis2MetadataReader::parseDataFileContainer(xmlNodePtr node, MetadataNode * m)
+void Ilvis2MetadataReader::parseDataFileContainer(xmlNodePtr node,
+    MetadataNode * m)
 {
     assertElementIs(node, "DataFileContainer");
 
@@ -247,7 +253,8 @@ void Ilvis2MetadataReader::parseDataFileContainer(xmlNodePtr node, MetadataNode
 }
 
 
-void Ilvis2MetadataReader::parseECSDataGranule(xmlNodePtr node, MetadataNode * m)
+void Ilvis2MetadataReader::parseECSDataGranule(xmlNodePtr node,
+    MetadataNode * m)
 {
     assertElementIs(node, "ECSDataGranule");
 
@@ -301,7 +308,8 @@ void Ilvis2MetadataReader::parseRangeDateTime(xmlNodePtr node, MetadataNode * m)
 }
 
 
-void Ilvis2MetadataReader::parseSpatialDomainContainer(xmlNodePtr node, MetadataNode * m)
+void Ilvis2MetadataReader::parseSpatialDomainContainer(xmlNodePtr node,
+    MetadataNode * m)
 {
     assertElementIs(node, "SpatialDomainContainer");
 
@@ -339,12 +347,8 @@ void Ilvis2MetadataReader::parseGPolygon(xmlNodePtr node, MetadataNode * m)
         // There must be at least 3 points to be valid per the schema.
         int numPoints = countChildElements(child, "Point");
         if (numPoints < 3)
-        {
-            std::ostringstream oss;
-            oss << "Found a polygon boundary with less than 3 points, " <<
-                "invalid for this schema";
-            throw pdal_error(oss.str());
-        }
+            throw error("Found a polygon boundary with less than 3 points, "
+                "invalid for this schema");
 
         GEOSCoordSeq points = GEOSCoordSeq_create(numPoints + 1, 2);
         xmlNodePtr bdChild = getFirstChildElementNode(child);
@@ -476,7 +480,8 @@ void Ilvis2MetadataReader::parseSensor(xmlNodePtr node, MetadataNode * m)
 }
 
 
-void Ilvis2MetadataReader::parseSensorCharacteristic(xmlNodePtr node, MetadataNode * m)
+void Ilvis2MetadataReader::parseSensorCharacteristic(xmlNodePtr node,
+    MetadataNode * m)
 {
     assertElementIs(node, "SensorCharacteristic");
 
@@ -544,7 +549,8 @@ void Ilvis2MetadataReader::parsePSA(xmlNodePtr node, MetadataNode * m)
 
 // Since the Browse, PH, QA, and MP product nodes have the same structure
 // just differing prefixes, they can share this code.
-void Ilvis2MetadataReader::parseXXProduct(std::string type, xmlNodePtr node, MetadataNode * m)
+void Ilvis2MetadataReader::parseXXProduct(std::string type, xmlNodePtr node,
+    MetadataNode * m)
 {
     std::string fullBase = type + "Product";
     std::string fullSub = type + "GranuleId";
@@ -572,16 +578,19 @@ std::string Ilvis2MetadataReader::extractString(xmlNodePtr node)
     return nodeStr;
 }
 
+
 double Ilvis2MetadataReader::extractDouble(xmlNodePtr node)
 {
     return atof((char*)node->children->content);
 }
 
+
 int Ilvis2MetadataReader::extractInt(xmlNodePtr node)
 {
     return atoi((char*)node->children->content);
 }
 
+
 long Ilvis2MetadataReader::extractLong(xmlNodePtr node)
 {
     return atol((char*)node->children->content);
@@ -632,26 +641,28 @@ bool Ilvis2MetadataReader::nodeElementIs(xmlNodePtr node, std::string expected)
             reinterpret_cast<const xmlChar*>(expected.c_str())) == 0;
 }
 
+
 // Throws an error if the next element is not what it expects
 void Ilvis2MetadataReader::assertElementIs(xmlNodePtr node, std::string expected)
 {
     if (!node || !nodeElementIs(node, expected))
-    {
-        errWrongElement(node, expected);
-    }
+        throw error("Expected element '" + expected + "', found '" +
+            std::string(node ? (const char *)node->name : "(none)") + "'");
 }
 
+
 // Throws an error if the node is not null
 void Ilvis2MetadataReader::assertEndOfElements(xmlNodePtr node)
 {
     if (node)
-    {
-        errExpectedEnd(node);
-    }
+        throw("Expected to find no more elements, found '" +
+            std::string((const char *)node->name) + "'");
 }
 
+
 // Counts the number of child element nodes with a given name
-int Ilvis2MetadataReader::countChildElements(xmlNodePtr node, std::string childName)
+int Ilvis2MetadataReader::countChildElements(xmlNodePtr node,
+    std::string childName)
 {
     xmlNodePtr child = getFirstChildElementNode(node);
     int ctr = 0;
@@ -668,21 +679,5 @@ int Ilvis2MetadataReader::countChildElements(xmlNodePtr node, std::string childN
     return ctr;
 }
 
-
-// Errors used when a file doesn't match the schema.
-void Ilvis2MetadataReader::errWrongElement(xmlNodePtr node, std::string expected)
-{
-    std::ostringstream oss;
-    oss << "Expected element '" << expected << "', found '" << node->name << "'";
-    throw pdal_error(oss.str());
-}
-
-void Ilvis2MetadataReader::errExpectedEnd(xmlNodePtr node)
-{
-    std::ostringstream oss;
-    oss << "Expected to find no more elements, found '" << node->name << "'";
-    throw pdal_error(oss.str());
-}
-
 } // namespace pdal
 
diff --git a/io/Ilvis2MetadataReader.hpp b/io/Ilvis2MetadataReader.hpp
index d789361..2c80496 100644
--- a/io/Ilvis2MetadataReader.hpp
+++ b/io/Ilvis2MetadataReader.hpp
@@ -49,6 +49,12 @@ namespace pdal
 class PDAL_DLL Ilvis2MetadataReader
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& s) : std::runtime_error(s)
+        {}
+    };
+
     void readMetadataFile(std::string filename, MetadataNode* m);
 
 protected:
@@ -100,8 +106,6 @@ private:
     void assertElementIs(xmlNodePtr node, std::string expected);
     void assertEndOfElements(xmlNodePtr node);
     int countChildElements(xmlNodePtr node, std::string childName);
-    void errWrongElement(xmlNodePtr node, std::string expected);
-    void errExpectedEnd(xmlNodePtr node);
 };
 
 }
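
The error struct added here follows a pattern this commit applies across the
codebase: helpers throw a small class-local error type instead of pdal_error,
and the owning stage catches it and rethrows through throwError() so the
message carries the stage name.  A minimal self-contained sketch of the
pattern, with all names hypothetical:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // A helper with its own local error type.
    struct Helper
    {
        struct error : public std::runtime_error
        {
            error(const std::string& s) : std::runtime_error(s)
            {}
        };

        void parse()
            { throw error("bad input"); }
    };

    // A stage rethrows with its name prefixed, standing in for
    // Stage::throwError() in PDAL.
    struct ToyStage
    {
        void throwError(const std::string& msg)
            { throw std::runtime_error("readers.example: " + msg); }

        void ready()
        {
            Helper h;
            try
            {
                h.parse();
            }
            catch (const Helper::error& err)
            {
                throwError(err.what());
            }
        }
    };

    int main()
    {
        try
        {
            ToyStage().ready();
        }
        catch (const std::exception& e)
        {
            std::cout << e.what() << '\n'; // readers.example: bad input
        }
        return 0;
    }
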
diff --git a/io/Ilvis2Reader.cpp b/io/Ilvis2Reader.cpp
index bea3db5..175ba8f 100644
--- a/io/Ilvis2Reader.cpp
+++ b/io/Ilvis2Reader.cpp
@@ -58,11 +58,11 @@ std::istream& operator >> (std::istream& in, Ilvis2Reader::IlvisMapping& mval)
 
     in >> s;
     s = Utils::toupper(s);
-    
+
     static std::map<std::string, Ilvis2Reader::IlvisMapping> m =
-        { { "INVALID", Ilvis2Reader::IlvisMapping::INVALID }, 
-          { "LOW", Ilvis2Reader::IlvisMapping::LOW }, 
-          { "HIGH", Ilvis2Reader::IlvisMapping::HIGH }, 
+        { { "INVALID", Ilvis2Reader::IlvisMapping::INVALID },
+          { "LOW", Ilvis2Reader::IlvisMapping::LOW },
+          { "HIGH", Ilvis2Reader::IlvisMapping::HIGH },
           { "ALL", Ilvis2Reader::IlvisMapping::ALL } };
 
     mval = m[s];
@@ -131,11 +131,7 @@ Dimension::IdList Ilvis2Reader::getDefaultDimensions()
 void Ilvis2Reader::initialize(PointTableRef)
 {
     if (!m_metadataFile.empty() && !FileUtils::fileExists(m_metadataFile))
-    {
-        std::ostringstream oss;
-        oss << "Invalid metadata file: '" << m_metadataFile << "'";
-        throw pdal_error(oss.str());
-    }
+        throwError("Invalid metadata file: '" + m_metadataFile + "'");
 
     // Data are WGS84 (4326) with ITRF2000 datum (6656)
     // See http://nsidc.org/data/docs/daac/icebridge/ilvis2/index.html for
@@ -150,12 +146,8 @@ T convert(const StringList& s, const std::string& name, size_t fieldno)
 {
     T output;
     if (!Utils::fromString(s[fieldno], output))
-    {
-        std::stringstream oss;
-        oss << "Unable to convert " << name << ", " << s[fieldno] <<
-            ", to double";
-        throw pdal_error(oss.str());
-    }
+        throw Ilvis2Reader::error("Unable to convert " + name +
+            ", " + s[fieldno] + ", to double");
 
     return output;
 }
@@ -210,7 +202,14 @@ void Ilvis2Reader::ready(PointTableRef table)
 {
     if (!m_metadataFile.empty())
     {
-        m_mdReader.readMetadataFile(m_metadataFile, &m_metadata);
+        try
+        {
+            m_mdReader.readMetadataFile(m_metadataFile, &m_metadata);
+        }
+        catch (const Ilvis2MetadataReader::error& err)
+        {
+            throwError(err.what());
+        }
     }
 
     static const int HeaderSize = 2;
@@ -235,41 +234,45 @@ bool Ilvis2Reader::processOne(PointRef& point)
 // Format:
 // LVIS_LFID SHOTNUMBER TIME LONGITUDE_CENTROID LATITUDE_CENTROID ELEVATION_CENTROID LONGITUDE_LOW LATITUDE_LOW ELEVATION_LOW LONGITUDE_HIGH LATITUDE_HIGH ELEVATION_HIGH
 
-    // This handles the second time through for this data line when we have
-    // an "ALL" mapping and the high and low elevations are different.
-    if (m_resample)
-    {
-        readPoint(point, m_fields, "HIGH");
-        m_resample = false;
-        return true;
-    }
-
-    if (!std::getline(m_stream, line))
-        return false;
-    m_fields = Utils::split2(line, ' ');
-    if (m_fields.size() != 12)
+    try
     {
-        std::stringstream oss;
-        oss << getName() << ": Invalid format for line " << m_lineNum <<
-            ".  Expected 12 fields, got " << m_fields.size() << ".";
-        throw pdal_error(oss.str());
+        // This handles the second time through for this data line when we have
+        // an "ALL" mapping and the high and low elevations are different.
+        if (m_resample)
+        {
+            readPoint(point, m_fields, "HIGH");
+            m_resample = false;
+            return true;
+        }
+
+        if (!std::getline(m_stream, line))
+            return false;
+        m_fields = Utils::split2(line, ' ');
+        if (m_fields.size() != 12)
+            throwError("Invalid format for line " +
+                Utils::toString(m_lineNum) + ".  Expected 12 fields, got " +
+                Utils::toString(m_fields.size()) + ".");
+
+        double low_elev = convert<double>(m_fields, "ELEVATION_LOW", 8);
+        double high_elev = convert<double>(m_fields, "ELEVATION_HIGH", 11);
+
+        // write LOW point if specified, or for ALL
+        if (m_mapping == IlvisMapping::LOW || m_mapping == IlvisMapping::ALL)
+        {
+            readPoint(point, m_fields, "LOW");
+            // If we have ALL mapping and the high elevation is different
+            // from that of the low elevation, we'll add a second point
+            // with the high elevation.
+            if (m_mapping == IlvisMapping::ALL && (low_elev != high_elev))
+                m_resample = true;
+        }
+        else if (m_mapping == IlvisMapping::HIGH)
+            readPoint(point, m_fields, "HIGH");
     }
-
-    double low_elev = convert<double>(m_fields, "ELEVATION_LOW", 8);
-    double high_elev = convert<double>(m_fields, "ELEVATION_HIGH", 11);
-
-    // write LOW point if specified, or for ALL
-    if (m_mapping == IlvisMapping::LOW || m_mapping == IlvisMapping::ALL)
+    catch (const error& err)
     {
-        readPoint(point, m_fields, "LOW");
-        // If we have ALL mapping and the high elevation is different
-        // from that of the low elevation, we'll a second point with the
-        // high elevation.
-        if (m_mapping == IlvisMapping::ALL && (low_elev != high_elev))
-            m_resample = true;
+        throwError(err.what());
     }
-    else if (m_mapping == IlvisMapping::HIGH)
-        readPoint(point, m_fields, "HIGH");
     return true;
 }
 
@@ -293,12 +296,5 @@ point_count_t Ilvis2Reader::read(PointViewPtr view, point_count_t count)
     return numRead;
 }
 
-
-void Ilvis2Reader::done(PointTableRef table)
-{
-
-
-}
-
 } // namespace pdal
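
For the ALL mapping, processOne() above emits the LOW point and sets
m_resample so the next call re-reads the same parsed fields and emits the
HIGH point.  A toy sketch of that two-pass state machine; names and data are
invented:

    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    struct ToyReader
    {
        bool resample = false;  // true -> emit HIGH for the current line
        size_t lineNum = 0;
        std::vector<std::pair<double, double>> lines; // (low, high)

        bool next(double& out)
        {
            if (resample)   // second pass over the same line
            {
                out = lines[lineNum - 1].second;
                resample = false;
                return true;
            }
            if (lineNum == lines.size())
                return false;
            auto& l = lines[lineNum++];
            out = l.first;
            if (l.first != l.second)  // equivalent of mapping == ALL
                resample = true;
            return true;
        }
    };

    int main()
    {
        ToyReader r;
        r.lines = { { 10, 12 }, { 5, 5 } };
        double z;
        while (r.next(z))
            std::cout << z << '\n';  // prints 10, 12, 5
        return 0;
    }
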
 
diff --git a/io/Ilvis2Reader.hpp b/io/Ilvis2Reader.hpp
index 03b0c5f..cf65be6 100644
--- a/io/Ilvis2Reader.hpp
+++ b/io/Ilvis2Reader.hpp
@@ -67,6 +67,12 @@ public:
       ALL
     };
 
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     Ilvis2Reader()
     {}
 
@@ -90,7 +96,6 @@ private:
     virtual void addArgs(ProgramArgs& args);
     virtual void initialize(PointTableRef table);
     virtual void ready(PointTableRef table);
-    virtual void done(PointTableRef table);
     virtual bool processOne(PointRef& point);
     virtual point_count_t read(PointViewPtr view, point_count_t count);
 
diff --git a/io/LasHeader.cpp b/io/LasHeader.cpp
index 9dfaaf6..86be602 100644
--- a/io/LasHeader.cpp
+++ b/io/LasHeader.cpp
@@ -89,8 +89,15 @@ LasHeader::LasHeader() : m_fileSig(FILE_SIGNATURE), m_sourceId(0),
 void LasHeader::setSummary(const LasSummaryData& summary)
 {
     m_pointCount = summary.getTotalNumPoints();
-    for (size_t num = 0; num < RETURN_COUNT; ++num)
-        m_pointCountByReturn[num] = (int)summary.getReturnCount(num);
+    try
+    {
+        for (size_t num = 0; num < RETURN_COUNT; ++num)
+            m_pointCountByReturn[num] = (int)summary.getReturnCount(num);
+    }
+    catch (const LasSummaryData::error& err)
+    {
+        throw error(err.what());
+    }
     m_bounds = summary.getBounds();
 }
 
@@ -101,13 +108,13 @@ void LasHeader::setScaling(const Scaling& scaling)
     const double& ys = scaling.m_yXform.m_scale.m_val;
     const double& zs = scaling.m_zXform.m_scale.m_val;
     if (xs == 0)
-        throw std::invalid_argument("X scale of 0.0 is invalid!");
+        throw error("X scale of 0.0 is invalid!");
 
     if (ys == 0)
-        throw std::invalid_argument("Y scale of 0.0 is invalid!");
+        throw error("Y scale of 0.0 is invalid!");
 
     if (zs == 0)
-        throw std::invalid_argument("Z scale of 0.0 is invalid!");
+        throw error("Z scale of 0.0 is invalid!");
 
     m_scales[0] = xs;
     m_scales[1] = ys;
@@ -266,51 +273,43 @@ void LasHeader::setSrsFromWkt()
 
 void LasHeader::setSrsFromGeotiff()
 {
-// These are defined in geo_simpletags.h
-// We're not including that file because it includes
-// geotiff.h, which includes a ton of other stuff
-// that might conflict with the messy libgeotiff/GDAL
-// symbol mess
-
-#define STT_SHORT   1
-#define STT_DOUBLE  2
-#define STT_ASCII   3
-
-    GeotiffSupport geotiff;
-    geotiff.resetTags();
-
     LasVLR *vlr;
+    uint8_t *data;
+    size_t dataLen;
 
     vlr = findVlr(TRANSFORM_USER_ID, GEOTIFF_DIRECTORY_RECORD_ID);
     // We must have a directory entry.
     if (!vlr)
         return;
-    if (!geotiff.setShortKeys(vlr->recordId(), (void *)vlr->data(),
-        (int)vlr->dataLen()))
-    {
-        std::ostringstream oss;
+    data = (uint8_t *)vlr->data();
+    dataLen = vlr->dataLen();
 
-        oss << "Invalid GeoTIFF directory record.  Can't "
-            "interpret spatial reference.";
-        throw pdal_error(oss.str());
-    }
+    std::vector<uint8_t> directoryRec(data, data + dataLen);
 
     vlr = findVlr(TRANSFORM_USER_ID, GEOTIFF_DOUBLES_RECORD_ID);
+    data = NULL;
+    dataLen = 0;
     if (vlr && !vlr->isEmpty())
-        geotiff.setDoubleKeys(vlr->recordId(), (void *)vlr->data(),
-            (int)vlr->dataLen());
+    {
+        data = (uint8_t *)vlr->data();
+        dataLen = vlr->dataLen();
+    }
+    std::vector<uint8_t> doublesRec(data, data + dataLen);
+
     vlr = findVlr(TRANSFORM_USER_ID, GEOTIFF_ASCII_RECORD_ID);
+    data = NULL;
+    dataLen = 0;
     if (vlr && !vlr->isEmpty())
-        geotiff.setAsciiKeys(vlr->recordId(), (void *)vlr->data(),
-            (int)vlr->dataLen());
+    {
+        data = (uint8_t *)vlr->data();
+        dataLen = vlr->dataLen();
+    }
+    std::vector<uint8_t> asciiRec(data, data + dataLen);
 
-    geotiff.setTags();
+    GeotiffSrs geotiff(directoryRec, doublesRec, asciiRec);
     SpatialReference gtiffSrs = geotiff.srs();
     if (!gtiffSrs.empty())
         m_srs = gtiffSrs;
-
-    m_log->get(LogLevel::Debug5) << "GeoTIFF keys: " << geotiff.getText() <<
-        std::endl;
 }
 
 
@@ -321,11 +320,10 @@ ILeStream& operator>>(ILeStream& in, LasHeader& h)
     uint32_t legacyReturnCount;
 
     in.get(h.m_fileSig, 4);
-    if (!Utils::iequals(h.m_fileSig, "LASF"))
-    {
-        throw pdal::pdal_error("File signature is not 'LASF', "
+    if (!Utils::iequals(h.m_fileSig, LasHeader::FILE_SIGNATURE))
+        throw LasHeader::error("File signature is not 'LASF', "
             "is this an LAS/LAZ file?");
-    }
+
     in >> h.m_sourceId >> h.m_globalEncoding;
     LasHeader::get(in, h.m_projectUuid);
     in >> versionMajor >> h.m_versionMinor;
diff --git a/io/LasHeader.hpp b/io/LasHeader.hpp
index 17e4b30..709121d 100644
--- a/io/LasHeader.hpp
+++ b/io/LasHeader.hpp
@@ -60,6 +60,12 @@ class Scaling;
 class PDAL_DLL LasHeader
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& s) : std::runtime_error(s)
+        {}
+    };
+
     static const size_t LEGACY_RETURN_COUNT = 5;
     static const size_t RETURN_COUNT = 15;
     static const std::string FILE_SIGNATURE;
diff --git a/io/LasReader.cpp b/io/LasReader.cpp
index efccc40..cc4070e 100644
--- a/io/LasReader.cpp
+++ b/io/LasReader.cpp
@@ -56,10 +56,9 @@ namespace pdal
 namespace
 {
 
-class invalid_stream : public pdal_error
+struct invalid_stream : public std::runtime_error
 {
-public:
-    invalid_stream(const std::string& msg) : pdal_error(msg)
+    invalid_stream(const std::string& msg) : std::runtime_error(msg)
         {}
 };
 
@@ -111,7 +110,14 @@ QuickInfo LasReader::inspect()
 
 void LasReader::initializeLocal(PointTableRef table, MetadataNode& m)
 {
-    m_extraDims = LasUtils::parse(m_extraDimSpec);
+    try
+    {
+        m_extraDims = LasUtils::parse(m_extraDimSpec);
+    }
+    catch (const LasUtils::error& err)
+    {
+        throwError(err.what());
+    }
 
     std::string compression = Utils::toupper(m_compression);
 #if defined(PDAL_HAVE_LAZPERF) && defined(PDAL_HAVE_LASZIP)
@@ -122,26 +128,21 @@ void LasReader::initializeLocal(PointTableRef table, MetadataNode& m)
     if (compression == "EITHER")
         compression = "LASZIP";
     if (compression == "LAZPERF")
-        throw pdal_error("Can't decompress with LAZperf.  PDAL not built "
+        throwError("Can't decompress with LAZperf.  PDAL not built "
             "with LAZperf.");
 #endif
 #if defined(PDAL_HAVE_LAZPERF) && !defined(PDAL_HAVE_LASZIP)
     if (compression == "EITHER")
         compression = "LAZPERF";
     if (compression == "LASZIP")
-        throw pdal_error("Can't decompress with LASzip.  PDAL not built "
+        throwError("Can't decompress with LASzip.  PDAL not built "
             "with LASzip.");
 #endif
 
 #if defined(PDAL_HAVE_LAZPERF) || defined(PDAL_HAVE_LASZIP)
     if (compression != "LAZPERF" && compression != "LASZIP")
-    {
-        std::ostringstream oss;
-
-        oss << "Invalid value for option for compression: '" <<
-            m_compression << "'.  Value values are 'lazperf' and 'laszip'.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Invalid value for option for compression: '" +
+            m_compression + "'.  Value values are 'lazperf' and 'laszip'.");
 #endif
 
     // Set case-corrected value.
@@ -160,21 +161,14 @@ void LasReader::initializeLocal(PointTableRef table, MetadataNode& m)
     {
         in >> m_header;
     }
-    catch (pdal_error e)
+    catch (const LasHeader::error& e)
     {
-        std::ostringstream oss;
-
-        oss << getName() << e.what();
-        throw pdal_error(oss.str());
+        throwError(e.what());
     }
 
     if (!m_header.pointFormatSupported())
-    {
-        std::ostringstream oss;
-        oss << "Unsupported LAS input point format: " <<
-            (int)m_header.pointFormat() << ".";
-       throw pdal_error(oss.str());
-    }
+        throwError("Unsupported LAS input point format: " +
+            Utils::toString((int)m_header.pointFormat()) + ".");
 
     if (m_header.versionAtLeast(1, 4))
         readExtraBytesVlr();
@@ -200,7 +194,14 @@ void LasReader::ready(PointTableRef table)
         {
             LasVLR *vlr = m_header.findVlr(LASZIP_USER_ID,
                 LASZIP_RECORD_ID);
-            m_zipPoint.reset(new LasZipPoint(vlr));
+            try
+            {
+                m_zipPoint.reset(new LasZipPoint(vlr));
+            }
+            catch (const LasZipPoint::error& err)
+            {
+                throwError(err.what());
+            }
 
             if (!m_unzipper)
             {
@@ -216,8 +217,8 @@ void LasReader::ready(PointTableRef table)
                     const char* err = m_unzipper->get_error();
                     if (err == NULL)
                         err = "(unknown error)";
-                    oss << "Failed to open LASzip stream: " << std::string(err);
-                    throw pdal_error(oss.str());
+                    throwError("Failed to open LASzip stream: " +
+                        std::string(err) + ".");
                 }
             }
         }
@@ -235,7 +236,7 @@ void LasReader::ready(PointTableRef table)
 #endif
 
 #if !defined(PDAL_HAVE_LAZPERF) && !defined(PDAL_HAVE_LASZIP)
-        throw pdal_error("Can't read compressed file without LASzip or "
+        throwError("Can't read compressed file without LASzip or "
             "LAZperf decompression library.");
 #endif
     }
@@ -358,6 +359,25 @@ void LasReader::extractHeaderMetadata(MetadataNode& forward, MetadataNode& m)
     m.add<uint32_t>("count",
         m_header.pointCount(), "This field contains the total "
         "number of point records within the file.");
+
+    // PDAL metadata VLR
+    LasVLR *vlr = m_header.findVlr("PDAL", 12);
+    if (vlr)
+    {
+        const char *pos = vlr->data();
+        size_t size = vlr->dataLen();
+        m.addWithType("pdal_metadata", std::string(pos, size), "json", "PDAL Processing Metadata");
+    }
+    //
+    // PDAL pipeline VLR
+    vlr = m_header.findVlr("PDAL", 13);
+    if (vlr)
+    {
+        const char *pos = vlr->data();
+        size_t size = vlr->dataLen();
+        m.addWithType("pdal_pipeline", std::string(pos, size), "json", "PDAL Processing Pipeline");
+    }
+
 }
 
 
@@ -434,11 +454,14 @@ void LasReader::extractVlrMetadata(MetadataNode& forward, MetadataNode& m)
             "Record ID specified by the user.");
         vlrNode.add("description", vlr.description());
 
-        if ((vlr.userId() != TRANSFORM_USER_ID) &&
-            (vlr.userId() != SPEC_USER_ID) &&
-            (vlr.userId() != LASZIP_USER_ID) &&
-            (vlr.userId() != LIBLAS_USER_ID))
-            forward.add(vlrNode);
+        if (vlr.userId() == TRANSFORM_USER_ID ||
+            vlr.userId() == LASZIP_USER_ID ||
+            vlr.userId() == LIBLAS_USER_ID)
+            continue;
+        if (vlr.userId() == SPEC_USER_ID &&
+            vlr.recordId() != 0 && vlr.recordId() != 3)
+            continue;
+        forward.add(vlrNode);
     }
 }
 
@@ -506,8 +529,8 @@ bool LasReader::processOne(PointRef& point)
                 const char* err = m_unzipper->get_error();
                 if (!err)
                     err = "(unknown error)";
-                error += err;
-                throw pdal_error(error);
+                error += std::string(err) + ".";
+                throwError(error);
             }
             loadPoint(point, (char *)m_zipPoint->m_lz_point_data.data(),
                 pointLen);
@@ -522,7 +545,7 @@ bool LasReader::processOne(PointRef& point)
         }
 #endif
 #if !defined(PDAL_HAVE_LAZPERF) && !defined(PDAL_HAVE_LASZIP)
-        throw pdal_error("Can't read compressed file without LASzip or "
+        throwError("Can't read compressed file without LASzip or "
             "LAZperf decompression library.");
 #endif
     } // compression
@@ -559,7 +582,7 @@ point_count_t LasReader::read(PointViewPtr view, point_count_t count)
             }
         }
 #else
-        throw pdal_error("Can't read compressed file without LASzip or "
+        throwError("Can't read compressed file without LASzip or "
             "LAZperf decompression library.");
 #endif
     }
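
The conversions above follow a pattern repeated throughout this commit:
helper classes (LasZipPoint, LasUtils, LasSummaryData, ...) throw their own
nested `error` type, and the owning stage catches it and re-throws through
`throwError()`, which tags the message with the stage name so users can tell
which pipeline stage failed. A minimal self-contained sketch of the pattern
(names below are illustrative, not PDAL's actual API):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

struct Helper
{
    // Local exception type, mirroring the structs added in this commit.
    struct error : public std::runtime_error
    {
        error(const std::string& err) : std::runtime_error(err)
        {}
    };

    void parse(const std::string& s)
    {
        if (s.empty())
            throw error("Invalid specification.");
    }
};

struct Stage
{
    std::string getName() const
        { return "readers.example"; }

    // Stand-in for Stage::throwError(): prefix the stage name.
    void throwError(const std::string& msg) const
        { throw std::runtime_error(getName() + ": " + msg); }

    void initialize()
    {
        try
        {
            Helper().parse("");
        }
        catch (const Helper::error& err)
        {
            throwError(err.what());
        }
    }
};

int main()
{
    try
    {
        Stage().initialize();
    }
    catch (const std::runtime_error& e)
    {
        // Prints "readers.example: Invalid specification."
        std::cout << e.what() << "\n";
    }
    return 0;
}
```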
diff --git a/io/LasSummaryData.cpp b/io/LasSummaryData.cpp
index de2bcf8..3ba1f7f 100644
--- a/io/LasSummaryData.cpp
+++ b/io/LasSummaryData.cpp
@@ -77,7 +77,7 @@ BOX3D LasSummaryData::getBounds() const
 point_count_t LasSummaryData::getReturnCount(int returnNumber) const
 {
     if (returnNumber < 0 || (size_t)returnNumber >= m_returnCounts.size())
-        throw pdal_error("getReturnCount: point returnNumber is out of range");
+        throw error("Point return number is out of range");
     return m_returnCounts[returnNumber];
 }
 
diff --git a/io/LasSummaryData.hpp b/io/LasSummaryData.hpp
index 16e8fa8..9bc7cf1 100644
--- a/io/LasSummaryData.hpp
+++ b/io/LasSummaryData.hpp
@@ -47,6 +47,12 @@ namespace pdal
 class PDAL_DLL LasSummaryData
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     LasSummaryData();
 
     void addPoint(double x, double y, double z, int returnNumber);
diff --git a/io/LasUtils.cpp b/io/LasUtils.cpp
index ad10fa6..eeebaa0 100644
--- a/io/LasUtils.cpp
+++ b/io/LasUtils.cpp
@@ -195,24 +195,16 @@ std::vector<ExtraDim> parse(const StringList& dimString)
 
         StringList s = Utils::split2(dim, '=');
         if (s.size() != 2)
-        {
-            std::ostringstream oss;
-            oss << "Invalid extra dimension specified: '" << dim <<
+            throw error("Invalid extra dimension specified: '" + dim +
                 "'.  Need <dimension>=<type>.  See documentation "
-                " for details.";
-            throw pdal_error(oss.str());
-        }
+                " for details.");
         Utils::trim(s[0]);
         Utils::trim(s[1]);
         Dimension::Type type = Dimension::type(s[1]);
         if (type == Dimension::Type::None)
-        {
-            std::ostringstream oss;
-            oss << "Invalid extra dimension type specified: '" <<
-                dim << "'.  Need <dimension>=<type>.  See documentations "
-                " for details.";
-            throw pdal_error(oss.str());
-        }
+            throw error("Invalid extra dimension type specified: '" + dim +
+                "'.  Need <dimension>=<type>.  See documentation "
+                " for details.");
         ExtraDim ed(s[0], type);
         extraDims.push_back(ed);
     }
@@ -220,8 +212,8 @@ std::vector<ExtraDim> parse(const StringList& dimString)
     if (all)
     {
         if (extraDims.size())
-            throw (pdal_error("Can't specify specific extra dimensions with "
-                "special 'all' keyword."));
+            throw error("Can't specify specific extra dimensions with "
+                "special 'all' keyword.");
         extraDims.push_back(ExtraDim("all", Dimension::Type::None));
     }
 
diff --git a/io/LasUtils.hpp b/io/LasUtils.hpp
index d2d5a0c..cdffb56 100644
--- a/io/LasUtils.hpp
+++ b/io/LasUtils.hpp
@@ -165,6 +165,12 @@ private:
 namespace LasUtils
 {
 
+struct error : public std::runtime_error
+{
+    error(const std::string& err) : std::runtime_error(err)
+    {}
+};
+
 std::vector<ExtraDim> parse(const StringList& dimString);
 
 } // namespace LasUtils
diff --git a/io/LasWriter.cpp b/io/LasWriter.cpp
index 732bb98..5823ddf 100644
--- a/io/LasWriter.cpp
+++ b/io/LasWriter.cpp
@@ -37,6 +37,7 @@
 #include <iostream>
 
 #include <pdal/Compression.hpp>
+#include <pdal/DimUtil.hpp>
 #include <pdal/PDALUtils.hpp>
 #include <pdal/PointView.hpp>
 #include <pdal/util/Algorithm.hpp>
@@ -64,7 +65,8 @@ CREATE_STATIC_PLUGIN(1, 0, LasWriter, Writer, s_info)
 
 std::string LasWriter::getName() const { return s_info.name; }
 
-LasWriter::LasWriter() : m_ostream(NULL), m_compression(LasCompression::None)
+LasWriter::LasWriter() : m_ostream(NULL), m_compression(LasCompression::None),
+    m_srsCnt(0)
 {}
 
 
@@ -104,12 +106,15 @@ void LasWriter::addArgs(ProgramArgs& args)
         decltype(m_creationDoy)(doy));
     args.add("creation_year", "Creation year", m_creationYear,
         decltype(m_creationYear)(year));
+    args.add("pdal_metadata", "Write PDAL metadata as VLR?", m_writePDALMetadata,
+        decltype(m_writePDALMetadata)(false));
     args.add("scale_x", "X scale factor", m_scaleX, decltype(m_scaleX)(".01"));
     args.add("scale_y", "Y scale factor", m_scaleY, decltype(m_scaleY)(".01"));
     args.add("scale_z", "Z scale factor", m_scaleZ, decltype(m_scaleZ)(".01"));
     args.add("offset_x", "X offset", m_offsetX);
     args.add("offset_y", "Y offset", m_offsetY);
     args.add("offset_z", "Z offset", m_offsetZ);
+    args.add("vlrs", "List of VLRs to set", m_userVLRs);
 }
 
 void LasWriter::initialize()
@@ -125,11 +130,28 @@ void LasWriter::initialize()
         m_lasHeader.setCompressed(true);
 #if !defined(PDAL_HAVE_LASZIP) && !defined(PDAL_HAVE_LAZPERF)
     if (m_compression != LasCompression::None)
-        throw pdal_error("Can't write LAZ output.  "
-            "PDAL not built with LASzip or LAZperf.");
+        throwError("Can't write LAZ output.  PDAL not built with "
+            "LASzip or LAZperf.");
 #endif
-    m_extraDims = LasUtils::parse(m_extraDimSpec);
+    try
+    {
+        m_extraDims = LasUtils::parse(m_extraDimSpec);
+    }
+    catch (const LasUtils::error& err)
+    {
+        throwError(err.what());
+    }
     fillForwardList();
+    collectUserVLRs();
+}
+
+
+void LasWriter::spatialReferenceChanged(const SpatialReference&)
+{
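+    // Seeing more than one SRS while writing means the output would mix
+    // spatial references, which a single LAS file can't represent.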
+    if (++m_srsCnt > 1)
+        log()->get(LogLevel::Error) << getName() <<
+            ": Attempting to write '" << m_filename << "' with multiple "
+            "point spatial references." << std::endl;
 }
 
 
@@ -139,6 +161,10 @@ void LasWriter::prepared(PointTableRef table)
 
     PointLayoutPtr layout = table.layout();
 
+    // Make sure the dataformatID is set so that we can get the proper
+    // dimensions being written as part of the standard LAS record.
+    fillHeader();
+
     // If we've asked for all dimensions, add to extraDims all dimensions
     // in the layout that aren't already destined for LAS output.
     if (m_extraDims.size() == 1 && m_extraDims[0].m_name == "all")
@@ -159,17 +185,50 @@ void LasWriter::prepared(PointTableRef table)
     {
         dim.m_dimType.m_id = table.layout()->findDim(dim.m_name);
         if (dim.m_dimType.m_id == Dimension::Id::Unknown)
-        {
-            std::ostringstream oss;
-            oss << "Dimension '" << dim.m_name << "' specified in "
-                "'extra_dim' option not found.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Dimension '" + dim.m_name + "' specified in "
+                "'extra_dim' option not found.");
         m_extraByteLen += Dimension::size(dim.m_dimType.m_type);
+        log()->get(LogLevel::Info) << getName() << ": Writing dimension " <<
+            dim.m_name <<
+            "(" << Dimension::interpretationName(dim.m_dimType.m_type) <<
+            ") " << " to LAS extra bytes." << std::endl;
     }
 }
 
 
+// Capture user-specified VLRs
+void LasWriter::collectUserVLRs()
+{
+    for (const auto& v : m_userVLRs)
+    {
+        uint16_t recordId(1);
+        std::string userId("");
+        std::string description("");
+        std::string b64data("");
+        if (!v.isMember("user_id"))
+            throwError("VLR must contain a 'user_id'!");
+        userId = v["user_id"].asString();
+
+        if (!v.isMember("data"))
+            throwError("VLR must contain a base64-encoded 'data' member");
+        b64data = v["data"].asString();
+
+        if (v.isMember("record_id"))
+            recordId = v["record_id"].asUInt64();
+
+        if (v.isMember("description"))
+            description = v["description"].asString();
+
+        std::vector<uint8_t> data = Utils::base64_decode(b64data);
+        addVlr(userId, recordId, description, data);
+    }
+}
+
+
 // Get header info from options and store in map for processing with
 // metadata.
 void LasWriter::fillForwardList()
@@ -211,13 +270,8 @@ void LasWriter::fillForwardList()
         else if (Utils::contains(all, name))
             m_forwards.insert(name);
         else
-        {
-            std::ostringstream oss;
-
-            oss << "Error in 'forward' option.  Unknown field for "
-                "forwarding: '" << name << "'.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Error in 'forward' option.  Unknown field for "
+                "forwarding: '" + name + "'.");
     }
 }
 
@@ -225,6 +279,9 @@ void LasWriter::fillForwardList()
 void LasWriter::readyTable(PointTableRef table)
 {
     m_forwardMetadata = table.privateMetadata("lasforward");
+    MetadataNode m = table.metadata();
+    if (m_writePDALMetadata)
+        setPDALVLRs(m);
     setExtraBytesVlr();
 }
 
@@ -234,13 +291,7 @@ void LasWriter::readyFile(const std::string& filename,
 {
     std::ostream *out = Utils::createFile(filename, true);
     if (!out)
-    {
-        std::stringstream out;
-
-        out << "writers.las couldn't open file '" << filename <<
-            "' for output.";
-        throw pdal_error(out.str());
-    }
+        throwError("Couldn't open file '" + filename + "' for output.");
     m_curFilename = filename;
     m_error.setFilename(filename);
     Utils::writeProgress(m_progressFd, "READYFILE", filename);
@@ -329,6 +384,26 @@ MetadataNode LasWriter::findVlrMetadata(MetadataNode node,
     return node.find(pred);
 }
 
+void LasWriter::setPDALVLRs(MetadataNode& forward)
+{
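+    // Serialize the metadata tree and the pipeline itself to JSON and
+    // stash them in "PDAL" VLRs (record IDs 12 and 13); LasReader exposes
+    // them again as pdal_metadata/pdal_pipeline.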
+    auto store = [this](std::string json, int recordId, std::string description)
+    {
+        std::vector<uint8_t> data;
+        data.resize(json.size());
+        std::copy(json.begin(), json.end(), data.begin());
+        addVlr("PDAL", recordId, description, data);
+    };
+
+    std::ostringstream ostr;
+    Utils::toJSON(forward, ostr);
+    store(ostr.str(), 12, "PDAL metadata");
+
+    ostr.str("");
+    PipelineWriter::writePipeline(this, ostr);
+    store(ostr.str(), 13, "PDAL pipeline");
+}
+
 
 /// Set VLRs from metadata for forwarded info.
 void LasWriter::setVlrsFromMetadata(MetadataNode& forward)
@@ -376,42 +451,27 @@ void LasWriter::setVlrsFromSpatialRef()
 
 void LasWriter::addGeotiffVlrs()
 {
-    GeotiffSupport geotiff;
-    geotiff.resetTags();
-
-    geotiff.setWkt(m_srs.getWKT());
+    if (m_srs.empty())
+        return;
 
-    addGeotiffVlr(geotiff, GEOTIFF_DIRECTORY_RECORD_ID,
-        "GeoTiff GeoKeyDirectoryTag");
-    addGeotiffVlr(geotiff, GEOTIFF_DOUBLES_RECORD_ID,
-        "GeoTiff GeoDoubleParamsTag");
-    addGeotiffVlr(geotiff, GEOTIFF_ASCII_RECORD_ID,
-        "GeoTiff GeoAsciiParamsTag");
-}
+    try
+    {
+        GeotiffTags tags(m_srs);
 
+        if (tags.directoryData().empty())
+            throwError("Invalid spatial reference for writing GeoTiff VLR.");
 
-/// Add a geotiff VLR from the information associated with the record ID.
-/// \param  geotiff - Geotiff support structure reference.
-/// \param  recordId - Record ID associated with the VLR/Geotiff ref.
-/// \param  description - Description to use with the VLR
-/// \return  Whether the VLR was added.
-void LasWriter::addGeotiffVlr(GeotiffSupport& geotiff, uint16_t recordId,
-    const std::string& description)
-{
-    void *data;
-    int count;
-
-    size_t size = geotiff.getKey(recordId, &count, &data);
-    if (size == 0)
+        addVlr(TRANSFORM_USER_ID, GEOTIFF_DIRECTORY_RECORD_ID,
+                "GeoTiff GeoKeyDirectoryTag", tags.directoryData());
+        addVlr(TRANSFORM_USER_ID, GEOTIFF_DOUBLES_RECORD_ID,
+                "GeoTiff GeoDoubleParamsTag", tags.doublesData());
+        addVlr(TRANSFORM_USER_ID, GEOTIFF_ASCII_RECORD_ID,
+                "GeoTiff GeoAsciiParamsTag", tags.asciiData());
+    }
+    catch (GeotiffTags::error& err)
     {
-        log()->get(LogLevel::Warning) << getName() << ": Invalid spatial "
-            "reference for writing GeoTiff VLR." << std::endl;
-        return;
+        throwError(err.what());
     }
-
-    std::vector<uint8_t> buf(size);
-    memcpy(buf.data(), data, size);
-    addVlr(TRANSFORM_USER_ID, recordId, description, buf);
 }
 
 
@@ -537,7 +597,14 @@ void LasWriter::fillHeader()
 {
     const uint16_t WKT_MASK = (1 << 4);
 
-    m_lasHeader.setScaling(m_scaling);
+    try
+    {
+        m_lasHeader.setScaling(m_scaling);
+    }
+    catch (const LasHeader::error& err)
+    {
+        throwError(err.what());
+    }
     m_lasHeader.setVlrCount(m_vlrs.size());
     m_lasHeader.setEVlrCount(m_eVlrs.size());
 
@@ -558,12 +625,8 @@ void LasWriter::fillHeader()
     m_lasHeader.setGlobalEncoding(globalEncoding);
 
     if (!m_lasHeader.pointFormatSupported())
-    {
-        std::ostringstream oss;
-        oss << "Unsupported LAS output point format: " <<
-            (int)m_lasHeader.pointFormat() << ".";
-        throw pdal_error(oss.str());
-    }
+        throwError("Unsupported LAS output point format: " +
+            Utils::toString((int)m_lasHeader.pointFormat()) + ".");
 }
 
 
@@ -579,8 +642,18 @@ void LasWriter::readyCompression()
 void LasWriter::readyLasZipCompression()
 {
 #ifdef PDAL_HAVE_LASZIP
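+    // Point formats 6-10 were introduced with LAS 1.4 and aren't handled
+    // by the LASzip compressor used here.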
-    m_zipPoint.reset(new LasZipPoint(m_lasHeader.pointFormat(),
-        m_lasHeader.pointLen()));
+    if (m_lasHeader.pointFormat() > 5)
+        throwError("LASzip doesn't currently support compression using LAS "
+            "1.4 point formats (dataformat_id > 5).");
+    try
+    {
+        m_zipPoint.reset(new LasZipPoint(m_lasHeader.pointFormat(),
+            m_lasHeader.pointLen()));
+    }
+    catch (const LasZipPoint::error& err)
+    {
+        throwError(err.what());
+    }
     m_zipper.reset(new LASzipper());
     // Note: this will make the VLR count in the header incorrect, but we
     // rewrite that bit in finishOutput() to fix it up.
@@ -594,7 +667,7 @@ void LasWriter::readyLazPerfCompression()
 {
 #ifdef PDAL_HAVE_LAZPERF
     if (m_lasHeader.versionAtLeast(1, 4))
-        throw pdal_error("Can't write version 1.4 output with LAZperf.");
+        throwError("Can't write version 1.4 output with LAZperf.");
 
     laszip::factory::record_schema schema;
     schema.push(laszip::factory::record_item::POINT10);
@@ -624,8 +697,7 @@ void LasWriter::openCompression()
         const char* err = m_zipper->get_error();
         if (err == NULL)
             err = "(unknown error)";
-        oss << "Error opening LASzipper: " << std::string(err);
-        throw pdal_error(oss.str());
+        throwError("Error opening LASzipper: " + std::string(err) + ".");
     }
 #endif
 }
@@ -694,8 +766,7 @@ void LasWriter::writeLasZipBuf(char *pos, size_t pointLen, point_count_t numPts)
             const char* err = m_zipper->get_error();
             if (err == NULL)
                 err = "(unknown error)";
-            oss << "Error writing point: " << std::string(err);
-            throw pdal_error(oss.str());
+            throwError("Error writing point: " + std::string(err) + ".");
         }
         pos += pointLen;
     }
@@ -754,16 +825,13 @@ bool LasWriter::fillPointBuf(PointRef& point, LeInserter& ostream)
 
     auto converter = [this](double d, Dimension::Id dim) -> int32_t
     {
-        int32_t i;
+        int32_t i(0);
 
         if (!Utils::numericCast(d, i))
-        {
-            std::ostringstream oss;
-            oss << "Unable to convert scaled value (" << d << ") to "
-                "int32 for dimension '" << Dimension::name(dim) <<
-                "' when writing LAS/LAZ file " << m_curFilename << ".";
-            throw pdal_error(oss.str());
-        }
+            throwError("Unable to convert scaled value (" +
+                Utils::toString(d) + ") to "
+                "int32 for dimension '" + Dimension::name(dim) +
+                "' when writing LAS/LAZ file " + m_curFilename + ".");
         return i;
     };
 
@@ -895,10 +964,24 @@ void LasWriter::finishOutput()
     }
 
     // Reset the offset/scale since it may have been auto-computed
-    m_lasHeader.setScaling(m_scaling);
+    try
+    {
+        m_lasHeader.setScaling(m_scaling);
+    }
+    catch (const LasHeader::error& err)
+    {
+        throwError(err.what());
+    }
 
     // The summary is calculated as points are written.
-    m_lasHeader.setSummary(*m_summaryData);
+    try
+    {
+        m_lasHeader.setSummary(*m_summaryData);
+    }
+    catch (const LasHeader::error& err)
+    {
+        throwError(err.what());
+    }
 
     out.seek(0);
     out << m_lasHeader;
diff --git a/io/LasWriter.hpp b/io/LasWriter.hpp
index 0ac8103..e6b7066 100644
--- a/io/LasWriter.hpp
+++ b/io/LasWriter.hpp
@@ -45,6 +45,8 @@
 #include "LasSummaryData.hpp"
 #include "LasZipPoint.hpp"
 
+#include <json/json.h>
+
 extern "C" int32_t LasWriter_ExitFunc();
 extern "C" PF_ExitFunc LasWriter_InitPlugin();
 
@@ -103,6 +105,7 @@ private:
     LasCompression m_compression;
     std::vector<char> m_pointBuf;
     SpatialReference m_aSrs;
+    int m_srsCnt;
 
     NumHeaderVal<uint8_t, 1, 1> m_majorVersion;
     NumHeaderVal<uint8_t, 1, 4> m_minorVersion;
@@ -125,6 +128,8 @@ private:
     StringHeaderVal<20> m_offsetY;
     StringHeaderVal<20> m_offsetZ;
     MetadataNode m_forwardMetadata;
+    bool m_writePDALMetadata;
+    Json::Value m_userVLRs;
 
     virtual void addArgs(ProgramArgs& args);
     virtual void initialize();
@@ -134,9 +139,11 @@ private:
         const SpatialReference& srs);
     virtual void writeView(const PointViewPtr view);
     virtual bool processOne(PointRef& point);
+    void spatialReferenceChanged(const SpatialReference& srs);
     virtual void doneFile();
 
     void fillForwardList();
+    void collectUserVLRs();
     template <typename T>
     void handleHeaderForward(const std::string& s, T& headerVal,
         const MetadataNode& base);
@@ -148,6 +155,7 @@ private:
     void writeLasZipBuf(char *data, size_t pointLen, point_count_t numPts);
     void writeLazPerfBuf(char *data, size_t pointLen, point_count_t numPts);
     void setVlrsFromMetadata(MetadataNode& forward);
+    void setPDALVLRs(MetadataNode& m);
     MetadataNode findVlrMetadata(MetadataNode node, uint16_t recordId,
         const std::string& userId);
     void setExtraBytesVlr();
@@ -160,8 +168,6 @@ private:
         const std::string& description, std::vector<uint8_t>& data);
     void deleteVlr(const std::string& userId, uint16_t recordId);
     void addGeotiffVlrs();
-    void addGeotiffVlr(GeotiffSupport& geotiff, uint16_t recordId,
-        const std::string& description);
     bool addWktVlr();
     void finishLasZipOutput();
     void finishLazPerfOutput();
diff --git a/io/LasZipPoint.cpp b/io/LasZipPoint.cpp
index 8018133..d990abc 100644
--- a/io/LasZipPoint.cpp
+++ b/io/LasZipPoint.cpp
@@ -54,10 +54,9 @@ LasZipPoint::LasZipPoint(LasVLR *vlr) :
     {
-        std::ostringstream oss;
         const char* err = m_zip->get_error();
-        if (err == NULL) 
+        if (err == NULL)
             err = "(unknown error)";
-        oss << "Error unpacking zip VLR data: " << std::string(err);
-        throw pdal_error(oss.str());
+        throw error("Error unpacking zip VLR data: " + std::string(err) + ".");
     }
     ConstructItems();
 }
@@ -73,9 +72,8 @@ LasZipPoint::LasZipPoint(uint8_t format, uint16_t pointLen) :
         const char* err = m_zip->get_error();
         if (err == NULL)
             err = "(unknown error)";
-        oss << "Error setting up LASzip for format " << format << ": " <<
-            err;
-        throw pdal_error(oss.str());
+        throw error("Error setting up LASzip for format " +
+            Utils::toString(format) + ": " + err);
     }
     ConstructItems();
 }
diff --git a/io/LasZipPoint.hpp b/io/LasZipPoint.hpp
index c887d6b..57c049b 100644
--- a/io/LasZipPoint.hpp
+++ b/io/LasZipPoint.hpp
@@ -37,9 +37,9 @@
 #include <vector>
 
 #ifdef PDAL_HAVE_LASZIP
-#include <laszip/laszip.hpp>
-#include <laszip/lasunzipper.hpp>
-#include <laszip/laszipper.hpp>
+#include <laszip.hpp>
+#include <lasunzipper.hpp>
+#include <laszipper.hpp>
 #endif
 
 namespace pdal
@@ -52,6 +52,12 @@ class LasVLR;
 class PDAL_DLL LasZipPoint
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     LasZipPoint(LasVLR *lasHeader);
     LasZipPoint(uint8_t format, uint16_t pointLen);
     ~LasZipPoint();
@@ -59,7 +65,7 @@ public:
     std::vector<uint8_t> vlrData() const;
     LASzip* GetZipper() const
         { return m_zip.get(); }
-    
+
 private:
     std::unique_ptr<LASzip> m_zip;
 
diff --git a/io/OptechCommon.hpp b/io/OptechCommon.hpp
index 483cec8..e744885 100644
--- a/io/OptechCommon.hpp
+++ b/io/OptechCommon.hpp
@@ -37,17 +37,6 @@
 namespace pdal
 {
 
-
-class optech_error : public pdal_error
-{
-public:
-    optech_error(const std::string& msg)
-        : pdal_error(msg)
-    {
-    }
-};
-
-
 // Optech csd files contain misalignment angles and IMU offsets.
 // Misalignment angles and IMU offsets combine to form the boresight matrix.
 typedef struct
diff --git a/io/OptechReader.cpp b/io/OptechReader.cpp
index f8ce0ed..8c8fd7a 100644
--- a/io/OptechReader.cpp
+++ b/io/OptechReader.cpp
@@ -105,20 +105,13 @@ void OptechReader::initialize()
 {
     ILeStream stream(Utils::openFile(m_filename));
     if (!stream)
-    {
-        std::stringstream ss;
-        ss << "Unable to open " << m_filename << " for reading.";
-        throw pdal_error(ss.str());
-    }
+        throwError("Unable to open " + m_filename + " for reading.");
 
     stream.get(m_header.signature, 4);
     if (strcmp(m_header.signature, "CSD") != 0)
-    {
-        std::stringstream ss;
-        ss << "Invalid header signature when reading CSD file: '"
-           << m_header.signature << "'";
-        throw optech_error(ss.str());
-    }
+        throwError("Invalid header signature when reading CSD file: '" +
+            std::string(m_header.signature) + "'");
+
     stream.get(m_header.vendorId, 64);
     stream.get(m_header.softwareVersion, 32);
     stream >> m_header.formatVersion >> m_header.headerSize >>
@@ -154,11 +147,7 @@ void OptechReader::ready(PointTableRef)
 {
     m_istream.reset(new IStream(m_filename));
     if (!*m_istream)
-    {
-        std::stringstream ss;
-        ss << "Unable to open " << m_filename << " for reading.";
-        throw pdal_error(ss.str());
-    }
+        throwError("Unable to open " + m_filename + " for reading.");
 
     m_istream->seek(m_header.headerSize);
     m_recordIndex = 0;
diff --git a/io/PlyReader.cpp b/io/PlyReader.cpp
index 57f8aea..7f5e78a 100644
--- a/io/PlyReader.cpp
+++ b/io/PlyReader.cpp
@@ -53,9 +53,7 @@ struct CallbackContext
 
 void plyErrorCallback(p_ply ply, const char * message)
 {
-    std::stringstream ss;
-    ss << "Error opening ply file: " << message;
-    throw pdal_error(ss.str());
+    throw PlyReader::error(message);
 }
 
 
@@ -63,17 +61,11 @@ p_ply openPly(std::string filename)
 {
     p_ply ply = ply_open(filename.c_str(), &plyErrorCallback, 0, nullptr);
     if (!ply)
-    {
-        std::stringstream ss;
-        ss << "Unable to open file " << filename << " for reading.";
-        throw pdal_error(ss.str());
-    }
+        throw PlyReader::error("Unable to open file " + filename +
+            " for reading.");
+
     if (!ply_read_header(ply))
-    {
-        std::stringstream ss;
-        ss << "Unable to read header of " << filename << ".";
-        throw pdal_error(ss.str());
-    }
+        throw PlyReader::error("Unable to read header of " + filename + ".");
     return ply;
 }
 
@@ -88,35 +80,21 @@ int readPlyCallback(p_ply_argument argument)
     const char * propertyName;
 
     if (!ply_get_argument_element(argument, &element, &index))
-    {
-        std::stringstream ss;
-        ss << "Error getting argument element.";
-        throw pdal_error(ss.str());
-    }
+        throw PlyReader::error("Error getting argument element.");
+
     if (!ply_get_argument_user_data(argument, &contextAsVoid, &numToRead))
-    {
-        std::stringstream ss;
-        ss << "Error getting argument user data.";
-        throw pdal_error(ss.str());
-    }
+        throw PlyReader::error("Error getting argument user data.");
+
     // We've read enough, abort the callback cycle
     if (numToRead <= index)
-    {
         return 0;
-    }
 
     if (!ply_get_argument_property(argument, &property, nullptr, nullptr))
-    {
-        std::stringstream ss;
-        ss << "Error getting argument property.";
-        throw pdal_error(ss.str());
-    }
-    if (!ply_get_property_info(property, &propertyName, nullptr, nullptr, nullptr))
-    {
-        std::stringstream ss;
-        ss << "Error getting property info.";
-        throw pdal_error(ss.str());
-    }
+        throw PlyReader::error("Error getting argument property.");
+
+    if (!ply_get_property_info(property, &propertyName, nullptr,
+        nullptr, nullptr))
+        throw PlyReader::error("Error getting property info.");
 
     CallbackContext * context = static_cast<CallbackContext *>(contextAsVoid);
     double value = ply_get_argument_value(argument);
@@ -152,73 +130,71 @@ PlyReader::PlyReader()
 
 void PlyReader::initialize()
 {
-    p_ply ply = openPly(m_filename);
-    p_ply_element vertex_element = nullptr;
-    bool found_vertex_element = false;
-    const char* element_name;
-    long element_count;
-    while ((vertex_element = ply_get_next_element(ply, vertex_element)))
+    try
     {
-        if (!ply_get_element_info(vertex_element, &element_name, &element_count))
+        p_ply ply = openPly(m_filename);
+        p_ply_element vertex_element = nullptr;
+        bool found_vertex_element = false;
+        const char* element_name;
+        long element_count;
+        while ((vertex_element = ply_get_next_element(ply, vertex_element)))
         {
-            std::stringstream ss;
-            ss << "Error reading element info in " << m_filename << ".";
-            throw pdal_error(ss.str());
+            if (!ply_get_element_info(vertex_element, &element_name,
+                        &element_count))
+                throwError("Error reading element info in " + m_filename + ".");
+            if (strcmp(element_name, "vertex") == 0)
+            {
+                found_vertex_element = true;
+                break;
+            }
         }
-        if (strcmp(element_name, "vertex") == 0)
+        if (!found_vertex_element)
+            throwError("File " + m_filename + " does not contain a vertex "
+                    "element.");
+
+        static std::map<int, Dimension::Type> types =
+        {
+            { PLY_INT8, Dimension::Type::Signed8 },
+            { PLY_UINT8, Dimension::Type::Unsigned8 },
+            { PLY_INT16, Dimension::Type::Signed16 },
+            { PLY_UINT16, Dimension::Type::Unsigned16 },
+            { PLY_INT32, Dimension::Type::Signed32 },
+            { PLY_UINT32, Dimension::Type::Unsigned32 },
+            { PLY_FLOAT32, Dimension::Type::Float },
+            { PLY_FLOAT64, Dimension::Type::Double },
+
+            { PLY_CHAR, Dimension::Type::Signed8 },
+            { PLY_UCHAR, Dimension::Type::Unsigned8 },
+            { PLY_SHORT, Dimension::Type::Signed16 },
+            { PLY_USHORT, Dimension::Type::Unsigned16 },
+            { PLY_INT, Dimension::Type::Signed32 },
+            { PLY_UINT, Dimension::Type::Unsigned32 },
+            { PLY_FLOAT, Dimension::Type::Float },
+            { PLY_DOUBLE, Dimension::Type::Double }
+        };
+
+        p_ply_property property = nullptr;
+        while ((property = ply_get_next_property(vertex_element, property)))
         {
-            found_vertex_element = true;
-            break;
+            const char* name;
+            e_ply_type type;
+            e_ply_type length_type;
+            e_ply_type value_type;
+            if (!ply_get_property_info(property, &name, &type,
+                        &length_type, &value_type))
+                throwError("Error reading property info in " +
+                    m_filename + ".");
+            // For now, we'll just use PDAL's built in dimension matching.
+            // We could be smarter about this, e.g. by using the length
+            // and value type attributes.
+            m_vertexTypes[name] = types[type];
         }
+        ply_close(ply);
     }
-    if (!found_vertex_element)
+    catch (const error& err)
     {
-        std::stringstream ss;
-        ss << "File " << m_filename << " does not contain a vertex element.";
-        throw pdal_error(ss.str());
+        throwError(err.what());
     }
-
-    static std::map<int, Dimension::Type> types =
-    {
-        { PLY_INT8, Dimension::Type::Signed8 },
-        { PLY_UINT8, Dimension::Type::Unsigned8 },
-        { PLY_INT16, Dimension::Type::Signed16 },
-        { PLY_UINT16, Dimension::Type::Unsigned16 },
-        { PLY_INT32, Dimension::Type::Signed32 },
-        { PLY_UINT32, Dimension::Type::Unsigned32 },
-        { PLY_FLOAT32, Dimension::Type::Float },
-        { PLY_FLOAT64, Dimension::Type::Double },
-
-        { PLY_CHAR, Dimension::Type::Signed8 },
-        { PLY_UCHAR, Dimension::Type::Unsigned8 },
-        { PLY_SHORT, Dimension::Type::Signed16 },
-        { PLY_USHORT, Dimension::Type::Unsigned16 },
-        { PLY_INT, Dimension::Type::Signed32 },
-        { PLY_UINT, Dimension::Type::Unsigned32 },
-        { PLY_FLOAT, Dimension::Type::Float },
-        { PLY_DOUBLE, Dimension::Type::Double }
-    };
-
-    p_ply_property property = nullptr;
-    while ((property = ply_get_next_property(vertex_element, property)))
-    {
-        const char* name;
-        e_ply_type type;
-        e_ply_type length_type;
-        e_ply_type value_type;
-        if (!ply_get_property_info(property, &name, &type,
-            &length_type, &value_type))
-        {
-            std::stringstream ss;
-            ss << "Error reading property info in " << m_filename << ".";
-            throw pdal_error(ss.str());
-        }
-        // For now, we'll just use PDAL's built in dimension matching.
-        // We could be smarter about this, e.g. by using the length
-        // and value type attributes.
-        m_vertexTypes[name] = types[type];
-    }
-    ply_close(ply);
 }
 
 
@@ -236,7 +212,14 @@ void PlyReader::addDimensions(PointLayoutPtr layout)
 
 void PlyReader::ready(PointTableRef table)
 {
-    m_ply = openPly(m_filename);
+    try
+    {
+        m_ply = openPly(m_filename);
+    }
+    catch (const error& err)
+    {
+        throwError(err.what());
+    }
 }
 
 
@@ -250,16 +233,21 @@ point_count_t PlyReader::read(PointViewPtr view, point_count_t num)
     // long that is the maximum rply (don't know about ply) point count.
     long cnt;
     cnt = Utils::inRange<long>(num) ? num : (std::numeric_limits<long>::max)();
-    for (auto it : m_vertexDimensions)
+    try
     {
-        ply_set_read_cb(m_ply, "vertex", it.first.c_str(), readPlyCallback,
-            &context, cnt);
+        for (auto it : m_vertexDimensions)
+        {
+            ply_set_read_cb(m_ply, "vertex", it.first.c_str(), readPlyCallback,
+                    &context, cnt);
+        }
+        if (!ply_read(m_ply))
+        {
+            throwError("Error reading " + m_filename + ".");
+        }
     }
-    if (!ply_read(m_ply))
+    catch(const error& err)
     {
-        std::stringstream ss;
-        ss << "Error reading " << m_filename << ".";
-        throw pdal_error(ss.str());
+        throwError(err.what());
     }
     return view->size();
 }
@@ -267,11 +255,14 @@ point_count_t PlyReader::read(PointViewPtr view, point_count_t num)
 
 void PlyReader::done(PointTableRef table)
 {
-    if (!ply_close(m_ply))
+    try
+    {
+        if (!ply_close(m_ply))
+            throwError("Error closing " + m_filename + ".");
+    }
+    catch (const error& err)
     {
-        std::stringstream ss;
-        ss << "Error closing " << m_filename << ".";
-        throw pdal_error(ss.str());
+        throwError(err.what());
     }
 }
 
diff --git a/io/PlyReader.hpp b/io/PlyReader.hpp
index c7e0642..728646e 100644
--- a/io/PlyReader.hpp
+++ b/io/PlyReader.hpp
@@ -52,6 +52,12 @@ namespace pdal
 class PDAL_DLL PlyReader : public Reader
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& e) : std::runtime_error(e)
+        {}
+    };
+
     static void *create();
     static int32_t destroy(void *);
     std::string getName() const;
diff --git a/io/PlyWriter.cpp b/io/PlyWriter.cpp
index a3cb20b..cfa888c 100644
--- a/io/PlyWriter.cpp
+++ b/io/PlyWriter.cpp
@@ -46,9 +46,7 @@ namespace
 
 void createErrorCallback(p_ply ply, const char* message)
 {
-    std::stringstream ss;
-    ss << "Error when creating ply file: " << message;
-    throw pdal_error(ss.str());
+    throw PlyWriter::error(message);
 }
 
 
@@ -120,24 +118,25 @@ void PlyWriter::initialize()
     }
     else
     {
-        std::stringstream ss;
-        ss << "Unknown storage mode '" << m_storageModeSpec <<
+        throwError("Unknown storage mode '" + m_storageModeSpec +
             "'. Known storage modes are: 'ascii', 'little endian', "
-            "'big endian', and 'default'";
-        throw pdal_error(ss.str());
+            "'big endian', and 'default'");
     }
 }
 
 
 void PlyWriter::ready(PointTableRef table)
 {
-    m_ply = ply_create(m_filename.c_str(), m_storageMode, createErrorCallback,
-        0, nullptr);
-    if (!m_ply)
+    try
     {
-        std::stringstream ss;
-        ss << "Could not open file for writing: " << m_filename;
-        throw pdal_error(ss.str());
+        m_ply = ply_create(m_filename.c_str(), m_storageMode,
+            createErrorCallback, 0, nullptr);
+        if (!m_ply)
+            throwError("Could not open file '" + m_filename + "for writing.");
+    }
+    catch(const error& err)
+    {
+        throwError(err.what());
     }
     m_pointCollector.reset(new PointView(table));
 }
@@ -151,35 +150,23 @@ void PlyWriter::write(const PointViewPtr data)
 
 void PlyWriter::done(PointTableRef table)
 {
-    if (!ply_add_element(m_ply, "vertex", m_pointCollector->size()))
+    try
     {
-        std::stringstream ss;
-        ss << "Could not add vertex element";
-        throw pdal_error(ss.str());
-    }
+    if (!ply_add_element(m_ply, "vertex", m_pointCollector->size()))
+        throwError("Could not add vertex element");
+
     auto dimensions = table.layout()->dims();
     for (auto dim : dimensions) {
-        std::string name = Dimension::name(dim);
+        std::string name = table.layout()->dimName(dim);
         e_ply_type plyType = getPlyType(Dimension::defaultType(dim));
         if (!ply_add_scalar_property(m_ply, name.c_str(), plyType))
-        {
-            std::stringstream ss;
-            ss << "Could not add scalar property '" << name << "'";
-            throw pdal_error(ss.str());
-        }
+            throwError("Could not add scalar property '" + name  + "'");
     }
     if (!ply_add_comment(m_ply, "Generated by PDAL"))
-    {
-        std::stringstream ss;
-        ss << "Could not add comment";
-        throw pdal_error(ss.str());
-    }
+        throwError("Could not add comment");
+
     if (!ply_write_header(m_ply))
-    {
-        std::stringstream ss;
-        ss << "Could not write ply header";
-        throw pdal_error(ss.str());
-    }
+        throwError("Could not write ply header");
 
     for (PointId index = 0; index < m_pointCollector->size(); ++index)
     {
@@ -187,17 +174,19 @@ void PlyWriter::done(PointTableRef table)
         {
             double value = m_pointCollector->getFieldAs<double>(dim, index);
             if (!ply_write(m_ply, value))
-            {
-                std::stringstream ss;
-                ss << "Error writing dimension '" << Dimension::name(dim) <<
-                    "' of point number " << index;
-                throw pdal_error(ss.str());
-            }
+                throwError("Error writing dimension '" +
+                    table.layout()->dimName(dim) + "' of point number " +
+                    Utils::toString(index) + ".");
         }
     }
 
     if (!ply_close(m_ply))
-        throw pdal_error("Error closing ply file");
+        throwError("Error closing file");
+    }
+    catch (const error& err)
+    {
+        throwError(err.what());
+    }
 
     getMetadata().addList("filename", m_filename);
 }
diff --git a/io/PlyWriter.hpp b/io/PlyWriter.hpp
index f492510..39dff77 100644
--- a/io/PlyWriter.hpp
+++ b/io/PlyWriter.hpp
@@ -47,6 +47,13 @@ namespace pdal
 class PDAL_DLL PlyWriter : public Writer
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& e) : std::runtime_error(e)
+        {}
+    };
+
     static void * create();
     static int32_t destroy(void *);
     std::string getName() const;
@@ -65,7 +72,6 @@ private:
     PointViewPtr m_pointCollector;
     std::string m_storageModeSpec;
     e_ply_storage_mode m_storageMode;
-
 };
 
 }
diff --git a/io/PtsReader.cpp b/io/PtsReader.cpp
index bf2e300..5e419c4 100644
--- a/io/PtsReader.cpp
+++ b/io/PtsReader.cpp
@@ -55,12 +55,7 @@ void PtsReader::initialize(PointTableRef table)
 {
     m_istream = Utils::openFile(m_filename);
     if (!m_istream)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Unable to open pts file '" <<
-            m_filename << "'.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Unable to open file '" + m_filename + "'.");
 
     std::string buf;
     std::getline(*m_istream, buf);
@@ -95,12 +90,7 @@ void PtsReader::ready(PointTableRef table)
 {
     m_istream = Utils::openFile(m_filename);
     if (!m_istream)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Unable to open text file '" <<
-            m_filename << "'.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Unable to open file '" + m_filename + "'.");
 
     // Skip header line.
     std::string buf;
diff --git a/io/QfitReader.cpp b/io/QfitReader.cpp
index c71ea46..99a863d 100644
--- a/io/QfitReader.cpp
+++ b/io/QfitReader.cpp
@@ -205,11 +205,7 @@ void QfitReader::initialize()
 {
     ISwitchableStream str(m_filename);
     if (!str)
-    {
-        std::ostringstream oss;
-        oss << "Unable to open file '" << m_filename << "'";
-        throw qfit_error(oss.str());
-    }
+        throwError("Unable to open file '" + m_filename + "'");
     str.seek(0);
 
     int32_t int4(0);
@@ -248,7 +244,7 @@ void QfitReader::initialize()
         int4 = int32_t(be32toh(uint32_t(int4)));
 
     if (int4 % 4 != 0)
-        throw qfit_error("Base QFIT format is not a multiple of 4, "
+        throwError("Base QFIT format is not a multiple of 4, "
             "unrecognized format!");
 
     m_size = int4;
@@ -277,7 +273,7 @@ void QfitReader::addArgs(ProgramArgs& args)
     args.add("scale_z", "Z scale. Use 0.001 to go from mm to m",
         m_scale_z, 0.001);
 }
-    
+
 
 void QfitReader::addDimensions(PointLayoutPtr layout)
 {
@@ -317,12 +313,8 @@ void QfitReader::ready(PointTableRef)
 {
     m_numPoints = m_point_bytes / m_size;
     if (m_point_bytes % m_size)
-    {
-        std::ostringstream msg;
-        msg << "Error calculating file point count.  File size is "
-            "inconsistent with point size.";
-        throw qfit_error(msg.str());
-    }
+        throwError("Error calculating file point count.  File size is "
+            "inconsistent with point size.");
     m_index = 0;
     m_istream.reset(new IStream(m_filename));
     m_istream->seek(m_offset);
@@ -332,13 +324,9 @@ void QfitReader::ready(PointTableRef)
 point_count_t QfitReader::read(PointViewPtr data, point_count_t count)
 {
     if (!m_istream->good())
-    {
-        throw pdal_error("QFIT file stream is no good!");
-    }
+        throwError("Corrupted file/file read error.");
     if (m_istream->stream()->eof())
-    {
-        throw pdal_error("QFIT file stream is eof!");
-    }
+        throwError("End of file detected.");
 
     count = std::min(m_numPoints - m_index, count);
     std::vector<char> buf(m_size);
diff --git a/io/QfitReader.hpp b/io/QfitReader.hpp
index 1ada5dd..ea31bdd 100644
--- a/io/QfitReader.hpp
+++ b/io/QfitReader.hpp
@@ -57,15 +57,6 @@ enum QFIT_Format_Type
     QFIT_Format_Unknown = 128
 };
 
-class qfit_error : public pdal_error
-{
-public:
-
-    qfit_error(std::string const& msg)
-        : pdal_error(msg)
-    {}
-};
-
 class PDAL_DLL QfitReader : public pdal::Reader
 {
 public:
diff --git a/io/SbetReader.cpp b/io/SbetReader.cpp
index 371ccdd..44ada0d 100644
--- a/io/SbetReader.cpp
+++ b/io/SbetReader.cpp
@@ -62,7 +62,7 @@ void SbetReader::ready(PointTableRef)
     size_t fileSize = FileUtils::fileSize(m_filename);
     size_t pointSize = getDefaultDimensions().size() * sizeof(double);
     if (fileSize % pointSize != 0)
-        throw pdal_error("invalid sbet file size");
+        throwError("Invalid file size.");
     m_numPts = fileSize / pointSize;
     m_index = 0;
     m_stream.reset(new ILeStream(m_filename));
diff --git a/io/TIndexReader.cpp b/io/TIndexReader.cpp
index 640a8dd..10da1d5 100644
--- a/io/TIndexReader.cpp
+++ b/io/TIndexReader.cpp
@@ -58,22 +58,12 @@ TIndexReader::FieldIndexes TIndexReader::getFields()
     indexes.m_filename = OGR_FD_GetFieldIndex(fDefn,
         m_tileIndexColumnName.c_str());
     if (indexes.m_filename < 0)
-    {
-        std::ostringstream out;
-
-        out << "Unable to find field '" << m_tileIndexColumnName <<
-            "' in file '" << m_filename << "'.";
-        throw pdal_error(out.str());
-    }
+        throwError("Unable to find field '" + m_tileIndexColumnName +
+            "' in file '" + m_filename + "'.");
     indexes.m_srs = OGR_FD_GetFieldIndex(fDefn, m_srsColumnName.c_str());
     if (indexes.m_srs < 0)
-    {
-        std::ostringstream out;
-
-        out << "Unable to find field '" << m_srsColumnName << "' in file '" <<
-            m_filename << "'.";
-        throw pdal_error(out.str());
-    }
+        throwError("Unable to find field '" + m_srsColumnName + "' in file '" +
+            m_filename + "'.");
 
     indexes.m_ctime = OGR_FD_GetFieldIndex(fDefn, "created");
     indexes.m_mtime = OGR_FD_GetFieldIndex(fDefn, "modified");
@@ -161,11 +151,7 @@ void TIndexReader::initialize()
     gdal::registerDrivers();
     m_dataset = OGROpen(m_filename.c_str(), FALSE, NULL);
     if (!m_dataset)
-    {
-        std::stringstream oss;
-        oss << "unable to datasource '" << m_filename << "'";
-        throw pdal::pdal_error(oss.str());
-    }
+        throwError("Unable to datasource '" + m_filename + "'");
 
     OGRGeometryH geometry(0);
     if (m_sql.size())
@@ -178,12 +164,8 @@ void TIndexReader::initialize()
         m_layer = OGR_DS_GetLayerByName(m_dataset, m_layerName.c_str());
     }
     if (!m_layer)
-    {
-        std::stringstream oss;
-        oss << getName() << ": Unable to open layer '" << m_layerName <<
-            "' from OGR datasource '" << m_filename << "'";
-        throw pdal::pdal_error(oss.str());
-    }
+        throwError("Unable to open layer '" + m_layerName +
+            "' from OGR datasource '" + m_filename + "'");
 
     m_out_ref->setFromLayer(m_layer);
 
@@ -222,13 +204,8 @@ void TIndexReader::initialize()
         OGRErr err = OGR_L_SetAttributeFilter(m_layer,
             m_attributeFilter.c_str());
         if (err != OGRERR_NONE)
-        {
-            std::stringstream oss;
-            oss << getName() << ": Unable to set attribute filter '"
-                << m_attributeFilter << "' for OGR datasource '"
-                << m_filename << "'";
-            throw pdal::pdal_error(oss.str());
-        }
+            throwError("Unable to set attribute filter '" + m_attributeFilter +
+                "' for OGR datasource '" + m_filename + "'");
     }
 
     Options cropOptions;
@@ -237,19 +214,14 @@ void TIndexReader::initialize()
 
     for (auto f : getFiles())
     {
-        log()->get(LogLevel::Debug) << "Adding file "
-                                    << f.m_filename
-                                    << " to merge filter" <<std::endl;
+        log()->get(LogLevel::Debug) << "Adding file " << f.m_filename <<
+            " to merge filter" << std::endl;
 
         std::string driver = m_factory.inferReaderDriver(f.m_filename);
         Stage *reader = m_factory.createStage(driver);
         if (!reader)
-        {
-            std::stringstream out;
-            out << "Unable to create reader for file '"
-                << f.m_filename << "'.";
-            throw pdal_error(out.str());
-        }
+            throwError("Unable to create reader for file '" + f.m_filename +
+                "'.");
         Options readerOptions;
         readerOptions.add("filename", f.m_filename);
         reader->setOptions(readerOptions);
diff --git a/io/TerrasolidReader.cpp b/io/TerrasolidReader.cpp
index 8264d97..989b1ae 100644
--- a/io/TerrasolidReader.cpp
+++ b/io/TerrasolidReader.cpp
@@ -65,7 +65,7 @@ void TerrasolidReader::initialize()
         m_header->OrgY >> m_header->OrgZ >> m_header->Time >> m_header->Color;
 
     if (m_header->RecogVal != 970401)
-        throw terrasolid_error("Header identifier was not '970401', is this "
+        throwError("Header identifier was not '970401', is this "
             "a TerraSolid .bin file?");
 
     m_haveColor = (m_header->Color != 0);
@@ -73,12 +73,9 @@ void TerrasolidReader::initialize()
     m_format = static_cast<TERRASOLID_Format_Type>(m_header->HdrVersion);
 
     if ((m_format != TERRASOLID_Format_1) && (m_format != TERRASOLID_Format_2))
-    {
-        std::ostringstream oss;
-        oss << "Version was '" << m_format << "', not '" <<
-            TERRASOLID_Format_1 << "' or '" << TERRASOLID_Format_2 << "'";
-        throw terrasolid_error(oss.str());
-    }
+        throwError("Version was '" + Utils::toString(m_format) + "', not '" +
+            Utils::toString(TERRASOLID_Format_1) + "' or '" +
+            Utils::toString(TERRASOLID_Format_2) + "'");
 
     log()->get(LogLevel::Debug) << "TerraSolid Reader::initialize format: " <<
         m_format << std::endl;
diff --git a/io/TerrasolidReader.hpp b/io/TerrasolidReader.hpp
index 853e1aa..796029a 100644
--- a/io/TerrasolidReader.hpp
+++ b/io/TerrasolidReader.hpp
@@ -48,7 +48,6 @@ extern "C" PF_ExitFunc TerrasolidReader_InitPlugin();
 namespace pdal
 {
 
-
 enum TERRASOLID_Format_Type
 {
     TERRASOLID_Format_1 = 20010712,
@@ -85,14 +84,6 @@ struct TerraSolidHeader
 };
 
 typedef std::unique_ptr<TerraSolidHeader> TerraSolidHeaderPtr;
-class terrasolid_error : public pdal_error
-{
-public:
-
-    terrasolid_error(std::string const& msg)
-        : pdal_error(msg)
-    {}
-};
 
 class PDAL_DLL TerrasolidReader : public pdal::Reader
 {
diff --git a/io/TextReader.cpp b/io/TextReader.cpp
index 5f4a770..40122a8 100644
--- a/io/TextReader.cpp
+++ b/io/TextReader.cpp
@@ -56,12 +56,7 @@ void TextReader::initialize(PointTableRef table)
 {
     m_istream = Utils::openFile(m_filename);
     if (!m_istream)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Unable to open text file '" <<
-            m_filename << "'.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Unable to open text file '" + m_filename + "'.");
 
     std::string buf;
     std::getline(*m_istream, buf);
@@ -69,39 +64,44 @@ void TextReader::initialize(PointTableRef table)
     auto isspecial = [](char c)
         { return (!std::isalnum(c) && c != ' '); };
 
-    // Scan string for some character not a number, space or letter.
-    for (size_t i = 0; i < buf.size(); ++i)
-        if (isspecial(buf[i]))
-        {
-            m_separator = buf[i];
-            break;
-        }
+    // If the separator wasn't provided on the command line extract it
+    // from the header line.
+    if (m_separator == ' ')
+    {
+        // Scan string for some character not a number, space or letter.
+        for (size_t i = 0; i < buf.size(); ++i)
+            if (isspecial(buf[i]))
+            {
+                m_separator = buf[i];
+                break;
+            }
+    }
 
     if (m_separator != ' ')
-    {
-        Utils::remove(buf, ' ');
         m_dimNames = Utils::split(buf, m_separator);
-    }
     else
         m_dimNames = Utils::split2(buf, m_separator);
     Utils::closeFile(m_istream);
 }
 
 
+void TextReader::addArgs(ProgramArgs& args)
+{
+    args.add("separator", "Separator character that overrides special "
+        "character in header line", m_separator, ' ');
+}
+
+
 void TextReader::addDimensions(PointLayoutPtr layout)
 {
     for (auto name : m_dimNames)
     {
+        Utils::trim(name);
         Dimension::Id id = layout->registerOrAssignDim(name,
             Dimension::Type::Double);
         if (Utils::contains(m_dims, id))
-        {
-            std::ostringstream oss;
-
-            oss << getName() << ": Duplicate dimension '" << name <<
-                "' detected in input file '" << m_filename << "'.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Duplicate dimension '" + name +
+                "' detected in input file '" + m_filename + "'.");
         m_dims.push_back(id);
     }
 }
@@ -111,67 +111,84 @@ void TextReader::ready(PointTableRef table)
 {
     m_istream = Utils::openFile(m_filename);
     if (!m_istream)
-    {
-        std::ostringstream oss;
-        oss << getName() << ": Unable to open text file '" <<
-            m_filename << "'.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Unable to open text file '" + m_filename + "'.");
 
     // Skip header line.
     std::string buf;
     std::getline(*m_istream, buf);
+    m_line = 1;
 }
 
 
 point_count_t TextReader::read(PointViewPtr view, point_count_t numPts)
 {
     PointId idx = view->size();
-
     point_count_t cnt = 0;
-    size_t line = 1;
-    while (m_istream->good() && cnt < numPts)
+    PointRef point(*view, idx);
+    while (cnt < numPts)
     {
+        point.setPointId(idx);
+        if (!processOne(point))
+            break;
+        cnt++;
+        idx++;
+    }
+    return cnt;
+}
+
+
+bool TextReader::processOne(PointRef& point)
+{
+    if (!fillFields())
+        return false;
+
+    double d;
+    for (size_t i = 0; i < m_fields.size(); ++i)
+    {
+        if (!Utils::fromString(m_fields[i], d))
+        {
+            log()->get(LogLevel::Error) << "Can't convert "
+                "field '" << m_fields[i] << "' to numeric value on line " <<
+                m_line << " in '" << m_filename << "'.  Setting to 0." <<
+                std::endl;
+            d = 0;
+        }
+        point.setField(m_dims[i], d);
+    }
+    return true;
+}
+
+
+bool TextReader::fillFields()
+{
+    while (true)
+    {
+        if (!m_istream->good())
+            return false;
+
         std::string buf;
-        StringList fields;
 
         std::getline(*m_istream, buf);
-        line++;
+        m_line++;
         if (buf.empty())
             continue;
         if (m_separator != ' ')
         {
             Utils::remove(buf, ' ');
-            fields = Utils::split(buf, m_separator);
+            m_fields = Utils::split(buf, m_separator);
         }
         else
-            fields = Utils::split2(buf, m_separator);
-        if (fields.size() != m_dims.size())
+            m_fields = Utils::split2(buf, m_separator);
+        if (m_fields.size() != m_dims.size())
         {
-            log()->get(LogLevel::Error) << "Line " << line <<
-               " in '" << m_filename << "' contains " << fields.size() <<
-               " fields when " << m_dims.size() << " were expected.  "
-               "Ignoring." << std::endl;
+            log()->get(LogLevel::Error) << "Line " << m_line <<
+                " in '" << m_filename << "' contains " << m_fields.size() <<
+                " fields when " << m_dims.size() << " were expected.  "
+                "Ignoring." << std::endl;
             continue;
         }
-
-        double d;
-        for (size_t i = 0; i < fields.size(); ++i)
-        {
-            if (!Utils::fromString(fields[i], d))
-            {
-                log()->get(LogLevel::Error) << "Can't convert "
-                    "field '" << fields[i] << "' to numeric value on line " <<
-                    line << " in '" << m_filename << "'.  Setting to 0." <<
-                    std::endl;
-                d = 0;
-            }
-            view->setField(m_dims[i], idx, d);
-        }
-        cnt++;
-        idx++;
+        return true;
     }
-    return cnt;
 }
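
The TextReader rework above is the general shape for making a reader
streamable: the per-point work moves into processOne(), and read() becomes
a thin loop over it. A minimal sketch of that structure under simplified,
assumed types (a plain vector stands in for PDAL's PointView/PointRef):

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct Reader
{
    std::istringstream m_in{"1 2\n3 4\n"};  // stand-in for the input file
    std::vector<double> m_fields;

    // Pull the next non-empty line and split it into numeric fields.
    bool fillFields()
    {
        std::string line;
        while (std::getline(m_in, line))
        {
            if (line.empty())
                continue;
            std::istringstream ls(line);
            m_fields.clear();
            double d;
            while (ls >> d)
                m_fields.push_back(d);
            return true;
        }
        return false;
    }

    // Streaming entry point: fill one point or report end of input.
    bool processOne(std::vector<double>& point)
    {
        if (!fillFields())
            return false;
        point = m_fields;
        return true;
    }

    // Batch entry point: just a loop over processOne().
    size_t read(size_t numPts)
    {
        size_t cnt = 0;
        std::vector<double> point;
        while (cnt < numPts && processOne(point))
            cnt++;
        return cnt;
    }
};

int main()
{
    std::cout << Reader().read(100) << " points read\n";  // "2 points read"
    return 0;
}
```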
 
 
diff --git a/io/TextReader.hpp b/io/TextReader.hpp
index 5b18cfe..8b4ca64 100644
--- a/io/TextReader.hpp
+++ b/io/TextReader.hpp
@@ -52,7 +52,7 @@ public:
     static int32_t destroy(void *);
     std::string getName() const;
 
-    TextReader() : m_separator(' '), m_istream(NULL)
+    TextReader() : m_istream(NULL)
     {}
 
 private:
@@ -65,6 +65,12 @@ private:
     virtual void initialize(PointTableRef table);
 
     /**
+      Add arguments to those accepted at the command line.
+      \param args  Argument list to modify.
+    */
+    virtual void addArgs(ProgramArgs& args);
+
+    /**
       Add dimensions found in the header line to the layout.
 
       \param layout  Layout to which the dimensions are added.
@@ -94,11 +100,23 @@ private:
     */
     virtual void done(PointTableRef table);
 
+    /**
+      Read a single point from the input.
+
+      \param point  Reference to point to fill with data.
+      \return  False if no point could be read.
+    */
+    virtual bool processOne(PointRef& point);
+
+    bool fillFields();
+
 private:
     char m_separator;
     std::istream *m_istream;
     StringList m_dimNames;
     Dimension::IdList m_dims;
+    StringList m_fields;
+    size_t m_line;
 };
 
 } // namespace pdal
diff --git a/io/TextWriter.cpp b/io/TextWriter.cpp
index 7be7d5e..de7ecb1 100644
--- a/io/TextWriter.cpp
+++ b/io/TextWriter.cpp
@@ -92,12 +92,7 @@ void TextWriter::initialize(PointTableRef table)
     m_stream = FileStreamPtr(Utils::createFile(m_filename, true),
         FileStreamDeleter());
     if (!m_stream)
-    {
-        std::stringstream out;
-        out << "writers.text couldn't open '" << m_filename <<
-            "' for output.";
-        throw pdal_error(out.str());
-    }
+        throwError("Couldn't open '" + m_filename + "' for output.");
     m_outputType = Utils::toupper(m_outputType);
 }
 
@@ -114,12 +109,7 @@ void TextWriter::ready(PointTableRef table)
         Utils::trim(dim);
         Dimension::Id d = table.layout()->findDim(dim);
         if (d == Dimension::Id::Unknown)
-        {
-            std::ostringstream oss;
-            oss << getName() << ": Dimension not found with name '" <<
-                dim << "'.";
-            throw pdal_error(oss.str());
-        }
+            throwError("Dimension not found with name '" + dim + "'.");
         m_dims.push_back(d);
     }
 
diff --git a/java/README.md b/java/README.md
index ea1fc10..15ae0b1 100644
--- a/java/README.md
+++ b/java/README.md
@@ -4,15 +4,10 @@
 
 Java bindings to use PDAL on JVM.
 
-## How to compile
-
-1. Install PDAL (using brew / package managers (unix) / build from sources / etc)
-2. Build native libs `./sbt native/nativeCompile` (optionally, binaries would be built during tests run)
-3. Run `./sbt core/test` to run PDAL tests
-
-## Using with SBT
+## Using PDAL JNI with SBT
 
 ```scala
+// pdal is published to Maven Central, but you can use the following repos in addition
 resolvers ++= Seq(
   Resolver.sonatypeRepo("releases"),
   Resolver.sonatypeRepo("snapshots") // for snaphots
@@ -23,12 +18,60 @@ libraryDependencies ++= Seq(
 )
 ```
 
-It's required to have native JNI binary into your app classpath:
+It's required to have the native JNI binary in `java.library.path`:
 
 ```scala
-// Mac OS X example with manual jni installation 
-// It's strongly recommended to use WITH_PDAL_JNI flag to build the whole PDAL
+// Mac OS X example with manual JNI installation
+// Though it's strongly recommended to use WITH_PDAL_JNI during the PDAL build
 // cp -f native/target/resource_managed/main/native/x86_64-darwin/libpdaljni.1.4.dylib /usr/local/lib/libpdaljni.1.4.dylib
 // place built binary into /usr/local/lib, and pass java.library.path to your JVM
 javaOptions += "-Djava.library.path=/usr/local/lib"
 ```
+
+## How to compile
+
+Development purposes (including native binaries):
+  1. Install PDAL (using brew / package managers (unix) / build from sources / etc) _without_ the `-DWITH_PDAL_JNI=ON` flag
+  2. Build the native libs with `./sbt native/nativeCompile` (alternatively, the binaries are built during the test run)
+  3. Run `./sbt core/test` to run the PDAL tests
+
+Java-only development purposes:
+  1. Set `$LD_LIBRARY_PATH` or `$DYLD_LIBRARY_PATH`
+  2. If you don't want to set a global variable, you can pass `-Djava.library.path=<path>` to sbt:
+    `./sbt -Djava.library.path=<path>`
+  3. Set `PDAL_DEPEND_ON_NATIVE=false` (to disable the `native` project build)
+  4. Run `PDAL_DEPEND_ON_NATIVE=false ./sbt`
+
+Putting it together, possible commands to build and launch the PDAL JNI bindings are:
+
+```bash
+# Includes building the native binaries
+# WARN: PDAL should be built without the `-DWITH_PDAL_JNI=ON` flag
+./sbt
+```
+
+```bash
+# Java-side development without building the binaries
+# WARN: PDAL should be built with the `-DWITH_PDAL_JNI=ON` flag
+PDAL_DEPEND_ON_NATIVE=false ./sbt -Djava.library.path=<path>
+```
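+
+A minimal sketch of such a PDAL build with the JNI bindings enabled follows;
+the build directory and install step are assumptions, so adjust them for your
+environment:
+
+```bash
+# Configure PDAL with the JNI bindings and install it, so that
+# libpdaljni lands next to the other PDAL shared libraries.
+mkdir -p build && cd build
+cmake -DWITH_PDAL_JNI=ON ..
+make && make install
+```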
+
+### Possible issues and solutions
+
+1. If PDAL is not installed globally, change [this](./java/native/src/CMakeLists.txt#L25) line to:
+
+  ```cmake
+  set(CMAKE_CXX_FLAGS "$ENV{PDAL_LD_FLAGS} $ENV{PDAL_CXX_FLAGS} -std=c++11")
+  ```
+  In this case, launch sbt as follows:
+
+  ```bash
+  PDAL_LD_FLAGS=`pdal-config --libs` PDAL_CXX_FLAGS=`pdal-config --includes` ./sbt
+  ```
+
+2. A bad dynamic linking issue can sometimes occur (e.g. a polluted environment);
+   the quick workaround is to replace [this](./java/native/src/CMakeLists.txt#L25) line with:
+
+  ```cmake
+  set(CMAKE_CXX_FLAGS "-L<path to dynamic libs> -std=c++11")
+  ```
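+
+  One way to find the directory to use for `<path to dynamic libs>` is to ask
+  `pdal-config` for its linker flags (the exact output below is illustrative):
+
+  ```bash
+  # The -L entry in the output is the PDAL library directory.
+  pdal-config --libs    # e.g. -L/usr/local/lib -lpdal_base -lpdal_util
+  ```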
diff --git a/java/build.sbt b/java/build.sbt
index 617052c..6bd0c4e 100644
--- a/java/build.sbt
+++ b/java/build.sbt
@@ -21,12 +21,10 @@ lazy val commonSettings = Seq(
     "-feature"),
   test in assembly := {},
   shellPrompt := { s => Project.extract(s).currentProject.id + " > " },
-  commands += Command.command("publish-javastyle")((state: State) => {
-    val extracted = Project extract state
-    import extracted._
-    val publishState = Command.process("publish", append(Seq(crossPaths := false), state))
-    append(Seq(crossPaths := true), publishState)
-  }),
+  commands ++= Seq(
+    Commands.processJavastyleCommand("publish"),
+    Commands.processJavastyleCommand("publishSigned")
+  ),
   publishArtifact in Test := false,
   publishTo := {
     val nexus = "https://oss.sonatype.org/"
diff --git a/java/project/Environment.scala b/java/project/Commands.scala
similarity index 79%
copy from java/project/Environment.scala
copy to java/project/Commands.scala
index 5926822..f4ea01a 100644
--- a/java/project/Environment.scala
+++ b/java/project/Commands.scala
@@ -31,15 +31,16 @@
   * OF SUCH DAMAGE.
   ****************************************************************************/
 
-import sbt.ClasspathDependency
+import sbt._
+import sbt.Keys._
 
-import scala.util.Properties
-
-object Environment {
-  def either(environmentVariable: String, default: String): String =
-    Properties.envOrElse(environmentVariable, default)
-
-  lazy val versionSuffix = either("PDAL_VERSION_SUFFIX", "-SNAPSHOT")
-  lazy val pdalDependOnNative = either("PDAL_DEPEND_ON_NATIVE", "true")
-  def dependOnNative(native: ClasspathDependency) = if(pdalDependOnNative == "true") Seq(native) else Seq.empty
+object Commands {
+  def processJavastyleCommand(commandProcess: String) = {
+    Command.command(s"${commandProcess}-javastyle")((state: State) => {
+      val extracted = Project extract state
+      import extracted._
+      val publishState = Command.process(commandProcess, append(Seq(crossPaths := false), state))
+      append(Seq(crossPaths := true), publishState)
+    })
+  }
 }
\ No newline at end of file
diff --git a/java/project/Environment.scala b/java/project/Environment.scala
index 5926822..c627c93 100644
--- a/java/project/Environment.scala
+++ b/java/project/Environment.scala
@@ -39,7 +39,9 @@ object Environment {
   def either(environmentVariable: String, default: String): String =
     Properties.envOrElse(environmentVariable, default)
 
+  def dependOnNative(native: ClasspathDependency) =
+    if(pdalDependOnNative == "true") Seq(native) else Seq.empty
+
   lazy val versionSuffix = either("PDAL_VERSION_SUFFIX", "-SNAPSHOT")
   lazy val pdalDependOnNative = either("PDAL_DEPEND_ON_NATIVE", "true")
-  def dependOnNative(native: ClasspathDependency) = if(pdalDependOnNative == "true") Seq(native) else Seq.empty
 }
\ No newline at end of file
diff --git a/java/scripts/publish-212.sh b/java/scripts/publish-212.sh
index dbce4dd..9eab675 100755
--- a/java/scripts/publish-212.sh
+++ b/java/scripts/publish-212.sh
@@ -1,3 +1,31 @@
 #!/usr/bin/env bash
 
-PDAL_DEPEND_ON_NATIVE=false ./sbt "-212" "project core" publish
+# --suffix: sets the version suffix to publish the lib with
+# --signed: makes a PGP-signed publish of the library; Maven Central requires this signature
+
+for i in "$@"
+do
+    case $i in
+        --suffix=*)
+            PDAL_VERSION_SUFFIX="${i#*=}"
+            shift
+            ;;
+        --signed)
+            SIGNED=true
+            shift
+            ;;
+        *)
+            ;;
+    esac
+done
+
+export PDAL_VERSION_SUFFIX=${PDAL_VERSION_SUFFIX:-"-SNAPSHOT"}
+SIGNED=${SIGNED:-false}
+
+COMMAND=publish
+
+if ${SIGNED}; then
+    COMMAND=publishSigned
+fi
+
+PDAL_DEPEND_ON_NATIVE=false ./sbt "-212" "project core" ${COMMAND}
diff --git a/java/scripts/publish-all.sh b/java/scripts/publish-all.sh
index 84edf56..030533c 100755
--- a/java/scripts/publish-all.sh
+++ b/java/scripts/publish-all.sh
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
 
-./scripts/publish.sh
-./scripts/publish-212.sh
-./scripts/publish-javastyle.sh
+./scripts/publish.sh "$@"
+./scripts/publish-212.sh "$@"
+./scripts/publish-javastyle.sh "$@"
diff --git a/java/scripts/publish-javastyle.sh b/java/scripts/publish-javastyle.sh
index 2ccf8b5..149afc1 100755
--- a/java/scripts/publish-javastyle.sh
+++ b/java/scripts/publish-javastyle.sh
@@ -1,3 +1,31 @@
 #!/usr/bin/env bash
 
-PDAL_DEPEND_ON_NATIVE=false ./sbt "-212" "project core" publish-javastyle
+# --suffix: sets the version suffix to publish the lib with
+# --signed: makes a PGP-signed publish of the library; Maven Central requires this signature
+
+for i in "$@"
+do
+    case $i in
+        --suffix=*)
+            PDAL_VERSION_SUFFIX="${i#*=}"
+            shift
+            ;;
+        --signed)
+            SIGNED=true
+            shift
+            ;;
+        *)
+            ;;
+    esac
+done
+
+export PDAL_VERSION_SUFFIX=${PDAL_VERSION_SUFFIX:-"-SNAPSHOT"}
+SIGNED=${SIGNED:-false}
+
+COMMAND=publish
+
+if ${SIGNED}; then
+    COMMAND=publishSigned
+fi
+
+PDAL_DEPEND_ON_NATIVE=false ./sbt "-212" "project core" ${COMMAND}-javastyle
diff --git a/java/scripts/publish-local-212.sh b/java/scripts/publish-local-212.sh
new file mode 100755
index 0000000..b9352e2
--- /dev/null
+++ b/java/scripts/publish-local-212.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+# --suffix: sets the version suffix to publish the lib with
+
+for i in "$@"
+do
+    case $i in
+        --suffix=*)
+            PDAL_VERSION_SUFFIX="${i#*=}"
+            shift
+            ;;
+        *)
+            ;;
+    esac
+done
+
+export PDAL_VERSION_SUFFIX=${PDAL_VERSION_SUFFIX:-"-SNAPSHOT"}
+
+PDAL_DEPEND_ON_NATIVE=false ./sbt "-212" "project core" publish-local
diff --git a/java/scripts/publish-local.sh b/java/scripts/publish-local.sh
index fa8f3df..ea1a7cc 100755
--- a/java/scripts/publish-local.sh
+++ b/java/scripts/publish-local.sh
@@ -1,3 +1,19 @@
 #!/usr/bin/env bash
 
+# --suffix: sets the version suffix to publish the lib with
+
+for i in "$@"
+do
+    case $i in
+        --suffix=*)
+            PDAL_VERSION_SUFFIX="${i#*=}"
+            shift
+            ;;
+        *)
+            ;;
+    esac
+done
+
+export PDAL_VERSION_SUFFIX=${PDAL_VERSION_SUFFIX:-"-SNAPSHOT"}
+
 PDAL_DEPEND_ON_NATIVE=false ./sbt "project core" publish-local
diff --git a/java/scripts/publish.sh b/java/scripts/publish.sh
index 29b0bc8..c10d7c5 100755
--- a/java/scripts/publish.sh
+++ b/java/scripts/publish.sh
@@ -1,3 +1,31 @@
 #!/usr/bin/env bash
 
-PDAL_DEPEND_ON_NATIVE=false ./sbt "project core" publish
+# --suffix: sets the version suffix to publish the lib with
+# --signed: makes a PGP-signed publish of the library; Maven Central requires this signature
+
+for i in "$@"
+do
+    case $i in
+        --suffix=*)
+            PDAL_VERSION_SUFFIX="${i#*=}"
+            shift
+            ;;
+        --signed)
+            SIGNED=true
+            shift
+            ;;
+        *)
+            ;;
+    esac
+done
+
+export PDAL_VERSION_SUFFIX=${PDAL_VERSION_SUFFIX:-"-SNAPSHOT"}
+SIGNED=${SIGNED:-false}
+
+COMMAND=publish
+
+if ${SIGNED}; then
+    COMMAND=publishSigned
+fi
+
+PDAL_DEPEND_ON_NATIVE=false ./sbt "project core" ${COMMAND}
diff --git a/kernels/GroundKernel.cpp b/kernels/GroundKernel.cpp
index 453051c..1631e10 100644
--- a/kernels/GroundKernel.cpp
+++ b/kernels/GroundKernel.cpp
@@ -109,13 +109,7 @@ int GroundKernel::execute()
     Stage& writer(makeWriter(m_outputFile, groundStage, ""));
 
     writer.prepare(table);
-
-    // process the data, grabbing the PointViewSet for visualization of the
-    // resulting PointView
-    PointViewSet viewSetOut = writer.execute(table);
-
-    if (isVisualize())
-        visualize(*viewSetOut.begin());
+    writer.execute(table);
 
     return 0;
 }
diff --git a/kernels/InfoKernel.cpp b/kernels/InfoKernel.cpp
index fafcc75..c90dfa0 100644
--- a/kernels/InfoKernel.cpp
+++ b/kernels/InfoKernel.cpp
@@ -122,27 +122,27 @@ void InfoKernel::validateSwitches(ProgramArgs& args)
 
 void InfoKernel::addSwitches(ProgramArgs& args)
 {
-    args.add("input,i", "input file name", m_inputFile).setOptionalPositional();
-    args.add("all", "dump statistics, schema and metadata", m_showAll);
-    args.add("point,p", "point to dump\n--point=\"1-5,10,100-200\"",
+    args.add("input,i", "Input file name", m_inputFile).setOptionalPositional();
+    args.add("all", "Dump statistics, schema and metadata", m_showAll);
+    args.add("point,p", "Point to dump\n--point=\"1-5,10,100-200\" (0 indexed)",
         m_pointIndexes);
     args.add("query",
          "Return points in order of distance from the specified "
          "location (2D or 3D)\n"
          "--query Xcoord,Ycoord[,Zcoord][/count]",
          m_queryPoint);
-    args.add("stats", "dump stats on all points (reads entire dataset)",
+    args.add("stats", "Dump stats on all points (reads entire dataset)",
         m_showStats);
-    args.add("boundary", "compute a hexagonal hull/boundary of dataset",
+    args.add("boundary", "Compute a hexagonal hull/boundary of dataset",
         m_boundary);
-    args.add("dimensions", "dimensions on which to compute statistics",
+    args.add("dimensions", "Dimensions on which to compute statistics",
         m_dimensions);
-    args.add("schema", "dump the schema", m_showSchema);
-    args.add("pipeline-serialization", "Output file for pipeline serialization",
-         m_pipelineFile);
-    args.add("summary", "dump summary of the info", m_showSummary);
-    args.add("metadata", "dump file metadata info", m_showMetadata);
-    args.add("pointcloudschema", "dump PointCloudSchema XML output",
+    args.add("schema", "Dump the schema", m_showSchema);
+    args.add("pipeline-serialization", "Output filename for pipeline "
+        "serialization", m_pipelineFile);
+    args.add("summary", "Dump summary of the info", m_showSummary);
+    args.add("metadata", "Dump file metadata info", m_showMetadata);
+    args.add("pointcloudschema", "Dump PointCloudSchema XML output",
         m_PointCloudSchemaOutput).setHidden();
     args.add("stdin,s", "Read a pipeline file from standard input", m_usestdin);
 }
@@ -204,11 +204,18 @@ MetadataNode InfoKernel::dumpPoints(PointViewPtr inView) const
 
     // Stick points in a buffer.
     std::vector<PointId> points = getListOfPoints(m_pointIndexes);
+    bool oorMsg = false;
     for (size_t i = 0; i < points.size(); ++i)
     {
         PointId id = (PointId)points[i];
         if (id < inView->size())
             outView->appendPoint(*inView.get(), id);
+        else if (!oorMsg)
+        {
+            m_log->get(LogLevel::Warning) << "Attempt to display points with "
+                "IDs not available in input dataset." << std::endl;
+            oorMsg = true;
+        }
     }
 
     MetadataNode tree = outView->toMetadata();
@@ -227,19 +234,24 @@ MetadataNode InfoKernel::dumpSummary(const QuickInfo& qi)
 {
     MetadataNode summary;
     summary.add("num_points", qi.m_pointCount);
-    summary.add("spatial_reference", qi.m_srs.getWKT());
-    MetadataNode srs = qi.m_srs.toMetadata();
-    summary.add(srs);
-    MetadataNode bounds = summary.add("bounds");
-    MetadataNode x = bounds.add("X");
-    x.add("min", qi.m_bounds.minx);
-    x.add("max", qi.m_bounds.maxx);
-    MetadataNode y = bounds.add("Y");
-    y.add("min", qi.m_bounds.miny);
-    y.add("max", qi.m_bounds.maxy);
-    MetadataNode z = bounds.add("Z");
-    z.add("min", qi.m_bounds.minz);
-    z.add("max", qi.m_bounds.maxz);
+    if (qi.m_srs.valid())
+    {
+        MetadataNode srs = qi.m_srs.toMetadata();
+        summary.add(srs);
+    }
+    if (qi.m_bounds.valid())
+    {
+        MetadataNode bounds = summary.add("bounds");
+        MetadataNode x = bounds.add("X");
+        x.add("min", qi.m_bounds.minx);
+        x.add("max", qi.m_bounds.maxx);
+        MetadataNode y = bounds.add("Y");
+        y.add("min", qi.m_bounds.miny);
+        y.add("max", qi.m_bounds.maxy);
+        MetadataNode z = bounds.add("Z");
+        z.add("min", qi.m_bounds.minz);
+        z.add("max", qi.m_bounds.maxz);
+    }
 
     std::string dims;
     auto di = qi.m_dimNames.begin();
@@ -250,7 +262,8 @@ MetadataNode InfoKernel::dumpSummary(const QuickInfo& qi)
         if (di != qi.m_dimNames.end())
            dims += ", ";
     }
-    summary.add("dimensions", dims);
+    if (dims.size())
+        summary.add("dimensions", dims);
     return summary;
 }
 
@@ -299,7 +312,16 @@ void InfoKernel::setup(const std::string& filename)
         stage = m_statsStage;
     }
     if (m_boundary)
-        m_hexbinStage = &m_manager.makeFilter("filters.hexbin", *stage);
+    {
+        try
+        {
+            m_hexbinStage = &m_manager.makeFilter("filters.hexbin", *stage);
+        } catch (pdal::pdal_error&)
+        {
+            m_hexbinStage = nullptr;
+
+        }
+    }
 }
 
 
@@ -311,6 +333,9 @@ MetadataNode InfoKernel::run(const std::string& filename)
     if (m_showSummary)
     {
         QuickInfo qi = m_reader->preview();
+        if (!qi.valid())
+            throw pdal_error("No summary data available for '" +
+                filename + "'.");
         MetadataNode summary = dumpSummary(qi).clone("summary");
         root.add(summary);
     }
@@ -330,7 +355,7 @@ MetadataNode InfoKernel::run(const std::string& filename)
 void InfoKernel::dump(MetadataNode& root)
 {
     if (m_showSchema)
-        root.add(m_manager.pointTable().toMetadata().clone("schema"));
+        root.add(m_manager.pointTable().layout()->toMetadata().clone("schema"));
 
     if (m_PointCloudSchemaOutput.size() > 0)
     {
@@ -357,7 +382,9 @@ void InfoKernel::dump(MetadataNode& root)
     {
         PointViewSet viewSet = m_manager.views();
         assert(viewSet.size() == 1);
-        root.add(dumpPoints(*viewSet.begin()).clone("points"));
+        MetadataNode points = dumpPoints(*viewSet.begin());
+        if (points.valid())
+            root.add(points.clone("points"));
     }
 
     if (m_queryPoint.size())
@@ -383,7 +410,32 @@ void InfoKernel::dump(MetadataNode& root)
     {
         PointViewSet viewSet = m_manager.views();
         assert(viewSet.size() == 1);
-        root.add(m_hexbinStage->getMetadata().clone("boundary"));
+        if (m_hexbinStage)
+            root.add(m_hexbinStage->getMetadata().clone("boundary"));
+        else
+        {
+            pdal::BOX2D bounds;
+            for (auto const &v: viewSet)
+            {
+                pdal::BOX2D b;
+                v->calculateBounds(b);
+                bounds.grow(b);
+            }
+            std::stringstream polygon;
+            polygon << "POLYGON ((";
+
+            polygon <<         bounds.minx << " " << bounds.miny;
+            polygon << ", " << bounds.maxx << " " << bounds.miny;
+            polygon << ", " << bounds.maxx << " " << bounds.maxy;
+            polygon << ", " << bounds.minx << " " << bounds.maxy;
+            polygon << ", " << bounds.minx << " " << bounds.miny;
+            polygon << "))";
+
+            MetadataNode m("boundary");
+            m.add("boundary", polygon.str(), "Simple boundary of polygon");
+            root.add(m);
+
+        }
     }
 }
 
diff --git a/kernels/PipelineKernel.cpp b/kernels/PipelineKernel.cpp
index 52c1f8f..1628e27 100644
--- a/kernels/PipelineKernel.cpp
+++ b/kernels/PipelineKernel.cpp
@@ -65,9 +65,16 @@ void PipelineKernel::validateSwitches(ProgramArgs& args)
 }
 
 
+bool PipelineKernel::isStagePrefix(const std::string& stage)
+{
+    return Kernel::isStagePrefix(stage) || stage == "stage";
+}
+
+
 void PipelineKernel::addSwitches(ProgramArgs& args)
 {
-    args.add("input,i", "input file name", m_inputFile).setOptionalPositional();
+    args.add("input,i", "Input filename", m_inputFile).setOptionalPositional();
+
     args.add("pipeline-serialization", "Output file for pipeline serialization",
         m_pipelineFile);
     args.add("validate", "Validate the pipeline (including serialization), "
@@ -80,14 +87,20 @@ void PipelineKernel::addSwitches(ProgramArgs& args)
     args.add("pointcloudschema", "dump PointCloudSchema XML output",
         m_PointCloudSchemaOutput).setHidden();
     args.add("stdin,s", "Read pipeline from standard input", m_usestdin);
+    args.add("stream", "Attempt to run pipeline in streaming mode.", m_stream);
+    args.add("metadata", "Metadata filename", m_metadataFile);
 }
 
+
 int PipelineKernel::execute()
 {
     if (!Utils::fileExists(m_inputFile))
         throw pdal_error("file not found: " + m_inputFile);
     if (m_progressFile.size())
+    {
         m_progressFd = Utils::openProgress(m_progressFile);
+        m_manager.setProgressFd(m_progressFd);
+    }
 
     m_manager.readPipeline(m_inputFile);
 
@@ -100,9 +113,24 @@ int PipelineKernel::execute()
         return 0;
     }
 
-    m_manager.execute();
+    if (m_stream)
+    {
+        FixedPointTable table(10000);
+        m_manager.executeStream(table);
+    }
+    else
+        m_manager.execute();
 
-    if (m_pipelineFile.size() > 0)
+    if (m_metadataFile.size())
+    {
+        std::ostream *out = Utils::createFile(m_metadataFile, false);
+        if (!out)
+            throw pdal_error("Can't open file '" + m_metadataFile +
+                "' for metadata output.");
+        Utils::toJSON(m_manager.getMetadata(), *out);
+        Utils::closeFile(out);
+    }
+    if (m_pipelineFile.size())
         PipelineWriter::writePipeline(m_manager.getStage(), m_pipelineFile);
 
     if (m_PointCloudSchemaOutput.size() > 0)
diff --git a/kernels/PipelineKernel.hpp b/kernels/PipelineKernel.hpp
index 4cfc329..926555c 100644
--- a/kernels/PipelineKernel.hpp
+++ b/kernels/PipelineKernel.hpp
@@ -58,14 +58,17 @@ private:
     PipelineKernel();
     void addSwitches(ProgramArgs& args);
     void validateSwitches(ProgramArgs& args);
+    virtual bool isStagePrefix(const std::string& stage);
 
     std::string m_inputFile;
     std::string m_pipelineFile;
+    std::string m_metadataFile;
     bool m_validate;
     std::string m_PointCloudSchemaOutput;
     std::string m_progressFile;
     int m_progressFd;
     bool m_usestdin;
+    bool m_stream;
 };
 
 } // pdal
diff --git a/kernels/RandomKernel.cpp b/kernels/RandomKernel.cpp
index 813cf7b..d456e1b 100644
--- a/kernels/RandomKernel.cpp
+++ b/kernels/RandomKernel.cpp
@@ -99,10 +99,7 @@ int RandomKernel::execute()
 
     PointTable table;
     writer.prepare(table);
-    PointViewSet viewSet = writer.execute(table);
-
-    if (isVisualize())
-        visualize(*viewSet.begin());
+    writer.execute(table);
 
     return 0;
 }
diff --git a/kernels/SortKernel.cpp b/kernels/SortKernel.cpp
index a81368e..cb334b8 100644
--- a/kernels/SortKernel.cpp
+++ b/kernels/SortKernel.cpp
@@ -72,9 +72,6 @@ int SortKernel::execute()
 {
     Stage& readerStage = makeReader(m_inputFile, m_driverOverride);
 
-    // go ahead and prepare/execute on reader stage only to grab input
-    // PointViewSet, this makes the input PointView available to both the
-    // processing pipeline and the visualizer
     PointTable table;
     readerStage.prepare(table);
     PointViewSet viewSetIn = readerStage.execute(table);
@@ -96,12 +93,7 @@ int SortKernel::execute()
     Stage& writer = makeWriter(m_outputFile, sortStage, "", writerOptions);
 
     writer.prepare(table);
-
-    // process the data, grabbing the PointViewSet for visualization of the
-    PointViewSet viewSetOut = writer.execute(table);
-
-    if (isVisualize())
-        visualize(*viewSetOut.begin());
+    writer.execute(table);
 
     return 0;
 }
diff --git a/kernels/TIndexKernel.cpp b/kernels/TIndexKernel.cpp
index 6e73395..7feebb0 100644
--- a/kernels/TIndexKernel.cpp
+++ b/kernels/TIndexKernel.cpp
@@ -124,6 +124,8 @@ void TIndexKernel::validateSwitches(ProgramArgs& args)
         StringList invalidArgs;
         invalidArgs.push_back("a_srs");
         invalidArgs.push_back("src_srs_name");
+        invalidArgs.push_back("stdin");
+        invalidArgs.push_back("fast_boundary");
         for (auto arg : invalidArgs)
             if (args.set(arg))
             {
@@ -216,6 +218,10 @@ void TIndexKernel::createFile()
     else
         m_files = readSTDIN();
 
+    if (m_absPath)
+        for (auto& s : m_files)
+            s = FileUtils::toAbsolutePath(s);
+
     if (m_files.empty())
     {
         std::ostringstream out;
@@ -463,7 +469,10 @@ TIndexKernel::FileInfo TIndexKernel::getFileInfo(KernelFactory& factory,
     // Need to make sure options get set.
     Stage& reader = manager.makeReader(filename, "");
 
-    if (m_fastBoundary)
+    // If we aren't able to make a hexbin filter, we
+    // will just do a simple fast_boundary.
+    Stage* hexer = &manager.makeFilter("filters.hexbin", reader);
+    if (m_fastBoundary || !hexer)
     {
         QuickInfo qi = reader.preview();
 
@@ -482,11 +491,10 @@ TIndexKernel::FileInfo TIndexKernel::getFileInfo(KernelFactory& factory,
     }
     else
     {
-        Stage& hexer = manager.makeFilter("filters.hexbin", reader);
 
         PointTable table;
-        hexer.prepare(table);
-        PointViewSet set = hexer.execute(table);
+        hexer->prepare(table);
+        PointViewSet set = hexer->execute(table);
 
         MetadataNode m = table.metadata();
         m = m.findChild("filters.hexbin:boundary");
diff --git a/kernels/TranslateKernel.cpp b/kernels/TranslateKernel.cpp
index 2595be6..cd40a01 100644
--- a/kernels/TranslateKernel.cpp
+++ b/kernels/TranslateKernel.cpp
@@ -40,9 +40,10 @@
 #include <pdal/PipelineWriter.hpp>
 #include <pdal/PointTable.hpp>
 #include <pdal/PointView.hpp>
-#include <pdal/Stage.hpp>
+#include <pdal/Reader.hpp>
 #include <pdal/StageFactory.hpp>
 #include <pdal/PipelineReaderJSON.hpp>
+#include <pdal/Writer.hpp>
 #include <pdal/util/FileUtils.hpp>
 #include <json/json.h>
 
@@ -80,14 +81,16 @@ void TranslateKernel::addSwitches(ProgramArgs& args)
         setPositional();
     args.add("filter,f", "Filter type", m_filterType).
         setOptionalPositional();
-    args.add("json", "JSON array of filters", m_filterJSON);
-    args.add("pipeline,p", "Pipeline output", m_pipelineOutput);
+    args.add("json", "PDAL pipeline from which to extract filters.",
+        m_filterJSON);
+    args.add("pipeline,p", "Pipeline output", m_pipelineOutputFile);
     args.add("metadata,m", "Dump metadata output to the specified file",
         m_metadataFile);
     args.add("reader,r", "Reader type", m_readerType);
     args.add("writer,w", "Writer type", m_writerType);
 }
 
+
 /*
   Build a pipeline from a JSON filter specification.
 */
@@ -100,43 +103,44 @@ void TranslateKernel::makeJSONPipeline()
 
     if (json.empty())
         json = m_filterJSON;
+    std::stringstream in(json);
+    m_manager.readPipeline(in);
 
-    Json::Reader jsonReader;
-    Json::Value filters;
-    jsonReader.parse(json, filters);
-    if (filters.type() != Json::arrayValue || filters.empty())
-        throw pdal_error("JSON must be an array of filter specifications");
-
-    Json::Value pipeline(Json::arrayValue);
+    std::vector<Stage *> roots = m_manager.roots();
+    if (roots.size() > 1)
+        throw pdal_error("Can't process pipeline with more than one root.");
 
-    // Add the input file, the filters (as provided) and the output file.
-    if (m_readerType.size())
+    Stage *r(nullptr);
+    if (roots.size())
+        r = dynamic_cast<Reader *>(roots[0]);
+    if (r)
     {
-        Json::Value node(Json::objectValue);
-        node["filename"] = m_inputFile;
-        node["type"] = m_readerType;
-        pipeline.append(node);
+        StageCreationOptions ops { m_inputFile, m_readerType, nullptr,
+            Options(), r->tag() };
+        m_manager.replace(r, &m_manager.makeReader(ops));
     }
     else
-        pipeline.append(Json::Value(m_inputFile));
-    for (Json::ArrayIndex i = 0; i < filters.size(); ++i)
-        pipeline.append(filters[i]);
-    if (m_writerType.size())
     {
-        Json::Value node(Json::objectValue);
-        node["filename"] = m_outputFile;
-        node["type"] = m_writerType;
-        pipeline.append(node);
+        r = &m_manager.makeReader(m_inputFile, m_readerType);
+        if (roots.size())
+            roots[0]->setInput(*r);
     }
-    else
-        pipeline.append(Json::Value(m_outputFile));
 
-    Json::Value root;
-    root["pipeline"] = pipeline;
+    std::vector<Stage *> leaves = m_manager.leaves();
+    if (leaves.size() != 1)
+        throw pdal_error("Can't process pipeline with more than one "
+            "terminal stage.");
 
-    std::stringstream pipeline_str;
-    pipeline_str << root;
-    m_manager.readPipeline(pipeline_str);
+    Stage *w = dynamic_cast<Writer *>(leaves[0]);
+    if (w)
+        m_manager.replace(w, &m_manager.makeWriter(m_outputFile, m_writerType));
+    else
+    {
+        // We know we have a leaf because we added a reader.
+        StageCreationOptions ops { m_outputFile, m_writerType, leaves[0],
+            Options(), "" };  // These last two args just keep compiler quiet.
+        m_manager.makeWriter(ops);
+    }
 }
 
 
@@ -145,7 +149,10 @@ void TranslateKernel::makeJSONPipeline()
 */
 void TranslateKernel::makeArgPipeline()
 {
-    Stage& reader = m_manager.makeReader(m_inputFile, m_readerType);
+    std::string readerType(m_readerType);
+    if (!readerType.empty() && !Utils::startsWith(readerType, "readers."))
+        readerType.insert(0, "readers.");
+    Stage& reader = m_manager.makeReader(m_inputFile, readerType);
     Stage* stage = &reader;
 
     // add each filter provided on the command-line,
@@ -160,7 +167,10 @@ void TranslateKernel::makeArgPipeline()
         Stage& filter = m_manager.makeFilter(filter_name, *stage);
         stage = &filter;
     }
-    m_manager.makeWriter(m_outputFile, m_writerType, *stage);
+    std::string writerType(m_writerType);
+    if (!writerType.empty() && !Utils::startsWith(writerType, "writers."))
+        writerType.insert(0, "writers.");
+    m_manager.makeWriter(m_outputFile, writerType, *stage);
 }
 
 
@@ -168,24 +178,35 @@ int TranslateKernel::execute()
 {
     std::ostream *metaOut(nullptr);
 
+    if (m_filterJSON.size() && m_filterType.size())
+        throw pdal_error("Cannot set both --filter options and --json options");
+
     if (m_metadataFile.size())
     {
+        if (m_pipelineOutputFile.size())
+            m_log->get(LogLevel::Info) << "Metadata will not be written. "
+                "'pipeline' option prevents execution.";
+        else
+        {
         metaOut = FileUtils::createFile(m_metadataFile);
         if (! metaOut)
             throw pdal_error("Couldn't output metadata output file '" +
                 m_metadataFile + "'.");
+        }
     }
 
-    if (m_filterJSON.size() && m_filterType.size())
-        throw pdal_error("Cannot set both --filter options and --json options");
-
     if (!m_filterJSON.empty())
         makeJSONPipeline();
     else
         makeArgPipeline();
 
-    if (m_pipelineOutput.size() > 0)
-        PipelineWriter::writePipeline(m_manager.getStage(), m_pipelineOutput);
+    // If we write pipeline output, we don't run, and therefore don't write
+    // metadata either.
+    if (m_pipelineOutputFile.size() > 0)
+    {
+        PipelineWriter::writePipeline(m_manager.getStage(),
+            m_pipelineOutputFile);
+        return 0;
+    }
     m_manager.execute();
     if (metaOut)
     {
diff --git a/kernels/TranslateKernel.hpp b/kernels/TranslateKernel.hpp
index f51cf8f..cd5766f 100644
--- a/kernels/TranslateKernel.hpp
+++ b/kernels/TranslateKernel.hpp
@@ -66,7 +66,7 @@ private:
 
     std::string m_inputFile;
     std::string m_outputFile;
-    std::string m_pipelineOutput;
+    std::string m_pipelineOutputFile;
     std::string m_readerType;
     StringList m_filterType;
     std::string m_writerType;
diff --git a/pdal/EigenUtils.cpp b/pdal/EigenUtils.cpp
index 2dae5d4..5526107 100644
--- a/pdal/EigenUtils.cpp
+++ b/pdal/EigenUtils.cpp
@@ -34,6 +34,7 @@
 
 #include <pdal/EigenUtils.hpp>
 #include <pdal/GDALUtils.hpp>
+#include <pdal/KDIndex.hpp>
 #include <pdal/PointView.hpp>
 #include <pdal/SpatialReference.hpp>
 #include <pdal/util/Bounds.hpp>
@@ -42,6 +43,7 @@
 #include <Eigen/Dense>
 
 #include <cfloat>
+#include <numeric>
 #include <vector>
 
 namespace pdal
@@ -255,6 +257,44 @@ Eigen::MatrixXd createMaxMatrix(PointView& view, int rows, int cols,
     return ZImax;
 }
 
+Eigen::MatrixXd createMaxMatrix2(PointView& view, int rows, int cols,
+                                 double cell_size, BOX2D bounds)
+{
+    using namespace Dimension;
+    using namespace Eigen;
+
+    KD2Index kdi(view);
+    kdi.build();
+
+    MatrixXd ZImax(rows, cols);
+    ZImax.setConstant(std::numeric_limits<double>::quiet_NaN());
+
+    // for each grid center, search PointView for neighbors, and find max of those
+    for (int c = 0; c < cols; ++c)
+    {
+        double x = bounds.minx + (c + 0.5) * cell_size;
+
+        for (int r = 0; r < rows; ++r)
+        {
+            double y = bounds.miny + (r + 0.5) * cell_size;
+
+            auto neighbors = kdi.radius(x, y, cell_size * std::sqrt(2.0));
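+            // The search radius of cell_size * sqrt(2) covers the whole cell
+            // (with some overlap into neighboring cells), so no gaps appear.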
+            
+            double val(std::numeric_limits<double>::lowest());
+            for (auto const& n : neighbors)
+            {
+                double z(view.getFieldAs<double>(Id::Z, n));
+                if (z > val)
+                    val = z;
+            }
+            if (val > std::numeric_limits<double>::lowest())
+                ZImax(r, c) = val;
+        }
+    }
+
+    return ZImax;
+}
+
 Eigen::MatrixXd extendedLocalMinimum(PointView& view, int rows, int cols,
                                      double cell_size, BOX2D bounds)
 {
@@ -428,22 +468,72 @@ Eigen::MatrixXd matrixOpen(Eigen::MatrixXd data, int radius)
     return maxZ.block(radius, radius, data.rows(), data.cols());
 }
 
-Eigen::MatrixXd padMatrix(Eigen::MatrixXd d, int r)
+std::vector<double> dilateDiamond(std::vector<double> data, size_t rows, size_t cols, int iterations)
 {
-    using namespace Eigen;
+    std::vector<double> out(data.size(), std::numeric_limits<double>::lowest());
+    std::vector<size_t> idx(5);
+    
+    for (int iter = 0; iter < iterations; ++iter)
+    {
+        for (size_t col = 0; col < cols; ++col)
+        {
+            size_t index = col*rows;
+            for (size_t row = 0; row < rows; ++row)
+            {
+                size_t j = 0;
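+                // Gather the cell itself plus its in-bounds 4-connected
+                // (diamond) neighbors.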
+                idx[j++] = index+row;
+                if (row > 0)
+                    idx[j++] = idx[0]-1;
+                if (row < rows-1)
+                    idx[j++] = idx[0]+1;
+                if (col > 0)
+                    idx[j++] = idx[0]-rows;
+                if (col < cols-1)
+                    idx[j++] = idx[0]+rows;
+                for (size_t i = 0; i < j; ++i)
+                {
+                    if (data[idx[i]] > out[index+row])
+                        out[index+row] = data[idx[i]];
+                }
+            }
+        }
+        data.swap(out);
+    }
+    return data;
+}
 
-    MatrixXd out = MatrixXd::Zero(d.rows()+2*r, d.cols()+2*r);
-    out.block(r, r, d.rows(), d.cols()) = d;
-    out.block(r, 0, d.rows(), r) =
-        d.block(0, 0, d.rows(), r).rowwise().reverse();
-    out.block(r, d.cols()+r, d.rows(), r) =
-        d.block(0, d.cols()-r, d.rows(), r).rowwise().reverse();
-    out.block(0, 0, r, out.cols()) =
-        out.block(r, 0, r, out.cols()).colwise().reverse();
-    out.block(d.rows()+r, 0, r, out.cols()) =
-        out.block(out.rows()-r-1, 0, r, out.cols()).colwise().reverse();
-
-    return out;
+std::vector<double> erodeDiamond(std::vector<double> data, size_t rows, size_t cols, int iterations)
+{
+    std::vector<double> out(data.size(), std::numeric_limits<double>::max());
+    std::vector<size_t> idx(5);
+    
+    for (int iter = 0; iter < iterations; ++iter)
+    {
+        for (size_t col = 0; col < cols; ++col)
+        {
+            size_t index = col*rows;
+            for (size_t row = 0; row < rows; ++row)
+            {
+                size_t j = 0;
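+                // As in dilateDiamond: the cell plus its in-bounds
+                // 4-connected neighbors, but the minimum is kept below.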
+                idx[j++] = index+row;
+                if (row > 0)
+                    idx[j++] = idx[0]-1;
+                if (row < rows-1)
+                    idx[j++] = idx[0]+1;
+                if (col > 0)
+                    idx[j++] = idx[0]-rows;
+                if (col < cols-1)
+                    idx[j++] = idx[0]+rows;
+                for (size_t i = 0; i < j; ++i)
+                {
+                    if (data[idx[i]] < out[index+row])
+                        out[index+row] = data[idx[i]];
+                }
+            }
+        }
+        data.swap(out);
+    }
+    return data;
 }
 
 Eigen::MatrixXd pointViewToEigen(const PointView& view)
diff --git a/pdal/EigenUtils.hpp b/pdal/EigenUtils.hpp
index 0120b95..6bda076 100644
--- a/pdal/EigenUtils.hpp
+++ b/pdal/EigenUtils.hpp
@@ -250,6 +250,9 @@ PDAL_DLL uint8_t computeRank(PointView& view, std::vector<PointId> ids,
 PDAL_DLL Eigen::MatrixXd createMaxMatrix(PointView& view, int rows, int cols,
         double cell_size, BOX2D bounds);
 
+PDAL_DLL Eigen::MatrixXd createMaxMatrix2(PointView& view, int rows, int cols,
+        double cell_size, BOX2D bounds);
+
 /**
   Create matrix of minimum Z values.
 
@@ -265,7 +268,7 @@ PDAL_DLL Eigen::MatrixXd createMaxMatrix(PointView& view, int rows, int cols,
 */
 PDAL_DLL Eigen::MatrixXd createMinMatrix(PointView& view, int rows, int cols,
         double cell_size, BOX2D bounds);
-
+        
 /**
   Find local minimum elevations by extended local minimum.
 
@@ -314,6 +317,44 @@ PDAL_DLL Eigen::MatrixXd matrixClose(Eigen::MatrixXd data, int radius);
 PDAL_DLL Eigen::MatrixXd matrixOpen(Eigen::MatrixXd data, int radius);
 
 /**
+  Perform a morphological dilation of the input raster.
+
+  Performs a morphological dilation of the input raster using a diamond
+  structuring element. Larger structuring elements are approximated by applying
+  multiple iterations of the dilation operation. The input and output rasters are
+  stored in column major order.
+
+  \param data the input raster.
+  \param rows the number of rows.
+  \param cols the number of cols.
+  \param iterations the number of iterations used to approximate a larger 
+         structuring element.
+  \return the morphological dilation of the input raster.
+*/
+PDAL_DLL std::vector<double> dilateDiamond(std::vector<double> data,
+                                           size_t rows, size_t cols,
+                                           int iterations);
+
+/**
+  Perform a morphological erosion of the input raster.
+
+  Performs a morphological erosion of the input raster using a diamond
+  structuring element. Larger structuring elements are approximated by applying
+  multiple iterations of the erosion operation. The input and output rasters are
+  stored in column major order.
+
+  \param data the input raster.
+  \param rows the number of rows.
+  \param cols the number of cols.
+  \param iterations the number of iterations used to approximate a larger 
+         structuring element.
+  \return the morphological erosion of the input raster.
+*/
+PDAL_DLL std::vector<double> erodeDiamond(std::vector<double> data,
+                                          size_t rows, size_t cols,
+                                          int iterations);
+
+/**
   Pad input matrix symmetrically.
 
   Symmetrically pads the input matrix with given radius.
@@ -322,7 +363,24 @@ PDAL_DLL Eigen::MatrixXd matrixOpen(Eigen::MatrixXd data, int radius);
   \param r the radius of the padding.
   \return the padded matrix.
 */
-PDAL_DLL Eigen::MatrixXd padMatrix(Eigen::MatrixXd d, int r);
+template <typename Derived>
+PDAL_DLL Derived padMatrix(const Eigen::MatrixBase<Derived>& d, int r)
+{
+    using namespace Eigen;
+
+    Derived out = Derived::Zero(d.rows()+2*r, d.cols()+2*r);
+    out.block(r, r, d.rows(), d.cols()) = d;
+    out.block(r, 0, d.rows(), r) =
+        d.block(0, 0, d.rows(), r).rowwise().reverse();
+    out.block(r, d.cols()+r, d.rows(), r) =
+        d.block(0, d.cols()-r, d.rows(), r).rowwise().reverse();
+    out.block(0, 0, r, out.cols()) =
+        out.block(r, 0, r, out.cols()).colwise().reverse();
+    out.block(d.rows()+r, 0, r, out.cols()) =
+        out.block(out.rows()-r-1, 0, r, out.cols()).colwise().reverse();
+
+    return out;
+}
 
 /**
   Converts a PointView into an Eigen::MatrixXd.
@@ -721,12 +779,14 @@ PDAL_DLL double computeSlopeD8(const Eigen::MatrixBase<Derived>& data,
     submatrix.setConstant(data(1, 1));
     submatrix -= data;
     submatrix /= spacing;
-    submatrix(0, 1) /= std::sqrt(2.0);
-    submatrix(1, 0) /= std::sqrt(2.0);
-    submatrix(1, 2) /= std::sqrt(2.0);
-    submatrix(2, 1) /= std::sqrt(2.0);
-
-    // find max and convert to degrees
+    submatrix(0, 0) /= std::sqrt(2.0);
+    submatrix(0, 2) /= std::sqrt(2.0);
+    submatrix(2, 0) /= std::sqrt(2.0);
+    submatrix(2, 2) /= std::sqrt(2.0);
+
+    // Why not just use Eigen's maxCoeff reduction to find the max? Well, as it
+    // turns out, if there is a chance that we will have NaNs, then maxCoeff
+    // has no way to ignore the NaN.
     double maxval = std::numeric_limits<double>::lowest();
     for (int i = 0; i < submatrix.size(); ++i)
     {
@@ -761,6 +821,118 @@ PDAL_DLL double computeSlopeFD(const Eigen::MatrixBase<Derived>& data,
 }
 
 /**
+  Perform a morphological dilation of the input matrix.
+
+  Performs a morphological dilation of the input matrix using a circular
+  structuring element of given radius.
+
+  \param A the input matrix.
+  \param radius the radius of the circular structuring element.
+  \return the morphological dilation of the input matrix.
+*/
+template <typename Derived>
+PDAL_DLL Derived dilate(const Eigen::MatrixBase<Derived>& A, int radius)
+{
+    Derived B = Derived::Constant(A.rows(), A.cols(), 0);
+
+    int length = 2 * radius + 1;
+    bool match_flag;
+    for (int c = 0; c < A.cols(); ++c)
+    {
+        for (int r = 0; r < A.rows(); ++r)
+        {
+            match_flag = false;
+            for (int k = 0; k < length; ++k)
+            {
+                if (match_flag)
+                    break;
+                int cdiff = k-radius;
+                int cpos = c+cdiff;
+                if (cpos < 0 || cpos >= A.cols())
+                    continue;
+                for (int l = 0; l < length; ++l)
+                {
+                    int rdiff = l-radius;
+                    int rpos = r+rdiff;
+                    if (rpos < 0 || rpos >= A.rows())
+                        continue;
+                    if ((cdiff*cdiff+rdiff*rdiff) > radius*radius)
+                        continue;
+                    if (A(rpos, cpos) == 1)
+                    {
+                        match_flag = true;
+                        break;
+                    }
+                }
+            }
+            // Assign value according to match flag
+            B(r, c) = (match_flag) ? 1 : 0;
+        }
+    }
+
+    return B;
+}
+
+/**
+  Perform a morphological erosion of the input matrix.
+
+  Performs a morphological erosion of the input matrix using a circular
+  structuring element of given radius.
+
+  \param A the input matrix.
+  \param radius the radius of the circular structuring element.
+  \return the morphological erosion of the input matrix.
+*/
+template <typename Derived>
+PDAL_DLL Derived erode(const Eigen::MatrixBase<Derived>& A, int radius)
+{
+    Derived B = Derived::Constant(A.rows(), A.cols(), 1);
+
+    int length = 2 * radius + 1;
+    bool mismatch_flag;
+    for (int c = 0; c < A.cols(); ++c)
+    {
+        for (int r = 0; r < A.rows(); ++r)
+        {
+            if (A(r, c) == 0)
+            {
+                B(r, c) = 0;
+                continue;
+            }
+            mismatch_flag = false;
+            for (int k = 0; k < length; k++)
+            {
+                if (mismatch_flag)
+                    break;
+                int cdiff = k-radius;
+                int cpos = c+cdiff;
+                if (cpos < 0 || cpos >= A.cols())
+                    continue;
+                for (int l = 0; l < length; l++)
+                {
+                    int rdiff = l-radius;
+                    int rpos = r+rdiff;
+                    if (rpos < 0 || rpos >= A.rows())
+                        continue;
+                    if ((cdiff*cdiff+rdiff*rdiff) > radius*radius)
+                        continue;
+                    if (A(rpos, cpos) == 0)
+                    {
+                        B(r, c) = 0;
+                        mismatch_flag = true;
+                        break;
+                    }
+                }
+            }
+            // Assign value according to mismatch flag
+            B(r, c) = (mismatch_flag) ? 0 : 1;
+        }
+    }
+
+    return B;
+}
+
+/**
   Thin Plate Spline interpolation.
 
   \param x the x coordinate of the input data.
diff --git a/pdal/FlexWriter.hpp b/pdal/FlexWriter.hpp
index b881ff2..900c356 100644
--- a/pdal/FlexWriter.hpp
+++ b/pdal/FlexWriter.hpp
@@ -57,7 +57,7 @@ protected:
             std::ostringstream oss;
             oss << getName() << ": Can't write with template-based "
                 "filename using streaming point table.";
-            throw oss.str();
+            throw pdal_error(oss.str());
         }
     }
 
@@ -90,12 +90,9 @@ private:
         if (m_hashPos == std::string::npos)
         {
             if (!table.spatialReferenceUnique())
-            {
-                std::ostringstream oss;
-                oss << getName() << ": Attempting to write '" << m_filename <<
-                    "' with multiple spatial references.";
-                Utils::printError(oss.str());
-            }
+                log()->get(LogLevel::Error) << getName() <<
+                    ": Attempting to write '" << m_filename <<
+                    "' with multiple point spatial references.";
             readyFile(generateFilename(), table.spatialReference());
         }
     }
diff --git a/pdal/GDALUtils.cpp b/pdal/GDALUtils.cpp
index 1d6608e..d7ca411 100644
--- a/pdal/GDALUtils.cpp
+++ b/pdal/GDALUtils.cpp
@@ -111,7 +111,7 @@ GDALDataType toGdalType(Dimension::Type t)
         throw pdal_error("PDAL 'none' type unsupported.");
 	default:
         throw pdal_error("Unrecognized PDAL dimension type.");
-	
+
     }
 }
 
@@ -146,6 +146,36 @@ bool reprojectBounds(BOX3D& box, const std::string& srcSrs,
 }
 
 
+/**
+  Reproject a point from a source projection to a destination.
+  \param x  X coordinate of point to be reprojected.
+  \param y  Y coordinate of point to be reprojected.
+  \param z  Z coordinate of point to be reprojected.
+  \param srcSrs  String in WKT or other suitable format of the source projection.
+  \param dstSrs  String in WKT or other suitable format to which
+    coordinates should be projected.
+  \return  Whether the reprojection was successful or not.
+*/
+bool reprojectPoint(double& x, double& y, double& z, const std::string& srcSrs,
+    const std::string& dstSrs)
+{
+    OGRSpatialReference src;
+    OGRSpatialReference dst;
+
+    OGRErr srcOk = OSRSetFromUserInput(&src, srcSrs.c_str());
+    OGRErr dstOk = OSRSetFromUserInput(&dst, dstSrs.c_str());
+    if (srcOk != OGRERR_NONE || dstOk != OGRERR_NONE)
+        return false;
+
+    OGRCoordinateTransformationH transform =
+        OCTNewCoordinateTransformation(&src, &dst);
+
+    bool ok = (OCTTransform(transform, 1, &x, &y, &z));
+    OCTDestroyCoordinateTransformation(transform);
+    return ok;
+}
+
+
 std::string lastError()
 {
     return CPLGetLastErrorMsg();
diff --git a/pdal/GDALUtils.hpp b/pdal/GDALUtils.hpp
index 284e504..b0209ad 100644
--- a/pdal/GDALUtils.hpp
+++ b/pdal/GDALUtils.hpp
@@ -65,6 +65,8 @@ PDAL_DLL void registerDrivers();
 PDAL_DLL void unregisterDrivers();
 PDAL_DLL bool reprojectBounds(BOX3D& box, const std::string& srcSrs,
     const std::string& dstSrs);
+PDAL_DLL bool reprojectPoint(double& x, double& y, double& z,
+    const std::string& srcSrs, const std::string& dstSrs);
 PDAL_DLL std::string lastError();
 
 typedef std::shared_ptr<void> RefPtr;
diff --git a/pdal/Geometry.cpp b/pdal/Geometry.cpp
index 3689175..dbd6704 100644
--- a/pdal/Geometry.cpp
+++ b/pdal/Geometry.cpp
@@ -56,7 +56,7 @@ Geometry::Geometry(const std::string& wkt_or_json, SpatialReference ref)
     , m_srs(ref)
     , m_geoserr(geos::ErrorHandler::get())
 {
-    update(wkt_or_json, ref);
+    update(wkt_or_json);
 }
 
 
@@ -69,7 +69,7 @@ Geometry::~Geometry()
 }
 
 
-void Geometry::update(const std::string& wkt_or_json, SpatialReference ref)
+void Geometry::update(const std::string& wkt_or_json)
 {
     bool isJson = wkt_or_json.find("{") != wkt_or_json.npos ||
                   wkt_or_json.find("}") != wkt_or_json.npos;
@@ -79,7 +79,8 @@ void Geometry::update(const std::string& wkt_or_json, SpatialReference ref)
     if (!isJson)
     {
         geos::GeometryDeleter geom_del(m_geoserr);
-        GEOSGeomPtr p(GEOSWKTReader_read_r(m_geoserr.ctx(), geosreader, wkt_or_json.c_str()), geom_del);
+        GEOSGeomPtr p(GEOSWKTReader_read_r(m_geoserr.ctx(), geosreader,
+            wkt_or_json.c_str()), geom_del);
         m_geom.swap(p);
     }
     else
@@ -95,7 +96,8 @@ void Geometry::update(const std::string& wkt_or_json, SpatialReference ref)
         OGRErr err = OGR_G_ExportToWkt(json, &gdal_wkt);
 
         geos::GeometryDeleter geom_del(m_geoserr);
-        GEOSGeomPtr p(GEOSWKTReader_read_r(m_geoserr.ctx(), geosreader, gdal_wkt), geom_del);
+        GEOSGeomPtr p(GEOSWKTReader_read_r(m_geoserr.ctx(), geosreader,
+            gdal_wkt), geom_del);
         m_geom.swap(p);
 
         OGRFree(gdal_wkt);
@@ -121,13 +123,13 @@ void Geometry::prepare()
 
 Geometry& Geometry::operator=(const Geometry& input)
 {
-
-    if (&input!= this)
+    if (&input != this)
     {
         m_geoserr = input.m_geoserr;
         m_srs = input.m_srs;
         geos::GeometryDeleter geom_del(m_geoserr);
-        GEOSGeomPtr p(GEOSGeom_clone_r(m_geoserr.ctx(),  input.m_geom.get()), geom_del);
+        GEOSGeomPtr p(GEOSGeom_clone_r(m_geoserr.ctx(),  input.m_geom.get()),
+            geom_del);
         m_geom.swap(p);
         prepare();
     }
@@ -141,7 +143,8 @@ Geometry::Geometry(const Geometry& input)
 {
     assert(input.m_geom.get() != 0);
     geos::GeometryDeleter geom_del(m_geoserr);
-    GEOSGeomPtr p(GEOSGeom_clone_r(m_geoserr.ctx(),  input.m_geom.get()), geom_del);
+    GEOSGeomPtr p(GEOSGeom_clone_r(m_geoserr.ctx(),  input.m_geom.get()),
+        geom_del);
     m_geom.swap(p);
     assert(m_geom.get() != 0);
     m_prepGeom = 0;
@@ -179,7 +182,8 @@ Geometry::Geometry(OGRGeometryH g, const SpatialReference& srs)
     GEOSWKBReader* reader = GEOSWKBReader_create_r(m_geoserr.ctx());
 
     geos::GeometryDeleter geom_del(m_geoserr);
-    GEOSGeomPtr p(GEOSWKBReader_read_r(m_geoserr.ctx(),  reader, wkb.data(), wkbSize), geom_del);
+    GEOSGeomPtr p(GEOSWKBReader_read_r(m_geoserr.ctx(),  reader, wkb.data(),
+        wkbSize), geom_del);
     m_geom.swap(p);
     prepare();
 
@@ -187,13 +191,16 @@ Geometry::Geometry(OGRGeometryH g, const SpatialReference& srs)
 }
 
 
-
 Geometry Geometry::transform(const SpatialReference& ref) const
 {
     if (m_srs.empty())
-        throw pdal_error("Geometry::transform failed due to m_srs being empty");
+        throw pdal_error("Geometry::transform failed.  "
+            "Source missing spatial reference.");
     if (ref.empty())
-        throw pdal_error("Geometry::transform failed due to ref being empty");
+        throw pdal_error("Geometry::transform failed.  "
+            "Invalid destination spatial reference.");
+    if (ref == m_srs)
+        return *this;
 
     gdal::SpatialRef fromRef(m_srs.getWKT());
     gdal::SpatialRef toRef(ref.getWKT());
@@ -241,7 +248,8 @@ BOX3D Geometry::bounds() const
 
 bool Geometry::equals(const Geometry& p, double tolerance) const
 {
-    return (bool) GEOSEqualsExact_r(m_geoserr.ctx(), m_geom.get(), p.m_geom.get(), tolerance);
+    return (bool) GEOSEqualsExact_r(m_geoserr.ctx(), m_geom.get(),
+        p.m_geom.get(), tolerance);
 }
 
 
@@ -281,11 +289,13 @@ std::string Geometry::validReason() const
 std::string Geometry::wkt(double precision, bool bOutputZ) const
 {
     GEOSWKTWriter *writer = GEOSWKTWriter_create_r(m_geoserr.ctx());
-    GEOSWKTWriter_setRoundingPrecision_r(m_geoserr.ctx(), writer, (int)precision);
+    GEOSWKTWriter_setRoundingPrecision_r(m_geoserr.ctx(), writer,
+        (int)precision);
     if (bOutputZ)
         GEOSWKTWriter_setOutputDimension_r(m_geoserr.ctx(), writer, 3);
 
-    char *smoothWkt = GEOSWKTWriter_write_r(m_geoserr.ctx(), writer, m_geom.get());
+    char *smoothWkt = GEOSWKTWriter_write_r(m_geoserr.ctx(), writer,
+        m_geom.get());
     std::string output(smoothWkt);
     GEOSFree_r(m_geoserr.ctx(), smoothWkt);
     GEOSWKTWriter_destroy_r(m_geoserr.ctx(), writer);
@@ -301,13 +311,10 @@ std::string Geometry::json(double precision) const
     papszOptions = CSLSetNameValue(papszOptions, "COORDINATE_PRECISION",
         prec.str().c_str() );
 
-    std::string w(wkt());
-
     gdal::SpatialRef srs(m_srs.getWKT());
-    gdal::Geometry g(w, srs);
+    gdal::Geometry g(wkt(), srs);
 
     char* json = OGR_G_ExportToJsonEx(g.get(), papszOptions);
-
     std::string output(json);
     OGRFree(json);
     return output;
@@ -323,20 +330,16 @@ std::ostream& operator<<(std::ostream& ostr, const Geometry& p)
 
 std::istream& operator>>(std::istream& istr, Geometry& p)
 {
-
     std::ostringstream oss;
     oss << istr.rdbuf();
 
-    std::string wkt = oss.str();
-
     try
     {
-        p.update(wkt);
+        p.update(oss.str());
     }
-    catch (pdal_error& err)
+    catch (pdal_error& )
     {
         istr.setstate(std::ios::failbit);
-        throw;
     }
     return istr;
 }
diff --git a/pdal/Geometry.hpp b/pdal/Geometry.hpp
index 7b6e45d..30582b5 100644
--- a/pdal/Geometry.hpp
+++ b/pdal/Geometry.hpp
@@ -98,9 +98,7 @@ public:
 
     OGRGeometryH getOGRHandle();
 
-
-    virtual void update(const std::string& wkt_or_json,
-        SpatialReference ref = SpatialReference());
+    virtual void update(const std::string& wkt_or_json);
 
     void setSpatialReference( const SpatialReference& ref)
         { m_srs = ref; }
diff --git a/pdal/KDIndex.hpp b/pdal/KDIndex.hpp
index e6dee01..522f9a7 100644
--- a/pdal/KDIndex.hpp
+++ b/pdal/KDIndex.hpp
@@ -58,7 +58,7 @@ class PDAL_DLL KDIndex
 protected:
     KDIndex(const PointView& buf) : m_buf(buf)
     {}
-   
+
     ~KDIndex()
     {}
 
@@ -72,8 +72,7 @@ public:
     template <class BBOX> bool kdtree_get_bbox(BBOX& bb) const;
     void build()
     {
-        m_index.reset(new my_kd_tree_t(DIM, *this,
-            nanoflann::KDTreeSingleIndexAdaptorParams(10, DIM)));
+        m_index.reset(new my_kd_tree_t(DIM, *this));
         m_index->buildIndex();
     }
 
@@ -139,7 +138,7 @@ public:
     {
         double x = m_buf.getFieldAs<double>(Dimension::Id::X, idx);
         double y = m_buf.getFieldAs<double>(Dimension::Id::Y, idx);
- 
+
         return neighbors(x, y, k);
     }
 
@@ -147,9 +146,32 @@ public:
     {
         double x = point.getFieldAs<double>(Dimension::Id::X);
         double y = point.getFieldAs<double>(Dimension::Id::Y);
- 
+
         return neighbors(x, y, k);
     }
+    
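+    // 2D k-nearest-neighbor query: indices and squared distances are written
+    // into the caller-provided vectors, which must be sized to hold k entries.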
+    void knnSearch(double x, double y, point_count_t k,
+        std::vector<PointId> *indices, std::vector<double> *sqr_dists)
+    {
+        k = std::min(m_buf.size(), k);
+        nanoflann::KNNResultSet<double, PointId, point_count_t> resultSet(k);
+
+        resultSet.init(&indices->front(), &sqr_dists->front());
+
+        std::vector<double> pt;
+        pt.push_back(x);
+        pt.push_back(y);
+        m_index->findNeighbors(resultSet, &pt[0], nanoflann::SearchParams(10));
+    }
+    
+    void knnSearch(PointId idx, point_count_t k, std::vector<PointId> *indices,
+        std::vector<double> *sqr_dists)
+    {
+        double x = m_buf.getFieldAs<double>(Dimension::Id::X, idx);
+        double y = m_buf.getFieldAs<double>(Dimension::Id::Y, idx);
+
+        knnSearch(x, y, k, indices, sqr_dists);
+    }
 
     std::vector<PointId> radius(double const& x, double const& y,
         double const& r) const
@@ -235,7 +257,7 @@ public:
         pt.push_back(x);
         pt.push_back(y);
         pt.push_back(z);
-        m_index->findNeighbors(resultSet, &pt[0], nanoflann::SearchParams(10));
+        m_index->findNeighbors(resultSet, &pt[0], nanoflann::SearchParams());
         return output;
     }
 
@@ -256,15 +278,15 @@ public:
 
         return neighbors(x, y, z, k);
     }
-   
+
     void knnSearch(double x, double y, double z, point_count_t k,
         std::vector<PointId> *indices, std::vector<double> *sqr_dists)
     {
         k = std::min(m_buf.size(), k);
         nanoflann::KNNResultSet<double, PointId, point_count_t> resultSet(k);
-        
+
         resultSet.init(&indices->front(), &sqr_dists->front());
-        
+
         std::vector<double> pt;
         pt.push_back(x);
         pt.push_back(y);
diff --git a/pdal/Kernel.cpp b/pdal/Kernel.cpp
index 0c6988f..1b2ca01 100644
--- a/pdal/Kernel.cpp
+++ b/pdal/Kernel.cpp
@@ -52,11 +52,18 @@
 namespace pdal
 {
 
-namespace
+Kernel::Kernel() : m_showTime(false), m_hardCoreDebug(false)
+{}
+
+bool Kernel::isStagePrefix(const std::string& stageType)
 {
+   return (stageType == "readers" || stageType == "writers" ||
+        stageType == "filters");
+}
 
-bool parseOption(std::string o, std::string& stage, std::string& option,
-    std::string& value)
+
+bool Kernel::parseStageOption(std::string o, std::string& stage,
+    std::string& option, std::string& value)
 {
     value.clear();
     if (o.size() < 2)
@@ -84,9 +91,8 @@ bool parseOption(std::string o, std::string& stage, std::string& option,
     // Get stage_type.
     count = Utils::extract(o, pos, islc);
     pos += count;
-    std::string stage_type = o.substr(0, pos);
-    if (stage_type != "readers" && stage_type != "writers" &&
-        stage_type != "filters")
+    std::string stageType = o.substr(0, pos);
+    if (!isStagePrefix(stageType))
         return false;
     if (pos >= o.length() || o[pos++] != '.')
         return false;
@@ -117,15 +123,6 @@ bool parseOption(std::string o, std::string& stage, std::string& option,
     return true;
 }
 
-} // unnamed namespace
-
-
-Kernel::Kernel() :
-    m_showTime(false)
-    , m_hardCoreDebug(false)
-    , m_visualize(false)
-{}
-
 
 std::ostream& operator<<(std::ostream& ostr, const Kernel& kernel)
 {
@@ -148,7 +145,7 @@ void Kernel::doSwitches(const StringList& cmdArgs, ProgramArgs& args)
     {
         std::string stageName, opName, value;
 
-        if (parseOption(cmdArgs[i], stageName, opName, value))
+        if (parseStageOption(cmdArgs[i], stageName, opName, value))
         {
             if (value.empty())
             {
@@ -273,95 +270,10 @@ int Kernel::innerRun(ProgramArgs& args)
         return -1;
     }
 
-    parseCommonOptions();
     return execute();
 }
 
 
-bool Kernel::isVisualize() const
-{
-    return m_visualize;
-}
-
-
-void Kernel::visualize(PointViewPtr view)
-{
-    PipelineManager manager;
-
-    manager.commonOptions() = m_manager.commonOptions();
-    manager.stageOptions() = m_manager.stageOptions();
-
-    BufferReader& reader =
-        static_cast<BufferReader&>(manager.makeReader("", "readers.buffer"));
-    reader.addView(view);
-
-    Stage& writer = manager.makeWriter("", "writers.pclvisualizer", reader);
-
-    PointTable table;
-    writer.prepare(table);
-    writer.execute(table);
-}
-
-/*
-void Kernel::visualize(PointViewPtr input_view, PointViewPtr output_view) const
-{
-#ifdef PDAL_HAVE_PCL_VISUALIZE
-    int viewport = 0;
-
-    // Determine XYZ bounds
-    BOX3D const& input_bounds = input_view->calculateBounds();
-    BOX3D const& output_bounds = output_view->calculateBounds();
-
-    // Convert PointView to a PCL PointCloud
-    pcl::PointCloud<pcl::PointXYZ>::Ptr input_cloud(
-        new pcl::PointCloud<pcl::PointXYZ>);
-    pclsupport::PDALtoPCD(
-        const_cast<PointViewPtr>(*input_view), *input_cloud, input_bounds);
-    pcl::PointCloud<pcl::PointXYZ>::Ptr output_cloud(
-        new pcl::PointCloud<pcl::PointXYZ>);
-    pclsupport::PDALtoPCD(
-        const_cast<PointViewPtr>(*output_view), *output_cloud, output_bounds);
-
-    // Create PCLVisualizer
-    std::shared_ptr<pcl::visualization::PCLVisualizer> p(
-        new pcl::visualization::PCLVisualizer("3D Viewer"));
-
-    // Set background to black
-    p->setBackgroundColor(0, 0, 0);
-
-    // Use Z dimension to colorize points
-    pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZ>
-        input_color(input_cloud, "z");
-    pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZ>
-        output_color(output_cloud, "z");
-
-    // Add point cloud to the viewer with the Z dimension color handler
-    p->createViewPort(0, 0, 0.5, 1, viewport);
-    p->addPointCloud<pcl::PointXYZ> (input_cloud, input_color, "cloud");
-    p->createViewPort(0.5, 0, 1, 1, viewport);
-    p->addPointCloud<pcl::PointXYZ> (output_cloud, output_color, "cloud1");
-
-    p->resetCamera();
-
-    while (!p->wasStopped())
-    {
-        p->spinOnce(100);
-        std::this_thread::sleep_for(std::chrono::microseconds(100000));
-    }
-#endif
-}
-*/
-
-
-void Kernel::parseCommonOptions()
-{
-    Options& options = m_manager.commonOptions();
-
-    if (m_visualize)
-        options.add("visualize", m_visualize);
-}
-
-
 void Kernel::outputHelp(ProgramArgs& args)
 {
     std::cout << "usage: " << "pdal " << getShortName() << " [options] " <<
@@ -382,9 +294,7 @@ void Kernel::addBasicSwitches(ProgramArgs& args)
     args.add("developer-debug",
         "Enable developer debug (don't trap exceptions)", m_hardCoreDebug);
     args.add("label", "A string to label the process with", m_label);
-
-    args.add("visualize", "Visualize result", m_visualize);
-    args.add("driver", "Override reader driver", m_driverOverride, "");
+    args.add("driver", "Override reader driver", m_driverOverride);
 }
 
 Stage& Kernel::makeReader(const std::string& inputFile, std::string driver)
@@ -433,10 +343,20 @@ Stage& Kernel::makeWriter(const std::string& outputFile, Stage& parent,
 }
 
 
-bool Kernel::test_parseOption(std::string o, std::string& stage,
+bool Kernel::test_parseStageOption(std::string o, std::string& stage,
     std::string& option, std::string& value)
 {
-    return parseOption(o, stage, option, value);
+    class TestKernel : public Kernel
+    {
+    public:
+        virtual std::string getName() const
+            { return "TestKernel"; }
+        int execute()
+            { return 0; }
+    };
+
+    TestKernel k;
+    return k.parseStageOption(o, stage, option, value);
 }
 
 } // namespace pdal
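parseOption() moves out of the unnamed namespace to become
Kernel::parseStageOption(), with the readers/writers/filters prefix test
factored into the virtual isStagePrefix().  A subclass can therefore
accept additional prefixes in stage-qualified option switches.  A
hypothetical kernel, for illustration only:

    // Hypothetical kernel recognizing one extra stage prefix;
    // getName() and execute() are the usual Kernel obligations.
    class MyKernel : public pdal::Kernel
    {
    public:
        virtual std::string getName() const
            { return "kernels.mykernel"; }
        virtual int execute()
            { return 0; }

    protected:
        virtual bool isStagePrefix(const std::string& stageType)
        {
            return stageType == "mystages" ||
                Kernel::isStagePrefix(stageType);
        }
    };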
diff --git a/pdal/Kernel.hpp b/pdal/Kernel.hpp
index 46f1265..c4f9dc3 100644
--- a/pdal/Kernel.hpp
+++ b/pdal/Kernel.hpp
@@ -75,8 +75,6 @@ public:
         StringList names = Utils::split2(getName(), '.');
         return names.size() == 2 ? names[1] : std::string();
     }
-    bool isVisualize() const;
-    void visualize(PointViewPtr view);
 
 protected:
     // this is protected; your derived class ctor will be the public entry point
@@ -92,6 +90,7 @@ protected:
         std::string driver);
     Stage& makeWriter(const std::string& outputFile, Stage& parent,
         std::string driver, Options options);
+    virtual bool isStagePrefix(const std::string& stageType);
 
 public:
     virtual void addSwitches(ProgramArgs& args)
@@ -116,20 +115,20 @@ private:
     void outputHelp(ProgramArgs& args);
     void outputVersion();
     void addBasicSwitches(ProgramArgs& args);
-    void parseCommonOptions();
 
     void doSwitches(const StringList& cmdArgs, ProgramArgs& args);
     int doStartup();
     int doExecution(ProgramArgs& args);
+    bool parseStageOption(std::string o, std::string& stage,
+        std::string& option, std::string& value);
 
-    static bool test_parseOption(std::string o, std::string& stage,
+    static bool test_parseStageOption(std::string o, std::string& stage,
         std::string& option, std::string& value);
 
     bool m_showHelp;
     bool m_showOptions;
     bool m_showTime;
     bool m_hardCoreDebug;
-    bool m_visualize;
     std::string m_label;
 
     Kernel& operator=(const Kernel&); // not implemented
diff --git a/pdal/Log.cpp b/pdal/Log.cpp
index f02982d..b06bbc8 100644
--- a/pdal/Log.cpp
+++ b/pdal/Log.cpp
@@ -43,11 +43,10 @@ namespace pdal
 
 Log::Log(std::string const& leaderString,
          std::string const& outputName)
-    : m_level(LogLevel::Error)
+    : m_level(LogLevel::Warning)
     , m_deleteStreamOnCleanup(false)
 {
 
-    makeNullStream();
     if (Utils::iequals(outputName, "stdlog"))
         m_log = &std::clog;
     else if (Utils::iequals(outputName, "stderr"))
@@ -55,7 +54,7 @@ Log::Log(std::string const& leaderString,
     else if (Utils::iequals(outputName, "stdout"))
         m_log = &std::cout;
     else if (Utils::iequals(outputName, "devnull"))
-        m_log = m_nullStream;
+        m_log = &m_nullStream;
     else
     {
         m_log = Utils::createFile(outputName);
@@ -71,7 +70,6 @@ Log::Log(std::string const& leaderString,
     , m_deleteStreamOnCleanup(false)
 {
     m_log = v;
-    makeNullStream();
     m_leaders.push(leaderString);
 }
 
@@ -84,19 +82,6 @@ Log::~Log()
         m_log->flush();
         delete m_log;
     }
-    delete m_nullStream;
-}
-
-
-void Log::makeNullStream()
-{
-#ifdef _WIN32
-    std::string nullFilename = "nul";
-#else
-    std::string nullFilename = "/dev/null";
-#endif
-
-    m_nullStream = new std::ofstream(nullFilename);
 }
 
 
@@ -121,14 +106,17 @@ std::ostream& Log::get(LogLevel level)
     const auto nativeDebug(Utils::toNative(LogLevel::Debug));
     if (incoming <= stored)
     {
-        *m_log << "(" << leader() << " "<< getLevelString(level) <<": " <<
-            incoming << "): " <<
-            std::string(incoming < nativeDebug ? 0 : incoming - nativeDebug,
-                    '\t');
+        const std::string l = leader();
+
+        *m_log << "(" << l;
+        if (l.size())
+            *m_log << " ";
+        *m_log << getLevelString(level) << ") " <<
+            std::string(incoming < nativeDebug ? 0 : incoming - nativeDebug,
+                '\t');
         return *m_log;
     }
-    return *m_nullStream;
-
+    return m_nullStream;
 }
 
 
diff --git a/pdal/Log.hpp b/pdal/Log.hpp
index e28944a..e2b44f6 100644
--- a/pdal/Log.hpp
+++ b/pdal/Log.hpp
@@ -39,6 +39,7 @@
 #include <stack>
 
 #include <pdal/pdal_internal.hpp>
+#include <pdal/util/NullOStream.hpp>
 
 // Adapted from http://drdobbs.com/cpp/201804215
 
@@ -133,17 +134,15 @@ public:
 
 protected:
     std::ostream *m_log;
-    std::ostream *m_nullStream;
 
 private:
     Log(const Log&);
     Log& operator =(const Log&);
 
-    void makeNullStream();
-
     LogLevel m_level;
     bool m_deleteStreamOnCleanup;
     std::stack<std::string> m_leaders;
+    NullOStream m_nullStream;
 };
 
 typedef std::shared_ptr<Log> LogPtr;
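The logger no longer opens /dev/null (or "nul" on Windows) just to have a
sink for suppressed messages; a NullOStream member from
pdal/util/NullOStream.hpp takes over, and the default level moves from
Error to Warning.  For illustration only, a null output stream can be
built along these lines (the actual NullOStream may differ):

    #include <ostream>
    #include <streambuf>

    // A streambuf whose overflow() reports success while dropping
    // every character, paired with an ostream writing into it.
    class NullStreamBufSketch : public std::streambuf
    {
    protected:
        virtual int overflow(int c)
            { return c; }
    };

    class NullOStreamSketch : private NullStreamBufSketch,
        public std::ostream
    {
    public:
        NullOStreamSketch() : std::ostream(this) {}
    };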
diff --git a/pdal/Options.cpp b/pdal/Options.cpp
index 4123957..75f7978 100644
--- a/pdal/Options.cpp
+++ b/pdal/Options.cpp
@@ -50,10 +50,14 @@ std::string Option::toArg() const
 
 void Option::toMetadata(MetadataNode& parent) const
 {
-    parent.add(getName(), getValue());
+    // 'user_data' nodes on Stages are JSON
+    if (!Utils::iequals(getName(), "user_data"))
+        parent.add(getName(), getValue());
+    else
+        parent.addWithType(getName(), getValue(), "json", "User JSON");
+
 }
 
-//---------------------------------------------------------------------------
 
 bool Option::nameValid(const std::string& name, bool reportError)
 {
@@ -69,6 +73,9 @@ bool Option::nameValid(const std::string& name, bool reportError)
 }
 
 
+//---------------------------------------------------------------------------
+
+
 void Options::add(const Option& option)
 {
     assert(Option::nameValid(option.getName(), true));
@@ -76,6 +83,12 @@ void Options::add(const Option& option)
 }
 
 
+void Options::add(const Options& o)
+{
+    m_options.insert(o.m_options.begin(), o.m_options.end());
+}
+
+
 void Options::addConditional(const Option& option)
 {
     assert(Option::nameValid(option.getName(), true));
@@ -84,6 +97,13 @@ void Options::addConditional(const Option& option)
 }
 
 
+void Options::addConditional(const Options& options)
+{
+    for (auto& o : options.m_options)
+        addConditional(o.second);
+}
+
+
 void Options::remove(const Option& option)
 {
     m_options.erase(option.getName());
diff --git a/pdal/Options.hpp b/pdal/Options.hpp
index fddeec7..9b549ba 100644
--- a/pdal/Options.hpp
+++ b/pdal/Options.hpp
@@ -129,7 +129,9 @@ public:
         { add(opt); }
 
     void add(const Option& option);
+    void add(const Options& options);
     void addConditional(const Option& option);
+    void addConditional(const Options& option);
 
     // if option name not present, just returns
     void remove(const Option& option);
@@ -152,7 +154,12 @@ public:
                    vs += ", ";
                vs += *vi;
             }
-            parent.add(k, vs);
+
+            // 'user_data' keys on stages and such are JSON
+            if (!Utils::iequals(k, "user_data"))
+                parent.add(k, vs);
+            else
+                parent.addWithType(k, vs, "json", "User JSON");
         }
     }
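The whole-container overloads are new: add(const Options&) copies every
entry from the other container (the underlying multimap also keeps what
was already there), while addConditional(const Options&) only adds an
option when none with that name is present.  A sketch with made-up
option names:

    pdal::Options base;
    base.add("threshold", 1.5);

    pdal::Options overrides;
    overrides.add("threshold", 2.0);
    overrides.add("verbose", true);

    // Conditional merge: "threshold" keeps 1.5, "verbose" is added.
    base.addConditional(overrides);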
 
diff --git a/pdal/PDALUtils.hpp b/pdal/PDALUtils.hpp
index ca53a7a..0d86633 100644
--- a/pdal/PDALUtils.hpp
+++ b/pdal/PDALUtils.hpp
@@ -272,7 +272,6 @@ std::ostream PDAL_DLL *createFile(const std::string& path,
 void PDAL_DLL closeFile(std::istream *in);
 void PDAL_DLL closeFile(std::ostream *out);
 bool PDAL_DLL fileExists(const std::string& path);
-std::string PDAL_DLL expandTilde(const std::string& path);
 std::vector<std::string> PDAL_DLL maybeGlob(const std::string& path);
 double PDAL_DLL computeHausdorff(PointViewPtr srcView, PointViewPtr candView);
 
diff --git a/pdal/PipelineExecutor.cpp b/pdal/PipelineExecutor.cpp
index ce96fa1..a018d86 100644
--- a/pdal/PipelineExecutor.cpp
+++ b/pdal/PipelineExecutor.cpp
@@ -76,7 +76,7 @@ std::string PipelineExecutor::getSchema() const
         throw pdal_error("Pipeline has not been executed!");
 
     std::stringstream strm;
-    MetadataNode root = m_manager.pointTable().toMetadata().clone("schema");
+    MetadataNode root = m_manager.pointTable().layout()->toMetadata().clone("schema");
     pdal::Utils::toJSON(root, strm);
     return strm.str();
 }
@@ -117,6 +117,9 @@ void PipelineExecutor::setLogStream(std::ostream& strm)
 
 void PipelineExecutor::setLogLevel(int level)
 {
+    if (level < 0 || level > 8)
+        throw pdal_error("log level must be between 0 and 8!");
+
     m_logLevel = static_cast<pdal::LogLevel>(level);
     setLogStream(m_logStream);
 }
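setLogLevel() now validates its argument instead of blindly casting it to
pdal::LogLevel.  A sketch, with the constructor form assumed rather than
shown in this hunk:

    std::string pipelineJson = /* JSON pipeline text */ "";
    pdal::PipelineExecutor exec(pipelineJson);  // ctor form assumed
    exec.setLogLevel(8);     // accepted: maximum debug verbosity
    exec.setLogLevel(42);    // throws pdal_error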
diff --git a/pdal/PipelineManager.cpp b/pdal/PipelineManager.cpp
index ea0c32a..c538f01 100644
--- a/pdal/PipelineManager.cpp
+++ b/pdal/PipelineManager.cpp
@@ -35,10 +35,15 @@
 #include <pdal/PipelineManager.hpp>
 #include <pdal/PipelineReaderJSON.hpp>
 #include <pdal/PDALUtils.hpp>
+#include <pdal/util/Algorithm.hpp>
 #include <pdal/util/FileUtils.hpp>
 
 #include "private/PipelineReaderXML.hpp"
 
+#if defined(PDAL_COMPILER_CLANG) || defined(PDAL_COMPILER_GCC)
+#  pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
+
 namespace pdal
 {
 
@@ -93,7 +98,17 @@ void PipelineManager::readPipeline(const std::string& filename)
     {
         Utils::closeFile(m_input);
         m_input = Utils::openFile(filename);
-        readPipeline(*m_input);
+        if (!m_input)
+            throw pdal_error("Can't open file '" + filename + "' as pipeline "
+                "input.");
+        try
+        {
+            readPipeline(*m_input);
+        }
+        catch (const pdal_error& err)
+        {
+            throw pdal_error(filename + ": " + err.what());
+        }
     }
 }
 
@@ -154,7 +169,8 @@ void PipelineManager::validateStageOptions() const
         const std::string& stageName = si.first;
         auto it = std::find_if(m_stages.begin(), m_stages.end(),
             [stageName](Stage *s)
-            { return (s->getName() == stageName); });
+            { return (s->getName() == stageName ||
+                "stage." + s->tag() == stageName); });
 
         // If the option stage name matches no created stage, then error.
         if (it == m_stages.end())
@@ -207,6 +223,18 @@ point_count_t PipelineManager::execute()
 }
 
 
+void PipelineManager::executeStream(FixedPointTable& table)
+{
+    validateStageOptions();
+    Stage *s = getStage();
+    if (!s)
+        return;
+
+    s->prepare(table);
+    s->execute(table);
+}
+
+
 MetadataNode PipelineManager::getMetadata() const
 {
     MetadataNode output("stages");
@@ -222,63 +250,80 @@ MetadataNode PipelineManager::getMetadata() const
 Stage& PipelineManager::makeReader(const std::string& inputFile,
     std::string driver)
 {
-    static Options nullOpts;
+    StageCreationOptions ops { inputFile, driver };
 
-    return makeReader(inputFile, driver, nullOpts);
+    return makeReader(ops);
 }
 
 
 Stage& PipelineManager::makeReader(const std::string& inputFile,
     std::string driver, Options options)
 {
-    if (driver.empty())
+    StageCreationOptions ops { inputFile, driver, nullptr, options };
+
+    return makeReader(ops);
+}
+
+
+Stage& PipelineManager::makeReader(StageCreationOptions& o)
+{
+    if (o.m_driver.empty())
     {
-        driver = StageFactory::inferReaderDriver(inputFile);
-        if (driver.empty())
+        o.m_driver = StageFactory::inferReaderDriver(o.m_filename);
+        if (o.m_driver.empty())
             throw pdal_error("Cannot determine reader for input file: " +
-                inputFile);
+                o.m_filename);
     }
-    if (!inputFile.empty())
-        options.replace("filename", inputFile);
+    if (!o.m_filename.empty())
+        o.m_options.replace("filename", o.m_filename);
 
-    Stage& reader = addReader(driver);
-    setOptions(reader, options);
+    Stage& reader = addReader(o.m_driver);
+    reader.setTag(o.m_tag);
+    setOptions(reader, o.m_options);
     return reader;
 }
 
 
 Stage& PipelineManager::makeFilter(const std::string& driver)
 {
-    static Options nullOps;
+    StageCreationOptions ops { "", driver };
 
-    Stage& filter = addFilter(driver);
-    setOptions(filter, nullOps);
-    return filter;
+    return makeFilter(ops);
 }
 
 
 Stage& PipelineManager::makeFilter(const std::string& driver, Options options)
 {
-    Stage& filter = addFilter(driver);
-    setOptions(filter, options);
-    return filter;
+    StageCreationOptions ops { "", driver, nullptr, options };
+
+    return makeFilter(ops);
 }
 
 
 Stage& PipelineManager::makeFilter(const std::string& driver, Stage& parent)
 {
-    static Options nullOps;
+    StageCreationOptions ops { "", driver, &parent };
 
-    return makeFilter(driver, parent, nullOps);
+    return makeFilter(ops);
 }
 
 
 Stage& PipelineManager::makeFilter(const std::string& driver, Stage& parent,
     Options options)
 {
-    Stage& filter = addFilter(driver);
-    setOptions(filter, options);
-    filter.setInput(parent);
+    StageCreationOptions ops { "", driver, &parent, options };
+
+    return makeFilter(ops);
+}
+
+
+Stage& PipelineManager::makeFilter(StageCreationOptions& o)
+{
+    Stage& filter = addFilter(o.m_driver);
+    filter.setTag(o.m_tag);
+    setOptions(filter, o.m_options);
+    if (o.m_parent)
+        filter.setInput(*o.m_parent);
     return filter;
 }
 
@@ -286,44 +331,57 @@ Stage& PipelineManager::makeFilter(const std::string& driver, Stage& parent,
 Stage& PipelineManager::makeWriter(const std::string& outputFile,
     std::string driver)
 {
-    static Options nullOps;
+    StageCreationOptions ops { outputFile, driver };
 
-    return makeWriter(outputFile, driver, nullOps);
+    return makeWriter(ops);
 }
 
+
 Stage& PipelineManager::makeWriter(const std::string& outputFile,
-    std::string driver, Options options)
+    std::string driver, Stage& parent)
 {
-    if (driver.empty())
-    {
-        driver = StageFactory::inferWriterDriver(outputFile);
-        if (driver.empty())
-            throw pdal_error("Cannot determine writer for output file: " +
-                outputFile);
-    }
-
-    if (!outputFile.empty())
-        options.replace("filename", outputFile);
+    StageCreationOptions ops { outputFile, driver, &parent };
 
-    auto& writer = addWriter(driver);
-    setOptions(writer, options);
-    return writer;
+    return makeWriter(ops);
 }
 
 
 Stage& PipelineManager::makeWriter(const std::string& outputFile,
-    std::string driver, Stage& parent)
+    std::string driver, Stage& parent, Options options)
 {
-    static Options nullOps;
+    StageCreationOptions ops { outputFile, driver, &parent, options };
 
-    return makeWriter(outputFile, driver, parent, nullOps);
+    return makeWriter(ops);
 }
 
+
 Stage& PipelineManager::makeWriter(const std::string& outputFile,
-    std::string driver, Stage& parent, Options options)
+    std::string driver, Options options)
 {
-    Stage& writer = makeWriter(outputFile, driver, options);
-    writer.setInput(parent);
+    StageCreationOptions ops { outputFile, driver, nullptr, options };
+
+    return makeWriter(ops);
+}
+
+
+Stage& PipelineManager::makeWriter(StageCreationOptions& o)
+{
+    if (o.m_driver.empty())
+    {
+        o.m_driver = StageFactory::inferWriterDriver(o.m_filename);
+        if (o.m_driver.empty())
+            throw pdal_error("Cannot determine writer for output file: " +
+                o.m_filename);
+    }
+
+    if (!o.m_filename.empty())
+        o.m_options.replace("filename", o.m_filename);
+
+    auto& writer = addWriter(o.m_driver);
+    writer.setTag(o.m_tag);
+    setOptions(writer, o.m_options);
+    if (o.m_parent)
+        writer.setInput(*o.m_parent);
     return writer;
 }
 
@@ -339,20 +397,65 @@ void PipelineManager::setOptions(Stage& stage, const Options& addOps)
     stage.addOptions(addOps);
 
     // Apply options provided on the command line, overriding others.
-    Options& ops = stageOptions(stage);
+    Options ops = stageOptions(stage);
     stage.removeOptions(ops);
     stage.addOptions(ops);
 }
 
 
-Options& PipelineManager::stageOptions(Stage& stage)
+Options PipelineManager::stageOptions(Stage& stage)
 {
-    static Options nullOpts;
+    Options opts;
 
+    std::string tag = stage.tag();
+    if (tag.size())
+    {
+        tag = "stage." + tag;
+        auto oi = m_stageOptions.find(tag);
+        if (oi != m_stageOptions.end())
+            opts.add(oi->second);
+    }
+    // Tag-based options override stage-name-based options, so
+    // we call addConditional.
     auto oi = m_stageOptions.find(stage.getName());
-    if (oi == m_stageOptions.end())
-        return nullOpts;
-    return oi->second;
+    if (oi != m_stageOptions.end())
+        opts.addConditional(oi->second);
+    return opts;
+}
+
+
+std::vector<Stage *> PipelineManager::roots() const
+{
+    std::vector<Stage *> rlist;
+
+    for (Stage *s : m_stages)
+        if (s->getInputs().empty())
+            rlist.push_back(s);
+    return rlist;
+}
+
+
+std::vector<Stage *> PipelineManager::leaves() const
+{
+    std::vector<Stage *> llist = m_stages;
+    for (Stage *s : m_stages)
+        for (Stage *ss : s->getInputs())
+           Utils::remove(llist, ss);
+    return llist;
+}
+
+
+void PipelineManager::replace(Stage *sOld, Stage *sNew)
+{
+    Utils::remove(m_stages, sNew);
+    for (Stage * & s : m_stages)
+    {
+        if (s == sOld)
+            s = sNew;
+        for (Stage * & ss : s->getInputs())
+            if (ss == sOld)
+                ss = sNew;
+    }
 }
 
 } // namespace pdal
diff --git a/pdal/PipelineManager.hpp b/pdal/PipelineManager.hpp
index e3c159c..6a88de5 100644
--- a/pdal/PipelineManager.hpp
+++ b/pdal/PipelineManager.hpp
@@ -45,6 +45,15 @@ namespace pdal
 
 class Options;
 
+struct StageCreationOptions
+{
+    std::string m_filename;
+    std::string m_driver;
+    Stage *m_parent;
+    Options m_options;
+    std::string m_tag;
+};
+
 class PDAL_DLL PipelineManager
 {
     FRIEND_TEST(json, tags);
@@ -52,17 +61,11 @@ public:
     PipelineManager() : m_tablePtr(new PointTable()), m_table(*m_tablePtr),
             m_progressFd(-1), m_input(nullptr)
         {}
-    PipelineManager(int progressFd) : m_tablePtr(new PointTable()),
-            m_table(*m_tablePtr), m_progressFd(progressFd), m_input(nullptr)
-        {}
-    PipelineManager(PointTableRef table) : m_table(table), m_progressFd(-1),
-            m_input(nullptr)
-        {}
-    PipelineManager(PointTableRef table, int progressFd) : m_table(table),
-            m_progressFd(progressFd), m_input(nullptr)
-        {}
     ~PipelineManager();
 
+    void setProgressFd(int fd)
+        { m_progressFd = fd; }
+
     void readPipeline(std::istream& input);
     void readPipeline(const std::string& filename);
 
@@ -76,12 +79,14 @@ public:
     Stage& makeReader(const std::string& inputFile, std::string driver);
     Stage& makeReader(const std::string& inputFile, std::string driver,
         Options options);
+    Stage& makeReader(StageCreationOptions& opts);
 
     Stage& makeFilter(const std::string& driver);
     Stage& makeFilter(const std::string& driver, Options options);
     Stage& makeFilter(const std::string& driver, Stage& parent);
     Stage& makeFilter(const std::string& driver, Stage& parent,
         Options options);
+    Stage& makeFilter(StageCreationOptions& ops);
 
     Stage& makeWriter(const std::string& outputFile, std::string driver);
     Stage& makeWriter(const std::string& outputFile, std::string driver,
@@ -90,6 +95,7 @@ public:
         Stage& parent);
     Stage& makeWriter(const std::string& outputFile, std::string driver,
         Stage& parent, Options options);
+    Stage& makeWriter(StageCreationOptions& ops);
 
     // returns true if the pipeline endpoint is a writer
     bool isWriterPipeline() const
@@ -107,6 +113,7 @@ public:
     QuickInfo preview() const;
     void prepare() const;
     point_count_t execute();
+    void executeStream(FixedPointTable& table);
     void validateStageOptions() const;
 
     // Get the resulting point views.
@@ -122,10 +129,13 @@ public:
         { return m_commonOptions; }
     OptionsMap& stageOptions()
         { return m_stageOptions; }
-    Options& stageOptions(Stage& stage);
+    std::vector<Stage *> roots() const;
+    std::vector<Stage *> leaves() const;
+    void replace(Stage *sOld, Stage *sNew);
 
 private:
     void setOptions(Stage& stage, const Options& addOps);
+    Options stageOptions(Stage& stage);
 
     StageFactory m_factory;
     std::unique_ptr<PointTable> m_tablePtr;
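All makeReader/makeFilter/makeWriter overloads now funnel into single
implementations keyed off the StageCreationOptions aggregate, which is
also how a stage's tag travels from the pipeline parser onto the stage.
Direct use, sketched with a placeholder filename and option:

    pdal::PipelineManager mgr;

    pdal::Options opts;
    opts.add("count", 100);  // placeholder option

    // Field order: filename, driver, parent, options, tag.
    pdal::StageCreationOptions ops { "input.las", "readers.las",
        nullptr, opts, "source" };
    pdal::Stage& reader = mgr.makeReader(ops);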
diff --git a/pdal/PipelineReaderJSON.cpp b/pdal/PipelineReaderJSON.cpp
index 2d2ca9e..5a5dbb9 100644
--- a/pdal/PipelineReaderJSON.cpp
+++ b/pdal/PipelineReaderJSON.cpp
@@ -39,6 +39,7 @@
 #include <pdal/PluginManager.hpp>
 #include <pdal/Options.hpp>
 #include <pdal/util/FileUtils.hpp>
+#include <pdal/util/Algorithm.hpp>
 #include <pdal/util/Utils.hpp>
 
 #include <json/json.h>
@@ -99,7 +100,8 @@ void PipelineReaderJSON::parsePipeline(Json::Value& tree)
 
             for (const std::string& path : files)
             {
-                s = &m_manager.makeReader(path, type, options);
+                StageCreationOptions ops { path, type, nullptr, options, tag };
+                s = &m_manager.makeReader(ops);
 
                 if (specifiedInputs.size())
                     throw pdal_error("JSON pipeline: Inputs not permitted for "
@@ -109,7 +111,8 @@ void PipelineReaderJSON::parsePipeline(Json::Value& tree)
         }
         else if (type.empty() || Utils::startsWith(type, "writers."))
         {
-            s = &m_manager.makeWriter(filename, type, options);
+            StageCreationOptions ops { filename, type, nullptr, options, tag };
+            s = &m_manager.makeWriter(ops);
             for (Stage *ts : inputs)
                 s->setInput(*ts);
             inputs.clear();
@@ -118,7 +121,8 @@ void PipelineReaderJSON::parsePipeline(Json::Value& tree)
         {
             if (filename.size())
                 options.add("filename", filename);
-            s = &m_manager.makeFilter(type, options);
+            StageCreationOptions ops { "", type, nullptr, options, tag };
+            s = &m_manager.makeFilter(ops);
             for (Stage *ts : inputs)
                 s->setInput(*ts);
             inputs.clear();
@@ -241,7 +245,7 @@ std::string PipelineReaderJSON::extractTag(Json::Value& node, TagMap& tags)
                         tag + "'.");
             }
             else
-                throw pdal_error("JSON pipeline: 'tag' must be "
+                throw pdal_error("JSON pipeline: tag must be "
                     "specified as a string.");
         }
         node.removeMember("tag");
@@ -249,6 +253,9 @@ std::string PipelineReaderJSON::extractTag(Json::Value& node, TagMap& tags)
             throw pdal_error("JSON pipeline: found duplicate 'tag' "
                "entry in stage definition.");
     }
+    if (Utils::contains(tag, '.'))
+        throw pdal_error("JSON pipeline: Stage tag name can't contain "
+            "'.' character.");
     return tag;
 }
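Tags written in a JSON pipeline are now carried through
StageCreationOptions onto the created stages, and a tag may not contain
'.' because tagged stages are addressed as "stage.<tag>" when stage
options are matched.  An illustrative pipeline fragment:

    {
      "pipeline" : [
        {
          "type" : "readers.las",
          "filename" : "input.las",
          "tag" : "source"
        },
        "output.las"
      ]
    }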
 
diff --git a/pdal/PipelineReaderXML.cpp b/pdal/PipelineReaderXML.cpp
index a0e21e6..6e08808 100644
--- a/pdal/PipelineReaderXML.cpp
+++ b/pdal/PipelineReaderXML.cpp
@@ -438,17 +438,49 @@ void PipelineReaderXML::parseElement_Pipeline(const ptree& tree)
     }
 }
 
+namespace
+{
 
-void PipelineReaderXML::readPipeline(std::istream& input)
+class pipeline_error
+{};
+
+} // unnamed namespace
+
+void PipelineReaderXML::baseReadPipeline(std::istream& input)
 {
     ptree tree;
 
-    xml_parser::read_xml(input, tree, xml_parser::no_comments);
+    try
+    {
+        xml_parser::read_xml(input, tree, xml_parser::no_comments);
+
+        pdalboost::optional<ptree> opt(tree.get_child_optional("Pipeline"));
+        if (!opt.is_initialized())
+            throw pdal_error("PipelineReaderXML: root element is not Pipeline");
+        parseElement_Pipeline(opt.get());
+    }
+    catch (const pdal_error&)
+    {
+        throw;
+    }
+    catch (...)
+    {
+        throw pipeline_error();
+    }
+}
+
 
-    pdalboost::optional<ptree> opt(tree.get_child_optional("Pipeline"));
-    if (!opt.is_initialized())
-        throw pdal_error("PipelineReaderXML: root element is not Pipeline");
-    parseElement_Pipeline(opt.get());
+void PipelineReaderXML::readPipeline(std::istream& input)
+{
+    try
+    {
+        baseReadPipeline(input);
+    }
+    catch (const pipeline_error&)
+    {
+        throw pdal_error("Unable to process pipeline from stream. "
+            "XML is invalid.");
+    }
 }
 
 
@@ -456,17 +488,14 @@ void PipelineReaderXML::readPipeline(const std::string& filename)
 {
     m_inputXmlFile = filename;
 
     std::istream* input = Utils::openFile(filename);
 
     try
     {
-        readPipeline(*input);
+        baseReadPipeline(*input);
     }
-    catch (const pdal_error& )
-    {
-        throw;
-    }
-    catch (...)
+    catch (const pipeline_error&)
     {
         Utils::closeFile(input);
         std::ostringstream oss;
diff --git a/pdal/PipelineWriter.cpp b/pdal/PipelineWriter.cpp
index 0e787a0..91fb74b 100644
--- a/pdal/PipelineWriter.cpp
+++ b/pdal/PipelineWriter.cpp
@@ -44,7 +44,7 @@ namespace pdal
 namespace
 {
 
-void generateTags(Stage *stage, PipelineWriter::TagMap& tags)
+std::string generateTag(Stage *stage, PipelineWriter::TagMap& tags)
 {
     auto tagExists = [tags](const std::string& tag)
     {
@@ -56,16 +56,25 @@ void generateTags(Stage *stage, PipelineWriter::TagMap& tags)
         return false;
     };
 
-    for (Stage *s : stage->getInputs())
-        generateTags(s, tags);
-    std::string tag;
-    for (size_t i = 1; ; ++i)
+    std::string tag = stage->tag();
+    if (tag.empty())
     {
-        tag = stage->tagName() + std::to_string(i);
-        if (!tagExists(tag))
-            break;
+        for (size_t i = 1; ; ++i)
+        {
+            tag = stage->getName() + std::to_string(i);
+            if (!tagExists(tag))
+                break;
+        }
+        tag = Utils::replaceAll(tag, ".", "_");
     }
-    tags[stage] = tag;
+    return tag;
+}
+
+void generateTags(Stage *stage, PipelineWriter::TagMap& tags)
+{
+    for (Stage *s : stage->getInputs())
+        generateTags(s, tags);
+    tags[stage] = generateTag(stage, tags);
 }
 
 } // anonymous namespace
diff --git a/pdal/PluginManager.cpp b/pdal/PluginManager.cpp
index a242e07..8758a1f 100644
--- a/pdal/PluginManager.cpp
+++ b/pdal/PluginManager.cpp
@@ -60,7 +60,7 @@ static PluginManager s_instance;
 
 #if defined(__APPLE__) && defined(__MACH__)
     const std::string dynamicLibraryExtension(".dylib");
-#elif defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__) || defined(__GNU__)
+#elif defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__) || defined(__FreeBSD_kernel__) || defined(__GNU__)
     const std::string dynamicLibraryExtension(".so");
 #elif defined _WIN32
     const std::string dynamicLibraryExtension(".dll");
diff --git a/pdal/PointLayout.cpp b/pdal/PointLayout.cpp
index d3da017..8aa436f 100644
--- a/pdal/PointLayout.cpp
+++ b/pdal/PointLayout.cpp
@@ -318,5 +318,24 @@ Dimension::Type PointLayout::resolveType(Dimension::Type t1,
     }
 }
 
+MetadataNode PointLayout::toMetadata() const
+{
+    MetadataNode root;
+
+    for (const auto& id : dims())
+    {
+        MetadataNode dim("dimensions");
+        dim.add("name", dimName(id));
+        Dimension::Type t = dimType(id);
+        dim.add("type", Dimension::toName(Dimension::base(t)));
+        dim.add("size", dimSize(id));
+        root.addList(dim);
+    }
+
+    return root;
+}
+
 } // namespace pdal
 
+
diff --git a/pdal/PointLayout.hpp b/pdal/PointLayout.hpp
index aa23eb5..0e6f34d 100644
--- a/pdal/PointLayout.hpp
+++ b/pdal/PointLayout.hpp
@@ -41,6 +41,7 @@
 
 #include <pdal/DimDetail.hpp>
 #include <pdal/DimType.hpp>
+#include <pdal/Metadata.hpp>
 
 namespace pdal
 {
@@ -225,6 +226,9 @@ public:
     */
     PDAL_DLL const Dimension::Detail *dimDetail(Dimension::Id id) const;
 
+
+    PDAL_DLL MetadataNode toMetadata() const;
+
 private:
     PDAL_DLL virtual bool update(Dimension::Detail dd, const std::string& name);
 
diff --git a/pdal/PointTable.cpp b/pdal/PointTable.cpp
index e9e0214..556d601 100644
--- a/pdal/PointTable.cpp
+++ b/pdal/PointTable.cpp
@@ -47,6 +47,19 @@ MetadataNode BasePointTable::privateMetadata(const std::string& name)
 }
 
 
+void BasePointTable::addSpatialReference(const SpatialReference& spatialRef)
+{
+    auto it = std::find(m_spatialRefs.begin(), m_spatialRefs.end(), spatialRef);
+
+    // If not found, add to the beginning.
+    if (it == m_spatialRefs.end())
+        m_spatialRefs.push_front(spatialRef);
+    // If not the first element, move the found element to the front.
+    else if (it != m_spatialRefs.begin())
+        m_spatialRefs.splice(m_spatialRefs.begin(), m_spatialRefs, it);
+}
+
+
 void SimplePointTable::setFieldInternal(Dimension::Id id, PointId idx,
     const void *value)
 {
@@ -95,20 +108,7 @@ char *PointTable::getPoint(PointId idx)
 
 MetadataNode BasePointTable::toMetadata() const
 {
-    const PointLayoutPtr l(layout());
-    MetadataNode root;
-
-    for (const auto& id : l->dims())
-    {
-        MetadataNode dim("dimensions");
-        dim.add("name", l->dimName(id));
-        Dimension::Type t = l->dimType(id);
-        dim.add("type", Dimension::toName(Dimension::base(t)));
-        dim.add("size", l->dimSize(id));
-        root.addList(dim);
-    }
-
-    return root;
+    return layout()->toMetadata();
 }
 
 } // namespace pdal
diff --git a/pdal/PointTable.hpp b/pdal/PointTable.hpp
index 211a381..bcc0a86 100644
--- a/pdal/PointTable.hpp
+++ b/pdal/PointTable.hpp
@@ -34,7 +34,7 @@
 
 #pragma once
 
-#include <set>
+#include <list>
 #include <vector>
 
 #include "pdal/SpatialReference.hpp"
@@ -48,6 +48,7 @@ namespace pdal
 
 class PDAL_DLL BasePointTable : public PointContainer
 {
+    FRIEND_TEST(PointTable, srs);
     friend class PointView;
 
 protected:
@@ -75,10 +76,9 @@ public:
     }
     void clearSpatialReferences()
         { m_spatialRefs.clear(); }
-    void addSpatialReference(const SpatialReference& srs)
-        { m_spatialRefs.insert(srs); }
+    void addSpatialReference(const SpatialReference& srs);
     bool spatialReferenceUnique() const
-        { return m_spatialRefs.size() == 1; }
+        { return m_spatialRefs.size() <= 1; }
     SpatialReference spatialReference() const
     {
         return spatialReferenceUnique() ? anySpatialReference() :
@@ -103,7 +103,7 @@ protected:
 
 protected:
     MetadataPtr m_metadata;
-    std::set<SpatialReference> m_spatialRefs;
+    std::list<SpatialReference> m_spatialRefs;
     PointLayout& m_layoutRef;
 };
 typedef BasePointTable& PointTableRef;
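With the SRS container switched from std::set to std::list plus
move-to-front insertion, ordering becomes meaningful: the front holds the
most recently added reference, and Stage::execute() (below) adds view
SRSes in reverse so the first view's SRS ends up first.
spatialReferenceUnique() now also reports true for an empty table.
Sketch of the semantics, with placeholder references:

    pdal::PointTable table;
    pdal::SpatialReference srsA("EPSG:4326");
    pdal::SpatialReference srsB("EPSG:2056");

    table.addSpatialReference(srsA);   // list: A
    table.addSpatialReference(srsB);   // list: B A
    table.addSpatialReference(srsA);   // list: A B (spliced to front)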
diff --git a/pdal/PointView.hpp b/pdal/PointView.hpp
index bf0bccf..3031b0b 100644
--- a/pdal/PointView.hpp
+++ b/pdal/PointView.hpp
@@ -56,7 +56,7 @@ namespace pdal
 {
 namespace plang
 {
-    class BufferedInvocation;
+    class Invocation;
 }
 
 struct PointViewLess;
@@ -68,7 +68,7 @@ typedef std::set<PointViewPtr, PointViewLess> PointViewSet;
 
 class PDAL_DLL PointView : public PointContainer
 {
-    friend class plang::BufferedInvocation;
+    friend class plang::Invocation;
     friend class PointIdxRef;
     friend struct PointViewLess;
 public:
diff --git a/pdal/PointViewIter.hpp b/pdal/PointViewIter.hpp
index c519292..0c387be 100644
--- a/pdal/PointViewIter.hpp
+++ b/pdal/PointViewIter.hpp
@@ -113,7 +113,7 @@ public:
     typedef PointIdxRef reference;
     typedef void * pointer;
 
-
+    PointViewIter() {}
     PointViewIter(PointView *buf, PointId id) : m_buf(buf), m_id(id)
     {}
 
@@ -134,7 +134,7 @@ public:
         { return PointViewIter(m_buf, m_id - n); }
     PointViewIter operator-=(const difference_type& n)
         { m_id -= n; return *this; }
-    difference_type operator-(const PointViewIter& i)
+    difference_type operator-(const PointViewIter& i) const
         { return m_id - i.m_id; }
 
     bool operator==(const PointViewIter& i)
diff --git a/pdal/Polygon.cpp b/pdal/Polygon.cpp
index 186c0b5..caa3305 100644
--- a/pdal/Polygon.cpp
+++ b/pdal/Polygon.cpp
@@ -62,11 +62,6 @@ Polygon::Polygon(const Polygon& input)
 {
 }
 
-Polygon::Polygon(const Geometry& input)
-    : Geometry(input)
-{
-}
-
 
 Polygon& Polygon::operator=(const Polygon& input)
 {
@@ -176,6 +171,9 @@ void Polygon::initializeFromBounds(const BOX3D& box)
 
 Polygon Polygon::transform(const SpatialReference& ref) const
 {
+    if (ref.empty() && m_srs.empty())
+        return *this;
+
     if (m_srs.empty())
         throw pdal_error("Polygon::transform failed due to m_srs being empty");
     if (ref.empty())
@@ -185,7 +183,7 @@ Polygon Polygon::transform(const SpatialReference& ref) const
     gdal::SpatialRef toRef(ref.getWKT());
     gdal::Geometry geom(wkt(12, true), fromRef);
     geom.transform(toRef);
-    return Geometry(geom.wkt(), ref);
+    return Polygon(geom.wkt(), ref);
 }
 
 
@@ -250,7 +248,7 @@ Polygon Polygon::simplify(double distance_tolerance,
 
     GEOSGeometry* o = GEOSGeom_createCollection_r(m_geoserr.ctx(), GEOS_MULTIPOLYGON,
         geometries.data(), geometries.size());
-    Geometry p(o, m_srs);
+    Polygon p(o, m_srs);
     GEOSGeom_destroy_r(m_geoserr.ctx(), smoothed);
     GEOSGeom_destroy_r(m_geoserr.ctx(), o);
 
@@ -267,7 +265,7 @@ double Polygon::area() const
 }
 
 
-bool Polygon::covers(PointRef& ref) const
+bool Polygon::covers(const PointRef& ref) const
 {
     GEOSCoordSequence* coords = GEOSCoordSeq_create_r(m_geoserr.ctx(), 1, 3);
     if (!coords)
diff --git a/pdal/Polygon.hpp b/pdal/Polygon.hpp
index dcd2487..be762ee 100644
--- a/pdal/Polygon.hpp
+++ b/pdal/Polygon.hpp
@@ -52,10 +52,9 @@ public:
     Polygon();
     Polygon(const std::string& wkt_or_json,
            SpatialReference ref = SpatialReference());
+    Polygon(const Polygon&);
     Polygon(const BOX2D&);
     Polygon(const BOX3D&);
-    Polygon(const Polygon&);
-    Polygon(const Geometry&);
     Polygon(GEOSGeometry* g, const SpatialReference& srs);
     Polygon(OGRGeometryH g, const SpatialReference& srs);
     Polygon& operator=(const Polygon&);
@@ -68,7 +67,7 @@ public:
     Polygon transform(const SpatialReference& ref) const;
     double area() const;
 
-    bool covers(PointRef& ref) const;
+    bool covers(const PointRef& ref) const;
     bool equal(const Polygon& p) const;
     bool covers(const Polygon& p) const;
     bool overlaps(const Polygon& p) const;
diff --git a/pdal/Segmentation.cpp b/pdal/Segmentation.cpp
new file mode 100644
index 0000000..4c89f7a
--- /dev/null
+++ b/pdal/Segmentation.cpp
@@ -0,0 +1,146 @@
+/******************************************************************************
+ * Copyright (c) 2016-2017, Bradley J. Chambers (brad.chambers at gmail.com)
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+ *       names of its contributors may be used to endorse or promote
+ *       products derived from this software without specific prior
+ *       written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ ****************************************************************************/
+
+#include <pdal/PDALUtils.hpp>
+
+#include <pdal/KDIndex.hpp>
+#include <pdal/PointView.hpp>
+#include <pdal/Segmentation.hpp>
+#include <pdal/pdal_types.hpp>
+
+#include "../filters/private/DimRange.hpp"
+
+#include <vector>
+
+namespace pdal
+{
+
+namespace Segmentation
+{
+
+std::vector<std::vector<PointId>> extractClusters(PointView& view,
+                                                  uint64_t min_points,
+                                                  uint64_t max_points,
+                                                  double tolerance)
+{
+    // Index the incoming PointView for subsequent radius searches.
+    KD3Index kdi(view);
+    kdi.build();
+
+    // Create variables to track PointIds that have already been added to
+    // clusters and to build the list of cluster indices.
+    std::vector<PointId> processed(view.size(), 0);
+    std::vector<std::vector<PointId>> clusters;
+
+    for (PointId i = 0; i < view.size(); ++i)
+    {
+        // Points can only belong to a single cluster.
+        if (processed[i])
+            continue;
+
+        // Initialize list of indices belonging to current cluster, marking the
+        // seed point as processed.
+        std::vector<PointId> seed_queue;
+        size_t sq_idx = 0;
+        seed_queue.push_back(i);
+        processed[i] = 1;
+
+        // Check each point in the cluster for additional neighbors within the
+        // given tolerance, remembering that the list can grow if we add points
+        // to the cluster.
+        while (sq_idx < seed_queue.size())
+        {
+            // Find neighbors of the next cluster point.
+            PointId j = seed_queue[sq_idx];
+            std::vector<PointId> ids = kdi.radius(j, tolerance);
+
+            // The case where the only neighbor is the query point.
+            if (ids.size() == 1)
+            {
+                sq_idx++;
+                continue;
+            }
+
+            // Skip neighbors that already belong to a cluster and add the rest
+            // to this cluster.
+            for (auto const& k : ids)
+            {
+                if (processed[k])
+                    continue;
+                seed_queue.push_back(k);
+                processed[k] = 1;
+            }
+
+            sq_idx++;
+        }
+
+        // Keep clusters that are within the min/max number of points.
+        if (seed_queue.size() >= min_points && seed_queue.size() <= max_points)
+            clusters.push_back(seed_queue);
+    }
+
+    return clusters;
+}
+
+void ignoreDimRange(DimRange dr, PointViewPtr input, PointViewPtr keep,
+                    PointViewPtr ignore)
+{
+    PointRef point(*input, 0);
+    for (PointId i = 0; i < input->size(); ++i)
+    {
+        point.setPointId(i);
+        if (dr.valuePasses(point.getFieldAs<double>(dr.m_id)))
+            ignore->appendPoint(*input, i);
+        else
+            keep->appendPoint(*input, i);
+    }
+}
+
+void segmentLastReturns(PointViewPtr input, PointViewPtr last,
+                        PointViewPtr other)
+{
+    PointRef point(*input, 0);
+    for (PointId i = 0; i < input->size(); ++i)
+    {
+        point.setPointId(i);
+        if (point.getFieldAs<uint8_t>(Dimension::Id::ReturnNumber) ==
+            point.getFieldAs<uint8_t>(Dimension::Id::NumberOfReturns))
+            last->appendPoint(*input, i);
+        else
+            other->appendPoint(*input, i);
+    }
+}
+
+} // namespace Segmentation
+} // namespace pdal
diff --git a/plugins/cpd/kernel/Cpd.hpp b/pdal/Segmentation.hpp
similarity index 55%
copy from plugins/cpd/kernel/Cpd.hpp
copy to pdal/Segmentation.hpp
index 742da68..1744a56 100644
--- a/plugins/cpd/kernel/Cpd.hpp
+++ b/pdal/Segmentation.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * Copyright (c) 2014, Pete Gadomski (pete.gadomski at gmail.com)
+ * Copyright (c) 2016-2017, Bradley J. Chambers (brad.chambers at gmail.com)
  *
  * All rights reserved.
  *
@@ -34,40 +34,45 @@
 
 #pragma once
 
-#include <cpd/matrix.hpp>
-
-#include <pdal/Kernel.hpp>
 #include <pdal/pdal_export.hpp>
+#include <pdal/pdal_types.hpp>
+
+#include "../filters/private/DimRange.hpp"
+
+#include <vector>
 
 namespace pdal
 {
 
-class PDAL_DLL CpdKernel : public Kernel
+class PointView;
+
+namespace Segmentation
 {
-public:
-    static void *create();
-    static int32_t destroy(void *);
-    std::string getName() const;
-    int execute();
 
-private:
-    CpdKernel() : Kernel() {};
-    virtual void addSwitches(ProgramArgs& args);
-    cpd::Matrix readFile(const std::string& filename);
+/**
+  Extract clusters of points from input PointView.
+
+  For each point, find neighbors within a given tolerance (Euclidean distance).
+  If a neighbor already belongs to another cluster, skip it. Otherwise, add it
+  to the current cluster. Recursively visit newly added cluster points, looking
+  for neighbors to add to the cluster.
+
+  \param[in] view the input PointView.
+  \param[in] min_points the minimum number of points in a cluster.
+  \param[in] max_points the maximum number of points in a cluster.
+  \param[in] tolerance the tolerance for adding points to a cluster.
+  \returns a vector of clusters (themselves vectors of PointIds).
+*/
+PDAL_DLL std::vector<std::vector<PointId>> extractClusters(PointView& view,
+                                                           uint64_t min_points,
+                                                           uint64_t max_points,
+                                                           double tolerance);
+
+PDAL_DLL void ignoreDimRange(DimRange dr, PointViewPtr input, PointViewPtr keep,
+                             PointViewPtr ignore);
 
-    std::string m_method;
-    std::string m_filex;
-    std::string m_filey;
-    std::string m_output;
-    double m_tolerance;
-    size_t m_max_it;
-    double m_outliers;
-    bool m_no_reflections;
-    bool m_allow_scaling;
-    double m_beta;
-    double m_lambda;
-    BOX3D m_bounds;
-    double m_sigma2;
-};
+PDAL_DLL void segmentLastReturns(PointViewPtr input, PointViewPtr last,
+                                 PointViewPtr other);
 
+} // namespace Segmentation
 } // namespace pdal
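A usage sketch for the new cluster extraction, assuming `view` is a
PointViewPtr and with arbitrary limits and tolerance:

    // Keep clusters of 10..10000 points whose members are linked by
    // neighbors within 1.0 units.
    std::vector<std::vector<pdal::PointId>> clusters =
        pdal::Segmentation::extractClusters(*view, 10, 10000, 1.0);

    for (auto const& c : clusters)
        std::clog << "cluster with " << c.size() << " points\n";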
diff --git a/pdal/SpatialReference.cpp b/pdal/SpatialReference.cpp
index 8626535..c11b0ac 100644
--- a/pdal/SpatialReference.cpp
+++ b/pdal/SpatialReference.cpp
@@ -75,6 +75,9 @@ bool SpatialReference::empty() const
 bool SpatialReference::valid() const
 {
     OGRSpatialReferenceH current = OSRNewSpatialReference(m_wkt.c_str());
+    if (!current)
+        return false;
+
     OGRErr err = OSRValidate(current);
     OSRDestroySpatialReference(current);
     return err == OGRERR_NONE;
@@ -155,6 +158,11 @@ std::string SpatialReference::getVertical() const
 
     OGRSpatialReference* poSRS =
         (OGRSpatialReference*)OSRNewSpatialReference(m_wkt.c_str());
+
+    // Above can fail if m_wkt is bad.
+    if (!poSRS)
+        return tmp;
+
     char *pszWKT = NULL;
 
     OGR_SRSNode* node = poSRS->GetAttrNode("VERT_CS");
@@ -243,8 +251,13 @@ std::string SpatialReference::getHorizontalUnits() const
 
 bool SpatialReference::equals(const SpatialReference& input) const
 {
+    if (getWKT() == input.getWKT())
+        return true;
+
     OGRSpatialReferenceH current = OSRNewSpatialReference(getWKT().c_str());
     OGRSpatialReferenceH other = OSRNewSpatialReference(input.getWKT().c_str());
+    if (!current || !other)
+        return false;
 
     int output = OSRIsSame(current, other);
     OSRDestroySpatialReference(current);
@@ -276,6 +289,9 @@ const std::string& SpatialReference::getName() const
 bool SpatialReference::isGeographic() const
 {
     OGRSpatialReferenceH current = OSRNewSpatialReference(m_wkt.c_str());
+    if (!current)
+        return false;
+
     bool output = OSRIsGeographic(current);
     OSRDestroySpatialReference(current);
     return output;
@@ -285,6 +301,9 @@ bool SpatialReference::isGeographic() const
 bool SpatialReference::isGeocentric() const
 {
     OGRSpatialReferenceH current = OSRNewSpatialReference(m_wkt.c_str());
+    if (!current)
+        return false;
+
     bool output = OSRIsGeocentric(current);
     OSRDestroySpatialReference(current);
     return output;
@@ -348,14 +367,18 @@ bool SpatialReference::isWKT(const std::string& wkt)
 
 std::string SpatialReference::prettyWkt(const std::string& wkt)
 {
+    std::string outWkt;
+
     OGRSpatialReference *srs =
         (OGRSpatialReference *)OSRNewSpatialReference(wkt.data());
+    if (!srs)
+        return outWkt;
 
     char *buf = nullptr;
     srs->exportToPrettyWkt(&buf, FALSE);
     OSRDestroySpatialReference(srs);
 
-    std::string outWkt(buf);
+    outWkt = buf;
     CPLFree(buf);
     return outWkt;
 }
@@ -368,7 +391,7 @@ int SpatialReference::computeUTMZone(const BOX3D& box) const
         return 0;
 
     OGRSpatialReferenceH current = OSRNewSpatialReference(m_wkt.c_str());
-    if (! current)
+    if (!current)
         throw pdal_error("Could not fetch current SRS");
 
     OGRSpatialReferenceH wgs84 = OSRNewSpatialReference(0);
diff --git a/pdal/Stage.cpp b/pdal/Stage.cpp
index c1370d8..ac6b1c5 100644
--- a/pdal/Stage.cpp
+++ b/pdal/Stage.cpp
@@ -38,6 +38,7 @@
 #include <pdal/Stage.hpp>
 #include <pdal/SpatialReference.hpp>
 #include <pdal/PDALUtils.hpp>
+#include <pdal/util/Algorithm.hpp>
 #include <pdal/util/ProgramArgs.hpp>
 
 #include "private/StageRunner.hpp"
@@ -171,8 +172,10 @@ PointViewSet Stage::execute(PointTableRef table)
     //   writer wants to check a table's SRS.
     SpatialReference srs;
     table.clearSpatialReferences();
-    for (auto const& it : views)
-        table.addSpatialReference(it->spatialReference());
+    // Iterating backwards will ensure that the SRS for the first view is
+    // first on the list for table.
+    for (auto it = views.rbegin(); it != views.rend(); it++)
+        table.addSpatialReference((*it)->spatialReference());
     gdal::ErrorHandler::getGlobalErrorHandler().set(m_log, m_debug);
 
     // Do the ready operation and then start running all the views
@@ -209,17 +212,59 @@ PointViewSet Stage::execute(PointTableRef table)
 // Streamed execution.
 void Stage::execute(StreamPointTable& table)
 {
-    typedef std::list<Stage *> StageList;
+    struct StageList : public std::list<Stage *>
+    {
+        StageList operator - (const StageList& other) const
+        {
+            StageList resultList;
+            auto ti = rbegin();
+            auto oi = other.rbegin();
+
+            while (oi != other.rend() && ti != rend() && *ti == *oi)
+            {
+                oi++;
+                ti++;
+            }
+            while (ti != rend())
+                resultList.push_front(*ti++);
+            return resultList;
+        }
 
+        void ready(PointTableRef& table)
+        {
+            for (auto s : *this)
+            {
+                s->pushLogLeader();
+                s->ready(table);
+                s->popLogLeader();
+                SpatialReference srs = s->getSpatialReference();
+                if (!srs.empty())
+                    table.setSpatialReference(srs);
+            }
+        }
+
+        void done(PointTableRef& table)
+        {
+            for (auto s : *this)
+            {
+                s->pushLogLeader();
+                s->l_done(table);
+                s->popLogLeader();
+            }
+        }
+    };
+
+    SpatialReference srs;
     std::list<StageList> lists;
     StageList stages;
+    StageList lastRunStages;
 
     table.finalize();
 
     // Walk from the current stage backwards.  As we add each input, copy
     // the list of stages and push it on a list.  We then pull a list from the
-    // front of list and keep going.  Placing on the back and pulling from the
-    // front insures that the stages will be executed in the order that they
+    // back of list and keep going.  Pushing on the front and pulling from the
+    // back ensures that the stages will be executed in the order that they
     // were added.  If we hit a stage with no previous stages, we execute
     // the stage list.
     // All this often amounts to a bunch of list copying for
@@ -234,7 +279,15 @@ void Stage::execute(StreamPointTable& table)
     while (true)
     {
         if (s->m_inputs.empty())
+        {
+            // Call done on all the stages we ran last time and aren't
+            // using this time.
+            (lastRunStages - stages).done(table);
+            // Call ready on all the stages we didn't run last time.
+            (stages - lastRunStages).ready(table);
             execute(table, stages);
+            lastRunStages = stages;
+        }
         else
         {
             for (auto s2 : s->m_inputs)
@@ -245,7 +298,10 @@ void Stage::execute(StreamPointTable& table)
             }
         }
         if (lists.empty())
+        {
+            lastRunStages.done(table);
             break;
+        }
         stages = lists.back();
         lists.pop_back();
         s = stages.front();
@@ -258,6 +314,7 @@ void Stage::execute(StreamPointTable& table, std::list<Stage *>& stages)
     std::vector<bool> skips(table.capacity());
     std::list<Stage *> filters;
     SpatialReference srs;
+    std::map<Stage *, SpatialReference> srsMap;
 
     // Separate out the first stage.
     Stage *reader = stages.front();
@@ -268,14 +325,6 @@ void Stage::execute(StreamPointTable& table, std::list<Stage *>& stages)
     begin++;
     std::copy(begin, stages.end(), std::back_inserter(filters));
 
-    for (Stage *s : stages)
-    {
-        s->ready(table);
-        srs = s->getSpatialReference();
-        if (!srs.empty())
-            table.setSpatialReference(srs);
-    }
-
     // Loop until we're finished.  We handle the number of points up to
     // the capacity of the StreamPointTable that we've been provided.
 
@@ -292,6 +341,9 @@ void Stage::execute(StreamPointTable& table, std::list<Stage *>& stages)
         // When we get false back from a reader, we're done, so set
         // the point limit to the number of points processed in this loop
         // of the table.
+        if (!pointLimit)
+            finished = true;
+
         for (PointId idx = 0; idx < pointLimit; idx++)
         {
             point.setPointId(idx);
@@ -309,6 +361,11 @@ void Stage::execute(StreamPointTable& table, std::list<Stage *>& stages)
         // processed by subsequent filters.
         for (Stage *s : filters)
         {
+            if (srsMap[s] != srs)
+            {
+                s->spatialReferenceChanged(srs);
+                srsMap[s] = srs;
+            }
             s->pushLogLeader();
             for (PointId idx = 0; idx < pointLimit; idx++)
             {
@@ -329,13 +386,6 @@ void Stage::execute(StreamPointTable& table, std::list<Stage *>& stages)
             skips[i] = false;
         table.reset();
     }
-
-    for (Stage *s : stages)
-    {
-        s->pushLogLeader();
-        s->l_done(table);
-        s->popLogLeader();
-    }
 }
 
 void Stage::l_done(PointTableRef table)
@@ -345,6 +395,7 @@ void Stage::l_done(PointTableRef table)
 
 void Stage::l_addArgs(ProgramArgs& args)
 {
+    args.add("user_data", "User JSON", m_userDataJSON);
     args.add("log", "Debug output filename", m_logname);
     readerAddArgs(args);
 }
@@ -418,5 +469,11 @@ void Stage::setSpatialReference(MetadataNode& m,
     }
 }
 
+
+void Stage::throwError(const std::string& s) const
+{
+    throw pdal_error(getName() + ": " + s);
+}
+
 } // namespace pdal
 
diff --git a/pdal/Stage.hpp b/pdal/Stage.hpp
index bb40116..8f0e447 100644
--- a/pdal/Stage.hpp
+++ b/pdal/Stage.hpp
@@ -253,24 +253,25 @@ public:
     virtual std::string getName() const = 0;
 
     /**
-      Return the tag name of a stage.
+        Set a specific tag name.
+    */
+    void setTag(const std::string& tag)
+        { m_tag = tag; }
 
-      The tag name is used when writing a JSON pipeline.  It is generally
-      the same as the stage name, but a number is appended to maintain
-      uniqueness when stages appear more than once in a pipeline.
-      the same as
+    /**
+      Return the tag name of a stage.
 
-      \return  The tag's name.
+      \return  The tag name.
     */
-    virtual std::string tagName() const
-        { return getName(); }
+    virtual std::string tag() const
+        { return m_tag; }
 
     /**
       Return a list of the stage's inputs.
 
       \return  A vector of pointers to input stages.
     **/
-    const std::vector<Stage*>& getInputs() const
+    std::vector<Stage*>& getInputs()
         { return m_inputs; }
 
     /**
@@ -298,6 +299,7 @@ protected:
 
     void setSpatialReference(MetadataNode& m, SpatialReference const&);
     void addSpatialReferenceArg(ProgramArgs& args);
+    void throwError(const std::string& s) const;
 
 private:
     bool m_debug;
@@ -308,6 +310,11 @@ private:
     std::string m_logLeader;
     SpatialReference m_spatialReference;
     std::unique_ptr<ProgramArgs> m_args;
+    std::string m_tag;
+    // This is never used after it is set.  It just provides a place to
+    // bind the user_data argument, which is essentially a comment in
+    // pipeline files.
+    std::string m_userDataJSON;
 
     Stage& operator=(const Stage&); // not implemented
     Stage(const Stage&); // not implemented
@@ -409,6 +416,16 @@ private:
     }
 
     /**
+      (Streaming mode)  Notification that the points that follow in
+      processing are from a spatial reference different from the
+      previous spatial reference.
+
+      \param srs  New spatial reference.
+    */
+    virtual void spatialReferenceChanged(const SpatialReference& srs)
+    {}
+
+    /**
       Process all points in a view.  Implement in subclass.
 
       \param view  PointView to process.
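
A sketch of a streaming filter reacting to the new spatialReferenceChanged()
notification; the filter is hypothetical and the header locations are
assumed from the PDAL source layout:

    #include <pdal/Filter.hpp>
    #include <pdal/PointView.hpp>

    namespace pdal
    {
    // Hypothetical streaming filter that caches the incoming SRS so it
    // could rebuild a coordinate transform when the SRS changes mid-stream.
    class DemoStreamFilter : public Filter
    {
    public:
        std::string getName() const override
            { return "filters.demostream"; }

    private:
        SpatialReference m_curSrs;

        void spatialReferenceChanged(const SpatialReference& srs) override
        {
            m_curSrs = srs;  // e.g. recreate a reprojection transform here
        }

        bool processOne(PointRef& point) override
        {
            return true;     // process the point using m_curSrs
        }
    };
    } // namespace pdal
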
diff --git a/pdal/StageFactory.cpp b/pdal/StageFactory.cpp
index debf2f1..3e0385f 100644
--- a/pdal/StageFactory.cpp
+++ b/pdal/StageFactory.cpp
@@ -37,8 +37,9 @@
 #include <pdal/util/FileUtils.hpp>
 
 #include <filters/ApproximateCoplanarFilter.hpp>
-#include <filters/AttributeFilter.hpp>
+#include <filters/AssignFilter.hpp>
 #include <filters/ChipperFilter.hpp>
+#include <filters/ClusterFilter.hpp>
 #include <filters/ColorizationFilter.hpp>
 #include <filters/ColorinterpFilter.hpp>
 #include <filters/ComputeRangeFilter.hpp>
@@ -48,9 +49,11 @@
 #include <filters/EigenvaluesFilter.hpp>
 #include <filters/EstimateRankFilter.hpp>
 #include <filters/FerryFilter.hpp>
+#include <filters/GroupByFilter.hpp>
 #include <filters/HAGFilter.hpp>
 #include <filters/IQRFilter.hpp>
 #include <filters/KDistanceFilter.hpp>
+#include <filters/LocateFilter.hpp>
 #include <filters/LOFFilter.hpp>
 #include <filters/MADFilter.hpp>
 #include <filters/MergeFilter.hpp>
@@ -58,6 +61,7 @@
 #include <filters/MortonOrderFilter.hpp>
 #include <filters/NormalFilter.hpp>
 #include <filters/OutlierFilter.hpp>
+#include <filters/OverlayFilter.hpp>
 #include <filters/PMFFilter.hpp>
 #include <filters/RadialDensityFilter.hpp>
 #include <filters/RangeFilter.hpp>
@@ -91,7 +95,6 @@
 #include <io/LasWriter.hpp>
 #include <io/PlyWriter.hpp>
 #include <io/SbetWriter.hpp>
-#include <io/DerivativeWriter.hpp>
 #include <io/TextWriter.hpp>
 #include <io/NullWriter.hpp>
 
@@ -131,11 +134,11 @@ StringList StageFactory::extensions(const std::string& driver)
         { "writers.matlab", { "mat" } },
         { "writers.nitf", { "nitf", "nsf", "ntf" } },
         { "writers.pcd", { "pcd" } },
-        { "writers.pclvisualizer", { "pclvis" } },
         { "writers.ply", { "ply" } },
         { "writers.sbet", { "sbet" } },
         { "writers.derivative", { "derivative" } },
         { "writers.sqlite", { "sqlite" } },
+        { "writers.gdal", { "tif", "tiff", "vrt" } },
     };
 
     return exts[driver];
@@ -204,7 +207,6 @@ std::string StageFactory::inferWriterDriver(const std::string& filename)
         { "mat", "writers.matlab" },
         { "ntf", "writers.nitf" },
         { "pcd", "writers.pcd" },
-        { "pclviz", "writers.pclvisualizer" },
         { "ply", "writers.ply" },
         { "sbet", "writers.sbet" },
         { "derivative", "writers.derivative" },
@@ -212,7 +214,9 @@ std::string StageFactory::inferWriterDriver(const std::string& filename)
         { "txt", "writers.text" },
         { "xyz", "writers.text" },
         { "", "writers.text" },
-        { "tif", "writers.gdal" }
+        { "tif", "writers.gdal" },
+        { "tiff", "writers.gdal" },
+        { "vrt", "writers.gdal" }
     };
 
     // Strip off '.' and make lowercase.
@@ -233,8 +237,9 @@ StageFactory::StageFactory(bool no_plugins)
 
     // filters
     PluginManager::initializePlugin(ApproximateCoplanarFilter_InitPlugin);
-    PluginManager::initializePlugin(AttributeFilter_InitPlugin);
+    PluginManager::initializePlugin(AssignFilter_InitPlugin);
     PluginManager::initializePlugin(ChipperFilter_InitPlugin);
+    PluginManager::initializePlugin(ClusterFilter_InitPlugin);
     PluginManager::initializePlugin(ColorizationFilter_InitPlugin);
     PluginManager::initializePlugin(ColorinterpFilter_InitPlugin);
     PluginManager::initializePlugin(ComputeRangeFilter_InitPlugin);
@@ -244,9 +249,11 @@ StageFactory::StageFactory(bool no_plugins)
     PluginManager::initializePlugin(EigenvaluesFilter_InitPlugin);
     PluginManager::initializePlugin(EstimateRankFilter_InitPlugin);
     PluginManager::initializePlugin(FerryFilter_InitPlugin);
+    PluginManager::initializePlugin(GroupByFilter_InitPlugin);
     PluginManager::initializePlugin(HAGFilter_InitPlugin);
     PluginManager::initializePlugin(IQRFilter_InitPlugin);
     PluginManager::initializePlugin(KDistanceFilter_InitPlugin);
+    PluginManager::initializePlugin(LocateFilter_InitPlugin);
     PluginManager::initializePlugin(LOFFilter_InitPlugin);
     PluginManager::initializePlugin(MADFilter_InitPlugin);
     PluginManager::initializePlugin(MergeFilter_InitPlugin);
@@ -254,6 +261,7 @@ StageFactory::StageFactory(bool no_plugins)
     PluginManager::initializePlugin(MortonOrderFilter_InitPlugin);
     PluginManager::initializePlugin(NormalFilter_InitPlugin);
     PluginManager::initializePlugin(OutlierFilter_InitPlugin);
+    PluginManager::initializePlugin(OverlayFilter_InitPlugin);
     PluginManager::initializePlugin(PMFFilter_InitPlugin);
     PluginManager::initializePlugin(RadialDensityFilter_InitPlugin);
     PluginManager::initializePlugin(RangeFilter_InitPlugin);
@@ -282,7 +290,6 @@ StageFactory::StageFactory(bool no_plugins)
 
     // writers
     PluginManager::initializePlugin(BpfWriter_InitPlugin);
-    PluginManager::initializePlugin(DerivativeWriter_InitPlugin);
     PluginManager::initializePlugin(GDALWriter_InitPlugin);
     PluginManager::initializePlugin(LasWriter_InitPlugin);
     PluginManager::initializePlugin(PlyWriter_InitPlugin);
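
With the extension table above, writer inference now resolves GeoTIFF and
VRT outputs to writers.gdal. A quick sketch (the expected results follow
the mappings shown in this diff):

    #include <pdal/StageFactory.hpp>
    #include <iostream>

    int main()
    {
        using pdal::StageFactory;
        std::cout << StageFactory::inferWriterDriver("dtm.tiff") << "\n";   // writers.gdal
        std::cout << StageFactory::inferWriterDriver("mosaic.vrt") << "\n"; // writers.gdal
        std::cout << StageFactory::inferWriterDriver("cloud.txt") << "\n";  // writers.text
        return 0;
    }
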
diff --git a/pdal/gitsha.cpp b/pdal/gitsha.cpp
index 046be42..ebf3b88 100644
--- a/pdal/gitsha.cpp
+++ b/pdal/gitsha.cpp
@@ -1,3 +1,3 @@
 #include <pdal/gitsha.h>
-#define GIT_SHA1 "3851ef74cbef164112b6a3709dc09d59c3b70a05"
+#define GIT_SHA1 "f4cc476f6aca2c3b47157cea4f4413107c2f5f94"
 const char g_GIT_SHA1[] = GIT_SHA1;
diff --git a/pdal/pdal_config.cpp b/pdal/pdal_config.cpp
index ca45025..2703ebc 100644
--- a/pdal/pdal_config.cpp
+++ b/pdal/pdal_config.cpp
@@ -61,7 +61,7 @@
 #endif
 
 #ifdef PDAL_HAVE_LASZIP
-#include <laszip/laszip.hpp>
+#include <laszip.hpp>
 #endif
 
 #include <geos_c.h>
diff --git a/pdal/pdal_types.hpp b/pdal/pdal_types.hpp
index eb7d487..e7d9f53 100644
--- a/pdal/pdal_types.hpp
+++ b/pdal/pdal_types.hpp
@@ -187,7 +187,10 @@ inline std::istream& operator>>(std::istream& in, LogLevel& level)
         sval = Utils::tolower(sval);
         for (size_t i = 0; i < logNames.size(); ++i)
             if (logNames[i] == sval)
+            {
                 level = (LogLevel)i;
+                break;
+            }
     }
     if (level == LogLevel::None)
         in.setstate(std::ios_base::failbit);
@@ -201,7 +204,7 @@ inline std::ostream& operator<<(std::ostream& out, const LogLevel& level)
     if ((size_t)level < logNames.size())
     {
         sval = logNames[(size_t)level];
-        sval[0] = toupper(sval[0]);   // Make "Debug", "Error", etc.
+        sval[0] = (char)toupper(sval[0]);   // Make "Debug", "Error", etc.
     }
     out << sval;
     return out;
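
The added break stops the scan at the first matching name; parsing remains
case-insensitive. A small sketch of the two operators in use (assuming
pdal/pdal_types.hpp can be included directly):

    #include <pdal/pdal_types.hpp>
    #include <iostream>
    #include <sstream>

    int main()
    {
        pdal::LogLevel level;
        std::istringstream in("DEBUG");   // lowercased before comparison
        in >> level;                      // level == LogLevel::Debug
        std::cout << level << "\n";       // prints "Debug"
        return 0;
    }
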
diff --git a/pdal/plang/BufferedInvocation.cpp b/pdal/plang/BufferedInvocation.cpp
deleted file mode 100644
index 39ae511..0000000
--- a/pdal/plang/BufferedInvocation.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-/******************************************************************************
-* Copyright (c) 2011, Michael P. Gerlek (mpg at flaxen.com)
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
-
-#include <pdal/plang/BufferedInvocation.hpp>
-
-#ifdef PDAL_COMPILER_MSVC
-#  pragma warning(disable: 4127)  // conditional expression is constant
-#  pragma warning(disable: 4505)  // unreferenced local function has been removed
-#endif
-
-using namespace pdal;
-
-namespace pdal
-{
-namespace plang
-{
-
-
-BufferedInvocation::BufferedInvocation(const Script& script)
-    : Invocation(script)
-{}
-
-
-void BufferedInvocation::begin(PointView& view, MetadataNode m)
-{
-    PointLayoutPtr layout(view.m_pointTable.layout());
-    Dimension::IdList const& dims = layout->dims();
-
-    for (auto di = dims.begin(); di != dims.end(); ++di)
-    {
-        Dimension::Id d = *di;
-        const Dimension::Detail *dd = layout->dimDetail(d);
-        void *data = malloc(dd->size() * view.size());
-        m_buffers.push_back(data);  // Hold pointer for deallocation
-        char *p = (char *)data;
-        for (PointId idx = 0; idx < view.size(); ++idx)
-        {
-            view.getFieldInternal(d, idx, (void *)p);
-            p += dd->size();
-        }
-        std::string name = layout->dimName(*di);
-        insertArgument(name, (uint8_t *)data, dd->type(), view.size());
-    }
-    Py_XDECREF(m_metaIn);
-    m_metaIn = plang::fromMetadata(m);
-}
-
-
-void BufferedInvocation::end(PointView& view, MetadataNode m)
-{
-    // for each entry in the script's outs dictionary,
-    // look up that entry's name in the schema and then
-    // copy the data into the right dimension spot in the
-    // buffer
-
-    std::vector<std::string> names;
-    getOutputNames(names);
-
-    PointLayoutPtr layout(view.m_pointTable.layout());
-    Dimension::IdList const& dims = layout->dims();
-
-    for (auto di = dims.begin(); di != dims.end(); ++di)
-    {
-        Dimension::Id d = *di;
-        const Dimension::Detail *dd = layout->dimDetail(d);
-        std::string name = layout->dimName(*di);
-        auto found = std::find(names.begin(), names.end(), name);
-        if (found == names.end()) continue; // didn't have this dim in the names
-
-        assert(name == *found);
-        assert(hasOutputVariable(name));
-
-        size_t size = dd->size();
-        void *data = extractResult(name, dd->type());
-        char *p = (char *)data;
-        for (PointId idx = 0; idx < view.size(); ++idx)
-        {
-            view.setField(d, dd->type(), idx, (void *)p);
-            p += size;
-        }
-    }
-    for (auto bi = m_buffers.begin(); bi != m_buffers.end(); ++bi)
-        free(*bi);
-    m_buffers.clear();
-    addMetadata(m_metaOut, m);
-}
-
-} //namespace plang
-} //namespace pdal
-
diff --git a/pdal/plang/CMakeLists.txt b/pdal/plang/CMakeLists.txt
index f708f37..63eaa6c 100644
--- a/pdal/plang/CMakeLists.txt
+++ b/pdal/plang/CMakeLists.txt
@@ -1,7 +1,6 @@
 
 set(plang_srcs
     Array.cpp
-    BufferedInvocation.cpp
     Invocation.cpp
     Environment.cpp
     Redirector.cpp
diff --git a/pdal/plang/Environment.cpp b/pdal/plang/Environment.cpp
index a4ccb0d..40d7138 100644
--- a/pdal/plang/Environment.cpp
+++ b/pdal/plang/Environment.cpp
@@ -238,35 +238,35 @@ PyObject *fromMetadata(MetadataNode m)
     std::string description = m.description();
 
     MetadataNodeList children = m.children();
-    PyObject *submeta = NULL;
+    PyObject *submeta = PyList_New(0);
     if (children.size())
     {
-        submeta = PyList_New(0);
         for (MetadataNode& child : children)
             PyList_Append(submeta, fromMetadata(child));
     }
-    PyObject *data = PyTuple_New(5);
-    PyTuple_SetItem(data, 0, PyUnicode_FromString(name.data()));
-    PyTuple_SetItem(data, 1, PyUnicode_FromString(value.data()));
-    PyTuple_SetItem(data, 2, PyUnicode_FromString(type.data()));
-    PyTuple_SetItem(data, 3, PyUnicode_FromString(description.data()));
-    PyTuple_SetItem(data, 4, submeta);
+    PyObject *data = PyDict_New();
+    PyDict_SetItemString(data, "name", PyUnicode_FromString(name.data()));
+    PyDict_SetItemString(data, "value", PyUnicode_FromString(value.data()));
+    PyDict_SetItemString(data, "type", PyUnicode_FromString(type.data()));
+    PyDict_SetItemString(data, "description", PyUnicode_FromString(description.data()));
+    PyDict_SetItemString(data, "children", submeta);
 
     return data;
 }
 
-std::string readPythonString(PyObject* list, Py_ssize_t index)
+std::string readPythonString(PyObject* dict, const std::string& key)
 {
     std::stringstream ss;
 
-    PyObject* o = PyTuple_GetItem(list, index);
+    PyObject* o = PyDict_GetItemString(dict, key.c_str());
     if (!o)
     {
         std::stringstream oss;
-        oss << "Unable to get list item number " << index << " for list of length " << PyTuple_Size(list);
+        oss << "Unable to get dictionary item '" << key << "'";
         throw pdal_error(oss.str());
     }
-    PyObject* r = PyObject_Repr(o);
+
+    PyObject* r = PyObject_Str(o);
     if (!r)
         throw pdal::pdal_error("unable to get repr in readPythonString");
 #if PY_MAJOR_VERSION >= 3
@@ -279,31 +279,38 @@ std::string readPythonString(PyObject* list, Py_ssize_t index)
 
     return ss.str();
 }
-void addMetadata(PyObject *list, MetadataNode m)
+void addMetadata(PyObject *dict, MetadataNode m)
 {
 
-    if (!PyList_Check(list))
+    if (!dict)
+    {
         return;
+    }
 
-    for (Py_ssize_t i = 0; i < PyList_Size(list); ++i)
-    {
-        PyObject *tuple = PyList_GetItem(list, i);
-        if (!PyTuple_Check(tuple) || PyTuple_Size(tuple) != 5)
-            continue;
+    if (!PyDict_Check(dict))
+        throw pdal::pdal_error("'metadata' member must be a dictionary!");
 
-        std::string name = readPythonString(tuple, 0);
-        std::string value = readPythonString(tuple, 1);
+    std::string name = readPythonString(dict, "name");
+    std::string value = readPythonString(dict, "value");
 
-        std::string type = readPythonString(tuple, 2);
-        if (type.empty())
-            type = Metadata::inferType(value);
+    std::string type = readPythonString(dict, "type");
+    if (type.empty())
+        type = Metadata::inferType(value);
 
-        std::string description = readPythonString(tuple, 3);
+    std::string description = readPythonString(dict, "description");
 
-        PyObject *submeta = PyTuple_GetItem(tuple, 4);
+    PyObject *submeta = PyDict_GetItemString(dict, "children");
+    if (submeta)
+    {
+        if (!PyList_Check(submeta))
+            throw pdal::pdal_error("'children' metadata member must be a list!");
+
+        for (Py_ssize_t i = 0; i < PyList_Size(submeta); ++i)
+        {
+            PyObject* p = PyList_GetItem(submeta, i);
+            addMetadata(p, m);
+        }
         MetadataNode child =  m.addWithType(name, value, type, description);
-        if (submeta)
-            addMetadata(submeta, child);
     }
 }
 
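
The Python representation of metadata changes here from a 5-tuple to a
dictionary with "name", "value", "type", "description", and "children"
keys, where "children" is a list of dictionaries of the same shape. A
hedged sketch of walking a node produced by fromMetadata() (assumes an
initialized interpreter and that the function is declared in
pdal/plang/Environment.hpp):

    #include <Python.h>
    #include <pdal/Metadata.hpp>
    #include <pdal/plang/Environment.hpp>

    void inspect(pdal::MetadataNode m)
    {
        PyObject* node = pdal::plang::fromMetadata(m);   // new reference
        // PyDict_GetItemString returns borrowed references.
        PyObject* name = PyDict_GetItemString(node, "name");
        PyObject* kids = PyDict_GetItemString(node, "children");
        Py_ssize_t count = kids ? PyList_Size(kids) : 0;
        (void)name; (void)count;
        Py_DECREF(node);
    }
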
diff --git a/pdal/plang/Invocation.cpp b/pdal/plang/Invocation.cpp
index 8eed314..c5fc338 100644
--- a/pdal/plang/Invocation.cpp
+++ b/pdal/plang/Invocation.cpp
@@ -75,10 +75,8 @@ namespace pdal
 namespace plang
 {
 
-Invocation::Invocation(const Script& script) :
-    m_metaIn(NULL)
-    , m_metaOut(NULL)
-    , m_script(script)
+Invocation::Invocation(const Script& script)
+    : m_script(script)
     , m_bytecode(NULL)
     , m_module(NULL)
     , m_dictionary(NULL)
@@ -87,6 +85,10 @@ Invocation::Invocation(const Script& script) :
     , m_varsOut(NULL)
     , m_scriptArgs(NULL)
     , m_scriptResult(NULL)
+    , m_metadata_PyObject(NULL)
+    , m_schema_PyObject(NULL)
+    , m_srs_PyObject(NULL)
+    , m_pdalargs_PyObject(NULL)
 {
     plang::Environment::get();
     resetArguments();
@@ -137,8 +139,6 @@ void Invocation::cleanup()
         Py_XDECREF(m_pyInputArrays[i]);
     m_pyInputArrays.clear();
     Py_XDECREF(m_bytecode);
-    Py_XDECREF(m_metaIn);
-    Py_XDECREF(m_metaOut);
 }
 
 
@@ -147,8 +147,6 @@ void Invocation::resetArguments()
     cleanup();
     m_varsIn = PyDict_New();
     m_varsOut = PyDict_New();
-    m_metaIn = PyList_New(0);
-    m_metaOut = PyList_New(0);
 }
 
 
@@ -265,27 +263,186 @@ bool Invocation::execute()
         throw pdal::pdal_error("No code has been compiled");
 
     Py_INCREF(m_varsIn);
-    Py_INCREF(m_varsOut);
     Py_ssize_t numArgs = argCount(m_function);
     m_scriptArgs = PyTuple_New(numArgs);
+
+    if (numArgs > 2)
+        throw pdal::pdal_error("Only two arguments -- ins and outs numpy arrays -- can be passed!");
+
     PyTuple_SetItem(m_scriptArgs, 0, m_varsIn);
     if (numArgs > 1)
+    {
+        Py_INCREF(m_varsOut);
         PyTuple_SetItem(m_scriptArgs, 1, m_varsOut);
-    if (numArgs > 2)
-        PyTuple_SetItem(m_scriptArgs, 2, m_metaIn);
-    if (numArgs > 3)
-        PyTuple_SetItem(m_scriptArgs, 3, m_metaOut);
+    }
+
+    int success(0);
+
+    if (m_metadata_PyObject)
+    {
+        success = PyModule_AddObject(m_module, "metadata", m_metadata_PyObject);
+        if (success)
+            throw pdal::pdal_error("unable to set metadata global");
+        Py_INCREF(m_metadata_PyObject);
+    }
+
+    if (m_schema_PyObject)
+    {
+        success = PyModule_AddObject(m_module, "schema", m_schema_PyObject);
+        if (success)
+            throw pdal::pdal_error("unable to set schema global");
+        Py_INCREF(m_schema_PyObject);
+    }
+
+    if (m_srs_PyObject)
+    {
+        success = PyModule_AddObject(m_module, "spatialreference", m_srs_PyObject);
+        if (success)
+            throw pdal::pdal_error("unable to set spatialreference global");
+        Py_INCREF(m_srs_PyObject);
+    }
+
+    if (m_pdalargs_PyObject)
+    {
+        success = PyModule_AddObject(m_module, "pdalargs", m_pdalargs_PyObject);
+        if (success)
+            throw pdal::pdal_error("unable to set pdalargs global");
+        Py_INCREF(m_pdalargs_PyObject);
+    }
 
     m_scriptResult = PyObject_CallObject(m_function, m_scriptArgs);
     if (!m_scriptResult)
         throw pdal::pdal_error(getTraceback());
-
     if (!PyBool_Check(m_scriptResult))
         throw pdal::pdal_error("User function return value not a boolean type.");
 
+    PyObject* mod_vars = PyModule_GetDict(m_module);
+
+    PyObject* b = PyUnicode_FromString("metadata");
+    if (PyDict_Contains(mod_vars, b) == 1)
+        m_metadata_PyObject = PyDict_GetItem(m_dictionary, b);
+
     return (m_scriptResult == Py_True);
 }
 
+PyObject* getPyJSON(std::string const& str)
+{
+
+    PyObject* raw_json =  PyUnicode_FromString(str.c_str());
+    PyObject* json_module = PyImport_ImportModule("json");
+    if (!json_module)
+        throw pdal::pdal_error(getTraceback());
+
+    PyObject* json_mod_dict = PyModule_GetDict(json_module);
+    if (!json_mod_dict)
+        throw pdal::pdal_error(getTraceback());
+
+    PyObject* loads_func = PyDict_GetItemString(json_mod_dict, "loads");
+    if (!loads_func)
+        throw pdal::pdal_error(getTraceback());
+
+    PyObject* json_args = PyTuple_New(1);
+    if (!json_args)
+        throw pdal::pdal_error(getTraceback());
+
+    int success = PyTuple_SetItem(json_args, 0, raw_json);
+    if (success != 0)
+        throw pdal::pdal_error(getTraceback());
+
+    PyObject* json = PyObject_CallObject(loads_func, json_args);
+    if (!json)
+        throw pdal::pdal_error(getTraceback());
+
+    return json;
+}
+
+void Invocation::setKWargs(std::string const& s)
+{
+    Py_XDECREF(m_pdalargs_PyObject);
+    m_pdalargs_PyObject = getPyJSON(s);
+}
+
+void Invocation::begin(PointView& view, MetadataNode m)
+{
+    PointLayoutPtr layout(view.m_pointTable.layout());
+    Dimension::IdList const& dims = layout->dims();
+
+    for (auto di = dims.begin(); di != dims.end(); ++di)
+    {
+        Dimension::Id d = *di;
+        const Dimension::Detail *dd = layout->dimDetail(d);
+        void *data = malloc(dd->size() * view.size());
+        m_buffers.push_back(data);  // Hold pointer for deallocation
+        char *p = (char *)data;
+        for (PointId idx = 0; idx < view.size(); ++idx)
+        {
+            view.getFieldInternal(d, idx, (void *)p);
+            p += dd->size();
+        }
+        std::string name = layout->dimName(*di);
+        insertArgument(name, (uint8_t *)data, dd->type(), view.size());
+    }
+
+    // Put pipeline 'metadata' variable into module scope
+    Py_XDECREF(m_metadata_PyObject);
+    m_metadata_PyObject= plang::fromMetadata(m);
+
+    // Put 'schema' dict into module scope
+    Py_XDECREF(m_schema_PyObject);
+    MetadataNode s = view.layout()->toMetadata();
+    std::ostringstream ostrm;
+    Utils::toJSON(s, ostrm);
+    m_schema_PyObject = getPyJSON(ostrm.str());
+    ostrm.str("");
+
+    Py_XDECREF(m_srs_PyObject);
+    MetadataNode srs = view.spatialReference().toMetadata();
+    Utils::toJSON(srs, ostrm);
+    m_srs_PyObject = getPyJSON(ostrm.str());
+    ostrm.str("");
+}
+
+
+void Invocation::end(PointView& view, MetadataNode m)
+{
+    // for each entry in the script's outs dictionary,
+    // look up that entry's name in the schema and then
+    // copy the data into the right dimension spot in the
+    // buffer
+
+    std::vector<std::string> names;
+    getOutputNames(names);
+
+    PointLayoutPtr layout(view.m_pointTable.layout());
+    Dimension::IdList const& dims = layout->dims();
+
+    for (auto di = dims.begin(); di != dims.end(); ++di)
+    {
+        Dimension::Id d = *di;
+        const Dimension::Detail *dd = layout->dimDetail(d);
+        std::string name = layout->dimName(*di);
+        auto found = std::find(names.begin(), names.end(), name);
+        if (found == names.end()) continue; // didn't have this dim in the names
+
+        assert(name == *found);
+        assert(hasOutputVariable(name));
+
+        size_t size = dd->size();
+        void *data = extractResult(name, dd->type());
+        char *p = (char *)data;
+        for (PointId idx = 0; idx < view.size(); ++idx)
+        {
+            view.setField(d, dd->type(), idx, (void *)p);
+            p += size;
+        }
+    }
+    for (auto bi = m_buffers.begin(); bi != m_buffers.end(); ++bi)
+        free(*bi);
+    m_buffers.clear();
+    if (m_metadata_PyObject)
+        addMetadata(m_metadata_PyObject, m);
+}
+
 } // namespace plang
 } // namespace pdal
 
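
getPyJSON() parses JSON by calling into the interpreter's own json module.
A standalone sketch of the same pattern, written with PyObject_CallFunction
for brevity (not PDAL code):

    #include <Python.h>

    // Parse a JSON string into a Python object via json.loads().
    // Returns a new reference, or nullptr with a Python error set.
    PyObject* parseJSON(const char* text)
    {
        PyObject* mod = PyImport_ImportModule("json");
        if (!mod)
            return nullptr;
        PyObject* loads = PyObject_GetAttrString(mod, "loads");
        PyObject* result = loads ?
            PyObject_CallFunction(loads, "s", text) : nullptr;
        Py_XDECREF(loads);
        Py_DECREF(mod);
        return result;
    }
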
diff --git a/pdal/plang/Invocation.hpp b/pdal/plang/Invocation.hpp
index 56790ee..7bd762f 100644
--- a/pdal/plang/Invocation.hpp
+++ b/pdal/plang/Invocation.hpp
@@ -40,6 +40,7 @@
 #include "Environment.hpp"
 
 #include <pdal/Dimension.hpp>
+#include <pdal/PointView.hpp>
 
 namespace pdal
 {
@@ -81,9 +82,10 @@ public:
     // possible names from the schema)
     void getOutputNames(std::vector<std::string>& names);
 
-protected:
-    PyObject* m_metaIn;
-    PyObject* m_metaOut;
+    void begin(PointView& view, MetadataNode m);
+    void end(PointView& view, MetadataNode m);
+
+    void setKWargs(std::string const& s);
 
 private:
     void cleanup();
@@ -102,6 +104,12 @@ private:
     std::vector<PyObject*> m_pyInputArrays;
 
     Invocation& operator=(Invocation const& rhs); // nope
+
+    std::vector<void *> m_buffers;
+    PyObject* m_metadata_PyObject;
+    PyObject* m_schema_PyObject;
+    PyObject* m_srs_PyObject;
+    PyObject* m_pdalargs_PyObject;
 };
 
 } // namespace plang
diff --git a/pdal/private/PipelineReaderXML.hpp b/pdal/private/PipelineReaderXML.hpp
index 6d84b71..c58a268 100644
--- a/pdal/private/PipelineReaderXML.hpp
+++ b/pdal/private/PipelineReaderXML.hpp
@@ -66,7 +66,7 @@ public:
 
     /**
       Read an XML pipeline from a stream into a PipelineManager.
-      
+
       \param input  Stream to read from.
     */
     void readPipeline(std::istream& input);
@@ -92,6 +92,8 @@ private:
 
     PipelineReaderXML& operator=(const PipelineReaderXML&); // not implemented
     PipelineReaderXML(const PipelineReaderXML&); // not implemented
+
+    void baseReadPipeline(std::istream& input);
 };
 
 } // namespace pdal
diff --git a/pdal/util/Bounds.cpp b/pdal/util/Bounds.cpp
index 1b62cbe..8929e28 100644
--- a/pdal/util/Bounds.cpp
+++ b/pdal/util/Bounds.cpp
@@ -92,7 +92,7 @@ const double LOWEST = (std::numeric_limits<double>::lowest)();
 const double HIGHEST = (std::numeric_limits<double>::max)();
 
 }
-    
+
 void BOX2D::clear()
 {
     minx = HIGHEST; miny = HIGHEST;
@@ -112,11 +112,21 @@ bool BOX2D::empty() const
         miny == HIGHEST && maxy == LOWEST;
 }
 
+bool BOX2D::valid() const
+{
+    return  !empty();
+}
+
 bool BOX3D::empty() const
 {
     return  BOX2D::empty() && minz == HIGHEST && maxz == LOWEST;
 }
 
+bool BOX3D::valid() const
+{
+    return !empty();
+}
+
 void BOX2D::grow(double x, double y)
 {
     if (x < minx) minx = x;
@@ -137,14 +147,14 @@ const BOX2D& BOX2D::getDefaultSpatialExtent()
 {
     static BOX2D v(LOWEST, LOWEST, HIGHEST, HIGHEST);
     return v;
-}    
+}
 
 
 const BOX3D& BOX3D::getDefaultSpatialExtent()
 {
     static BOX3D v(LOWEST, LOWEST, LOWEST, HIGHEST, HIGHEST, HIGHEST);
     return v;
-}    
+}
 
 Bounds::Bounds(const BOX3D& box) : m_box(box)
 {}
diff --git a/pdal/util/Bounds.hpp b/pdal/util/Bounds.hpp
index aaab4df..45d93ca 100644
--- a/pdal/util/Bounds.hpp
+++ b/pdal/util/Bounds.hpp
@@ -72,14 +72,20 @@ public:
     {}
 
     /**
-      Determine whether a bounds box has any bounds set (is in a state
-      as if default-constructed).
+      Determine whether a bounds box has not had any bounds set.
 
       \return  Whether the bounds box is empty.
     */
     bool empty() const;
 
     /**
+      Determine whether a bounds box has had any bounds set.
+
+      \return  Whether the bounds box is valid.
+    */
+    bool valid() const;
+
+    /**
       Clear the bounds box to an empty state.
     */
     void clear();
@@ -317,7 +323,7 @@ public:
     {}
 
     /**
-      Determine whether a bounds box has any bounds set (is in a state
+      Determine whether a bounds box has not had any bounds set (is in a state
       as if default-constructed).
 
       \return  Whether the bounds box is empty.
@@ -325,6 +331,13 @@ public:
     bool empty() const;
 
     /**
+      Determine whether a bounds box has had any bounds set.
+
+      \return  \c true if the bounds box is not empty.
+    */
+    bool valid() const;
+
+    /**
       Expand the bounds of the box if a value is less than the current
       minimum or greater than the current maximum.  If the bounds box is
       currently empty, both minimum and maximum box bounds will be set to
@@ -477,7 +490,7 @@ public:
 
     /**
       Convert this box to a well-known text string.
-      
+
       \param precision  Precision for output [default: 8]
       \return  String format of this box.
     */
@@ -545,7 +558,7 @@ public:
 
 /**
   Wrapper for BOX3D and BOX2D to allow extraction as either.  Typically used
-  to facilitate streaming either a BOX2D or BOX3D 
+  to facilitate streaming either a BOX2D or BOX3D
 */
 class PDAL_DLL Bounds
 {
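
Since valid() is defined as the negation of empty(), a default-constructed
box becomes valid as soon as it grows around a point. A tiny sketch:

    #include <pdal/util/Bounds.hpp>
    #include <cassert>

    int main()
    {
        pdal::BOX2D box;                 // default: no bounds set
        assert(box.empty() && !box.valid());
        box.grow(636800.0, 849000.0);    // arbitrary coordinates
        assert(box.valid());             // valid() == !empty()
        return 0;
    }
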
diff --git a/filters/private/crop/Point.hpp b/pdal/util/NullOStream.hpp
similarity index 70%
copy from filters/private/crop/Point.hpp
copy to pdal/util/NullOStream.hpp
index 6115a42..7d709fc 100644
--- a/filters/private/crop/Point.hpp
+++ b/pdal/util/NullOStream.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2016, Howard Butler (howard at hobu.co)
+* Copyright (c) 2017, Hobu Inc. (info at hobu.co)
 *
 * All rights reserved.
 *
@@ -13,7 +13,7 @@
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided
 *       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+*     * Neither the name of Hobu, Inc. nor the
 *       names of its contributors may be used to endorse or promote
 *       products derived from this software without specific prior
 *       written permission.
@@ -32,36 +32,32 @@
 * OF SUCH DAMAGE.
 ****************************************************************************/
 
-#pragma once
+// Copied from http://stackoverflow.com/questions/8243743/is-there-a-null-stdostream-implementation-in-c-or-libraries
 
-#include <pdal/Geometry.hpp>
+#include <iostream>
+#include <streambuf>
 
 namespace pdal
 {
 
-
-namespace cropfilter
+class NullStreambuf : public std::streambuf
 {
+    char dummyBuffer[64];
+protected:
+    virtual int overflow(int c)
+    {
+        setp(dummyBuffer, dummyBuffer + sizeof(dummyBuffer));
+        return (c == traits_type::eof()) ? '\0' : c;
+    }
+};
 
-class PDAL_DLL Point : public Geometry
+class NullOStream : private NullStreambuf, public std::ostream
 {
 public:
-
-    Point();
-    Point(const std::string& wkt_or_json,
-           SpatialReference ref);
-    bool is3d() const;
-    bool empty() const;
-    void clear();
-
-
-    virtual void update(const std::string& wkt_or_json,
-        SpatialReference ref = SpatialReference());
-
-    double x;
-    double y;
-    double z;
-
+    NullOStream() : std::ostream(this)
+        {}
+    NullStreambuf* rdbuf() const
+        { return const_cast<NullOStream *>(this); }
 };
-} // namespace cropfilter
-} // namespace pdal
+
+} // namespace pdal
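
A one-line illustration of NullOStream, which formats and then discards
everything written to it (include path assumed from this diff):

    #include <pdal/util/NullOStream.hpp>

    int main()
    {
        pdal::NullOStream devnull;
        devnull << "formatted but discarded: " << 42 << "\n";
        return 0;
    }
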
diff --git a/pdal/util/ProgramArgs.hpp b/pdal/util/ProgramArgs.hpp
index f4cba0e..cf4dc49 100644
--- a/pdal/util/ProgramArgs.hpp
+++ b/pdal/util/ProgramArgs.hpp
@@ -394,30 +394,24 @@ public:
     {
         if (m_set)
         {
-            std::ostringstream oss;
-            oss << "Attempted to set value twice for argument '" <<
-                m_longname << "'.";
-            throw arg_val_error(oss.str());
+            throw arg_val_error("Attempted to set value twice for argument '" +
+                m_longname + "'.");
         }
         if (s.empty())
         {
-            std::stringstream oss;
-            oss << "Argument '" << m_longname << "' needs a value and none "
-                "was provided.";
-            throw arg_val_error(oss.str());
+            throw arg_val_error("Argument '" + m_longname +
+                "' needs a value and none was provided.");
         }
+
         m_rawVal = s;
         if (!Utils::fromString(s, m_var))
         {
-            std::ostringstream oss;
-            if (m_error.size())
-                throw arg_val_error(m_error);
-            else
-            {
-                oss << "Invalid value '" << s << "' for argument '" <<
-                    m_longname << "'.";
-                throw arg_val_error(oss.str());
-            }
+            std::string error(m_error);
+
+            if (error.empty())
+                error = "Invalid value '" + s + "' for argument '" +
+                    m_longname + "'.";
+            throw arg_val_error(error);
         }
         m_set = true;
     }
@@ -459,13 +453,8 @@ public:
             return;
         }
         if (m_positional == PosType::Required)
-        {
-            std::ostringstream oss;
-
-            oss << "Missing value for positional argument '" <<
-                m_longname << "'.";
-            throw arg_error(oss.str());
-        }
+            throw arg_error("Missing value for positional argument '" +
+                m_longname + "'.");
     }
 
     /**
@@ -557,10 +546,8 @@ public:
     {
         if (s.size() && s[0] == '-')
         {
-            std::stringstream oss;
-            oss << "Argument '" << m_longname << "' needs a value and none "
-                "was provided.";
-            throw arg_val_error(oss.str());
+            throw arg_val_error("Argument '" + m_longname +
+                "' needs a value and none was provided.");
         }
         if (s == "invert")
             m_val = !m_defaultVal;
@@ -593,9 +580,8 @@ public:
     */
     virtual Arg& setPositional()
     {
-        std::ostringstream oss;
-        oss << "Boolean argument '" << m_longname << "' can't be positional.";
-        throw arg_error(oss.str());
+        throw arg_error("Boolean argument '" + m_longname +
+            "' can't be positional.");
         return *this;
     }
 
@@ -607,9 +593,8 @@ public:
     */
     virtual Arg& setOptionalPositional()
     {
-        std::ostringstream oss;
-        oss << "Boolean argument '" << m_longname << "' can't be positional.";
-        throw arg_error(oss.str());
+        throw arg_error("Boolean argument '" + m_longname +
+            "' can't be positional.");
         return *this;
     }
     /**
@@ -689,11 +674,8 @@ public:
         }
         if (cnt == 0 && m_positional == PosType::Required)
         {
-            std::ostringstream oss;
-
-            oss << "Missing value for positional argument '" <<
-                m_longname << "'.";
-            throw arg_error(oss.str());
+            throw arg_error("Missing value for positional argument '" +
+                m_longname + "'.");
         }
     }
 
@@ -769,18 +751,18 @@ public:
     {
         if (s.size() && s[0] == '-')
         {
-            std::stringstream oss;
-            oss << "Argument '" << m_longname << "' needs a value and none "
-                "was provided.";
-            throw arg_val_error(oss.str());
+            throw arg_val_error("Argument '" + m_longname +
+                "' needs a value and none was provided.");
         }
         m_rawVal = s;
         T var;
         if (!Utils::fromString(s, var))
         {
-            std::ostringstream oss;
-            oss << "Invalid value for argument '" << m_longname << "'.";
-            throw arg_val_error(oss.str());
+            std::string error(m_error);
+
+            if (error.empty())
+                error = "Invalid value for argument '" + m_longname + "'.";
+            throw arg_val_error(error);
         }
         if (!m_set)
             m_var.clear();
@@ -886,10 +868,8 @@ public:
 
         if ((s.size() && s[0] == '-') || slist.empty())
         {
-            std::ostringstream oss;
-
-            oss << "Missing value for argument '" << m_longname << "'.";
-            throw arg_val_error(oss.str());
+            throw arg_val_error("Missing value for argument '" + m_longname +
+                "'.");
         }
         m_rawVal = s;
         if (!m_set)
@@ -1369,12 +1349,7 @@ private:
         if (name.empty())
             return;
         if (findLongArg(name))
-        {
-            std::ostringstream oss;
-
-            oss << "Argument --" << name << " already exists.";
-            throw arg_error(oss.str());
-        }
+            throw arg_error("Argument --" + name + " already exists.");
         m_longargs[name] = arg;
     }
 
@@ -1389,12 +1364,7 @@ private:
         if (name.empty())
             return;
         if (findShortArg(name[0]))
-        {
-            std::ostringstream oss;
-
-            oss << "Argument -" << name << " already exists.";
-            throw arg_error(oss.str());
-        }
+            throw arg_error("Argument -" + name + " already exists.");
         m_shortargs[name] = arg;
     }
 
@@ -1483,11 +1453,7 @@ private:
 
         Arg *arg = findLongArg(name);
         if (!arg)
-        {
-            std::ostringstream oss;
-            oss << "Unexpected argument '" << name << "'.";
-            throw arg_error(oss.str());
-        }
+            throw arg_error("Unexpected argument '" + name + "'.");
 
         if (!arg->needsValue())
         {
@@ -1495,10 +1461,9 @@ private:
             {
                 if (value != "true" && value != "false")
                 {
-                    std::ostringstream oss;
-                    oss << "Value '" << value << "' provided for argument '" <<
-                        name << "' when none is expected.";
-                    throw arg_error(oss.str());
+                    throw arg_error("Value '" + value +
+                        "' provided for argument '" + name +
+                        "' when none is expected.");
                 }
             }
             else
@@ -1528,11 +1493,8 @@ private:
 
         Arg *arg = findShortArg(name[1]);
         if (!arg)
-        {
-            std::ostringstream oss;
-            oss << "Unexpected argument '-" << name[1] << "'.";
-            throw arg_error(oss.str());
-        }
+            throw arg_error("Unexpected argument '-" + std::string(1, name[1]) +
+                "'.");
 
         int cnt;
         if (arg->needsValue())
@@ -1541,10 +1503,8 @@ private:
             // rather than a value.
             if (value.empty() || value[0] == '-')
             {
-                std::ostringstream oss;
-                oss << "Short option '" << name << "' expects value "
-                    "but none directly follows.";
-                throw arg_error(oss.str());
+                throw arg_error("Short option '" + name + "' expects value "
+                    "but none directly follows.");
             }
             else
             {
@@ -1573,12 +1533,8 @@ private:
             if (arg->positional() == Arg::PosType::Optional)
                 opt = true;
             if (opt && (arg->positional() == Arg::PosType::Required))
-            {
-                std::ostringstream oss;
-                oss << "Found required positional argument '" <<
-                    arg->longname() << "' after optional positional argument.";
-                throw arg_error(oss.str());
-            }
+                throw arg_error("Found required positional argument '" +
+                    arg->longname() + "' after optional positional argument.");
         }
     }
 
diff --git a/pdal/util/Utils.hpp b/pdal/util/Utils.hpp
index ce55597..9f5eda4 100644
--- a/pdal/util/Utils.hpp
+++ b/pdal/util/Utils.hpp
@@ -52,6 +52,8 @@
 #include <type_traits>
 #include <vector>
 
+#include <iostream>
+
 #include "pdal_util_export.hpp"
 
 namespace pdal
@@ -867,12 +869,23 @@ namespace Utils
     template<>
     inline bool fromString<char>(const std::string& s, char& to)
     {
-        int i = std::stoi(s);
-        if (i >= std::numeric_limits<char>::lowest() &&
-            i <= std::numeric_limits<char>::max())
+        try
         {
-            to = static_cast<char>(i);
-            return true;
+            int i = std::stoi(s);
+            if (i >= std::numeric_limits<char>::lowest() &&
+                    i <= std::numeric_limits<char>::max())
+            {
+                to = static_cast<char>(i);
+                return true;
+            }
+        }
+        catch (const std::invalid_argument&) // Character that isn't a number?
+        {
+            if (s.length() == 1)
+            {
+                to = s[0];
+                return true;
+            }
         }
         return false;
     }
@@ -888,13 +901,25 @@ namespace Utils
     inline bool fromString<unsigned char>(const std::string& s,
         unsigned char& to)
     {
-        int i = std::stoi(s);
-        if (i >= std::numeric_limits<unsigned char>::lowest() &&
-            i <= std::numeric_limits<unsigned char>::max())
+        try
         {
-            to = static_cast<unsigned char>(i);
-            return true;
+            int i  = std::stoi(s);
+            if (i >= std::numeric_limits<unsigned char>::lowest() &&
+                i <= std::numeric_limits<unsigned char>::max())
+            {
+                to = static_cast<unsigned char>(i);
+                return true;
+            }
         }
+        catch (const std::invalid_argument&) // Character that isn't a number?
+        {
+            if (s.length() == 1)
+            {
+                to = s[0];
+                return true;
+            }
+        }
+
         return false;
     }
 
@@ -908,12 +933,23 @@ namespace Utils
     template<>
     inline bool fromString<signed char>(const std::string& s, signed char& to)
     {
-        int i = std::stoi(s);
-        if (i >= std::numeric_limits<signed char>::lowest() &&
-            i <= std::numeric_limits<signed char>::max())
+        try
         {
-            to = static_cast<signed char>(i);
-            return true;
+            int i = std::stoi(s);
+            if (i >= std::numeric_limits<signed char>::lowest() &&
+                    i <= std::numeric_limits<signed char>::max())
+            {
+                to = static_cast<signed char>(i);
+                return true;
+            }
+        }
+        catch (const std::invalid_argument&) // Character that isn't a number?
+        {
+            if (s.length() == 1)
+            {
+                to = s[0];
+                return true;
+            }
         }
         return false;
     }
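
The new catch blocks let a single non-numeric character parse as itself,
while numeric strings keep their old range-checked behavior. A minimal
sketch:

    #include <pdal/util/Utils.hpp>
    #include <cassert>

    int main()
    {
        char c;
        assert(pdal::Utils::fromString("65", c) && c == 65);  // numeric path
        assert(pdal::Utils::fromString("A", c) && c == 'A');  // new fallback
        assert(!pdal::Utils::fromString("AB", c));            // still rejected
        return 0;
    }
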
diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt
index 49302e0..581b07f 100644
--- a/plugins/CMakeLists.txt
+++ b/plugins/CMakeLists.txt
@@ -27,6 +27,10 @@ if(BUILD_PLUGIN_MATLAB)
     add_subdirectory(matlab)
 endif()
 
+if(BUILD_PLUGIN_MBIO)
+    add_subdirectory(mbio)
+endif()
+
 if(BUILD_PLUGIN_MRSID)
     add_subdirectory(mrsid)
 endif()
@@ -42,10 +46,6 @@ if(BUILD_PLUGIN_OCI)
     add_subdirectory(oci)
 endif()
 
-if(BUILD_PLUGIN_P2G)
-    add_subdirectory(p2g)
-endif()
-
 if(BUILD_PLUGIN_PCL)
     add_subdirectory(pcl)
 endif()
diff --git a/plugins/cpd/CMakeLists.txt b/plugins/cpd/CMakeLists.txt
index 671aa5b..2d27bec 100644
--- a/plugins/cpd/CMakeLists.txt
+++ b/plugins/cpd/CMakeLists.txt
@@ -1,4 +1,4 @@
-find_package(Cpd 0.3 REQUIRED)
+find_package(Cpd 0.5 REQUIRED)
 
 set_package_properties(Cpd PROPERTIES
     DESCRIPTION "Coherent Point Drift"
@@ -7,22 +7,8 @@ set_package_properties(Cpd PROPERTIES
     PURPOSE "Register two point sets using the Coherent Point Drift algorithm"
     )
 
-PDAL_ADD_PLUGIN(cpd_kernel_lib_name kernel cpd
-    FILES kernel/Cpd.cpp
+pdal_add_plugin(cpd_kernel_lib_name kernel cpd
+    FILES kernel/CpdKernel.cpp
     LINK_WITH Cpd::Library-C++
     )
-target_include_directories(${cpd_kernel_lib_name} PRIVATE
-    ${ROOT_DIR}
-    ${CMAKE_CURRENT_LIST_DIR} )
-
-if (WITH_TESTS)
-    PDAL_ADD_TEST(pdal_plugins_cpd_kernel_test
-        FILES test/CpdKernelTest.cpp
-        LINK_WITH Cpd::Library-C++
-        )
-    target_include_directories(pdal_plugins_cpd_kernel_test
-        PRIVATE
-        ${ROOT_DIR}
-        ${CMAKE_CURRENT_LIST_DIR}
-        )
-endif()
+target_include_directories(${cpd_kernel_lib_name} PRIVATE ${CMAKE_CURRENT_LIST_DIR})
diff --git a/plugins/cpd/kernel/Cpd.cpp b/plugins/cpd/kernel/CpdKernel.cpp
similarity index 52%
rename from plugins/cpd/kernel/Cpd.cpp
rename to plugins/cpd/kernel/CpdKernel.cpp
index b9f4658..b2d5a98 100644
--- a/plugins/cpd/kernel/Cpd.cpp
+++ b/plugins/cpd/kernel/CpdKernel.cpp
@@ -32,74 +32,71 @@
  * OF SUCH DAMAGE.
  ****************************************************************************/
 
-#include "kernel/Cpd.hpp"
+#include "kernel/CpdKernel.hpp"
 #include <pdal/pdal_macros.hpp>
 
-#include <cpd/rigid.hpp>
 #include <cpd/nonrigid.hpp>
+#include <cpd/rigid.hpp>
 
-#include <pdal/KernelFactory.hpp>
-#include <pdal/StageFactory.hpp>
 #include <filters/CropFilter.hpp>
 #include <io/BufferReader.hpp>
+#include <pdal/KernelFactory.hpp>
+#include <pdal/StageFactory.hpp>
+#include <pdal/EigenUtils.hpp>
 
-namespace pdal
-{
+namespace pdal {
 
 static PluginInfo const s_info = PluginInfo(
-                                     "kernels.cpd",
-                                     "CPD Kernel",
-                                     "http://pdal.io/kernels/kernels.cpd.html" );
+    "kernels.cpd", "CPD Kernel", "http://pdal.io/kernels/kernels.cpd.html");
 
 CREATE_SHARED_PLUGIN(1, 0, CpdKernel, Kernel, s_info)
 
-std::string CpdKernel::getName() const {
-    return s_info.name;
-}
+std::string CpdKernel::getName() const { return s_info.name; }
 
-void CpdKernel::addSwitches(ProgramArgs& args)
-{
-    Arg& method = args.add("method,M", "registration method (rigid, nonrigid)",
-                           m_method);
+void CpdKernel::addSwitches(ProgramArgs& args) {
+    Arg& method =
+        args.add("method,M", "registration method (rigid, nonrigid)", m_method);
     method.setPositional();
-    Arg& filex = args.add("filex,x", "input file containing the source points",
-                          m_filex);
-    filex.setPositional();
-    Arg& filey = args.add("filey,y", "input file containg target points, "
-                          "i.e. the points that will be registered", m_filey);
-    filey.setPositional();
+    Arg& fixed =
+        args.add("fixed,f", "input file containing the fixed points", m_fixed);
+    fixed.setPositional();
+    Arg& moving = args.add("moving,m",
+                          "input file containing the moving points, "
+                          "i.e. the points that will be registered",
+                          m_moving);
+    moving.setPositional();
     Arg& output = args.add("output,o", "output file name", m_output);
     output.setPositional();
-    args.add("tolerance,t", "tolerance criterium", m_tolerance,
-             cpd::Rigid::DEFAULT_TOLERANCE);
-    args.add("max-iterations,m", "maximum number of iterations allowed",
-             m_max_it, cpd::Rigid::DEFAULT_MAX_ITERATIONS);
-    args.add("outliers,O", "the weight of noise and outliers",
-             m_outliers, cpd::Rigid::DEFAULT_OUTLIER_WEIGHT);
-    args.add("no-reflections,r", "Prevent reflections of the data",
-             m_no_reflections, true);
-    args.add("allow-scaling,S", "Allow scaling of the data",
-             m_allow_scaling, false);
-    args.add("beta,b", "std of gaussian filter (Green's function, used "
-             "for nonrigid registrations only)", m_beta, cpd::Nonrigid::DEFAULT_BETA);
-    args.add("lambda,l", "regularization weight (used for nonrigid "
-             "registrations only)", m_lambda, cpd::Nonrigid::DEFAULT_LAMBDA);
     args.add("bounds", "Extent (in XYZ) to clip output to", m_bounds);
-    args.add("sigma2",
-             "The starting sigma2 value. To improve CPD runs, set to a bit "
-             "more than you expect the average motion to be",
-             m_sigma2, 0.0);
-}
 
+    args.add("max-iterations", "maximum number of iterations allowed",
+             m_max_iterations, cpd::DEFAULT_MAX_ITERATIONS);
+    args.add("normalize", "whether cpd should normalize the points before running",
+             m_normalize, true);
+    args.add("outliers", "a number between zero and one that represents the tolerance for outliers",
+             m_outliers, cpd::DEFAULT_OUTLIERS);
+    args.add("sigma2", "the starting sigma2 value.",
+             m_sigma2, cpd::DEFAULT_SIGMA2);
+    args.add("tolerance", "the amount the error must change to continue iterations",
+             m_tolerance, cpd::DEFAULT_TOLERANCE);
+
+    args.add("reflections", "should rigid registrations allow reflections",
+            m_reflections, false);
+    args.add("scale", "should rigid registrations allow scaling",
+            m_scale, false);
+
+    args.add("beta", "beta parameter for nonrigid registrations",
+            m_beta, cpd::DEFAULT_BETA);
+    args.add("lambda", "lambda parameter for nonrigid registrations",
+            m_lambda, cpd::DEFAULT_LAMBDA);
+}
 
-cpd::Matrix CpdKernel::readFile(const std::string& filename)
-{
+cpd::Matrix CpdKernel::readFile(const std::string& filename) {
     Stage& reader = makeReader(filename, "");
 
     PointTable table;
     PointViewSet viewSet;
-    if (!m_bounds.empty())
-    {
+    if (!m_bounds.empty()) {
         Options boundsOptions;
         boundsOptions.add("bounds", m_bounds);
 
@@ -107,82 +104,42 @@ cpd::Matrix CpdKernel::readFile(const std::string& filename)
         crop.setOptions(boundsOptions);
         crop.prepare(table);
         viewSet = crop.execute(table);
-    }
-    else
-    {
+    } else {
         reader.prepare(table);
         viewSet = reader.execute(table);
     }
 
-    cpd::Matrix matrix(0, 3);
-    for (auto it = viewSet.begin(); it != viewSet.end(); ++it)
-    {
-        PointViewPtr view = *it;
-        point_count_t rowidx;
-        if (matrix.rows() == 0)
-        {
-            rowidx = 0;
-            matrix.resize(view->size(), 3);
-        }
-        else
-        {
-            rowidx = matrix.rows();
-            matrix.conservativeResize(matrix.rows() + view->size(), 3);
-        }
-
-        for (point_count_t bufidx = 0; bufidx < view->size(); ++bufidx, ++rowidx)
-        {
-            matrix(rowidx, 0) = view->getFieldAs<double>(Dimension::Id::X, bufidx);
-            matrix(rowidx, 1) = view->getFieldAs<double>(Dimension::Id::Y, bufidx);
-            matrix(rowidx, 2) = view->getFieldAs<double>(Dimension::Id::Z, bufidx);
-        }
-    }
-    return matrix;
+    return eigen::pointViewToEigen(**viewSet.begin());
 }
 
+int CpdKernel::execute() {
+    cpd::Matrix fixed = readFile(m_fixed);
+    cpd::Matrix moving = readFile(m_moving);
 
-int CpdKernel::execute()
-{
-    PointTable tableX;
-    PointTable tableY;
-
-    cpd::Matrix X = readFile(m_filex);
-    cpd::Matrix Y = readFile(m_filey);
-
-    if (X.rows() == 0 || Y.rows() == 0)
-    {
+    if (fixed.rows() == 0 || moving.rows() == 0) {
         throw pdal_error("No points to process.");
     }
 
     cpd::Matrix result;
     if (m_method == "rigid") {
         cpd::Rigid rigid;
-        rigid
-            .set_tolerance(m_tolerance)
-            .set_max_iterations(m_max_it)
-            .set_outlier_weight(m_outliers);
-        rigid
-            .no_reflections(m_no_reflections)
-            .allow_scaling(m_allow_scaling);
-        if (m_sigma2 > 0) {
-            result = rigid.compute(X, Y, m_sigma2).points;
-        } else {
-            result = rigid.compute(X, Y).points;
-        }
+        rigid.max_iterations(m_max_iterations)
+            .normalize(m_normalize)
+            .outliers(m_outliers)
+            .sigma2(m_sigma2)
+            .tolerance(m_tolerance);
+        rigid.reflections(m_reflections)
+            .scale(m_scale);
+        result = rigid.run(fixed, moving).points;
     } else if (m_method == "nonrigid") {
         cpd::Nonrigid nonrigid;
-        nonrigid
-            .set_tolerance(m_tolerance)
-            .set_max_iterations(m_max_it)
-            .set_outlier_weight(m_outliers);
-        nonrigid
-            .set_beta(m_beta)
-            .set_lambda(m_lambda);
-        if (m_sigma2 > 0) {
-            result = nonrigid.compute(X, Y, m_sigma2).points;
-        } else {
-            result = nonrigid.compute(X, Y).points;
-        }
+        nonrigid.max_iterations(m_max_iterations)
+            .normalize(m_normalize)
+            .outliers(m_outliers)
+            .sigma2(m_sigma2)
+            .tolerance(m_tolerance);
+        nonrigid.beta(m_beta).lambda(m_lambda);
+        result = nonrigid.run(fixed, moving).points;
     } else {
         std::stringstream ss;
         ss << "Invalid cpd method: " << m_method << std::endl;
@@ -199,26 +156,24 @@ int CpdKernel::execute()
     outLayout->registerDim(Dimension::Id::ZVelocity);
     PointViewPtr outView(new PointView(outTable));
 
-    size_t M = Y.rows();
-    for (size_t i = 0; i < M; ++i)
-    {
+    size_t rows = moving.rows();
+    for (size_t i = 0; i < rows; ++i) {
         outView->setField<double>(Dimension::Id::X, i, result(i, 0));
         outView->setField<double>(Dimension::Id::Y, i, result(i, 1));
         outView->setField<double>(Dimension::Id::Z, i, result(i, 2));
         outView->setField<double>(Dimension::Id::XVelocity, i,
-                                  Y(i, 0) - result(i, 0));
+                                  moving(i, 0) - result(i, 0));
         outView->setField<double>(Dimension::Id::YVelocity, i,
-                                  Y(i, 1) - result(i, 1));
+                                  moving(i, 1) - result(i, 1));
         outView->setField<double>(Dimension::Id::ZVelocity, i,
-                                  Y(i, 2) - result(i, 2));
+                                  moving(i, 2) - result(i, 2));
     }
 
     BufferReader reader;
     reader.addView(outView);
 
     Options writerOpts;
-    if (StageFactory::inferReaderDriver(m_output) == "writers.text")   
-    {
+    if (StageFactory::inferReaderDriver(m_output) == "writers.text") {
         writerOpts.add("order", "X,Y,Z,XVelocity,YVelocity,ZVelocity");
         writerOpts.add("keep_unspecified", false);
     }
@@ -229,4 +184,4 @@ int CpdKernel::execute()
     return 0;
 }
 
-} // namespace pdal
+}  // namespace pdal
diff --git a/plugins/cpd/kernel/Cpd.hpp b/plugins/cpd/kernel/CpdKernel.hpp
similarity index 92%
rename from plugins/cpd/kernel/Cpd.hpp
rename to plugins/cpd/kernel/CpdKernel.hpp
index 742da68..19d7b44 100644
--- a/plugins/cpd/kernel/Cpd.hpp
+++ b/plugins/cpd/kernel/CpdKernel.hpp
@@ -35,7 +35,6 @@
 #pragma once
 
 #include <cpd/matrix.hpp>
-
 #include <pdal/Kernel.hpp>
 #include <pdal/pdal_export.hpp>
 
@@ -56,18 +55,25 @@ private:
     cpd::Matrix readFile(const std::string& filename);
 
     std::string m_method;
-    std::string m_filex;
-    std::string m_filey;
+    std::string m_fixed;
+    std::string m_moving;
     std::string m_output;
-    double m_tolerance;
-    size_t m_max_it;
+    BOX3D m_bounds;
+
+    // cpd::Transform
+    size_t m_max_iterations;
+    bool m_normalize;
     double m_outliers;
-    bool m_no_reflections;
-    bool m_allow_scaling;
+    double m_sigma2;
+    double m_tolerance;
+
+    // cpd::Rigid
+    bool m_reflections;
+    bool m_scale;
+
+    // cpd::Nonrigid
     double m_beta;
     double m_lambda;
-    BOX3D m_bounds;
-    double m_sigma2;
 };
 
 } // namespace pdal
diff --git a/plugins/cpd/test/CpdKernelTest.cpp b/plugins/cpd/test/CpdKernelTest.cpp
deleted file mode 100644
index 59d6b0f..0000000
--- a/plugins/cpd/test/CpdKernelTest.cpp
+++ /dev/null
@@ -1,118 +0,0 @@
-/******************************************************************************
-* Copyright (c) 2015, Pete Gadomski <pete.gadomski at gmail.com>
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
-
-#include <pdal/pdal_test_main.hpp>
-#include "kernel/Cpd.hpp"
-
-#include <pdal/Filter.hpp>
-#include <pdal/KernelFactory.hpp>
-#include <pdal/PipelineManager.hpp>
-#include <pdal/PluginManager.hpp>
-#include <pdal/Reader.hpp>
-#include <pdal/util/FileUtils.hpp>
-#include <io/LasReader.hpp>
-#include "Support.hpp"
-
-namespace pdal
-{
-
-namespace
-{
-
-class CpdKernelTest : public ::testing::Test
-{
-public:
-
-    CpdKernelTest()
-        : m_x(Support::datapath("las/simple.las"))
-        , m_y(Support::datapath("las/simple_transformed.las"))
-        , m_outfile(Support::datapath("las/simple_cpd.las"))
-    {}
-
-protected:
-
-    virtual void SetUp()
-    {
-        PipelineManager mrManager;
-
-        Options readerOptions;
-        readerOptions.add("filename", m_x);
-        Stage& reader = mrManager.addReader("readers.las");
-        reader.setOptions(readerOptions);
-
-        Options transformationOptions;
-        transformationOptions.add("matrix",
-            "1 0 0 1\n0 1 0 2\n0 0 1 3\n0 0 0 1");
-        Stage& filter = mrManager.addFilter("filters.transformation");
-        filter.setInput(reader);
-        filter.setOptions(transformationOptions);
-
-        Options writerOptions;
-        writerOptions.add("filename", m_y);
-        Stage& writer = mrManager.addWriter("writers.las");
-        writer.setInput(filter);
-        writer.setOptions(writerOptions);
-
-        point_count_t np = mrManager.execute();
-    }
-
-    virtual void TearDown()
-    {
-        FileUtils::deleteFile(m_y);
-        FileUtils::deleteFile(m_outfile);
-    }
-
-    std::string m_x;
-    std::string m_y;
-    std::string m_outfile;
-
-};
-
-} // namespace
-
-
-TEST_F(CpdKernelTest, Execution)
-{
-    KernelFactory f;
-    void *stage = PluginManager::createObject("kernels.cpd");
-    std::unique_ptr<Kernel> cpdKernel(static_cast<Kernel*>(stage));
-
-    int argc = 4;
-    LogPtr log(new Log("pdal cpd", &std::clog));
-    StringList argv { "rigid", m_x, m_y, m_outfile };
-    int retval = cpdKernel->run(argv, log);
-    EXPECT_EQ(0, retval);
-}
-
-} // namespace pdal
diff --git a/plugins/greyhound/CMakeLists.txt b/plugins/greyhound/CMakeLists.txt
index f6c9370..8d89bb8 100644
--- a/plugins/greyhound/CMakeLists.txt
+++ b/plugins/greyhound/CMakeLists.txt
@@ -13,7 +13,7 @@ PDAL_ADD_PLUGIN(libname reader greyhound
     LINK_WITH ${PDAL_JSONCPP_LIB_NAME})
 target_include_directories(${libname} PRIVATE
     ${PDAL_JSONCPP_INCLUDE_DIR}
-    ${PDAL_VENDOR_DIR}) 
+    ${PDAL_VENDOR_DIR})
 
 if (WITH_TESTS)
     PDAL_ADD_TEST(greyhoundreadertest
@@ -22,5 +22,5 @@ if (WITH_TESTS)
         LINK_WITH ${libname} )
 target_include_directories(greyhoundreadertest PRIVATE
     ${PDAL_JSONCPP_INCLUDE_DIR}
-    ${PDAL_VENDOR_DIR}) 
+    ${PDAL_VENDOR_DIR})
 endif()
diff --git a/plugins/greyhound/io/GreyhoundReader.cpp b/plugins/greyhound/io/GreyhoundReader.cpp
index 54bee77..3aa4403 100644
--- a/plugins/greyhound/io/GreyhoundReader.cpp
+++ b/plugins/greyhound/io/GreyhoundReader.cpp
@@ -66,8 +66,7 @@ namespace
                 const std::string jsonError(reader.getFormattedErrorMessages());
                 if (!jsonError.empty())
                 {
-                    throw std::runtime_error(
-                            "Error during parsing: " + jsonError);
+                    throw pdal_error("Error during parsing: " + jsonError);
                 }
             }
         }
@@ -238,42 +237,20 @@ void GreyhoundReader::initialize(PointTableRef table)
     std::string infoUrl = m_url + "/resource/" + m_resource + "/info";
     log()->get(LogLevel::Debug) << "Fetching info URL: " << infoUrl <<
         std::endl;
-    m_info = parse(m_arbiter->get(infoUrl));
-
-    m_depthBegin = m_depthBeginArg;
-    m_depthEnd = m_depthEndArg;
-
-    if (m_info.isMember("scale"))
+    try
     {
-        m_scale.reset(new greyhound::Point(m_info["scale"]));
+        m_info = parse(m_arbiter->get(infoUrl));
     }
-
-    if (m_info.isMember("offset"))
+    catch (const pdal_error& err)
     {
-        m_offset.reset(new greyhound::Point(m_info["offset"]));
+        throwError(err.what());
     }
 
-    if (m_scale && !m_offset) m_offset.reset(new greyhound::Point(0, 0, 0));
-    if (m_offset && !m_scale) m_scale.reset(new greyhound::Point(1, 1, 1));
+    m_depthBegin = m_depthBeginArg;
+    m_depthEnd = m_depthEndArg;
 
     m_fullBounds = m_info["bounds"];
 
-    if (m_scale)
-    {
-        // Unscale the full bounds.  Since the query bounds will come in as
-        // native coordinates, don't modify those.
-        m_fullBounds = m_fullBounds.unscale(*m_scale, *m_offset);
-
-        // Now inverse our scale/offset.
-        m_scale->x = 1.0 / m_scale->x;
-        m_scale->y = 1.0 / m_scale->y;
-        m_scale->z = 1.0 / m_scale->z;
-
-        m_offset->x = -m_offset->x;
-        m_offset->y = -m_offset->y;
-        m_offset->z = -m_offset->z;
-    }
-
     m_queryBounds = toBounds(m_queryBox).intersection(m_fullBounds);
 
     if (m_pathsArg.size())
@@ -335,8 +312,7 @@ void GreyhoundReader::prepared(PointTableRef table)
 
         if (m_dims.back().m_id == Dimension::Id::Unknown)
         {
-            throw std::runtime_error(
-                    "Could not find dimension " + j["name"].asString());
+            throwError("Could not find dimension " + j["name"].asString());
         }
     }
 }
@@ -444,7 +420,8 @@ point_count_t GreyhoundReader::read(PointViewPtr view, point_count_t count)
     launchPooledReads(*view, zoomBounds, depthSplit, pool);
 
     pool.await();
-    if (m_error) throw pdal_error(*m_error);
+    if (m_error)
+        throwError(*m_error);
 
     return m_numPoints;
 }
@@ -509,7 +486,8 @@ void GreyhoundReader::launchPooledReads(
 
         // If any tasks failed, rethrow in the main thread.
         lock.lock();
-        if (m_error) throw pdal_error(*m_error);
+        if (m_error)
+            throwError(*m_error);
     }
 }
 
@@ -592,15 +570,20 @@ std::vector<point_count_t> GreyhoundReader::fetchVerticalHierarchy(
     url << "&depthEnd=" << depthEnd;
     url << "&vertical=true";
 
-    if (m_scale) url << "&scale=" << write(m_scale->toJson());
-    if (m_offset) url << "&offset=" << write(m_offset->toJson());
-
     log()->get(LogLevel::Debug) << "Hierarchy: " << url.str() << std::endl;
-    const Json::Value json(parse(m_arbiter->get(url.str())));
 
     std::vector<point_count_t> results;
-    for (const auto& v : json) results.push_back(v.asUInt64());
+    try
+    {
+        const Json::Value json(parse(m_arbiter->get(url.str())));
 
+        for (const auto& v : json)
+            results.push_back(v.asUInt64());
+    }
+    catch (const pdal_error& err)
+    {
+        throwError(err.what());
+    }
     return results;
 }
 
@@ -615,11 +598,18 @@ Json::Value GreyhoundReader::fetchHierarchy(
     url << "&depthBegin=" << depthBegin;
     url << "&depthEnd=" << depthEnd;
 
-    if (m_scale) url << "&scale=" << write(m_scale->toJson());
-    if (m_offset) url << "&offset=" << write(m_offset->toJson());
-
     log()->get(LogLevel::Debug) << "Hierarchy: " << url.str() << std::endl;
-    return parse(m_arbiter->get(url.str()));
+
+    Json::Value json;
+    try
+    {
+        json = parse(m_arbiter->get(url.str()));
+    }
+    catch (const pdal_error& err)
+    {
+        throwError(err.what());
+    }
+    return json;
 }
 
 point_count_t GreyhoundReader::fetchData(
@@ -633,8 +623,6 @@ point_count_t GreyhoundReader::fetchData(
     url << "/read?bounds=" << arbiter::http::sanitize(stringify(bounds));
     url << "&depthBegin=" << depthBegin;
     url << "&depthEnd=" << depthEnd;
-    if (m_scale) url << "&scale=" << write(m_scale->toJson());
-    if (m_offset) url << "&offset=" << write(m_offset->toJson());
 
 #ifdef PDAL_HAVE_LAZPERF
     url << "&compress=true";
diff --git a/plugins/greyhound/io/GreyhoundReader.hpp b/plugins/greyhound/io/GreyhoundReader.hpp
index 6aad81d..f489d66 100644
--- a/plugins/greyhound/io/GreyhoundReader.hpp
+++ b/plugins/greyhound/io/GreyhoundReader.hpp
@@ -77,8 +77,6 @@ private:
     std::size_t m_sparseDepth;
     Json::Value m_info;
     Json::Value m_schema;
-    std::unique_ptr<greyhound::Point> m_scale;
-    std::unique_ptr<greyhound::Point> m_offset;
 
     mutable std::mutex m_mutex;
     point_count_t m_numPoints = 0;
diff --git a/plugins/greyhound/test/GreyhoundReaderTest.cpp b/plugins/greyhound/test/GreyhoundReaderTest.cpp
index 98df4fc..7525d04 100644
--- a/plugins/greyhound/test/GreyhoundReaderTest.cpp
+++ b/plugins/greyhound/test/GreyhoundReaderTest.cpp
@@ -40,6 +40,8 @@
 #include <pdal/StageFactory.hpp>
 #include <pdal/util/Algorithm.hpp>
 
+#include <arbiter/arbiter.hpp>
+
 #include "Support.hpp"
 #include "../io/GreyhoundReader.hpp"
 #include "../io/bounds.hpp"
@@ -58,7 +60,6 @@ const greyhound::Bounds originBounds(
         greyhound::Bounds(-92369, 123812, -11170, -22218, 230745, 2226)
         .unscale(.01, greyhound::Point(637300, 851210, 520)));
 
-
 std::string toString(const greyhound::Bounds& b)
 {
     std::ostringstream ss;
@@ -69,8 +70,8 @@ std::string toString(const greyhound::Bounds& b)
     return ss.str();
 }
 
-const std::string server("http://dev.greyhound.io");
-const std::string resource("autzen-chipped");
+const std::string server("http://data.greyhound.io");
+const std::string resource("dev/autzen-chipped");
 
 Options greyhoundOptions(
         const greyhound::Bounds* b = nullptr,
@@ -100,7 +101,8 @@ public:
         : m_doTests(false)
     {
         static std::string path(server + "/resource/" + resource + "/info");
-        static bool good(arbiter::Arbiter().tryGetSize(path));
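+        // Run the tests only when the Greyhound resource is reachable.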
+        static arbiter::Arbiter a;
+        static bool good(a.hasDriver(path) && a.tryGetSize(path));
         m_doTests = good;
     }
 
diff --git a/plugins/hexbin/CMakeLists.txt b/plugins/hexbin/CMakeLists.txt
index 9c02216..a6d88d2 100644
--- a/plugins/hexbin/CMakeLists.txt
+++ b/plugins/hexbin/CMakeLists.txt
@@ -11,9 +11,8 @@ if (HEXER_FOUND)
             filters/HexBin.cpp
         LINK_WITH
             ${HEXER_LIBRARY})
-    target_include_directories(${libname} PRIVATE
-        ${PDAL_VENDOR_DIR}/pdalboost
-        ${HEXER_INCLUDE_DIR})
+    target_include_directories(${libname} PRIVATE ${PDAL_VENDOR_DIR}/pdalboost)
+    target_include_directories(${libname} PUBLIC ${HEXER_INCLUDE_DIR})
 
     if (WITH_TESTS)
         PDAL_ADD_TEST(hexbintest
diff --git a/plugins/hexbin/kernel/DensityKernel.cpp b/plugins/hexbin/kernel/DensityKernel.cpp
index f5a82bb..fee3df0 100644
--- a/plugins/hexbin/kernel/DensityKernel.cpp
+++ b/plugins/hexbin/kernel/DensityKernel.cpp
@@ -54,11 +54,18 @@ std::string DensityKernel::getName() const { return s_info.name; }
 
 void DensityKernel::addSwitches(ProgramArgs& args)
 {
-    args.add("input,i", "input point cloud file name", m_inputFile);
-    args.add("output,o", "output vector data source", m_outputFile);
+    args.add("input,i", "input point cloud file name", m_inputFile).setPositional();
+    args.add("output,o", "output vector data source", m_outputFile).setPositional();
     args.add("ogrdriver,f", "OGR driver name to use ", m_driverName,
         "ESRI Shapefile");
     args.add("lyr_name", "OGR layer name to use", m_layerName, "");
+    args.add("sample_size", "Sample size for auto-edge length calculation",
+        m_sampleSize, 5000U);
+    args.add("threshold", "Required cell density", m_density, 15);
+    args.add("edge_length", "Length of hex edge", m_edgeLength);
+    args.add("hole_cull_area_tolerance", "Tolerance area to "
+            "apply to holes before cull", m_cullArea);
+    args.add("smooth", "Smooth boundary output", m_doSmooth, true);
 }
 
 
@@ -91,8 +98,14 @@ int DensityKernel::execute()
     {
         m_manager.makeReader(m_inputFile, "");
     }
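+    // Forward the kernel's command-line switches to the filters.hexbin
+    // stage as options.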
+    Options options;
+    options.add("sample_size", m_sampleSize);
+    options.add("threshold", m_density);
+    options.add("edge_length", m_edgeLength);
+    options.add("hole_cull_area_tolerance", m_cullArea);
+    options.add("smooth", m_doSmooth);
     m_hexbinStage = &(m_manager.makeFilter("filters.hexbin",
-        *m_manager.getStage()));
+        *m_manager.getStage(), options));
     m_manager.execute();
     outputDensity(m_manager.pointTable().anySpatialReference());
     return 0;
diff --git a/plugins/hexbin/kernel/DensityKernel.hpp b/plugins/hexbin/kernel/DensityKernel.hpp
index 5691b87..f009de8 100644
--- a/plugins/hexbin/kernel/DensityKernel.hpp
+++ b/plugins/hexbin/kernel/DensityKernel.hpp
@@ -57,6 +57,11 @@ private:
     std::string m_outputFile;
     std::string m_driverName;
     std::string m_layerName;
+    uint32_t m_sampleSize;
+    int32_t m_density;
+    double m_edgeLength;
+    double m_cullArea;
+    bool m_doSmooth;
 
     virtual void addSwitches(ProgramArgs& args);
     void outputDensity(pdal::SpatialReference const& ref);
diff --git a/plugins/icebridge/io/Hdf5Handler.cpp b/plugins/icebridge/io/Hdf5Handler.cpp
index 4f4c342..0c24fdf 100644
--- a/plugins/icebridge/io/Hdf5Handler.cpp
+++ b/plugins/icebridge/io/Hdf5Handler.cpp
@@ -1,6 +1,6 @@
 /******************************************************************************
 * Copyright (c) 2014, Connor Manning, connor at hobu.co
-* 
+*
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -56,7 +56,7 @@ void Hdf5Handler::initialize(
     }
     catch (const H5::FileIException&)
     {
-        throw pdal_error("Could not open HDF5 file.");
+        throw error("Could not open HDF5 file '" + filename + "'.");
     }
 
     try
@@ -74,13 +74,13 @@ void Hdf5Handler::initialize(
                         ColumnData(predType, dataSet, dataSpace)));
 
             // Does not check whether all the columns are the same length.
-            m_numPoints =
-                std::max((uint64_t)getColumnNumEntries(dataSetName), m_numPoints);
+            m_numPoints = std::max((uint64_t)getColumnNumEntries(dataSetName),
+                m_numPoints);
         }
     }
     catch (const H5::Exception&)
     {
-        throw pdal_error("Could not initialize data set information.");
+        throw error("Could not initialize data set information.");
     }
 }
 
@@ -116,16 +116,16 @@ void Hdf5Handler::getColumnEntries(
         columnData.dataSet.read(
                 data,
                 columnData.predType,
-                outSpace, 
+                outSpace,
                 columnData.dataSpace);
     }
     catch (const H5::Exception&)
     {
-        throw pdal_error("Could not read from dataset.");
+        throw error("Could not read from dataset.");
     }
 }
 
-hsize_t 
+hsize_t
 Hdf5Handler::getColumnNumEntries(const std::string& dataSetName) const
 {
     hsize_t entries = 0;
@@ -142,7 +142,7 @@ Hdf5Handler::getColumnData(const std::string& dataSetName) const
 
     if (columnDataIt == m_columnDataMap.end())
     {
-        throw pdal_error("Could not retrieve column data.");
+        throw error("Could not retrieve column data.");
     }
 
     return columnDataIt->second;
diff --git a/plugins/icebridge/io/Hdf5Handler.hpp b/plugins/icebridge/io/Hdf5Handler.hpp
index e33eb67..f782df2 100644
--- a/plugins/icebridge/io/Hdf5Handler.hpp
+++ b/plugins/icebridge/io/Hdf5Handler.hpp
@@ -61,6 +61,12 @@ namespace hdf5
 class Hdf5Handler
 {
 public:
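+    // Local exception type; reader stages catch it and rethrow via
+    // Stage::throwError().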
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     Hdf5Handler();
 
     void initialize(
diff --git a/plugins/icebridge/io/IcebridgeReader.cpp b/plugins/icebridge/io/IcebridgeReader.cpp
index f2ea00e..454d8b2 100644
--- a/plugins/icebridge/io/IcebridgeReader.cpp
+++ b/plugins/icebridge/io/IcebridgeReader.cpp
@@ -103,7 +103,14 @@ void IcebridgeReader::addDimensions(PointLayoutPtr layout)
 
 void IcebridgeReader::ready(PointTableRef table)
 {
-    m_hdf5Handler.initialize(m_filename, hdf5Columns);
+    try
+    {
+        m_hdf5Handler.initialize(m_filename, hdf5Columns);
+    }
+    catch (const Hdf5Handler::error& err)
+    {
+        throwError(err.what());
+    }
     m_index = 0;
     if (!m_metadataFile.empty())
     {
@@ -181,11 +188,9 @@ point_count_t IcebridgeReader::read(PointViewPtr view, point_count_t count)
                     view->setField(*di, nextId++, *ival++);
             }
         }
-        catch(...)
+        catch(const Hdf5Handler::error& err)
         {
-            std::ostringstream oss;
-            oss << getName() << ": Error fetching column data.";
-            throw pdal_error(oss.str());
+            throwError(err.what());
         }
     }
     return count;
@@ -200,9 +205,7 @@ void IcebridgeReader::initialize()
 {
     if (!m_metadataFile.empty() && !FileUtils::fileExists(m_metadataFile))
     {
-        std::ostringstream oss;
-        oss << "Invalid metadata file: '" << m_metadataFile << "'";
-        throw pdal_error(oss.str());
+        throwError("Invalid metadata file: '" + m_metadataFile + "'");
     }
 
     // Data are WGS84 (4326) with ITRF2000 datum (6656)
diff --git a/plugins/matlab/io/MatlabWriter.cpp b/plugins/matlab/io/MatlabWriter.cpp
index 4cca48b..6638a96 100644
--- a/plugins/matlab/io/MatlabWriter.cpp
+++ b/plugins/matlab/io/MatlabWriter.cpp
@@ -70,12 +70,8 @@ void MatlabWriter::prepared(PointTableRef table)
         {
             DimType dimType = table.layout()->findDimType(s);
             if (dimType.m_id == Dimension::Id::Unknown)
-            {
-                std::ostringstream oss;
-                oss << "Invalid dimension '" << s << "' specified for "
-                    "'output_dims' option.";
-                throw pdal_error(oss.str());
-            }
+                throwError("Invalid dimension '" + s + "' specified for "
+                    "'output_dims' option.");
             m_dimTypes.push_back(dimType);
         }
     }
@@ -86,11 +82,7 @@ void MatlabWriter::ready(PointTableRef table)
 {
     m_matfile = matOpen(m_filename.c_str(), "w");
     if (!m_matfile)
-    {
-        std::stringstream ss;
-        ss << "Could not open file for writing: " << m_filename;
-        throw pdal_error(ss.str());
-    }
+        throwError("Could not open file '" + m_filename + "' for writing.");
 }
 
 
@@ -107,26 +99,15 @@ void MatlabWriter::write(const PointViewPtr view)
     }
     mxArray * dimensionNames = mxCreateString(dimensionsString.str().c_str());
     if (!dimensionNames)
-    {
-        std::stringstream ss;
-        ss << "Could not create string '" << dimensionsString.str() << "'";
-        throw pdal_error(ss.str());
-    }
-    int result = matPutVariable(m_matfile, "Dimensions", dimensionNames);
-    if (result != 0)
-    {
-        std::stringstream ss;
-        ss << "Could not write dimension names to file: " << m_filename;
-        throw pdal_error(ss.str());
-    }
+        throwError("Could not create string '" + dimensionsString.str() + "'");
+    if (matPutVariable(m_matfile, "Dimensions", dimensionNames));
+        throwError("Could not write dimension names to file '" +
+            m_filename + "'.");
 
     mxArray * points = mxCreateDoubleMatrix(nPoints, nDimensions, mxREAL);
-    if (!points) {
-        std::stringstream ss;
-        ss << "Could not create a points array with dimensions " <<
-            nPoints << "x" << nDimensions;
-        throw pdal_error(ss.str());
-    }
+    if (!points)
+        throwError("Could not create a points array with dimensions " +
+            Utils::toString(nPoints) + "x" + Utils::toString(nDimensions));
 
     double * pointsPtr = mxGetPr(points);
     // Matlab is column-major
@@ -140,28 +121,17 @@ void MatlabWriter::write(const PointViewPtr view)
                     sizeof(double));
         }
     }
-    result = matPutVariable(m_matfile, "Points", points);
-    if (result != 0)
-    {
-        std::stringstream ss;
-        ss << "Could not write points to file: " << m_filename;
-        throw pdal_error(ss.str());
-    }
+    if (matPutVariable(m_matfile, "Points", points))
+        throwError("Could not write points to file '" + m_filename + "'.");
     mxDestroyArray(points);
 }
 
 
 void MatlabWriter::done(PointTableRef table)
 {
-    int result = matClose(m_matfile);
-    if (result != 0)
-    {
-        std::stringstream ss;
-        ss << "Unsuccessful write: " << m_filename;
-        throw pdal_error(ss.str());
-    }
+    if (matClose(m_matfile))
+        throwError("Unsuccessful write.");
     getMetadata().addList("filename", m_filename);
 }
 
-
 }
diff --git a/plugins/mbio/CMakeLists.txt b/plugins/mbio/CMakeLists.txt
new file mode 100644
index 0000000..677ec87
--- /dev/null
+++ b/plugins/mbio/CMakeLists.txt
@@ -0,0 +1,27 @@
+#
+# MBIO plugin CMake configuration (MB-System)
+#
+
+find_package(MBSystem)
+if (MBSYSTEM_FOUND)
+    PDAL_ADD_PLUGIN(reader_libname reader mbio
+        FILES
+            io/MbReader.cpp
+            io/MbFormat.cpp
+            io/MbError.cpp
+        LINK_WITH ${MBSYSTEM_LIBRARY})
+    target_include_directories(${reader_libname} PRIVATE
+        ${PDAL_IO_DIR}
+        ${MBSYSTEM_INCLUDE_DIR})
+    target_compile_definitions(${reader_libname} PRIVATE -DHAVE_MBSYSTEM=1)
+
+    if (WITH_TESTS)
+        PDAL_ADD_TEST(mbsystemtest
+            FILES test/MBSystemTest.cpp
+            LINK_WITH ${reader_libname})
+        target_include_directories(mbsystemtest PRIVATE
+            ${PDAL_IO_DIR})
+    endif()
+else()
+    message(STATUS "Building without MB-Sytem support")
+endif()
diff --git a/pdal/PipelineWriter.cpp b/plugins/mbio/io/MbError.cpp
similarity index 51%
copy from pdal/PipelineWriter.cpp
copy to plugins/mbio/io/MbError.cpp
index 0e787a0..307209b 100644
--- a/pdal/PipelineWriter.cpp
+++ b/plugins/mbio/io/MbError.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2011, Michael P. Gerlek (mpg at flaxen.com)
+* Copyright (c) 2017, Howard Butler (hobu at hobu.co)
 *
 * All rights reserved.
 *
@@ -32,65 +32,76 @@
 * OF SUCH DAMAGE.
 ****************************************************************************/
 
-#include <pdal/PipelineWriter.hpp>
+#include <pdal/util/Utils.hpp>
 
-#include <pdal/Metadata.hpp>
-#include <pdal/PDALUtils.hpp>
-#include <pdal/Stage.hpp>
+#include "MbFormat.hpp"
+
+typedef std::pair<int, std::string> MbError;
 
 namespace pdal
 {
 
-namespace
+namespace MbError
 {
 
-void generateTags(Stage *stage, PipelineWriter::TagMap& tags)
+namespace
 {
-    auto tagExists = [tags](const std::string& tag)
-    {
-        for (auto& t : tags)
-        {
-            if (t.second == tag)
-                return true;
-        }
-        return false;
-    };
-
-    for (Stage *s : stage->getInputs())
-        generateTags(s, tags);
-    std::string tag;
-    for (size_t i = 1; ; ++i)
-    {
-        tag = stage->tagName() + std::to_string(i);
-        if (!tagExists(tag))
-            break;
-    }
-    tags[stage] = tag;
-}
 
-} // anonymous namespace
-
-namespace PipelineWriter
+std::map<int, std::string> errors =
 {
+    { 0, "No error." },
+    { 1, "Memory allocation failure." },
+    { 2, "Can't open file." },
+    { 3, "Bad format." },
+    { 4, "End of file detected." },
+    { 5, "Write failure." },
+    { 6, "No beams in bounds." },
+    { 7, "No beams in time window." },
+    { 8, "Bad descriptor." },
+    { 9, "Bad usage." },
+    { 10, "No pings binned." },
+    { 11, "Bad record type (kind)." },
+    { 12, "Bad parameter." },
+    { 13, "Bad buffer ID." },
+    { 14, "Bad system." },
+    { 15, "Bad data." },
+    { 16, "Missing data." },
+    { -1, "Time gap." },
+    { -2, "Position out of bounds." },
+    { -3, "Time out of bounds." },
+    { -4, "Speed too small." },
+    { -5, "Comment." },
+    { -6, "Sub-bottom." },
+    { -7, "Water column." },
+    { -8, "Other." },
+    { -9, "Unintelligible." },
+    { -10, "Ignore." },
+    { -11, "No data requested." },
+    { -12, "Buffer full." },
+    { -13, "No data loaded." },
+    { -14, "Buffer empty." },
+    { -15, "No data dumped." },
+    { -16, "No more data." },
+    { -17, "Data not inserted." },
+    { -18, "Bad projection." },
+    { -19, "Missing projections." },
+    { -20, "Missing Navattitude." },
+    { -21, "Not enough data." },
+    { -22, "File not found." },
+    { -23, "File locked." },
+    { -24, "Initialization failure." },
+};
 
-PDAL_DLL void writePipeline(Stage *stage, const std::string& filename)
-{
-    std::ostream *out = Utils::createFile(filename, false);
-    writePipeline(stage, *out);
-    Utils::closeFile(out);
-}
+} // unnamed namespace
 
-PDAL_DLL void writePipeline(Stage *stage, std::ostream& strm)
+std::string text(int errorCode)
 {
-    TagMap tags;
-    generateTags(stage, tags);
-
-    MetadataNode root;
-    stage->serialize(root, tags);
-    Utils::toJSON(root, strm);
+    auto ei = errors.find(errorCode);
+    if (ei != errors.end())
+        return ei->second;
+    return "";
 }
 
-} // namespace PipelineWriter
+} // namespace MbError
 
 } // namespace pdal
-
diff --git a/test/unit/LogTest.cpp b/plugins/mbio/io/MbError.hpp
similarity index 81%
copy from test/unit/LogTest.cpp
copy to plugins/mbio/io/MbError.hpp
index aa78e92..bbf5d4e 100644
--- a/test/unit/LogTest.cpp
+++ b/plugins/mbio/io/MbError.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2012, Michael P. Gerlek (mpg at flaxen.com)
+* Copyright (c) 2017, Howard Butler (hobu at hobu.co)
 *
 * All rights reserved.
 *
@@ -13,7 +13,7 @@
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided
 *       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Consulting LLC nor the
+*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
 *       names of its contributors may be used to endorse or promote
 *       products derived from this software without specific prior
 *       written permission.
@@ -32,13 +32,17 @@
 * OF SUCH DAMAGE.
 ****************************************************************************/
 
-#include <pdal/pdal_test_main.hpp>
-#include <pdal/Options.hpp>
-#include <pdal/PointView.hpp>
-#include <pdal/StageFactory.hpp>
-#include <io/FauxReader.hpp>
-#include "Support.hpp"
+#pragma once
 
-using namespace pdal;
+#include <string>
 
-//ABELL - Need some tests here, but what we had was crap.
+namespace pdal
+{
+
+namespace MbError
+{
+
+std::string text(int errorCode);
+} // namespace MbError
+
+} // namespace pdal
diff --git a/plugins/mbio/io/MbFormat.cpp b/plugins/mbio/io/MbFormat.cpp
new file mode 100644
index 0000000..379302d
--- /dev/null
+++ b/plugins/mbio/io/MbFormat.cpp
@@ -0,0 +1,194 @@
+/******************************************************************************
+* Copyright (c) 2017, Howard Butler (hobu at hobu.co)
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following
+* conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in
+*       the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+*       names of its contributors may be used to endorse or promote
+*       products derived from this software without specific prior
+*       written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+* OF SUCH DAMAGE.
+****************************************************************************/
+
+#include <pdal/util/Utils.hpp>
+
+#include "MbFormat.hpp"
+
+typedef std::pair<std::string, int> MbFormatInfo;
+
+namespace pdal
+{
+
+namespace
+{
+
+std::vector<MbFormatInfo> formats =
+{
+    { "MBF_SBSIOMRG", 11 },
+    { "MBF_SBSIOCEN", 12 },
+    { "MBF_SBSIOLSI", 13 },
+    { "MBF_SBURICEN", 14 },
+    { "MBF_SBURIVAX", 15 },
+    { "MBF_SBSIOSWB", 16 },
+    { "MBF_SBIFREMR", 17 },
+    { "MBF_HSATLRAW", 21 },
+    { "MBF_HSLDEDMB", 22 },
+    { "MBF_HSURICEN", 23 },
+    { "MBF_HSLDEOIH", 24 },
+    { "MBF_HSURIVAX", 25 },
+    { "MBF_HSUNKNWN", 26 },
+    { "MBF_SB2000RW", 31 },
+    { "MBF_SB2000SB", 32 },
+    { "MBF_SB2000SS", 33 },
+    { "MBF_SB2100RW", 41 },
+    { "MBF_SB2100B1", 42 },
+    { "MBF_SB2100B2", 43 },
+    { "MBF_EMOLDRAW", 51 },
+    { "MBF_EM12IFRM", 53 },
+    { "MBF_EM12DARW", 54 },
+    { "MBF_EM300RAW", 56 },
+    { "MBF_EM300MBA", 57 },
+    { "MBF_EM710RAW", 58 },
+    { "MBF_EM710MBA", 59 },
+    { "MBF_MR1PRHIG", 61 },
+    { "MBF_MR1ALDEO", 62 },
+    { "MBF_MR1BLDEO", 63 },
+    { "MBF_MR1PRVR2", 64 },
+    { "MBF_MBLDEOIH", 71 },
+    { "MBF_MBNETCDF", 75 },
+    { "MBF_MBNCDFXT", 76 },
+    { "MBF_CBAT9001", 81 },
+    { "MBF_CBAT8101", 82 },
+    { "MBF_HYPC8101", 83 },
+    { "MBF_XTFR8101", 84 },
+    { "MBF_RESONS8K", 85 },
+    { "MBF_SBATPROC", 86 },
+    { "MBF_RESON7KR", 88 },
+    { "MBF_RESON7KP", 89 },
+    { "MBF_BCHRTUNB", 91 },
+    { "MBF_ELMK2UNB", 92 },
+    { "MBF_BCHRXUNB", 93 },
+    { "MBF_L3XSERAW", 94 },
+    { "MBF_HSMDARAW", 101 },
+    { "MBF_HSMDLDIH", 102 },
+    { "MBF_DSL120PF", 111 },
+    { "MBF_DSL120SF", 112 },
+    { "MBF_GSFGENMB", 121 },
+    { "MBF_MSTIFFSS", 131 },
+    { "MBF_EDGJSTAR", 132 },
+    { "MBF_EDGJSTR2", 133 },
+    { "MBF_OICGEODA", 141 },
+    { "MBF_OICMBARI", 142 },
+    { "MBF_OMGHDCSJ", 151 },
+    { "MBF_SEGYSEGY", 160 },
+    { "MBF_MGD77DAT", 161 },
+    { "MBF_ASCIIXYZ", 162 },
+    { "MBF_ASCIIYXZ", 163 },
+    { "MBF_HYDROB93", 164 },
+    { "MBF_MBARIROV", 165 },
+    { "MBF_MBPRONAV", 166 },
+    { "MBF_NVNETCDF", 167 },
+    { "MBF_ASCIIXYT", 168 },
+    { "MBF_ASCIIYXT", 169 },
+    { "MBF_MBARROV2", 170 },
+    { "MBF_HS10JAMS", 171 },
+    { "MBF_HIR2RNAV", 172 },
+    { "MBF_MGD77TXT", 173 },
+    { "MBF_MGD77TAB", 174 },
+    { "MBF_SAMESURF", 181 },
+    { "MBF_HSDS2RAW", 182 },
+    { "MBF_HSDS2LAM", 183 },
+    { "MBF_IMAGE83P", 191 },
+    { "MBF_IMAGEMBA", 192 },
+    { "MBF_HYSWEEP1", 201 },
+    { "MBF_XTFB1624", 211 },
+    { "MBF_SWPLSSXI", 221 },
+    { "MBF_SWPLSSXP", 222 },
+    { "MBF_3DDEPTHP", 231 },
+    { "MBF_WASSPENL", 241 },
+    { "MBF_PHOTGRAM", 251 }
+};
+
+} // unnamed namespace
+
+MbFormat::MbFormat() : m_value(0)
+{}
+
+
+MbFormat::operator int () const
+{
+    return m_value;
+}
+
+
+std::istream& operator>>(std::istream& in, MbFormat& f)
+{
+    std::string s;
+
+    f.m_value = 0;
+    in >> s;
+    try
+    {
+        int val = stoi(s);
+        for (MbFormatInfo& fi : formats)
+            if (val == fi.second)
+            {
+                f.m_value = val;
+                break;
+            }
+    }
+    catch (const std::exception&)
+    {
+        s = Utils::toupper(s);
+        for (MbFormatInfo& fi : formats)
+            if (s == fi.first)
+            {
+                f.m_value = fi.second;
+                break;
+            }
+    }
+    if (f.m_value == 0)
+        in.setstate(std::ios_base::failbit);
+    return in;
+}
+
+
+std::ostream& operator<<(std::ostream& out, const MbFormat& f)
+{
+    std::string sval("MY_SYS_NONE");
+
+    for (MbFormatInfo& fi : formats)
+    {
+        if (f.m_value == fi.second)
+        {
+            sval = fi.first;
+            break;
+        }
+    }
+    out << sval;
+    return out;
+}
+
+} // namespace pdal
diff --git a/pdal/plang/BufferedInvocation.hpp b/plugins/mbio/io/MbFormat.hpp
similarity index 79%
rename from pdal/plang/BufferedInvocation.hpp
rename to plugins/mbio/io/MbFormat.hpp
index 134358c..8ce36e9 100644
--- a/pdal/plang/BufferedInvocation.hpp
+++ b/plugins/mbio/io/MbFormat.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2011, Michael P. Gerlek (mpg at flaxen.com)
+* Copyright (c) 2017, Howard Butler (hobu at hobu.co)
 *
 * All rights reserved.
 *
@@ -34,28 +34,24 @@
 
 #pragma once
 
-#include "../plang/Invocation.hpp"
-
-#include <pdal/PointView.hpp>
+#include <iostream>
+#include <string>
 
 namespace pdal
 {
-namespace plang
-{
 
-class PDAL_DLL BufferedInvocation : public Invocation
+class MbFormat
 {
+private:
+    int m_value;
+
 public:
-    BufferedInvocation(const Script& script);
+    MbFormat();
 
-    void begin(PointView& view, MetadataNode m);
-    void end(PointView& view, MetadataNode m);
+    operator int() const;
 
-private:
-    std::vector<void *> m_buffers;
-    BufferedInvocation& operator=(BufferedInvocation const& rhs); // nope
+    friend std::istream& operator>>(std::istream& in, MbFormat& f);
+    friend std::ostream& operator<<(std::ostream& out, const MbFormat& f);
 };
 
-} // namespace plang
 } // namespace pdal
-
diff --git a/plugins/mbio/io/MbReader.cpp b/plugins/mbio/io/MbReader.cpp
new file mode 100644
index 0000000..a0cd2f2
--- /dev/null
+++ b/plugins/mbio/io/MbReader.cpp
@@ -0,0 +1,238 @@
+/******************************************************************************
+* Copyright (c) 2017, Howard Butler (hobu at hobu.co)
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following
+* conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in
+*       the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+*       names of its contributors may be used to endorse or promote
+*       products derived from this software without specific prior
+*       written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+* OF SUCH DAMAGE.
+****************************************************************************/
+
+#include "MbReader.hpp"
+#include "MbError.hpp"
+
+#include <pdal/pdal_macros.hpp>
+#include <pdal/util/ProgramArgs.hpp>
+#include <pdal/DimUtil.hpp>
+
+#include <limits>
+
+namespace pdal
+{
+
+static PluginInfo const s_info = PluginInfo(
+    "readers.mbio",
+    "MBSystem Reader",
+    "http://www.pdal.io/stages/readers.mbio.html" );
+
+CREATE_SHARED_PLUGIN(1, 0, MbReader, Reader, s_info)
+
+std::string MbReader::getName() const { return s_info.name; }
+
+MbReader::MbReader() : m_bath(nullptr), m_bathlon(nullptr),
+    m_bathlat(nullptr), m_amp(nullptr), m_bathflag(nullptr), m_ss(nullptr),
+    m_sslon(nullptr), m_sslat(nullptr)
+{}
+
+
+MbReader::~MbReader()
+{}
+
+
+void MbReader::addArgs(ProgramArgs& args)
+{
+    args.add("format", "Name or number of MBIO data format",
+        m_format).setPositional();
+}
+
+
+void MbReader::addDimensions(PointLayoutPtr layout)
+{
+    using namespace Dimension;
+
+    std::vector<Dimension::Id> dims { Id::X, Id::Y, Id::Z, Id::Amplitude };
+
+    layout->registerDims(dims);
+}
+
+
+void MbReader::ready(PointTableRef table)
+{
+    int verbose = 0;
+    int pings = 0;  // Perhaps an argument for this?
+    int lonflip = 0; // Longitude -180 -> 180
+    double bounds[4] { -180, 180, -90, 90 };
+    int btime_i[7] { 0, 0, 0, 0, 0, 0, 0 };
+    int etime_i[7] { std::numeric_limits<int>::max(), 0, 0, 0, 0, 0, 0 };
+    double speedmin { 0 };
+    double timegap { 0 };
+    char *mbio_ptr;
+    double btime_d;
+    double etime_d;
+    int beams_bath;
+    int beams_amp;
+    int pixels_ss;
+    int error;
+
+    mb_read_init(verbose, const_cast<char *>(m_filename.data()),
+        (int)m_format, pings, lonflip, bounds, btime_i, etime_i,
+        speedmin, timegap, &m_ctx, &btime_d, &etime_d,
+        &beams_bath, &beams_amp, &pixels_ss, &error);
+    if (error > 0)
+        throwError("Can't initialize mb-system reader: " +
+            MbError::text(error));
+
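+    // Register our buffer pointers with MB-System so the library can
+    // allocate and resize them as ping dimensions change.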
+    mb_register_array(verbose, m_ctx, 1, sizeof(double),
+        (void **)&m_bath, &error);
+    mb_register_array(verbose, m_ctx, 1, sizeof(double),
+        (void **)&m_bathlon, &error);
+    mb_register_array(verbose, m_ctx, 1, sizeof(double),
+        (void **)&m_bathlat, &error);
+    mb_register_array(verbose, m_ctx, 1, sizeof(char),
+        (void **)&m_bathflag, &error);
+    mb_register_array(verbose, m_ctx, 2, sizeof(double),
+        (void **)&m_amp, &error);
+    mb_register_array(verbose, m_ctx, 3, sizeof(double),
+        (void **)&m_ss, &error);
+    mb_register_array(verbose, m_ctx, 3, sizeof(double),
+        (void **)&m_sslon, &error);
+    mb_register_array(verbose, m_ctx, 3, sizeof(double),
+        (void **)&m_sslat, &error);
+}
+
+
+bool MbReader::loadData()
+{
+    int verbose = 0;
+    int kind;
+    int pings;
+    int pingTime[7];
+    double pingTimeT;
+    double lon;
+    double lat;
+    double speed;
+    double heading;
+    double distance;
+    double altitude;
+    double sonarDepth;
+    int numBath;
+    int numAmp;
+    int numSs;
+    char comment[MB_COMMENT_MAXLINE];
+    int error;
+
+    while (true)
+    {
+        int status = mb_read(verbose, m_ctx, &kind, &pings, pingTime,
+            &pingTimeT, &lon, &lat, &speed, &heading, &distance, &altitude,
+            &sonarDepth, &numBath, &numAmp, &numSs, m_bathflag, m_bath,
+            m_amp, m_bathlon, m_bathlat, m_ss, m_sslon, m_sslat, comment,
+            &error);
+
+        if (status == 0)
+        {
+            if (error > 0)
+                throwError("Error reading data: " + MbError::text(error));
+            return false;
+        }
+
+        if (kind == 1)
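+        // kind == 1 is a survey ping (MB_DATA_DATA in MB-System); other
+        // record kinds (comments, navigation, ...) are skipped.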
+        {
+            for (size_t i = 0; i < (size_t)numBath; ++i)
+            {
+                if (m_bathflag[i] & 1)
+                    continue;
+                m_bathQueue.emplace(m_bathlon[i], m_bathlat[i], -m_bath[i],
+                    m_amp[i]);
+            }
+            if (numBath != numAmp)
+                log()->get(LogLevel::Warning) << getName() << ": Number of "
+                    "bathymetry values doesn't match number of amplitude "
+                    "values." << std::endl;
+            if (m_bathQueue.size())
+                break;
+        }
+    }
+    return true;
+}
+
+
+bool MbReader::processOne(PointRef& point)
+{
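+    // Refill the queue from the next survey ping when it runs dry.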
+    if (m_bathQueue.empty())
+        if (!loadData())
+            return false;
+
+    BathData& bd = m_bathQueue.front();
+
+    point.setField(Dimension::Id::X, bd.m_bathlon);
+    point.setField(Dimension::Id::Y, bd.m_bathlat);
+    point.setField(Dimension::Id::Z, bd.m_bath);
+    point.setField(Dimension::Id::Amplitude, bd.m_amp);
+    m_bathQueue.pop();
+    return true;
+}
+
+
+QuickInfo MbReader::inspect()
+{
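+    // Only the dimension names are reported; bounds and point counts
+    // aren't computed without reading the file.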
+    QuickInfo qi;
+    std::unique_ptr<PointLayout> layout(new PointLayout());
+
+    addDimensions(layout.get());
+
+    Dimension::IdList dims = layout->dims();
+    for (auto di = dims.begin(); di != dims.end(); ++di)
+        qi.m_dimNames.push_back(layout->dimName(*di));
+    qi.m_valid = true;
+    return qi;
+}
+
+
+point_count_t MbReader::read(PointViewPtr view, point_count_t count)
+{
+    using namespace pdal::Dimension;
+
+    PointRef point = view->point(0);
+    PointId id;
+    for (id = 0; id < count; ++id)
+    {
+        point.setPointId(id);
+        if (!processOne(point))
+            break;
+    }
+    return id;
+}
+
+
+void MbReader::done(PointTableRef table)
+{
+    int error;
+
+    mb_close(0, &m_ctx, &error);
+    getMetadata().addList("filename", m_filename);
+}
+
+} // namespace pdal
diff --git a/io/Ilvis2Reader.hpp b/plugins/mbio/io/MbReader.hpp
similarity index 66%
copy from io/Ilvis2Reader.hpp
copy to plugins/mbio/io/MbReader.hpp
index 03b0c5f..dfce627 100644
--- a/io/Ilvis2Reader.hpp
+++ b/plugins/mbio/io/MbReader.hpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2015, Howard Butler (howard at hobu.co)
+* Copyright (c) 2017, Howard Butler (howard at hobu.co)
 *
 * All rights reserved.
 *
@@ -32,72 +32,73 @@
 * OF SUCH DAMAGE.
 ****************************************************************************/
 
-#include <pdal/PointView.hpp>
+#pragma once
+
+#include <queue>
+
 #include <pdal/Reader.hpp>
-#include <pdal/util/IStream.hpp>
 #include <pdal/plugin.hpp>
-#include <map>
 
-#ifndef PDAL_HAVE_LIBXML2
-namespace pdal
+extern "C"
 {
-  class Ilvis2MetadataReader
-  {
-  public:
-      inline void readMetadataFile(std::string filename, pdal::MetadataNode* m) {};
-  };
+#include <mb_define.h>
 }
-#else
-    #include "Ilvis2MetadataReader.hpp"
-#endif
 
-extern "C" int32_t Ilvis2Reader_ExitFunc();
-extern "C" PF_ExitFunc Ilvis2Reader_InitPlugin();
+#include "MbFormat.hpp"
+
+extern "C" int32_t MbReader_ExitFunc();
+extern "C" PF_ExitFunc MbReader_InitPlugin();
 
 namespace pdal
 {
-class PDAL_DLL Ilvis2Reader : public pdal::Reader
+
+
+class PDAL_DLL MbReader : public pdal::Reader
 {
-public:
-    enum class IlvisMapping
+    struct BathData
     {
-      INVALID,
-      LOW,
-      HIGH,
-      ALL
+        double m_bathlon;
+        double m_bathlat;
+        double m_bath;
+        double m_amp;
+
+        BathData(double bathlon, double bathlat, double bath, double amp) :
+            m_bathlon(bathlon), m_bathlat(bathlat), m_bath(bath), m_amp(amp)
+        {}
     };
 
-    Ilvis2Reader()
-    {}
+public:
+    MbReader();
+    virtual ~MbReader();
+    MbReader& operator=(const MbReader&) = delete;
+    MbReader(const MbReader&) = delete;
 
     static void * create();
     static int32_t destroy(void *);
     std::string getName() const;
 
-    static Dimension::IdList getDefaultDimensions();
-
 private:
-    std::ifstream m_stream;
-    IlvisMapping m_mapping;
-    StringList m_fields;
-    size_t m_lineNum;
-    bool m_resample;
-    PointLayoutPtr m_layout;
-    std::string m_metadataFile;
-    Ilvis2MetadataReader m_mdReader;
-
     virtual void addDimensions(PointLayoutPtr layout);
+    virtual QuickInfo inspect();
     virtual void addArgs(ProgramArgs& args);
-    virtual void initialize(PointTableRef table);
-    virtual void ready(PointTableRef table);
-    virtual void done(PointTableRef table);
     virtual bool processOne(PointRef& point);
+    virtual void ready(PointTableRef table);
     virtual point_count_t read(PointViewPtr view, point_count_t count);
+    virtual void done(PointTableRef table);
+    bool loadData();
 
-    virtual void readPoint(PointRef& point, StringList s, std::string pointMap);
+    void *m_ctx;
+    double *m_bath;
+    double *m_bathlon;
+    double *m_bathlat;
+    double *m_amp;
+    char *m_bathflag;
+    double *m_ss;
+    double *m_sslon;
+    double *m_sslat;
+    std::queue<BathData> m_bathQueue;
+    MbFormat m_format;
 };
 
-std::ostream& operator<<(std::ostream& out,
-    const Ilvis2Reader::IlvisMapping& mval);
-
-} // namespace pdal
+} // namespace pdal
diff --git a/test/unit/LogTest.cpp b/plugins/mbio/test/MBSystemTest.cpp
similarity index 69%
copy from test/unit/LogTest.cpp
copy to plugins/mbio/test/MBSystemTest.cpp
index aa78e92..f227b86 100644
--- a/test/unit/LogTest.cpp
+++ b/plugins/mbio/test/MBSystemTest.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2012, Michael P. Gerlek (mpg at flaxen.com)
+* Copyright (c) 2017, Howard Butler (howard at hobu.co)
 *
 * All rights reserved.
 *
@@ -13,7 +13,7 @@
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided
 *       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Consulting LLC nor the
+*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
 *       names of its contributors may be used to endorse or promote
 *       products derived from this software without specific prior
 *       written permission.
@@ -33,12 +33,39 @@
 ****************************************************************************/
 
 #include <pdal/pdal_test_main.hpp>
+
 #include <pdal/Options.hpp>
 #include <pdal/PointView.hpp>
+#include <pdal/PipelineManager.hpp>
 #include <pdal/StageFactory.hpp>
-#include <io/FauxReader.hpp>
+#include <pdal/util/FileUtils.hpp>
+
 #include "Support.hpp"
 
 using namespace pdal;
 
-//ABELL - Need some tests here, but what we had was crap.
+std::string getFilePath()
+{
+    return Support::datapath("mbio/mbf_em300raw.mb56");
+}
+
+TEST(MBSystemReaderTest, testRead)
+{
+    StageFactory f;
+    Stage* reader(f.createStage("readers.mbio"));
+    EXPECT_TRUE(reader);
+
+    Option filename("filename", getFilePath());
+    Options options;
+    options.add("filename", getFilePath());
+    options.add("format", "mbf_em300raw");
+    reader->setOptions(options);
+
+    PointTable table;
+    reader->prepare(table);
+    PointViewSet viewSet = reader->execute(table);
+    EXPECT_EQ(viewSet.size(), 1u);
+    PointViewPtr view = *viewSet.begin();
+    EXPECT_EQ(view->size(), 112u);
+}
diff --git a/plugins/mrsid/io/MrsidReader.cpp b/plugins/mrsid/io/MrsidReader.cpp
index 9a9c235..0f26c64 100644
--- a/plugins/mrsid/io/MrsidReader.cpp
+++ b/plugins/mrsid/io/MrsidReader.cpp
@@ -112,7 +112,7 @@ void MrsidReader::addDimensions(PointLayoutPtr layout)
     using namespace Dimension;
 
     if (!m_PS)
-        throw pdal_error("MrSID object not initialized!");
+        throwError("MrSID object not initialized.");
     const LizardTech::PointInfo& pointinfo = m_PS->getPointInfo();
 
     // add a map for PDAL names that aren't the same as LT ones (GPSTime vs Time)
diff --git a/plugins/nitf/CMakeLists.txt b/plugins/nitf/CMakeLists.txt
index a4827bf..ad00d2d 100644
--- a/plugins/nitf/CMakeLists.txt
+++ b/plugins/nitf/CMakeLists.txt
@@ -19,6 +19,7 @@ PDAL_ADD_PLUGIN(reader_libname reader nitf
         ${NITRO_LIBRARIES})
 target_include_directories(${reader_libname} PRIVATE
     ${PDAL_VENDOR_DIR}/pdalboost
+	${PDAL_JSONCPP_INCLUDE_DIR}
     ${ROOT_DIR})
 
 #
@@ -35,6 +36,7 @@ PDAL_ADD_PLUGIN(writer_libname writer nitf
         ${NITRO_LIBRARIES})
 target_include_directories(${writer_libname} PRIVATE
     ${PDAL_VENDOR_DIR}/pdalboost
+	${PDAL_JSONCPP_INCLUDE_DIR}
     ${ROOT_DIR})
 
 if (WITH_TESTS)
@@ -43,12 +45,12 @@ if (WITH_TESTS)
             test/NitfWriterTest.cpp
         LINK_WITH ${writer_libname})
     target_include_directories(pdal_io_nitf_writer_test PRIVATE
-        ${ROOT_DIR})
+        ${ROOT_DIR} ${PDAL_JSONCPP_INCLUDE_DIR})
 
     PDAL_ADD_TEST(pdal_io_nitf_reader_test
         FILES
             test/NitfReaderTest.cpp
         LINK_WITH ${reader_libname})
     target_include_directories(pdal_io_nitf_reader_test PRIVATE
-        ${ROOT_DIR})
+        ${ROOT_DIR} ${PDAL_JSONCPP_INCLUDE_DIR})
 endif()
diff --git a/plugins/nitf/io/MetadataReader.cpp b/plugins/nitf/io/MetadataReader.cpp
index 5a987f5..ee0da98 100644
--- a/plugins/nitf/io/MetadataReader.cpp
+++ b/plugins/nitf/io/MetadataReader.cpp
@@ -62,11 +62,11 @@ void MetadataReader::read()
     // dump the file header
     //
     ::nitf::FileHeader header = m_record.getHeader();
-    ::nitf::FileSecurity security = header.getSecurityGroup();        
-    
+    ::nitf::FileSecurity security = header.getSecurityGroup();
+
     doFileHeader("FH", header);
     doSecurity("FH", "F", security);
-    
+
     ::nitf::Extensions ext = header.getExtendedSection();
     doExtensions("FH", ext);
 
@@ -82,19 +82,19 @@ void MetadataReader::read()
     for (i=0; i<num; i++)
     {
         const std::string key = "IM:" + std::to_string(i);
-        
+
         ::nitf::ImageSegment segment = *iter;
         ::nitf::ImageSubheader header = segment.getSubheader();
-        
+
         doImageSubheader(key, header);
-        
+
         ::nitf::Extensions ext = header.getExtendedSection();
         doExtensions(key, ext);
-        
+
         ::nitf::Extensions ext2 = header.getUserDefinedSection();
         doExtensions(key, ext2);
     }
-    
+
     //
     // dump the DE info, for each DE
     //
@@ -103,13 +103,13 @@ void MetadataReader::read()
     for (i=0; i<num; i++)
     {
         const std::string key = "DE:" + std::to_string(i);
-        
+
         ::nitf::DESegment segment = *iter;
         ::nitf::DESubheader header = segment.getSubheader();
         ::nitf::FileSecurity security = header.getSecurityGroup();
-        
+
         doDESubheader(key, header);
-        
+
         ::nitf::Extensions ext = header.getUserDefinedSection();
         doExtensions(key, ext);
     }
@@ -121,7 +121,7 @@ void MetadataReader::writeField(const std::string& parentkey,
                                 ::nitf::Field field)
 {
     std::string v;
-  
+
     if (field.getType() == (::nitf::Field::FieldType)NITF_BCS_A)
     {
         v = field.toString();
@@ -147,16 +147,16 @@ void MetadataReader::writeField(const std::string& parentkey,
     }
     else
     {
-        throw pdal_error("error reading nitf (2)");
-    }    
-   
+        throw error("error reading nitf (2)");
+    }
+
     Utils::trim(v);
-    const bool blank = (v.length() == 0);    
+    const bool blank = (v.length() == 0);
     if (!blank || (blank && m_showEmptyFields))
     {
         m_node.add<std::string>(parentkey + "." + key, v);
     }
-    
+
     return;
 }
 
@@ -164,7 +164,7 @@ void MetadataReader::writeField(const std::string& parentkey,
 void MetadataReader::writeInt(const std::string& parentkey,
                               const std::string& key,
                               int thevalue)
-{   
+{
     m_node.add<std::string>(parentkey + "." + key, std::to_string(thevalue));
 }
 
@@ -172,11 +172,11 @@ void MetadataReader::writeInt(const std::string& parentkey,
 void MetadataReader::writeString(const std::string& parentkey,
                                  const std::string& key,
                                  const std::string& thevalue)
-{   
+{
     m_node.add<std::string>(parentkey + "." + key, thevalue);
 }
-    
-   
+
+
 void MetadataReader::doFileHeader(const std::string& parentkey,
                                   ::nitf::FileHeader& header)
 {
@@ -203,7 +203,7 @@ void MetadataReader::doFileHeader(const std::string& parentkey,
     writeField("FH", "NUMDES", header.getNumDataExtensions());
     writeField("FH", "NUMRES", header.getNumReservedExtensions());
 }
-    
+
 
 void MetadataReader::doSecurity(const std::string& parentkey,
                                 const std::string& prefix,
@@ -226,7 +226,7 @@ void MetadataReader::doSecurity(const std::string& parentkey,
     writeField(parentkey, prefix + "SCTLN", security.getSecurityControlNumber());
 }
 
-    
+
 void MetadataReader::doBands(const std::string& key,
                              ::nitf::ImageSubheader& header)
 {
@@ -238,12 +238,12 @@ void MetadataReader::doBands(const std::string& key,
         doBand(subkey, bandinfo);
     }
 }
-    
+
 
 void MetadataReader::doBand(const std::string& key,
                             ::nitf::BandInfo& band)
 {
-    writeField(key, "IREPBAND", band.getRepresentation());    
+    writeField(key, "IREPBAND", band.getRepresentation());
     writeField(key, "ISUBCAT", band.getSubcategory());
     writeField(key, "IFC", band.getImageFilterCondition());
     writeField(key, "IMFLT", band.getImageFilterCode());
@@ -264,7 +264,7 @@ void MetadataReader::doImageSubheader(const std::string& key,
     writeField(key, "TGTID", subheader.getTargetId());
     writeField(key, "IID2", subheader.getImageTitle());
     writeField(key, "ISCLAS", subheader.getImageSecurityClass());
-    
+
     ::nitf::FileSecurity security = subheader.getSecurityGroup();
     doSecurity(key, "I", security);
 
@@ -283,7 +283,7 @@ void MetadataReader::doImageSubheader(const std::string& key,
 
     ::nitf::List list = subheader.getImageComments();
     doComments(key, list);
-    
+
     writeField(key, "IC", subheader.getImageCompression());
     writeField(key, "COMRAT", subheader.getCompressionRate());
     writeField(key, "NBANDS", subheader.getNumImageBands());
@@ -303,7 +303,7 @@ void MetadataReader::doImageSubheader(const std::string& key,
     writeField(key, "ILOC", subheader.getImageLocation());
     writeField(key, "IMAG", subheader.getImageMagnification());
 }
-    
+
 
 void MetadataReader::doDESubheader(const std::string& key,
                                    ::nitf::DESubheader& subheader)
@@ -333,7 +333,7 @@ void MetadataReader::doTRE(const std::string& key,
     // however: instead, we'll call getField(key) and get the
     // value as represented by a Field object (which will tell us
     // the formatting, etc).
-    
+
     ::nitf::TREFieldIterator iter = tre.begin();
     while (iter != tre.end())
     {
@@ -343,9 +343,9 @@ void MetadataReader::doTRE(const std::string& key,
             // if there's no pair object set (would be nice if
             // there was a is_valid() function or something...)
             ::nitf::Pair pair = *iter;
-            
+
             const char* key = pair.first();
-            
+
             // only put into metadata things that look like legit
             // stringy things
             if (strcmp(key, "raw_data") != 0)
@@ -353,17 +353,17 @@ void MetadataReader::doTRE(const std::string& key,
                 ::nitf::Field field = tre.getField(key);
                 writeField(tag, key, field);
             }
-        }            
+        }
         catch (::except::NullPointerReference&)
         {
             // oh, well - skip this one, go to the next iteration
         }
-        
+
         ++iter;
     }
 }
 
-    
+
 void MetadataReader::doExtensions(const std::string& key,
                                   ::nitf::Extensions& ext)
 {
@@ -377,7 +377,7 @@ void MetadataReader::doExtensions(const std::string& key,
         ++iter;
     }
 }
-    
+
 
 void MetadataReader::doComments(const std::string& key,
                                 ::nitf::List& list)
@@ -389,7 +389,7 @@ void MetadataReader::doComments(const std::string& key,
         ::nitf::Field field = *iter;
 
         const std::string subkey = "ICOM:" + std::to_string(i);
-        
+
         writeField(key, subkey, field);
 
         ++i;
@@ -398,6 +398,6 @@ void MetadataReader::doComments(const std::string& key,
 
     return;
 }
-    
+
 
 } // namespaces
diff --git a/plugins/nitf/io/MetadataReader.hpp b/plugins/nitf/io/MetadataReader.hpp
index ca1f4bb..c6d2491 100644
--- a/plugins/nitf/io/MetadataReader.hpp
+++ b/plugins/nitf/io/MetadataReader.hpp
@@ -76,6 +76,12 @@ namespace pdal
 class PDAL_DLL MetadataReader
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     MetadataReader(::nitf::Record&, MetadataNode&, bool showEmptyFields=true);
 
     void read();
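
The hunks above give MetadataReader (and, below, NitfFileReader and NitfFileWriter) a nested error type derived from std::runtime_error, so callers can catch a plugin-specific exception rather than a generic pdal_error. A minimal, self-contained sketch of the pattern; the Helper class and function names here are illustrative, not part of the patch:

    #include <stdexcept>
    #include <string>

    class Helper
    {
    public:
        // Nested exception type, mirroring MetadataReader::error.
        struct error : public std::runtime_error
        {
            error(const std::string& err) : std::runtime_error(err)
            {}
        };

        void read()
        {
            throw error("something went wrong");
        }
    };

    void consume(Helper& h)
    {
        try
        {
            h.read();
        }
        catch (const Helper::error& err)
        {
            // A PDAL stage would rethrow here via throwError(err.what()),
            // as NitfReader::initialize() does later in this patch.
        }
    }
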
diff --git a/plugins/nitf/io/NitfFileReader.cpp b/plugins/nitf/io/NitfFileReader.cpp
index 39915dd..96afa3a 100644
--- a/plugins/nitf/io/NitfFileReader.cpp
+++ b/plugins/nitf/io/NitfFileReader.cpp
@@ -60,14 +60,21 @@ NitfFileReader::NitfFileReader(const std::string& filename) :
     m_validLidarSegments(false),
     m_lidarDataSegment(0)
 {
-    register_tre_plugins();
+    try
+    {
+        register_tre_plugins();
+    }
+    catch (const pdal_error& err)
+    {
+        throw error(err.what());
+    }
 }
 
 
 void NitfFileReader::open()
 {
     if (nitf::Reader::getNITFVersion(m_filename.c_str()) == NITF_VER_UNKNOWN)
-        throw pdal_error("Unable to determine NITF file version");
+        throw error("Unable to determine NITF file version");
 
     // read the major NITF data structures, courtesy Nitro
     try
@@ -76,7 +83,7 @@ void NitfFileReader::open()
     }
     catch (nitf::NITFException& e)
     {
-        throw pdal_error("unable to open NITF file (" + e.getMessage() + ")");
+        throw error("unable to open NITF file (" + e.getMessage() + ")");
     }
     try
     {
@@ -85,7 +92,7 @@ void NitfFileReader::open()
     }
     catch (nitf::NITFException& e)
     {
-        throw pdal_error("unable to read NITF file (" + e.getMessage() + ")");
+        throw error("unable to read NITF file (" + e.getMessage() + ")");
     }
 
     // find the image segment corresponding to the lidar data, if any
@@ -94,7 +101,7 @@ void NitfFileReader::open()
     const bool imageOK = locateLidarImageSegment();
     if (REQUIRE_LIDAR_SEGMENTS && !imageOK)
     {
-        throw pdal_error("Unable to find lidar-compatible image "
+        throw error("Unable to find lidar-compatible image "
             "segment in NITF file");
     }
 
@@ -102,7 +109,7 @@ void NitfFileReader::open()
     const bool dataOK = locateLidarDataSegment();
     if (REQUIRE_LIDAR_SEGMENTS && !dataOK)
     {
-        throw pdal_error("Unable to find LIDARA data extension segment "
+        throw error("Unable to find LIDARA data extension segment "
             "in NITF file");
     }
 
@@ -140,14 +147,21 @@ void NitfFileReader::getLasOffset(uint64_t& offset, uint64_t& length)
             return;
         }
     }
-    throw pdal_error("error reading nitf (1)");
+    throw error("error reading nitf (1)");
 }
 
 
 void NitfFileReader::extractMetadata(MetadataNode& node)
 {
-    MetadataReader mr(m_record, node, SHOW_EMPTY_FIELDS);
-    mr.read();
+    try
+    {
+        MetadataReader mr(m_record, node, SHOW_EMPTY_FIELDS);
+        mr.read();
+    }
+    catch (const MetadataReader::error& err)
+    {
+        throw error(err.what());
+    }
 }
 
 
@@ -169,7 +183,7 @@ bool NitfFileReader::locateLidarImageSegment()
         ::nitf::Field field = subheader.getImageId();
         ::nitf::Field::FieldType fieldType = field.getType();
         if (fieldType != (::nitf::Field::FieldType)NITF_BCS_A)
-            throw pdal_error("error reading nitf (5)");
+            throw error("error reading nitf (5)");
         std::string iid1 = field.toString();
 
         // BUG: shouldn't allow "None" here!
@@ -202,11 +216,11 @@ bool NitfFileReader::locateLidarDataSegment()
 
         ::nitf::Field idField = subheader.getTypeID();
         if (idField.getType() != (::nitf::Field::FieldType)NITF_BCS_A)
-            throw pdal_error("error reading nitf (6)");
+            throw error("error reading nitf (6)");
 
         ::nitf::Field verField = subheader.getVersion();
         if (verField.getType() != (::nitf::Field::FieldType)NITF_BCS_N)
-            throw pdal_error("error reading nitf (7)");
+            throw error("error reading nitf (7)");
 
         const std::string id = idField.toString();
         const int ver = (int)verField;
diff --git a/plugins/nitf/io/NitfFileReader.hpp b/plugins/nitf/io/NitfFileReader.hpp
index c09aec5..3ee01f9 100644
--- a/plugins/nitf/io/NitfFileReader.hpp
+++ b/plugins/nitf/io/NitfFileReader.hpp
@@ -76,6 +76,12 @@ namespace pdal
 class PDAL_DLL NitfFileReader
 {
 public:
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
     NitfFileReader(const std::string& filename);
     NitfFileReader(const NitfFileReader&) = delete;
     NitfFileReader& operator=(const NitfFileReader&) = delete;
diff --git a/plugins/nitf/io/NitfFileWriter.cpp b/plugins/nitf/io/NitfFileWriter.cpp
index ad94728..40f8598 100644
--- a/plugins/nitf/io/NitfFileWriter.cpp
+++ b/plugins/nitf/io/NitfFileWriter.cpp
@@ -41,9 +41,16 @@
 namespace pdal
 {
 
-NitfFileWriter::NitfFileWriter()
+void NitfFileWriter::initialize()
 {
-    register_tre_plugins();
+    try
+    {
+        register_tre_plugins();
+    }
+    catch (const pdal_error& err)
+    {
+        throw error(err.what());
+    }
 }
 
 
@@ -83,15 +90,10 @@ void NitfFileWriter::write()
     // because this is a flex writer and we don't know the filename
     // until the execute() step.
     if (m_fileTitle.size() > header.getFileTitle().getLength())
-    {
-        std::ostringstream oss;
-
-        oss << "writers.nitf: Can't write file.  " <<
-            "FTITLE field (usually filename) can't be longer than " <<
-            header.getFileTitle().getLength() << ".  Use 'ftitle' option " <<
-            "to set appropriately sized FTITLE.";
-        throw pdal_error(oss.str());
-    }
+        throw error("Can't write file.  FTITLE field (usually filename) "
+            "can't be longer than " +
+            Utils::toString(header.getFileTitle().getLength()) +
+            ".  Use 'ftitle' option to set appropriately sized FTITLE.");
 
     header.getFileHeader().set("NITF");
     header.getComplianceLevel().set(m_cLevel);
@@ -200,12 +202,8 @@ void NitfFileWriter::write()
     {
         StringList v = Utils::split2(s, ':');
         if (v.size() != 2)
-        {
-            std::ostringstream oss;
-            oss << "writers.nitf: Invalid name/value for AIMIDB '" << s <<
-                "'.  Format: <name>:<value>.";
-            throw pdal_error(oss.str());
-        }
+            throw error("Invalid name/value for AIMIDB '" + s +
+                "'.  Format: <name>:<value>.");
         Utils::trim(v[0]);
         Utils::trim(v[1]);
         aimidbTre.setField(v[0], v[1]);
@@ -240,12 +238,8 @@ void NitfFileWriter::write()
     {
         StringList v = Utils::split2(s, ':');
         if (v.size() != 2)
-        {
-            std::ostringstream oss;
-            oss << "writers.nitf: Invalid name/value for ACFTB '" << s <<
-                "'.  Format: <name>:<value>.";
-            throw pdal_error(oss.str());
-        }
+            throw error("Invalid name/value for ACFTB '" + s +
+                "'.  Format: <name>:<value>.");
         Utils::trim(v[0]);
         Utils::trim(v[1]);
         acftbTre.setField(v[0], v[1]);
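
A recurring change in this file (and in the OCI writer below) replaces std::ostringstream message assembly with direct string concatenation, using Utils::toString for numeric values. A minimal sketch of the before/after shape, with std::to_string standing in for PDAL's Utils::toString:

    #include <sstream>
    #include <string>

    // Before: the message is built up in a stream, then thrown.
    std::string oldStyle(size_t maxLen)
    {
        std::ostringstream oss;
        oss << "FTITLE field can't be longer than " << maxLen << ".";
        return oss.str();
    }

    // After: one expression, no stream object to declare and manage.
    std::string newStyle(size_t maxLen)
    {
        return "FTITLE field can't be longer than " +
            std::to_string(maxLen) + ".";
    }
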
diff --git a/plugins/nitf/io/NitfFileWriter.hpp b/plugins/nitf/io/NitfFileWriter.hpp
index 9009c37..04c1fea 100644
--- a/plugins/nitf/io/NitfFileWriter.hpp
+++ b/plugins/nitf/io/NitfFileWriter.hpp
@@ -71,10 +71,18 @@ namespace pdal
 class PDAL_DLL NitfFileWriter
 {
 public:
-    NitfFileWriter();
+    struct error : public std::runtime_error
+    {
+        error(const std::string& err) : std::runtime_error(err)
+        {}
+    };
+
+    NitfFileWriter()
+    {}
     NitfFileWriter(const NitfFileWriter&) = delete;
     NitfFileWriter& operator=(const NitfFileWriter&) = delete;
 
+    void initialize();
     void setFilename(const std::string& filename)
         { m_filename = filename; }
     void wrapData(const char *buf, size_t size);
diff --git a/plugins/nitf/io/NitfReader.cpp b/plugins/nitf/io/NitfReader.cpp
index 9cac20e..caa202b 100644
--- a/plugins/nitf/io/NitfReader.cpp
+++ b/plugins/nitf/io/NitfReader.cpp
@@ -100,10 +100,17 @@ std::string NitfReader::getName() const { return s_info.name; }
 
 void NitfReader::initialize(PointTableRef table)
 {
-    NitfFileReader nitf(m_filename);
-    nitf.open();
-    nitf.getLasOffset(m_offset, m_length);
-    nitf.extractMetadata(m_metadata);
+    try
+    {
+        NitfFileReader nitf(m_filename);
+        nitf.open();
+        nitf.getLasOffset(m_offset, m_length);
+        nitf.extractMetadata(m_metadata);
+    }
+    catch (const NitfFileReader::error& err)
+    {
+        throwError(err.what());
+    }
     m_metadata.add("DESDATA_OFFSET", m_offset);
     m_metadata.add("DESDATA_LENGTH", m_length);
 
diff --git a/plugins/nitf/io/NitfWriter.cpp b/plugins/nitf/io/NitfWriter.cpp
index 7aaf0d0..9ce0a1c 100644
--- a/plugins/nitf/io/NitfWriter.cpp
+++ b/plugins/nitf/io/NitfWriter.cpp
@@ -85,20 +85,22 @@ BOX3D NitfWriter::reprojectBoxToDD(const SpatialReference& reference,
 
     BOX3D output(box);
     if (!gdal::reprojectBounds(output, reference.getWKT(), "EPSG:4326"))
-    {
-        std::ostringstream msg;
-
-        msg << getName() << ": Couldn't reproject corner points to "
-            "geographic: " << gdal::lastError();
-        throw pdal_error(msg.str());
-    }
+        throwError("Couldn't reproject corner points to geographic: " +
+            gdal::lastError());
     return output;
 }
 
 
 NitfWriter::NitfWriter()
 {
-    register_tre_plugins();
+    try
+    {
+        m_nitf.initialize();
+    }
+    catch (const NitfFileWriter::error& err)
+    {
+        throwError(err.what());
+    }
 }
 
 
@@ -147,11 +149,9 @@ void NitfWriter::doneFile()
     {
         m_nitf.write();
     }
-    catch (except::Throwable & t)
+    catch (const NitfFileWriter::error& err)
     {
-        std::ostringstream oss;
-        // std::cout << t.getTrace();
-        throw pdal_error(t.getMessage());
+        throwError(err.what());
     }
 }
 
diff --git a/plugins/oci/io/OciCommon.cpp b/plugins/oci/io/OciCommon.cpp
index d8ae64a..fb2e034 100644
--- a/plugins/oci/io/OciCommon.cpp
+++ b/plugins/oci/io/OciCommon.cpp
@@ -55,7 +55,8 @@ Connection connect(std::string connSpec)
         if (!input->good())
         {
             Utils::closeFile(input);
-            throw pdal_error("Unable to open connection filename for Oracle!");
+            throw connection_failed("Unable to open connection filename "
+                "for Oracle.");
         }
 
         std::string output;
diff --git a/plugins/oci/io/OciCommon.hpp b/plugins/oci/io/OciCommon.hpp
index de05ae6..e87c845 100644
--- a/plugins/oci/io/OciCommon.hpp
+++ b/plugins/oci/io/OciCommon.hpp
@@ -57,24 +57,6 @@ public:
     {}
 };
 
-
-class buffer_too_small : public pdal_error
-{
-public:
-    buffer_too_small(std::string const& msg)
-        : pdal_error(msg)
-    {}
-};
-
-
-class read_error : public pdal_error
-{
-public:
-    read_error(std::string const& msg)
-        : pdal_error(msg)
-    {}
-};
-
 class Block
 {
 public:
diff --git a/plugins/oci/io/OciReader.cpp b/plugins/oci/io/OciReader.cpp
index 54dc996..9701454 100644
--- a/plugins/oci/io/OciReader.cpp
+++ b/plugins/oci/io/OciReader.cpp
@@ -68,13 +68,19 @@ void OciReader::addArgs(ProgramArgs& args)
 void OciReader::initialize()
 {
     m_compression = false;
-    m_connection = connect(m_connSpec);
+    try
+    {
+        m_connection = connect(m_connSpec);
+    }
+    catch (const connection_failed& err)
+    {
+        throwError(err.what());
+    }
     m_block = BlockPtr(new Block(m_connection));
 
     gdal::registerDrivers();
     if (m_query.empty())
-        throw pdal_error("'query' statement is empty. No data can be read "
-            "from pdal::OciReader");
+        throwError("'query' statement is empty. No data can be read.");
 
     m_stmt = Statement(m_connection->CreateStatement(m_query.c_str()));
     m_stmt->Execute(0);
@@ -86,7 +92,7 @@ void OciReader::initialize()
 
     // Fetch an initial row of data.
     if (!m_stmt->Fetch())
-        throw pdal_error("Unable to fetch a point cloud entry entry!");
+        throwError("Unable to fetch a point cloud entry entry.");
     m_block->setFetched();
 
     // If the spatial reference wasn't provided as an option, fetch it from
@@ -179,12 +185,8 @@ void OciReader::validateQuery()
     }
 
     if (!typeCorrect)
-    {
-        std::ostringstream oss;
-        oss << "Select statement '" << m_query <<
-            "' does not fetch a SDO_PC object.";
-        throw pdal_error(oss.str());
-    }
+        throwError("Select statement '" + m_query + "' does not fetch "
+            "a SDO_PC object.");
 
     // If we found all the fields, the list of required fields will be empty.
     // If not, throw an exception.
@@ -201,7 +203,7 @@ void OciReader::validateQuery()
             if (i != reqFields.end())
                oss << ",";
         }
-        throw pdal_error(oss.str());
+        throwError(oss.str());
     }
 }
 
@@ -330,7 +332,7 @@ point_count_t OciReader::readPointMajor(PointView& view,
             numRead++;
         }
 #else
-        throw pdal_error("Can't decompress without LAZperf.");
+        throwError("Can't decompress without LAZperf.");
 #endif
     }
     else
@@ -403,7 +405,7 @@ void OciReader::readBlob(Statement stmt, BlockPtr block)
 
     if (!stmt->ReadBlob(block->locator, (void*)(block->chunk.data()),
                         block->chunk.size() , &amountRead))
-        throw pdal_error("Did not read all blob data!");
+        throwError("Did not read all blob data.");
 
     block->chunk.resize(amountRead);
 }
diff --git a/plugins/oci/io/OciWriter.cpp b/plugins/oci/io/OciWriter.cpp
index 41925be..38df990 100644
--- a/plugins/oci/io/OciWriter.cpp
+++ b/plugins/oci/io/OciWriter.cpp
@@ -73,112 +73,6 @@ OciWriter::OciWriter()
 {}
 
 
-/**
-Options OciWriter::getDefaultOptions()
-{
-    Options options;
-
-    Option solid("solid", false,
-        "Define the point cloud's PC_EXTENT geometry gtype as (1,1007,3) "
-        "instead of the normal (1,1003,3), and use gtype 3008/2008 vs "
-        " 3003/2003 for BLK_EXTENT geometry values.");
-    Option overwrite("overwrite", false,
-        "Wipe the block table and recreate it before loading data");
-    Option srid("srid", 0,
-        "The Oracle numerical SRID value to use for PC_EXTENT, "
-        "BLK_EXTENT, and indexing");
-    Option stream_output_precision("stream_output_precision", 8,
-        "The number of digits past the decimal place for outputting "
-        "floats/doubles to streams. This is used for creating the SDO_PC "
-        "object and adding the index entry to the USER_SDO_GEOM_METADATA "
-        "for the block table");
-    Option connection("connection", "",
-        "Oracle connection string to connect to database");
-    Option block_table_name("block_table_name", "output",
-        "The table in which block data for the created SDO_PC will be placed");
-    Option block_table_partition_column("block_table_partition_column", "",
-        "The column name for which 'block_table_partition_value' will "
-        "be placed in the 'block_table_name'");
-    Option block_table_partition_value("block_table_partition_value", 0,
-        "Integer value to use to assing partition IDs in the block table. "
-        "Used in conjunction with 'block_table_partition_column'");
-    Option base_table_name("base_table_name", "hobu",
-        "The name of the table which will contain the SDO_PC object");
-    Option cloud_column_name("cloud_column_name", "CLOUD",
-        "The column name in 'base_table_name' that will hold the SDO_PC "
-        "object");
-    Option base_table_aux_columns("base_table_aux_columns", "",
-        "Quoted, comma-separated list of columns to add to the SQL that "
-        "gets executed as part of the point cloud insertion into the "
-        "'base_table_name' table");
-    Option base_table_aux_values("base_table_aux_values", "",
-        "Quoted, comma-separated values that correspond to "
-        "'base_table_aux_columns', entries that will get inserted as part "
-        "of the creation of the SDO_PC entry in the 'base_table_name' table");
-    Option base_table_boundary_column("base_table_boundary_column", "",
-        "The SDO_GEOMETRY column in 'base_table_name' in which to insert "
-        "the WKT in 'base_table_boundary_wkt' representing a boundary for "
-        "the SDO_PC object. Note this is not the same as the "
-        "'base_table_bounds', which is just a bounding box that is placed "
-        "on the SDO_PC object itself.");
-    Option base_table_boundary_wkt("base_table_boundary_wkt", "",
-        "WKT, in the form of a string or a file location, to insert into "
-        "the SDO_GEOMTRY column defined by 'base_table_boundary_column'");
-    Option pre_block_sql("pre_block_sql", "",
-        "SQL, in the form of a string or file location, that is executed "
-        "after the SDO_PC object has been created but before the block data "
-        "in 'block_table_name' are inserted into the database");
-    Option pre_sql("pre_sql", "",
-        "SQL, in the form of a string or file location, that is executed "
-        "before the SDO_PC object is created.");
-    Option post_block_sql("post_block_sql", "",
-        "SQL, in the form of a string or file location, that is executed "
-        "after the block data in 'block_table_name' have been inserted");
-    Option base_table_bounds("base_table_bounds", BOX3D(),
-        "A bounding box, given in the Oracle SRID specified in 'srid' to "
-        "set on the PC_EXTENT object of the SDO_PC. If none is specified, "
-        "the cumulated bounds of all of the block data are used.");
-    Option pc_id("pc_id", -1, "Point Cloud id");
-    Option do_trace("do_trace", false,
-        "turn on server-side binds/waits tracing -- needs ALTER SESSION privs");
-    Option stream_chunks("stream_chunks", false,
-        "Stream block data chunk-wise by the DB's chunk size rather than "
-        "as an entire blob");
-    Option blob_chunk_count("blob_chunk_count", 16,
-        "When streaming, the number of chunks per write to use");
-    Option store_dimensional_orientation("store_dimensional_orientation", false,
-        "Store the points oriented in DIMENSION_INTERLEAVED instead of "
-        "POINT_INTERLEAVED orientation");
-    options.add(is3d);
-    options.add(solid);
-    options.add(overwrite);
-    options.add(srid);
-    options.add(stream_output_precision);
-    options.add(connection);
-    options.add(block_table_name);
-    options.add(block_table_partition_column);
-    options.add(block_table_partition_value);
-    options.add(base_table_name);
-    options.add(cloud_column_name);
-    options.add(base_table_aux_columns);
-    options.add(base_table_aux_values);
-    options.add(base_table_boundary_column);
-    options.add(base_table_boundary_wkt);
-    options.add(pre_block_sql);
-    options.add(pre_sql);
-    options.add(post_block_sql);
-    options.add(base_table_bounds);
-    options.add(pc_id);
-    options.add(do_trace);
-    options.add(stream_chunks);
-    options.add(blob_chunk_count);
-    options.add(store_dimensional_orientation);
-
-    return options;
-}
-**/
-
-
 void OciWriter::runCommand(std::ostringstream const& command)
 {
     Statement statement(m_connection->CreateStatement(command.str().c_str()));
@@ -281,7 +175,6 @@ void OciWriter::createSDOEntry()
     else
         s_srid << m_srid;
 
-    double tolerance = 0.05;
     BOX3D e = m_bounds;
     if (isGeographic(m_srid))
     {
@@ -293,21 +186,21 @@ void OciWriter::createSDOEntry()
         e.minz = 0.0;
         e.maxz = 20000.0;
 
-        tolerance = 0.0005;
+        m_tolerance = 0.0005;
     }
 
     oss <<  "INSERT INTO user_sdo_geom_metadata VALUES ('" <<
         m_blockTableName << "','blk_extent', MDSYS.SDO_DIM_ARRAY(";
     oss << "MDSYS.SDO_DIM_ELEMENT('X', " << e.minx << "," <<
-        e.maxx <<"," << tolerance << "),"
+        e.maxx <<"," << m_tolerance << "),"
         "MDSYS.SDO_DIM_ELEMENT('Y', " << e.miny << "," <<
-        e.maxy <<"," << tolerance << ")";
+        e.maxy <<"," << m_tolerance << ")";
 
     if (m_3d)
     {
         oss << ",";
         oss <<"MDSYS.SDO_DIM_ELEMENT('Z', "<< e.minz << "," <<
-            e.maxz << "," << tolerance << ")";
+            e.maxz << "," << m_tolerance << ")";
     }
     oss << ")," << s_srid.str() << ")";
 
@@ -375,11 +268,8 @@ bool OciWriter::isGeographic(int32_t srid)
     }
     catch (pdal_error const& e)
     {
-        std::ostringstream oss;
-        oss << getName();
-        oss << ": Failed to fetch geographicness of srid " << srid << std::endl;
-        oss << e.what() << std::endl;
-        throw pdal_error(oss.str());
+        throwError("Failed to fetch geographicness of srid " +
+            Utils::toString(srid) + ": " + e.what());
     }
 
     std::string k = Utils::toupper(kind.get());
@@ -390,11 +280,7 @@ bool OciWriter::isGeographic(int32_t srid)
 std::string OciWriter::loadSQLData(std::string const& filename)
 {
     if (!Utils::fileExists(filename))
-    {
-        std::ostringstream oss;
-        oss << filename << " does not exist";
-        throw pdal_error(oss.str());
-    }
+        throwError("File '" + filename + "' does not exist");
 
     std::istream::pos_type size;
     std::istream* input = Utils::openFile(filename, true);
@@ -588,24 +474,17 @@ void OciWriter::createPCEntry()
         if (!Utils::fileExists(m_baseTableBoundaryWkt))
         {
             if (!isValidWKT(m_baseTableBoundaryWkt))
-            {
-                std::ostringstream oss;
-                oss << "WKT for base_table_boundary_wkt was not valid and '" <<
-                    m_baseTableBoundaryWkt << "' doesn't exist as a file";
-                throw pdal::pdal_error(oss.str());
-            }
+                throwError("WKT for base_table_boundary_wkt was not valid "
+                    "and '" + m_baseTableBoundaryWkt + "' doesn't exist as "
+                    "a file");
             wkt_s << m_baseTableBoundaryWkt;
         }
         else
         {
             std::string wkt = loadSQLData(m_baseTableBoundaryWkt);
             if (!isValidWKT(wkt))
-            {
-                std::ostringstream oss;
-                oss << "WKT for base_table_boundary_wkt was from file '" <<
-                    m_baseTableBoundaryWkt << "' is not valid";
-                throw pdal::pdal_error(oss.str());
-            }
+                throwError("WKT for base_table_boundary_wkt was from file '" +
+                    m_baseTableBoundaryWkt + "' is not valid");
             wkt_s << wkt;
         }
     }
@@ -624,10 +503,8 @@ void OciWriter::createPCEntry()
     }
     catch (std::runtime_error const& e)
     {
-        std::ostringstream oss;
-        oss << "Failed at creating Point Cloud entry into " <<
-            m_baseTableName << " table. Does the table exist? " << e.what();
-        throw pdal_error(oss.str());
+        throwError("Failed at creating Point Cloud entry into " +
+            m_baseTableName + " table: " + e.what() + ".");
     }
 }
 
@@ -710,6 +587,7 @@ void OciWriter::addArgs(ProgramArgs& args)
     args.add("compression", "Set to turn compression on", m_compression);
     args.add("pre_sql", "SQL to run before query", m_preSql);
     args.add("post_block_sql", "SQL to run when stage is done", m_postBlockSql);
+    args.add("tolerance", "Oracle geometry tolerance ", m_tolerance, 0.05);
 }
 
 
@@ -722,11 +600,19 @@ void OciWriter::initialize()
         Orientation::PointMajor;
 
     if (m_compression && (m_orientation == Orientation::DimensionMajor))
-        throw pdal_error("LAZperf compression not supported for "
-            "dimension-major point storage.");
+        throwError("LAZperf compression not supported for dimension-major "
+            "point storage.");
 
     gdal::registerDrivers();
-    m_connection = connect(m_connSpec);
+    try
+    {
+        m_connection = connect(m_connSpec);
+    }
+    catch (const connection_failed& err)
+    {
+        throwError(err.what());
+    }
+
     m_gtype = getGType();
 }
 
@@ -867,14 +753,14 @@ void OciWriter::writePointMajor(PointViewPtr view, std::vector<char>& outbuf)
                 compressor.compress(ptBuf.data(), size);
             }
         }
-        catch (pdal_error)
+        catch (const pdal_error& err)
         {
             compressor.done();
-            throw;
+            throwError(err.what());
         }
         compressor.done();
 #else
-        throw pdal_error("Can't compress without LAZperf.");
+        throwError("Can't compress without LAZperf.");
 #endif
     }
     else
@@ -1021,12 +907,8 @@ void OciWriter::writeTile(const PointViewPtr view)
     }
     catch (std::runtime_error const& e)
     {
-        std::ostringstream oss;
-        oss << getName();
-        oss << ": Failed to insert block # into '" << m_blockTableName <<
-            "' table. Does the table exist? "  << std::endl;
-        oss << e.what() << std::endl;
-        throw pdal_error(oss.str());
+        throwError("Failed to insert block # into '" + m_blockTableName +
+            "' table: " + e.what());
     }
 
     if (m_streamChunks)
@@ -1128,12 +1010,8 @@ void OciWriter::updatePCExtent()
     }
     catch (std::runtime_error const& e)
     {
-        std::ostringstream oss;
-        oss << getName();
-        oss << ": Failed to update cloud extent in '" << m_baseTableName <<
-            "' table with id " << m_pc_id << ". Does the table exist? " <<
-            std::endl << e.what() << std::endl;
-        throw pdal_error(oss.str());
+        throwError("Failed to update cloud extent in '" + m_baseTableName +
+            "' table with id " + Utils::toString(m_pc_id) + ": " + e.what());
     }
     m_connection->Commit();
 }
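
The tolerance used in createSDOEntry() is no longer a local constant: it becomes the m_tolerance member declared below in OciWriter.hpp, exposed through a new 'tolerance' option (default 0.05, tightened to 0.0005 for geographic SRIDs). A toy sketch of an option with a default value; the ProgramArgs stub here is illustrative only, the real class lives in pdal/util/ProgramArgs.hpp:

    #include <string>

    // Toy stand-in for PDAL's ProgramArgs: binds a default to a member.
    struct ProgramArgs
    {
        void add(const std::string&, const std::string&, double& var,
            double def)
        { var = def; }
    };

    struct TolDemo
    {
        double m_tolerance;

        void addArgs(ProgramArgs& args)
        {
            // Mirrors the new option: default 0.05 unless the user sets it.
            args.add("tolerance", "Oracle geometry tolerance",
                m_tolerance, 0.05);
        }

        void adjustForSrid(bool geographic)
        {
            if (geographic)
                m_tolerance = 0.0005;   // degrees rather than linear units
        }
    };
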
diff --git a/plugins/oci/io/OciWriter.hpp b/plugins/oci/io/OciWriter.hpp
index 8600f6c..8d73a58 100644
--- a/plugins/oci/io/OciWriter.hpp
+++ b/plugins/oci/io/OciWriter.hpp
@@ -124,6 +124,7 @@ private:
     std::string m_connSpec;
     std::string m_preSql;
     std::string m_postBlockSql;
+    double m_tolerance;
 
     OciWriter& operator=(const OciWriter&); // not implemented
     OciWriter(const OciWriter&); // not implemented
diff --git a/plugins/p2g/CMakeLists.txt b/plugins/p2g/CMakeLists.txt
deleted file mode 100644
index 34728fb..0000000
--- a/plugins/p2g/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Points2grid plugin CMake configuration
-#
-find_package(P2G)
-if (P2G_FOUND)
-    PDAL_ADD_PLUGIN(libname writer p2g
-        FILES
-            io/P2gWriter.cpp
-        LINK_WITH
-            ${P2G_LIBRARY}
-    )
-    target_compile_definitions(${libname} PRIVATE -DHAVE_P2G=1)
-    target_include_directories(${libname} PRIVATE ${P2G_INCLUDE_DIR})
-endif()
diff --git a/plugins/p2g/io/P2gWriter.cpp b/plugins/p2g/io/P2gWriter.cpp
deleted file mode 100644
index a03b29c..0000000
--- a/plugins/p2g/io/P2gWriter.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/******************************************************************************
-* Copyright (c) 2011, Howard Butler, hobu.inc at gmail.com
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
-
-#include "P2gWriter.hpp"
-#include <pdal/PointView.hpp>
-#include <pdal/pdal_macros.hpp>
-#include <pdal/util/FileUtils.hpp>
-
-#include <iostream>
-#include <algorithm>
-
-#include <points2grid/Interpolation.hpp>
-
-namespace pdal
-{
-
-static PluginInfo const s_info = PluginInfo(
-    "writers.p2g",
-    "Points2Grid Writer",
-    "http://pdal.io/stages/writers.p2g.html" );
-
-CREATE_SHARED_PLUGIN(1, 0, P2gWriter, Writer, s_info)
-
-std::string P2gWriter::getName() const { return s_info.name; }
-
-void P2gWriter::addArgs(ProgramArgs& args)
-{
-    args.add("filename", "Output filename", m_filename).setPositional();
-    args.add("grid_dist_x", "X grid distance", m_GRID_DIST_X, 6.0);
-    args.add("grid_dist_y", "Y grid distance", m_GRID_DIST_Y, 6.0);
-    args.add("radius", "Radius", m_RADIUS, 8.4852813742385713);
-    args.add("fill_window_size", "Fill window size", m_fill_window_size, 3U);
-    args.add("output_type", "Output type", m_outputTypeSpec, {"all"});
-    args.add("output_format", "Output format", m_outputFormatSpec, "grid");
-    args.add("bounds", "Output raster bounds", m_bounds);
-
-}
-
-void P2gWriter::initialize()
-{
-    m_outputTypes = 0;
-    for (std::string& type : m_outputTypeSpec)
-    {
-        std::string val = Utils::tolower(type);
-        if (val == "min")
-            m_outputTypes |= OUTPUT_TYPE_MIN;
-        else if (val == "max")
-            m_outputTypes |= OUTPUT_TYPE_MAX;
-        else if (val == "mean")
-            m_outputTypes |= OUTPUT_TYPE_MEAN;
-        else if (val == "idw")
-            m_outputTypes |= OUTPUT_TYPE_IDW;
-        else if (val == "den")
-            m_outputTypes |= OUTPUT_TYPE_DEN;
-        else if (val == "std")
-            m_outputTypes |= OUTPUT_TYPE_STD;
-        else if (val == "all")
-            m_outputTypes = OUTPUT_TYPE_ALL;
-        else
-        {
-            std::ostringstream oss;
-
-            oss << getName() << ": Unrecognized output type '" << type << "'."; 
-            throw pdal_error(oss.str());
-        }
-    }
-
-    std::string fmt = Utils::tolower(m_outputFormatSpec);
-    if (fmt == "grid")
-        m_outputFormat = OUTPUT_FORMAT_GRID_ASCII;
-    else if (fmt == "asc")
-        m_outputFormat = OUTPUT_FORMAT_ARC_ASCII;
-    else if (fmt == "tif")
-        m_outputFormat = OUTPUT_FORMAT_GDAL_GTIFF;
-    else if (fmt == "all")
-        m_outputFormat = OUTPUT_FORMAT_ALL;
-    else
-    {
-        std::ostringstream oss;
-
-        oss << getName();
-        oss << ": Unrecognized output format '" << m_outputFormatSpec << "'";
-        throw pdal_error(oss.str());
-    }
-}
-
-
-void P2gWriter::ready(PointTableRef table)
-{
-    if (!table.spatialReferenceUnique())
-    {
-        std::ostringstream oss;
-
-        oss << getName() << ": Can't write output with multiple spatial "
-            "references.";
-        throw pdal_error(oss.str());
-    }
-}
-
-
-// The P2G writer will only work with a single point view at the current time.
-// Merge point views before writing.
-void P2gWriter::write(const PointViewPtr view)
-{
-    view->calculateBounds(m_bounds);
-    m_GRID_SIZE_X = (int)(ceil((m_bounds.maxx - m_bounds.minx) /
-        m_GRID_DIST_X)) + 1;
-    m_GRID_SIZE_Y = (int)(ceil((m_bounds.maxy - m_bounds.miny) /
-        m_GRID_DIST_Y)) + 1;
-    log()->floatPrecision(6);
-    log()->get(LogLevel::Debug) << "X grid distance: " << m_GRID_DIST_X << std::endl;
-    log()->get(LogLevel::Debug) << "Y grid distance: " << m_GRID_DIST_Y << std::endl;
-    log()->clearFloat();
-
-    m_interpolator.reset(new InCoreInterp(m_GRID_DIST_X, m_GRID_DIST_Y,
-        m_GRID_SIZE_X, m_GRID_SIZE_Y, m_RADIUS * m_RADIUS,
-        m_bounds.minx, m_bounds.maxx, m_bounds.miny, m_bounds.maxy,
-        m_fill_window_size));
-    m_interpolator->init();
-
-    for (point_count_t idx = 0; idx < view->size(); idx++)
-    {
-        double x = view->getFieldAs<double>(Dimension::Id::X, idx) -
-            m_bounds.minx;
-        double y = view->getFieldAs<double>(Dimension::Id::Y, idx) -
-            m_bounds.miny;
-        double z = view->getFieldAs<double>(Dimension::Id::Z, idx);
-        if (m_interpolator->update(x, y, z) < 0)
-        {
-            std::ostringstream oss;
-
-            oss << getName() << ": interp->update() error while processing";
-            throw pdal_error(oss.str());
-        }
-    }
-}
-
-void P2gWriter::done(PointTableRef table)
-{
-
-    double adfGeoTransform[6];
-    adfGeoTransform[0] = m_bounds.minx - 0.5*m_GRID_DIST_X;
-    adfGeoTransform[1] = m_GRID_DIST_X;
-    adfGeoTransform[2] = 0.0;
-    adfGeoTransform[3] = m_bounds.maxy + 0.5*m_GRID_DIST_Y;
-    adfGeoTransform[4] = 0.0;
-    adfGeoTransform[5] = -1 * m_GRID_DIST_Y;
-
-    SpatialReference const& srs = table.spatialReference();
-
-    log()->get(LogLevel::Debug) << "Output SRS  :'" << srs.getWKT() << "'" <<
-        std::endl;
-
-    // Strip off the extension if it was provided so that we don't get
-    // file.asc.type.asc or file.asc.asc, as point2grid appends a file
-    // extension.
-    std::string extension = FileUtils::extension(m_filename);
-    if (extension == ".asc" || extension == ".grid" || extension == ".tif")
-        m_filename = m_filename.substr(0, m_filename.find_last_of("."));
-
-    if (m_interpolator->finish(m_filename.c_str(), m_outputFormat,
-        m_outputTypes, adfGeoTransform, srs.getWKT().c_str()) < 0)
-    {
-        ostringstream oss;
-
-        oss << getName() << ": interp->finish() error";
-        throw pdal_error(oss.str());
-    }
-    getMetadata().addList("filename", m_filename);
-}
-
-} // namespaces
diff --git a/plugins/p2g/io/P2gWriter.hpp b/plugins/p2g/io/P2gWriter.hpp
deleted file mode 100644
index 9021640..0000000
--- a/plugins/p2g/io/P2gWriter.hpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/******************************************************************************
-* Copyright (c) 2011, Howard Butler, hobu.inc at gmail.com
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
-
-#pragma once
-
-#include <pdal/Writer.hpp>
-#include <pdal/StageFactory.hpp>
-
-#include <memory>
-
-#include <boost/tuple/tuple.hpp>
-
-#include <points2grid/config.h>
-#include <points2grid/Interpolation.hpp>
-#include <points2grid/Global.hpp>
-#include <points2grid/InCoreInterp.hpp>
-
-namespace pdal
-{
-
-class CoreInterp;
-
-class PDAL_DLL P2gWriter : public Writer
-{
-public:
-    P2gWriter() : m_outputTypes(0), m_outputFormat(OUTPUT_FORMAT_ARC_ASCII)
-        {}
-
-    static void * create();
-    static int32_t destroy(void *);
-    std::string getName() const;
-
-private:
-    P2gWriter& operator=(const P2gWriter&) = delete;
-
-    virtual void addArgs(ProgramArgs& args);
-    virtual void initialize();
-    virtual void ready(PointTableRef table);
-    virtual void write(const PointViewPtr view);
-    virtual void done(PointTableRef table);
-
-    std::unique_ptr<InCoreInterp> m_interpolator;
-
-    uint32_t m_GRID_SIZE_X;
-    uint32_t m_GRID_SIZE_Y;
-
-    double m_GRID_DIST_X;
-    double m_GRID_DIST_Y;
-
-    double m_RADIUS;
-    StringList m_outputTypeSpec;
-    std::string m_outputFormatSpec;
-    unsigned int m_outputTypes;
-    uint32_t m_fill_window_size;
-    BOX3D m_bounds;
-
-    std::string m_filename;
-    int m_outputFormat;
-};
-
-} // namespaces
diff --git a/plugins/pcl/filters/PCLBlock.cpp b/plugins/pcl/filters/PCLBlock.cpp
index a2a067b..c9f5e63 100644
--- a/plugins/pcl/filters/PCLBlock.cpp
+++ b/plugins/pcl/filters/PCLBlock.cpp
@@ -67,7 +67,7 @@ void PCLBlock::addArgs(ProgramArgs& args)
 PointViewSet PCLBlock::run(PointViewPtr input)
 {
     using namespace Dimension;
-    
+
     PointViewPtr output = input->makeNew();
     PointViewSet viewSet;
     viewSet.insert(output);
@@ -91,7 +91,7 @@ PointViewSet PCLBlock::run(PointViewPtr input)
     else if (!m_methods.empty())
         pipeline.setMethods(m_methods);
     else
-        throw pdal_error("No PCL pipeline specified!");
+        throwError("No PCL pipeline specified.");
     // PDALtoPCD subtracts min values in each XYZ dimension to prevent rounding
     // errors in conversion to float. These offsets need to be conveyed to the
     // pipeline to offset any bounds entered as part of a PassThrough filter.
diff --git a/plugins/pcl/io/PcdReader.cpp b/plugins/pcl/io/PcdReader.cpp
index 107642a..0f20604 100644
--- a/plugins/pcl/io/PcdReader.cpp
+++ b/plugins/pcl/io/PcdReader.cpp
@@ -51,7 +51,7 @@ namespace pdal
 static PluginInfo const s_info = PluginInfo(
     "readers.pcd",
     "Read data in the Point Cloud Library (PCL) format.",
-    "http://pdal.io/stages/readers.pclvisualizer.html" );
+    "http://pdal.io/stages/readers.pcd.html" );
 
 CREATE_SHARED_PLUGIN(1, 0, PcdReader, Reader, s_info)
 
diff --git a/plugins/pcl/kernel/PCLKernel.cpp b/plugins/pcl/kernel/PCLKernel.cpp
index f0f371b..b86d701 100644
--- a/plugins/pcl/kernel/PCLKernel.cpp
+++ b/plugins/pcl/kernel/PCLKernel.cpp
@@ -78,7 +78,7 @@ int PCLKernel::execute()
 
     // go ahead and prepare/execute on reader stage only to grab input
     // PointViewSet, this makes the input PointView available to both the
-    // processing pipeline and the visualizer
+    // processing pipeline and subsequent stages.
     readerStage.prepare(table);
     PointViewSet viewSetIn = readerStage.execute(table);
 
@@ -104,14 +104,7 @@ int PCLKernel::execute()
     Stage& writer(makeWriter(m_outputFile, pclStage, "", writerOptions));
 
     writer.prepare(table);
-
-    // process the data, grabbing the PointViewSet for visualization of the
-    // resulting PointView
-    PointViewSet viewSetOut = writer.execute(table);
-
-    if (isVisualize())
-        visualize(*viewSetOut.begin());
-    //visualize(*viewSetIn.begin(), *viewSetOut.begin());
+    writer.execute(table);
 
     return 0;
 }
diff --git a/plugins/pcl/kernel/SmoothKernel.cpp b/plugins/pcl/kernel/SmoothKernel.cpp
index 64bec59..a9c44b9 100644
--- a/plugins/pcl/kernel/SmoothKernel.cpp
+++ b/plugins/pcl/kernel/SmoothKernel.cpp
@@ -68,7 +68,7 @@ int SmoothKernel::execute()
 
     // go ahead and prepare/execute on reader stage only to grab input
     // PointViewSet, this makes the input PointView available to both the
-    // processing pipeline and the visualizer
+    // processing pipeline and subsequent stages.
     readerStage.prepare(table);
     PointViewSet viewSetIn = readerStage.execute(table);
 
@@ -100,13 +100,7 @@ int SmoothKernel::execute()
     Stage& writer(Kernel::makeWriter(m_outputFile, smoothStage, ""));
 
     writer.prepare(table);
-
-    // process the data, grabbing the PointViewSet for visualization of the
-    // resulting PointView
-    PointViewSet viewSetOut = writer.execute(table);
-
-    if (isVisualize())
-        visualize(*viewSetOut.begin());
+    writer.execute(table);
 
     return 0;
 }
diff --git a/plugins/pgpointcloud/io/PgReader.cpp b/plugins/pgpointcloud/io/PgReader.cpp
index 7c29cde..6870c86 100644
--- a/plugins/pgpointcloud/io/PgReader.cpp
+++ b/plugins/pgpointcloud/io/PgReader.cpp
@@ -85,7 +85,8 @@ point_count_t PgReader::getNumPoints() const
 
     std::ostringstream oss;
     oss << "SELECT Sum(PC_NumPoints(" << pg_quote_identifier(m_column_name) << ")) AS numpoints, ";
-    oss << "Max(PC_NumPoints(" << pg_quote_identifier(m_column_name) << ")) AS maxpoints FROM ";
+    oss << "Max(PC_NumPoints(" << pg_quote_identifier(m_column_name) <<
+        ")) AS maxpoints FROM ";
     if (m_schema_name.size())
         oss << pg_quote_identifier(m_schema_name) << ".";
     oss << pg_quote_identifier(m_table_name);
@@ -95,9 +96,7 @@ point_count_t PgReader::getNumPoints() const
     PGresult *result = pg_query_result(m_session, oss.str());
 
     if (PQresultStatus(result) != PGRES_TUPLES_OK)
-    {
-        throw pdal_error("unable to get point count");
-    }
+        throwError("Unable to get point count.");
 
     m_cached_point_count = atoi(PQgetvalue(result, 0, 0));
     m_cached_max_points = atoi(PQgetvalue(result, 0, 1));
@@ -173,7 +172,7 @@ uint32_t PgReader::fetchPcid() const
         if (!m_schema_name.empty())
           oss << "'" << m_schema_name << "'.";
         oss << "'" << m_table_name << "'";
-        throw pdal_error(oss.str());
+        throwError(oss.str());
     }
 
     log()->get(LogLevel::Debug) << "     got pcid = " << pcid << std::endl;
@@ -193,7 +192,7 @@ void PgReader::addDimensions(PointLayoutPtr layout)
 
     std::string xmlStr = pg_query_once(m_session, oss.str());
     if (xmlStr.empty())
-        throw pdal_error("Unable to fetch schema from `pointcloud_formats`");
+        throwError("Unable to fetch schema from 'pointcloud_formats'");
 
     loadSchema(layout, xmlStr);
 }
@@ -211,7 +210,7 @@ pdal::SpatialReference PgReader::fetchSpatialReference() const
 
     std::string srid_str = pg_query_once(m_session, oss.str());
     if (srid_str.empty())
-        throw pdal_error("Unable to fetch srid for this table and column");
+        throwError("Unable to fetch srid for this table and column");
 
     int32_t srid = atoi(srid_str.c_str());
     log()->get(LogLevel::Debug) << "     got SRID = " << srid << std::endl;
diff --git a/plugins/pgpointcloud/io/PgWriter.cpp b/plugins/pgpointcloud/io/PgWriter.cpp
index e468439..34677ac 100644
--- a/plugins/pgpointcloud/io/PgWriter.cpp
+++ b/plugins/pgpointcloud/io/PgWriter.cpp
@@ -93,8 +93,7 @@ void PgWriter::addArgs(ProgramArgs& args)
     args.add("schema", "Schema name", m_schema_name);
     args.add("compression", "Compression type", m_compressionSpec,
         "dimensional");
-    args.add("overwrite", "Whether data should be overwritten", m_overwrite,
-        true);
+    args.add("overwrite", "Whether data should be overwritten", m_overwrite);
     args.add("srid", "SRID", m_srid, 4326U);
     args.add("pcid", "PCID", m_pcid);
     args.add("pre_sql", "SQL to execute before query", m_pre_sql);
@@ -195,26 +194,21 @@ uint32_t PgWriter::SetupSchema(uint32_t srid)
             m_pcid;
         std::string count_str = pg_query_once(m_session, oss.str());
         if (count_str.empty())
-            throw pdal_error("Unable to count pcid's in table "
-                "`pointcloud_formats`");
+            throwError("Unable to count pcid's in table `pointcloud_formats`");
         schema_count = atoi(count_str.c_str());
-        oss.str("");
         if (schema_count == 0)
-        {
-            oss << "requested PCID '" << m_pcid <<
-                "' does not exist in POINTCLOUD_FORMATS";
-            throw pdal_error(oss.str());
-        }
+            throwError("Requested PCID '" + Utils::toString(m_pcid) +
+                "' does not exist in POINTCLOUD_FORMATS");
         return m_pcid;
     }
 
     // Do we have any existing schemas in the POINTCLOUD_FORMATS table?
     uint32_t pcid = 0;
+    oss.str("");
     oss << "SELECT Count(pcid) FROM pointcloud_formats";
     std::string schema_count_str = pg_query_once(m_session, oss.str());
     if (schema_count_str.empty())
-        throw pdal_error("Unable to count pcid's in table "
-            "`pointcloud_formats`");
+        throwError("Unable to count pcid's in table 'pointcloud_formats'.");
     schema_count = atoi(schema_count_str.c_str());
     oss.str("");
 
@@ -269,7 +263,8 @@ uint32_t PgWriter::SetupSchema(uint32_t srid)
             std::string pcid_str = pg_query_once(m_session,
                     "SELECT nextval('pointcloud_formats_pcid_sq')");
             if (pcid_str.empty())
-                throw pdal_error("Unable to select nextval from pointcloud_formats_pcid_seq");
+                throwError("Unable to select nextval from "
+                    "'pointcloud_formats_pcid_seq'.");
             pcid = atoi(pcid_str.c_str());
         }
         else
@@ -284,8 +279,7 @@ uint32_t PgWriter::SetupSchema(uint32_t srid)
         std::string pcid_str = pg_query_once(m_session,
                 "SELECT Max(pcid)+1 AS pcid FROM pointcloud_formats");
         if (pcid_str.empty())
-            throw pdal_error("Unable to get the max pcid from "
-                "`pointcloud_formats`");
+            throw("Unable to get the max pcid from 'pointcloud_formats'.");
         pcid = atoi(pcid_str.c_str());
     }
 
@@ -295,7 +289,7 @@ uint32_t PgWriter::SetupSchema(uint32_t srid)
     PGresult *result = PQexecParams(m_session, oss.str().c_str(), 1,
             NULL, &paramValues, NULL, NULL, 0);
     if (PQresultStatus(result) != PGRES_COMMAND_OK)
-        throw pdal_error(PQresultErrorMessage(result));
+        throwError(PQresultErrorMessage(result));
     PQclear(result);
     m_pcid = pcid;
     return m_pcid;
@@ -312,11 +306,10 @@ void PgWriter::DeleteTable(std::string const& schema_name,
 
     if (schema_name.size())
     {
-        name << schema_name << ".";
+        name << pg_quote_identifier(schema_name) << ".";
     }
-    name << table_name;
-    stmt << pg_quote_identifier(name.str());
-
+    name << pg_quote_identifier(table_name);
+    stmt << name.str();
 
     pg_execute(m_session, stmt.str());
 }
@@ -369,7 +362,7 @@ bool PgWriter::CheckTableExists(std::string const& name)
 
     std::string count_str = pg_query_once(m_session, oss.str());
     if (count_str.empty())
-        throw pdal_error("Unable to check for the existence of `pg_table`");
+        throwError("Unable to check for the existence of 'pg_table'.");
     int count = atoi(count_str.c_str());
 
     if (count == 1)
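
The DeleteTable hunk above quotes the schema and table names separately and joins them with an unquoted dot; quoting the pre-joined "schema.table" string would produce a single malformed identifier. The test added below exercises this with names containing embedded double quotes. A self-contained sketch of the difference, with a simplified escaper standing in for pg_quote_identifier:

    #include <iostream>
    #include <string>

    // Simplified double-quote escaping, standing in for pg_quote_identifier.
    std::string quoteIdent(const std::string& s)
    {
        std::string out = "\"";
        for (char c : s)
        {
            out += c;
            if (c == '"')
                out += c;   // double any embedded quote
        }
        out += "\"";
        return out;
    }

    int main()
    {
        std::string schema = "my-schema", table = "my-table";

        // Wrong: one identifier literally named 'my-schema.my-table'.
        std::cout << quoteIdent(schema + "." + table) << "\n";

        // Right: two identifiers joined by an unquoted dot.
        std::cout << quoteIdent(schema) + "." + quoteIdent(table) << "\n";
    }
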
diff --git a/plugins/pgpointcloud/test/PgpointcloudWriterTest.cpp b/plugins/pgpointcloud/test/PgpointcloudWriterTest.cpp
index 1ddfe76..ac56745 100644
--- a/plugins/pgpointcloud/test/PgpointcloudWriterTest.cpp
+++ b/plugins/pgpointcloud/test/PgpointcloudWriterTest.cpp
@@ -287,3 +287,20 @@ TEST_F(PgpointcloudWriterTest, writetNoPointcloudExtension)
 
     EXPECT_THROW(writer->execute(table), pdal_error);
 }
+
+TEST_F(PgpointcloudWriterTest, writeDeleteTable)
+{
+    if (shouldSkipTests())
+    {
+        return;
+    }
+
+    executeOnTestDb("CREATE SCHEMA \"4dal-\"\"test\"\"-schema\"");
+    executeOnTestDb("CREATE TABLE \"4dal-\"\"test\"\"-schema\"."
+                    "\"4dal-\"\"test\"\"-table\" (p PCPATCH)");
+    Options ops = getDbOptions();
+    ops.add("overwrite", true);
+    ops.add("schema", "4dal-\"test\"-schema");
+
+    optionsWrite(ops);
+}
diff --git a/plugins/python/CMakeLists.txt b/plugins/python/CMakeLists.txt
index 1c4a40a..7066bad 100644
--- a/plugins/python/CMakeLists.txt
+++ b/plugins/python/CMakeLists.txt
@@ -11,10 +11,12 @@ if (WITH_TESTS)
         LINK_WITH ${PDAL_PLANG_LIB_NAME})
     target_include_directories(plangtest PRIVATE
         ${PDAL_VENDOR_DIR}/pdalboost
-        ${ROOT_DIR})
+        ${ROOT_DIR} ${PDAL_JSONCPP_INCLUDE_DIR})
     if (WITH_APPS)
         PDAL_ADD_TEST(python_pipeline_test
             FILES ./test/PythonPipelineTest.cpp
             LINK_WITH ${PDAL_PLANG_LIB_NAME})
+        target_include_directories(python_pipeline_test PRIVATE
+            ${PDAL_JSONCPP_INCLUDE_DIR})
     endif()
 endif()
diff --git a/plugins/python/filters/CMakeLists.txt b/plugins/python/filters/CMakeLists.txt
index 832fc16..3d41ac8 100644
--- a/plugins/python/filters/CMakeLists.txt
+++ b/plugins/python/filters/CMakeLists.txt
@@ -6,7 +6,7 @@ PDAL_ADD_PLUGIN(predicate_libname filter predicate
         PredicateFilter.cpp
     LINK_WITH ${PDAL_PLANG_LIB_NAME})
 target_include_directories(${predicate_libname} PRIVATE
-    ${PYTHON_INCLUDE_DIR})
+    ${PYTHON_INCLUDE_DIR} ${PDAL_JSONCPP_INCLUDE_DIR})
 
 #
 # Programmable Filter
@@ -17,7 +17,7 @@ PDAL_ADD_PLUGIN(programmable_libname filter programmable
         ProgrammableFilter.cpp
     LINK_WITH ${PDAL_PLANG_LIB_NAME})
 target_include_directories(${programmable_libname} PRIVATE
-    ${PYTHON_INCLUDE_DIR})
+    ${PYTHON_INCLUDE_DIR} ${PDAL_JSONCPP_INCLUDE_DIR})
 
 if (WITH_TESTS)
     PDAL_ADD_TEST(python_predicate_test
diff --git a/plugins/python/filters/PredicateFilter.cpp b/plugins/python/filters/PredicateFilter.cpp
index f7a133a..82db044 100644
--- a/plugins/python/filters/PredicateFilter.cpp
+++ b/plugins/python/filters/PredicateFilter.cpp
@@ -60,6 +60,7 @@ void PredicateFilter::addArgs(ProgramArgs& args)
     args.add("module", "Python module containing the function to run",
         m_module);
     args.add("function", "Function to call", m_function);
+    args.add("pdalargs", "Dictionary to add to module globals when calling function", m_pdalargs);
 }
 
 
@@ -70,7 +71,7 @@ void PredicateFilter::ready(PointTableRef table)
 
     plang::Environment::get()->set_stdout(log()->getLogStream());
     m_script = new plang::Script(m_source, m_module, m_function);
-    m_pythonMethod = new plang::BufferedInvocation(*m_script);
+    m_pythonMethod = new plang::Invocation(*m_script);
     m_pythonMethod->compile();
 }
 
@@ -81,11 +82,18 @@ PointViewSet PredicateFilter::run(PointViewPtr view)
 
     m_pythonMethod->resetArguments();
     m_pythonMethod->begin(*view, n);
+
+    if (!m_pdalargs.empty())
+    {
+        std::ostringstream args;
+        args << m_pdalargs;
+        m_pythonMethod->setKWargs(args.str());
+    }
+
     m_pythonMethod->execute();
 
     if (!m_pythonMethod->hasOutputVariable("Mask"))
-        throw pdal::pdal_error("Mask variable not set in predicate "
-            "filter function.");
+        throwError("Mask variable not set in filter function.");
 
     PointViewPtr outview = view->makeNew();
 
diff --git a/plugins/python/filters/PredicateFilter.hpp b/plugins/python/filters/PredicateFilter.hpp
index 3e35d3a..79a3075 100644
--- a/plugins/python/filters/PredicateFilter.hpp
+++ b/plugins/python/filters/PredicateFilter.hpp
@@ -37,7 +37,9 @@
 #include <pdal/pdal_internal.hpp>
 #include <pdal/Filter.hpp>
 
-#include <pdal/plang/BufferedInvocation.hpp>
+#include <pdal/plang/Invocation.hpp>
+
+#include <json/json.h>
 
 namespace pdal
 {
@@ -53,12 +55,13 @@ public:
     std::string getName() const;
 
 private:
-    plang::BufferedInvocation* m_pythonMethod;
+    plang::Invocation* m_pythonMethod;
     plang::Script* m_script;
     std::string m_source;
     std::string m_scriptFile;
     std::string m_module;
     std::string m_function;
+    Json::Value m_pdalargs;
 
     virtual void addArgs(ProgramArgs& args);
     virtual void ready(PointTableRef table);
diff --git a/plugins/python/filters/ProgrammableFilter.cpp b/plugins/python/filters/ProgrammableFilter.cpp
index 5e4e5f2..f30db10 100644
--- a/plugins/python/filters/ProgrammableFilter.cpp
+++ b/plugins/python/filters/ProgrammableFilter.cpp
@@ -61,6 +61,7 @@ void ProgrammableFilter::addArgs(ProgramArgs& args)
         m_module);
     args.add("function", "Function to call", m_function);
     args.add("add_dimension", "Dimensions to add", m_addDimensions);
+    args.add("pdalargs", "Dictionary to add to module globals when calling function", m_pdalargs);
 }
 
 
@@ -77,7 +78,7 @@ void ProgrammableFilter::ready(PointTableRef table)
         m_source = FileUtils::readFileIntoString(m_scriptFile);
     plang::Environment::get()->set_stdout(log()->getLogStream());
     m_script = new plang::Script(m_source, m_module, m_function);
-    m_pythonMethod = new plang::BufferedInvocation(*m_script);
+    m_pythonMethod = new plang::Invocation(*m_script);
     m_pythonMethod->compile();
     m_totalMetadata = table.metadata();
 }
@@ -89,6 +90,13 @@ void ProgrammableFilter::filter(PointView& view)
         " processing " << view.size() << " points." << std::endl;
     m_pythonMethod->resetArguments();
     m_pythonMethod->begin(view, m_totalMetadata);
+
+    if (!m_pdalargs.empty())
+    {
+        std::ostringstream args;
+        args << m_pdalargs;
+        m_pythonMethod->setKWargs(args.str());
+    }
     m_pythonMethod->execute();
     m_pythonMethod->end(view, getMetadata());
 }
diff --git a/plugins/python/filters/ProgrammableFilter.hpp b/plugins/python/filters/ProgrammableFilter.hpp
index be3d072..67ddb3a 100644
--- a/plugins/python/filters/ProgrammableFilter.hpp
+++ b/plugins/python/filters/ProgrammableFilter.hpp
@@ -37,7 +37,9 @@
 #include <pdal/pdal_internal.hpp>
 #include <pdal/Filter.hpp>
 
-#include <pdal/plang/BufferedInvocation.hpp>
+#include <pdal/plang/Invocation.hpp>
+
+#include <json/json.h>
 
 namespace pdal
 {
@@ -56,7 +58,7 @@ public:
 
 private:
     plang::Script* m_script;
-    plang::BufferedInvocation *m_pythonMethod;
+    plang::Invocation *m_pythonMethod;
     std::string m_source;
     std::string m_scriptFile;
     std::string m_module;
@@ -73,6 +75,7 @@ private:
     ProgrammableFilter(const ProgrammableFilter&); // not implemented
 
     MetadataNode m_totalMetadata;
+    Json::Value m_pdalargs;
 };
 
 } // namespace pdal
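
Both Python filters now accept a 'pdalargs' option held as a Json::Value; when non-empty, the value is streamed back to a string and handed to the invocation with setKWargs() so the dictionary appears in the script's globals (the pdalargs test below relies on this). A minimal jsoncpp sketch of that round trip; the Invocation call itself is PDAL-internal and appears only as a comment:

    #include <json/json.h>

    #include <iostream>
    #include <sstream>

    int main()
    {
        // Parse the option string the way the filter receives it.
        Json::Value pdalargs;
        std::istringstream in("{\"name\":\"Howard\",\"something\":42}");
        in >> pdalargs;

        if (!pdalargs.empty())
        {
            // Serialize back to a string, as run()/filter() do before
            // calling m_pythonMethod->setKWargs(args.str()).
            std::ostringstream args;
            args << pdalargs;
            std::cout << args.str() << std::endl;
        }
        return 0;
    }
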
diff --git a/plugins/python/test/ProgrammableFilterTest.cpp b/plugins/python/test/ProgrammableFilterTest.cpp
index 4c3f0b4..d67e98f 100644
--- a/plugins/python/test/ProgrammableFilterTest.cpp
+++ b/plugins/python/test/ProgrammableFilterTest.cpp
@@ -204,9 +204,13 @@ TEST_F(ProgrammableFilterTest, metadata)
     reader.setOptions(ops);
 
     Option source("source", "import numpy\n"
-        "def myfunc(ins,outs,inmeta,outmeta):\n"
-        "  t = ('name', 'value', '', '', [])\n"
-        "  outmeta.append(t)\n"
+        "import sys\n"
+        "import redirector\n"
+        "def myfunc(ins,outs):\n"
+        "  global metadata\n"
+        "  #print('before', globals(),  file=sys.stderr,)\n"
+        "  metadata = {'name': 'root', 'value': 'a string', 'type': 'string', 'description': 'a description', 'children': [{'name': 'filters.programmable', 'value': 52, 'type': 'integer', 'description': 'a filter description', 'children': []}, {'name': 'readers.faux', 'value': 'another string', 'type': 'string', 'description': 'a reader description', 'children': []}]}\n"
+        " # print ('schema', schema, file=sys.stderr,)\n"
         "  return True\n"
     );
     Option module("module", "MyModule");
@@ -230,7 +234,52 @@ TEST_F(ProgrammableFilterTest, metadata)
     MetadataNode m = table.metadata();
     m = m.findChild("filters.programmable");
     MetadataNodeList l = m.children();
-    EXPECT_EQ(l.size(), 1u);
-//     EXPECT_EQ(l[0].name(), "name");
-//     EXPECT_EQ(l[0].value(), "value");
+    EXPECT_EQ(l.size(), 3u);
+    EXPECT_EQ(l[0].name(), "filters.programmable");
+    EXPECT_EQ(l[0].value(), "52");
+    EXPECT_EQ(l[0].description(), "a filter description");
+}
+
+TEST_F(ProgrammableFilterTest, pdalargs)
+{
+    StageFactory f;
+
+    BOX3D bounds(0.0, 0.0, 0.0, 1.0, 1.0, 1.0);
+
+    Options ops;
+    ops.add("bounds", bounds);
+    ops.add("count", 10);
+    ops.add("mode", "ramp");
+
+    FauxReader reader;
+    reader.setOptions(ops);
+
+    Option source("source", "import numpy\n"
+        "import sys\n"
+        "import redirector\n"
+        "def myfunc(ins,outs):\n"
+        "  pdalargs['name']\n"
+        "# print ('pdalargs', pdalargs, file=sys.stderr,)\n"
+        "  return True\n"
+    );
+    Option module("module", "MyModule");
+    Option function("function", "myfunc");
+    Option args("pdalargs", "{\"name\":\"Howard\",\"something\":42, \"another\": \"True\"}");
+    Options opts;
+    opts.add(source);
+    opts.add(module);
+    opts.add(function);
+    opts.add(args);
+
+    Stage* filter(f.createStage("filters.programmable"));
+    filter->setOptions(opts);
+    filter->setInput(reader);
+
+    PointTable table;
+    filter->prepare(table);
+    PointViewSet viewSet = filter->execute(table);
+    EXPECT_EQ(viewSet.size(), 1u);
+    PointViewPtr view = *viewSet.begin();
+
+    // Not throwing anything is success for now
 }
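
Outside the unit test, the same stage options translate directly into a pipeline definition. A hedged sketch of the equivalent stage driven from the Python bindings (file names and the pdalargs payload are placeholders; 'script' points at a file containing the function):

    import pdal

    json = """
    {
      "pipeline": [
        "input.las",
        {
          "type": "filters.programmable",
          "script": "my_filter.py",
          "module": "anything",
          "function": "multiply_z",
          "pdalargs": "{\\"factor\\": 2.0}"
        },
        "output.las"
      ]
    }
    """

    pipeline = pdal.Pipeline(json)
    count = pipeline.execute()
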
diff --git a/plugins/rxp/CMakeLists.txt b/plugins/rxp/CMakeLists.txt
index 0a31fa1..acf5f7f 100644
--- a/plugins/rxp/CMakeLists.txt
+++ b/plugins/rxp/CMakeLists.txt
@@ -12,8 +12,9 @@ PDAL_ADD_PLUGIN(libname reader rxp
         io/RxpReader.cpp
     LINK_WITH
         ${RiVLib_SCANLIB_LIBRARY})
-target_include_directories(${libname} PRIVATE ${RiVLib_INCLUDE_DIRS})
+target_include_directories(${libname} PUBLIC ${RiVLib_INCLUDE_DIRS})
 
+option(BUILD_RIVLIB_TESTS "Build rivlib tests" ON)
 if (BUILD_RIVLIB_TESTS)
     configure_file(
         test/Config.hpp.in
@@ -24,5 +25,6 @@ if (BUILD_RIVLIB_TESTS)
         FILES test/RxpReaderTest.cpp
         LINK_WITH ${libname})
     target_include_directories(${RXP_TEST_NAME} PRIVATE
+        ${PROJECT_BINARY_DIR}/plugins/rxp/test
         ${PROJECT_SOURCE_DIR}/plugins/rxp/io)
 endif()
diff --git a/plugins/rxp/test/RxpReaderTest.cpp b/plugins/rxp/test/RxpReaderTest.cpp
index e032e18..4ff8448 100644
--- a/plugins/rxp/test/RxpReaderTest.cpp
+++ b/plugins/rxp/test/RxpReaderTest.cpp
@@ -39,7 +39,6 @@
 #include <pdal/Options.hpp>
 #include <pdal/PipelineManager.hpp>
 #include <pdal/PointView.hpp>
-#include "PipelineReader.hpp"
 
 #include "RxpReader.hpp"
 #include "Config.hpp"
diff --git a/plugins/sqlite/io/SQLiteReader.cpp b/plugins/sqlite/io/SQLiteReader.cpp
index 7c1d7c3..df49ab4 100644
--- a/plugins/sqlite/io/SQLiteReader.cpp
+++ b/plugins/sqlite/io/SQLiteReader.cpp
@@ -66,17 +66,13 @@ void SQLiteReader::initialize()
         m_session->loadSpatialite(m_modulename);
 
         if (!bHaveSpatialite)
-        {
-            throw pdal_error("no spatialite enabled!");
-        }
+            throwError("Spatialite not enabled.");
 
     }
     catch (pdal_error const& e)
     {
-        std::stringstream oss;
-        oss << getName() << ": Unable to connect to database with error '" <<
-            e.what() << "'";
-        throw pdal_error(oss.str());
+        throwError("Unable to connect to database with error '" +
+            std::string(e.what()));
     }
 
     if (m_spatialRef.empty())
@@ -145,11 +141,7 @@ void SQLiteReader::validateQuery() const
     {
         auto p = m_session->columns().find(*r);
         if (p == m_session->columns().end())
-        {
-            std::ostringstream oss;
-            oss << "Unable to find required column name '" << *r << "'";
-            throw pdal_error(oss.str());
-        }
+            throwError("Unable to find required column name '" + *r + "'");
     }
 }
 
@@ -165,7 +157,7 @@ void SQLiteReader::addDimensions(PointLayoutPtr layout)
     m_session->query(q);
     const row* r = m_session->get(); // First result better have our schema
     if (!r)
-        throw pdal_error("Unable to select schema from query!");
+        throwError("Unable to select schema from query.");
 
     column const& s = r->at(0); // First column is schema
 
@@ -203,7 +195,7 @@ point_count_t SQLiteReader::readPatch(PointViewPtr view, point_count_t numPts)
 {
     const row* r = m_session->get();
     if (!r)
-        throw pdal_error("readPatch with no data in session!");
+        throwError("readPatch with no data in session.");
     std::map<std::string, int32_t> const& columns = m_session->columns();
 
     // Availability of positions already validated
@@ -251,13 +243,13 @@ point_count_t SQLiteReader::readPatch(PointViewPtr view, point_count_t numPts)
             count--;
         }
 #else
-        throw pdal_error("Can't decompress without LAZperf.");
+        throwError("Can't decompress without LAZperf.");
 #endif
 
         log()->get(LogLevel::Debug3) << "Compressed byte size: " <<
             m_patch->byte_size() << std::endl;
         if (!m_patch->byte_size())
-            throw pdal_error("Compressed patch size was 0!");
+            throwError("Compressed patch size was 0.");
         log()->get(LogLevel::Debug3) << "Uncompressed byte size: " <<
             (m_patch->count * packedPointSize()) << std::endl;
     }
diff --git a/plugins/sqlite/io/SQLiteWriter.cpp b/plugins/sqlite/io/SQLiteWriter.cpp
index f77814f..46fb587 100644
--- a/plugins/sqlite/io/SQLiteWriter.cpp
+++ b/plugins/sqlite/io/SQLiteWriter.cpp
@@ -116,11 +116,8 @@ void SQLiteWriter::initialize()
     }
     catch (pdal_error const& e)
     {
-        std::stringstream oss;
-        oss << getName();
-        oss << ": Unable to connect to database with error '" <<
-            e.what() << "'";
-        throw pdal_error(oss.str());
+        throwError("Unable to connect to database with error '" +
+            std::string(e.what()));
     }
 
     m_patch = PatchPtr(new Patch());
@@ -343,24 +340,16 @@ SQLiteWriter::loadGeometryWKT(std::string const& filename_or_wkt) const
     if (!FileUtils::fileExists(filename_or_wkt))
     {
         if (!IsValidGeometryWKT(filename_or_wkt))
-        {
-            std::ostringstream oss;
-            oss << getName() << ": WKT for not valid and '" << filename_or_wkt
-                << "' doesn't exist as a file";
-            throw pdal::pdal_error(oss.str());
-        }
+            throwError("WKT for not valid and '" + filename_or_wkt +
+                "' doesn't exist as a file");
         wkt_s << filename_or_wkt;
     }
     else
     {
         std::string wkt = FileUtils::readFileIntoString(filename_or_wkt);
         if (!IsValidGeometryWKT(wkt))
-        {
-            std::ostringstream oss;
-            oss << getName() << ": WKT for was from file '" << filename_or_wkt
-                << "' is not valid";
-            throw pdal::pdal_error(oss.str());
-        }
+            throwError("WKT for was from file '" + filename_or_wkt +
+                "' is not valid");
         wkt_s << wkt;
     }
     return wkt_s.str();
@@ -493,7 +482,7 @@ void SQLiteWriter::writeTile(const PointViewPtr view)
         }
         compressor.done();
 #else
-        throw pdal_error("Can't compress without LAZperf.");
+        throwError("Can't compress without LAZperf.");
 #endif
 
         size_t viewSize = view->size() * view->pointSize();
diff --git a/python/README.rst b/python/README.rst
index 84c6d63..f538ed9 100644
--- a/python/README.rst
+++ b/python/README.rst
@@ -12,6 +12,8 @@ Usage
 Given the following pipeline, which simply reads an `ASPRS LAS`_ file and
 sorts it by the ``X`` dimension:
 
+.. _`ASPRS LAS`: https://www.asprs.org/committee-general/laser-las-file-format-exchange-activities.html
+
 .. code-block:: python
 
 
@@ -27,7 +29,7 @@ sorts it by the ``X`` dimension:
     }"""
 
     import pdal
-    pipeline = pdal.Pipeline(pipeline)
+    pipeline = pdal.Pipeline(json)
     pipeline.validate() # check if our JSON and options were good
     pipeline.loglevel = 9 #really noisy
     count = pipeline.execute()
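
Once execute() returns, the same Pipeline object exposes the results; a short continuation of the README example, using the accessors exercised by the Python test suite elsewhere in this changeset:

    arrays = pipeline.arrays      # list of numpy structured arrays
    metadata = pipeline.metadata  # JSON string of per-stage metadata
    log = pipeline.log            # captured log output (see loglevel)
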
diff --git a/python/VERSION.txt b/python/VERSION.txt
index 88c5fb8..bc80560 100644
--- a/python/VERSION.txt
+++ b/python/VERSION.txt
@@ -1 +1 @@
-1.4.0
+1.5.0
diff --git a/python/pdal/__init__.py b/python/pdal/__init__.py
index 0ebc296..0ec0921 100644
--- a/python/pdal/__init__.py
+++ b/python/pdal/__init__.py
@@ -1,3 +1,3 @@
-__version__='1.4.0'
+__version__='1.5.0'
 
 from .pipeline import Pipeline
diff --git a/python/test/test_pipeline.py b/python/test/test_pipeline.py
index e906150..d497e31 100644
--- a/python/test/test_pipeline.py
+++ b/python/test/test_pipeline.py
@@ -32,14 +32,14 @@ class TestPipeline(unittest.TestCase):
         return output
 
     @unittest.skipUnless(os.path.exists(os.path.join(DATADIRECTORY, 'data/pipeline/sort.json')),
-                         os.path.join(DATADIRECTORY, 'data/pipeline/sort.json'))
+                         "missing test data")
     def test_construction(self):
         """Can we construct a PDAL pipeline"""
         json = self.fetch_json('/data/pipeline/sort.json')
         r = pdal.Pipeline(json)
 
     @unittest.skipUnless(os.path.exists(os.path.join(DATADIRECTORY, 'data/pipeline/sort.json')),
-                           "missing test data")
+                         "missing test data")
     def test_execution(self):
         """Can we execute a PDAL pipeline"""
         x = self.fetch_json('/data/pipeline/sort.json')
@@ -67,6 +67,8 @@ class TestPipeline(unittest.TestCase):
         self.assertAlmostEqual(a[0][0], 635619.85, 7)
         self.assertAlmostEqual(a[1064][2], 456.92, 7)
 
+    @unittest.skipUnless(os.path.exists(os.path.join(DATADIRECTORY, 'data/pipeline/sort.json')),
+                         "missing test data")
     def test_metadata(self):
         """Can we fetch PDAL metadata"""
         json = self.fetch_json('/data/pipeline/sort.json')
@@ -78,6 +80,8 @@ class TestPipeline(unittest.TestCase):
         self.assertEqual(j["metadata"]["readers.las"]["count"], 1065)
 
 
+    @unittest.skipUnless(os.path.exists(os.path.join(DATADIRECTORY, 'data/pipeline/sort.json')),
+                         "missing test data")
     def test_no_execute(self):
         """Does fetching arrays without executing throw an exception"""
         json = self.fetch_json('/data/pipeline/sort.json')
@@ -85,6 +89,8 @@ class TestPipeline(unittest.TestCase):
         with self.assertRaises(RuntimeError):
             r.arrays
 
+    @unittest.skipUnless(os.path.exists(os.path.join(DATADIRECTORY, 'data/pipeline/reproject.json')),
+                         "missing test data")
     def test_logging(self):
         """Can we fetch log output"""
         json = self.fetch_json('/data/pipeline/reproject.json')
@@ -94,6 +100,8 @@ class TestPipeline(unittest.TestCase):
         self.assertEqual(count, 789)
         self.assertEqual(r.log.split()[0], '(pypipeline')
 
+    @unittest.skipUnless(os.path.exists(os.path.join(DATADIRECTORY, 'data/pipeline/sort.json')),
+                         "missing test data")
     def test_schema(self):
         """Fetching a schema works"""
         json = self.fetch_json('/data/pipeline/sort.json')
@@ -102,7 +110,7 @@ class TestPipeline(unittest.TestCase):
         self.assertEqual(r.schema['schema']['dimensions'][0]['name'], 'X')
 
     @unittest.skipUnless(os.path.exists(os.path.join(DATADIRECTORY, 'data/filters/chip.json')),
-                           "missing test data")
+                         "missing test data")
     def test_merged_arrays(self):
         """Can we fetch multiple point views from merged PDAL data """
         json = self.fetch_json('/data/filters/chip.json')
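
Each test now repeats the same skipUnless guard; a hedged sketch of how the guard could be factored into a small decorator (DATADIRECTORY as already defined at the top of this test module):

    import os
    import unittest

    def requires_data(relpath):
        """Skip a test unless the given test-data file exists."""
        path = os.path.join(DATADIRECTORY, relpath)
        return unittest.skipUnless(os.path.exists(path), "missing test data")

    # Usage:
    #   @requires_data('data/pipeline/sort.json')
    #   def test_construction(self): ...
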
diff --git a/scripts/appveyor/config.cmd b/scripts/appveyor/config.cmd
index b8d7a58..0f97515 100644
--- a/scripts/appveyor/config.cmd
+++ b/scripts/appveyor/config.cmd
@@ -8,7 +8,6 @@ cmake -G "Visual Studio 14 2015 Win64" ^
     -DBUILD_PLUGIN_MRSID=OFF ^
     -DBUILD_PLUGIN_NITF=OFF ^
     -DBUILD_PLUGIN_OCI=OFF ^
-    -DBUILD_PLUGIN_P2G=OFF ^
     -DBUILD_PLUGIN_PCL=OFF ^
     -DBUILD_PLUGIN_PGPOINTCLOUD=OFF ^
     -DBUILD_PLUGIN_SQLITE=OFF ^
diff --git a/scripts/ci/script.sh b/scripts/ci/script.sh
index 3aadbc5..63e1f11 100755
--- a/scripts/ci/script.sh
+++ b/scripts/ci/script.sh
@@ -31,7 +31,6 @@ cmake \
     -DBUILD_PLUGIN_MRSID=OFF \
     -DBUILD_PLUGIN_NITF=OFF \
     -DBUILD_PLUGIN_OCI=OFF \
-    -DBUILD_PLUGIN_P2G=$OPTIONAL_COMPONENT_SWITCH \
     -DBUILD_PLUGIN_PCL=$OPTIONAL_COMPONENT_SWITCH \
     -DBUILD_PLUGIN_PGPOINTCLOUD=$OPTIONAL_COMPONENT_SWITCH \
     -DBUILD_PGPOINTCLOUD_TESTS=OFF \
@@ -43,8 +42,6 @@ cmake \
     -DWITH_LAZPERF=$OPTIONAL_COMPONENT_SWITCH \
     -DWITH_LASZIP=$OPTIONAL_COMPONENT_SWITCH \
     -DWITH_PDAL_JNI=$OPTIONAL_COMPONENT_SWITCH \
-    -DLASZIP_INCLUDE_DIR:PATH=/usr/include \
-    -DLASZIP_LIBRARY:FILEPATH=/usr/lib/liblaszip.so \
     -DWITH_TESTS=ON \
     -G "$PDAL_CMAKE_GENERATOR" \
     ..
diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index a8b7e32..8949aba 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -15,14 +15,14 @@ RUN git clone --depth=1 https://github.com/PDAL/PDAL \
     && mkdir build \
     && cd build \
     && cmake \
-        -DBUILD_PLUGIN_CPD=OFF \
+        -DBUILD_PLUGIN_CPD=ON \
+        -DBUILD_PLUGIN_MBIO=ON \
         -DBUILD_PLUGIN_GREYHOUND=ON \
         -DBUILD_PLUGIN_HEXBIN=ON \
         -DBUILD_PLUGIN_ICEBRIDGE=ON \
         -DBUILD_PLUGIN_MRSID=ON \
         -DBUILD_PLUGIN_NITF=ON \
         -DBUILD_PLUGIN_OCI=OFF \
-        -DBUILD_PLUGIN_P2G=ON \
         -DBUILD_PLUGIN_PCL=ON \
         -DBUILD_PLUGIN_PGPOINTCLOUD=ON \
         -DBUILD_PLUGIN_SQLITE=ON \
@@ -115,7 +115,6 @@ RUN apt-get purge -y \
     libcurl4-openssl-dev \
     libspatialite-dev \
     libdap-dev\
-    ninja \
     cython \
     python-pip
 
@@ -157,4 +156,22 @@ RUN apt-get update && apt-get install -y \
     libgdal1i \
     libflann1.8 \
     libpython2.7 \
-    libhdf5-cpp-11
+    libhdf5-cpp-11 \
+    libpcl-common1.7 \
+    libpcl-features1.7 \
+    libpcl-filters1.7 \
+    libpcl-io1.7 \
+    libpcl-kdtree1.7 \
+    libpcl-keypoints1.7 \
+    libpcl-octree1.7 \
+    libpcl-outofcore1.7 \
+    libpcl-people1.7 \
+    libpcl-recognition1.7 \
+    libpcl-registration1.7 \
+    libpcl-sample-consensus1.7 \
+    libpcl-search1.7 \
+    libpcl-segmentation1.7 \
+    libpcl-surface1.7 \
+    libpcl-tracking1.7 \
+    libpcl-visualization1.7
+
diff --git a/scripts/docker/dependencies/Dockerfile b/scripts/docker/dependencies/Dockerfile
index 7137ed8..31a7d74 100644
--- a/scripts/docker/dependencies/Dockerfile
+++ b/scripts/docker/dependencies/Dockerfile
@@ -62,15 +62,33 @@ RUN apt-get update && apt-get install -y --fix-missing --no-install-recommends \
         libcurl4-openssl-dev \
         libspatialite-dev \
         libdap-dev\
-        ninja \
         cython \
         python-pip \
         libgdal1-dev \
+        gdal-bin \
+        libpcl-dev \
         time \
+        libhpdf-dev \
+        python-setuptools \
+        libgeos++-dev \
+        libhpdf-dev \
+        unzip \
     && rm -rf /var/lib/apt/lists/*
 
 RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-3.6 20 && update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-3.6 20
 
+RUN mkdir /vdatum \
+    && cd /vdatum \
+    && wget http://download.osgeo.org/proj/vdatum/usa_geoid2012.zip && unzip -j -u usa_geoid2012.zip -d /usr/share/proj \
+    && wget http://download.osgeo.org/proj/vdatum/usa_geoid2009.zip && unzip -j -u usa_geoid2009.zip -d /usr/share/proj \
+    && wget http://download.osgeo.org/proj/vdatum/usa_geoid2003.zip && unzip -j -u usa_geoid2003.zip -d /usr/share/proj \
+    && wget http://download.osgeo.org/proj/vdatum/usa_geoid1999.zip && unzip -j -u usa_geoid1999.zip -d /usr/share/proj \
+    && wget http://download.osgeo.org/proj/vdatum/vertcon/vertconc.gtx && mv vertconc.gtx /usr/share/proj \
+    && wget http://download.osgeo.org/proj/vdatum/vertcon/vertcone.gtx && mv vertcone.gtx /usr/share/proj \
+    && wget http://download.osgeo.org/proj/vdatum/vertcon/vertconw.gtx && mv vertconw.gtx /usr/share/proj \
+    && wget http://download.osgeo.org/proj/vdatum/egm96_15/egm96_15.gtx && mv egm96_15.gtx /usr/share/proj \
+    && wget http://download.osgeo.org/proj/vdatum/egm08_25/egm08_25.gtx && mv egm08_25.gtx /usr/share/proj \
+    && rm -rf /vdatum
 
 #RUN git clone --depth=1 https://github.com/OSGeo/gdal.git \
 #    &&    cd gdal/gdal \
@@ -150,18 +168,6 @@ RUN git clone https://github.com/hobu/hexer.git \
     && make install \
     && rm -rf /hexer
 
-RUN git clone https://github.com/CRREL/points2grid.git \
-    && cd points2grid \
-    && mkdir build \
-    && cd build \
-    && CXXFLAGS="-std=c++11" cmake \
-        -DCMAKE_INSTALL_PREFIX=/usr \
-        -DCMAKE_BUILD_TYPE="Release" \
-        .. \
-    && make \
-    && make install \
-    && rm -rf /points2grid
-
 RUN git clone  https://github.com/hobu/laz-perf.git \
     && cd laz-perf \
     && mkdir build \
@@ -181,65 +187,65 @@ RUN wget http://bitbucket.org/eigen/eigen/get/3.2.7.tar.gz \
         && rm -rf /3.2.7.tar.gz \
         && rm -rf /eigen-eigen-b30b87236a1b
 
-RUN git clone https://github.com/PointCloudLibrary/pcl.git \
-        && cd pcl \
-        && git checkout pcl-1.8.0 \
-        && mkdir build \
-        && cd build \
-        && CC="clang" CXX="clang++" CXXFLAGS="-std=c++11"  cmake \
-                -DBUILD_2d=ON \
-                -DBUILD_CUDA=OFF \
-                -DBUILD_GPU=OFF \
-                -DBUILD_apps=OFF \
-                -DBUILD_common=ON \
-                -DBUILD_examples=OFF \
-                -DBUILD_features=ON \
-                -DBUILD_filters=ON \
-                -DBUILD_geometry=ON \
-                -DBUILD_global_tests=OFF \
-                -DBUILD_io=ON \
-                -DBUILD_kdtree=ON \
-                -DBUILD_keypoints=ON \
-                -DBUILD_ml=ON \
-                -DBUILD_octree=ON \
-                -DBUILD_outofcore=OFF \
-                -DBUILD_people=OFF \
-                -DBUILD_recognition=OFF \
-                -DBUILD_registration=ON \
-                -DBUILD_sample_concensus=ON \
-                -DBUILD_search=ON \
-                -DBUILD_segmentation=ON \
-                -DBUILD_simulation=OFF \
-                -DBUILD_stereo=OFF \
-                -DBUILD_surface=ON \
-                -DBUILD_surface_on_nurbs=OFF \
-                -DBUILD_tools=OFF \
-                -DBUILD_tracking=OFF \
-                -DBUILD_visualization=OFF \
-                -DWITH_LIBUSB=OFF \
-                -DWITH_OPENNI=OFF \
-                -DWITH_OPENNI2=OFF \
-                -DWITH_FZAPI=OFF \
-                -DWITH_PXCAPI=OFF \
-                -DWITH_PNG=OFF \
-                -DWITH_QHULL=OFF \
-                -DWITH_QT=OFF \
-                -DWITH_VTK=OFF \
-                -DWITH_PCAP=OFF \
-                -DWITH_OPENNI=OFF \
-                -DWITH_OPENNI2=OFF \
-                -DWITH_FZAPI=OFF \
-                -DWITH_ENSENSO=OFF \
-                -DWITH_DAVIDSDK=OFF \
-                -DWITH_DSSDK=OFF \
-                -DWITH_RSSDK=OFF \
-                -DWITH_OPENGL=OFF \
-                -DCMAKE_INSTALL_PREFIX=/usr \
-                -DCMAKE_BUILD_TYPE="Release" \
-                .. \
-        && make -j 4\
-        && make install \
-        && rm -rf /pcl
+#RUN git clone https://github.com/PointCloudLibrary/pcl.git \
+#        && cd pcl \
+#        && git checkout pcl-1.8.0 \
+#        && mkdir build \
+#        && cd build \
+#        && CC="clang" CXX="clang++" CXXFLAGS="-std=c++11"  cmake \
+#                -DBUILD_2d=ON \
+#                -DBUILD_CUDA=OFF \
+#                -DBUILD_GPU=OFF \
+#                -DBUILD_apps=OFF \
+#                -DBUILD_common=ON \
+#                -DBUILD_examples=OFF \
+#                -DBUILD_features=ON \
+#                -DBUILD_filters=ON \
+#                -DBUILD_geometry=ON \
+#                -DBUILD_global_tests=OFF \
+#                -DBUILD_io=ON \
+#                -DBUILD_kdtree=ON \
+#                -DBUILD_keypoints=ON \
+#                -DBUILD_ml=ON \
+#                -DBUILD_octree=ON \
+#                -DBUILD_outofcore=OFF \
+#                -DBUILD_people=OFF \
+#                -DBUILD_recognition=OFF \
+#                -DBUILD_registration=ON \
+#                -DBUILD_sample_concensus=ON \
+#                -DBUILD_search=ON \
+#                -DBUILD_segmentation=ON \
+#                -DBUILD_simulation=OFF \
+#                -DBUILD_stereo=OFF \
+#                -DBUILD_surface=ON \
+#                -DBUILD_surface_on_nurbs=OFF \
+#                -DBUILD_tools=OFF \
+#                -DBUILD_tracking=OFF \
+#                -DBUILD_visualization=OFF \
+#                -DWITH_LIBUSB=OFF \
+#                -DWITH_OPENNI=OFF \
+#                -DWITH_OPENNI2=OFF \
+#                -DWITH_FZAPI=OFF \
+#                -DWITH_PXCAPI=OFF \
+#                -DWITH_PNG=OFF \
+#                -DWITH_QHULL=OFF \
+#                -DWITH_QT=OFF \
+#                -DWITH_VTK=OFF \
+#                -DWITH_PCAP=OFF \
+#                -DWITH_OPENNI=OFF \
+#                -DWITH_OPENNI2=OFF \
+#                -DWITH_FZAPI=OFF \
+#                -DWITH_ENSENSO=OFF \
+#                -DWITH_DAVIDSDK=OFF \
+#                -DWITH_DSSDK=OFF \
+#                -DWITH_RSSDK=OFF \
+#                -DWITH_OPENGL=OFF \
+#                -DCMAKE_INSTALL_PREFIX=/usr \
+#                -DCMAKE_BUILD_TYPE="Release" \
+#                .. \
+#        && make -j 4\
+#        && make install \
+#        && rm -rf /pcl
 
 
 
@@ -251,47 +257,36 @@ RUN svn co -r 2691 https://svn.osgeo.org/metacrs/geotiff/trunk/libgeotiff/ \
     && make install \
     && rm -rf /libgeotiff
 
-RUN apt-get update && apt-get install -y --fix-missing --no-install-recommends \
-        ninja-build \
-        libgeos++-dev \
-        unzip \
-    && rm -rf /var/lib/apt/lists/*
 
-#RUN mkdir /vdatum \
-#    && cd /vdatum \
-#    && wget http://download.osgeo.org/proj/vdatum/usa_geoid2012.zip && unzip -j -u usa_geoid2012.zip -d /usr/share/proj \
-#    && wget http://download.osgeo.org/proj/vdatum/usa_geoid2009.zip && unzip -j -u usa_geoid2009.zip -d /usr/share/proj \
-#    && wget http://download.osgeo.org/proj/vdatum/usa_geoid2003.zip && unzip -j -u usa_geoid2003.zip -d /usr/share/proj \
-#    && wget http://download.osgeo.org/proj/vdatum/usa_geoid1999.zip && unzip -j -u usa_geoid1999.zip -d /usr/share/proj \
-#    && wget http://download.osgeo.org/proj/vdatum/vertcon/vertconc.gtx && mv vertconc.gtx /usr/share/proj \
-#    && wget http://download.osgeo.org/proj/vdatum/vertcon/vertcone.gtx && mv vertcone.gtx /usr/share/proj \
-#    && wget http://download.osgeo.org/proj/vdatum/vertcon/vertconw.gtx && mv vertconw.gtx /usr/share/proj \
-#    && wget http://download.osgeo.org/proj/vdatum/egm96_15/egm96_15.gtx && mv egm96_15.gtx /usr/share/proj \
-#    && wget http://download.osgeo.org/proj/vdatum/egm08_25/egm08_25.gtx && mv egm08_25.gtx /usr/share/proj \
-#    && rm -rf /vdatum
 
-RUN git clone https://github.com/gadomski/fgt.git \
+RUN git clone --depth 1 --branch v0.4.6 https://github.com/gadomski/fgt.git \
     && cd fgt \
-    && git checkout v0.4.4 \
-    && cmake . -DWITH_TESTS=OFF -DBUILD_SHARED_LIBS=ON -DEIGEN3_INCLUDE_DIR=/usr/include -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release \
+    && cmake . \
+        -DWITH_TESTS=OFF \
+        -DBUILD_SHARED_LIBS=ON \
+        -DEIGEN3_INCLUDE_DIR=/usr/include \
+        -DCMAKE_INSTALL_PREFIX=/usr \
+        -DCMAKE_BUILD_TYPE=Release \
     && make \
     && make install \
     && rm -rf /fgt
 
-RUN git clone https://github.com/gadomski/cpd.git \
+RUN git clone --depth 1 --branch v0.5.0 https://github.com/gadomski/cpd.git \
     && cd cpd \
-    && git checkout v0.3.2 \
-    && cmake . -DWITH_TESTS=OFF -DBUILD_SHARED_LIBS=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release \
+    && cmake . \
+        -DWITH_TESTS=OFF \
+        -DWITH_JSONCPP=OFF \
+        -DWITH_FGT=ON \
+        -DWITH_STRICT_WARNINGS=OFF \
+        -DWITH_DOCS=OFF \
+        -DEIGEN3_INCLUDE_DIR=/usr/include \
+        -DBUILD_SHARED_LIBS=ON \
+        -DCMAKE_INSTALL_PREFIX=/usr \
+        -DCMAKE_BUILD_TYPE=Release \
     && make \
     && make install \
     && rm -rf /cpd
 
-RUN apt-get update && apt-get install -y --fix-missing --no-install-recommends \
-        libhpdf-dev \
-    python-setuptools \
-    && rm -rf /var/lib/apt/lists/*
-
-
 RUN git clone https://github.com/ninja-build/ninja.git \
     && cd ninja \
     && ./configure.py --bootstrap \
@@ -307,6 +302,16 @@ RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true
 
 ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
 
+# fixup for PCL 1.7
+RUN ln -s /usr/lib/x86_64-linux-gnu/libvtkCommonCore-6.2.so /usr/lib/libvtkproj4.so
+
+RUN add-apt-repository ppa:ubuntugis/ubuntugis-unstable -y \
+    && apt-get update \
+    && apt-get install -y mbsystem mbsystem-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+
+
 #RUN update-alternatives --install /usr/bin/clang clang /usr/bin/clang-3.6 20 && update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-3.6 20
 
 
diff --git a/scripts/docker/docbuild/Dockerfile b/scripts/docker/docbuild/Dockerfile
index 984c245..3591187 100644
--- a/scripts/docker/docbuild/Dockerfile
+++ b/scripts/docker/docbuild/Dockerfile
@@ -1,7 +1,5 @@
 FROM ubuntu:16.04
 
-ADD http://www.timeapi.org/utc/now /tmp/bust-cache
-
 RUN apt-get -y update && apt-get install -y \
     python-dev python-pip g++ doxygen dvipng \
     cmake libjpeg8-dev zlib1g-dev texlive-latex-base \
diff --git a/scripts/linux-install-scripts/pdal.sh b/scripts/linux-install-scripts/pdal.sh
index 6522b5f..ad134b3 100644
--- a/scripts/linux-install-scripts/pdal.sh
+++ b/scripts/linux-install-scripts/pdal.sh
@@ -23,7 +23,6 @@ cmake   -G "Unix Makefiles"  \
         -DBUILD_PLUGIN_HEXBIN=ON \
         -DBUILD_PLUGIN_ICEBRIDGE=ON \
         -DBUILD_PLUGIN_NITF=ON \
-        -DBUILD_PLUGIN_P2G=ON \
         -DBUILD_PLUGIN_PGPOINTCLOUD=ON \
         -DBUILD_PLUGIN_SQLITE=ON \
         -DBUILD_PLUGIN_GREYHOUND=ON \
diff --git a/test/data/gdal/grid2.txt b/test/data/gdal/grid2.txt
new file mode 100644
index 0000000..1319e5b
--- /dev/null
+++ b/test/data/gdal/grid2.txt
@@ -0,0 +1,8 @@
+X,Y,Z
+
+-2, -2, 0
+-1.5, -1.5, -1
+-.5, -.5, -1
+-1.5, -.5, -1
+-.5, -1.5, -1
+4.5, 6.5, -1
diff --git a/test/data/las/spec_3.las b/test/data/las/spec_3.las
new file mode 100644
index 0000000..314f0aa
Binary files /dev/null and b/test/data/las/spec_3.las differ
diff --git a/test/data/logs/logtest_1.txt b/test/data/logs/logtest_1.txt
deleted file mode 100644
index 6d821b3..0000000
--- a/test/data/logs/logtest_1.txt
+++ /dev/null
@@ -1 +0,0 @@
-(readers.faux Debug: 8): 					Reading a point view of 750 points.
diff --git a/test/data/logs/logtest_123.txt b/test/data/logs/logtest_123.txt
deleted file mode 100644
index d816f82..0000000
--- a/test/data/logs/logtest_123.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-(readers.faux Debug: 8): 					Reading a point view of 750 points.
-(filters.programmable Debug: 8): 					Python script source=[117 bytes], module=xModule, function=xfunc
- processing 750 points.
-(filters.programmable Debug: 8): 					Python script source=[117 bytes], module=yModule, function=yfunc
- processing 750 points.
diff --git a/test/data/logs/logtest_2.txt b/test/data/logs/logtest_2.txt
deleted file mode 100644
index 3e20d30..0000000
--- a/test/data/logs/logtest_2.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-(filters.programmable Debug: 8): 					Python script source=[117 bytes], module=xModule, function=xfunc
- processing 750 points.
diff --git a/test/data/logs/logtest_3.txt b/test/data/logs/logtest_3.txt
deleted file mode 100644
index 37245e2..0000000
--- a/test/data/logs/logtest_3.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-(filters.programmable Debug: 8): 					Python script source=[117 bytes], module=yModule, function=yfunc
- processing 750 points.
diff --git a/test/data/logs/t1 b/test/data/logs/t1
new file mode 100644
index 0000000..d0e6dd3
--- /dev/null
+++ b/test/data/logs/t1
@@ -0,0 +1,2 @@
+(Debug) debug
+(Info) info
diff --git a/test/data/mbio/mbf_em300raw.mb56 b/test/data/mbio/mbf_em300raw.mb56
new file mode 100644
index 0000000..3aeb831
Binary files /dev/null and b/test/data/mbio/mbf_em300raw.mb56 differ
diff --git a/test/data/pipeline/assign.json.in b/test/data/pipeline/assign.json.in
new file mode 100644
index 0000000..b819bd6
--- /dev/null
+++ b/test/data/pipeline/assign.json.in
@@ -0,0 +1,14 @@
+{
+  "pipeline":[
+    "@CMAKE_SOURCE_DIR@/test/data/autzen/autzen-dd.las",
+    {
+      "type":"filters.assign",
+      "assignment":"PointSourceId[:]=26"
+    },
+    {
+      "filename":"@CMAKE_SOURCE_DIR@/test/temp/attributed.las",
+      "scale_x":0.0000001,
+      "scale_y":0.0000001
+    }
+  ]
+}
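
The new fixture exercises filters.assign, whose assignment option takes the form Dimension[range]=value; with [:] the value is applied to every point. A hedged sketch of the same stage driven from the Python bindings (paths and the assigned value are placeholders):

    import pdal

    json = """
    {
      "pipeline": [
        "input.las",
        {
          "type": "filters.assign",
          "assignment": "Classification[:]=2"
        },
        "output.las"
      ]
    }
    """

    pdal.Pipeline(json).execute()
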
diff --git a/test/data/pipeline/options.json.in b/test/data/pipeline/options.json.in
new file mode 100644
index 0000000..90cc3c6
--- /dev/null
+++ b/test/data/pipeline/options.json.in
@@ -0,0 +1,15 @@
+{
+  "pipeline":[
+    {
+      "filename" :"@CMAKE_SOURCE_DIR@/test/data/las/1.2-with-color.las",
+      "compression" : "laszip",
+      "tag": "reader"
+    },
+    {
+      "type":"filters.assign",
+      "assignment":"Z[:]=25",
+      "tag":"assigner"
+    },
+    "@CMAKE_SOURCE_DIR@/test/temp/assigned.las"
+  ]
+}
diff --git a/test/data/pipeline/attribute.json.in b/test/data/pipeline/overlay.json.in
similarity index 77%
rename from test/data/pipeline/attribute.json.in
rename to test/data/pipeline/overlay.json.in
index 86299f5..9852805 100644
--- a/test/data/pipeline/attribute.json.in
+++ b/test/data/pipeline/overlay.json.in
@@ -2,19 +2,14 @@
   "pipeline":[
     "@CMAKE_SOURCE_DIR@/test/data/autzen/autzen-dd.las",
     {
-      "type":"filters.attribute",
-      "dimension":"PointSourceId",
-      "value":26
-    },
-    {
-      "type":"filters.attribute",
+      "type":"filters.overlay",
       "dimension":"Intensity",
       "datasource":"@CMAKE_SOURCE_DIR@/test/data/autzen/attributes.shp",
       "query":"SELECT CLS FROM attributes where cls!=6",
       "column":"CLS"
     },
     {
-      "type":"filters.attribute",
+      "type":"filters.overlay",
       "dimension":"Classification",
       "datasource":"@CMAKE_SOURCE_DIR@/test/data/autzen/attributes.shp",
       "layer":"attributes",
diff --git a/test/data/pts/autzen.pts b/test/data/pts/autzen.pts
new file mode 100644
index 0000000..b4c838e
--- /dev/null
+++ b/test/data/pts/autzen.pts
@@ -0,0 +1,11 @@
+10
+636110.300003 849345.050003 512.139999 -1613 52 70 64
+636102.840012 849345.199997 512.009995 -1998 58 76 67
+636125.449997 849240.250015 428.150009 602 120 127 103
+636122.669998 848984.550003 427.990005 -890 116 128 100
+636122.099991 848986.780014 427.919998 -826 112 126 98
+636121.580002 848989.039993 427.949997 -1131 112 126 98
+636121.060013 848991.229996 427.990005 -1179 107 123 95
+636126.400009 848968.800003 427.990005 -1260 112 126 98
+636124.830002 848975.509995 427.990005 -858 116 128 100
+636125.879990 848971.060013 428.050003 -698 114 127 100
diff --git a/test/data/text/crlf_test.txt b/test/data/text/crlf_test.txt
new file mode 100644
index 0000000..074fa42
--- /dev/null
+++ b/test/data/text/crlf_test.txt
@@ -0,0 +1,11 @@
+X,Y,Z,Intensity
+289814.15,4320978.61,170.76,0
+289814.64,4320978.84,170.76,1
+289815.12,4320979.06,170.75,2
+289815.60,4320979.28,170.74,3
+289816.08,4320979.50,170.68,4
+289816.56,4320979.71,170.66,5
+289817.03,4320979.92,170.63,6
+289817.53,4320980.16,170.62,7
+289818.01,4320980.38,170.61,8
+289818.50,4320980.59,170.58,9
diff --git a/test/temp/SbetWriterTest.sbet b/test/temp/SbetWriterTest.sbet
deleted file mode 100644
index 314f893..0000000
Binary files a/test/temp/SbetWriterTest.sbet and /dev/null differ
diff --git a/test/temp/colorized.las b/test/temp/colorized.las
deleted file mode 100644
index cb457cd..0000000
Binary files a/test/temp/colorized.las and /dev/null differ
diff --git a/test/temp/crop-wkt-2d-classification.las b/test/temp/crop-wkt-2d-classification.las
deleted file mode 100644
index 31e74b1..0000000
Binary files a/test/temp/crop-wkt-2d-classification.las and /dev/null differ
diff --git a/test/temp/foo.las b/test/temp/foo.las
deleted file mode 100644
index 8f5df92..0000000
Binary files a/test/temp/foo.las and /dev/null differ
diff --git a/test/temp/issue895.sqlite b/test/temp/issue895.sqlite
deleted file mode 100644
index 4c9efca..0000000
Binary files a/test/temp/issue895.sqlite and /dev/null differ
diff --git a/test/temp/meta.json b/test/temp/meta.json
deleted file mode 100644
index 9374163..0000000
--- a/test/temp/meta.json
+++ /dev/null
@@ -1,91 +0,0 @@
-{
-  "stages":
-  {
-    "readers.las":
-    {
-      "comp_spatialreference": "PROJCS[\"NAD_1983_HARN_Lambert_Conformal_Conic\",GEOGCS[\"GCS_North_American_1983_HARN\",DATUM[\"NAD83_High_Accuracy_Reference_Network\",SPHEROID[\"GRS 1980\",6378137,298.2572221010002,AUTHORITY[\"EPSG\",\"7019\"]],AUTHORITY[\"EPSG\",\"6152\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]],PROJECTION[\"Lambert_Conformal_Conic_2SP\"],PARAMETER[\"standard_parallel_1\",43],PARAMETER[\"standard_parallel_2\",45.5],PARAMETER[\"latitude_of_origin\" [...]
-      "compressed": false,
-      "count": 110000,
-      "creation_doy": 253,
-      "creation_year": 2015,
-      "dataformat_id": 3,
-      "dataoffset": 2038,
-      "filesource_id": 0,
-      "global_encoding": 0,
-      "global_encoding_base64": "AAA=",
-      "header_size": 227,
-      "major_version": 1,
-      "maxx": 637179.22,
-      "maxy": 849497.9,
-      "maxz": 520.51,
-      "minor_version": 2,
-      "minx": 636001.76,
-      "miny": 848935.2,
-      "minz": 406.26,
-      "offset_x": 0,
-      "offset_y": 0,
-      "offset_z": 0,
-      "project_id": "00000000-0000-0000-0000-000000000000",
-      "scale_x": 0.01,
-      "scale_y": 0.01,
-      "scale_z": 0.01,
-      "software_id": "PDAL 1.0.0 (9e8465)",
-      "spatialreference": "PROJCS[\"NAD_1983_HARN_Lambert_Conformal_Conic\",GEOGCS[\"GCS_North_American_1983_HARN\",DATUM[\"NAD83_High_Accuracy_Reference_Network\",SPHEROID[\"GRS 1980\",6378137,298.2572221010002,AUTHORITY[\"EPSG\",\"7019\"]],AUTHORITY[\"EPSG\",\"6152\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]],PROJECTION[\"Lambert_Conformal_Conic_2SP\"],PARAMETER[\"standard_parallel_1\",43],PARAMETER[\"standard_parallel_2\",45.5],PARAMETER[\"latitude_of_origin\",41.7 [...]
-      "srs":
-      {
-        "compoundwkt": "PROJCS[\"NAD_1983_HARN_Lambert_Conformal_Conic\",GEOGCS[\"GCS_North_American_1983_HARN\",DATUM[\"NAD83_High_Accuracy_Reference_Network\",SPHEROID[\"GRS 1980\",6378137,298.2572221010002,AUTHORITY[\"EPSG\",\"7019\"]],AUTHORITY[\"EPSG\",\"6152\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]],PROJECTION[\"Lambert_Conformal_Conic_2SP\"],PARAMETER[\"standard_parallel_1\",43],PARAMETER[\"standard_parallel_2\",45.5],PARAMETER[\"latitude_of_origin\",41.75], [...]
-        "horizontal": "PROJCS[\"NAD_1983_HARN_Lambert_Conformal_Conic\",GEOGCS[\"GCS_North_American_1983_HARN\",DATUM[\"NAD83_High_Accuracy_Reference_Network\",SPHEROID[\"GRS 1980\",6378137,298.2572221010002,AUTHORITY[\"EPSG\",\"7019\"]],AUTHORITY[\"EPSG\",\"6152\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]],PROJECTION[\"Lambert_Conformal_Conic_2SP\"],PARAMETER[\"standard_parallel_1\",43],PARAMETER[\"standard_parallel_2\",45.5],PARAMETER[\"latitude_of_origin\",41.75],P [...]
-        "isgeocentric": false,
-        "isgeographic": false,
-        "prettycompoundwkt": "PROJCS[\"NAD_1983_HARN_Lambert_Conformal_Conic\",    GEOGCS[\"GCS_North_American_1983_HARN\",        DATUM[\"NAD83_High_Accuracy_Reference_Network\",            SPHEROID[\"GRS 1980\",6378137,298.2572221010002,                AUTHORITY[\"EPSG\",\"7019\"]],            AUTHORITY[\"EPSG\",\"6152\"]],        PRIMEM[\"Greenwich\",0],        UNIT[\"degree\",0.0174532925199433]],    PROJECTION[\"Lambert_Conformal_Conic_2SP\"],    PARAMETER[\"standard_parallel_1\",43 [...]
-        "prettywkt": "PROJCS[\"NAD_1983_HARN_Lambert_Conformal_Conic\",    GEOGCS[\"GCS_North_American_1983_HARN\",        DATUM[\"NAD83_High_Accuracy_Reference_Network\",            SPHEROID[\"GRS 1980\",6378137,298.2572221010002,                AUTHORITY[\"EPSG\",\"7019\"]],            AUTHORITY[\"EPSG\",\"6152\"]],        PRIMEM[\"Greenwich\",0],        UNIT[\"degree\",0.0174532925199433]],    PROJECTION[\"Lambert_Conformal_Conic_2SP\"],    PARAMETER[\"standard_parallel_1\",43],    PA [...]
-        "proj4": "+proj=lcc +lat_1=43 +lat_2=45.5 +lat_0=41.75 +lon_0=-120.5 +x_0=399999.9999999999 +y_0=0 +ellps=GRS80 +units=ft +no_defs",
-        "units":
-        {
-          "horizontal": "foot",
-          "vertical": ""
-        },
-        "vertical": "",
-        "wkt": "PROJCS[\"NAD_1983_HARN_Lambert_Conformal_Conic\",GEOGCS[\"GCS_North_American_1983_HARN\",DATUM[\"NAD83_High_Accuracy_Reference_Network\",SPHEROID[\"GRS 1980\",6378137,298.2572221010002,AUTHORITY[\"EPSG\",\"7019\"]],AUTHORITY[\"EPSG\",\"6152\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]],PROJECTION[\"Lambert_Conformal_Conic_2SP\"],PARAMETER[\"standard_parallel_1\",43],PARAMETER[\"standard_parallel_2\",45.5],PARAMETER[\"latitude_of_origin\",41.75],PARAMETE [...]
-      },
-      "system_id": "PDAL",
-      "vlr_0": "AQABAAAAFQAABAAAAQABAAEEAAABAAEAAgSxhyYAAAAACAAAAQD\/fwEIsYc8ACYAAggAAAEACBgGCAAAAQCOIwkIsIcBAAcACwiwhwEABgANCLCHAQAIAPMLAAABAAEAAAwAAAEA\/38CDAAAAQD\/fwMMAAABAAgABAwAAAEAKiMGDLCHAQACAAcMsIcBAAMADAywhwEAAQANDLCHAQAAAA4MsIcBAAQADwywhwEABQAAAAAAAAAAAA==",
-      "vlr_0":       {
-        "description": "GeoTiff GeoKeyDirectoryTag",
-        "record_id": 34735,
-        "user_id": "LASF_Projection"
-      },
-      "vlr_1": "AAAAAADgREAAAAAAACBewAAAAAAAgEVAAAAAAADARkD+1D\/1TwY0QQAAAAAAAAAAqPnrlB2kckAAAABAplRYQQAAAAAAAAAA",
-      "vlr_1":       {
-        "description": "GeoTiff GeoDoubleParamsTag",
-        "record_id": 34736,
-        "user_id": "LASF_Projection"
-      },
-      "vlr_2": "TkFEXzE5ODNfSEFSTl9MYW1iZXJ0X0NvbmZvcm1hbF9Db25pY3xHQ1MgTmFtZSA9IEdDU19Ob3J0aF9BbWVyaWNhbl8xOTgzX0hBUk58UHJpbWVtID0gR3JlZW53aWNofHwA",
-      "vlr_2":       {
-        "description": "GeoTiff GeoAsciiParamsTag",
-        "record_id": 34737,
-        "user_id": "LASF_Projection"
-      },
-      "vlr_3": "UFJPSkNTWyJOQURfMTk4M19IQVJOX0xhbWJlcnRfQ29uZm9ybWFsX0NvbmljIixHRU9HQ1NbIkdDU19Ob3J0aF9BbWVyaWNhbl8xOTgzX0hBUk4iLERBVFVNWyJOQUQ4M19IaWdoX0FjY3VyYWN5X1JlZ2lvbmFsX05ldHdvcmsiLFNQSEVST0lEWyJHUlNfMTk4MCIsNjM3ODEzNywyOTguMjU3MjIyMTAxLEFVVEhPUklUWVsiRVBTRyIsIjcwMTkiXV0sQVVUSE9SSVRZWyJFUFNHIiwiNjE1MiJdXSxQUklNRU1bIkdyZWVud2ljaCIsMF0sVU5JVFsiZGVncmVlIiwwLjAxNzQ1MzI5MjUxOTk0MzNdXSxQUk9KRUNUSU9OWyJMYW1iZXJ0X0NvbmZvcm1hbF9Db25pY18yU1AiXSxQQVJBTUVURVJbInN0YW5kYXJkX3BhcmFsbGVsXzEiLDQz [...]
-      "vlr_3":       {
-        "description": "OGC Tranformation Record",
-        "record_id": 2112,
-        "user_id": "LASF_Projection"
-      },
-      "vlr_4": "UFJPSkNTWyJOQURfMTk4M19IQVJOX0xhbWJlcnRfQ29uZm9ybWFsX0NvbmljIixHRU9HQ1NbIkdDU19Ob3J0aF9BbWVyaWNhbl8xOTgzX0hBUk4iLERBVFVNWyJOQUQ4M19IaWdoX0FjY3VyYWN5X1JlZ2lvbmFsX05ldHdvcmsiLFNQSEVST0lEWyJHUlNfMTk4MCIsNjM3ODEzNywyOTguMjU3MjIyMTAxLEFVVEhPUklUWVsiRVBTRyIsIjcwMTkiXV0sQVVUSE9SSVRZWyJFUFNHIiwiNjE1MiJdXSxQUklNRU1bIkdyZWVud2ljaCIsMF0sVU5JVFsiZGVncmVlIiwwLjAxNzQ1MzI5MjUxOTk0MzNdXSxQUk9KRUNUSU9OWyJMYW1iZXJ0X0NvbmZvcm1hbF9Db25pY18yU1AiXSxQQVJBTUVURVJbInN0YW5kYXJkX3BhcmFsbGVsXzEiLDQz [...]
-      "vlr_4":       {
-        "description": "OGR variant of OpenGIS WKT SRS",
-        "record_id": 2112,
-        "user_id": "liblas"
-      }
-    },
-    "writers.las":
-    {
-      "filename":
-      [
-        "\/PDAL\/test\/data\/..\/temp\/out.las"
-      ]
-    }
-  }
-}
diff --git a/test/temp/mylog_three.txt b/test/temp/mylog_three.txt
deleted file mode 100644
index 586a766..0000000
--- a/test/temp/mylog_three.txt
+++ /dev/null
@@ -1 +0,0 @@
-Testing log output through python script.
diff --git a/test/temp/out.las b/test/temp/out.las
deleted file mode 100644
index 59b72f9..0000000
Binary files a/test/temp/out.las and /dev/null differ
diff --git a/test/temp/out.ply b/test/temp/out.ply
deleted file mode 100644
index a92e8f0..0000000
Binary files a/test/temp/out.ply and /dev/null differ
diff --git a/test/temp/out2.las b/test/temp/out2.las
deleted file mode 100644
index 6706356..0000000
Binary files a/test/temp/out2.las and /dev/null differ
diff --git a/test/temp/outfile.txt b/test/temp/outfile.txt
deleted file mode 100644
index bbc361a..0000000
--- a/test/temp/outfile.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-"GpsTime","Y","X","Z","XVelocity","YVelocity","ZVelocity","Roll","Pitch","Azimuth","WanderAngle","XBodyAccel","YBodyAccel","ZBodyAccel","XBodyAngRate","YBodyAngRate","ZBodyAngRate"
-151631.003,0.568,-2.042,107.715,-2.332,-0.334,-0.031,-0.028,-0.024,3.047,-0.022,0.786,0.785,-0.298,0.000,0.009,0.072
-151631.008,0.568,-2.042,107.715,-2.336,-0.332,-0.030,-0.028,-0.024,3.047,-0.022,0.840,0.325,-0.156,0.001,0.007,0.072
diff --git a/test/temp/simple.las b/test/temp/simple.las
deleted file mode 100644
index 61b1caa..0000000
Binary files a/test/temp/simple.las and /dev/null differ
diff --git a/test/temp/spat.sqlite b/test/temp/spat.sqlite
deleted file mode 100644
index e056423..0000000
Binary files a/test/temp/spat.sqlite and /dev/null differ
diff --git a/test/temp/spver.sqlite b/test/temp/spver.sqlite
deleted file mode 100644
index e69de29..0000000
diff --git a/test/temp/temp-SqliteWriterTest_test_simple_las.sqlite b/test/temp/temp-SqliteWriterTest_test_simple_las.sqlite
deleted file mode 100644
index 2c1ecfa..0000000
Binary files a/test/temp/temp-SqliteWriterTest_test_simple_las.sqlite and /dev/null differ
diff --git a/test/temp/temp_nitf.ntf b/test/temp/temp_nitf.ntf
deleted file mode 100644
index 1ce08d6..0000000
Binary files a/test/temp/temp_nitf.ntf and /dev/null differ
diff --git a/test/temp/test.bpf b/test/temp/test.bpf
deleted file mode 100644
index 6766416..0000000
Binary files a/test/temp/test.bpf and /dev/null differ
diff --git a/test/temp/test_1.bpf b/test/temp/test_1.bpf
deleted file mode 100644
index 9d387f4..0000000
Binary files a/test/temp/test_1.bpf and /dev/null differ
diff --git a/test/temp/test_1.las b/test/temp/test_1.las
deleted file mode 100644
index 7611b81..0000000
Binary files a/test/temp/test_1.las and /dev/null differ
diff --git a/test/temp/test_1.ntf b/test/temp/test_1.ntf
deleted file mode 100644
index 09446f1..0000000
Binary files a/test/temp/test_1.ntf and /dev/null differ
diff --git a/test/temp/test_2.bpf b/test/temp/test_2.bpf
deleted file mode 100644
index 8f0792c..0000000
Binary files a/test/temp/test_2.bpf and /dev/null differ
diff --git a/test/temp/test_2.las b/test/temp/test_2.las
deleted file mode 100644
index 7adb4c7..0000000
Binary files a/test/temp/test_2.las and /dev/null differ
diff --git a/test/temp/test_2.ntf b/test/temp/test_2.ntf
deleted file mode 100644
index d3690c5..0000000
Binary files a/test/temp/test_2.ntf and /dev/null differ
diff --git a/test/temp/test_3.bpf b/test/temp/test_3.bpf
deleted file mode 100644
index 3d82866..0000000
Binary files a/test/temp/test_3.bpf and /dev/null differ
diff --git a/test/temp/test_3.las b/test/temp/test_3.las
deleted file mode 100644
index 28005fe..0000000
Binary files a/test/temp/test_3.las and /dev/null differ
diff --git a/test/temp/test_3.ntf b/test/temp/test_3.ntf
deleted file mode 100644
index fe68423..0000000
Binary files a/test/temp/test_3.ntf and /dev/null differ
diff --git a/test/temp/test_flex.bpf b/test/temp/test_flex.bpf
deleted file mode 100644
index 18c90af..0000000
Binary files a/test/temp/test_flex.bpf and /dev/null differ
diff --git a/test/temp/test_flex.las b/test/temp/test_flex.las
deleted file mode 100644
index ec61a76..0000000
Binary files a/test/temp/test_flex.las and /dev/null differ
diff --git a/test/temp/test_flex.ntf b/test/temp/test_flex.ntf
deleted file mode 100644
index f8ba32d..0000000
Binary files a/test/temp/test_flex.ntf and /dev/null differ
diff --git a/test/temp/tmp.bpf b/test/temp/tmp.bpf
deleted file mode 100644
index 45fc0fb..0000000
Binary files a/test/temp/tmp.bpf and /dev/null differ
diff --git a/test/temp/tmp.las b/test/temp/tmp.las
deleted file mode 100644
index 2e09205..0000000
Binary files a/test/temp/tmp.las and /dev/null differ
diff --git a/test/temp/tmp.tif b/test/temp/tmp.tif
deleted file mode 100644
index 24134d3..0000000
Binary files a/test/temp/tmp.tif and /dev/null differ
diff --git a/test/temp/trimtest.las b/test/temp/trimtest.las
deleted file mode 100644
index 6aae3c9..0000000
Binary files a/test/temp/trimtest.las and /dev/null differ
diff --git a/test/temp/triple.las b/test/temp/triple.las
deleted file mode 100644
index 0c6d5ae..0000000
Binary files a/test/temp/triple.las and /dev/null differ
diff --git a/test/temp/utm17.txt b/test/temp/utm17.txt
deleted file mode 100644
index a03955f..0000000
--- a/test/temp/utm17.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-X  Y  Z
-289814.15  4320978.61  170.76
-289814.64  4320978.84  170.76
-289815.12  4320979.06  170.75
-289815.60  4320979.28  170.74
-289816.08  4320979.50  170.68
-289816.56  4320979.71  170.66
-289817.03  4320979.92  170.63
-289817.53  4320980.16  170.62
-289818.01  4320980.38  170.61
-289818.50  4320980.59  170.58
diff --git a/test/unit/CMakeLists.txt b/test/unit/CMakeLists.txt
index c3b0ed9..5c8989c 100644
--- a/test/unit/CMakeLists.txt
+++ b/test/unit/CMakeLists.txt
@@ -39,7 +39,9 @@ PDAL_ADD_TEST(pdal_program_arg_test FILES ProgramArgsTest.cpp)
         ${PDAL_JSONCPP_INCLUDE_DIR})
 
 PDAL_ADD_TEST(pdal_polygon_test FILES PolygonTest.cpp)
+PDAL_ADD_TEST(pdal_segmentation_test FILES SegmentationTest.cpp)
 PDAL_ADD_TEST(pdal_spatial_reference_test FILES SpatialReferenceTest.cpp)
+target_include_directories(pdal_spatial_reference_test PRIVATE ${PDAL_JSONCPP_INCLUDE_DIR})
 PDAL_ADD_TEST(pdal_stage_factory_test FILES StageFactoryTest.cpp)
 PDAL_ADD_TEST(pdal_streaming_test FILES StreamingTest.cpp)
 PDAL_ADD_TEST(pdal_support_test FILES SupportTest.cpp)
@@ -58,7 +60,9 @@ PDAL_ADD_TEST(pdal_io_faux_test FILES io/FauxReaderTest.cpp)
 PDAL_ADD_TEST(pdal_io_gdal_reader_test FILES io/GDALReaderTest.cpp)
 PDAL_ADD_TEST(pdal_io_gdal_writer_test FILES io/GDALWriterTest.cpp)
 PDAL_ADD_TEST(pdal_io_las_reader_test FILES io/LasReaderTest.cpp)
+target_include_directories(pdal_io_las_reader_test PRIVATE ${PDAL_JSONCPP_INCLUDE_DIR})
 PDAL_ADD_TEST(pdal_io_las_writer_test FILES io/LasWriterTest.cpp)
+target_include_directories(pdal_io_las_writer_test PRIVATE ${PDAL_JSONCPP_INCLUDE_DIR})
 PDAL_ADD_TEST(pdal_io_optech_test FILES io/OptechReaderTest.cpp)
 PDAL_ADD_TEST(pdal_io_ply_reader_test FILES io/PlyReaderTest.cpp)
 target_include_directories(pdal_io_ply_reader_test PRIVATE ${PDAL_VENDOR_DIR})
@@ -70,13 +74,15 @@ PDAL_ADD_TEST(pdal_io_sbet_reader_test FILES io/SbetReaderTest.cpp)
 PDAL_ADD_TEST(pdal_io_sbet_writer_test FILES io/SbetWriterTest.cpp)
 PDAL_ADD_TEST(pdal_io_terrasolid_test FILES io/TerrasolidReaderTest.cpp)
 PDAL_ADD_TEST(pdal_io_text_reader_test FILES io/TextReaderTest.cpp)
+target_include_directories(pdal_io_text_reader_test PRIVATE ${PDAL_JSONCPP_INCLUDE_DIR})
 PDAL_ADD_TEST(pdal_io_text_writer_test FILES io/TextWriterTest.cpp)
 
 #
 # sources for the native filters
 #
-PDAL_ADD_TEST(pdal_filters_attribute_test FILES filters/AttributeFilterTest.cpp)
+PDAL_ADD_TEST(pdal_filters_assign_test FILES filters/AssignFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_chipper_test FILES filters/ChipperTest.cpp)
+target_include_directories(pdal_filters_chipper_test PRIVATE ${PDAL_JSONCPP_INCLUDE_DIR})
 PDAL_ADD_TEST(pdal_filters_colorization_test FILES
     filters/ColorizationFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_computerange_test FILES filters/ComputeRangeFilterTest.cpp)
@@ -85,14 +91,19 @@ PDAL_ADD_TEST(pdal_filters_decimation_test FILES
     filters/DecimationFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_divider_test FILES filters/DividerFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_ferry_test FILES filters/FerryFilterTest.cpp)
+PDAL_ADD_TEST(pdal_filters_groupby_test FILES filters/GroupByFilterTest.cpp)
+PDAL_ADD_TEST(pdal_filters_locate_test FILES filters/LocateFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_merge_test FILES filters/MergeTest.cpp)
 PDAL_ADD_TEST(pdal_filters_additional_merge_test FILES
     filters/AdditionalMergeTest.cpp)
+target_include_directories(pdal_filters_additional_merge_test PRIVATE ${PDAL_JSONCPP_INCLUDE_DIR})
+PDAL_ADD_TEST(pdal_filters_overlay_test FILES filters/OverlayFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_reprojection_test FILES
     filters/ReprojectionFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_range_test FILES filters/RangeFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_randomize_test FILES filters/RandomizeFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_sort_test FILES filters/SortFilterTest.cpp)
+target_include_directories(pdal_filters_sort_test PRIVATE ${PDAL_JSONCPP_INCLUDE_DIR})
 PDAL_ADD_TEST(pdal_filters_splitter_test FILES filters/SplitterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_stats_test FILES filters/StatsFilterTest.cpp)
 PDAL_ADD_TEST(pdal_filters_transformation_test FILES
@@ -105,7 +116,6 @@ endif()
 PDAL_ADD_TEST(pc2pc_test FILES apps/pc2pcTest.cpp)
 
 if (BUILD_PIPELINE_TESTS)
-    PDAL_ADD_TEST(pcpipeline_test FILES apps/pcpipelineTest.cpp)
     PDAL_ADD_TEST(pcpipeline_test_json FILES apps/pcpipelineTestJSON.cpp)
 endif()
 PDAL_ADD_TEST(hausdorff_test FILES apps/HausdorffTest.cpp)
diff --git a/test/unit/EigenTest.cpp b/test/unit/EigenTest.cpp
index cedc174..ec8c969 100644
--- a/test/unit/EigenTest.cpp
+++ b/test/unit/EigenTest.cpp
@@ -1,36 +1,36 @@
 /******************************************************************************
-* Copyright (c) 2016, Peter J. Gadomski <pete.gadomski at gmail.com>
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
+ * Copyright (c) 2016, Peter J. Gadomski <pete.gadomski at gmail.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+ *       names of its contributors may be used to endorse or promote
+ *       products derived from this software without specific prior
+ *       written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ ****************************************************************************/
 
 #include <pdal/pdal_test_main.hpp>
 
@@ -66,9 +66,8 @@ TEST(EigenTest, ComputeValues)
     using namespace Eigen;
 
     Matrix3d A;
-    A << 1.8339, 0.3188, 0.3426,
-    -2.2588, -1.3077, 3.5784,
-    0.8622, -0.4336, 2.7694;
+    A << 1.8339, 0.3188, 0.3426, -2.2588, -1.3077, 3.5784, 0.8622, -0.4336,
+        2.7694;
 
     double spacing(1.4);
 
@@ -115,7 +114,7 @@ TEST(EigenTest, ComputeValues)
     EXPECT_NEAR(0.8236099869, slope, 0.0001);
 
     double sd8 = eigen::computeSlopeD8(A, spacing);
-    EXPECT_NEAR(48.0378, sd8, 0.0001);
+    EXPECT_NEAR(67.9357, sd8, 0.0001);
 
     double sfd = eigen::computeSlopeFD(A, spacing);
     EXPECT_NEAR(210.1961, sfd, 0.0001);
@@ -127,24 +126,22 @@ TEST(EigenTest, ComputeValues)
     EXPECT_NEAR(269.8718, afd, 0.0001);
 
     double hs = eigen::computeHillshade(A, spacing, 45.0, 315.0);
-    
+
     MatrixXd out = eigen::gradX(A);
-    
+
     Matrix3d gx;
-    gx << -1.5151, -0.7457, 0.0238,
-    0.9511, 2.9186, 4.8861,
-    -1.2958, 0.9536, 3.2030;
-    
+    gx << -1.5151, -0.7457, 0.0238, 0.9511, 2.9186, 4.8861, -1.2958, 0.9536,
+        3.2030;
+
     for (size_t i = 0; i < 9; ++i)
         EXPECT_NEAR(gx(i), out(i), 0.0001);
-    
+
     MatrixXd out2 = eigen::gradY(A);
-    
+
     Matrix3d gy;
-    gy << -4.0927, -1.6265, 3.2358,
-    -0.4859, -0.3762, 1.2134,
-    3.1210, 0.8741, -0.8090;
-    
+    gy << -4.0927, -1.6265, 3.2358, -0.4859, -0.3762, 1.2134, 3.1210, 0.8741,
+        -0.8090;
+
     for (size_t i = 0; i < 9; ++i)
         EXPECT_NEAR(gy(i), out2(i), 0.0001);
 
@@ -200,10 +197,9 @@ TEST(EigenTest, Padding)
 {
     using namespace Eigen;
 
-    Matrix3d A;
-    A << 1.8339, 0.3188, 0.3426,
-    -2.2588, -1.3077, 3.5784,
-    0.8622, -0.4336, 2.7694;
+    MatrixXd A(3, 3);
+    A << 1.8339, 0.3188, 0.3426, -2.2588, -1.3077, 3.5784, 0.8622, -0.4336,
+        2.7694;
 
     MatrixXd B = eigen::padMatrix(A, 1);
 
@@ -215,3 +211,42 @@ TEST(EigenTest, Padding)
     EXPECT_EQ(3.5784, B(2, 4));
     EXPECT_EQ(-0.4336, B(4, 2));
 }
+
+TEST(EigenTest, Morphological)
+{
+    using namespace Eigen;
+
+    MatrixXd C(5, 5);
+    C << 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
+        0;
+    std::vector<double> Cv(C.data(), C.data() + C.size());
+
+    MatrixXd D = eigen::dilate(C, 1);
+    std::vector<double> Dv = eigen::dilateDiamond(Cv, 5, 5, 1);
+    std::vector<double> Dv2 = eigen::dilateDiamond(Cv, 5, 5, 2);
+
+    EXPECT_EQ(0, D(0, 0));
+    EXPECT_EQ(1, D(1, 0));
+    EXPECT_EQ(1, D(0, 1));
+    EXPECT_EQ(0, Dv[0]);
+    EXPECT_EQ(1, Dv[1]);
+    EXPECT_EQ(1, Dv[5]);
+    EXPECT_EQ(1, Dv2[0]);
+    EXPECT_EQ(1, Dv2[10]);
+    EXPECT_EQ(0, Dv2[15]);
+
+    MatrixXd E(5, 5);
+    E << 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,
+        0;
+    std::vector<double> Ev(E.data(), E.data() + E.size());
+
+    MatrixXd F = eigen::erode(E, 1);
+    std::vector<double> Fv = eigen::erodeDiamond(Ev, 5, 5, 1);
+    std::vector<double> Fv2 = eigen::erodeDiamond(Ev, 5, 5, 2);
+
+    EXPECT_EQ(0, F(1, 3));
+    EXPECT_EQ(1, F(2, 2));
+    EXPECT_EQ(0, Fv[16]);
+    EXPECT_EQ(1, Fv[12]);
+    EXPECT_EQ(0, Fv2[12]);
+}
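
The new Morphological test covers grayscale dilation and erosion over a flattened raster with a diamond (von Neumann) neighborhood. A hedged NumPy sketch of the dilation half, assuming radius r is applied as r passes of the radius-1 diamond and that values beyond the border never win (padding with -inf); PDAL's actual border handling may differ:

    import numpy as np

    def dilate_diamond(data, rows, cols, radius):
        # Each pass takes the max over a cell and its 4-connected
        # neighbors; `radius` passes grow the diamond to that radius.
        out = np.asarray(data, dtype=float).reshape(rows, cols).copy()
        for _ in range(radius):
            p = np.pad(out, 1, constant_values=-np.inf)
            out = np.maximum.reduce([
                p[1:-1, 1:-1],  # center
                p[:-2, 1:-1],   # north
                p[2:, 1:-1],    # south
                p[1:-1, :-2],   # west
                p[1:-1, 2:],    # east
            ])
        return out.ravel().tolist()

    # erodeDiamond is the dual: np.minimum.reduce with +inf padding.
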
diff --git a/test/unit/KernelTest.cpp b/test/unit/KernelTest.cpp
index bd2f420..22a3dbe 100644
--- a/test/unit/KernelTest.cpp
+++ b/test/unit/KernelTest.cpp
@@ -41,6 +41,20 @@
 namespace pdal
 {
 
+/**
+class TestKernel : public Kernel
+{
+public:
+    virtual std::string getName()
+        { return "TestKernel"; }
+    int execute()
+        { return 0; }
+    bool test_parseStageOption(std::string o, std::string& stage,
+        std::string& option, std::string& value)
+    { return Kernel::parseStageOption(o, stage, option, value); }
+}
+**/
+
 TEST(KernelTest, parseOption)
 {
     std::string stage;
@@ -48,22 +62,22 @@ TEST(KernelTest, parseOption)
     std::string value;
     bool ok;
 
-    ok = Kernel::test_parseOption("--readers.p2g.foobar=baz",
+    ok = Kernel::test_parseStageOption("--readers.p2g.foobar=baz",
         stage, option, value);
     EXPECT_TRUE(ok);
     EXPECT_EQ(stage, "readers.p2g");
     EXPECT_EQ(option, "foobar");
     EXPECT_EQ(value, "baz");
 
-    ok = Kernel::test_parseOption("--readers.2pg.foobar=baz",
+    ok = Kernel::test_parseStageOption("--readers.2pg.foobar=baz",
         stage, option, value);
     EXPECT_FALSE(ok);
 
-    ok = Kernel::test_parseOption("--read1ers.las.foobar=baz",
+    ok = Kernel::test_parseStageOption("--read1ers.las.foobar=baz",
         stage, option, value);
     EXPECT_FALSE(ok);
 
-    ok = Kernel::test_parseOption("--readers.p2g.foobar",
+    ok = Kernel::test_parseStageOption("--readers.p2g.foobar",
         stage, option, value);
     EXPECT_TRUE(ok);
     EXPECT_EQ(value, "");
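The renamed helper wraps Kernel::parseStageOption, which splits a word of the
form --<driver category>.<driver name>.<option>[=<value>]. A rough sketch of
the rule the assertions imply (category purely alphabetic, rejecting
"read1ers"; driver name starting with a letter, rejecting "2pg" but accepting
"p2g"; value defaulting to empty); the real parser may be stricter:

    #include <cctype>
    #include <string>

    bool parseStageOptionSketch(std::string arg, std::string& stage,
        std::string& option, std::string& value)
    {
        if (arg.compare(0, 2, "--") != 0)
            return false;
        arg = arg.substr(2);

        std::string::size_type d1 = arg.find('.');
        if (d1 == std::string::npos)
            return false;
        std::string::size_type d2 = arg.find('.', d1 + 1);
        if (d2 == std::string::npos)
            return false;

        std::string category = arg.substr(0, d1);
        std::string name = arg.substr(d1 + 1, d2 - d1 - 1);
        for (char c : category)
            if (!std::isalpha(static_cast<unsigned char>(c)))
                return false;   // rejects "read1ers"
        if (name.empty() || !std::isalpha(static_cast<unsigned char>(name[0])))
            return false;       // rejects "2pg"

        stage = category + "." + name;
        std::string rest = arg.substr(d2 + 1);
        std::string::size_type eq = rest.find('=');
        option = rest.substr(0, eq);
        value = (eq == std::string::npos) ? "" : rest.substr(eq + 1);
        return !option.empty();
    }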
diff --git a/test/unit/LogTest.cpp b/test/unit/LogTest.cpp
index aa78e92..fd38bf1 100644
--- a/test/unit/LogTest.cpp
+++ b/test/unit/LogTest.cpp
@@ -33,12 +33,57 @@
 ****************************************************************************/
 
 #include <pdal/pdal_test_main.hpp>
-#include <pdal/Options.hpp>
-#include <pdal/PointView.hpp>
-#include <pdal/StageFactory.hpp>
-#include <io/FauxReader.hpp>
+#include <pdal/Log.hpp>
+#include <pdal/util/FileUtils.hpp>
 #include "Support.hpp"
 
-using namespace pdal;
+namespace pdal
+{
 
-//ABELL - Need some tests here, but what we had was crap.
+// Make sure that we properly discard log output that is more verbose than
+// the current level and that we generally log correctly.
+TEST(Log, t1)
+{
+    std::string filename(Support::temppath("t1"));
+    FileUtils::deleteFile(filename);
+
+    // Scope makes sure file gets closed.
+    {
+        Log l("", filename);
+
+        l.setLevel(LogLevel::Debug);
+
+        l.get(LogLevel::Debug) << "debug\n";
+        l.get(LogLevel::Debug5) << "debug5\n";
+        l.get(LogLevel::Info) << "info\n";
+    }
+
+    EXPECT_TRUE(Support::compare_text_files(filename,
+        Support::datapath("logs/t1")));
+}
+
+// Make sure that the devnull log device works.
+TEST(Log, t2)
+{
+    std::string in(Support::datapath("las/utm15.las"));
+    std::string out(Support::temppath("out.las"));
+
+    FileUtils::deleteFile(out);
+    std::string cmd = Support::binpath(Support::exename("pdal")) +
+        " translate --log devnull -v Debug " + in + " " + out;
+
+    std::string output;
+    int stat = Utils::run_shell_command(cmd, output);
+    EXPECT_EQ(stat, 0);
+    EXPECT_EQ(output.size(), 0u);
+
+    cmd = Support::binpath(Support::exename("pdal")) +
+        " translate -v Debug " + in + " " + out + " 2>&1";
+    stat = Utils::run_shell_command(cmd, output);
+    EXPECT_EQ(stat, 0);
+    EXPECT_NE(output.size(), 0u);
+
+    FileUtils::deleteFile(out);
+}
+
+}
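The second test relies on the special log name "devnull", which swallows all
output. A minimal sketch of such a sink using a null stream buffer (how
pdal::Log implements it internally is an assumption here):

    #include <ostream>
    #include <streambuf>

    // A stream buffer that reports success but writes nothing.
    struct NullBuffer : std::streambuf
    {
        int overflow(int c) override { return c; }  // pretend it was written
    };

    int main()
    {
        NullBuffer nb;
        std::ostream devnull(&nb);
        devnull << "this text goes nowhere\n";  // no output, no error state
        return 0;
    }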
diff --git a/test/unit/OldPCLBlockTest.cpp b/test/unit/OldPCLBlockTest.cpp
index d358be2..4a284f2 100644
--- a/test/unit/OldPCLBlockTest.cpp
+++ b/test/unit/OldPCLBlockTest.cpp
@@ -1,41 +1,41 @@
 /******************************************************************************
-* Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
+ * Copyright (c) 2016-2017, Bradley J Chambers (brad.chambers at gmail.com)
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+ *       names of its contributors may be used to endorse or promote
+ *       products derived from this software without specific prior
+ *       written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ ****************************************************************************/
 
 #include <string>
 
-#include <pdal/pdal_test_main.hpp>
 #include <pdal/StageFactory.hpp>
+#include <pdal/pdal_test_main.hpp>
 
 #include "Support.hpp"
 
@@ -44,29 +44,36 @@ using namespace pdal;
 TEST(OldPCLBlockTests, StatisticalOutliers1)
 {
     StageFactory f;
-    
+
     Options ro;
     ro.add("filename", Support::datapath("autzen/autzen-point-format-3.las"));
-    
+
     Stage* r(f.createStage("readers.las"));
     EXPECT_TRUE(r);
     r->setOptions(ro);
-    
+
     Options fo;
     fo.add("method", "statistical");
     fo.add("multiplier", 1.5);
     fo.add("mean_k", 2);
-    fo.add("extract", true);
-    
+
     Stage* outlier(f.createStage("filters.outlier"));
     EXPECT_TRUE(outlier);
     outlier->setOptions(fo);
     outlier->setInput(*r);
-    
+
+    Options rangeOpts;
+    rangeOpts.add("limits", "Classification![7:7]");
+
+    Stage* range(f.createStage("filters.range"));
+    EXPECT_TRUE(range);
+    range->setOptions(rangeOpts);
+    range->setInput(*outlier);
+
     PointTable table;
-    outlier->prepare(table);
-    PointViewSet viewSet = outlier->execute(table);
-    
+    range->prepare(table);
+    PointViewSet viewSet = range->execute(table);
+
     EXPECT_EQ(1u, viewSet.size());
     PointViewPtr view = *viewSet.begin();
     EXPECT_EQ(96u, view->size());
@@ -75,29 +82,36 @@ TEST(OldPCLBlockTests, StatisticalOutliers1)
 TEST(OldPCLBlockTests, StatisticalOutliers2)
 {
     StageFactory f;
-    
+
     Options ro;
     ro.add("filename", Support::datapath("autzen/autzen-point-format-3.las"));
-    
+
     Stage* r(f.createStage("readers.las"));
     EXPECT_TRUE(r);
     r->setOptions(ro);
-    
+
     Options fo;
     fo.add("method", "statistical");
     fo.add("multiplier", 0.0);
     fo.add("mean_k", 5);
-    fo.add("extract", true);
-    
+
     Stage* outlier(f.createStage("filters.outlier"));
     EXPECT_TRUE(outlier);
     outlier->setOptions(fo);
     outlier->setInput(*r);
-    
+
+    Options rangeOpts;
+    rangeOpts.add("limits", "Classification![7:7]");
+
+    Stage* range(f.createStage("filters.range"));
+    EXPECT_TRUE(range);
+    range->setOptions(rangeOpts);
+    range->setInput(*outlier);
+
     PointTable table;
-    outlier->prepare(table);
-    PointViewSet viewSet = outlier->execute(table);
-    
+    range->prepare(table);
+    PointViewSet viewSet = range->execute(table);
+
     EXPECT_EQ(1u, viewSet.size());
     PointViewPtr view = *viewSet.begin();
     EXPECT_EQ(63u, view->size());
@@ -106,29 +120,36 @@ TEST(OldPCLBlockTests, StatisticalOutliers2)
 TEST(OldPCLBlockTests, RadiusOutliers1)
 {
     StageFactory f;
-    
+
     Options ro;
     ro.add("filename", Support::datapath("autzen/autzen-point-format-3.las"));
-    
+
     Stage* r(f.createStage("readers.las"));
     EXPECT_TRUE(r);
     r->setOptions(ro);
-    
+
     Options fo;
     fo.add("method", "radius");
     fo.add("radius", 200.0);
     fo.add("min_k", 1);
-    fo.add("extract", true);
-    
+
     Stage* outlier(f.createStage("filters.outlier"));
     EXPECT_TRUE(outlier);
     outlier->setOptions(fo);
     outlier->setInput(*r);
-    
+
+    Options rangeOpts;
+    rangeOpts.add("limits", "Classification![7:7]");
+
+    Stage* range(f.createStage("filters.range"));
+    EXPECT_TRUE(range);
+    range->setOptions(rangeOpts);
+    range->setInput(*outlier);
+
     PointTable table;
-    outlier->prepare(table);
-    PointViewSet viewSet = outlier->execute(table);
-    
+    range->prepare(table);
+    PointViewSet viewSet = range->execute(table);
+
     EXPECT_EQ(1u, viewSet.size());
     PointViewPtr view = *viewSet.begin();
     EXPECT_EQ(60u, view->size());
@@ -137,29 +158,36 @@ TEST(OldPCLBlockTests, RadiusOutliers1)
 TEST(OldPCLBlockTests, RadiusOutliers2)
 {
     StageFactory f;
-    
+
     Options ro;
     ro.add("filename", Support::datapath("autzen/autzen-point-format-3.las"));
-    
+
     Stage* r(f.createStage("readers.las"));
     EXPECT_TRUE(r);
     r->setOptions(ro);
-    
+
     Options fo;
     fo.add("method", "radius");
     fo.add("radius", 100.0);
     fo.add("min_k", 2);
-    fo.add("extract", true);
-    
+
     Stage* outlier(f.createStage("filters.outlier"));
     EXPECT_TRUE(outlier);
     outlier->setOptions(fo);
     outlier->setInput(*r);
-    
+
+    Options rangeOpts;
+    rangeOpts.add("limits", "Classification![7:7]");
+
+    Stage* range(f.createStage("filters.range"));
+    EXPECT_TRUE(range);
+    range->setOptions(rangeOpts);
+    range->setInput(*outlier);
+
     PointTable table;
-    outlier->prepare(table);
-    PointViewSet viewSet = outlier->execute(table);
-    
+    range->prepare(table);
+    PointViewSet viewSet = range->execute(table);
+
     EXPECT_EQ(1u, viewSet.size());
     PointViewPtr view = *viewSet.begin();
     EXPECT_EQ(3u, view->size());
@@ -168,27 +196,43 @@ TEST(OldPCLBlockTests, RadiusOutliers2)
 TEST(OldPCLBlockTests, PMF1)
 {
     StageFactory f;
-    
+
     Options ro;
     ro.add("filename", Support::datapath("autzen/autzen-point-format-3.las"));
-    
+
     Stage* r(f.createStage("readers.las"));
     EXPECT_TRUE(r);
     r->setOptions(ro);
-    
+
+    Options ao;
+    ao.add("assignment", "Classification[:]=0");
+
+    Stage* assign(f.createStage("filters.assign"));
+    EXPECT_TRUE(assign);
+    assign->setOptions(ao);
+    assign->setInput(*r);
+
     Options fo;
     fo.add("max_window_size", 200);
-    fo.add("extract", true);
-    
+    fo.add("last", false);
+
     Stage* outlier(f.createStage("filters.pmf"));
     EXPECT_TRUE(outlier);
     outlier->setOptions(fo);
-    outlier->setInput(*r);
-    
+    outlier->setInput(*assign);
+
+    Options rangeOpts;
+    rangeOpts.add("limits", "Classification[2:2]");
+
+    Stage* range(f.createStage("filters.range"));
+    EXPECT_TRUE(range);
+    range->setOptions(rangeOpts);
+    range->setInput(*outlier);
+
     PointTable table;
-    outlier->prepare(table);
-    PointViewSet viewSet = outlier->execute(table);
-    
+    range->prepare(table);
+    PointViewSet viewSet = range->execute(table);
+
     EXPECT_EQ(1u, viewSet.size());
     PointViewPtr view = *viewSet.begin();
     EXPECT_EQ(93u, view->size());
@@ -197,31 +241,47 @@ TEST(OldPCLBlockTests, PMF1)
 TEST(OldPCLBlockTests, PMF2)
 {
     StageFactory f;
-    
+
     Options ro;
     ro.add("filename", Support::datapath("autzen/autzen-point-format-3.las"));
-    
+
     Stage* r(f.createStage("readers.las"));
     EXPECT_TRUE(r);
     r->setOptions(ro);
-    
+
+    Options ao;
+    ao.add("assignment", "Classification[:]=0");
+
+    Stage* assign(f.createStage("filters.assign"));
+    EXPECT_TRUE(assign);
+    assign->setOptions(ao);
+    assign->setInput(*r);
+
     Options fo;
     fo.add("max_window_size", 200);
     fo.add("cell_size", 1.0);
     fo.add("slope", 1.0);
     fo.add("initial_distance", 0.05);
     fo.add("max_distance", 3.0);
-    fo.add("extract", true);
-    
+    fo.add("last", false);
+
     Stage* outlier(f.createStage("filters.pmf"));
     EXPECT_TRUE(outlier);
     outlier->setOptions(fo);
-    outlier->setInput(*r);
-    
+    outlier->setInput(*assign);
+
+    Options rangeOpts;
+    rangeOpts.add("limits", "Classification[2:2]");
+
+    Stage* range(f.createStage("filters.range"));
+    EXPECT_TRUE(range);
+    range->setOptions(rangeOpts);
+    range->setInput(*outlier);
+
     PointTable table;
-    outlier->prepare(table);
-    PointViewSet viewSet = outlier->execute(table);
-    
+    range->prepare(table);
+    PointViewSet viewSet = range->execute(table);
+
     EXPECT_EQ(1u, viewSet.size());
     PointViewPtr view = *viewSet.begin();
     EXPECT_EQ(94u, view->size());
@@ -230,31 +290,47 @@ TEST(OldPCLBlockTests, PMF2)
 TEST(OldPCLBlockTests, PMF3)
 {
     StageFactory f;
-    
+
     Options ro;
     ro.add("filename", Support::datapath("autzen/autzen-point-format-3.las"));
-    
+
     Stage* r(f.createStage("readers.las"));
     EXPECT_TRUE(r);
     r->setOptions(ro);
-    
+
+    Options ao;
+    ao.add("assignment", "Classification[:]=0");
+
+    Stage* assign(f.createStage("filters.assign"));
+    EXPECT_TRUE(assign);
+    assign->setOptions(ao);
+    assign->setInput(*r);
+
     Options fo;
     fo.add("max_window_size", 33);
     fo.add("cell_size", 1.0);
     fo.add("slope", 1.0);
     fo.add("initial_distance", 0.15);
     fo.add("max_distance", 2.5);
-    fo.add("extract", true);
-    
+    fo.add("last", false);
+
     Stage* outlier(f.createStage("filters.pmf"));
     EXPECT_TRUE(outlier);
     outlier->setOptions(fo);
-    outlier->setInput(*r);
-    
+    outlier->setInput(*assign);
+
+    Options rangeOpts;
+    rangeOpts.add("limits", "Classification[2:2]");
+
+    Stage* range(f.createStage("filters.range"));
+    EXPECT_TRUE(range);
+    range->setOptions(rangeOpts);
+    range->setInput(*outlier);
+
     PointTable table;
-    outlier->prepare(table);
-    PointViewSet viewSet = outlier->execute(table);
-    
+    range->prepare(table);
+    PointViewSet viewSet = range->execute(table);
+
     EXPECT_EQ(1u, viewSet.size());
     PointViewPtr view = *viewSet.begin();
     EXPECT_EQ(106u, view->size());
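Every test in this file was reworked the same way: the deprecated "extract"
option is gone, so the classifier stage (filters.outlier marking noise as
Classification 7; filters.pmf marking ground as Classification 2 after
filters.assign resets all classifications to 0) is now followed by
filters.range, where "Classification![7:7]" drops noise and
"Classification[2:2]" keeps ground. A sketch of that repeated shape wrapped
in a helper (the helper is ours, not part of the suite):

    #include <string>

    #include <pdal/Options.hpp>
    #include <pdal/PointTable.hpp>
    #include <pdal/PointView.hpp>
    #include <pdal/StageFactory.hpp>

    using namespace pdal;

    // Count the points of `infile` that survive `filterName` (configured
    // with `filterOpts`) followed by a filters.range keeping `limits`.
    point_count_t countSurvivors(StageFactory& f, const std::string& infile,
        const std::string& filterName, const Options& filterOpts,
        const std::string& limits)
    {
        Options ro;
        ro.add("filename", infile);
        Stage* r = f.createStage("readers.las");
        r->setOptions(ro);

        Stage* classify = f.createStage(filterName);
        classify->setOptions(filterOpts);
        classify->setInput(*r);

        Options rangeOpts;
        rangeOpts.add("limits", limits);
        Stage* range = f.createStage("filters.range");
        range->setOptions(rangeOpts);
        range->setInput(*classify);

        PointTable table;
        range->prepare(table);
        PointViewSet viewSet = range->execute(table);
        return (*viewSet.begin())->size();
    }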
diff --git a/test/unit/PluginManagerTest.cpp b/test/unit/PluginManagerTest.cpp
index ee6b286..8be6212 100644
--- a/test/unit/PluginManagerTest.cpp
+++ b/test/unit/PluginManagerTest.cpp
@@ -49,6 +49,11 @@ struct DummyPlugin : Filter
     static std::string const description;
     static std::string const link;
     // Plugin management
+    static int32_t dummyExitFunc()
+    {
+        return 0;
+    }
+
     static PF_ExitFunc initPlugin() // PF_InitFunc
     {
         PF_RegisterParams rp;
@@ -61,7 +66,9 @@ struct DummyPlugin : Filter
         rp.pluginType = PF_PluginType_Filter;
         if (!PluginManager::registerObject(name, &rp))
             return nullptr;
-        return []()->int32_t { return 0; };
+        return dummyExitFunc;
+// Broken on gcc 4.7.2
+//        return []()->int32_t { return 0; };
     }
     static void* create()
     {
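The workaround swaps a captureless lambda for a named static function because
converting such a lambda to a plain function pointer miscompiled on gcc
4.7.2. The conversion itself is standard C++11, as this standalone example
shows:

    #include <cstdint>

    using ExitFunc = int32_t (*)();

    int32_t namedExit() { return 0; }

    int main()
    {
        ExitFunc a = namedExit;                     // always fine
        ExitFunc b = []() -> int32_t { return 0; }; // legal C++11, but
                                                    // broken on gcc 4.7.2
        return a() + b();
    }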
diff --git a/test/unit/PointTableTest.cpp b/test/unit/PointTableTest.cpp
index aa4ebe1..a8073a2 100644
--- a/test/unit/PointTableTest.cpp
+++ b/test/unit/PointTableTest.cpp
@@ -38,7 +38,8 @@
 #include <io/LasReader.hpp>
 #include "Support.hpp"
 
-using namespace pdal;
+namespace pdal
+{
 
 TEST(PointTable, resolveType)
 {
@@ -178,3 +179,28 @@ TEST(PointTable, userView)
     EXPECT_TRUE(called);
 }
 
+TEST(PointTable, srs)
+{
+   SpatialReference srs1("GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4326\"]]");
+
+   SpatialReference srs2("PROJCS[\"WGS 84 / UTM zone 17N\",GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433],AUTHORITY[\"EPSG\",\"4326\"]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",-81],PARAMETER[\"scale_factor\",0.9996],PARAMETER[\"false_easting\",500000],PARAMETER[\"false_northin [...]
+
+    PointTable table;
+
+    table.addSpatialReference(srs1);
+    table.addSpatialReference(srs1);
+    EXPECT_TRUE(table.spatialReferenceUnique());
+    EXPECT_EQ(table.anySpatialReference(), srs1);
+
+    table.addSpatialReference(srs2);
+    EXPECT_FALSE(table.spatialReferenceUnique());
+    EXPECT_EQ(table.anySpatialReference(), srs2);
+    EXPECT_EQ(table.m_spatialRefs.size(), 2u);
+
+    table.addSpatialReference(srs1);
+    EXPECT_FALSE(table.spatialReferenceUnique());
+    EXPECT_EQ(table.anySpatialReference(), srs1);
+    EXPECT_EQ(table.m_spatialRefs.size(), 2u);
+}
+
+} // namespace
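The srs test pins down the bookkeeping contract: re-adding a reference that
is already stored must not grow the set, the reference is unique while only
one distinct value has been seen, and anySpatialReference() reflects the most
recently added one. A sketch of that contract with a plain list (the real
member is m_spatialRefs, as the test shows; the container choice here is an
assumption):

    #include <algorithm>
    #include <list>
    #include <string>

    struct SrsTracker
    {
        std::list<std::string> refs;  // front() is the most recently added

        void add(const std::string& srs)
        {
            auto it = std::find(refs.begin(), refs.end(), srs);
            if (it != refs.end())
                refs.erase(it);       // move an existing entry to the front
            refs.push_front(srs);
        }
        bool unique() const { return refs.size() == 1; }
        const std::string& any() const { return refs.front(); }
    };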
diff --git a/test/unit/SegmentationTest.cpp b/test/unit/SegmentationTest.cpp
new file mode 100644
index 0000000..8974c04
--- /dev/null
+++ b/test/unit/SegmentationTest.cpp
@@ -0,0 +1,97 @@
+/******************************************************************************
+* Copyright (c) 2016, Bradley J. Chambers (brad.chambers at gmail.com)
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following
+* conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in
+*       the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
+*       names of its contributors may be used to endorse or promote
+*       products derived from this software without specific prior
+*       written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+* OF SUCH DAMAGE.
+****************************************************************************/
+
+#include <pdal/Dimension.hpp>
+#include <pdal/pdal_test_main.hpp>
+#include <pdal/pdal_defines.h>
+#include <pdal/PointTable.hpp>
+#include <pdal/PointView.hpp>
+
+#include <pdal/Segmentation.hpp>
+
+#include <vector>
+
+using namespace pdal;
+
+TEST(SegmentationTest, BasicClustering)
+{
+    using namespace Segmentation;
+
+    std::vector<std::vector<PointId>> clusters;
+
+    PointTable table;
+    PointLayoutPtr layout(table.layout());
+
+    layout->registerDim(Dimension::Id::X);
+    layout->registerDim(Dimension::Id::Y);
+    layout->registerDim(Dimension::Id::Z);
+
+    PointViewPtr src(new PointView(table));
+
+    // Single point, single cluster
+    src->setField(Dimension::Id::X, 0, 0.0);
+    src->setField(Dimension::Id::Y, 0, 0.0);
+    src->setField(Dimension::Id::Z, 0, 0.0);
+    clusters = extractClusters(*src, 1, 10, 1.0);
+    EXPECT_EQ(1u, clusters.size());
+    EXPECT_EQ(1u, clusters[0].size());
+
+    // Two separate clusters, both with single point
+    src->setField(Dimension::Id::X, 1, 10.0);
+    src->setField(Dimension::Id::Y, 1, 10.0);
+    src->setField(Dimension::Id::Z, 1, 10.0);
+    clusters = extractClusters(*src, 1, 10, 1.0);
+    EXPECT_EQ(2u, clusters.size());
+    EXPECT_EQ(1u, clusters[0].size());
+    EXPECT_EQ(1u, clusters[1].size());
+
+    // Still two clusters, one with two points
+    src->setField(Dimension::Id::X, 2, 0.5);
+    src->setField(Dimension::Id::Y, 2, 0.5);
+    src->setField(Dimension::Id::Z, 2, 0.5);
+    clusters = extractClusters(*src, 1, 10, 1.0);
+    EXPECT_EQ(2u, clusters.size());
+    EXPECT_EQ(2u, clusters[0].size());
+    EXPECT_EQ(1u, clusters[1].size());
+
+    // Reject the cluster with only one point
+    clusters = extractClusters(*src, 2, 10, 1.0);
+    EXPECT_EQ(1u, clusters.size());
+    EXPECT_EQ(2u, clusters[0].size());
+
+    // Reject the cluster with two points
+    clusters = extractClusters(*src, 1, 1, 1.0);
+    EXPECT_EQ(1u, clusters.size());
+    EXPECT_EQ(1u, clusters[0].size());
+}
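extractClusters(view, min, max, tol) behaves like Euclidean cluster
extraction: points within `tol` of any member join the cluster, and clusters
with fewer than `min` or more than `max` points are discarded. Hence two
clusters for points about 17.3 apart at tol 1.0, and the single-point cluster
vanishing once min is 2. A naive reference implementation over bare 3-D
points (the PDAL version works on a PointView and is presumably accelerated
with a spatial index):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    struct P { double x, y, z; };

    std::vector<std::vector<size_t>> extractClustersSketch(
        const std::vector<P>& pts, size_t minPts, size_t maxPts, double tol)
    {
        std::vector<bool> used(pts.size(), false);
        std::vector<std::vector<size_t>> clusters;
        for (size_t seed = 0; seed < pts.size(); ++seed)
        {
            if (used[seed])
                continue;
            std::vector<size_t> cluster{seed};
            used[seed] = true;
            // Grow: pull in any unused point within `tol` of a member.
            for (size_t i = 0; i < cluster.size(); ++i)
                for (size_t j = 0; j < pts.size(); ++j)
                {
                    if (used[j])
                        continue;
                    double dx = pts[cluster[i]].x - pts[j].x;
                    double dy = pts[cluster[i]].y - pts[j].y;
                    double dz = pts[cluster[i]].z - pts[j].z;
                    if (std::sqrt(dx * dx + dy * dy + dz * dz) <= tol)
                    {
                        used[j] = true;
                        cluster.push_back(j);
                    }
                }
            if (cluster.size() >= minPts && cluster.size() <= maxPts)
                clusters.push_back(cluster);
        }
        return clusters;
    }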
diff --git a/test/unit/StageFactoryTest.cpp b/test/unit/StageFactoryTest.cpp
index 03ce05e..2e56366 100644
--- a/test/unit/StageFactoryTest.cpp
+++ b/test/unit/StageFactoryTest.cpp
@@ -90,6 +90,9 @@ TEST(StageFactoryTest, extensionTest)
     EXPECT_EQ(StageFactory::inferWriterDriver("foo.las"), "writers.las");
     EXPECT_EQ(StageFactory::inferWriterDriver("STDOUT"), "writers.text");
     EXPECT_EQ(StageFactory::inferWriterDriver(""), "writers.text");
+    EXPECT_EQ(StageFactory::inferWriterDriver("foo.tif"), "writers.gdal");
+    EXPECT_EQ(StageFactory::inferWriterDriver("foo.tiff"), "writers.gdal");
+    EXPECT_EQ(StageFactory::inferWriterDriver("foo.vrt"), "writers.gdal");
 
     EXPECT_EQ(StageFactory::inferReaderDriver("foo.laz"), "readers.las");
     EXPECT_EQ(StageFactory::inferReaderDriver("foo.las"), "readers.las");
@@ -101,6 +104,8 @@ TEST(StageFactoryTest, extensionTest)
     EXPECT_EQ(StageFactory::extensions("writers.las"), ext);
     ext = { "csv", "json", "txt", "xyz" };
     EXPECT_EQ(StageFactory::extensions("writers.text"), ext);
+    ext = { "tif", "tiff", "vrt" };
+    EXPECT_EQ(StageFactory::extensions("writers.gdal"), ext);
 }
 
 } // namespace pdal
diff --git a/test/unit/apps/AppTest.cpp b/test/unit/apps/AppTest.cpp
index af1be0a..2e0edbd 100644
--- a/test/unit/apps/AppTest.cpp
+++ b/test/unit/apps/AppTest.cpp
@@ -53,15 +53,15 @@ TEST(PdalApp, log)
     std::string output;
 
     Utils::run_shell_command(appName() + " -v Debug 2>&1", output);
-    EXPECT_TRUE(output.find("PDAL Debug: 3") != std::string::npos);
+    EXPECT_TRUE(output.find("PDAL Debug") != std::string::npos);
 
     output.clear();
     Utils::run_shell_command(appName() + " --verbose=3 2>&1", output);
-    EXPECT_TRUE(output.find("PDAL Debug: 3") != std::string::npos);
+    EXPECT_TRUE(output.find("PDAL Debug") != std::string::npos);
 
     output.clear();
     Utils::run_shell_command(appName() + " 2>&1", output);
-    EXPECT_TRUE(output.find("PDAL Debug: 3") == std::string::npos);
+    EXPECT_TRUE(output.find("PDAL Debug") == std::string::npos);
 }
 
 } // unnamed namespace
diff --git a/test/unit/apps/RandomTest.cpp b/test/unit/apps/RandomTest.cpp
index f26a852..f9327c0 100644
--- a/test/unit/apps/RandomTest.cpp
+++ b/test/unit/apps/RandomTest.cpp
@@ -44,20 +44,12 @@
 
 using namespace pdal;
 
-namespace
-{
-std::string appName()
-{
-    return Support::binpath("pdal random");
-}
-}
-
 TEST(Random, extra_ops)
 {
     std::string outfile(Support::temppath("out.las"));
 
-    const std::string cmd = appName() +
-        " --count=100 --writers.las.minor_version=3 " + outfile;
+    const std::string cmd = Support::binpath("pdal") + " random "
+        "--count=100 --writers.las.minor_version=3 " + outfile;
 
     FileUtils::deleteFile(outfile);
     std::string output;
diff --git a/test/unit/apps/TranslateTest.cpp b/test/unit/apps/TranslateTest.cpp
index ddf925b..11b02cc 100644
--- a/test/unit/apps/TranslateTest.cpp
+++ b/test/unit/apps/TranslateTest.cpp
@@ -71,6 +71,7 @@ TEST(translateTest, t1)
         " --json filters.stats", output), 0);
 }
 
+// Test processing of JSON pipeline input passed via --json.
 TEST(translateTest, t2)
 {
     std::string output;
@@ -78,29 +79,114 @@ TEST(translateTest, t2)
     std::string in = Support::datapath("las/autzen_trim.las");
     std::string out = Support::temppath("out.las");
 
-    const char *json = " \
-        [ \
+    std::string json = " \
+        { \
+        \\\"pipeline\\\" : [ \
         { \\\"type\\\":\\\"filters.stats\\\" }, \
         { \\\"type\\\":\\\"filters.range\\\", \
           \\\"limits\\\":\\\"Z[0:100]\\\" } \
-        ]";
-
-    EXPECT_EQ(runTranslate(in + " " + out +
-        " --json=\"" + json + "\"", output), 0);
-    EXPECT_EQ(runTranslate(in + " " + out + " -r readers.las "
-        " --json=\"" + json + "\"", output), 0);
-    EXPECT_EQ(runTranslate(in + " " + out + " -w writers.las "
-        " --json=\"" + json + "\"", output), 0);
-    EXPECT_EQ(runTranslate(in + " " + out + " -r readers.las -w writers.las "
-        " --json=\"" + json + "\"", output), 0);
-
-    const char *json2 = " \
-        { \\\"type\\\":\\\"filters.stats\\\" }, \
-        { \\\"type\\\":\\\"filters.range\\\", \
-          \\\"limits\\\":\\\"Z[0:100]\\\" }";
-
-    EXPECT_NE(runTranslate(in + " " + out +
-        " --json=\"" + json2 + "\"", output), 0);
+        ] \
+        }";
+
+    // Check that we work with just a bunch of filters.
+    EXPECT_EQ(runTranslate(in + " " + out + " --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we fail with a bad input file.
+    EXPECT_NE(runTranslate("foo.las " + out + " --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we fail with a bad output file.
+    EXPECT_NE(runTranslate(in + " foo.blam --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we work with no stages.
+    json = " \
+        { \
+        \\\"pipeline\\\" : [ \
+        ] \
+        }";
+    EXPECT_EQ(runTranslate(in + " " + out + " --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we work with only an input (not specified as such).
+    json = " \
+        { \
+        \\\"pipeline\\\" : [ \
+          \\\"badinput.las\\\" \
+        ] \
+        }";
+    EXPECT_EQ(runTranslate(in + " " + out + " --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we work with an input and an output.
+    json = " \
+        { \
+        \\\"pipeline\\\" : [ \
+          \\\"badinput.las\\\", \
+          \\\"badoutput.las\\\" \
+        ] \
+        }";
+    EXPECT_EQ(runTranslate(in + " " + out + " --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we work with only an output.
+    json = " \
+        { \
+        \\\"pipeline\\\" : [ \
+          { \
+          \\\"type\\\":\\\"writers.las\\\", \
+          \\\"filename\\\":\\\"badoutput.las\\\" \
+          } \
+        ] \
+        }";
+    EXPECT_EQ(runTranslate(in + " " + out + " --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we work with only an input.
+    json = " \
+        { \
+        \\\"pipeline\\\" : [ \
+          { \
+          \\\"type\\\":\\\"readers.las\\\", \
+          \\\"filename\\\":\\\"badinput.las\\\" \
+          } \
+        ] \
+        }";
+    EXPECT_EQ(runTranslate(in + " " + out + " --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we fail with unchained multiple writers.
+    json = " \
+        { \
+        \\\"pipeline\\\" : [ \
+          { \
+          \\\"type\\\":\\\"writers.las\\\", \
+          \\\"filename\\\":\\\"badoutput.las\\\" \
+          }, \
+          \\\"badoutput2.las\\\" \
+        ] \
+        }";
+    EXPECT_NE(runTranslate(in + " " + out + " --json=\"" + json + "\"",
+        output), 0);
+
+    // Check that we can handle chained writers.
+    json = " \
+        { \
+        \\\"pipeline\\\" : [ \
+          { \
+          \\\"type\\\":\\\"writers.las\\\", \
+          \\\"filename\\\":\\\"badoutput.las\\\", \
+          \\\"tag\\\":\\\"mytag\\\" \
+          }, \
+          { \
+          \\\"filename\\\":\\\"badoutput2.las\\\", \
+          \\\"inputs\\\": \\\"mytag\\\" \
+          } \
+        ] \
+        }";
+    EXPECT_EQ(runTranslate(in + " " + out + " --json=\"" + json + "\"",
+        output), 0);
 }
 
 TEST(translateTest, t3)
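The backslash-escaped strings above obscure the pipelines under test; with a
C++11 raw string literal the first one reads as plain JSON (a readability
sketch only; the test itself builds the string the escaped way):

    #include <string>

    const std::string json = R"({
        "pipeline" : [
            { "type" : "filters.stats" },
            { "type" : "filters.range",
              "limits" : "Z[0:100]" }
        ]
    })";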
diff --git a/test/unit/apps/pcpipelineTest.cpp b/test/unit/apps/pcpipelineTest.cpp
deleted file mode 100644
index cfd681f..0000000
--- a/test/unit/apps/pcpipelineTest.cpp
+++ /dev/null
@@ -1,299 +0,0 @@
-/******************************************************************************
-* Copyright (c) 2011, Michael P. Gerlek (mpg at flaxen.com)
-*
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following
-* conditions are met:
-*
-*     * Redistributions of source code must retain the above copyright
-*       notice, this list of conditions and the following disclaimer.
-*     * Redistributions in binary form must reproduce the above copyright
-*       notice, this list of conditions and the following disclaimer in
-*       the documentation and/or other materials provided
-*       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
-*       names of its contributors may be used to endorse or promote
-*       products derived from this software without specific prior
-*       written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
-* OF SUCH DAMAGE.
-****************************************************************************/
-
-#include <pdal/pdal_test_main.hpp>
-
-#include <pdal/util/FileUtils.hpp>
-#include <pdal/util/Utils.hpp>
-#include "Support.hpp"
-
-#include <iostream>
-#include <sstream>
-#include <string>
-
-static std::string appName()
-{
-    const std::string app = Support::binpath(Support::exename("pdal") +
-        " pipeline");
-    return app;
-}
-
-// most pipelines (those with a writer) will be invoked via `pdal pipeline`
-static void run_pipeline(std::string const& pipeline)
-{
-    const std::string cmd = Support::binpath(Support::exename("pdal") +
-        " pipeline");
-
-    std::string output;
-    std::string file(Support::configuredpath(pipeline));
-    int stat = pdal::Utils::run_shell_command(cmd + " " + file, output);
-    EXPECT_EQ(0, stat);
-    if (stat)
-        std::cerr << output << std::endl;
-}
-
-// most pipelines (those with a writer) will be invoked via `pdal pipeline`
-static void run_pipeline_stdin(std::string const& pipeline)
-{
-    const std::string cmd = Support::binpath(Support::exename("pdal") +
-        " pipeline --stdin");
-
-    std::string output;
-    std::string file(Support::configuredpath(pipeline));
-    int stat = pdal::Utils::run_shell_command(cmd + " < " + file, output);
-    EXPECT_EQ(0, stat);
-    if (stat)
-        std::cerr << output << std::endl;
-}
-
-// pipeines with no writer will be invoked via `pdal info`
-static void run_info(std::string const& pipeline)
-{
-    const std::string cmd = Support::binpath(Support::exename("pdal") + " info");
-
-    std::string output;
-    std::string file(Support::configuredpath(pipeline));
-    int stat = pdal::Utils::run_shell_command(cmd + " " + file, output);
-    EXPECT_EQ(0, stat);
-    if (stat)
-        std::cerr << output << std::endl;
-}
-
-#ifdef PDAL_COMPILER_MSVC
-TEST(pipelineBaseTest, no_input)
-{
-    const std::string cmd = appName();
-
-    std::string output;
-    int stat = pdal::Utils::run_shell_command(cmd, output);
-    EXPECT_EQ(stat, 1);
-
-    const std::string expected = "Usage error: input file name required";
-    EXPECT_EQ(output.substr(0, expected.length()), expected);
-}
-#endif
-
-
-TEST(pipelineBaseTest, common_opts)
-{
-    const std::string cmd = appName();
-
-    std::string output;
-    int stat = pdal::Utils::run_shell_command(cmd + " -h", output);
-    EXPECT_EQ(stat, 0);
-}
-
-TEST(pipelineBaseTest, drop_color)
-{ run_pipeline("pipeline/drop_color.xml"); }
-
-TEST(pipelineBaseTest, interpolate)
-{ run_pipeline("pipeline/pipeline_interpolate.xml"); }
-
-TEST(pipelineBaseTest, metadata_reader)
-{ run_info("pipeline/pipeline_metadata_reader.xml"); }
-
-TEST(pipelineBaseTest, metadata_writer)
-{ run_pipeline("pipeline/pipeline_metadata_writer.xml"); }
-
-TEST(pipelineBaseTest, mississippi)
-{ run_pipeline("pipeline/pipeline_mississippi.xml"); }
-
-TEST(pipelineBaseTest, mississippi_reverse)
-{ run_pipeline("pipeline/pipeline_mississippi_reverse.xml"); }
-
-TEST(pipelineBaseTest, multioptions)
-{ run_info("pipeline/pipeline_multioptions.xml"); }
-
-TEST(pipelineBaseTest, read)
-{ run_info("pipeline/pipeline_read.xml"); }
-
-TEST(pipelineBaseTest, read_notype)
-{ run_info("pipeline/pipeline_read_notype.xml"); }
-
-TEST(pipelineBaseTest, readcomments)
-{ run_info("pipeline/pipeline_readcomments.xml"); }
-
-TEST(pipelineBaseTest, write)
-{ run_pipeline("pipeline/pipeline_write.xml"); }
-
-TEST(pipelineBaseTest, writeStdin)
-{ run_pipeline_stdin("pipeline/pipeline_write.xml"); }
-
-TEST(pipelineBaseTest, write2)
-{ run_pipeline("pipeline/pipeline_write2.xml"); }
-
-TEST(pipelineBaseTest, pipeline_writecomments)
-{ run_pipeline("pipeline/pipeline_writecomments.xml"); }
-
-TEST(pipelineBpfTest, bpf)
-{ run_pipeline("bpf/bpf.xml"); }
-
-TEST(pipelineBpfTest, bpf2nitf)
-{ run_pipeline("bpf/bpf2nitf.xml"); }
-
-TEST(pipelineFiltersTest, DISABLED_attribute)
-{ run_pipeline("filters/attribute.xml"); }
-
-TEST(pipelineFiltersTest, chip)
-{ run_pipeline("filters/chip.xml"); }
-
-TEST(pipelineFiltersTest, chipper)
-{ run_pipeline("filters/chipper.xml"); }
-
-TEST(pipelineFiltersTest, DISABLED_colorize_multi)
-{ run_pipeline("filters/colorize-multi.xml"); }
-
-TEST(pipelineFiltersTest, colorize)
-{ run_pipeline("filters/colorize.xml"); }
-
-TEST(pipelineFiltersTest, DISABLED_crop_reproject)
-{ run_pipeline("filters/crop_reproject.xml"); }
-
-TEST(pipelineFiltersTest, crop_wkt)
-{ run_pipeline("filters/crop_wkt.xml"); }
-
-TEST(pipelineFiltersTest, crop_wkt_2d)
-{ run_pipeline("filters/crop_wkt_2d.xml"); }
-
-TEST(pipelineFiltersTest, crop_wkt_2d_classification)
-{ run_pipeline("filters/crop_wkt_2d_classification.xml"); }
-
-TEST(pipelineFiltersTest, decimate)
-{ run_pipeline("filters/decimate.xml"); }
-
-TEST(pipelineFiltersTest, ferry)
-{ run_pipeline("filters/ferry.xml"); }
-
-TEST(pipelineFiltersTest, hexbin_info)
-{ run_info("filters/hexbin-info.xml"); }
-
-TEST(pipelineFiltersTest, hexbin)
-{ run_pipeline("filters/hexbin.xml"); }
-
-TEST(pipelineFiltersTest, merge)
-{ run_info("filters/merge.xml"); }
-
-TEST(pipelineFiltersTest, range_z)
-{ run_info("filters/range_z.xml"); }
-
-TEST(pipelineFiltersTest, range_z_classification)
-{ run_info("filters/range_z_classification.xml"); }
-
-TEST(pipelineFiltersTest, range_classification)
-{ run_info("filters/range_classification.xml"); }
-
-TEST(pipelineFiltersTest, reproject)
-{ run_pipeline("filters/reproject.xml"); }
-
-TEST(pipelineFiltersTest, sort)
-{ run_info("filters/sort.xml"); }
-
-TEST(pipelineFiltersTest, splitter)
-{ run_pipeline("filters/splitter.xml"); }
-
-TEST(pipelineFiltersTest, stats)
-{ run_pipeline("filters/stats.xml"); }
-
-TEST(pipelineHoleTest, crop)
-{ run_pipeline("hole/crop.xml"); }
-
-TEST(pipelineIcebridgeTest, DISABLED_icebridge)
-{ run_pipeline("icebridge/pipeline.xml"); }
-
-TEST(pipelineNitfTest, conversion)
-{ run_pipeline("nitf/conversion.xml"); }
-
-TEST(pipelineNitfTest, las2nitf)
-{ run_pipeline("nitf/las2nitf.xml"); }
-
-TEST(pipelineNitfTest, DISABLED_reader)
-{ run_info("nitf/reader.xml"); }
-
-TEST(pipelineNitfTest, write_laz)
-{ run_pipeline("nitf/write_laz.xml"); }
-
-TEST(pipelineNitfTest, write_options)
-{ run_pipeline("nitf/write_options.xml"); }
-
-// skip oracle tests for now
-
-TEST(pipelineP2gTest, writer)
-{ run_pipeline("io/p2g-writer.xml"); }
-
-TEST(pipelinePLangTest, DISABLED_from_module)
-{ run_info("plang/from-module.xml"); }
-
-TEST(pipelinePLangTest, DISABLED_predicate_embed)
-{ run_info("plang/predicate-embed.xml"); }
-
-TEST(pipelinePLangTest, predicate_keep_ground_and_unclass)
-{ run_pipeline("plang/predicate-keep-ground-and-unclass.xml"); }
-
-TEST(pipelinePLangTest, predicate_keep_last_return)
-{ run_pipeline("plang/predicate-keep-last-return.xml"); }
-
-TEST(pipelinePLangTest, predicate_keep_specified_returns)
-{ run_pipeline("plang/predicate-keep-specified-returns.xml"); }
-
-TEST(pipelinePLangTest, DISABLED_programmabled_update_y_dims)
-{ run_info("plang/programmable-update-y-dims.xml"); }
-
-TEST(pipelineQfitTest, DISABLED_conversion)
-{ run_pipeline("qfit/conversion.xml"); }
-
-TEST(pipelineQfitTest, DISABLED_little_endian_conversion)
-{ run_pipeline("qfit/little-endian-conversion.xml"); }
-
-TEST(pipelineQfitTest, DISABLED_pipeline)
-{ run_pipeline("qfit/pipeline.xml"); }
-
-TEST(pipelineQfitTest, DISABLED_reader)
-{ run_info("qfit/reader.xml"); }
-
-TEST(pipelineSbetTest, pipeline)
-{ run_pipeline("sbet/pipeline.xml"); }
-
-// skip soci tests for now
-
-TEST(pipelineSQLiteTest, DISABLED_reader)
-{ run_pipeline("io/sqlite-reader.xml"); }
-
-TEST(pipelineSQLiteTest, DISABLED_writer)
-{ run_pipeline("io/sqlite-writer.xml"); }
-
-TEST(pipelineTextTest, csv_writer)
-{ run_pipeline("io/text-writer-csv.xml"); }
-
-TEST(pipelineTextTest, geojson_writer)
-{ run_pipeline("io/text-writer-geojson.xml"); }
diff --git a/test/unit/apps/pcpipelineTestJSON.cpp b/test/unit/apps/pcpipelineTestJSON.cpp
index dbee2ad..4e1b933 100644
--- a/test/unit/apps/pcpipelineTestJSON.cpp
+++ b/test/unit/apps/pcpipelineTestJSON.cpp
@@ -38,6 +38,7 @@
 #include <pdal/StageFactory.hpp>
 #include <pdal/util/FileUtils.hpp>
 #include <pdal/util/Utils.hpp>
+#include <io/LasReader.hpp>
 #include "Support.hpp"
 
 #include <iostream>
@@ -106,10 +107,30 @@ TEST(pipelineBaseTest, common_opts)
     const std::string cmd = appName();
 
     std::string output;
-    int stat = pdal::Utils::run_shell_command(cmd + " -h", output);
+    int stat = Utils::run_shell_command(cmd + " -h", output);
     EXPECT_EQ(stat, 0);
 }
 
+
+TEST(pipelineBaseTest, progress)
+{
+    std::string cmd = appName();
+    std::string progressOut = Support::temppath("progress.out");
+    FileUtils::deleteFile(progressOut);
+    auto handle = FileUtils::createFile(progressOut);
+    FileUtils::closeFile(handle);
+
+    cmd += " --progress " + progressOut + " "  +
+        Support::configuredpath("pipeline/bpf2las.json");
+
+    std::string output;
+    EXPECT_EQ(Utils::run_shell_command(cmd, output), 0);
+
+    std::string progress = FileUtils::readFileIntoString(progressOut);
+    EXPECT_NE(progress.find("READYFILE"), std::string::npos);
+    EXPECT_NE(progress.find("DONEFILE"), std::string::npos);
+}
+
 class json : public testing::TestWithParam<const char*> {};
 
 TEST_P(json, pipeline)
@@ -126,7 +147,7 @@ TEST(json, pipeline_stdin)
 INSTANTIATE_TEST_CASE_P(base, json,
                         testing::Values(
                             // "autzen/autzen-interpolate.json",
-                            "pipeline/attribute.json",
+                            "pipeline/assign.json",
                             "pipeline/bpf2las.json",
                             "pipeline/chipper.json",
                             "pipeline/colorize-multi.json",
@@ -144,6 +165,7 @@ INSTANTIATE_TEST_CASE_P(base, json,
                             "pipeline/metadata_writer.json",
                             "pipeline/mississippi.json",
                             "pipeline/mississippi_reverse.json",
+                            "pipeline/overlay.json",
                             // "pipeline/qfit2las.json",
                             "pipeline/range_z.json",
                             "pipeline/range_z_classification.json",
@@ -258,4 +280,80 @@ TEST(json, issue1417)
     run_pipeline("pipeline/issue1417.json", options);
 }
 
+// Test that stage options passed via --stage.<tagname>.<option> work.
+TEST(json, stagetags)
+{
+    auto checkValue = [](const std::string filename, double value)
+    {
+        Options o;
+        o.add("filename", filename);
+        LasReader r;
+        r.setOptions(o);
+
+        PointTable t;
+        r.prepare(t);
+        PointViewSet s = r.execute(t);
+        EXPECT_EQ(s.size(), 1u);
+        PointViewPtr v = *s.begin();
+
+        for (PointId i = 0; i < v->size(); ++i)
+            EXPECT_DOUBLE_EQ(value,
+                v->getFieldAs<double>(Dimension::Id::Z, i));
+
+        FileUtils::deleteFile(filename);
+    };
+
+    std::string outFilename(Support::temppath("assigned.las"));
+    std::string base(appName() + " " +
+        Support::configuredpath("pipeline/options.json"));
+    std::string output;
+    int stat;
+
+    stat = Utils::run_shell_command(base, output);
+    EXPECT_EQ(stat, 0);
+    checkValue(outFilename, 25);
+
+    stat = Utils::run_shell_command(base +
+        " --filters.assign.assignment=Z[:]=101",
+        output);
+    EXPECT_EQ(stat, 0);
+    checkValue(outFilename, 101);
+
+    stat = Utils::run_shell_command(base +
+        " --stage.assigner.assignment=Z[:]=1987",
+        output);
+    EXPECT_EQ(stat, 0);
+    checkValue(outFilename, 1987);
+
+    // Make sure that tag options override stage options.
+    stat = Utils::run_shell_command(base +
+        " --filters.assign.assignment=Z[:]=25 "
+        "--stage.assigner.assignment=Z[:]=555", output);
+    EXPECT_EQ(stat, 0);
+    checkValue(outFilename, 555);
+    stat = Utils::run_shell_command(base +
+        " --stage.assigner.assignment=Z[:]=555 "
+        "--filters.assign.assignment=Z[:]=25 ", output);
+    EXPECT_EQ(stat, 0);
+    checkValue(outFilename, 555);
+
+    // Check that bad tag fails.
+    stat = Utils::run_shell_command(base +
+        " --stage.foobar.assignment=Z[:]=1987",
+        output);
+    EXPECT_NE(stat, 0);
+
+    // Check that bad option name fails.
+    stat = Utils::run_shell_command(base +
+        " --stage.assigner.blah=Z[:]=1987",
+        output);
+    EXPECT_NE(stat, 0);
+
+    // Check that multiply specified option fails.
+    stat = Utils::run_shell_command(base +
+        " --stage.reader.compression=laszip "
+        "--stage.reader.compression=lazperf", output);
+    EXPECT_NE(stat, 0);
+}
+
 } // namespace pdal
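The precedence the stagetags test pins down, where an option addressed to a
stage's tag always beats one addressed to its driver type regardless of
command-line order, fits in a few lines. A sketch (the structure and names
are ours, not PDAL's):

    #include <map>
    #include <string>

    struct StageOpts
    {
        std::map<std::string, std::string> byType; // "filters.assign.x" -> v
        std::map<std::string, std::string> byTag;  // "assigner.x" -> v

        // Resolve one option for a stage with the given driver type and tag.
        std::string resolve(const std::string& type, const std::string& tag,
            const std::string& option) const
        {
            auto t = byTag.find(tag + "." + option);
            if (t != byTag.end())
                return t->second;       // tag-qualified value wins
            auto d = byType.find(type + "." + option);
            return d != byType.end() ? d->second : "";
        }
    };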
diff --git a/test/unit/filters/AssignFilterTest.cpp b/test/unit/filters/AssignFilterTest.cpp
new file mode 100644
index 0000000..f1de4a4
--- /dev/null
+++ b/test/unit/filters/AssignFilterTest.cpp
@@ -0,0 +1,129 @@
+/******************************************************************************
+* Copyright (c) 2015, Hobu Inc. (info at hobu.co)
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following
+* conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in
+*       the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of Hobu, Inc. nor the
+*       names of its contributors may be used to endorse or promote
+*       products derived from this software without specific prior
+*       written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+* OF SUCH DAMAGE.
+****************************************************************************/
+
+#include <pdal/pdal_test_main.hpp>
+
+#include <pdal/StageFactory.hpp>
+#include <pdal/util/FileUtils.hpp>
+
+#include "Support.hpp"
+
+using namespace pdal;
+
+TEST(AssignFilterTest, value)
+{
+    Options ro;
+    ro.add("filename", Support::datapath("autzen/autzen-dd.las"));
+
+    StageFactory factory;
+    Stage& r = *(factory.createStage("readers.las"));
+    r.setOptions(ro);
+
+    Options fo;
+    fo.add("assignment", "X[:]=27.5");
+
+    Stage& f = *(factory.createStage("filters.assign"));
+    f.setInput(r);
+    f.setOptions(fo);
+
+    std::string tempfile(Support::temppath("out.las"));
+
+    Options wo;
+    wo.add("filename", tempfile);
+    Stage& w = *(factory.createStage("writers.las"));
+    w.setInput(f);
+    w.setOptions(wo);
+
+    FileUtils::deleteFile(tempfile);
+    PointTable t1;
+    w.prepare(t1);
+    w.execute(t1);
+
+    Options testOptions;
+    testOptions.add("filename", tempfile);
+
+    Stage& test = *(factory.createStage("readers.las"));
+    test.setOptions(testOptions);
+
+    PointTable t2;
+    test.prepare(t2);
+    PointViewSet s = test.execute(t2);
+    PointViewPtr v = *s.begin();
+    for (PointId i = 0; i < v->size(); ++i)
+        EXPECT_DOUBLE_EQ(v->getFieldAs<double>(Dimension::Id::X, i), 27.5);
+}
+
+
+TEST(AssignFilterTest, t2)
+{
+    StageFactory factory;
+
+    Stage& r = *factory.createStage("readers.las");
+    Stage& f = *factory.createStage("filters.assign");
+
+    // utm17.las contains 5 points with intensity of 280, 3 of 260 and 2 of 240
+    Options ro;
+    ro.add("filename", Support::datapath("las/utm17.las"));
+    r.setOptions(ro);
+
+    Options fo;
+    fo.add("assignment", "Intensity[:250]=4");
+    fo.add("assignment", "Intensity[245:270 ]=6");
+    fo.add("assignment", "Intensity[272:] = 8");
+
+    f.setInput(r);
+    f.setOptions(fo);
+
+    PointTable t;
+    f.prepare(t);
+    PointViewSet s = f.execute(t);
+    PointViewPtr v = *s.begin();
+
+    int i4 = 0;
+    int i6 = 0;
+    int i8 = 0;
+    for (PointId i = 0; i < v->size(); ++i)
+    {
+        int ii = v->getFieldAs<int>(Dimension::Id::Intensity, i);
+        if (ii == 4)
+            i4++;
+        else if (ii == 6)
+            i6++;
+        else if (ii == 8)
+            i8++;
+    }
+    EXPECT_EQ(i4, 2);
+    EXPECT_EQ(i6, 3);
+    EXPECT_EQ(i8, 5);
+}
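The second test also shows that the assignment grammar tolerates stray spaces
("[245:270 ]", "[272:] = 8") and open-ended ranges on either side. A sketch
of applying such ordered assignments to one value (whether a later range sees
the original or the already-reassigned value cannot be distinguished by this
data; sequential application is an assumption):

    #include <vector>

    struct Assignment { double lo; double hi; double value; };

    // Apply ordered range assignments; later rules see the value a previous
    // rule assigned. Open ends are modeled with very wide bounds.
    double applyAssignments(double v, const std::vector<Assignment>& rules)
    {
        for (const Assignment& a : rules)
            if (v >= a.lo && v <= a.hi)
                v = a.value;
        return v;
    }

    // With rules {{0, 250, 4}, {245, 270, 6}, {272, 1e9, 8}}:
    // 240 -> 4, 260 -> 6, 280 -> 8, matching the i4/i6/i8 counts above.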
diff --git a/test/unit/filters/CropFilterTest.cpp b/test/unit/filters/CropFilterTest.cpp
index 217e8e7..b6f0780 100644
--- a/test/unit/filters/CropFilterTest.cpp
+++ b/test/unit/filters/CropFilterTest.cpp
@@ -351,41 +351,113 @@ TEST(CropFilterTest, stream)
 }
 
 
-TEST(CropFilterTest, test_sphere)
+TEST(CropFilterTest, circle)
 {
-    BOX3D srcBounds(0.0, 0.0, 0.0, 10.0, 100.0, 1000.0);
     Options opts;
-    opts.add("bounds", srcBounds);
-    opts.add("count", 1000);
-    opts.add("mode", "ramp");
+    opts.add("bounds", BOX3D(0.0, 0.0, 0.0, 10.0, 10.0, 0.0));
+    opts.add("mode", "grid");
     FauxReader reader;
     reader.setOptions(opts);
 
-    // crop the window to 1/3rd the size in each dimension
-    BOX2D dstBounds(3.33333, 33.33333, 6.66666, 66.66666);
     Options cropOpts;
-    cropOpts.add("distance", 10.0);
-    cropOpts.add("point", "POINT (4.3 43.0 500)");
+    cropOpts.add("distance", 2.5);
+    cropOpts.add("point", "POINT (5 5)");
 
     CropFilter filter;
     filter.setOptions(cropOpts);
     filter.setInput(reader);
 
-    Options statOpts;
+    PointTable table;
+    filter.prepare(table);
+    PointViewSet viewSet = filter.execute(table);
+    EXPECT_EQ(viewSet.size(), 1u);
+    PointViewPtr buf = *viewSet.begin();
+    EXPECT_EQ(buf->size(), 21u);
 
-    StatsFilter stats;
-    stats.setOptions(statOpts);
-    stats.setInput(filter);
+    struct Point
+    {
+        double x;
+        double y;
+    };
+
+    std::vector<Point> found {
+                {4, 3}, {5, 3}, {6, 3},
+        {3, 4}, {4, 4}, {5, 4}, {6, 4}, {7, 4},
+        {3, 5}, {4, 5}, {5, 5}, {6, 5}, {7, 5},
+        {3, 6}, {4, 6}, {5, 6}, {6, 6}, {7, 6},
+                {4, 7}, {5, 7}, {6, 7}
+    };
+
+    for (PointId idx = 0; idx < buf->size(); ++idx)
+    {
+        EXPECT_EQ(found[idx].x, buf->getFieldAs<double>(Dimension::Id::X, idx));
+        EXPECT_EQ(found[idx].y, buf->getFieldAs<double>(Dimension::Id::Y, idx));
+    }
+}
+
+
+TEST(CropFilterTest, sphere)
+{
+    Options opts;
+    opts.add("bounds", BOX3D(0.0, 0.0, 0.0, 10.0, 10.0, 10.0));
+    opts.add("mode", "grid");
+    FauxReader reader;
+    reader.setOptions(opts);
+
+    Options cropOpts;
+    cropOpts.add("distance", 2.5);
+    cropOpts.add("point", "POINT (5 5 5)");
+
+    CropFilter filter;
+    filter.setOptions(cropOpts);
+    filter.setInput(reader);
 
     PointTable table;
-    stats.prepare(table);
-    PointViewSet viewSet = stats.execute(table);
+    filter.prepare(table);
+    PointViewSet viewSet = filter.execute(table);
     EXPECT_EQ(viewSet.size(), 1u);
     PointViewPtr buf = *viewSet.begin();
+    EXPECT_EQ(buf->size(), 81u);
 
-    const stats::Summary& statsX = stats.getStats(Dimension::Id::X);
-    const stats::Summary& statsY = stats.getStats(Dimension::Id::Y);
-    const stats::Summary& statsZ = stats.getStats(Dimension::Id::Z);
-    EXPECT_EQ(buf->size(), 14u);
+    struct Point
+    {
+        double x;
+        double y;
+        double z;
+    };
 
+    std::vector<Point> found {
+                   {4, 4, 3}, {5, 4, 3}, {6, 4, 3},
+                   {4, 5, 3}, {5, 5, 3}, {6, 5, 3},
+                   {4, 6, 3}, {5, 6, 3}, {6, 6, 3},
+
+                   {4, 3, 4}, {5, 3, 4}, {6, 3, 4},
+        {3, 4, 4}, {4, 4, 4}, {5, 4, 4}, {6, 4, 4}, {7, 4, 4},
+        {3, 5, 4}, {4, 5, 4}, {5, 5, 4}, {6, 5, 4}, {7, 5, 4},
+        {3, 6, 4}, {4, 6, 4}, {5, 6, 4}, {6, 6, 4}, {7, 6, 4},
+                   {4, 7, 4}, {5, 7, 4}, {6, 7, 4},
+
+                   {4, 3, 5}, {5, 3, 5}, {6, 3, 5},
+        {3, 4, 5}, {4, 4, 5}, {5, 4, 5}, {6, 4, 5}, {7, 4, 5},
+        {3, 5, 5}, {4, 5, 5}, {5, 5, 5}, {6, 5, 5}, {7, 5, 5},
+        {3, 6, 5}, {4, 6, 5}, {5, 6, 5}, {6, 6, 5}, {7, 6, 5},
+                   {4, 7, 5}, {5, 7, 5}, {6, 7, 5},
+
+                   {4, 3, 6}, {5, 3, 6}, {6, 3, 6},
+        {3, 4, 6}, {4, 4, 6}, {5, 4, 6}, {6, 4, 6}, {7, 4, 6},
+        {3, 5, 6}, {4, 5, 6}, {5, 5, 6}, {6, 5, 6}, {7, 5, 6},
+        {3, 6, 6}, {4, 6, 6}, {5, 6, 6}, {6, 6, 6}, {7, 6, 6},
+                   {4, 7, 6}, {5, 7, 6}, {6, 7, 6},
+
+                   {4, 4, 7}, {5, 4, 7}, {6, 4, 7},
+                   {4, 5, 7}, {5, 5, 7}, {6, 5, 7},
+                   {4, 6, 7}, {5, 6, 7}, {6, 6, 7}
+    };
+
+    for (PointId idx = 0; idx < buf->size(); ++idx)
+    {
+        EXPECT_EQ(found[idx].x, buf->getFieldAs<double>(Dimension::Id::X, idx));
+        EXPECT_EQ(found[idx].y, buf->getFieldAs<double>(Dimension::Id::Y, idx));
+        EXPECT_EQ(found[idx].z, buf->getFieldAs<double>(Dimension::Id::Z, idx));
+    }
 }
+
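The expected sizes (21 points in the circle, 81 in the sphere) follow from
counting integer grid offsets at distance 2.5 or less from the center. A
quick standalone check:

    #include <iostream>

    int main()
    {
        int circle = 0, sphere = 0;
        for (int dx = -2; dx <= 2; ++dx)
            for (int dy = -2; dy <= 2; ++dy)
            {
                if (dx * dx + dy * dy <= 6.25)          // 2.5 squared
                    ++circle;
                for (int dz = -2; dz <= 2; ++dz)
                    if (dx * dx + dy * dy + dz * dz <= 6.25)
                        ++sphere;
            }
        std::cout << circle << " " << sphere << "\n";   // prints "21 81"
        return 0;
    }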
diff --git a/test/unit/LogTest.cpp b/test/unit/filters/GroupByFilterTest.cpp
similarity index 67%
copy from test/unit/LogTest.cpp
copy to test/unit/filters/GroupByFilterTest.cpp
index aa78e92..ed57fdc 100644
--- a/test/unit/LogTest.cpp
+++ b/test/unit/filters/GroupByFilterTest.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2012, Michael P. Gerlek (mpg at flaxen.com)
+* Copyright (c) 2016, Bradley J Chambers (brad.chambers at gmail.com)
 *
 * All rights reserved.
 *
@@ -13,7 +13,7 @@
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided
 *       with the distribution.
-*     * Neither the name of Hobu, Inc. or Flaxen Consulting LLC nor the
+*     * Neither the name of Hobu, Inc. or Flaxen Geo Consulting nor the
 *       names of its contributors may be used to endorse or promote
 *       products derived from this software without specific prior
 *       written permission.
@@ -33,12 +33,38 @@
 ****************************************************************************/
 
 #include <pdal/pdal_test_main.hpp>
-#include <pdal/Options.hpp>
-#include <pdal/PointView.hpp>
-#include <pdal/StageFactory.hpp>
-#include <io/FauxReader.hpp>
+
+#include <io/LasReader.hpp>
+#include <filters/GroupByFilter.hpp>
 #include "Support.hpp"
 
 using namespace pdal;
 
-//ABELL - Need some tests here, but what we had was crap.
+TEST(GroupByTest, basic_test)
+{
+    Options ro;
+    ro.add("filename", Support::datapath("las/1.2-with-color.las"));
+    LasReader r;
+    r.setOptions(ro);
+
+    Options fo;
+    fo.add("dimension", "Classification");
+
+    GroupByFilter s;
+    s.setOptions(fo);
+    s.setInput(r);
+
+    PointTable table;
+    PointViewPtr view(new PointView(table));
+    s.prepare(table);
+    PointViewSet viewSet = s.execute(table);
+
+    EXPECT_EQ(2u, viewSet.size());
+
+    std::vector<PointViewPtr> views;
+    for (auto it = viewSet.begin(); it != viewSet.end(); ++it)
+        views.push_back(*it);
+
+    EXPECT_EQ(789u, views[0]->size());
+    EXPECT_EQ(276u, views[1]->size());
+}
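filters.groupby splits one view into one view per distinct value of the named
dimension, here 789 points of one Classification value and 276 of another.
The grouping idea over bare values (a sketch, not the filter's code):

    #include <cstddef>
    #include <map>
    #include <vector>

    std::map<int, std::vector<size_t>> groupBy(const std::vector<int>& dim)
    {
        std::map<int, std::vector<size_t>> groups;
        for (size_t i = 0; i < dim.size(); ++i)
            groups[dim[i]].push_back(i);    // indices sharing a value
        return groups;
    }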
diff --git a/filters/PMFFilter.hpp b/test/unit/filters/LocateFilterTest.cpp
similarity index 57%
copy from filters/PMFFilter.hpp
copy to test/unit/filters/LocateFilterTest.cpp
index 6d71767..3f2a541 100644
--- a/filters/PMFFilter.hpp
+++ b/test/unit/filters/LocateFilterTest.cpp
@@ -32,53 +32,64 @@
 * OF SUCH DAMAGE.
 ****************************************************************************/
 
-#pragma once
+#include <pdal/pdal_test_main.hpp>
 
-#include <pdal/Filter.hpp>
-#include <pdal/plugin.hpp>
+#include <pdal/Options.hpp>
+#include <filters/LocateFilter.hpp>
+#include <io/LasReader.hpp>
 
-#include <memory>
+#include "Support.hpp"
 
-extern "C" int32_t PMFFilter_ExitFunc();
-extern "C" PF_ExitFunc PMFFilter_InitPlugin();
+using namespace pdal;
 
-namespace pdal
+TEST(LocateTest, locate_max)
 {
+    PointTable table;
 
-class Options;
-class PointLayout;
-class PointTable;
-class PointView;
+    Options ro;
+    ro.add("filename", Support::datapath("las/1.2-with-color.las"));
+    LasReader r;
+    r.setOptions(ro);
 
-class PDAL_DLL PMFFilter : public Filter
-{
-public:
-    PMFFilter() : Filter()
-    {}
+    Options fo;
+    fo.add("dimension", "Z");
+    fo.add("minmax", "max");
 
-    static void * create();
-    static int32_t destroy(void *);
-    std::string getName() const;
+    LocateFilter f;
+    f.setInput(r);
+    f.setOptions(fo);
+    f.prepare(table);
+    PointViewSet viewSet = f.execute(table);
+    EXPECT_EQ(1u, viewSet.size());
+
+    PointViewPtr view = *viewSet.begin();
+    EXPECT_EQ(1u, view->size());
+
+    EXPECT_NEAR(586.38, view->getFieldAs<double>(Dimension::Id::Z, 0), 0.0001);
+}
 
-private:
-    double m_maxWindowSize;
-    double m_slope;
-    double m_maxDistance;
-    double m_initialDistance;
-    double m_cellSize;
-    bool m_classify;
-    bool m_extract;
-    bool m_approximate;
+TEST(LocateTest, locate_min)
+{
+    PointTable table;
 
-    virtual void addDimensions(PointLayoutPtr layout);
-    virtual void addArgs(ProgramArgs& args);
-    std::vector<double> morphOpen(PointViewPtr view, float radius);
-    std::vector<PointId> processGround(PointViewPtr view);
-    std::vector<PointId> processGroundApprox(PointViewPtr view);
-    virtual PointViewSet run(PointViewPtr view);
+    Options ro;
+    ro.add("filename", Support::datapath("las/1.2-with-color.las"));
+    LasReader r;
+    r.setOptions(ro);
 
-    PMFFilter& operator=(const PMFFilter&); // not implemented
-    PMFFilter(const PMFFilter&); // not implemented
-};
+    Options fo;
+    fo.add("dimension", "Z");
+    fo.add("minmax", "min");
 
-} // namespace pdal
+    LocateFilter f;
+    f.setInput(r);
+    f.setOptions(fo);
+    f.prepare(table);
+    PointViewSet viewSet = f.execute(table);
+    EXPECT_EQ(1u, viewSet.size());
+
+    PointViewPtr view = *viewSet.begin();
+    EXPECT_EQ(1u, view->size());
+
+    EXPECT_NEAR(406.59, view->getFieldAs<double>(Dimension::Id::Z, 0), 0.0001);
+}
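
For contrast with locate_max above, a hand-rolled equivalent (a sketch under
the assumption that filters.locate keeps exactly the point carrying the
extreme value; it additionally needs <algorithm> and <limits>):

    PointTable tbl;
    Options o;
    o.add("filename", Support::datapath("las/1.2-with-color.las"));
    LasReader rd;
    rd.setOptions(o);
    rd.prepare(tbl);
    PointViewPtr all = *rd.execute(tbl).begin();

    // Scan the full view for the maximum Z by hand.
    double maxZ = std::numeric_limits<double>::lowest();
    for (PointId i = 0; i < all->size(); ++i)
        maxZ = (std::max)(maxZ, all->getFieldAs<double>(Dimension::Id::Z, i));
    EXPECT_NEAR(586.38, maxZ, 0.0001);   // should agree with the filter's survivor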
diff --git a/test/unit/filters/AttributeFilterTest.cpp b/test/unit/filters/OverlayFilterTest.cpp
similarity index 81%
rename from test/unit/filters/AttributeFilterTest.cpp
rename to test/unit/filters/OverlayFilterTest.cpp
index a26b5f7..9218361 100644
--- a/test/unit/filters/AttributeFilterTest.cpp
+++ b/test/unit/filters/OverlayFilterTest.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
-* Copyright (c) 2015, Hobu Inc. (info at hobu.co)
+* Copyright (c) 2017, Hobu Inc. (info at hobu.co)
 *
 * All rights reserved.
 *
@@ -41,66 +41,27 @@
 
 using namespace pdal;
 
-TEST(AttributeFilterTest, value)
+void testOverlay(int numReaders, bool stream)
 {
     Options ro;
     ro.add("filename", Support::datapath("autzen/autzen-dd.las"));
 
     StageFactory factory;
-    Stage& r = *(factory.createStage("readers.las"));
-    r.setOptions(ro);
-
-    Options fo;
-    fo.add("dimension", "X");
-    fo.add("value", 27.5);
-
-    Stage& f = *(factory.createStage("filters.attribute"));
-    f.setInput(r);
-    f.setOptions(fo);
-
-    std::string tempfile(Support::temppath("out.las"));
-
-    Options wo;
-    wo.add("filename", tempfile);
-    Stage& w = *(factory.createStage("writers.las"));
-    w.setInput(f);
-    w.setOptions(wo);
-
-    FileUtils::deleteFile(tempfile);
-    PointTable t1;
-    w.prepare(t1);
-    w.execute(t1);
-
-    Options testOptions;
-    testOptions.add("filename", tempfile);
-
-    Stage& test = *(factory.createStage("readers.las"));
-    test.setOptions(testOptions);
-
-    PointTable t2;
-    test.prepare(t2);
-    PointViewSet s = test.execute(t2);
-    PointViewPtr v = *s.begin();
-    for (PointId i = 0; i < v->size(); ++i)
-        EXPECT_DOUBLE_EQ(v->getFieldAs<double>(Dimension::Id::X, i), 27.5);
-}
-
-TEST(AttributeFilterTest, datasource)
-{
-    Options ro;
-    ro.add("filename", Support::datapath("autzen/autzen-dd.las"));
-
-    StageFactory factory;
-    Stage& r = *(factory.createStage("readers.las"));
-    r.setOptions(ro);
 
     Options fo;
     fo.add("dimension", "Classification");
     fo.add("column", "cls");
     fo.add("datasource", Support::datapath("autzen/attributes.shp"));
 
-    Stage& f = *(factory.createStage("filters.attribute"));
-    f.setInput(r);
+    LogPtr l(new Log("readers.las", "stderr"));
+    Stage& f = *(factory.createStage("filters.overlay"));
+    for (int i = 0; i < numReaders; ++i)
+    {
+        Stage& r = *(factory.createStage("readers.las"));
+        r.setLog(l);
+        r.setOptions(ro);
+        f.setInput(r);
+    }
     f.setOptions(fo);
 
     std::string tempfile(Support::temppath("out.las"));
@@ -113,10 +74,18 @@ TEST(AttributeFilterTest, datasource)
     w.setOptions(wo);
 
     FileUtils::deleteFile(tempfile);
-    PointTable t;
-    w.prepare(t);
-    w.execute(t);
-
+    if (stream)
+    {
+        FixedPointTable t(100);
+        w.prepare(t);
+        w.execute(t);
+    }
+    else
+    {
+        PointTable t;
+        w.prepare(t);
+        w.execute(t);
+    }
 //
 //
     Options testOptions;
@@ -183,3 +152,13 @@ TEST(AttributeFilterTest, datasource)
     for (PointId i = 0; i < v->size(); ++i)
         EXPECT_EQ(v->getFieldAs<int>(Dimension::Id::Classification, i), 6);
 }
+
+TEST(OverlayFilterTest, nostream)
+{
+    testOverlay(10, false);
+}
+
+TEST(OverlayFilterTest, stream)
+{
+    testOverlay(10, true);
+}
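
The stream/non-stream fork in testOverlay is the general pattern for
exercising a pipeline both ways; a small helper could factor it out (a
sketch, not in the patch — the helper name is invented):

    // Run the leaf stage of a pipeline with either table flavor.  A
    // FixedPointTable(n) pushes points through in chunks of n, which is
    // what engages each stage's streaming path.
    void executeBothWays(Stage& leaf, bool stream, point_count_t chunk = 100)
    {
        if (stream)
        {
            FixedPointTable t(chunk);
            leaf.prepare(t);
            leaf.execute(t);
        }
        else
        {
            PointTable t;
            leaf.prepare(t);
            leaf.execute(t);
        }
    }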
diff --git a/test/unit/filters/SortFilterTest.cpp b/test/unit/filters/SortFilterTest.cpp
index dd6d273..c512242 100644
--- a/test/unit/filters/SortFilterTest.cpp
+++ b/test/unit/filters/SortFilterTest.cpp
@@ -48,11 +48,13 @@ using namespace pdal;
 namespace
 {
 
-void doSort(point_count_t count)
+void doSort(point_count_t count, const std::string& dim = "X",
+    const std::string& order = "")
 {
     Options opts;
 
-    opts.add("dimension", "X");
+    opts.add("dimension", dim);
+    if (!order.empty())
+        opts.add("order", order);
 
     SortFilter filter;
     filter.setOptions(opts);
@@ -78,7 +80,10 @@ void doSort(point_count_t count)
     {
         double d1 = view->getFieldAs<double>(Dimension::Id::X, i - 1);
         double d2 = view->getFieldAs<double>(Dimension::Id::X, i);
-        EXPECT_TRUE(d1 <= d2);
+        if (order.empty() || order == "ASC")
+            EXPECT_TRUE(d1 <= d2);
+        else // "DESC"
+            EXPECT_TRUE(d1 >= d2);
     }
 }
 
@@ -86,11 +91,18 @@ void doSort(point_count_t count)
 
 TEST(SortFilterTest, simple)
 {
+    // note that this also tests default sort order ASC
     point_count_t inc = 1;
     for (point_count_t count = 3; count < 100000; count += inc, inc *= 2)
         doSort(count);
 }
 
+TEST(SortFilterTest, testUnknownOptions)
+{
+    EXPECT_THROW(doSort(1, "not a dimension"), std::exception);
+    EXPECT_THROW(doSort(1, "X", "not an order"), std::exception);
+}
+
 TEST(SortFilterTest, pipelineJSON)
 {
     PipelineManager mgr;
@@ -146,3 +158,14 @@ TEST(SortFilterTest, issue1382)
         EXPECT_TRUE(d1 <= d2);
     }
 }
+
+TEST(SortFilterTest, issue1121_simpleSortOrderDesc)
+{
+    point_count_t inc = 1;
+    for (point_count_t count = 3; count < 100000; count += inc, inc *= 2)
+    {
+        doSort(count, "X", "ASC");
+        doSort(count, "X", "DESC");
+    }
+}
+
diff --git a/test/unit/io/FauxReaderTest.cpp b/test/unit/io/FauxReaderTest.cpp
index a2a6280..9b5364a 100644
--- a/test/unit/io/FauxReaderTest.cpp
+++ b/test/unit/io/FauxReaderTest.cpp
@@ -237,3 +237,73 @@ TEST(FauxReaderTest, one_point)
     EXPECT_EQ(2, view->getFieldAs<int>(Dimension::Id::Y, 0));
     EXPECT_EQ(3, view->getFieldAs<int>(Dimension::Id::Z, 0));
 }
+
+void testGrid(point_count_t xlimit, point_count_t ylimit, point_count_t zlimit)
+{
+    point_count_t size = 1;
+    if (xlimit)
+        size *= xlimit;
+    if (ylimit)
+        size *= ylimit;
+    if (zlimit)
+        size *= zlimit;
+    if (!xlimit && !ylimit && !zlimit)
+        return;
+
+    Options ops;
+
+    ops.add("bounds", BOX3D(0, 0, 0, xlimit, ylimit, zlimit));
+    ops.add("mode", "grid");
+    FauxReader reader;
+    reader.setOptions(ops);
+
+    PointTable table;
+    reader.prepare(table);
+    PointViewSet viewSet = reader.execute(table);
+    EXPECT_EQ(viewSet.size(), 1u);
+    PointViewPtr view = *viewSet.begin();
+    EXPECT_EQ(view->size(), size);
+
+    int x = 0;
+    int y = 0;
+    int z = 0;
+    for (PointId index = 0; index < size; index++)
+    {
+        EXPECT_EQ(x, view->getFieldAs<int>(Dimension::Id::X, index));
+        EXPECT_EQ(y, view->getFieldAs<int>(Dimension::Id::Y, index));
+        EXPECT_EQ(z, view->getFieldAs<int>(Dimension::Id::Z, index));
+        bool incNext = true;
+        if (xlimit)
+        {
+            x++;
+            if (x >= (int)xlimit)
+                x = 0;
+            else
+                incNext = false;
+        }
+
+        if (ylimit && incNext)
+        {
+            y++;
+            if (y >= (int)ylimit)
+                y = 0;
+            else
+                incNext = false;
+        }
+
+        if (zlimit && incNext)
+            z++;
+    }
+}
+
+TEST(FauxReaderTest, grid)
+{
+    testGrid(2, 3, 4);
+    testGrid(0, 3, 4);
+    testGrid(2, 0, 4);
+    testGrid(2, 3, 0);
+    testGrid(2, 0, 0);
+    testGrid(0, 3, 0);
+    testGrid(0, 3, 4);
+}
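
The expected traversal in testGrid is an odometer: X varies fastest, then Y,
then Z. A standalone sketch of the same ordering (plain C++, no PDAL):

    #include <cstdio>

    int main()
    {
        const int xl = 2, yl = 3, zl = 1;
        // Prints (0,0,0) (1,0,0) (0,1,0) (1,1,0) (0,2,0) (1,2,0)
        for (int z = 0; z < zl; ++z)
            for (int y = 0; y < yl; ++y)
                for (int x = 0; x < xl; ++x)
                    std::printf("(%d,%d,%d) ", x, y, z);
        std::printf("\n");
    }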
diff --git a/test/unit/io/GDALWriterTest.cpp b/test/unit/io/GDALWriterTest.cpp
index 4f40339..fc19788 100644
--- a/test/unit/io/GDALWriterTest.cpp
+++ b/test/unit/io/GDALWriterTest.cpp
@@ -95,6 +95,70 @@ void runGdalWriter(const Options& wo, const std::string& outfile,
         EXPECT_NEAR(arr[i], *d++, .001);
 }
 
+void runGdalWriter2(const Options& wo, const std::string& outfile,
+    const std::string& values, bool stream)
+{
+    FileUtils::deleteFile(outfile);
+
+    Options ro;
+    ro.add("filename", Support::datapath("gdal/grid.txt"));
+
+    Options ro2;
+    ro2.add("filename", Support::datapath("gdal/grid2.txt"));
+
+    TextReader r;
+    r.setOptions(ro);
+
+    TextReader r2;
+    r2.setOptions(ro2);
+
+    GDALWriter w;
+    w.setOptions(wo);
+    w.setInput(r);
+    w.setInput(r2);
+
+    if (!stream)
+    {
+        PointTable t;
+
+        w.prepare(t);
+        w.execute(t);
+    }
+    else
+    {
+        FixedPointTable t(10);
+
+        w.prepare(t);
+        w.execute(t);
+    }
+
+    using namespace gdal;
+
+    std::istringstream iss(values);
+
+    std::vector<double> arr;
+    while (true)
+    {
+        double d;
+        iss >> d;
+        if (!iss)
+            break;
+        arr.push_back(d);
+    }
+
+    registerDrivers();
+    Raster raster(outfile, "GTiff");
+    if (raster.open() != GDALError::None)
+    {
+        throw pdal_error(raster.errorMsg());
+    }
+    std::vector<uint8_t> data;
+    raster.readBand(data, 1);
+    double *d = reinterpret_cast<double *>(data.data());
+    for (size_t i = 0; i < arr.size(); ++i)
+        EXPECT_NEAR(arr[i], *d++, .001);
+}
+
 }
 
 TEST(GDALWriterTest, min)
@@ -118,6 +182,36 @@ TEST(GDALWriterTest, min)
     runGdalWriter(wo, outfile, output);
 }
 
+TEST(GDALWriterTest, min2)
+{
+    std::string outfile = Support::temppath("tmp.tif");
+
+    Options wo;
+    wo.add("gdaldriver", "GTiff");
+    wo.add("output_type", "min");
+    wo.add("resolution", 1);
+    wo.add("radius", .7071);
+    wo.add("filename", outfile);
+
+    Options wo2 = wo;
+    wo2.add("bounds", "([-2, 4.7],[-2, 6.5])");
+
+    const std::string output =
+        "-9999.00 -9999.00 -9999.00 -9999.00 -9999.00 -9999.00    -1.00 "
+        "-9999.00 -9999.00 -9999.00 -9999.00 -9999.00 -9999.00 -9999.00 "
+        "-9999.00 -9999.00     5.00 -9999.00     7.00     8.00     8.90 "
+        "-9999.00 -9999.00     4.00 -9999.00     6.00     7.00     8.00 "
+        "-9999.00 -9999.00     3.00     4.00     5.00     5.50     6.50 "
+        "-9999.00 -9999.00     2.00     3.00     4.00     4.50     5.50 "
+        "-9999.00 -9999.00     1.00     2.00     3.00     4.00     5.00 "
+        "   -1.00    -1.00 -9999.00 -9999.00 -9999.00 -9999.00 -9999.00 "
+        "   -1.00    -1.00 -9999.00 -9999.00 -9999.00 -9999.00 -9999.00";
+
+    runGdalWriter2(wo, outfile, output, false);
+    runGdalWriter2(wo2, outfile, output, false);
+    runGdalWriter2(wo2, outfile, output, true);
+}
+
 TEST(GDALWriterTest, minWindow)
 {
     std::string outfile = Support::temppath("tmp.tif");
diff --git a/test/unit/io/Ilvis2ReaderWithMDReaderTest.cpp b/test/unit/io/Ilvis2ReaderWithMDReaderTest.cpp
index 24b8b9d..539c8b4 100644
--- a/test/unit/io/Ilvis2ReaderWithMDReaderTest.cpp
+++ b/test/unit/io/Ilvis2ReaderWithMDReaderTest.cpp
@@ -60,7 +60,8 @@ TEST(Ilvis2ReaderWithMDReaderTest, testInvalidMetadataFile)
     }
     catch (pdal_error const & err)
     {
-        EXPECT_EQ("Invalid metadata file: 'invalidfile'", std::string(err.what()));
+        EXPECT_EQ("readers.ilvis2: Invalid metadata file: 'invalidfile'",
+            std::string(err.what()));
     }
 }
 
diff --git a/test/unit/io/LasWriterTest.cpp b/test/unit/io/LasWriterTest.cpp
index 9a0af87..badbe02 100644
--- a/test/unit/io/LasWriterTest.cpp
+++ b/test/unit/io/LasWriterTest.cpp
@@ -653,6 +653,29 @@ TEST(LasWriterTest, stream)
     compareFiles(infile, outfile);
 }
 
+TEST(LasWriterTest, streamhashwrite)
+{
+    std::string infile(Support::datapath("las/autzen_trim.las"));
+    std::string outfile(Support::temppath("trimtest#.las"));
+
+    FileUtils::deleteFile(outfile);
+
+    Options ops1;
+    ops1.add("filename", infile);
+
+    LasReader r;
+    r.setOptions(ops1);
+
+    Options ops2;
+    ops2.add("filename", outfile);
+    LasWriter w;
+    w.setOptions(ops2);
+    w.setInput(r);
+
+    FixedPointTable t(100);
+    EXPECT_THROW(w.prepare(t), pdal_error);
+}
+
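
The '#' in "trimtest#.las" appears to be a per-view filename template (one
output file per point view); under that assumption the throw is expected,
since a streaming FixedPointTable never materializes discrete views to split
on. A hypothetical non-streaming counterpart that would be accepted:

    PointTable t;
    w.prepare(t);
    w.execute(t);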
 TEST(LasWriterTest, fix1063_1064_1065)
 {
     std::string outfile = Support::temppath("out.las");
@@ -663,8 +686,6 @@ TEST(LasWriterTest, fix1063_1064_1065)
     std::string cmd = "pdal translate --writers.las.forward=all "
         "--writers.las.a_srs=\"EPSG:4326\" " + infile + " " + outfile;
     std::string output;
-    std::cerr << "*** Shell command = " <<
-        Support::binpath(cmd) << "!\n";
     Utils::run_shell_command(Support::binpath(cmd), output);
 
     Options o;
@@ -695,7 +716,156 @@ TEST(LasWriterTest, fix1063_1064_1065)
     EXPECT_EQ(ref.getWKT(), wkt);
 }
 
+TEST(LasWriterTest, pdal_metadata)
+{
+    PointTable table;
+
+    std::string infile(Support::datapath("las/1.2-with-color.las"));
+    std::string outfile(Support::temppath("simple.las"));
+
+    // remove file from earlier run, if needed
+    FileUtils::deleteFile(outfile);
+
+    Options readerOpts;
+    readerOpts.add("filename", infile);
+
+    Options writerOpts;
+    writerOpts.add("pdal_metadata", true);
+    writerOpts.add("filename", outfile);
+
+    LasReader reader;
+    reader.setOptions(readerOpts);
+
+    LasWriter writer;
+    writer.setOptions(writerOpts);
+    writer.setInput(reader);
+    writer.prepare(table);
+    writer.execute(table);
+
+    PointTable t2;
+    Options readerOpts2;
+    readerOpts2.add("filename", outfile);
+    LasReader reader2;
+    reader2.setOptions(readerOpts2);
+
+    reader2.prepare(t2);
+    reader2.execute(t2);
+
+    EXPECT_EQ(reader2.getMetadata().children("pdal_metadata").size(), 1UL);
+    EXPECT_EQ(reader2.getMetadata().children("pdal_pipeline").size(), 1UL);
+}
+
+
+TEST(LasWriterTest, pdal_add_vlr)
+{
+    PointTable table;
+
+    std::string infile(Support::datapath("las/1.2-with-color.las"));
+    std::string outfile(Support::temppath("simple.las"));
+
+    // remove file from earlier run, if needed
+    FileUtils::deleteFile(outfile);
+
+    Options readerOpts;
+    readerOpts.add("filename", infile);
+
+    std::string vlr( " [ { \"description\": \"A description under 32 bytes\", \"record_id\": 42, \"user_id\": \"hobu\", \"data\": \"dGhpcyBpcyBzb21lIHRleHQ=\" },  { \"description\": \"A description under 32 bytes\", \"record_id\": 43, \"user_id\": \"hobu\", \"data\": \"dGhpcyBpcyBzb21lIG1vcmUgdGV4dA==\" } ]");
+
+    Options writerOpts;
+    writerOpts.add("vlrs", vlr);
+    writerOpts.add("filename", outfile);
+
+    LasReader reader;
+    reader.setOptions(readerOpts);
+
+    LasWriter writer;
+    writer.setOptions(writerOpts);
+    writer.setInput(reader);
+    writer.prepare(table);
+    writer.execute(table);
+
+    PointTable t2;
+    Options readerOpts2;
+    readerOpts2.add("filename", outfile);
+    LasReader reader2;
+    reader2.setOptions(readerOpts2);
+
+    reader2.prepare(t2);
+    reader2.execute(t2);
+
+    MetadataNode forward = reader2.getMetadata();
+
+    auto pred = [](MetadataNode temp)
+        { return Utils::startsWith(temp.name(), "vlr_"); };
+    MetadataNodeList nodes = forward.findChildren(pred);
+    EXPECT_EQ(nodes.size(), 2UL);
+}
+
+
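
Aside: the "data" fields in the VLR JSON above are base64-encoded payloads.

    // Decoded, the two records carry:
    //   "dGhpcyBpcyBzb21lIHRleHQ="           -> "this is some text"
    //   "dGhpcyBpcyBzb21lIG1vcmUgdGV4dA=="   -> "this is some more text"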
+// Make sure that we can forward the LASF_Spec/3 VLR
+TEST(LasWriterTest, forward_spec_3)
+{
+    PointTable table;
+
+    std::string infile(Support::datapath("las/spec_3.las"));
+    std::string outfile(Support::temppath("out.las"));
+
+    // remove file from earlier run, if needed
+    FileUtils::deleteFile(outfile);
+
+    Options readerOpts;
+    readerOpts.add("filename", infile);
+
+    Options writerOpts;
+    writerOpts.add("forward", "all,vlr");
+    writerOpts.add("filename", outfile);
+
+    LasReader reader;
+    reader.setOptions(readerOpts);
+
+    LasWriter writer;
+    writer.setOptions(writerOpts);
+    writer.setInput(reader);
+
+    writer.prepare(table);
+    writer.execute(table);
+
+    PointTable t2;
+    Options readerOpts2;
+    readerOpts2.add("filename", outfile);
+    LasReader reader2;
+    reader2.setOptions(readerOpts2);
+
+    reader2.prepare(t2);
+    reader2.execute(t2);
+
+    auto pred = [](MetadataNode temp)
+    {
+        auto recPred = [](MetadataNode n)
+        {
+            return n.name() == "record_id" &&
+                n.value() == "3";
+        };
+
+        auto userPred = [](MetadataNode n)
+        {
+            return n.name() == "user_id" &&
+                n.value() == "LASF_Spec";
+        };
+
+        return Utils::startsWith(temp.name(), "vlr_") &&
+            !temp.findChild(recPred).empty() &&
+            !temp.findChild(userPred).empty();
+    };
+    MetadataNode root = reader2.getMetadata();
+    MetadataNodeList nodes = root.findChildren(pred);
+    EXPECT_EQ(nodes.size(), 1u);
+}
+
+
 /**
+
 namespace
 {
 
diff --git a/test/unit/io/TextReaderTest.cpp b/test/unit/io/TextReaderTest.cpp
index 6401954..c375dbe 100644
--- a/test/unit/io/TextReaderTest.cpp
+++ b/test/unit/io/TextReaderTest.cpp
@@ -37,23 +37,24 @@
 #include "Support.hpp"
 
 #include <io/LasReader.hpp>
+#include <io/LasWriter.hpp>
 #include <io/TextReader.hpp>
+#include <pdal/util/FileUtils.hpp>
 
 using namespace pdal;
 
 void compareTextLas(const std::string& textFilename,
-    const std::string& lasFilename)
+    Options& textOptions, const std::string& lasFilename)
 {
     TextReader t;
-    Options to;
-    to.add("filename", textFilename);
-    t.setOptions(to);
+    textOptions.add("filename", textFilename);
+    t.setOptions(textOptions);
 
     LasReader l;
     Options lo;
     lo.add("filename", lasFilename);
     l.setOptions(lo);
-    
+
     PointTable tt;
     t.prepare(tt);
     PointViewSet ts = t.execute(tt);
@@ -80,12 +81,90 @@ void compareTextLas(const std::string& textFilename,
     }
 }
 
+
+void compareTextLas(const std::string& textFilename,
+    const std::string& lasFilename)
+{
+    Options textOptions;
+
+    compareTextLas(textFilename, textOptions, lasFilename);
+}
+
+
+void compareTextLasStreaming(const std::string& textFilename,
+    const std::string& lasFilename)
+{
+    std::string tempname(Support::temppath("testlas.las"));
+
+    FileUtils::deleteFile(tempname);
+
+    TextReader t;
+    Options to;
+    to.add("filename", textFilename);
+    t.setOptions(to);
+
+    LasWriter w;
+    Options wo;
+    wo.add("filename", tempname);
+    w.setInput(t);
+    w.setOptions(wo);
+
+    FixedPointTable in(1000);
+    w.prepare(in);
+    w.execute(in);
+
+    LasReader l1;
+    Options l1o;
+    l1o.add("filename", lasFilename);
+    l1.setOptions(l1o);
+
+    LasReader l2;
+    Options l2o;
+    l2o.add("filename", tempname);
+    l2.setOptions(l2o);
+
+    PointTable t1;
+    l1.prepare(t1);
+    PointViewSet s1 = l1.execute(t1);
+    EXPECT_EQ(s1.size(), 1U);
+    PointViewPtr v1 = *s1.begin();
+
+    PointTable t2;
+    l2.prepare(t2);
+    PointViewSet s2 = l2.execute(t2);
+    EXPECT_EQ(s2.size(), 1U);
+    PointViewPtr v2 = *s2.begin();
+
+    EXPECT_EQ(v1->size(), v2->size());
+
+    // Validate some point data.
+    for (PointId i = 0; i < v1->size(); ++i)
+    {
+       EXPECT_DOUBLE_EQ(v1->getFieldAs<double>(Dimension::Id::X, i),
+           v2->getFieldAs<double>(Dimension::Id::X, i));
+       EXPECT_DOUBLE_EQ(v1->getFieldAs<double>(Dimension::Id::Y, i),
+           v2->getFieldAs<double>(Dimension::Id::Y, i));
+       EXPECT_DOUBLE_EQ(v1->getFieldAs<double>(Dimension::Id::Z, i),
+           v2->getFieldAs<double>(Dimension::Id::Z, i));
+    }
+}
+
 TEST(TextReaderTest, t1)
 {
     compareTextLas(Support::datapath("text/utm17_1.txt"),
         Support::datapath("las/utm17.las"));
 }
 
+TEST(TextReaderTest, t1a)
+{
+    Options textOptions;
+
+    textOptions.add("separator", ',');
+
+    compareTextLas(Support::datapath("text/utm17_1.txt"),
+        textOptions, Support::datapath("las/utm17.las"));
+}
+
 TEST(TextReaderTest, t2)
 {
     compareTextLas(Support::datapath("text/utm17_2.txt"),
@@ -108,3 +187,27 @@ TEST(TextReaderTest, badheader)
     PointTable tt;
     EXPECT_THROW(t.prepare(tt), pdal_error);
 }
+
+TEST(TextReaderTest, s1)
+{
+    compareTextLasStreaming(Support::datapath("text/utm17_1.txt"),
+                            Support::datapath("las/utm17.las"));
+}
+
+TEST(TextReaderTest, strip_whitespace_from_dimension_names)
+{
+    TextReader reader;
+    Options options;
+    options.add("filename", Support::datapath("text/crlf_test.txt"));
+    reader.setOptions(options);
+
+    PointTable table;
+    reader.prepare(table);
+    PointViewSet pointViewSet = reader.execute(table);
+    PointViewPtr pointViewPtr = *pointViewSet.begin();
+
+    for (PointId i = 0; i < pointViewPtr->size(); ++i) {
+        EXPECT_EQ(
+            i, pointViewPtr->getFieldAs<uint16_t>(Dimension::Id::Intensity, i));
+    }
+}
diff --git a/tools/lasdump/Dumper.cpp b/tools/lasdump/Dumper.cpp
index b4ccb33..d126c2f 100644
--- a/tools/lasdump/Dumper.cpp
+++ b/tools/lasdump/Dumper.cpp
@@ -32,8 +32,8 @@
  * OF SUCH DAMAGE.
  ****************************************************************************/
 
-#include <laszip/laszip.hpp>
-#include <laszip/lasunzipper.hpp>
+#include <laszip.hpp>
+#include <lasunzipper.hpp>
 
 #include <pdal/util/IStream.hpp>
 
diff --git a/tools/nitfwrap/NitfWrap.cpp b/tools/nitfwrap/NitfWrap.cpp
index ec98034..1ff2ffd 100644
--- a/tools/nitfwrap/NitfWrap.cpp
+++ b/tools/nitfwrap/NitfWrap.cpp
@@ -93,6 +93,7 @@ NitfWrap::NitfWrap(std::vector<std::string>& args)
         BOX3D bounds;
         verify(bounds);
 
+        m_nitfWriter.initialize();
         m_nitfWriter.setFilename(m_outputFile);
         m_nitfWriter.setBounds(bounds);
         m_nitfWriter.wrapData(m_inputFile);
@@ -120,7 +121,7 @@ void NitfWrap::unwrap()
         throw error(oss.str());
     }
     in->seekg(offset, std::istream::beg);
-    
+
     // Find out if this is a LAS or BPF file and make the output filename.
     bool compressed;
     BOX3D bounds;
@@ -238,7 +239,7 @@ bool NitfWrap::verifyLas(ILeStream& in, BOX3D& bounds, bool& compressed)
     {
         in >> h;
     }
-    catch (pdal_error&)
+    catch (LasHeader::error&)
     {
         return false;
     }
diff --git a/vendor/arbiter/arbiter.cpp b/vendor/arbiter/arbiter.cpp
index 1a7f638..349878c 100644
--- a/vendor/arbiter/arbiter.cpp
+++ b/vendor/arbiter/arbiter.cpp
@@ -60,6 +60,7 @@ SOFTWARE.
 
 #include <algorithm>
 #include <cstdlib>
+#include <sstream>
 
 #ifdef ARBITER_CUSTOM_NAMESPACE
 namespace ARBITER_CUSTOM_NAMESPACE
@@ -73,15 +74,36 @@ namespace
 {
     const std::string delimiter("://");
 
+#ifdef ARBITER_CURL
     const std::size_t concurrentHttpReqs(32);
     const std::size_t httpRetryCount(8);
+#endif
+
+    Json::Value getConfig()
+    {
+        Json::Value config;
+        std::string path("~/.arbiter/config.json");
+
+        if      (auto p = util::env("ARBITER_CONFIG_FILE")) path = *p;
+        else if (auto p = util::env("ARBITER_CONFIG_PATH")) path = *p;
+
+        if (auto data = drivers::Fs().tryGet(path))
+        {
+            std::istringstream ss(*data);
+            ss >> config;
+        }
+
+        return config;
+    }
 }
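
Based on the keys this patch reads elsewhere ("http" options in the Curl
constructor, per-profile "s3" entries), a plausible ~/.arbiter/config.json
might look like the following — the schema here is inferred, not normative:

    {
        "verbose": false,
        "http": { "timeout": 300, "followRedirect": true, "verifyPeer": true },
        "s3": [ { "profile": "default", "region": "us-west-2" } ]
    }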
 
-Arbiter::Arbiter() : Arbiter(Json::Value()) { }
+Arbiter::Arbiter() : Arbiter(getConfig()) { }
 
 Arbiter::Arbiter(const Json::Value& json)
     : m_drivers()
-    , m_pool(concurrentHttpReqs, httpRetryCount, json)
+#ifdef ARBITER_CURL
+    , m_pool(new http::Pool(concurrentHttpReqs, httpRetryCount, json))
+#endif
 {
     using namespace drivers;
 
@@ -91,17 +113,32 @@ Arbiter::Arbiter(const Json::Value& json)
     auto test(Test::create(json["test"]));
     if (test) m_drivers[test->type()] = std::move(test);
 
-    auto http(Http::create(m_pool, json["http"]));
+#ifdef ARBITER_CURL
+    auto http(Http::create(*m_pool, json["http"]));
     if (http) m_drivers[http->type()] = std::move(http);
 
-    auto https(Https::create(m_pool, json["http"]));
+    auto https(Https::create(*m_pool, json["http"]));
     if (https) m_drivers[https->type()] = std::move(https);
 
-    auto s3(S3::create(m_pool, json["s3"]));
-    if (s3) m_drivers[s3->type()] = std::move(s3);
+    if (json["s3"].isArray())
+    {
+        for (const auto& sub : json["s3"])
+        {
+            auto s3(S3::create(*m_pool, sub));
+            m_drivers[s3->type()] = std::move(s3);
+        }
+    }
+    else
+    {
+        auto s3(S3::create(*m_pool, json["s3"]));
+        if (s3) m_drivers[s3->type()] = std::move(s3);
+    }
 
-    auto dropbox(Dropbox::create(m_pool, json["dropbox"]));
+    // Credential-based drivers should probably all do something similar to the
+    // S3 driver to support multiple profiles.
+    auto dropbox(Dropbox::create(*m_pool, json["dropbox"]));
     if (dropbox) m_drivers[dropbox->type()] = std::move(dropbox);
+#endif
 }
 
 bool Arbiter::hasDriver(const std::string path) const
@@ -805,7 +842,7 @@ namespace
             std::ofstream::out |
             std::ofstream::trunc);
 
-    const std::string home(([]()
+    std::string getHome()
     {
         std::string s;
 
@@ -827,7 +864,7 @@ namespace
         if (s.empty()) std::cout << "No home directory found" << std::endl;
 
         return s;
-    })());
+    }
 }
 
 namespace drivers
@@ -933,7 +970,8 @@ bool mkdirp(std::string raw)
 
         // Remove consecutive slashes.  For Windows, we'll need to be careful
         // not to remove drive letters like C:\\.
-        const auto end = std::unique(s.begin(), s.end(), [](char l, char r){
+        const auto end = std::unique(s.begin(), s.end(), [](char l, char r)
+        {
             return util::isSlash(l) && util::isSlash(r);
         });
 
@@ -1090,11 +1128,10 @@ std::vector<std::string> glob(std::string path)
 std::string expandTilde(std::string in)
 {
     std::string out(in);
-
+    static std::string home(getHome());
     if (!in.empty() && in.front() == '~')
     {
         if (home.empty()) throw ArbiterError("No home directory found");
-
         out = home + in.substr(1);
     }
 
@@ -1173,7 +1210,13 @@ namespace drivers
 
 using namespace http;
 
-Http::Http(Pool& pool) : m_pool(pool) { }
+Http::Http(Pool& pool)
+    : m_pool(pool)
+{
+#ifndef ARBITER_CURL
+    throw ArbiterError("Cannot create HTTP driver - no curl support was built");
+#endif
+}
 
 std::unique_ptr<Http> Http::create(Pool& pool, const Json::Value&)
 {
@@ -1353,16 +1396,17 @@ Response Http::internalPost(
 #include <functional>
 #include <iostream>
 #include <numeric>
+#include <sstream>
 #include <thread>
 
 #ifndef ARBITER_IS_AMALGAMATION
 #include <arbiter/arbiter.hpp>
 #include <arbiter/drivers/fs.hpp>
 #include <arbiter/third/xml/xml.hpp>
+#include <arbiter/util/ini.hpp>
 #include <arbiter/util/md5.hpp>
 #include <arbiter/util/sha256.hpp>
 #include <arbiter/util/transforms.hpp>
-#include <arbiter/util/util.hpp>
 #endif
 
 #ifdef ARBITER_CUSTOM_NAMESPACE
@@ -1375,17 +1419,17 @@ namespace arbiter
 
 namespace
 {
-    const std::string dateFormat("%Y%m%d");
-    const std::string timeFormat("%H%M%S");
-
-    std::string getBaseUrl(const std::string& region)
-    {
-        // https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
-        if (region == "us-east-1") return "s3.amazonaws.com/";
-        else return "s3-" + region + ".amazonaws.com/";
-    }
+#ifdef ARBITER_CURL
+    // Re-fetch credentials when less than 4 minutes remain.  AWS guarantees
+    // that replacement credentials are available at least 5 minutes before
+    // the old ones expire.
+    constexpr int64_t reauthSeconds(60 * 4);
+#endif
 
-    drivers::Fs fsDriver;
+    // See:
+    // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+    const std::string credIp("http://169.254.169.254/");
+    const std::string credBase(
+            credIp + "latest/meta-data/iam/security-credentials/");
 
     std::string line(const std::string& data) { return data + "\n"; }
     const std::vector<char> empty;
@@ -1431,71 +1475,142 @@ namespace
         if (s.size() && std::isspace(s.back())) s.pop_back();
         return s;
     }
+}
 
-    std::vector<std::string> condense(const std::vector<std::string>& in)
-    {
-        return std::accumulate(
-                in.begin(),
-                in.end(),
-                std::vector<std::string>(),
-                [](const std::vector<std::string>& base, const std::string& in)
-                {
-                    auto out(base);
+namespace drivers
+{
 
-                    std::string current(in);
-                    current.erase(
-                            std::remove_if(
-                                current.begin(),
-                                current.end(),
-                                [](char c) { return std::isspace(c); }),
-                            current.end());
+using namespace http;
+using namespace util;
 
-                    out.push_back(current);
-                    return out;
-                });
+S3::S3(
+        Pool& pool,
+        std::string profile,
+        std::unique_ptr<Auth> auth,
+        std::unique_ptr<Config> config)
+    : Http(pool)
+    , m_profile(profile)
+    , m_auth(std::move(auth))
+    , m_config(std::move(config))
+{ }
+
+std::unique_ptr<S3> S3::create(Pool& pool, const Json::Value& json)
+{
+    const std::string profile(extractProfile(json));
+
+    auto auth(Auth::create(json, profile));
+    if (!auth) return std::unique_ptr<S3>();
+
+    auto config(Config::create(json, profile));
+    if (!config) return std::unique_ptr<S3>();
+
+    return makeUnique<S3>(pool, profile, std::move(auth), std::move(config));
+}
+
+std::string S3::extractProfile(const Json::Value& json)
+{
+    if (auto p = util::env("AWS_PROFILE")) return *p;
+    else if (auto p = util::env("AWS_DEFAULT_PROFILE")) return *p;
+    else if (
+            !json.isNull() &&
+            json.isMember("profile") &&
+            json["profile"].asString().size())
+    {
+        return json["profile"].asString();
     }
+    else return "default";
+}
 
-    std::vector<std::string> split(const std::string& in, char delimiter = '\n')
+std::unique_ptr<S3::Auth> S3::Auth::create(
+        const Json::Value& json,
+        const std::string profile)
+{
+    // Try environment settings first.
     {
-        std::size_t index(0);
-        std::size_t pos(0);
-        std::vector<std::string> lines;
+        auto access(util::env("AWS_ACCESS_KEY_ID"));
+        auto hidden(util::env("AWS_SECRET_ACCESS_KEY"));
 
-        do
+        if (access && hidden)
         {
-            index = in.find(delimiter, pos);
-            std::string line(in.substr(pos, index - pos));
-
-            line.erase(
-                    std::remove_if(line.begin(), line.end(), ::isspace),
-                    line.end());
+            return makeUnique<Auth>(*access, *hidden);
+        }
 
-            lines.push_back(line);
+        access = util::env("AMAZON_ACCESS_KEY_ID");
+        hidden = util::env("AMAZON_SECRET_ACCESS_KEY");
 
-            pos = index + 1;
+        if (access && hidden)
+        {
+            return makeUnique<Auth>(*access, *hidden);
         }
-        while (index != std::string::npos);
+    }
 
-        return lines;
+    // Try explicit JSON configuration next.
+    if (
+            !json.isNull() &&
+            json.isMember("access") &&
+            (json.isMember("secret") || json.isMember("hidden")))
+    {
+        return makeUnique<Auth>(
+                json["access"].asString(),
+                json.isMember("secret") ?
+                    json["secret"].asString() :
+                    json["hidden"].asString());
     }
-}
 
-namespace drivers
-{
+    const std::string credPath(
+            util::env("AWS_CREDENTIAL_FILE") ?
+                *util::env("AWS_CREDENTIAL_FILE") : "~/.aws/credentials");
 
-using namespace http;
+    // Next, try reading the credentials file.
+    drivers::Fs fsDriver;
+    if (std::unique_ptr<std::string> c = fsDriver.tryGet(credPath))
+    {
+        const std::string accessKey("aws_access_key_id");
+        const std::string hiddenKey("aws_secret_access_key");
+        const ini::Contents creds(ini::parse(*c));
+        if (creds.count(profile))
+        {
+            const auto section(creds.at(profile));
+            if (section.count(accessKey) && section.count(hiddenKey))
+            {
+                const auto access(section.at(accessKey));
+                const auto hidden(section.at(hiddenKey));
+                return makeUnique<Auth>(access, hidden);
+            }
+        }
+    }
 
-S3::S3(
-        Pool& pool,
-        const S3::Auth& auth,
+#ifdef ARBITER_CURL
+    // Nothing found in the environment or on the filesystem.  However we may
+    // be running in an EC2 instance with an instance profile set up.
+    //
+    // By default we won't search for this since we don't really want to make
+    // an HTTP request on every Arbiter construction - but if we're allowed,
+    // see if we can request an instance profile configuration.
+    if (
+            json["allowInstanceProfile"].asBool() ||
+            env("AWS_ALLOW_INSTANCE_PROFILE"))
+    {
+        http::Pool pool;
+        drivers::Http httpDriver(pool);
+
+        if (const auto iamRole = httpDriver.tryGet(credBase))
+        {
+            return makeUnique<Auth>(*iamRole);
+        }
+    }
+#endif
+
+    return std::unique_ptr<Auth>();
+}
+
+S3::Config::Config(
         const std::string region,
+        const std::string baseUrl,
         const bool sse,
         const bool precheck)
-    : Http(pool)
-    , m_auth(auth)
-    , m_region(region)
-    , m_baseUrl(getBaseUrl(region))
-    , m_baseHeaders()
+    : m_region(region)
+    , m_baseUrl(baseUrl)
     , m_precheck(precheck)
 {
     if (sse)
@@ -1506,150 +1621,165 @@ S3::S3(
     }
 }
 
-std::unique_ptr<S3> S3::create(Pool& pool, const Json::Value& json)
+std::unique_ptr<S3::Config> S3::Config::create(
+        const Json::Value& json,
+        const std::string profile)
 {
-    std::unique_ptr<Auth> auth;
-    std::unique_ptr<S3> s3;
-
-    const std::string profile(extractProfile(json));
+    const auto region(extractRegion(json, profile));
+    const auto baseUrl(extractBaseUrl(json, region));
     const bool sse(json["sse"].asBool());
     const bool precheck(json["precheck"].asBool());
+    return makeUnique<Config>(region, baseUrl, sse, precheck);
+}
 
-    if (!json.isNull() && json.isMember("access") & json.isMember("hidden"))
-    {
-        auth.reset(
-                new Auth(
-                    json["access"].asString(),
-                    json["hidden"].asString()));
-    }
-    else
-    {
-        auth = Auth::find(profile);
-    }
-
-    if (!auth) return s3;
-
-    // Try to get the region from the config file, or default to US standard.
-    std::string region("us-east-1");
-    bool regionFound(false);
-
+std::string S3::Config::extractRegion(
+        const Json::Value& json,
+        const std::string profile)
+{
     const std::string configPath(
             util::env("AWS_CONFIG_FILE") ?
                 *util::env("AWS_CONFIG_FILE") : "~/.aws/config");
 
+    drivers::Fs fsDriver;
+
     if (auto p = util::env("AWS_REGION"))
     {
-        region = *p;
-        regionFound = true;
+        return *p;
     }
     else if (auto p = util::env("AWS_DEFAULT_REGION"))
     {
-        region = *p;
-        regionFound = true;
+        return *p;
     }
     else if (!json.isNull() && json.isMember("region"))
     {
-        region = json["region"].asString();
-        regionFound = true;
+        return json["region"].asString();
     }
-    else if (std::unique_ptr<std::string> config = fsDriver.tryGet(configPath))
+    else if (std::unique_ptr<std::string> c = fsDriver.tryGet(configPath))
     {
-        const std::vector<std::string> lines(condense(split(*config)));
-
-        if (lines.size() >= 3)
+        const ini::Contents settings(ini::parse(*c));
+        if (settings.count(profile))
         {
-            std::size_t i(0);
+            const auto section(settings.at(profile));
+            if (section.count("region")) return section.at("region");
+        }
+    }
 
-            const std::string profileFind("[" + profile + "]");
-            const std::string outputFind("output=");
-            const std::string regionFind("region=");
+    if (json["verbose"].asBool())
+    {
+        std::cout << "Region not found - defaulting to us-east-1" << std::endl;
+    }
 
-            while (i < lines.size() - 2 && !regionFound)
-            {
-                if (lines[i].find(profileFind) != std::string::npos)
-                {
-                    auto parse([&](
-                                const std::string& outputLine,
-                                const std::string& regionLine)
-                    {
-                        std::size_t outputPos(outputLine.find(outputFind));
-                        std::size_t regionPos(regionLine.find(regionFind));
+    return "us-east-1";
+}
 
-                        if (
-                                outputPos != std::string::npos &&
-                                regionPos != std::string::npos)
-                        {
-                            region = regionLine.substr(
-                                    regionPos + regionFind.size(),
-                                    regionLine.find(';'));
+std::string S3::Config::extractBaseUrl(
+        const Json::Value& json,
+        std::string region)
+{
+    if (json.isMember("endpoint") && json["endpoint"].asString().size())
+    {
+        const std::string path(json["endpoint"].asString());
+        return path.back() == '/' ? path : path + '/';
+    }
 
-                            return true;
-                        }
+    std::string endpointsPath("~/.aws/endpoints.json");
+
+    if (const auto e = util::env("AWS_ENDPOINTS_FILE"))
+    {
+        endpointsPath = *e;
+    }
+    else if (json.isMember("endpointsFile"))
+    {
+        endpointsPath = json["endpointsFile"].asString();
+    }
 
-                        return false;
-                    });
+    std::string dnsSuffix("amazonaws.com");
 
+    drivers::Fs fsDriver;
+    if (std::unique_ptr<std::string> e = fsDriver.tryGet(endpointsPath))
+    {
+        Json::Value ep;
+        std::istringstream ss(*e);
+        ss >> ep;
 
-                    const std::string& l1(lines[i + 1]);
-                    const std::string& l2(lines[i + 2]);
+        for (const auto& partition : ep["partitions"])
+        {
+            if (partition.isMember("dnsSuffix"))
+            {
+                dnsSuffix = partition["dnsSuffix"].asString();
+            }
 
-                    regionFound = parse(l1, l2) || parse(l2, l1);
+            const auto& endpoints(partition["services"]["s3"]["endpoints"]);
+            const auto regions(endpoints.getMemberNames());
+            for (const auto& r : regions)
+            {
+                if (r == region && endpoints[region].isMember("hostname"))
+                {
+                    return endpoints[region]["hostname"].asString() + '/';
                 }
-
-                ++i;
             }
         }
     }
-    else if (json["verbose"].asBool())
-    {
-        std::cout <<
-            "~/.aws/config not found - using region us-east-1" << std::endl;
-    }
-
-    if (!regionFound && json["verbose"].asBool())
-    {
-        std::cout <<
-            "Region not found in ~/.aws/config - using us-east-1" << std::endl;
-    }
 
-    s3.reset(new S3(pool, *auth, region, sse, precheck));
+    if (dnsSuffix.size() && dnsSuffix.back() != '/') dnsSuffix += '/';
 
-    return s3;
+    // https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+    if (region == "us-east-1") return "s3." + dnsSuffix;
+    else return "s3-" + region + "." + dnsSuffix;
 }
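
Worked examples of the fallback above (no endpoints file found, default
dnsSuffix "amazonaws.com"):

    // extractBaseUrl(json, "us-east-1") -> "s3.amazonaws.com/"
    // extractBaseUrl(json, "eu-west-1") -> "s3-eu-west-1.amazonaws.com/"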
 
-std::string S3::extractProfile(const Json::Value& json)
+S3::AuthFields S3::Auth::fields() const
 {
-    if (auto p = util::env("AWS_PROFILE"))
-    {
-        return *p;
-    }
-    else if (auto p = util::env("AWS_DEFAULT_PROFILE"))
-    {
-        return *p;
-    }
-    else if (
-            !json.isNull() &&
-            json.isMember("profile") &&
-            json["profile"].asString().size())
-    {
-        return json["profile"].asString();
-    }
-    else
+#ifdef ARBITER_CURL
+    if (m_role)
     {
-        return "default";
+        std::lock_guard<std::mutex> lock(m_mutex);
+
+        const Time now;
+        if (!m_expiration || *m_expiration - now < reauthSeconds)
+        {
+            http::Pool pool;
+            drivers::Http httpDriver(pool);
+
+            std::istringstream ss(httpDriver.get(credBase + *m_role));
+            Json::Value creds;
+            ss >> creds;
+            m_access = creds["AccessKeyId"].asString();
+            m_hidden = creds["SecretAccessKey"].asString();
+            m_token = creds["Token"].asString();
+            m_expiration.reset(new Time(creds["Expiration"].asString()));
+
+            if (*m_expiration - now < reauthSeconds)
+            {
+                throw ArbiterError("Got invalid instance profile credentials");
+            }
+        }
+
+        // If we're using an IAM role, make sure to create this before
+        // releasing the lock.
+        return S3::AuthFields(m_access, m_hidden, m_token);
     }
+#endif
+
+    return S3::AuthFields(m_access, m_hidden, m_token);
+}
+
+std::string S3::type() const
+{
+    if (m_profile == "default") return "s3";
+    else return m_profile + "@s3";
 }
 
 std::unique_ptr<std::size_t> S3::tryGetSize(std::string rawPath) const
 {
     std::unique_ptr<std::size_t> size;
 
-    const Resource resource(m_baseUrl, rawPath);
+    const Resource resource(m_config->baseUrl(), rawPath);
     const ApiV4 apiV4(
             "HEAD",
-            m_region,
+            m_config->region(),
             resource,
-            m_auth,
+            m_auth->fields(),
             Query(),
             Headers(),
             empty);
@@ -1672,15 +1802,15 @@ bool S3::get(
         const Query query) const
 {
     std::unique_ptr<std::size_t> size(
-            m_precheck && !headers.count("Range") ?
+            m_config->precheck() && !headers.count("Range") ?
                 tryGetSize(rawPath) : nullptr);
 
-    const Resource resource(m_baseUrl, rawPath);
+    const Resource resource(m_config->baseUrl(), rawPath);
     const ApiV4 apiV4(
             "GET",
-            m_region,
+            m_config->region(),
             resource,
-            m_auth,
+            m_auth->fields(),
             query,
             headers,
             empty);
@@ -1711,16 +1841,16 @@ void S3::put(
         const Headers userHeaders,
         const Query query) const
 {
-    const Resource resource(m_baseUrl, rawPath);
+    const Resource resource(m_config->baseUrl(), rawPath);
 
-    Headers headers(m_baseHeaders);
+    Headers headers(m_config->baseHeaders());
     headers.insert(userHeaders.begin(), userHeaders.end());
 
     const ApiV4 apiV4(
             "PUT",
-            m_region,
+            m_config->region(),
             resource,
-            m_auth,
+            m_auth->fields(),
             query,
             headers,
             data);
@@ -1743,7 +1873,7 @@ void S3::put(
 void S3::copy(const std::string src, const std::string dst) const
 {
     Headers headers;
-    const Resource resource(m_baseUrl, src);
+    const Resource resource(m_config->baseUrl(), src);
     headers["x-amz-copy-source"] = resource.bucket() + '/' + resource.object();
     put(dst, std::vector<char>(), headers, Query());
 }
@@ -1757,7 +1887,7 @@ std::vector<std::string> S3::glob(std::string path, bool verbose) const
     if (recursive) path.pop_back();
 
     // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
-    const Resource resource(m_baseUrl, path);
+    const Resource resource(m_config->baseUrl(), path);
     const std::string& bucket(resource.bucket());
     const std::string& object(resource.object());
 
@@ -1852,19 +1982,23 @@ S3::ApiV4::ApiV4(
         const std::string verb,
         const std::string& region,
         const Resource& resource,
-        const S3::Auth& auth,
+        const S3::AuthFields authFields,
         const Query& query,
         const Headers& headers,
         const std::vector<char>& data)
-    : m_auth(auth)
+    : m_authFields(authFields)
     , m_region(region)
-    , m_formattedTime()
+    , m_time()
     , m_headers(headers)
     , m_query(query)
     , m_signedHeadersString()
 {
     m_headers["Host"] = resource.host();
-    m_headers["X-Amz-Date"] = m_formattedTime.amazonDate();
+    m_headers["X-Amz-Date"] = m_time.str(Time::iso8601NoSeparators);
+    if (m_authFields.token().size())
+    {
+        m_headers["X-Amz-Security-Token"] = m_authFields.token();
+    }
     m_headers["X-Amz-Content-Sha256"] =
             crypto::encodeAsHex(crypto::sha256(data));
 
@@ -1924,7 +2058,7 @@ std::string S3::ApiV4::buildCanonicalRequest(
         const Query& query,
         const std::vector<char>& data) const
 {
-    const std::string canonicalUri(sanitize("/" + resource.object()));
+    const std::string canonicalUri("/" + resource.object());
 
     auto canonicalizeQuery([](const std::string& s, const Query::value_type& q)
     {
@@ -1956,8 +2090,9 @@ std::string S3::ApiV4::buildStringToSign(
 {
     return
         line("AWS4-HMAC-SHA256") +
-        line(m_formattedTime.amazonDate()) +
-        line(m_formattedTime.date() + "/" + m_region + "/s3/aws4_request") +
+        line(m_time.str(Time::iso8601NoSeparators)) +
+        line(m_time.str(Time::dateNoSeparators) +
+                "/" + m_region + "/s3/aws4_request") +
         crypto::encodeAsHex(crypto::sha256(canonicalRequest));
 }
 
@@ -1966,8 +2101,8 @@ std::string S3::ApiV4::calculateSignature(
 {
     const std::string kDate(
             crypto::hmacSha256(
-                "AWS4" + m_auth.hidden(),
-                m_formattedTime.date()));
+                "AWS4" + m_authFields.hidden(),
+                m_time.str(Time::dateNoSeparators)));
 
     const std::string kRegion(crypto::hmacSha256(kDate, m_region));
     const std::string kService(crypto::hmacSha256(kRegion, "s3"));
@@ -1983,8 +2118,9 @@ std::string S3::ApiV4::getAuthHeader(
 {
     return
         std::string("AWS4-HMAC-SHA256 ") +
-        "Credential=" + m_auth.access() + '/' +
-            m_formattedTime.date() + "/" + m_region + "/s3/aws4_request, " +
+        "Credential=" + m_authFields.access() + '/' +
+            m_time.str(Time::dateNoSeparators) + "/" +
+            m_region + "/s3/aws4_request, " +
         "SignedHeaders=" + signedHeadersString + ", " +
         "Signature=" + signature;
 }
@@ -1999,11 +2135,7 @@ S3::Resource::Resource(std::string baseUrl, std::string fullPath)
     const std::size_t split(fullPath.find("/"));
 
     m_bucket = fullPath.substr(0, split);
-
-    if (split != std::string::npos)
-    {
-        m_object = fullPath.substr(split + 1);
-    }
+    if (split != std::string::npos) m_object = fullPath.substr(split + 1);
 
     m_virtualHosted = m_bucket.find_first_of('.') == std::string::npos;
 }
@@ -2024,14 +2156,8 @@ std::string S3::Resource::url() const
 std::string S3::Resource::object() const
 {
     // We can't use virtual-host style paths if the bucket contains dots.
-    if (m_virtualHosted)
-    {
-        return m_object;
-    }
-    else
-    {
-        return m_bucket + "/" + m_object;
-    }
+    if (m_virtualHosted) return m_object;
+    else return m_bucket + "/" + m_object;
 }
 
 std::string S3::Resource::host() const
@@ -2047,111 +2173,6 @@ std::string S3::Resource::host() const
     }
 }
 
-S3::FormattedTime::FormattedTime()
-    : m_date(formatTime(dateFormat))
-    , m_time(formatTime(timeFormat))
-{ }
-
-std::string S3::FormattedTime::formatTime(const std::string& format) const
-{
-    std::time_t time(std::time(nullptr));
-    std::vector<char> buf(80, 0);
-
-    if (std::strftime(
-                buf.data(),
-                buf.size(),
-                format.data(),
-                std::gmtime(&time)))
-    {
-        return std::string(buf.data());
-    }
-    else
-    {
-        throw ArbiterError("Could not format time");
-    }
-}
-
-S3::Auth::Auth(const std::string access, const std::string hidden)
-    : m_access(access)
-    , m_hidden(hidden)
-{ }
-
-std::unique_ptr<S3::Auth> S3::Auth::find(std::string profile)
-{
-    std::unique_ptr<S3::Auth> auth;
-
-    auto access(util::env("AWS_ACCESS_KEY_ID"));
-    auto hidden(util::env("AWS_SECRET_ACCESS_KEY"));
-
-    if (access && hidden)
-    {
-        auth.reset(new S3::Auth(*access, *hidden));
-        return auth;
-    }
-
-    access = util::env("AMAZON_ACCESS_KEY_ID");
-    hidden = util::env("AMAZON_SECRET_ACCESS_KEY");
-
-    if (access && hidden)
-    {
-        auth.reset(new S3::Auth(*access, *hidden));
-        return auth;
-    }
-
-    const std::string credFile("~/.aws/credentials");
-
-    // First, try reading credentials file.
-    if (std::unique_ptr<std::string> cred = fsDriver.tryGet(credFile))
-    {
-        const std::vector<std::string> lines(condense(split(*cred)));
-
-        if (lines.size() >= 3)
-        {
-            std::size_t i(0);
-
-            const std::string profileFind("[" + profile + "]");
-            const std::string accessFind("aws_access_key_id=");
-            const std::string hiddenFind("aws_secret_access_key=");
-
-            while (i < lines.size() - 2 && !auth)
-            {
-                if (lines[i].find(profileFind) != std::string::npos)
-                {
-                    const std::string& accessLine(lines[i + 1]);
-                    const std::string& hiddenLine(lines[i + 2]);
-
-                    std::size_t accessPos(accessLine.find(accessFind));
-                    std::size_t hiddenPos(hiddenLine.find(hiddenFind));
-
-                    if (
-                            accessPos != std::string::npos &&
-                            hiddenPos != std::string::npos)
-                    {
-                        const std::string access(
-                                accessLine.substr(
-                                    accessPos + accessFind.size(),
-                                    accessLine.find(';')));
-
-                        const std::string hidden(
-                                hiddenLine.substr(
-                                    hiddenPos + hiddenFind.size(),
-                                    hiddenLine.find(';')));
-
-                        auth.reset(new S3::Auth(access, hidden));
-                    }
-                }
-
-                ++i;
-            }
-        }
-    }
-
-    return auth;
-}
-
-std::string S3::Auth::access() const { return m_access; }
-std::string S3::Auth::hidden() const { return m_hidden; }
-
 } // namespace drivers
 } // namespace arbiter
 
@@ -2534,17 +2555,23 @@ std::vector<std::string> Dropbox::glob(std::string rawPath, bool verbose) const
 
 
 // //////////////////////////////////////////////////////////////////////
-// Beginning of content of file: arbiter/util/http.cpp
+// Beginning of content of file: arbiter/util/curl.cpp
 // //////////////////////////////////////////////////////////////////////
 
+#include <algorithm>
+#include <cstring>
+#include <ios>
+#include <iostream>
+
 #ifndef ARBITER_IS_AMALGAMATION
+#include <arbiter/util/curl.hpp>
 #include <arbiter/util/http.hpp>
+#include <arbiter/util/util.hpp>
 #endif
 
-#include <iostream>
-#include <numeric>
-
+#ifdef ARBITER_CURL
 #include <curl/curl.h>
+#endif
 
 #ifdef ARBITER_CUSTOM_NAMESPACE
 namespace ARBITER_CUSTOM_NAMESPACE
@@ -2558,6 +2585,7 @@ namespace http
 
 namespace
 {
+#ifdef ARBITER_CURL
     struct PutData
     {
         PutData(const std::vector<char>& data)
@@ -2630,88 +2658,120 @@ namespace
         return size * num;
     }
 
-    const std::map<char, std::string> sanitizers
-    {
-        { ' ', "%20" },
-        { '!', "%21" },
-        { '"', "%22" },
-        { '#', "%23" },
-        { '$', "%24" },
-        { '\'', "%27" },
-        { '(', "%28" },
-        { ')', "%29" },
-        { '*', "%2A" },
-        { '+', "%2B" },
-        { ',', "%2C" },
-        { '/', "%2F" },
-        { ';', "%3B" },
-        { '<', "%3C" },
-        { '>', "%3E" },
-        { '@', "%40" },
-        { '[', "%5B" },
-        { '\\', "%5C" },
-        { ']', "%5D" },
-        { '^', "%5E" },
-        { '`', "%60" },
-        { '{', "%7B" },
-        { '|', "%7C" },
-        { '}', "%7D" },
-        { '~', "%7E" }
-    };
-
-    const bool followRedirect(true);
-    const std::size_t defaultHttpTimeout(60 * 5);
+#else
+    const std::string fail("Arbiter was built without curl");
+#endif // ARBITER_CURL
 } // unnamed namespace
 
-std::string sanitize(const std::string path, const std::string exclusions)
+Curl::Curl(const Json::Value& json)
 {
-    std::string result;
+#ifdef ARBITER_CURL
+    using namespace util;
 
-    for (const auto c : path)
-    {
-        const auto it(sanitizers.find(c));
+    m_curl = curl_easy_init();
 
-        if (it == sanitizers.end() || exclusions.find(c) != std::string::npos)
-        {
-            result += c;
-        }
-        else
+    // Configurable entries are:
+    //      - timeout           (CURLOPT_LOW_SPEED_TIME)
+    //      - followRedirect    (CURLOPT_FOLLOWLOCATION)
+    //      - caBundle          (CURLOPT_CAPATH)
+    //      - caInfo            (CURLOPT_CAINFO)
+    //      - verifyPeer        (CURLOPT_SSL_VERIFYPEER)
+
+    using Keys = std::vector<std::string>;
+    auto find([](const Keys& keys)->std::unique_ptr<std::string>
+    {
+        for (const auto& key : keys)
         {
-            result += it->second;
+            if (auto e = util::env(key)) return makeUnique<std::string>(*e);
         }
-    }
+        return std::unique_ptr<std::string>();
+    });
 
-    return result;
-}
+    auto mk([](std::string s) { return makeUnique<std::string>(s); });
 
-std::string buildQueryString(const Query& query)
-{
-    return std::accumulate(
-            query.begin(),
-            query.end(),
-            std::string(),
-            [](const std::string& out, const Query::value_type& keyVal)
+    if (!json.isNull())
+    {
+        m_verbose = json["verbose"].asBool();
+        const auto& h(json["http"]);
+
+        if (!h.isNull())
+        {
+            if (h.isMember("timeout"))
             {
-                const char sep(out.empty() ? '?' : '&');
-                return out + sep + keyVal.first + '=' + keyVal.second;
-            });
-}
+                m_timeout = h["timeout"].asUInt64();
+            }
 
-Curl::Curl(bool verbose, std::size_t timeout)
-    : m_curl(0)
-    , m_headers(0)
-    , m_verbose(verbose)
-    , m_timeout(timeout)
-    , m_data()
-{
-    m_curl = curl_easy_init();
+            if (h.isMember("followRedirect"))
+            {
+                m_followRedirect = h["followRedirect"].asBool();
+            }
+
+            if (h.isMember("caBundle"))
+            {
+                m_caPath = mk(h["caBundle"].asString());
+            }
+            else if (h.isMember("caPath"))
+            {
+                m_caPath = mk(h["caPath"].asString());
+            }
+
+            if (h.isMember("caInfo"))
+            {
+                m_caInfo = mk(h["caInfo"].asString());
+            }
+
+            if (h.isMember("verifyPeer"))
+            {
+                m_verifyPeer = h["verifyPeer"].asBool();
+            }
+        }
+    }
+
+    Keys verboseKeys{ "VERBOSE", "CURL_VERBOSE", "ARBITER_VERBOSE" };
+    Keys timeoutKeys{ "CURL_TIMEOUT", "ARBITER_HTTP_TIMEOUT" };
+    Keys redirKeys{
+        "CURL_FOLLOWLOCATION",
+        "CURL_FOLLOW_LOCATION",
+        "ARBITER_FOLLOW_LOCATION"
+        "ARBITER_FOLLOW_REDIRECT"
+    };
+    Keys verifyKeys{
+        "CURL_SSL_VERIFYPEER",
+        "CURL_VERIFY_PEER",
+        "ARBITER_VERIFY_PEER"
+    };
+    Keys caPathKeys{ "CURL_CA_PATH", "CURL_CA_BUNDLE", "ARBITER_CA_PATH" };
+    Keys caInfoKeys{ "CURL_CAINFO", "CURL_CA_INFO", "ARBITER_CA_INFO" };
+
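+    // Environment variables override any JSON-supplied settings; within each
+    // list, the first key found in the environment wins.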
+    if (auto v = find(verboseKeys)) m_verbose = !!std::stol(*v);
+    if (auto v = find(timeoutKeys)) m_timeout = std::stol(*v);
+    if (auto v = find(redirKeys)) m_followRedirect = !!std::stol(*v);
+    if (auto v = find(verifyKeys)) m_verifyPeer = !!std::stol(*v);
+    if (auto v = find(caPathKeys)) m_caPath = mk(*v);
+    if (auto v = find(caInfoKeys)) m_caInfo = mk(*v);
+
+    static bool logged(false);
+    if (m_verbose && !logged)
+    {
+        logged = true;
+        std::cout << "Curl config:" << std::boolalpha <<
+            "\n\ttimeout: " << m_timeout << "s" <<
+            "\n\tfollowRedirect: " << m_followRedirect <<
+            "\n\tverifyPeer: " << m_verifyPeer <<
+            "\n\tcaBundle: " << (m_caPath ? *m_caPath : "(default)") <<
+            "\n\tcaInfo: " << (m_caInfo ? *m_caInfo : "(default)") <<
+            std::endl;
+    }
+#endif
 }
 
 Curl::~Curl()
 {
+#ifdef ARBITER_CURL
     curl_easy_cleanup(m_curl);
     curl_slist_free_all(m_headers);
-    m_headers = 0;
+    m_headers = nullptr;
+#endif
 }
 
 void Curl::init(
@@ -2719,12 +2779,13 @@ void Curl::init(
         const Headers& headers,
         const Query& query)
 {
+#ifdef ARBITER_CURL
     // Reset our curl instance and header list.
     curl_slist_free_all(m_headers);
-    m_headers = 0;
+    m_headers = nullptr;
 
     // Set path.
-    const std::string path(sanitize(rawPath + buildQueryString(query)));
+    const std::string path(rawPath + buildQueryString(query));
     curl_easy_setopt(m_curl, CURLOPT_URL, path.c_str());
 
     // Needed for multithreaded Curl usage.
@@ -2733,11 +2794,22 @@ void Curl::init(
     // Substantially faster DNS lookups without IPv6.
     curl_easy_setopt(m_curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
 
-    // Don't wait forever.
-    curl_easy_setopt(m_curl, CURLOPT_TIMEOUT, m_timeout);
+    // Don't wait forever.  Use the low-speed options instead of the timeout
+    // option to make the timeout a sliding window instead of an absolute one.
+    curl_easy_setopt(m_curl, CURLOPT_LOW_SPEED_LIMIT, 1L);
+    curl_easy_setopt(m_curl, CURLOPT_LOW_SPEED_TIME, m_timeout);
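+    // With these settings, a transfer is aborted only if it runs slower than
+    // one byte per second for m_timeout consecutive seconds.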
+
+    curl_easy_setopt(m_curl, CURLOPT_CONNECTTIMEOUT_MS, 2000L);
+    curl_easy_setopt(m_curl, CURLOPT_ACCEPTTIMEOUT_MS, 2000L);
+
+    auto toLong([](bool b) { return b ? 1L : 0L; });
 
     // Configuration options.
-    if (followRedirect) curl_easy_setopt(m_curl, CURLOPT_FOLLOWLOCATION, 1L);
+    curl_easy_setopt(m_curl, CURLOPT_VERBOSE, toLong(m_verbose));
+    curl_easy_setopt(m_curl, CURLOPT_FOLLOWLOCATION, toLong(m_followRedirect));
+    curl_easy_setopt(m_curl, CURLOPT_SSL_VERIFYPEER, toLong(m_verifyPeer));
+    if (m_caPath) curl_easy_setopt(m_curl, CURLOPT_CAPATH, m_caPath->c_str());
+    if (m_caInfo) curl_easy_setopt(m_curl, CURLOPT_CAINFO, m_caInfo->c_str());
 
     // Insert supplied headers.
     for (const auto& h : headers)
@@ -2746,6 +2818,26 @@ void Curl::init(
                 m_headers,
                 (h.first + ": " + h.second).c_str());
     }
+#else
+    throw ArbiterError(fail);
+#endif
+}
+
+int Curl::perform()
+{
+#ifdef ARBITER_CURL
+    long httpCode(0);
+
+    const auto code(curl_easy_perform(m_curl));
+    curl_easy_getinfo(m_curl, CURLINFO_RESPONSE_CODE, &httpCode);
+    curl_easy_reset(m_curl);
+
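+    // Map transport-level failures (DNS, connect, stall) to a synthetic HTTP
+    // 500 so callers can treat every outcome as a status code.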
+    if (code != CURLE_OK) httpCode = 500;
+
+    return httpCode;
+#else
+    throw ArbiterError(fail);
+#endif
 }
 
 Response Curl::get(
@@ -2754,13 +2846,12 @@ Response Curl::get(
         Query query,
         const std::size_t reserve)
 {
-    long httpCode(0);
+#ifdef ARBITER_CURL
     std::vector<char> data;
 
     if (reserve) data.reserve(reserve);
 
     init(path, headers, query);
-    if (m_verbose) curl_easy_setopt(m_curl, CURLOPT_VERBOSE, 1L);
 
     // Register callback function and data pointer to consume the result.
     curl_easy_setopt(m_curl, CURLOPT_WRITEFUNCTION, getCb);
@@ -2775,20 +2866,19 @@ Response Curl::get(
     curl_easy_setopt(m_curl, CURLOPT_HEADERDATA, &receivedHeaders);
 
     // Run the command.
-    curl_easy_perform(m_curl);
-    curl_easy_getinfo(m_curl, CURLINFO_RESPONSE_CODE, &httpCode);
-
-    curl_easy_reset(m_curl);
+    const int httpCode(perform());
     return Response(httpCode, data, receivedHeaders);
+#else
+    throw ArbiterError(fail);
+#endif
 }
 
 Response Curl::head(std::string path, Headers headers, Query query)
 {
-    long httpCode(0);
+#ifdef ARBITER_CURL
     std::vector<char> data;
 
     init(path, headers, query);
-    if (m_verbose) curl_easy_setopt(m_curl, CURLOPT_VERBOSE, 1L);
 
     // Register callback function and data pointer to consume the result.
     curl_easy_setopt(m_curl, CURLOPT_WRITEFUNCTION, getCb);
@@ -2806,11 +2896,11 @@ Response Curl::head(std::string path, Headers headers, Query query)
     curl_easy_setopt(m_curl, CURLOPT_NOBODY, 1L);
 
     // Run the command.
-    curl_easy_perform(m_curl);
-    curl_easy_getinfo(m_curl, CURLINFO_RESPONSE_CODE, &httpCode);
-
-    curl_easy_reset(m_curl);
+    const int httpCode(perform());
     return Response(httpCode, data, receivedHeaders);
+#else
+    throw ArbiterError(fail);
+#endif
 }
 
 Response Curl::put(
@@ -2819,10 +2909,8 @@ Response Curl::put(
         Headers headers,
         Query query)
 {
+#ifdef ARBITER_CURL
     init(path, headers, query);
-    if (m_verbose) curl_easy_setopt(m_curl, CURLOPT_VERBOSE, 1L);
-
-    long httpCode(0);
 
     std::unique_ptr<PutData> putData(new PutData(data));
 
@@ -2848,11 +2936,11 @@ Response Curl::put(
     curl_easy_setopt(m_curl, CURLOPT_WRITEFUNCTION, eatLogging);
 
     // Run the command.
-    curl_easy_perform(m_curl);
-    curl_easy_getinfo(m_curl, CURLINFO_RESPONSE_CODE, &httpCode);
-
-    curl_easy_reset(m_curl);
+    const int httpCode(perform());
     return Response(httpCode);
+#else
+    throw ArbiterError(fail);
+#endif
 }
 
 Response Curl::post(
@@ -2861,10 +2949,8 @@ Response Curl::post(
         Headers headers,
         Query query)
 {
+#ifdef ARBITER_CURL
     init(path, headers, query);
-    if (m_verbose) curl_easy_setopt(m_curl, CURLOPT_VERBOSE, 1L);
-
-    long httpCode(0);
 
     std::unique_ptr<PutData> putData(new PutData(data));
     std::vector<char> writeData;
@@ -2896,15 +2982,97 @@ Response Curl::post(
             static_cast<curl_off_t>(data.size()));
 
     // Run the command.
-    curl_easy_perform(m_curl);
-    curl_easy_getinfo(m_curl, CURLINFO_RESPONSE_CODE, &httpCode);
+    const int httpCode(perform());
+    return Response(httpCode, writeData, receivedHeaders);
+#else
+    throw ArbiterError(fail);
+#endif
+}
 
-    curl_easy_reset(m_curl);
-    Response response(httpCode, writeData, receivedHeaders);
-    return response;
+} // namespace http
+} // namespace arbiter
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
 }
+#endif
 
-///////////////////////////////////////////////////////////////////////////////
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: arbiter/util/curl.cpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: arbiter/util/http.cpp
+// //////////////////////////////////////////////////////////////////////
+
+#ifndef ARBITER_IS_AMALGAMATION
+#include <arbiter/util/http.hpp>
+#endif
+
+#ifdef ARBITER_CURL
+#include <curl/curl.h>
+#endif
+
+#include <cctype>
+#include <iomanip>
+#include <iostream>
+#include <numeric>
+#include <set>
+#include <sstream>
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+namespace ARBITER_CUSTOM_NAMESPACE
+{
+#endif
+
+namespace arbiter
+{
+namespace http
+{
+
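+// Percent-encode every byte that is not alphanumeric, not an RFC 3986
+// unreserved character, and not listed in excStr.  For example,
+// sanitize("a b/c") yields "a%20b/c" ('/' is excluded by default), while
+// sanitize("a b/c", "") yields "a%20b%2Fc".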
+std::string sanitize(const std::string path, const std::string excStr)
+{
+    static const std::set<char> unreserved = { '-', '.', '_', '~' };
+    const std::set<char> exclusions(excStr.begin(), excStr.end());
+    std::ostringstream result;
+    result.fill('0');
+    result << std::hex;
+
+    for (const auto c : path)
+    {
+        // Cast to unsigned char: passing a negative char to isalnum is UB.
+        if (std::isalnum(static_cast<unsigned char>(c)) ||
+                unreserved.count(c) || exclusions.count(c))
+        {
+            result << c;
+        }
+        else
+        {
+            result << std::uppercase;
+            result << '%' << std::setw(2) <<
+                static_cast<int>(static_cast<uint8_t>(c));
+            result << std::nouppercase;
+        }
+    }
+
+    return result.str();
+}
+
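+// For example, a query of { {"a", "1"}, {"b", "2"} } yields "?a=1&b=2"; an
+// empty query yields an empty string.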
+std::string buildQueryString(const Query& query)
+{
+    return std::accumulate(
+            query.begin(),
+            query.end(),
+            std::string(),
+            [](const std::string& out, const Query::value_type& keyVal)
+            {
+                const char sep(out.empty() ? '?' : '&');
+                return out + sep + keyVal.first + '=' + keyVal.second;
+            });
+}
 
 Resource::Resource(
         Pool& pool,
@@ -2988,30 +3156,33 @@ Response Resource::exec(std::function<Response()> f)
 Pool::Pool(
         const std::size_t concurrent,
         const std::size_t retry,
-        const Json::Value& json)
+        const Json::Value json)
     : m_curls(concurrent)
     , m_available(concurrent)
     , m_retry(retry)
     , m_mutex()
     , m_cv()
 {
-    const bool verbose(
-            json.isMember("arbiter") ?
-                json["arbiter"]["verbose"].asBool() : false);
-
-    const std::size_t timeout(
-            json.isMember("http") && json["http"]["timeout"].asUInt64() ?
-                json["http"]["timeout"].asUInt64() : defaultHttpTimeout);
+#ifdef ARBITER_CURL
+    curl_global_init(CURL_GLOBAL_ALL);
 
     for (std::size_t i(0); i < concurrent; ++i)
     {
         m_available[i] = i;
-        m_curls[i].reset(new Curl(verbose, timeout));
+        m_curls[i].reset(new Curl(json));
     }
+#endif
 }
 
+Pool::~Pool() { }
+
 Resource Pool::acquire()
 {
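+    // When built without ARBITER_CURL, the pool contains no usable entries.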
+    if (m_curls.empty() || !m_curls.front())
+    {
+        throw std::runtime_error("Cannot acquire from empty pool");
+    }
+
     std::unique_lock<std::mutex> lock(m_mutex);
     m_cv.wait(lock, [this]()->bool { return !m_available.empty(); });
 
@@ -3050,11 +3221,87 @@ void Pool::release(const std::size_t id)
 
 
 // //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: arbiter/util/ini.cpp
+// //////////////////////////////////////////////////////////////////////
+
+#ifndef ARBITER_IS_AMALGAMATION
+#include <arbiter/util/ini.hpp>
+#endif
+
+#ifndef ARBITER_IS_AMALGAMATION
+#include <arbiter/util/util.hpp>
+#endif
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+namespace ARBITER_CUSTOM_NAMESPACE
+{
+#endif
+
+namespace arbiter
+{
+namespace ini
+{
+
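+// Parse a minimal INI document: "[section]" headers, "key=value" pairs, and
+// comments introduced by ';' or '#'.  For example, parsing
+//      [default]
+//      aws_access_key_id = AKID
+// yields contents["default"]["aws_access_key_id"] == "AKID".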
+Contents parse(const std::string& s)
+{
+    Contents contents;
+
+    Section section;
+
+    for (std::string line : util::split(s))
+    {
+        line = util::stripWhitespace(line);
+        const std::size_t semiPos(line.find_first_of(';'));
+        const std::size_t hashPos(line.find_first_of('#'));
+        line = line.substr(0, std::min(semiPos, hashPos));
+
+        if (line.size())
+        {
+            if (line.front() == '[' && line.back() == ']')
+            {
+                section = line.substr(1, line.size() - 2);
+            }
+            else
+            {
+                const std::size_t equals(line.find_first_of('='));
+                if (equals != std::string::npos)
+                {
+                    const Key key(line.substr(0, equals));
+                    const Val val(line.substr(equals + 1));
+                    contents[section][key] = val;
+                }
+            }
+        }
+    }
+
+    return contents;
+}
+
+} // namespace ini
+} // namespace arbiter
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+}
+#endif
+
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: arbiter/util/ini.cpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
 // Beginning of content of file: arbiter/util/md5.cpp
 // //////////////////////////////////////////////////////////////////////
 
 #include <cstddef>
 #include <cstdlib>
+#include <cstring>
 #include <memory>
 
 #ifndef ARBITER_IS_AMALGAMATION
@@ -3213,7 +3460,7 @@ void md5_final(Md5Context *ctx, uint8_t hash[])
         while (i < 64)
             ctx->data[i++] = 0x00;
         md5_transform(ctx, ctx->data);
-        memset(ctx->data, 0, 56);
+        std::memset(ctx->data, 0, 56);
     }
 
     // Append to the padding the total message's length in bits and transform.
@@ -3277,6 +3524,7 @@ std::string md5(const std::string& data)
 // //////////////////////////////////////////////////////////////////////
 
 #include <cstdlib>
+#include <cstring>
 #include <memory>
 
 #ifndef ARBITER_IS_AMALGAMATION
@@ -3429,7 +3677,7 @@ void sha256_final(Sha256Context *ctx, uint8_t hash[])
         }
 
         sha256_transform(ctx, ctx->data);
-        memset(ctx->data, 0, 56);
+        std::memset(ctx->data, 0, 56);
     }
 
     // Append to the padding the total message's length in bits and transform.
@@ -3637,6 +3885,133 @@ std::string encodeAsHex(const std::string& input)
 
 
 // //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: arbiter/util/time.cpp
+// //////////////////////////////////////////////////////////////////////
+
+#ifndef ARBITER_IS_AMALGAMATION
+#include <arbiter/util/time.hpp>
+#endif
+
+#include <ctime>
+#include <iomanip>
+#include <iostream>
+#include <mutex>
+#include <sstream>
+
+#ifndef ARBITER_IS_AMALGAMATION
+#include <arbiter/util/types.hpp>
+#endif
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+namespace ARBITER_CUSTOM_NAMESPACE
+{
+#endif
+
+namespace arbiter
+{
+
+namespace
+{
+    std::mutex mutex;
+
+    int64_t utcOffsetSeconds()
+    {
+        std::lock_guard<std::mutex> lock(mutex);
+        std::time_t now(std::time(nullptr));
+        std::tm utc(*std::gmtime(&now));
+        std::tm loc(*std::localtime(&now));
+        return std::difftime(std::mktime(&utc), std::mktime(&loc));
+    }
+
+    std::tm getTm()
+    {
+        std::tm tm;
+        tm.tm_sec = 0;
+        tm.tm_min = 0;
+        tm.tm_hour = 0;
+        tm.tm_mday = 0;
+        tm.tm_mon = 0;
+        tm.tm_year = 0;
+        tm.tm_wday = 0;
+        tm.tm_yday = 0;
+        tm.tm_isdst = 0;
+        return tm;
+    }
+}
+
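+// Formats producing, e.g., "2017-04-03T19:25:26Z", "20170403T192526Z", and
+// "20170403" respectively.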
+const std::string Time::iso8601 = "%Y-%m-%dT%H:%M:%SZ";
+const std::string Time::iso8601NoSeparators = "%Y%m%dT%H%M%SZ";
+const std::string Time::dateNoSeparators = "%Y%m%d";
+
+Time::Time()
+{
+    m_time = std::time(nullptr);
+}
+
+Time::Time(const std::string& s, const std::string& format)
+{
+    static const int64_t utcOffset(utcOffsetSeconds());
+
+    auto tm(getTm());
+#ifndef ARBITER_WINDOWS
+    // We'd prefer to use get_time, but it has poor compiler support.
+    if (!strptime(s.c_str(), format.c_str(), &tm))
+    {
+        throw ArbiterError("Failed to parse " + s + " as time: " + format);
+    }
+#else
+    std::istringstream ss(s);
+    ss >> std::get_time(&tm, format.c_str());
+    if (ss.fail())
+    {
+        throw ArbiterError("Failed to parse " + s + " as time: " + format);
+    }
+#endif
+    tm.tm_sec -= utcOffset;
+    m_time = std::mktime(&tm);
+}
+
+std::string Time::str(const std::string& format) const
+{
+    std::lock_guard<std::mutex> lock(mutex);
+#ifndef ARBITER_WINDOWS
+    // We'd prefer to use put_time, but it has poor compiler support.
+    // We're already locked here for gmtime, so might as well make this static.
+    static std::vector<char> s(256, 0);
+
+    const std::size_t size =
+        strftime(s.data(), s.size(), format.c_str(), std::gmtime(&m_time));
+
+    return std::string(s.data(), s.data() + size);
+#else
+    std::ostringstream ss;
+    ss << std::put_time(std::gmtime(&m_time), format.c_str());
+    return ss.str();
+#endif
+}
+
+int64_t Time::operator-(const Time& other) const
+{
+    return std::difftime(m_time, other.m_time);
+}
+
+} // namespace arbiter
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+}
+#endif
+
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: arbiter/util/time.cpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
 // Beginning of content of file: arbiter/util/util.cpp
 // //////////////////////////////////////////////////////////////////////
 
@@ -3646,6 +4021,8 @@ std::string encodeAsHex(const std::string& input)
 #include <arbiter/arbiter.hpp>
 #endif
 
+#include <algorithm>
+
 #ifdef ARBITER_CUSTOM_NAMESPACE
 namespace ARBITER_CUSTOM_NAMESPACE
 {
@@ -3735,6 +4112,42 @@ std::unique_ptr<std::string> env(const std::string& var)
     return result;
 }
 
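+// Split on the delimiter, stripping all whitespace (including interior
+// whitespace) from each token.  For example, split("a b\nc") yields
+// { "ab", "c" }.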
+std::vector<std::string> split(const std::string& in, const char delimiter)
+{
+    std::size_t index(0);
+    std::size_t pos(0);
+    std::vector<std::string> lines;
+
+    do
+    {
+        index = in.find(delimiter, pos);
+        std::string line(in.substr(pos, index - pos));
+
+        line.erase(
+                std::remove_if(line.begin(), line.end(),
+                    [](unsigned char c) { return std::isspace(c) != 0; }),
+                line.end());
+
+        lines.push_back(line);
+
+        pos = index + 1;
+    }
+    while (index != std::string::npos);
+
+    return lines;
+}
+
+std::string stripWhitespace(const std::string& in)
+{
+    std::string out(in);
+    out.erase(
+            std::remove_if(
+                out.begin(),
+                out.end(),
+                [](unsigned char c) { return std::isspace(c) != 0; }),
+            out.end());
+    return out;
+}
+
 } // namespace util
 } // namespace arbiter
 
diff --git a/vendor/arbiter/arbiter.hpp b/vendor/arbiter/arbiter.hpp
index ff55e53..bb00983 100644
--- a/vendor/arbiter/arbiter.hpp
+++ b/vendor/arbiter/arbiter.hpp
@@ -1,7 +1,7 @@
 /// Arbiter amalgamated header (https://github.com/connormanning/arbiter).
 /// It is intended to be used with #include "arbiter.hpp"
 
-// Git SHA: 333088e3eac1056ba6e718984a967cc241c4d385
+// Git SHA: 5717b5f7b450db996b4a44d4865671f925e2ff50
 
 // //////////////////////////////////////////////////////////////////////
 // Beginning of content of file: LICENSE
@@ -49,30 +49,16 @@ SOFTWARE.
 #define ARBITER_EXTERNAL_JSON
 
 // //////////////////////////////////////////////////////////////////////
-// Beginning of content of file: arbiter/util/http.hpp
+// Beginning of content of file: arbiter/util/types.hpp
 // //////////////////////////////////////////////////////////////////////
 
 #pragma once
 
-#include <condition_variable>
 #include <map>
-#include <memory>
-#include <mutex>
+#include <stdexcept>
 #include <string>
 #include <vector>
 
-#include <curl/curl.h>
-
-#ifndef ARBITER_IS_AMALGAMATION
-#ifndef ARBITER_EXTERNAL_JSON
-#include <arbiter/third/json/json.hpp>
-#endif
-#endif
-
-#ifdef ARBITER_EXTERNAL_JSON
-#include <json/json.h>
-#endif
-
 #ifdef ARBITER_CUSTOM_NAMESPACE
 namespace ARBITER_CUSTOM_NAMESPACE
 {
@@ -80,6 +66,14 @@ namespace ARBITER_CUSTOM_NAMESPACE
 
 namespace arbiter
 {
+
+/** @brief Exception class for all internally thrown runtime errors. */
+class ArbiterError : public std::runtime_error
+{
+public:
+    ArbiterError(std::string msg) : std::runtime_error(msg) { }
+};
+
 namespace http
 {
 
@@ -89,17 +83,6 @@ using Headers = std::map<std::string, std::string>;
 /** HTTP query parameters. */
 using Query = std::map<std::string, std::string>;
 
-/** Perform URI percent-encoding, without encoding characters included in
- * @p exclusions.
- */
-std::string sanitize(std::string path, std::string exclusions = "/");
-
-/** Build a query string from key-value pairs.  If @p query is empty, the
- * result is an empty string.  Otherwise, the result will start with the
- * '?' character.
- */
-std::string buildQueryString(const http::Query& query);
-
 /** @cond arbiter_internal */
 
 class Response
@@ -140,12 +123,74 @@ private:
     Headers m_headers;
 };
 
+/** @endcond */
+
+} // namespace http
+} // namespace arbiter
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+}
+#endif
+
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: arbiter/util/types.hpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: arbiter/util/curl.hpp
+// //////////////////////////////////////////////////////////////////////
+
+#pragma once
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+#ifndef ARBITER_IS_AMALGAMATION
+
+#include <arbiter/util/types.hpp>
+
+#ifndef ARBITER_EXTERNAL_JSON
+#include <arbiter/third/json/json.hpp>
+#endif
+
+#endif
+
+
+
+#ifdef ARBITER_EXTERNAL_JSON
+#include <json/json.h>
+#endif
+
+class curl_slist;
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+namespace ARBITER_CUSTOM_NAMESPACE
+{
+#endif
+
+namespace arbiter
+{
+namespace http
+{
+
+/** @cond arbiter_internal */
+
 class Pool;
 
 class Curl
 {
     friend class Pool;
 
+    static constexpr std::size_t defaultHttpTimeout = 5;
+
 public:
     ~Curl();
 
@@ -170,21 +215,105 @@ public:
             Query query);
 
 private:
-    Curl(bool verbose, std::size_t timeout);
+    Curl(const Json::Value& json = Json::Value());
 
     void init(std::string path, const Headers& headers, const Query& query);
 
+    // Returns HTTP status code.
+    int perform();
+
     Curl(const Curl&);
     Curl& operator=(const Curl&);
 
-    CURL* m_curl;
-    curl_slist* m_headers;
-    const bool m_verbose;
-    const std::size_t m_timeout;
+    void* m_curl = nullptr;
+    curl_slist* m_headers = nullptr;
+
+    bool m_verbose = false;
+    long m_timeout = defaultHttpTimeout;
+    bool m_followRedirect = true;
+    bool m_verifyPeer = true;
+    std::unique_ptr<std::string> m_caPath;
+    std::unique_ptr<std::string> m_caInfo;
 
     std::vector<char> m_data;
 };
 
+/** @endcond */
+
+} // namespace http
+} // namespace arbiter
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+}
+#endif
+
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: arbiter/util/curl.hpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: arbiter/util/http.hpp
+// //////////////////////////////////////////////////////////////////////
+
+#pragma once
+
+#include <condition_variable>
+#include <cstddef>
+#include <functional>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+#ifndef ARBITER_IS_AMALGAMATION
+
+#include <arbiter/util/curl.hpp>
+#include <arbiter/util/types.hpp>
+
+#ifndef ARBITER_EXTERNAL_JSON
+#include <arbiter/third/json/json.hpp>
+#endif
+
+#endif
+
+
+
+#ifdef ARBITER_EXTERNAL_JSON
+#include <json/json.h>
+#endif
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+namespace ARBITER_CUSTOM_NAMESPACE
+{
+#endif
+
+namespace arbiter
+{
+namespace http
+{
+
+/** Perform URI percent-encoding, without encoding characters included in
+ * @p exclusions.
+ */
+std::string sanitize(std::string path, std::string exclusions = "/");
+
+/** Build a query string from key-value pairs.  If @p query is empty, the
+ * result is an empty string.  Otherwise, the result will start with the
+ * '?' character.
+ */
+std::string buildQueryString(const http::Query& query);
+
+/** @cond arbiter_internal */
+
+class Pool;
+
 class Resource
 {
 public:
@@ -230,9 +359,10 @@ class Pool
 
 public:
     Pool(
-            std::size_t concurrent,
-            std::size_t retry,
-            const Json::Value& json);
+            std::size_t concurrent = 4,
+            std::size_t retry = 4,
+            Json::Value json = Json::Value());
+    ~Pool();
 
     Resource acquire();
 
@@ -267,6 +397,104 @@ private:
 
 
 // //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: arbiter/util/ini.hpp
+// //////////////////////////////////////////////////////////////////////
+
+#pragma once
+
+#include <map>
+#include <string>
+#include <vector>
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+namespace ARBITER_CUSTOM_NAMESPACE
+{
+#endif
+
+namespace arbiter
+{
+namespace ini
+{
+
+using Section = std::string;
+using Key = std::string;
+using Val = std::string;
+using Contents = std::map<Section, std::map<Key, Val>>;
+
+Contents parse(const std::string& s);
+
+} // namespace ini
+
+} // namespace arbiter
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+}
+#endif
+
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: arbiter/util/ini.hpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: arbiter/util/time.hpp
+// //////////////////////////////////////////////////////////////////////
+
+#pragma once
+
+#include <cstdint>
+#include <ctime>
+#include <string>
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+namespace ARBITER_CUSTOM_NAMESPACE
+{
+#endif
+
+namespace arbiter
+{
+
+class Time
+{
+public:
+    static const std::string iso8601;
+    static const std::string iso8601NoSeparators;
+    static const std::string dateNoSeparators;
+
+    Time();
+    Time(const std::string& s, const std::string& format = iso8601);
+
+    std::string str(const std::string& format = iso8601) const;
+
+    // Return value is in seconds.
+    int64_t operator-(const Time& other) const;
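+    // e.g. Time("2017-04-03T19:25:26Z") - Time("2017-04-03T19:25:00Z") == 26.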
+
+private:
+    std::time_t m_time;
+};
+
+} // namespace arbiter
+
+#ifdef ARBITER_CUSTOM_NAMESPACE
+}
+#endif
+
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: arbiter/util/time.hpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
 // Beginning of content of file: arbiter/driver.hpp
 // //////////////////////////////////////////////////////////////////////
 
@@ -629,14 +857,14 @@ public:
     /** Perform an HTTP GET request. */
     std::string get(
             std::string path,
-            http::Headers headers,
-            http::Query query) const;
+            http::Headers headers = http::Headers(),
+            http::Query query = http::Query()) const;
 
     /** Perform an HTTP GET request. */
     std::unique_ptr<std::string> tryGet(
             std::string path,
-            http::Headers headers,
-            http::Query query) const;
+            http::Headers headers = http::Headers(),
+            http::Query query = http::Query()) const;
 
     /** Perform an HTTP GET request. */
     std::vector<char> getBinary(
@@ -3592,7 +3820,10 @@ std::string encodeAsHex(const std::string& data);
 
 #pragma once
 
+#include <memory>
 #include <string>
+#include <utility>
+#include <vector>
 
 #ifdef ARBITER_CUSTOM_NAMESPACE
 namespace ARBITER_CUSTOM_NAMESPACE
@@ -3736,6 +3967,30 @@ namespace util
      */
     std::unique_ptr<std::string> env(const std::string& var);
 
+    /** @brief Split a string on a token. */
+    std::vector<std::string> split(const std::string& s, char delimiter = '\n');
+
+    /** @brief Remove whitespace. */
+    std::string stripWhitespace(const std::string& s);
+
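+    /** @brief C++11 stand-in for C++14's std::make_unique. */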
+    template<typename T, typename... Args>
+    std::unique_ptr<T> makeUnique(Args&&... args)
+    {
+        return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+    }
+
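+    /** @brief Copy @p t into a fresh unique_ptr. */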
+    template<typename T>
+    std::unique_ptr<T> clone(const T& t)
+    {
+        return makeUnique<T>(t);
+    }
+
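+    /** @brief Copy @p t if non-null, else return a null unique_ptr. */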
+    template<typename T>
+    std::unique_ptr<T> maybeClone(const T* t)
+    {
+        if (t) return makeUnique<T>(*t);
+        else return std::unique_ptr<T>();
+    }
 } // namespace util
 
 } // namespace arbiter
@@ -3761,10 +4016,13 @@ namespace util
 #pragma once
 
 #include <memory>
+#include <mutex>
 #include <string>
 #include <vector>
 
 #ifndef ARBITER_IS_AMALGAMATION
+#include <arbiter/util/time.hpp>
+#include <arbiter/util/util.hpp>
 #include <arbiter/drivers/http.hpp>
 #endif
 
@@ -3782,31 +4040,31 @@ namespace drivers
 /** @brief Amazon %S3 driver. */
 class S3 : public Http
 {
-public:
     class Auth;
+    class AuthFields;
+    class Config;
 
+public:
     S3(
             http::Pool& pool,
-            const Auth& auth,
-            std::string region = "us-east-1",
-            bool sse = false,
-            bool precheck = false);
-
-    /** Try to construct an S3 Driver.  Searches @p json primarily for the keys
-     * `access` and `hidden` to construct an S3::Auth.  If not found, common
-     * filesystem locations and then the environment will be searched (see
-     * S3::Auth::find).
-     *
-     * Server-side encryption may be enabled by setting key `sse` to `true` in
-     * @p json.
+            std::string profile,
+            std::unique_ptr<Auth> auth,
+            std::unique_ptr<Config> config);
+
+    /** Try to construct an S3 driver.  The configuration/credential discovery
+     * follows, in order:
+     *      - Environment settings.
+     *      - Arbiter JSON configuration.
+     *      - Well-known files or their environment overrides, like
+     *          `~/.aws/credentials` or the file at AWS_CREDENTIAL_FILE.
+     *      - EC2 instance profile.
      */
     static std::unique_ptr<S3> create(
             http::Pool& pool,
             const Json::Value& json);
 
-    static std::string extractProfile(const Json::Value& json);
-
-    virtual std::string type() const override { return "s3"; }
+    // Overrides.
+    virtual std::string type() const override;
 
     virtual std::unique_ptr<std::size_t> tryGetSize(
             std::string path) const override;
@@ -3820,32 +4078,13 @@ public:
 
     virtual void copy(std::string src, std::string dst) const override;
 
-    /** @brief AWS authentication information. */
-    class Auth
-    {
-    public:
-        Auth(std::string access, std::string hidden);
-
-        /** @brief Search for credentials in some common locations.
-         *
-         * See:
-         * docs.aws.amazon.com/AWSJavaScriptSDK/guide/node-configuring.html
-         *
-         * Uses methods 2 and 3 of "Setting AWS Credentials":
-         *      - Check for them in `~/.aws/credentials`.
-         *      - If not found, try the environment settings.
-         */
-        static std::unique_ptr<Auth> find(std::string profile = "");
-
-        std::string access() const;
-        std::string hidden() const;
+private:
+    static std::string extractProfile(const Json::Value& json);
 
-    private:
-        std::string m_access;
-        std::string m_hidden;
-    };
+    static std::unique_ptr<Config> extractConfig(
+            const Json::Value& json,
+            std::string profile);
 
-private:
     /** Inherited from Drivers::Http. */
     virtual bool get(
             std::string path,
@@ -3857,96 +4096,153 @@ private:
             std::string path,
             bool verbose) const override;
 
-    struct Resource
-    {
-        Resource(std::string baseUrl, std::string fullPath);
+    class ApiV4;
+    class Resource;
 
-        std::string url() const;
-        std::string host() const;
-        std::string baseUrl() const { return m_baseUrl; }
-        std::string bucket() const { return m_bucket; }
-        std::string object() const;
+    std::string m_profile;
+    std::unique_ptr<Auth> m_auth;
+    std::unique_ptr<Config> m_config;
+};
 
-    private:
-        std::string m_baseUrl;
-        std::string m_bucket;
-        std::string m_object;
-        bool m_virtualHosted;
-    };
+class S3::AuthFields
+{
+public:
+    AuthFields(std::string access, std::string hidden, std::string token = "")
+        : m_access(access), m_hidden(hidden), m_token(token)
+    { }
 
-    class FormattedTime
-    {
-    public:
-        FormattedTime();
+    const std::string& access() const { return m_access; }
+    const std::string& hidden() const { return m_hidden; }
+    const std::string& token() const { return m_token; }
 
-        const std::string& date() const { return m_date; }
-        const std::string& time() const { return m_time; }
+private:
+    std::string m_access;
+    std::string m_hidden;
+    std::string m_token;
+};
 
-        std::string amazonDate() const
-        {
-            return date() + 'T' + time() + 'Z';
-        }
+class S3::Auth
+{
+public:
+    Auth(std::string access, std::string hidden)
+        : m_access(access)
+        , m_hidden(hidden)
+    { }
 
-    private:
-        std::string formatTime(const std::string& format) const;
+    Auth(std::string iamRole)
+        : m_role(util::makeUnique<std::string>(iamRole))
+    { }
 
-        const std::string m_date;
-        const std::string m_time;
-    };
+    static std::unique_ptr<Auth> create(
+            const Json::Value& json,
+            std::string profile);
 
-    class ApiV4
-    {
-    public:
-        ApiV4(
-                std::string verb,
-                const std::string& region,
-                const Resource& resource,
-                const S3::Auth& auth,
-                const http::Query& query,
-                const http::Headers& headers,
-                const std::vector<char>& data);
-
-        const http::Headers& headers() const { return m_headers; }
-        const http::Query& query() const { return m_query; }
-
-        const std::string& signedHeadersString() const
-        {
-            return m_signedHeadersString;
-        }
+    AuthFields fields() const;
 
-    private:
-        std::string buildCanonicalRequest(
-                std::string verb,
-                const Resource& resource,
-                const http::Query& query,
-                const std::vector<char>& data) const;
-
-        std::string buildStringToSign(
-                const std::string& canonicalRequest) const;
-
-        std::string calculateSignature(
-                const std::string& stringToSign) const;
-
-        std::string getAuthHeader(
-                const std::string& signedHeadersString,
-                const std::string& signature) const;
-
-        const S3::Auth& m_auth;
-        const std::string m_region;
-        const FormattedTime m_formattedTime;
-
-        http::Headers m_headers;
-        http::Query m_query;
-        std::string m_canonicalHeadersString;
-        std::string m_signedHeadersString;
-    };
+private:
+    mutable std::string m_access;
+    mutable std::string m_hidden;
+    mutable std::string m_token;
 
-    Auth m_auth;
+    std::unique_ptr<std::string> m_role;
+    mutable std::unique_ptr<Time> m_expiration;
+    mutable std::mutex m_mutex;
+};
 
-    std::string m_region;
-    std::string m_baseUrl;
+class S3::Config
+{
+public:
+    Config(std::string region, std::string baseUrl, bool sse, bool precheck);
+
+    static std::unique_ptr<Config> create(
+            const Json::Value& json,
+            std::string profile);
+
+    const std::string& region() const { return m_region; }
+    const std::string& baseUrl() const { return m_baseUrl; }
+    const http::Headers& baseHeaders() const { return m_baseHeaders; }
+    bool precheck() const { return m_precheck; }
+
+private:
+    static std::string extractRegion(
+            const Json::Value& json,
+            std::string profile);
+
+    static std::string extractBaseUrl(
+            const Json::Value& json,
+            std::string region);
+
+    const std::string m_region;
+    const std::string m_baseUrl;
     http::Headers m_baseHeaders;
-    bool m_precheck;
+    const bool m_precheck;
+};
+
+
+
+class S3::Resource
+{
+public:
+    Resource(std::string baseUrl, std::string fullPath);
+
+    std::string url() const;
+    std::string host() const;
+    std::string baseUrl() const { return m_baseUrl; }
+    std::string bucket() const { return m_bucket; }
+    std::string object() const;
+
+private:
+    std::string m_baseUrl;
+    std::string m_bucket;
+    std::string m_object;
+    bool m_virtualHosted;
+};
+
+class S3::ApiV4
+{
+public:
+    ApiV4(
+            std::string verb,
+            const std::string& region,
+            const Resource& resource,
+            const S3::AuthFields authFields,
+            const http::Query& query,
+            const http::Headers& headers,
+            const std::vector<char>& data);
+
+    const http::Headers& headers() const { return m_headers; }
+    const http::Query& query() const { return m_query; }
+
+    const std::string& signedHeadersString() const
+    {
+        return m_signedHeadersString;
+    }
+
+private:
+    std::string buildCanonicalRequest(
+            std::string verb,
+            const Resource& resource,
+            const http::Query& query,
+            const std::vector<char>& data) const;
+
+    std::string buildStringToSign(
+            const std::string& canonicalRequest) const;
+
+    std::string calculateSignature(
+            const std::string& stringToSign) const;
+
+    std::string getAuthHeader(
+            const std::string& signedHeadersString,
+            const std::string& signature) const;
+
+    const S3::AuthFields m_authFields;
+    const std::string m_region;
+    const Time m_time;
+
+    http::Headers m_headers;
+    http::Query m_query;
+    std::string m_canonicalHeadersString;
+    std::string m_signedHeadersString;
 };
 
 } // namespace drivers
@@ -4142,7 +4438,7 @@ private:
 
 #ifndef ARBITER_IS_AMALGAMATION
 
-#include <arbiter/util/http.hpp>
+#include <arbiter/util/types.hpp>
 
 #endif
 
@@ -4155,6 +4451,7 @@ namespace arbiter
 {
 
 namespace drivers { class Http; }
+namespace http { class Pool; }
 
 class Driver;
 
@@ -4342,6 +4639,7 @@ private:
 #include <arbiter/drivers/http.hpp>
 #include <arbiter/drivers/s3.hpp>
 #include <arbiter/drivers/dropbox.hpp>
+#include <arbiter/util/types.hpp>
 
 #ifndef ARBITER_EXTERNAL_JSON
 #include <arbiter/third/json/json.hpp>
@@ -4363,12 +4661,7 @@ namespace ARBITER_CUSTOM_NAMESPACE
 namespace arbiter
 {
 
-/** @brief Exception class for all internally thrown runtime errors. */
-class ArbiterError : public std::runtime_error
-{
-public:
-    ArbiterError(std::string msg) : std::runtime_error(msg) { }
-};
+namespace http { class Pool; }
 
 /** @brief The primary interface for storage abstraction.
  *
@@ -4620,14 +4913,14 @@ public:
     /** Fetch the common HTTP pool, which may be useful when dynamically
      * adding a Driver via Arbiter::addDriver.
      */
-    http::Pool& httpPool() { return m_pool; }
+    http::Pool& httpPool() { return *m_pool; }
 
 private:
     const drivers::Http* tryGetHttpDriver(std::string path) const;
     const drivers::Http& getHttpDriver(std::string path) const;
 
     DriverMap m_drivers;
-    http::Pool m_pool;
+    std::unique_ptr<http::Pool> m_pool;
 };
 
 } // namespace arbiter
diff --git a/vendor/nanoflann/nanoflann.hpp b/vendor/nanoflann/nanoflann.hpp
index 1c9a71b..445aa29 100644
--- a/vendor/nanoflann/nanoflann.hpp
+++ b/vendor/nanoflann/nanoflann.hpp
@@ -3,7 +3,7 @@
  *
  * Copyright 2008-2009  Marius Muja (mariusm at cs.ubc.ca). All rights reserved.
  * Copyright 2008-2009  David G. Lowe (lowe at cs.ubc.ca). All rights reserved.
- * Copyright 2011-2013  Jose Luis Blanco (joseluisblancoc at gmail.com).
+ * Copyright 2011-2016  Jose Luis Blanco (joseluisblancoc at gmail.com).
  *   All rights reserved.
  *
  * THE BSD LICENSE
@@ -30,6 +30,19 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************/
 
+/** \mainpage nanoflann C++ API documentation
+  *  nanoflann is a C++ header-only library for building KD-Trees, mostly
+  *  optimized for 2D or 3D point clouds.
+  *
+  *  nanoflann does not require compiling or installing, just an
+  *  #include <nanoflann.hpp> in your code.
+  *
+  *  See:
+  *   - <a href="modules.html" >C++ API organized by modules</a>
+  *   - <a href="https://github.com/jlblancoc/nanoflann" >Online README</a>
+  *   - <a href="http://jlblancoc.github.io/nanoflann/" >Doxygen documentation</a>
+  */
+
 #ifndef  NANOFLANN_HPP_
 #define  NANOFLANN_HPP_
 
@@ -38,8 +51,8 @@
 #include <algorithm>
 #include <stdexcept>
 #include <cstdio>  // for fwrite()
-#include <cstdlib> // for malloc()
-#include <cmath>   // for fabs(),...
+#include <cmath>   // for abs()
+#include <cstdlib> // for abs()
 #include <limits>
 
 // Avoid conflicting declaration of min/max macros in windows headers
@@ -56,8 +69,8 @@ namespace nanoflann
 /** @addtogroup nanoflann_grp nanoflann C++ library for ANN
   *  @{ */
 
-  	/** Library version: 0xMmP (M=Major,m=minor,P=path) */
-	#define NANOFLANN_VERSION 0x118
+  	/** Library version: 0xMmP (M=Major,m=minor,P=patch) */
+	#define NANOFLANN_VERSION 0x123
 
 	/** @addtogroup result_sets_grp Result set classes
 	  *  @{ */
@@ -70,7 +83,7 @@ namespace nanoflann
 		CountType count;
 
 	public:
-		inline KNNResultSet(CountType capacity_) : capacity(capacity_), count(0)
+		inline KNNResultSet(CountType capacity_) : indices(0), dists(0), capacity(capacity_), count(0)
 		{
 		}
 
@@ -98,7 +111,7 @@ namespace nanoflann
 		{
 			CountType i;
 			for (i=count; i>0; --i) {
-#ifdef NANOFLANN_FIRST_MATCH   // If defined and two poins have the same distance, the one with the lowest-index will be returned first.
+#ifdef NANOFLANN_FIRST_MATCH   // If defined and two points have the same distance, the one with the lowest-index will be returned first.
 				if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) ) {
 #else
 				if (dists[i-1]>dist) {
@@ -236,16 +249,10 @@ namespace nanoflann
 	/** @addtogroup metric_grp Metric (distance) classes
 	  * @{ */
 
-	template<typename T> inline T abs(T x) { return (x<0) ? -x : x; }
-	template<> inline int abs<int>(int x) { return ::abs(x); }
-	template<> inline float abs<float>(float x) { return fabsf(x); }
-	template<> inline double abs<double>(double x) { return fabs(x); }
-	template<> inline long double abs<long double>(long double x) { return fabsl(x); }
-
 	/** Manhattan distance functor (generic version, optimized for high-dimensionality data sets).
 	  *  Corresponding distance traits: nanoflann::metric_L1
 	  * \tparam T Type of the elements (e.g. double, float, uint8_t)
-	  * \tparam DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
+	  * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
 	  */
 	template<class T, class DataSource, typename _DistanceType = T>
 	struct L1_Adaptor
@@ -266,10 +273,10 @@ namespace nanoflann
 
 			/* Process 4 items with each loop for efficiency. */
 			while (a < lastgroup) {
-				const DistanceType diff0 = nanoflann::abs(a[0] - data_source.kdtree_get_pt(b_idx,d++));
-				const DistanceType diff1 = nanoflann::abs(a[1] - data_source.kdtree_get_pt(b_idx,d++));
-				const DistanceType diff2 = nanoflann::abs(a[2] - data_source.kdtree_get_pt(b_idx,d++));
-				const DistanceType diff3 = nanoflann::abs(a[3] - data_source.kdtree_get_pt(b_idx,d++));
+				const DistanceType diff0 = std::abs(a[0] - data_source.kdtree_get_pt(b_idx,d++));
+				const DistanceType diff1 = std::abs(a[1] - data_source.kdtree_get_pt(b_idx,d++));
+				const DistanceType diff2 = std::abs(a[2] - data_source.kdtree_get_pt(b_idx,d++));
+				const DistanceType diff3 = std::abs(a[3] - data_source.kdtree_get_pt(b_idx,d++));
 				result += diff0 + diff1 + diff2 + diff3;
 				a += 4;
 				if ((worst_dist>0)&&(result>worst_dist)) {
@@ -278,7 +285,7 @@ namespace nanoflann
 			}
 			/* Process last 0-3 components.  Not needed for standard vector lengths. */
 			while (a < last) {
-				result += nanoflann::abs( *a++ - data_source.kdtree_get_pt(b_idx,d++) );
+				result += std::abs( *a++ - data_source.kdtree_get_pt(b_idx,d++) );
 			}
 			return result;
 		}
@@ -286,14 +293,14 @@ namespace nanoflann
 		template <typename U, typename V>
 		inline DistanceType accum_dist(const U a, const V b, int ) const
 		{
-			return nanoflann::abs(a-b);
+			return std::abs(a-b);
 		}
 	};
 
 	/** Squared Euclidean distance functor (generic version, optimized for high-dimensionality data sets).
 	  *  Corresponding distance traits: nanoflann::metric_L2
 	  * \tparam T Type of the elements (e.g. double, float, uint8_t)
-	  * \tparam DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
+	  * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
 	  */
 	template<class T, class DataSource, typename _DistanceType = T>
 	struct L2_Adaptor
@@ -339,10 +346,10 @@ namespace nanoflann
 		}
 	};
 
-	/** Squared Euclidean distance functor (suitable for low-dimensionality datasets, like 2D or 3D point clouds)
+	/** Squared Euclidean (L2) distance functor (suitable for low-dimensionality datasets, like 2D or 3D point clouds)
 	  *  Corresponding distance traits: nanoflann::metric_L2_Simple
 	  * \tparam T Type of the elements (e.g. double, float, uint8_t)
-	  * \tparam DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
+	  * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
 	  */
 	template<class T, class DataSource, typename _DistanceType = T>
 	struct L2_Simple_Adaptor
@@ -389,21 +396,17 @@ namespace nanoflann
 
 	/** @} */
 
-
-
 	/** @addtogroup param_grp Parameter structs
 	  * @{ */
 
-	/**  Parameters (see http://code.google.com/p/nanoflann/ for help choosing the parameters)
-	  */
+	/**  Parameters (see README.md) */
 	struct KDTreeSingleIndexAdaptorParams
 	{
-		KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10, int dim_ = -1) :
-			leaf_max_size(_leaf_max_size), dim(dim_)
+		KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10) :
+			leaf_max_size(_leaf_max_size)
 		{}
 
 		size_t leaf_max_size;
-		int dim;
 	};
 
 	/** Search options for KDTreeSingleIndexAdaptor::findNeighbors() */
@@ -433,7 +436,7 @@ namespace nanoflann
 	template <typename T>
 	inline T* allocate(size_t count = 1)
 	{
-		T* mem = (T*) ::malloc(sizeof(T)*count);
+		T* mem = static_cast<T*>( ::malloc(sizeof(T)*count));
 		return mem;
 	}
 
@@ -467,7 +470,6 @@ namespace nanoflann
 		size_t  remaining;  /* Number of bytes left in current block of storage. */
 		void*   base;     /* Pointer to base of current block of storage. */
 		void*   loc;      /* Current location in block to next allocate memory. */
-		size_t  blocksize;
 
 		void internal_init()
 		{
@@ -484,7 +486,7 @@ namespace nanoflann
 		/**
 		    Default constructor. Initializes a new pool.
 		 */
-		PooledAllocator(const size_t blocksize_ = BLOCKSIZE) : blocksize(blocksize_) {
+		PooledAllocator() {
 			internal_init();
 		}
 
@@ -499,7 +501,7 @@ namespace nanoflann
 		void free_all()
 		{
 			while (base != NULL) {
-				void *prev = *((void**) base); /* Get pointer to prev block. */
+				void *prev = *(static_cast<void**>( base)); /* Get pointer to prev block. */
 				::free(base);
 				base = prev;
 			}
@@ -526,28 +528,28 @@ namespace nanoflann
 				wastedMemory += remaining;
 
 				/* Allocate new storage. */
-				const size_t bsize = (size + sizeof(void*) + (WORDSIZE-1) > blocksize) ?
-							size + sizeof(void*) + (WORDSIZE-1) : blocksize;
+				const size_t blocksize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ?
+							size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE;
 
 				// use the standard C malloc to allocate memory
-				void* m = ::malloc(bsize);
+				void* m = ::malloc(blocksize);
 				if (!m) {
 					fprintf(stderr,"Failed to allocate memory.\n");
 					return NULL;
 				}
 
 				/* Fill first word of new block with pointer to previous block. */
-				((void**) m)[0] = base;
+				static_cast<void**>(m)[0] = base;
 				base = m;
 
 				size_t shift = 0;
 				//int size_t = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1);
 
-				remaining = bsize - sizeof(void*) - shift;
-				loc = ((char*)m + sizeof(void*) + shift);
+				remaining = blocksize - sizeof(void*) - shift;
+				loc = (static_cast<char*>(m) + sizeof(void*) + shift);
 			}
 			void* rloc = loc;
-			loc = (char*)loc + size;
+			loc = static_cast<char*>(loc) + size;
 			remaining -= size;
 
 			usedMemory += size;
@@ -565,7 +567,7 @@ namespace nanoflann
 		template <typename T>
 		T* allocate(const size_t count = 1)
 		{
-			T* mem = (T*) this->malloc(sizeof(T)*count);
+			T* mem = static_cast<T*>(this->malloc(sizeof(T)*count));
 			return mem;
 		}
 
@@ -706,10 +708,10 @@ namespace nanoflann
 	 *  The class "DatasetAdaptor" must provide the following interface (can be non-virtual, inlined methods):
 	 *
 	 *  \code
-	 *   // Must return the number of data points
 	 *   // Must return the number of data points
 	 *   inline size_t kdtree_get_point_count() const { ... }
 	 *
-	 *   // Must return the Euclidean (L2) distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class:
+	 *   // [Only if using the metric_L2_Simple type] Must return the Euclidean (L2) distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class:
 	 *   inline DistanceType kdtree_distance(const T *p1, const size_t idx_p2,size_t size) const { ... }
 	 *
 	 *   // Must return the dim'th component of the idx'th point in the class:
@@ -729,6 +731,9 @@ namespace nanoflann
 	 *
 	 *  \endcode
 	 *
+	 * \tparam DatasetAdaptor The user-provided adaptor (see comments above).
+	 * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
+	 * \tparam DIM Dimensionality of data points (e.g. 3 for 3D points)
 	 * \tparam IndexType Will be typically size_t or int
 	 */
 	template <typename Distance, class DatasetAdaptor,int DIM = -1, typename IndexType = size_t>
@@ -757,37 +762,27 @@ namespace nanoflann
 
 		const KDTreeSingleIndexAdaptorParams index_params;
 
-		size_t m_size;
+		size_t m_size; //!< Number of current points in the dataset
+		size_t m_size_at_index_build; //!< Number of points in the dataset when the index was built
 		int dim;  //!< Dimensionality of each data point
 
 
 		/*--------------------- Internal Data Structures --------------------------*/
 		struct Node
 		{
+			/** Union used because a node can be either a LEAF node or a non-leaf node, so both data fields are never used simultaneously */
 			union {
-				struct
-				{
-					/**
-					 * Indices of points in leaf node
-					 */
-					IndexType left, right;
+				struct leaf
+				{
+					IndexType    left, right;  //!< Indices of points in leaf node
 				} lr;
-				struct
-				{
-					/**
-					 * Dimension used for subdivision.
-					 */
-					int divfeat;
-					/**
-					 * The values used for subdivision.
-					 */
-					DistanceType divlow, divhigh;
+				struct nonleaf
+				{
+					int          divfeat; //!< Dimension used for subdivision.
+					DistanceType divlow, divhigh; //!< The values used for subdivision.
 				} sub;
-			};
-			/**
-			 * The child nodes.
-			 */
-			Node* child1, * child2;
+			} node_type;
+			Node* child1, * child2;  //!< Child nodes (both=NULL means it's a leaf node)
 		};
 		typedef Node* NodePtr;
 
@@ -803,32 +798,8 @@ namespace nanoflann
 		/** Define "distance_vector_t" as a fixed-size or variable-size container depending on "DIM" */
 		typedef typename array_or_vector_selector<DIM,DistanceType>::container_t distance_vector_t;
 
-		/** This record represents a branch point when finding neighbors in
-			the tree.  It contains a record of the minimum distance to the query
-			point, as well as the node at which the search resumes.
-		 */
-		template <typename T, typename DistanceType>
-		struct BranchStruct
-		{
-			T node;           /* Tree node at which search resumes */
-			DistanceType mindist;     /* Minimum distance to query for all nodes below. */
-
-			BranchStruct() {}
-			BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {}
-
-			inline bool operator<(const BranchStruct<T, DistanceType>& rhs) const
-			{
-				return mindist<rhs.mindist;
-			}
-		};
-
-		/**
-		 * Array of k-d trees used to find neighbours.
-		 */
+		/** The KD-tree used to find neighbours */
 		NodePtr root_node;
-		typedef BranchStruct<NodePtr, DistanceType> BranchSt;
-		typedef BranchSt* Branch;
-
 		BoundingBox root_bbox;
 
 		/**
@@ -847,37 +818,38 @@ namespace nanoflann
 		/**
 		 * KDTree constructor
 		 *
-		 * Params:
-		 *          inputData = dataset with the input features
-		 *          params = parameters passed to the kdtree algorithm (see http://code.google.com/p/nanoflann/ for help choosing the parameters)
+		 * Refer to docs in README.md or online in https://github.com/jlblancoc/nanoflann
+		 *
+		 * The KD-Tree point dimension (the length of each point in the dataset, e.g. 3 for 3D points)
+		 * is determined by means of:
+		 *  - The \a DIM template parameter if >0 (highest priority)
+		 *  - Otherwise, the \a dimensionality parameter of this constructor.
+		 *
+		 * @param inputData Dataset with the input features
+		 * @param params Basically, the maximum leaf node size
 		 */
 		KDTreeSingleIndexAdaptor(const int dimensionality, const DatasetAdaptor& inputData, const KDTreeSingleIndexAdaptorParams& params = KDTreeSingleIndexAdaptorParams() ) :
 			dataset(inputData), index_params(params), root_node(NULL), distance(inputData)
 		{
 			m_size = dataset.kdtree_get_point_count();
+			m_size_at_index_build = m_size;
 			dim = dimensionality;
 			if (DIM>0) dim=DIM;
-			else {
-				if (params.dim>0) dim = params.dim;
-			}
 			m_leaf_max_size = params.leaf_max_size;
 
 			// Create a permutable array of indices to the input vectors.
 			init_vind();
 		}
 
-		/**
-		 * Standard destructor
-		 */
-		~KDTreeSingleIndexAdaptor()
-		{
-		}
+		/** Standard destructor */
+		~KDTreeSingleIndexAdaptor() { }
 
 		/** Frees the previously-built index. Automatically called within buildIndex(). */
 		void freeIndex()
 		{
 			pool.free_all();
 			root_node=NULL;
+			m_size_at_index_build = 0;
 		}
 
 		/**
@@ -886,25 +858,18 @@ namespace nanoflann
 		void buildIndex()
 		{
 			init_vind();
-			computeBoundingBox(root_bbox);
 			freeIndex();
-            if (size())
-                root_node = divideTree(0, m_size, root_bbox);   // construct the tree
+			m_size_at_index_build = m_size;
+			if(m_size == 0) return;
+			computeBoundingBox(root_bbox);
+			root_node = divideTree(0, m_size, root_bbox );   // construct the tree
 		}
 
-		/**
-		 *  Returns size of index.
-		 */
-		size_t size() const
-		{
-			return m_size;
-		}
+		/** Returns number of points in dataset  */
+		size_t size() const { return m_size; }
 
-		/**
-		 * Returns the length of an index feature.
-		 */
-		size_t veclen() const
-		{
+		/** Returns the length of each point in the dataset */
+		size_t veclen() const {
 			return static_cast<size_t>(DIM>0 ? DIM : dim);
 		}
 
@@ -929,21 +894,24 @@ namespace nanoflann
 		 *     vec = the vector for which to search the nearest neighbors
 		 *
 		 * \tparam RESULTSET Should be any ResultSet<DistanceType>
+         * \return  True if the requested neighbors could be found.
 		 * \sa knnSearch, radiusSearch
 		 */
 		template <typename RESULTSET>
-		void findNeighbors(RESULTSET& result, const ElementType* vec, const SearchParams& searchParams) const
+		bool findNeighbors(RESULTSET& result, const ElementType* vec, const SearchParams& searchParams) const
 		{
 			assert(vec);
             if (size() == 0)
-                return;
-			if (!root_node) throw std::runtime_error("[nanoflann] findNeighbors() called before building the index.");
+                return false;
+			if (!root_node)
+                throw std::runtime_error("[nanoflann] findNeighbors() called before building the index.");
 			float epsError = 1+searchParams.eps;
 
 			distance_vector_t dists; // fixed or variable-sized container (depending on DIM)
 			dists.assign((DIM>0 ? DIM : dim) ,0); // Fill it with zeros.
 			DistanceType distsq = computeInitialDistances(vec, dists);
 			searchLevel(result, vec, root_node, distsq, dists, epsError);  // "count_leaf" parameter removed since was neither used nor returned to the user.
+            return result.full();
 		}
 
 		/**
@@ -951,12 +919,15 @@ namespace nanoflann
 		 * the result object.
 		 *  \sa radiusSearch, findNeighbors
 		 * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface.
+		 * \return Number `N` of valid points in the result set. Only the first `N` entries in `out_indices` and `out_distances_sq` will be valid. 
+		 *         Return may be less than `num_closest` only if the number of elements in the tree is less than `num_closest`.
 		 */
-		inline void knnSearch(const ElementType *query_point, const size_t num_closest, IndexType *out_indices, DistanceType *out_distances_sq, const int nChecks_IGNORED = 10) const
+		size_t knnSearch(const ElementType *query_point, const size_t num_closest, IndexType *out_indices, DistanceType *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const
 		{
 			nanoflann::KNNResultSet<DistanceType,IndexType> resultSet(num_closest);
 			resultSet.init(out_indices, out_distances_sq);
 			this->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
+			return resultSet.size();
 		}
 
 		/**
@@ -968,17 +939,27 @@ namespace nanoflann
 		 *
 		 *  For a better performance, it is advisable to do a .reserve() on the vector if you have any wild guess about the number of expected matches.
 		 *
-		 *  \sa knnSearch, findNeighbors
+		 *  \sa knnSearch, findNeighbors, radiusSearchCustomCallback
 		 * \return The number of points within the given radius (i.e. indices.size() or dists.size() )
 		 */
-		size_t radiusSearch(const ElementType *query_point,const DistanceType radius, std::vector<std::pair<IndexType,DistanceType> >& IndicesDists, const SearchParams& searchParams) const
+		size_t radiusSearch(const ElementType *query_point,const DistanceType &radius, std::vector<std::pair<IndexType,DistanceType> >& IndicesDists, const SearchParams& searchParams) const
 		{
 			RadiusResultSet<DistanceType,IndexType> resultSet(radius,IndicesDists);
-			this->findNeighbors(resultSet, query_point, searchParams);
-
+			const size_t nFound = radiusSearchCustomCallback(query_point,resultSet,searchParams);
 			if (searchParams.sorted)
 				std::sort(IndicesDists.begin(),IndicesDists.end(), IndexDist_Sorter() );
+			return nFound;
+		}
 
+		/**
+		 * Just like radiusSearch() but with a custom callback class for each point found in the radius of the query.
+		 * See the source of RadiusResultSet<> as a start point for your own classes.
+		 * \sa radiusSearch
+		 */
+		template <class SEARCH_CALLBACK>
+		size_t radiusSearchCustomCallback(const ElementType *query_point,SEARCH_CALLBACK &resultSet, const SearchParams& searchParams = SearchParams() ) const
+		{
+			this->findNeighbors(resultSet, query_point, searchParams);
 			return resultSet.size();
 		}
 
@@ -1034,11 +1015,12 @@ namespace nanoflann
 			}
 			else
 			{
+				const size_t N = dataset.kdtree_get_point_count();
+				if (!N) throw std::runtime_error("[nanoflann] computeBoundingBox() called but no data points found.");
 				for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
 					bbox[i].low =
-						bbox[i].high = dataset_get(0,i);
+					bbox[i].high = dataset_get(0,i);
 				}
-				const size_t N = dataset.kdtree_get_point_count();
 				for (size_t k=1; k<N; ++k) {
 					for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
 						if (dataset_get(k,i)<bbox[i].low) bbox[i].low = dataset_get(k,i);
@@ -1052,21 +1034,19 @@ namespace nanoflann
 		/**
 		 * Create a tree node that subdivides the list of vecs from vind[first]
 		 * to vind[last].  The routine is called recursively on each sublist.
-		 * Place a pointer to this new tree node in the location pTree.
 		 *
-		 * Params: pTree = the new node to create
-		 *                  first = index of the first vector
-		 *                  last = index of the last vector
+		 * @param left index of the first vector
+		 * @param right index of the last vector
 		 */
 		NodePtr divideTree(const IndexType left, const IndexType right, BoundingBox& bbox)
 		{
 			NodePtr node = pool.allocate<Node>(); // allocate memory
 
 			/* If too few exemplars remain, then make this a leaf node. */
-			if ( (right-left) <= m_leaf_max_size) {
+			if ( (right-left) <= static_cast<IndexType>(m_leaf_max_size) ) {
 				node->child1 = node->child2 = NULL;    /* Mark as leaf node. */
-				node->lr.left = left;
-				node->lr.right = right;
+				node->node_type.lr.left = left;
+				node->node_type.lr.right = right;
 
 				// compute bounding-box of leaf points
 				for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
@@ -1086,7 +1066,7 @@ namespace nanoflann
 				DistanceType cutval;
 				middleSplit_(&vind[0]+left, right-left, idx, cutfeat, cutval, bbox);
 
-				node->sub.divfeat = cutfeat;
+				node->node_type.sub.divfeat = cutfeat;
 
 				BoundingBox left_bbox(bbox);
 				left_bbox[cutfeat].high = cutval;
@@ -1096,8 +1076,8 @@ namespace nanoflann
 				right_bbox[cutfeat].low = cutval;
 				node->child2 = divideTree(left+idx, right, right_bbox);
 
-				node->sub.divlow = left_bbox[cutfeat].high;
-				node->sub.divhigh = right_bbox[cutfeat].low;
+				node->node_type.sub.divlow = left_bbox[cutfeat].high;
+				node->node_type.sub.divhigh = right_bbox[cutfeat].low;
 
 				for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
 					bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);
@@ -1108,6 +1088,7 @@ namespace nanoflann
 			return node;
 		}
 
+
 		void computeMinMax(IndexType* ind, IndexType count, int element, ElementType& min_elem, ElementType& max_elem)
 		{
 			min_elem = dataset_get(ind[0],element);
@@ -1119,54 +1100,9 @@ namespace nanoflann
 			}
 		}
 
-		void middleSplit(IndexType* ind, IndexType count, IndexType& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)
-		{
-			// find the largest span from the approximate bounding box
-			ElementType max_span = bbox[0].high-bbox[0].low;
-			cutfeat = 0;
-			cutval = (bbox[0].high+bbox[0].low)/2;
-			for (int i=1; i<(DIM>0 ? DIM : dim); ++i) {
-				ElementType span = bbox[i].low-bbox[i].low;
-				if (span>max_span) {
-					max_span = span;
-					cutfeat = i;
-					cutval = (bbox[i].high+bbox[i].low)/2;
-				}
-			}
-
-			// compute exact span on the found dimension
-			ElementType min_elem, max_elem;
-			computeMinMax(ind, count, cutfeat, min_elem, max_elem);
-			cutval = (min_elem+max_elem)/2;
-			max_span = max_elem - min_elem;
-
-			// check if a dimension of a largest span exists
-			size_t k = cutfeat;
-			for (size_t i=0; i<(DIM>0 ? DIM : dim); ++i) {
-				if (i==k) continue;
-				ElementType span = bbox[i].high-bbox[i].low;
-				if (span>max_span) {
-					computeMinMax(ind, count, i, min_elem, max_elem);
-					span = max_elem - min_elem;
-					if (span>max_span) {
-						max_span = span;
-						cutfeat = i;
-						cutval = (min_elem+max_elem)/2;
-					}
-				}
-			}
-			IndexType lim1, lim2;
-			planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
-
-			if (lim1>count/2) index = lim1;
-			else if (lim2<count/2) index = lim2;
-			else index = count/2;
-		}
-
-
 		void middleSplit_(IndexType* ind, IndexType count, IndexType& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)
 		{
-			const DistanceType EPSs=static_cast<DistanceType>(0.00001);
+			const DistanceType EPS=static_cast<DistanceType>(0.00001);
 			ElementType max_span = bbox[0].high-bbox[0].low;
 			for (int i=1; i<(DIM>0 ? DIM : dim); ++i) {
 				ElementType span = bbox[i].high-bbox[i].low;
@@ -1178,9 +1114,9 @@ namespace nanoflann
 			cutfeat = 0;
 			for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
 				ElementType span = bbox[i].high-bbox[i].low;
-				if (span>(1-EPSs)*max_span) {
+				if (span>(1-EPS)*max_span) {
 					ElementType min_elem, max_elem;
-					computeMinMax(ind, count, cutfeat, min_elem, max_elem);
+					computeMinMax(ind, count, i, min_elem, max_elem);
					ElementType spread = max_elem-min_elem;
 					if (spread>max_spread) {
 						cutfeat = i;
@@ -1215,7 +1151,7 @@ namespace nanoflann
 		 *  dataset[ind[lim1..lim2-1]][cutfeat]==cutval
 		 *  dataset[ind[lim2..count]][cutfeat]>cutval
 		 */
-		void planeSplit(IndexType* ind, const IndexType count, int cutfeat, DistanceType cutval, IndexType& lim1, IndexType& lim2)
+		void planeSplit(IndexType* ind, const IndexType count, int cutfeat, DistanceType &cutval, IndexType& lim1, IndexType& lim2)
 		{
 			/* Move vector indices for left subtree to front of list. */
 			IndexType left = 0;
@@ -1247,7 +1183,7 @@ namespace nanoflann
 		DistanceType computeInitialDistances(const ElementType* vec, distance_vector_t& dists) const
 		{
 			assert(vec);
-			DistanceType distsq = 0.0;
+			DistanceType distsq = DistanceType();
 
 			for (int i = 0; i < (DIM>0 ? DIM : dim); ++i) {
 				if (vec[i] < root_bbox[i].low) {
@@ -1275,7 +1211,7 @@ namespace nanoflann
 			if ((node->child1 == NULL)&&(node->child2 == NULL)) {
 				//count_leaf += (node->lr.right-node->lr.left);  // Removed since was neither used nor returned to the user.
 				DistanceType worst_dist = result_set.worstDist();
-				for (IndexType i=node->lr.left; i<node->lr.right; ++i) {
+				for (IndexType i=node->node_type.lr.left; i<node->node_type.lr.right; ++i) {
 					const IndexType index = vind[i];// reorder... : i;
 					DistanceType dist = distance(vec, index, (DIM>0 ? DIM : dim));
 					if (dist<worst_dist) {
@@ -1286,10 +1222,10 @@ namespace nanoflann
 			}
 
 			/* Which child branch should be taken first? */
-			int idx = node->sub.divfeat;
+			int idx = node->node_type.sub.divfeat;
 			ElementType val = vec[idx];
-			DistanceType diff1 = val - node->sub.divlow;
-			DistanceType diff2 = val - node->sub.divhigh;
+			DistanceType diff1 = val - node->node_type.sub.divlow;
+			DistanceType diff2 = val - node->node_type.sub.divhigh;
 
 			NodePtr bestChild;
 			NodePtr otherChild;
@@ -1297,12 +1233,12 @@ namespace nanoflann
 			if ((diff1+diff2)<0) {
 				bestChild = node->child1;
 				otherChild = node->child2;
-				cut_dist = distance.accum_dist(val, node->sub.divhigh, idx);
+				cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx);
 			}
 			else {
 				bestChild = node->child2;
 				otherChild = node->child1;
-				cut_dist = distance.accum_dist( val, node->sub.divlow, idx);
+				cut_dist = distance.accum_dist( val, node->node_type.sub.divlow, idx);
 			}
 
 			/* Call recursively to search next level down. */
@@ -1349,7 +1285,7 @@ namespace nanoflann
 	};   // class KDTree
 
 
-	/** A simple KD-tree adaptor for working with data directly stored in an Eigen Matrix, without duplicating the data storage.
+	/** An L2-metric KD-tree adaptor for working with data directly stored in an Eigen Matrix, without duplicating the data storage.
 	  *  Each row in the matrix represents a point in the state space.
 	  *
 	  *  Example of usage:
@@ -1366,13 +1302,13 @@ namespace nanoflann
 	  *
 	  *  \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations.
 	  *  \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
-	  *  \tparam IndexType The type for indices in the KD-tree index (typically, size_t of int)
 	  */
-	template <class MatrixType, int DIM = -1, class Distance = nanoflann::metric_L2, typename IndexType = size_t>
+	template <class MatrixType, int DIM = -1, class Distance = nanoflann::metric_L2>
 	struct KDTreeEigenMatrixAdaptor
 	{
-		typedef KDTreeEigenMatrixAdaptor<MatrixType,DIM,Distance,IndexType> self_t;
+		typedef KDTreeEigenMatrixAdaptor<MatrixType,DIM,Distance> self_t;
 		typedef typename MatrixType::Scalar              num_t;
+		typedef typename MatrixType::Index IndexType;
 		typedef typename Distance::template traits<num_t,self_t>::distance_t metric_t;
 		typedef KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType>  index_t;
 
@@ -1381,10 +1317,11 @@ namespace nanoflann
 		/// Constructor: takes a const ref to the matrix object with the data points
 		KDTreeEigenMatrixAdaptor(const int dimensionality, const MatrixType &mat, const int leaf_max_size = 10) : m_data_matrix(mat)
 		{
-			const size_t dims = mat.cols();
+			const IndexType dims = mat.cols();
+			if (dims!=dimensionality) throw std::runtime_error("Error: 'dimensionality' must match column count in data matrix");
 			if (DIM>0 && static_cast<int>(dims)!=DIM)
 				throw std::runtime_error("Data set dimensionality does not match the 'DIM' template argument");
-			index = new index_t( dims, *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size, dims ) );
+			index = new index_t( dims, *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) );
 			index->buildIndex();
 		}
 	private:
@@ -1403,9 +1340,9 @@ namespace nanoflann
 		  *  The user can also call index->... methods as desired.
 		  * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface.
 		  */
-		inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int nChecks_IGNORED = 10) const
+		inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const
 		{
-			nanoflann::KNNResultSet<typename MatrixType::Scalar,IndexType> resultSet(num_closest);
+			nanoflann::KNNResultSet<num_t,IndexType> resultSet(num_closest);
 			resultSet.init(out_indices, out_distances_sq);
 			index->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
 		}
@@ -1425,11 +1362,11 @@ namespace nanoflann
 			return m_data_matrix.rows();
 		}
 
-		// Returns the distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class:
-		inline num_t kdtree_distance(const num_t *p1, const size_t idx_p2,size_t size) const
+		// Returns the L2 distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class:
+		inline num_t kdtree_distance(const num_t *p1, const IndexType idx_p2,IndexType size) const
 		{
 			num_t s=0;
-			for (size_t i=0; i<size; i++) {
+			for (IndexType i=0; i<size; i++) {
 				const num_t d= p1[i]-m_data_matrix.coeff(idx_p2,i);
 				s+=d*d;
 			}
@@ -1437,15 +1374,15 @@ namespace nanoflann
 		}
 
 		// Returns the dim'th component of the idx'th point in the class:
-		inline num_t kdtree_get_pt(const size_t idx, int dim) const {
-			return m_data_matrix.coeff(idx,dim);
+		inline num_t kdtree_get_pt(const IndexType idx, int dim) const {
+			return m_data_matrix.coeff(idx,IndexType(dim));
 		}
 
 		// Optional bounding-box computation: return false to default to a standard bbox computation loop.
 		//   Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again.
 		//   Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds)
 		template <class BBOX>
-		bool kdtree_get_bbox(BBOX &bb) const {
+		bool kdtree_get_bbox(BBOX& /*bb*/) const {
 			return false;
 		}
 
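For readers of this vendored nanoflann update, a minimal usage sketch of the changed query entry points follows. This is a sketch only, assuming the imported nanoflann.hpp is on the include path; the PointCloud adaptor, point values, and query below are illustrative and not part of the patch. It shows knnSearch() now reporting how many neighbours were actually found, and radiusSearch(), which since this version routes through radiusSearchCustomCallback():

    #include <cstdio>
    #include <cstdlib>
    #include <utility>
    #include <vector>
    #include "nanoflann.hpp"

    // Minimal dataset adaptor: nanoflann reads points through these hooks.
    struct PointCloud
    {
        struct Point { double x, y, z; };
        std::vector<Point> pts;

        // Number of points in the dataset.
        inline size_t kdtree_get_point_count() const { return pts.size(); }

        // Squared L2 distance between query p1 and the idx_p2-th stored point.
        inline double kdtree_distance(const double* p1, const size_t idx_p2, size_t /*size*/) const
        {
            const double dx = p1[0] - pts[idx_p2].x;
            const double dy = p1[1] - pts[idx_p2].y;
            const double dz = p1[2] - pts[idx_p2].z;
            return dx * dx + dy * dy + dz * dz;
        }

        // dim'th component of the idx'th point.
        inline double kdtree_get_pt(const size_t idx, int dim) const
        {
            if (dim == 0) return pts[idx].x;
            if (dim == 1) return pts[idx].y;
            return pts[idx].z;
        }

        // Let nanoflann compute the bounding box itself.
        template <class BBOX> bool kdtree_get_bbox(BBOX&) const { return false; }
    };

    int main()
    {
        PointCloud cloud;
        cloud.pts.resize(1000);
        for (size_t i = 0; i < cloud.pts.size(); ++i)
        {
            cloud.pts[i].x = rand() / double(RAND_MAX);
            cloud.pts[i].y = rand() / double(RAND_MAX);
            cloud.pts[i].z = rand() / double(RAND_MAX);
        }

        typedef nanoflann::KDTreeSingleIndexAdaptor<
            nanoflann::L2_Simple_Adaptor<double, PointCloud>,
            PointCloud, 3 /* DIM */> KDTree;

        // KDTreeSingleIndexAdaptorParams now carries only leaf_max_size;
        // the old 'dim' member was dropped in this version.
        KDTree index(3, cloud, nanoflann::KDTreeSingleIndexAdaptorParams(10));
        index.buildIndex();

        const double query[3] = { 0.5, 0.5, 0.5 };

        // knnSearch() now returns how many neighbours were actually found,
        // which can be fewer than requested if the tree is small.
        size_t indices[5];
        double dists_sq[5];
        const size_t found = index.knnSearch(query, 5, indices, dists_sq);
        printf("found %zu neighbours\n", found);

        // radiusSearch() takes a *squared* radius for L2 metrics and now
        // routes through radiusSearchCustomCallback() internally.
        std::vector<std::pair<size_t, double> > matches;
        const size_t n = index.radiusSearch(query, 0.05, matches, nanoflann::SearchParams());
        printf("%zu points within radius\n", n);
        return 0;
    }

Returning the count from knnSearch() matters when the tree holds fewer points than requested; previously the surplus output slots were left unspecified with no way to tell.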

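Similarly, a hedged sketch of the revised KDTreeEigenMatrixAdaptor, assuming Eigen is available (matrix size and query values are illustrative): the IndexType template parameter is gone in favour of MatrixType::Index, and the constructor now throws if 'dimensionality' does not match the matrix column count.

    #include <cstdio>
    #include <Eigen/Dense>
    #include "nanoflann.hpp"

    int main()
    {
        // 100 random 3-D points, one per row.
        Eigen::MatrixXd mat(100, 3);
        mat.setRandom();

        // The IndexType template parameter is gone; indices now come from
        // MatrixType::Index, so out_indices must use Eigen's index type.
        typedef nanoflann::KDTreeEigenMatrixAdaptor<Eigen::MatrixXd> KDTree;

        // 'dimensionality' must equal mat.cols() or the constructor throws;
        // the constructor also builds the index.
        KDTree index(3, mat, 10 /* leaf_max_size */);

        const double query_pt[3] = { 0.0, 0.0, 0.0 };
        Eigen::MatrixXd::Index nearest;   // note: no longer size_t
        double dist_sq;
        index.query(query_pt, 1, &nearest, &dist_sq);
        printf("nearest row: %ld (squared distance %g)\n", (long)nearest, dist_sq);
        return 0;
    }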
-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-grass/pdal.git


