[gdal] 03/09: Imported Upstream version 1.11.3~rc1+dfsg

Sebastiaan Couwenberg sebastic at moszumanska.debian.org
Wed Sep 16 21:03:30 UTC 2015


This is an automated email from the git hooks/post-receive script.

sebastic pushed a commit to branch experimental
in repository gdal.

commit 81592870b680434d3c1a278fe3846d9fbe041be6
Author: Bas Couwenberg <sebastic at xs4all.nl>
Date:   Wed Sep 16 19:52:53 2015 +0200

    Imported Upstream version 1.11.3~rc1+dfsg
---
 GDALmake.opt.in                                    |    2 +-
 NEWS                                               |  111 +
 VERSION                                            |    2 +-
 alg/gdalgrid_priv.h                                |   11 +-
 alg/gdaltransformer.cpp                            |   10 +-
 alg/llrasterize.cpp                                |    4 +-
 apps/gdalbuildvrt.cpp                              |   25 +-
 apps/gdaldem.cpp                                   |    4 +-
 apps/gdalserver.c                                  |    9 +-
 apps/gdalwarp.cpp                                  |    9 +-
 apps/ogr2ogr.cpp                                   |    7 +-
 configure                                          |   64 +-
 configure.in                                       |   61 +-
 data/ruian_vf_ob_v1.gfs                            |   12 +
 data/ruian_vf_v1.gfs                               |   12 +
 frmts/blx/blx.c                                    |    6 +-
 frmts/envisat/EnvisatFile.c                        |   10 +-
 frmts/ers/ersdataset.cpp                           |   33 +-
 frmts/frmt_various.html                            |    9 +-
 frmts/georaster/georaster_dataset.cpp              |  269 +-
 frmts/grass/pkg/Makefile.in                        |   19 +-
 frmts/grass/pkg/configure                          | 4688 +++++++++++---------
 frmts/grass/pkg/configure.in                       |   49 +-
 frmts/gtiff/geotiff.cpp                            |   28 +-
 frmts/hdf4/hdf4dataset.cpp                         |   10 +-
 frmts/hf2/hf2dataset.cpp                           |   10 +-
 frmts/netcdf/netcdfdataset.cpp                     |   13 +-
 frmts/nitf/ecrgtocdataset.cpp                      |  200 +-
 frmts/nitf/rpftocfile.cpp                          |   20 +-
 frmts/northwood/grcdataset.cpp                     |   56 +-
 frmts/northwood/grddataset.cpp                     |   51 +-
 frmts/northwood/northwood.cpp                      |    8 +-
 frmts/pcidsk/sdk/channel/cpcidskchannel.cpp        |    8 +-
 frmts/pcidsk/sdk/channel/ctiledchannel.cpp         |    4 +-
 frmts/pcidsk/sdk/segment/cpcidskbitmap.cpp         |    4 +-
 frmts/raw/btdataset.cpp                            |    4 +-
 frmts/til/tildataset.cpp                           |   10 +-
 frmts/vrt/vrtdataset.cpp                           |   24 +-
 frmts/vrt/vrtsources.cpp                           |    8 +-
 gcore/gdal_version.h                               |    6 +-
 gcore/gdaljp2metadata.cpp                          |   54 +-
 gcore/gdalrasterband.cpp                           |    4 +-
 man/man1/gdal-config.1                             |   23 +-
 man/man1/gdal2tiles.1                              |   51 +-
 man/man1/gdal_calc.1                               |   11 +-
 man/man1/gdal_contour.1                            |   35 +-
 man/man1/gdal_edit.1                               |   33 +-
 man/man1/gdal_fillnodata.1                         |   33 +-
 man/man1/gdal_grid.1                               |  141 +-
 man/man1/gdal_merge.1                              |   45 +-
 man/man1/gdal_polygonize.1                         |   35 +-
 man/man1/gdal_proximity.1                          |   35 +-
 man/man1/gdal_rasterize.1                          |   63 +-
 man/man1/gdal_retile.1                             |   33 +-
 man/man1/gdal_sieve.1                              |   33 +-
 man/man1/gdal_translate.1                          |   67 +-
 man/man1/gdal_utilities.1                          |  111 +-
 man/man1/gdaladdo.1                                |   55 +-
 man/man1/gdalbuildvrt.1                            |   57 +-
 man/man1/gdalcompare.1                             |   21 +-
 man/man1/gdaldem.1                                 |   89 +-
 man/man1/gdalinfo.1                                |   73 +-
 man/man1/gdallocationinfo.1                        |   55 +-
 man/man1/gdalmanage.1                              |   29 +-
 man/man1/gdalmove.1                                |   23 +-
 man/man1/gdalsrsinfo.1                             |   15 +-
 man/man1/gdaltindex.1                              |   39 +-
 man/man1/gdaltransform.1                           |   39 +-
 man/man1/gdalwarp.1                                |  105 +-
 man/man1/nearblack.1                               |   41 +-
 man/man1/ogr2ogr.1                                 |   95 +-
 man/man1/ogr_utilities.1                           |    9 +-
 man/man1/ogrinfo.1                                 |   45 +-
 man/man1/ogrlineref.1                              |   31 +-
 man/man1/ogrtindex.1                               |   29 +-
 man/man1/pct2rgb.1                                 |   25 +-
 man/man1/rgb2pct.1                                 |   25 +-
 ogr/ogrsf_frmts/edigeo/ogredigeodatasource.cpp     |   18 +-
 ogr/ogrsf_frmts/geojson/ogrgeojsondatasource.cpp   |    4 +-
 ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp       |    6 +-
 ogr/ogrsf_frmts/geojson/ogrtopojsonreader.cpp      |    4 +-
 ogr/ogrsf_frmts/gml/gmlhandler.cpp                 |   41 +-
 ogr/ogrsf_frmts/gml/parsexsd.cpp                   |   50 +-
 ogr/ogrsf_frmts/gpkg/ogrgeopackagelayer.cpp        |   23 +-
 ogr/ogrsf_frmts/gpkg/ogrgeopackageutility.cpp      |    4 +-
 .../mssqlspatial/ogrmssqlspatialtablelayer.cpp     |  114 +-
 .../openfilegdb/ogropenfilegdbdatasource.cpp       |    3 +-
 ogr/ogrsf_frmts/pg/ogrpgtablelayer.cpp             |   46 +-
 ogr/ogrsf_frmts/pgdump/ogrpgdumplayer.cpp          |   45 +-
 ogr/ogrsf_frmts/shape/shape2ogr.cpp                |    6 +-
 ogr/ogrsf_frmts/sqlite/ogrsqliteselectlayer.cpp    |   30 +-
 port/cpl_vsil_tar.cpp                              |   10 +-
 swig/include/perl/gdal_perl.i                      |    4 +-
 swig/include/perl/ogr_perl.i                       |   27 +-
 swig/perl/lib/Geo/GDAL.pm                          |    4 +-
 swig/perl/lib/Geo/OGR.dox                          |    2 +-
 swig/perl/lib/Geo/OGR.pm                           |   27 +-
 swig/perl/t/gdal.t                                 |   28 +-
 swig/perl/t/ogr.t                                  |    3 +-
 swig/perl/t/osr.t                                  |    1 +
 swig/python/samples/ogr_layer_algebra.py           |   10 +-
 swig/python/scripts/gdal_merge.py                  |    5 +-
 swig/python/setup.py                               |    2 +-
 103 files changed, 4565 insertions(+), 3549 deletions(-)

diff --git a/GDALmake.opt.in b/GDALmake.opt.in
index 1b7147b..247ac69 100644
--- a/GDALmake.opt.in
+++ b/GDALmake.opt.in
@@ -87,7 +87,7 @@ GDAL_INCLUDE	=	-I$(GDAL_ROOT)/port -I$(GDAL_ROOT)/gcore \
 # libtool targets and help variables
 LIBGDAL	:=		libgdal.la
 LIBGDAL_CURRENT	:=	19
-LIBGDAL_REVISION	:=	2
+LIBGDAL_REVISION	:=	3
 LIBGDAL_AGE	:=	18
 
 # native build targets and variables
diff --git a/NEWS b/NEWS
index cd88a0c..cfb34e8 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,114 @@
+= GDAL/OGR 1.11.3 Release Notes = 
+
+The 1.11.3 release is a bug fix release.
+
+== Build (Unix) ==
+ * Support custom buildflags in GRASS plugin (#5850)
+ * GRASS 7.0.0 support (#5852)
+ * fix detection of ECW SDK 5.1 on Mac (#5867, #5390)
+
+== Port ==
+ * /vsitar/: remove useless validation test that prevents from opening valid .tar files (#5864)
+
+== Algorithms ==
+ * gdal_grid: fix crash in optimized mode with GCC 4.4 on 64bit (#5987)
 + * Fix failure in GDALSuggestedWarpOut2() when computing output image size in case the top-left and bottom-right corners transform to the same point (#5980)
+ * Rasterize: add check to avoid burning negative x values (#5641)
+
+== GDAL core ==
+ * GMLJP2 reader: add compatibility with OGC CRS URL as found in GMLJP2v2 (#5940)
 + * GMLJP2: on reading, don't do axis inversion if there's an explicit axisName requesting easting, northing order (#5960)
+ * GMLJP2: add missing rangeParameters element to validate against GMLJP2 schema (#5707)
+
+== Utilities ==
+ * gdalbuildvrt: fix potential crash when using -b switch (#6095)
+ * gdalserver: fix compilation with recent GNU libc (such as in Arch Linux) (#6073)
+ * gdalwarp: emit error message if file specified with -cutline cannot be opened (#5921)
+ * gdalwarp: fix 1.11.2 regression when invoking several times gdalwarp with several input files on the same target file, and when the input files have a nodata setting: only the last input file was warped onto the target dataset (#5909)
+ * ogr2ogr: fix crash with -clipdst when a reprojection fails before (#5973)
+ * ogr_layer_algebra.py: for Update, Clip and Erase, only creates attribute of input layer by default (#5976)
+
+== GDAL drivers ==
+
+ECRG driver:
+ * change subdataset definition to make sure that they only consist of frames of same scale (#6043)
+
+ERS driver:
+ * fix SetProjection() (#5840)
+
+GeoRaster driver:
+ * New SRID search (#5911)
+ * Fix user-defined SRID issue (#5881)
+
+GTiff driver:
+ * fix GTiffDataset::IsBlockAvailable() wrong behaviour when compiling against internal libtiff, when a BigTIFF file has a TileByteCounts with LONG/4-byte counts and not LONG8 (#6001)
 + * avoid generating corrupted right-most and bottom-most tiles for 12-bit JPEG-compressed (#5971)
+
+HDF4 driver:
+ * fix AnyTypeToDouble() to use proper type (int instead of long) to work with DFNT_INT32/DFNT_UINT32 on 64-bit Linux (#5965)
+
+HF2 driver:
+ * fix reading side of the driver to work on architectures where char is unsigned, like PPC or ARM (#6082)
+
+NetCDF driver:
+ * fix crash on opening a NOAA dataset (#5962)
+ * fix computation of inverse flattening (#5858)
+
 +Northwood driver:
+ * Fix computation of intermediate color table values on non-Intel platforms (#6091)
 + * NWT_GRD: don't advertise scale/offset as they are transparently applied in IReadBlock() (#5839).
+
+RPFTOC driver:
+ * add tweak for weird relative directory names in the A.TOC file (#5979)
+
+TIL driver:
+ * fix half pixel shift in geo registration (#5961)
+
+VRT driver:
+ * add more checks to CheckCompatibleForDatasetIO() to avoid issues with overview bands (#5954)
+ * fix rounding of output window size on VRTSimpleSource (#5874)
+
+== OGR drivers ==
+
+EDIGEO driver:
+ * backport conversion of atof() to CPLAtof() to avoid truncation of floating point numbers including coordinates on a non C locale (#6070)
+
+GML driver:
+ * accept choice between gml:polygonProperty and gml:multiPolygonProperty (#5977)
 + * fix GML_ATTRIBUTES_TO_OGR_FIELDS=YES to work correctly with xlink:href too (#5970); and also backport a trunk fix for writing the correct path to attributes in nested elements
+ * VFR: add support for OriginalniHraniceOmpv (#5853)
+
+GeoJSON driver:
+ * avoid truncation of real numbers on reading (#5882)
+ * TopoJSON: fix segfault when reading https://raw.githubusercontent.com/mbostock/topojson/master/test/topojson/properties-id-computed.json (#5919)
+ * GeoJSON writer: make string comparison for authority name case insensitive so as to recognize lowercase 'epsg' (#4995)
+
+GPKG driver:
 + * on reading, recognize GEOMCOLLECTION to be compatible with GDAL 2.0. Still writing GEOMETRYCOLLECTION in 1.11 branch (#5937)
+ * escape column names on reading/insert/update (#5879)
+
+MSSQLSpatial driver:
+ * Add support for creating features with default values (#5957)
+
+OpenFileGDB driver:
+ * fix 1.11.2 regression with FileGDB v9 where the presence of a non spatial table can cause other layers not to be listed (#5875)
+
+PG/PGDump drivers:
+ * fix truncation of fields to work with multi-byte UTF-8 characters (#5854)
+
+Shape driver:
 + * SHPReadOGRFeature() should free the passed psShape when not NULL and if the DBF record is deleted, so as to avoid later reads failing (linked to #5974)
+
+SQLite driver:
+ * Fix issue with consecutive identical characters in layer name with ExecuteSQL() (#6107)
+
+== SWIG Language Bindings ==
+
+Perl bindings:
+ * Fix creation of Field Definition object (#6099)
+
+A Field Definition object is sometimes created with wrong attributes
+
 = GDAL/OGR 1.11.2 Release Notes = 
 
 The 1.11.2 release is a bug fix release.
diff --git a/VERSION b/VERSION
index ca71766..0a5af26 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.11.2
+1.11.3
diff --git a/alg/gdalgrid_priv.h b/alg/gdalgrid_priv.h
index 61aaa9d..efafa41 100644
--- a/alg/gdalgrid_priv.h
+++ b/alg/gdalgrid_priv.h
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: gdalgrid_priv.h 27044 2014-03-16 23:41:27Z rouault $
+ * $Id: gdalgrid_priv.h 29315 2015-06-05 20:21:41Z rouault $
  *
  * Project:  GDAL Gridding API.
  * Purpose:  Prototypes, and definitions for of GDAL scattered data gridder.
@@ -66,6 +66,14 @@ CPLErr GDALGridInverseDistanceToAPower2NoSmoothingNoSearchAVX(
 #endif
 
 #if defined(__GNUC__) 
+#if defined(__x86_64)
+#define GCC_CPUID(level, a, b, c, d)            \
+  __asm__ ("xchgq %%rbx, %q1\n"                 \
+           "cpuid\n"                            \
+           "xchgq %%rbx, %q1"                   \
+       : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
+       : "0" (level))
+#else
 #define GCC_CPUID(level, a, b, c, d)            \
   __asm__ ("xchgl %%ebx, %1\n"                  \
            "cpuid\n"                            \
@@ -73,3 +81,4 @@ CPLErr GDALGridInverseDistanceToAPower2NoSmoothingNoSearchAVX(
        : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
        : "0" (level))
 #endif
+#endif
diff --git a/alg/gdaltransformer.cpp b/alg/gdaltransformer.cpp
index 48a104c..cf586a5 100644
--- a/alg/gdaltransformer.cpp
+++ b/alg/gdaltransformer.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: gdaltransformer.cpp 27865 2014-10-15 22:44:28Z rouault $
+ * $Id: gdaltransformer.cpp 29310 2015-06-05 18:50:26Z rouault $
  *
  * Project:  Mapinfo Image Warper
  * Purpose:  Implementation of one or more GDALTrasformerFunc types, including
@@ -38,7 +38,7 @@
 #include "cpl_list.h"
 #include "cpl_multiproc.h"
 
-CPL_CVSID("$Id: gdaltransformer.cpp 27865 2014-10-15 22:44:28Z rouault $");
+CPL_CVSID("$Id: gdaltransformer.cpp 29310 2015-06-05 18:50:26Z rouault $");
 CPL_C_START
 void *GDALDeserializeGCPTransformer( CPLXMLNode *psTree );
 void *GDALDeserializeTPSTransformer( CPLXMLNode *psTree );
@@ -698,14 +698,16 @@ retry:
 /*      compute an approximate pixel size in the output                 */
 /*      georeferenced coordinates.                                      */
 /* -------------------------------------------------------------------- */
-    double dfDiagonalDist, dfDeltaX, dfDeltaY;
+    double dfDiagonalDist, dfDeltaX = 0.0, dfDeltaY = 0.0;
 
     if( pabSuccess[0] && pabSuccess[nSamplePoints - 1] )
     {
         dfDeltaX = padfX[nSamplePoints-1] - padfX[0];
         dfDeltaY = padfY[nSamplePoints-1] - padfY[0];
+        // In some cases this can result in 0 values. See #5980
+        // so fallback to safer method in that case
     }
-    else
+    if( dfDeltaX == 0.0 || dfDeltaY == 0.0 )
     {
         dfDeltaX = dfMaxXOut - dfMinXOut;
         dfDeltaY = dfMaxYOut - dfMinYOut;
diff --git a/alg/llrasterize.cpp b/alg/llrasterize.cpp
index 9dbfe42..f2feda9 100644
--- a/alg/llrasterize.cpp
+++ b/alg/llrasterize.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: llrasterize.cpp 27739 2014-09-25 18:49:52Z goatbar $
+ * $Id: llrasterize.cpp 29118 2015-05-02 20:50:50Z rouault $
  *
  * Project:  GDAL
  * Purpose:  Vector polygon rasterization code.
@@ -553,7 +553,7 @@ GDALdllImageLineAllTouched(int nRasterXSize, int nRasterYSize,
             }
 
             // step from pixel to pixel.
-            while( dfX < dfXEnd )
+            while( dfX >= 0 && dfX < dfXEnd )
             {
                 int iX = (int) floor(dfX);
                 int iY = (int) floor(dfY);
diff --git a/apps/gdalbuildvrt.cpp b/apps/gdalbuildvrt.cpp
index 078278f..4ab3ff1 100644
--- a/apps/gdalbuildvrt.cpp
+++ b/apps/gdalbuildvrt.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: gdalbuildvrt.cpp 27994 2014-11-21 20:03:49Z rouault $
+ * $Id: gdalbuildvrt.cpp 30129 2015-09-05 09:28:41Z rouault $
  *
  * Project:  GDAL Utilities
  * Purpose:  Commandline application to build VRT datasets from raster products or content of SHP tile index
@@ -37,7 +37,7 @@
 #endif
 #include "ogr_srs_api.h"
 
-CPL_CVSID("$Id: gdalbuildvrt.cpp 27994 2014-11-21 20:03:49Z rouault $");
+CPL_CVSID("$Id: gdalbuildvrt.cpp 30129 2015-09-05 09:28:41Z rouault $");
 
 #define GEOTRSFRM_TOPLEFT_X            0
 #define GEOTRSFRM_WE_RES               1
@@ -254,7 +254,7 @@ class VRTBuilder
     public:
                 VRTBuilder(const char* pszOutputFilename,
                            int nInputFiles, const char* const * ppszInputFilenames,
-                           int *panBandList, int nBandCount, int nMaxBandNo,
+                           const int *panBandListIn, int nBandCount, int nMaxBandNo,
                            ResolutionStrategy resolutionStrategy,
                            double we_res, double ns_res,
                            int bTargetAlignedPixels,
@@ -276,7 +276,7 @@ class VRTBuilder
 
 VRTBuilder::VRTBuilder(const char* pszOutputFilename,
                        int nInputFiles, const char* const * ppszInputFilenames,
-                       int *panBandList, int nBandCount, int nMaxBandNo,
+                       const int *panBandListIn, int nBandCount, int nMaxBandNo,
                        ResolutionStrategy resolutionStrategy,
                        double we_res, double ns_res,
                        int bTargetAlignedPixels,
@@ -297,7 +297,12 @@ VRTBuilder::VRTBuilder(const char* pszOutputFilename,
     }
 
     this->nBands = nBandCount;
-    this->panBandList = panBandList;    
+    panBandList = NULL;
+    if( nBandCount )
+    {
+        panBandList = (int*) CPLMalloc(nBands * sizeof(int));
+        memcpy(panBandList, panBandListIn, nBands * sizeof(int));
+    }
     this->nMaxBandNo = nMaxBandNo;    
 
     this->resolutionStrategy = resolutionStrategy;
@@ -344,8 +349,7 @@ VRTBuilder::~VRTBuilder()
     CPLFree(pszOutputFilename);
     CPLFree(pszSrcNoData);
     CPLFree(pszVRTNoData);
-    if (panBandList)
-        delete[] panBandList;
+    CPLFree(panBandList);
 
     int i;
     for(i=0;i<nInputFiles;i++)
@@ -619,13 +623,12 @@ int VRTBuilder::AnalyseRaster( GDALDatasetH hDS, const char* dsFileName,
             maxY = ds_maxY;
         }
 
-        //if provided band list
+        //if not provided an explicit band list, take the one of the first dataset
         if(nBands == 0)
         {
             nBands = _nBands;
-            if(panBandList != NULL)
-                CPLFree(panBandList);
-            panBandList = new int[nBands];
+            CPLFree(panBandList);
+            panBandList = (int*) CPLMalloc(nBands * sizeof(int));
             for(j=0;j<nBands;j++)
             {
                 panBandList[j] = j + 1;
diff --git a/apps/gdaldem.cpp b/apps/gdaldem.cpp
index 513bec2..4f8c6e0 100644
--- a/apps/gdaldem.cpp
+++ b/apps/gdaldem.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: gdaldem.cpp 27781 2014-10-01 17:48:26Z rouault $
+ * $Id: gdaldem.cpp 30390 2015-09-15 13:14:09Z rouault $
  *
  * Project:  GDAL DEM Utilities
  * Purpose:  
@@ -91,7 +91,7 @@
 #include "gdal_priv.h"
 #include "commonutils.h"
 
-CPL_CVSID("$Id: gdaldem.cpp 27781 2014-10-01 17:48:26Z rouault $");
+CPL_CVSID("$Id: gdaldem.cpp 30390 2015-09-15 13:14:09Z rouault $");
 
 #ifndef M_PI
 # define M_PI  3.1415926535897932384626433832795
diff --git a/apps/gdalserver.c b/apps/gdalserver.c
index b7ce6db..2e89f31 100644
--- a/apps/gdalserver.c
+++ b/apps/gdalserver.c
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: gdalserver.c 27741 2014-09-26 19:20:02Z goatbar $
+ * $Id: gdalserver.c 29669 2015-08-19 16:42:29Z rouault $
  *
  * Project:  GDAL
  * Purpose:  Server application that is forked by libgdal
@@ -27,9 +27,8 @@
  * DEALINGS IN THE SOFTWARE.
  ****************************************************************************/
 
-#if defined(__STDC_VERSION__)
-#define _XOPEN_SOURCE
-#endif
+// So that __USE_XOPEN2K is defined to have getaddrinfo
+#define _XOPEN_SOURCE 600
 
 #include "cpl_port.h"
 
@@ -77,7 +76,7 @@ int CPL_DLL GDALServerLoop(CPL_FILE_HANDLE fin, CPL_FILE_HANDLE fout);
 int CPL_DLL GDALServerLoopSocket(CPL_SOCKET nSocket);
 CPL_C_END
 
-CPL_CVSID("$Id: gdalserver.c 27741 2014-09-26 19:20:02Z goatbar $");
+CPL_CVSID("$Id: gdalserver.c 29669 2015-08-19 16:42:29Z rouault $");
 
 /************************************************************************/
 /*                               Usage()                                */
diff --git a/apps/gdalwarp.cpp b/apps/gdalwarp.cpp
index 5b9e34e..c90f5bf 100644
--- a/apps/gdalwarp.cpp
+++ b/apps/gdalwarp.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: gdalwarp.cpp 28186 2014-12-20 21:14:58Z rouault $
+ * $Id: gdalwarp.cpp 28897 2015-04-13 18:20:57Z rouault $
  *
  * Project:  High Performance Image Reprojector
  * Purpose:  Test program for high performance warper API.
@@ -36,7 +36,7 @@
 #include "commonutils.h"
 #include <vector>
 
-CPL_CVSID("$Id: gdalwarp.cpp 28186 2014-12-20 21:14:58Z rouault $");
+CPL_CVSID("$Id: gdalwarp.cpp 28897 2015-04-13 18:20:57Z rouault $");
 
 static void
 LoadCutline( const char *pszCutlineDSName, const char *pszCLayer, 
@@ -1412,7 +1412,7 @@ int main( int argc, char ** argv )
                 }
             }
 
-            if( !bInitDestSetByUser && iSrc == 0 )
+            if( bCreateOutput && !bInitDestSetByUser && iSrc == 0 )
             {
                 /* As we didn't know at the beginning if there was source nodata */
                 /* we have initialized INIT_DEST=0. Override this with NO_DATA now */
@@ -2094,7 +2094,10 @@ LoadCutline( const char *pszCutlineDSName, const char *pszCLayer,
 
     hSrcDS = OGROpen( pszCutlineDSName, FALSE, NULL );
     if( hSrcDS == NULL )
+    {
+        fprintf( stderr, "Cannot open %s.\n", pszCutlineDSName);
         GDALExit( 1 );
+    }
 
 /* -------------------------------------------------------------------- */
 /*      Get the source layer                                            */
diff --git a/apps/ogr2ogr.cpp b/apps/ogr2ogr.cpp
index 01d8ebb..b3923fe 100644
--- a/apps/ogr2ogr.cpp
+++ b/apps/ogr2ogr.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogr2ogr.cpp 27741 2014-09-26 19:20:02Z goatbar $
+ * $Id: ogr2ogr.cpp 29232 2015-05-22 16:12:45Z rouault $
  *
  * Project:  OpenGIS Simple Features Reference Implementation
  * Purpose:  Simple client for translating between formats.
@@ -39,7 +39,7 @@
 #include <map>
 #include <vector>
 
-CPL_CVSID("$Id: ogr2ogr.cpp 27741 2014-09-26 19:20:02Z goatbar $");
+CPL_CVSID("$Id: ogr2ogr.cpp 29232 2015-05-22 16:12:45Z rouault $");
 
 static int bSkipFailures = FALSE;
 static int nGroupTransactions = 20000;
@@ -3529,6 +3529,9 @@ static int TranslateLayer( TargetLayerInfo* psInfo,
 
                 if (poClipDst)
                 {
+                    if( poDstGeometry == NULL )
+                        goto end_loop;
+
                     OGRGeometry* poClipped = poDstGeometry->Intersection(poClipDst);
                     if (poClipped == NULL || poClipped->IsEmpty())
                     {
diff --git a/configure b/configure
index 095b79f..04bdcae 100755
--- a/configure
+++ b/configure
@@ -22528,22 +22528,24 @@ fi
       ECW_LIBS="-lecwj2 $CARBON_FRAMEWORK"
     fi
   fi
-  # ECWJP2 SDK 5.1 style
-  if test "$ECW_SETTING" = "no" ; then
-        if test -r /Intergraph/ERDASEcwJpeg2000SDK5.1/lib/libEcwJp2SDK.a; then
-                ECW_LIBDIR=/Intergraph/ERDASEcwJpeg2000SDK5.1/lib
-                ECW_INCLUDE=-I/Intergraph/ERDASEcwJpeg2000SDK5.1/include
-                ECW_LIBS="-L$ECW_LIBDIR -lEcwJp2SDK -framework Cocoa"
-                ECW_SETTING=yes
-                ECW_FLAGS="-DHAVE_ECW_BUILDNUMBER_H $ECW_FLAGS"
-                { $as_echo "$as_me:${as_lineno-$LINENO}: result: found Intergraph ERDAS EcwJpeg2000 SDK 5.1 in /Intergraph/ERDASEcwJpeg2000SDK5.1/." >&5
-$as_echo "found Intergraph ERDAS EcwJpeg2000 SDK 5.1 in /Intergraph/ERDASEcwJpeg2000SDK5.1/." >&6; }
-        fi
-  fi
 else
 
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libNCSEcw.so or libecwj2" >&5
-$as_echo_n "checking for libNCSEcw.so or libecwj2... " >&6; }
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libNCSEcw.a or libecwj2" >&5
+$as_echo_n "checking for libNCSEcw.a or libecwj2... " >&6; }
+  ECW_ARCH=x86
+  ECW_CONF="release"
+  ECW_FLAGS="-DLINUX -DX86 -DPOSIX -DHAVE_COMPRESS -DECW_COMPRESS_RW_SDK_VERSION"
+  ECW_FRAMEWORK_COCOA=""
+  if test "`arch`" = "x86_64" ; then
+    ECW_ARCH="x64"
+  fi
+  if test ! -z "`uname | grep Darwin`" ; then
+    ECW_ARCH=""
+    ECW_CONF=""
+    ECW_FLAGS=""
+    ECW_FRAMEWORK_COCOA=" -framework Cocoa "
+  fi
+
   ECW_SETTING=yes
   if test -r $with_ecw/lib/libNCSCnet.so -o -r $with_ecw/lib/libNCSCnet.dylib ; then
     ECW_LIBS="-L$with_ecw/lib -lNCSEcw -lNCSEcwC -lNCSCnet -lNCSUtil"
@@ -22562,27 +22564,27 @@ $as_echo "found in $with_ecw/bin." >&6; }
     { $as_echo "$as_me:${as_lineno-$LINENO}: result: found libecwj2 in $with_ecw/lib." >&5
 $as_echo "found libecwj2 in $with_ecw/lib." >&6; }
 
-  # ECW SDK 5.0 style
-  elif test -r $with_ecw/lib/x64/release/libNCSEcw.so ; then
-    if test `arch` == "x86_64" ; then
-      ECW_LIBDIR=$with_ecw/lib/x64/release
-    else
-      ECW_LIBDIR=$with_ecw/lib/x86/release
-    fi
-    ECW_FLAGS="-DLINUX -DX86 -DPOSIX -DHAVE_COMPRESS -DECW_COMPRESS_RW_SDK_VERSION"
-    ECW_LIBS="-L$ECW_LIBDIR -lNCSEcw"
+  # ECW SDK 5.0 style and also for the case where license type is included in path i.e. specific license type is requested.
+  elif test -r $with_ecw/lib/$ECW_ARCH/$ECW_CONF/libNCSEcw.a ; then
+    ECW_LIBDIR=$with_ecw/lib/$ECW_ARCH/$ECW_CONF
+    ECW_LIBS="-L$ECW_LIBDIR -lNCSEcw $ECW_FRAMEWORK_COCOA"
     { $as_echo "$as_me:${as_lineno-$LINENO}: result: found Intergraph 5.x+ SDK in ${ECW_LIBDIR}." >&5
 $as_echo "found Intergraph 5.x+ SDK in ${ECW_LIBDIR}." >&6; }
  # ECWJP2 SDK 5.1 style
-  elif test -r $with_ecw/lib/libEcwJp2SDK.a; then
-        ECW_LIBDIR=$with_ecw/lib
-        ECW_INCLUDE=-I$with_ecw/include
-        ECW_LIBS="-L$ECW_LIBDIR -lEcwJp2SDK -framework Cocoa"
-        ECW_FLAGS="-DHAVE_ECW_BUILDNUMBER_H $ECW_FLAGS"
-        { $as_echo "$as_me:${as_lineno-$LINENO}: result: found Intergraph 5.1+ SDK in $ {with_ecw}." >&5
-$as_echo "found Intergraph 5.1+ SDK in $ {with_ecw}." >&6; }
+  elif test -d $with_ecw; then
+    for ecw_license_type in "Desktop_Read-Write" "Server_Read-Only_EndUser" "Server_Read-Only" "Server_Read-Write" "Desktop_Read-Only"
+      do
+        ECW_LIBDIR=$with_ecw/$ecw_license_type/lib/$ECW_ARCH/$ECW_CONF
+        if test -r $ECW_LIBDIR/libNCSEcw.a; then
+          ECW_LIBS="-L$ECW_LIBDIR -lNCSEcw $ECW_FRAMEWORK_COCOA"
+          with_ecw=$with_ecw/$ecw_license_type
+          { $as_echo "$as_me:${as_lineno-$LINENO}: result: found Intergraph 5.x+ SDK in ${ECW_LIBDIR}." >&5
+$as_echo "found Intergraph 5.x+ SDK in ${ECW_LIBDIR}." >&6; }
+          break
+        fi
+      done
  else
-    as_fn_error $? "not found in $with_ecw/lib or $with_ecw/bin." "$LINENO" 5
+    as_fn_error $? "not found in $with_ecw." "$LINENO" 5
   fi
 
   { $as_echo "$as_me:${as_lineno-$LINENO}: checking for NCSECWClient.h in $with_ecw/include" >&5
diff --git a/configure.in b/configure.in
index f1e85d2..f9bad48 100644
--- a/configure.in
+++ b/configure.in
@@ -1,5 +1,5 @@
 dnl ***************************************************************************
-dnl $Id: configure.in 28317 2015-01-15 22:49:41Z tamas $
+dnl $Id: configure.in 28600 2015-03-03 09:38:01Z rouault $
 dnl
 dnl Project:  GDAL
 dnl Purpose:  Configure source file.
@@ -2070,20 +2070,23 @@ elif test "$with_ecw" = "yes" -o "$with_ecw" = "" ; then
       ECW_LIBS="-lecwj2 $CARBON_FRAMEWORK"
     fi
   fi
-  # ECWJP2 SDK 5.1 style
-  if test "$ECW_SETTING" = "no" ; then
-        if test -r /Intergraph/ERDASEcwJpeg2000SDK5.1/lib/libEcwJp2SDK.a; then
-                ECW_LIBDIR=/Intergraph/ERDASEcwJpeg2000SDK5.1/lib
-                ECW_INCLUDE=-I/Intergraph/ERDASEcwJpeg2000SDK5.1/include
-                ECW_LIBS="-L$ECW_LIBDIR -lEcwJp2SDK -framework Cocoa"
-                ECW_SETTING=yes
-                ECW_FLAGS="-DHAVE_ECW_BUILDNUMBER_H $ECW_FLAGS"
-                AC_MSG_RESULT([found Intergraph ERDAS EcwJpeg2000 SDK 5.1 in /Intergraph/ERDASEcwJpeg2000SDK5.1/.])
-        fi
-  fi
 else
 
-  AC_MSG_CHECKING([for libNCSEcw.so or libecwj2])
+  AC_MSG_CHECKING([for libNCSEcw.a or libecwj2])
+  ECW_ARCH=x86
+  ECW_CONF="release"
+  ECW_FLAGS="-DLINUX -DX86 -DPOSIX -DHAVE_COMPRESS -DECW_COMPRESS_RW_SDK_VERSION"
+  ECW_FRAMEWORK_COCOA=""
+  if test "`arch`" = "x86_64" ; then 
+    ECW_ARCH="x64"
+  fi
+  if test ! -z "`uname | grep Darwin`" ; then
+    ECW_ARCH=""
+    ECW_CONF=""
+    ECW_FLAGS=""
+    ECW_FRAMEWORK_COCOA=" -framework Cocoa "
+  fi
+  
   ECW_SETTING=yes
   if test -r $with_ecw/lib/libNCSCnet.so -o -r $with_ecw/lib/libNCSCnet.dylib ; then
     ECW_LIBS="-L$with_ecw/lib -lNCSEcw -lNCSEcwC -lNCSCnet -lNCSUtil"
@@ -2098,25 +2101,25 @@ else
     ECW_LIBS="-L$with_ecw/lib -lecwj2 $CARBON_FRAMEWORK"
     AC_MSG_RESULT([found libecwj2 in $with_ecw/lib.])
 
-  # ECW SDK 5.0 style
-  elif test -r $with_ecw/lib/x64/release/libNCSEcw.so ; then
-    if test `arch` == "x86_64" ; then
-      ECW_LIBDIR=$with_ecw/lib/x64/release
-    else
-      ECW_LIBDIR=$with_ecw/lib/x86/release
-    fi
-    ECW_FLAGS="-DLINUX -DX86 -DPOSIX -DHAVE_COMPRESS -DECW_COMPRESS_RW_SDK_VERSION"
-    ECW_LIBS="-L$ECW_LIBDIR -lNCSEcw"
+  # ECW SDK 5.0 style and also for the case where license type is included in path i.e. specific license type is requested.
+  elif test -r $with_ecw/lib/$ECW_ARCH/$ECW_CONF/libNCSEcw.a ; then
+    ECW_LIBDIR=$with_ecw/lib/$ECW_ARCH/$ECW_CONF
+    ECW_LIBS="-L$ECW_LIBDIR -lNCSEcw $ECW_FRAMEWORK_COCOA"
     AC_MSG_RESULT([found Intergraph 5.x+ SDK in ${ECW_LIBDIR}.])
  # ECWJP2 SDK 5.1 style
-  elif test -r $with_ecw/lib/libEcwJp2SDK.a; then
-        ECW_LIBDIR=$with_ecw/lib
-        ECW_INCLUDE=-I$with_ecw/include
-        ECW_LIBS="-L$ECW_LIBDIR -lEcwJp2SDK -framework Cocoa"
-        ECW_FLAGS="-DHAVE_ECW_BUILDNUMBER_H $ECW_FLAGS"
-        AC_MSG_RESULT([found Intergraph 5.1+ SDK in $ {with_ecw}.]) 
+  elif test -d $with_ecw; then
+    for ecw_license_type in "Desktop_Read-Write" "Server_Read-Only_EndUser" "Server_Read-Only" "Server_Read-Write" "Desktop_Read-Only"
+      do
+        ECW_LIBDIR=$with_ecw/$ecw_license_type/lib/$ECW_ARCH/$ECW_CONF
+        if test -r $ECW_LIBDIR/libNCSEcw.a; then
+          ECW_LIBS="-L$ECW_LIBDIR -lNCSEcw $ECW_FRAMEWORK_COCOA"
+          with_ecw=$with_ecw/$ecw_license_type
+          AC_MSG_RESULT([found Intergraph 5.x+ SDK in ${ECW_LIBDIR}.])
+          break
+        fi
+      done
  else
-    AC_MSG_ERROR([not found in $with_ecw/lib or $with_ecw/bin.])
+    AC_MSG_ERROR([not found in $with_ecw.])
   fi
 
   AC_MSG_CHECKING([for NCSECWClient.h in $with_ecw/include])
diff --git a/data/ruian_vf_ob_v1.gfs b/data/ruian_vf_ob_v1.gfs
index 5bd2e0d..f58d4a6 100644
--- a/data/ruian_vf_ob_v1.gfs
+++ b/data/ruian_vf_ob_v1.gfs
@@ -929,6 +929,12 @@
       <ElementPath>Geometrie|OriginalniHranice</ElementPath> 
       <Type>Polygon</Type>
     </GeomPropertyDefn>
+    <!-- Originální geometrie hranice parcely (Ompv) -->
+    <GeomPropertyDefn>
+      <Name>OriginalniHraniceOmpv</Name>
+      <ElementPath>Geometrie|OriginalniHraniceOmpv</ElementPath>
+      <Type>MultiPolygon</Type>
+    </GeomPropertyDefn>
     <!-- Jednoznační identifikátor parcely -->
     <PropertyDefn>
       <Name>Id</Name>
@@ -1104,6 +1110,12 @@
       <ElementPath>Geometrie|OriginalniHranice</ElementPath> 
       <Type>MultiPolygon</Type>
     </GeomPropertyDefn>
+    <!-- Originální geometrie hranice stavebního objektu (Ompv) -->
+    <GeomPropertyDefn>
+      <Name>OriginalniHraniceOmpv</Name>
+      <ElementPath>Geometrie|OriginalniHraniceOmpv</ElementPath>
+      <Type>MultiPolygon</Type>
+    </GeomPropertyDefn>
     <!-- Kód stavebního objektu -->
     <PropertyDefn>
       <Name>Kod</Name>
diff --git a/data/ruian_vf_v1.gfs b/data/ruian_vf_v1.gfs
index 00da809..b009ac8 100644
--- a/data/ruian_vf_v1.gfs
+++ b/data/ruian_vf_v1.gfs
@@ -1567,6 +1567,12 @@
       <ElementPath>Geometrie|OriginalniHranice</ElementPath> 
       <Type>Polygon</Type>
     </GeomPropertyDefn>
+    <!-- Originální geometrie hranice parcely (Ompv) -->
+    <GeomPropertyDefn>
+      <Name>OriginalniHraniceOmpv</Name>
+      <ElementPath>Geometrie|OriginalniHraniceOmpv</ElementPath>
+      <Type>MultiPolygon</Type>
+    </GeomPropertyDefn>
     <!-- Jednoznační identifikátor parcely -->
     <PropertyDefn>
       <Name>Id</Name>
@@ -1742,6 +1748,12 @@
       <ElementPath>Geometrie|OriginalniHranice</ElementPath> 
       <Type>MultiPolygon</Type>
     </GeomPropertyDefn>
+    <!-- Originální geometrie hranice stavebního objektu (Ompv) -->
+    <GeomPropertyDefn>
+      <Name>OriginalniHraniceOmpv</Name>
+      <ElementPath>Geometrie|OriginalniHraniceOmpv</ElementPath>
+      <Type>MultiPolygon</Type>
+    </GeomPropertyDefn>
     <!-- Kód stavebního objektu -->
     <PropertyDefn>
       <Name>Kod</Name>
diff --git a/frmts/blx/blx.c b/frmts/blx/blx.c
index a3a8f99..59d68b0 100644
--- a/frmts/blx/blx.c
+++ b/frmts/blx/blx.c
@@ -11,10 +11,10 @@
  * copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following
  * conditions:
- * 
+ *
  * The above copyright notice and this permission notice shall be
  * included in all copies or substantial portions of the Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
@@ -30,6 +30,8 @@
 #include <string.h>
 #include <stdlib.h>
 
+#include "cpl_port.h"
+
 /* Constants */
 #define MAXLEVELS 5
 #define MAXCOMPONENTS 4
diff --git a/frmts/envisat/EnvisatFile.c b/frmts/envisat/EnvisatFile.c
index 3b8a93c..386e74c 100644
--- a/frmts/envisat/EnvisatFile.c
+++ b/frmts/envisat/EnvisatFile.c
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: EnvisatFile.c 27731 2014-09-24 07:58:14Z rouault $
+ * $Id: EnvisatFile.c 29666 2015-08-19 15:31:10Z kyle $
  *
  * Project:  APP ENVISAT Support
  * Purpose:  Low Level Envisat file access (read/write) API.
@@ -33,7 +33,7 @@
 #  include "cpl_conv.h"
 #  include "EnvisatFile.h"
 
-CPL_CVSID("$Id: EnvisatFile.c 27731 2014-09-24 07:58:14Z rouault $");
+CPL_CVSID("$Id: EnvisatFile.c 29666 2015-08-19 15:31:10Z kyle $");
 
 #else
 #  include "APP/app.h"
@@ -1386,7 +1386,7 @@ int EnvisatFile_ReadDatasetChunk( EnvisatFile *self,
 {
     if( ds_index < 0 || ds_index >= self->ds_count )
     {
-        SendError( "Attempt to read non-existant dataset in "
+        SendError( "Attempt to read non-existent dataset in "
                    "EnvisatFile_ReadDatasetChunk()" );
         return FAILURE;
     }
@@ -1451,7 +1451,7 @@ int EnvisatFile_WriteDatasetRecord( EnvisatFile *self,
 
     if( ds_index < 0 || ds_index >= self->ds_count )
     {
-        SendError( "Attempt to write non-existant dataset in "
+        SendError( "Attempt to write non-existent dataset in "
                    "EnvisatFile_WriteDatasetRecord()" );
         return FAILURE;
     }
@@ -1568,7 +1568,7 @@ int EnvisatFile_ReadDatasetRecordChunk( EnvisatFile *self,
 
     if( ds_index < 0 || ds_index >= self->ds_count )
     {
-        SendError( "Attempt to read non-existant dataset in "
+        SendError( "Attempt to read non-existent dataset in "
                    "EnvisatFile_ReadDatasetRecordChunk()" );
         return FAILURE;
     }
diff --git a/frmts/ers/ersdataset.cpp b/frmts/ers/ersdataset.cpp
index 5738ffc..3a31c59 100644
--- a/frmts/ers/ersdataset.cpp
+++ b/frmts/ers/ersdataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ersdataset.cpp 27433 2014-06-04 19:21:16Z rouault $
+ * $Id: ersdataset.cpp 28475 2015-02-13 11:43:30Z rouault $
  *
  * Project:  ERMapper .ers Driver
  * Purpose:  Implementation of .ers driver.
@@ -33,7 +33,7 @@
 #include "cpl_string.h"
 #include "ershdrnode.h"
 
-CPL_CVSID("$Id: ersdataset.cpp 27433 2014-06-04 19:21:16Z rouault $");
+CPL_CVSID("$Id: ersdataset.cpp 28475 2015-02-13 11:43:30Z rouault $");
 
 /************************************************************************/
 /* ==================================================================== */
@@ -70,9 +70,9 @@ class ERSDataset : public RawDataset
     int         bHasNoDataValue;
     double      dfNoDataValue;
 
-    CPLString      osProj;
-    CPLString      osDatum;
-    CPLString      osUnits;
+    CPLString      osProj, osProjForced;
+    CPLString      osDatum, osDatumForced;
+    CPLString      osUnits, osUnitsForced;
     void           WriteProjectionInfo(const char* pszProj,
                                        const char* pszDatum,
                                        const char* pszUnits);
@@ -437,9 +437,20 @@ CPLErr ERSDataset::SetProjection( const char *pszSRS )
 
     /* Write the above computed values, unless they have been overriden by */
     /* the creation options PROJ, DATUM or UNITS */
-    WriteProjectionInfo( (osProj.size()) ? osProj.c_str() : szERSProj,
-                         (osDatum.size()) ? osDatum.c_str() : szERSDatum,
-                         (osUnits.size()) ? osUnits.c_str() : szERSUnits );
+    if( osProjForced.size() )
+        osProj = osProjForced;
+    else
+        osProj = szERSProj;
+    if( osDatumForced.size() )
+        osDatum = osDatumForced;
+    else
+        osDatum = szERSDatum;
+    if( osUnitsForced.size() )
+        osUnits = osUnitsForced;
+    else
+        osUnits = szERSUnits;
+
+    WriteProjectionInfo( osProj, osDatum, osUnits );
 
     return CE_None;
 }
@@ -1410,13 +1421,13 @@ GDALDataset *ERSDataset::Create( const char * pszFilename,
 /* -------------------------------------------------------------------- */
     const char *pszDatum = CSLFetchNameValue( papszOptions, "DATUM" );
     if (pszDatum)
-        poDS->osDatum = pszDatum;
+        poDS->osDatumForced = poDS->osDatum = pszDatum;
     const char *pszProj = CSLFetchNameValue( papszOptions, "PROJ" );
     if (pszProj)
-        poDS->osProj = pszProj;
+        poDS->osProjForced = poDS->osProj = pszProj;
     const char *pszUnits = CSLFetchNameValue( papszOptions, "UNITS" );
     if (pszUnits)
-        poDS->osUnits = pszUnits;
+        poDS->osUnitsForced = poDS->osUnits = pszUnits;
 
     if (pszDatum || pszProj || pszUnits)
     {
diff --git a/frmts/frmt_various.html b/frmts/frmt_various.html
index 963506c..5a16c3b 100644
--- a/frmts/frmt_various.html
+++ b/frmts/frmt_various.html
@@ -367,13 +367,14 @@ Starting with GDAL 1.9.0
 that uses the table of content file, TOC.xml, and exposes it as a virtual dataset whose
 coverage is the set of ECRG frames contained in the table of content.</p>
 <p>The driver will report a different subdataset for each subdataset found in the TOC.xml
-file.</p>
+file. Each subdataset consists of the frames of same product id, disk id, and starting
+with GDAL 1.11.3, with same scale.</p>
 
 <p>Result of a gdalinfo on a TOC.xml file.</p>
 <pre>
 Subdatasets:
-  SUBDATASET_1_NAME=ECRG_TOC_ENTRY:ECRG:FalconView:ECRG_Sample/EPF/TOC.xml
-  SUBDATASET_1_DESC=ECRG:FalconView
+  SUBDATASET_1_NAME=ECRG_TOC_ENTRY:ECRG:FalconView:1_500_K:ECRG_Sample/EPF/TOC.xml
+  SUBDATASET_1_DESC=Product ECRG, Disk FalconView, Scale 1:500 K
 </pre>
 
 <p>See Also:</p>
@@ -1325,7 +1326,7 @@ as reported by GDAL will be a half-pixel at the top and left of the values that
 </p>
 
 <p>
-$Id: frmt_various.html 27110 2014-03-28 21:29:20Z rouault $
+$Id: frmt_various.html 29779 2015-08-25 09:03:00Z rouault $
 </p>
 
 </body>
diff --git a/frmts/georaster/georaster_dataset.cpp b/frmts/georaster/georaster_dataset.cpp
index 495ce2c..d2b5ddf 100644
--- a/frmts/georaster/georaster_dataset.cpp
+++ b/frmts/georaster/georaster_dataset.cpp
@@ -785,7 +785,6 @@ GDALDataset *GeoRasterDataset::Create( const char *pszFilename,
         poGRD->poGeoRaster->nCompressQuality = nQuality;
     }
 
-
     pszFetched = CSLFetchNameValue( papszOptions, "GENPYRAMID" );
 
     if( pszFetched != NULL )
@@ -1386,7 +1385,6 @@ const char* GeoRasterDataset::GetProjectionRef( void )
         else if ( EQUAL( pszProjName, "Lambert Conformal Conic" ) )
         {
             oSRS.SetProjection( SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP );
-            //?? One ot two parameters?
         }
         else if ( EQUAL( pszProjName, "Lambert Azimuthal Equal Area" ) )
         {
@@ -1507,47 +1505,286 @@ CPLErr GeoRasterDataset::SetProjection( const char *pszProjString )
         return CE_Failure;
     }
     
+    const char *pszProjName = poSRS2->GetAttrValue( "PROJECTION" );
+
+    if( pszProjName )
+    {
+        // ----------------------------------------------------------------
+        // Translate projection names to Oracle's standards
+        // ----------------------------------------------------------------
+
+        if ( EQUAL( pszProjName, SRS_PT_TRANSVERSE_MERCATOR ) )
+        {
+            poSRS2->SetProjection( "Transverse Mercator" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_ALBERS_CONIC_EQUAL_AREA ) )
+        {
+            poSRS2->SetProjection( "Albers Conical Equal Area" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_AZIMUTHAL_EQUIDISTANT ) )
+        {
+            poSRS2->SetProjection( "Azimuthal Equidistant" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_MILLER_CYLINDRICAL ) )
+        {
+            poSRS2->SetProjection( "Miller Cylindrical" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_HOTINE_OBLIQUE_MERCATOR ) )
+        {
+            poSRS2->SetProjection( "Hotine Oblique Mercator" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_WAGNER_IV ) )
+        {
+            poSRS2->SetProjection( "Wagner IV" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_WAGNER_VII ) )
+        {
+            poSRS2->SetProjection( "Wagner VII" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_ECKERT_IV ) )
+        {
+            poSRS2->SetProjection( "Eckert IV" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_ECKERT_VI ) )
+        {
+            poSRS2->SetProjection( "Eckert VI" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_NEW_ZEALAND_MAP_GRID ) )
+        {
+            poSRS2->SetProjection( "New Zealand Map Grid" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP ) )
+        {
+            poSRS2->SetProjection( "Lambert Conformal Conic" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_LAMBERT_AZIMUTHAL_EQUAL_AREA ) )
+        {
+            poSRS2->SetProjection( "Lambert Azimuthal Equal Area" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_VANDERGRINTEN ) )
+        {
+            poSRS2->SetProjection( "Van der Grinten" );
+        }
+        else if ( EQUAL(
+            pszProjName, SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP_BELGIUM ) )
+        {
+            poSRS2->SetProjection( "Lambert Conformal Conic (Belgium 1972)" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_CYLINDRICAL_EQUAL_AREA ) )
+        {
+            poSRS2->SetProjection( "Cylindrical Equal Area" );
+        }
+        else if ( EQUAL( pszProjName, SRS_PT_GOODE_HOMOLOSINE ) )
+        {
+            poSRS2->SetProjection( "Interrupted Goode Homolosine" );
+        }
+        
+        // ----------------------------------------------------------------
+        // Translate projection's parameters to Oracle's standards
+        // ----------------------------------------------------------------
+
+        char* pszStart = NULL;
+        
+        CPLFree( pszCloneWKT );       
+
+        if( poSRS2->exportToWkt( &pszCloneWKT ) != OGRERR_NONE )
+        {
+            delete poSRS2;
+            return CE_Failure;
+        }
+        
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_AZIMUTH) ) )
+        {
+            strncpy( pszStart, "Azimuth", strlen(SRS_PP_AZIMUTH) );
+        }
+
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_CENTRAL_MERIDIAN) ) )
+        {
+            strncpy( pszStart, "Central_Meridian", 
+                                        strlen(SRS_PP_CENTRAL_MERIDIAN) );
+        }
+
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_FALSE_EASTING) ) )
+        {
+            strncpy( pszStart, "False_Easting", strlen(SRS_PP_FALSE_EASTING) );
+        }
+
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_FALSE_NORTHING) ) )
+        {
+            strncpy( pszStart, "False_Northing", 
+                                        strlen(SRS_PP_FALSE_NORTHING) );
+        }
+
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_LATITUDE_OF_CENTER) ) )
+        {
+            strncpy( pszStart, "Latitude_Of_Center", 
+                                        strlen(SRS_PP_LATITUDE_OF_CENTER) );
+        }
+                
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_LATITUDE_OF_ORIGIN) ) )
+        {
+            strncpy( pszStart, "Latitude_Of_Origin", 
+                                        strlen(SRS_PP_LATITUDE_OF_ORIGIN) );
+        }
+                
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_LONGITUDE_OF_CENTER) ) )
+        {
+            strncpy( pszStart, "Longitude_Of_Center", 
+                                        strlen(SRS_PP_LONGITUDE_OF_CENTER) );
+        }
+                
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_PSEUDO_STD_PARALLEL_1) ) )
+        {
+            strncpy( pszStart, "Pseudo_Standard_Parallel_1", 
+                                        strlen(SRS_PP_PSEUDO_STD_PARALLEL_1) );
+        }
+                
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_SCALE_FACTOR) ) )
+        {
+            strncpy( pszStart, "Scale_Factor", strlen(SRS_PP_SCALE_FACTOR) );
+        }
+                
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_STANDARD_PARALLEL_1) ) )
+        {
+            strncpy( pszStart, "Standard_Parallel_1", 
+                                        strlen(SRS_PP_STANDARD_PARALLEL_1) );
+        }
+                
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_STANDARD_PARALLEL_2) ) )
+        {
+            strncpy( pszStart, "Standard_Parallel_2", 
+                                        strlen(SRS_PP_STANDARD_PARALLEL_2) );
+        }                
+                
+        if( ( pszStart = strstr(pszCloneWKT, SRS_PP_STANDARD_PARALLEL_2) ) )
+        {
+            strncpy( pszStart, "Standard_Parallel_2", 
+                                        strlen(SRS_PP_STANDARD_PARALLEL_2) );
+        }                
+        
+        // ----------------------------------------------------------------
+        // Fix Unit name
+        // ----------------------------------------------------------------
+        
+        if( ( pszStart = strstr(pszCloneWKT, "metre") ) )
+        {
+            strncpy( pszStart, SRS_UL_METER, strlen(SRS_UL_METER) );
+        }
+    }
+
     // --------------------------------------------------------------------
-    // Search by simplified WKT or insert it as a user defined
+    // Tries to find a SRID compatible with the WKT
     // --------------------------------------------------------------------
 
     OWConnection* poConnection  = poGeoRaster->poConnection;
     OWStatement* poStmt = NULL;
-    int nMaxSRID = 0;
+    
+    int nNewSRID = 0;    
+   
+    char *pszFuncName = "FIND_GEOG_CRS";
+  
+    if( poSRS2->IsProjected() )
+    {
+        pszFuncName = "FIND_PROJ_CRS";
+    }
+    
+    poStmt = poConnection->CreateStatement( CPLSPrintf(
+        "DECLARE\n"
+        "  LIST SDO_SRID_LIST;"
+        "BEGIN\n"
+        "  SELECT SDO_CS.%s('%s', null) into LIST FROM DUAL;\n"
+        "  IF LIST.COUNT() > 0 then\n"
+        "    SELECT LIST(1) into :out from dual;\n"
+        "  ELSE\n"
+        "    SELECT 0 into :out from dual;\n"
+        "  END IF;\n"
+        "END;",
+            pszFuncName,
+            pszCloneWKT ) );
+        
+    poStmt->BindName( ":out", &nNewSRID );
+
+    CPLPushErrorHandler( CPLQuietErrorHandler );
+
+    if( poStmt->Execute() )
+    {
+        CPLPopErrorHandler();
+
+        if ( nNewSRID > 0 )
+        {
+            poGeoRaster->SetGeoReference( nNewSRID );
+            CPLFree( pszCloneWKT );       
+            return CE_None;
+        }
+    }   
+
+    // --------------------------------------------------------------------
+    // Search by simplified WKT or insert it as a user defined SRS
+    // --------------------------------------------------------------------
+    
+    int nCounter = 0;
 
     poStmt = poConnection->CreateStatement( CPLSPrintf(
+        "SELECT COUNT(*) FROM MDSYS.CS_SRS WHERE WKTEXT = '%s'", pszCloneWKT));
+    
+    poStmt->Define( &nCounter );
+            
+    CPLPushErrorHandler( CPLQuietErrorHandler );
+
+    if( poStmt->Execute() && nCounter > 0 )
+    {    
+        poStmt = poConnection->CreateStatement( CPLSPrintf(
+            "SELECT SRID FROM MDSYS.CS_SRS WHERE WKTEXT = '%s'", pszCloneWKT));
+
+        poStmt->Define( &nNewSRID );
+
+        if( poStmt->Execute() )
+        {
+            CPLPopErrorHandler();
+            
+            poGeoRaster->SetGeoReference( nNewSRID );
+            CPLFree( pszCloneWKT );
+            return CE_None;
+        }    
+    }
+
+    CPLPopErrorHandler();
+    
+    poStmt = poConnection->CreateStatement( CPLSPrintf(
         "DECLARE\n"
         "  MAX_SRID NUMBER := 0;\n"
         "BEGIN\n"
-        "  SELECT SRID INTO MAX_SRID FROM MDSYS.CS_SRS WHERE WKTEXT = '%s';\n"
-        "  EXCEPTION\n"
-        "    WHEN no_data_found THEN\n"
-        "      SELECT MAX(SRID) INTO MAX_SRID FROM MDSYS.CS_SRS;\n"
-        "      MAX_SRID := MAX_SRID + 1;\n"
-        "      INSERT INTO MDSYS.CS_SRS (SRID, WKTEXT, CS_NAME)\n"
+        "  SELECT MAX(SRID) INTO MAX_SRID FROM MDSYS.CS_SRS;\n"
+        "  MAX_SRID := MAX_SRID + 1;\n"
+        "  INSERT INTO MDSYS.CS_SRS (SRID, WKTEXT, CS_NAME)\n"
         "        VALUES (MAX_SRID, '%s', '%s');\n"
+        "  SELECT MAX_SRID INTO :out FROM DUAL;\n"
         "END;",
             pszCloneWKT,
-            pszCloneWKT,
             oSRS.GetRoot()->GetChild(0)->GetValue() ) );
 
-    poStmt->Define( &nMaxSRID );
+    poStmt->BindName( ":out", &nNewSRID );
 
     CPLErr eError = CE_None;
 
+    CPLPushErrorHandler( CPLQuietErrorHandler );
+
     if( poStmt->Execute() )
     {
-        poGeoRaster->SetGeoReference( nMaxSRID ); //TODO change that method
-        poGeoRaster->sWKText = pszCloneWKT;
+        CPLPopErrorHandler();
+            
+        poGeoRaster->SetGeoReference( nNewSRID );
     }
     else
     {
+        CPLPopErrorHandler();
+            
         poGeoRaster->SetGeoReference( UNKNOWN_CRS );
-        poGeoRaster->sWKText = "";
 
         CPLError( CE_Warning, CPLE_UserInterrupt,
             "Insufficient privileges to insert reference system to "
-            "MDSYS.CS_SRS table." );
+            "table MDSYS.CS_SRS." );
+        
         eError = CE_Warning;
     }
 
diff --git a/frmts/grass/pkg/Makefile.in b/frmts/grass/pkg/Makefile.in
index ca97c29..007df6d 100644
--- a/frmts/grass/pkg/Makefile.in
+++ b/frmts/grass/pkg/Makefile.in
@@ -1,11 +1,12 @@
-                                                                                                                                                                                                                                                               
 CC	=	@CC@
 CXX	=	@CXX@
 LD	=	@CXX@
 
 CPPFLAGS = -DUSE_CPL -DGRASS_GISBASE=\"@GRASS_GISBASE@\" \
-	  @GDAL_INC@ @GRASS_INCLUDE@ @CPPFLAGS@ 
+	  @GDAL_INC@ @GRASS_INCLUDE@ @PQ_INCLUDE@ @CPPFLAGS@
 CXXFLAGS = @CXX_WFLAGS@ @CXX_PIC@ 
+CFLAGS   = @CFLAGS@ 
+LDFLAGS  = @LDFLAGS@ 
 
 RANLIB		=	@RANLIB@
 SO_EXT		=	@SO_EXT@
@@ -28,8 +29,12 @@ install:	default
 	cp $(OLIBNAME) $(AUTOLOAD_DIR)
 	test -d ${GRASSTABLES_DIR} || mkdir ${GRASSTABLES_DIR}
 	test -d ${GRASSTABLES_DIR}/etc || mkdir ${GRASSTABLES_DIR}/etc
-	cp @GRASS_GISBASE@/etc/ellipse.table ${GRASSTABLES_DIR}/etc
-	cp @GRASS_GISBASE@/etc/datum.table @GRASS_GISBASE@/etc/datumtransform.table ${GRASSTABLES_DIR}/etc
+	test ! -e @GRASS_GISBASE@/etc/ellipse.table || cp @GRASS_GISBASE@/etc/ellipse.table ${GRASSTABLES_DIR}/etc
+	test ! -e @GRASS_GISBASE@/etc/datum.table || cp @GRASS_GISBASE@/etc/datum.table ${GRASSTABLES_DIR}/etc
+	test ! -e @GRASS_GISBASE@/etc/datumtransform.table || cp @GRASS_GISBASE@/etc/datumtransform.table ${GRASSTABLES_DIR}/etc
+	test ! -e @GRASS_GISBASE@/etc/proj/ellipse.table || cp @GRASS_GISBASE@/etc/proj/ellipse.table ${GRASSTABLES_DIR}/etc
+	test ! -e @GRASS_GISBASE@/etc/proj/datum.table || cp @GRASS_GISBASE@/etc/proj/datum.table ${GRASSTABLES_DIR}/etc
+	test ! -e @GRASS_GISBASE@/etc/proj/datumtransform.table || cp @GRASS_GISBASE@/etc/proj/datumtransform.table ${GRASSTABLES_DIR}/etc
 	test -d ${GRASSTABLES_DIR}/driver || mkdir ${GRASSTABLES_DIR}/driver
 	test -d ${GRASSTABLES_DIR}/driver/db || mkdir ${GRASSTABLES_DIR}/driver/db
 	cp -r @GRASS_GISBASE@/driver/db/* ${GRASSTABLES_DIR}/driver/db/
@@ -42,11 +47,11 @@ distclean: clean
 
 
 $(GLIBNAME):	grass57dataset.o
-	$(LD_SHARED) grass57dataset.o $(LIBS) -o $(GLIBNAME)
+	$(LD_SHARED) $(LDFLAGS) grass57dataset.o $(LIBS) -o $(GLIBNAME)
 
 $(OLIBNAME):	ogrgrassdriver.o ogrgrassdatasource.o ogrgrasslayer.o 
-	$(LD_SHARED) ogrgrassdriver.o ogrgrassdatasource.o ogrgrasslayer.o $(LIBS) -o $(OLIBNAME)
+	$(LD_SHARED) $(LDFLAGS) ogrgrassdriver.o ogrgrassdatasource.o ogrgrasslayer.o $(LIBS) -o $(OLIBNAME)
 
 %.o:	%.cpp
-	$(CXX) $(CXXFLAGS) $(CPPFLAGS) -c -o $@ $<
+	$(CXX) $(CXXFLAGS) $(CPPFLAGS) $(CFLAGS) -c -o $@ $<
 
diff --git a/frmts/grass/pkg/configure b/frmts/grass/pkg/configure
index cdd9df2..ae5e319 100755
--- a/frmts/grass/pkg/configure
+++ b/frmts/grass/pkg/configure
@@ -1,81 +1,458 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.59.
+# Generated by GNU Autoconf 2.69.
+#
+#
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
+#
 #
-# Copyright (C) 2003 Free Software Foundation, Inc.
 # This configure script is free software; the Free Software Foundation
 # gives unlimited permission to copy, distribute and modify it.
-## --------------------- ##
-## M4sh Initialization.  ##
-## --------------------- ##
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
 
-# Be Bourne compatible
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
   emulate sh
   NULLCMD=:
-  # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
   # is contrary to our usage.  Disable this feature.
   alias -g '${1+"$@"}'='"$@"'
-elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
-  set -o posix
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
 fi
-DUALCASE=1; export DUALCASE # for MKS sh
 
-# Support unset when possible.
-if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
-  as_unset=unset
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='print -r --'
+  as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='printf %s\n'
+  as_echo_n='printf %s'
 else
-  as_unset=false
+  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+    as_echo_n='/usr/ucb/echo -n'
+  else
+    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+    as_echo_n_body='eval
+      arg=$1;
+      case $arg in #(
+      *"$as_nl"*)
+	expr "X$arg" : "X\\(.*\\)$as_nl";
+	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+      esac;
+      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+    '
+    export as_echo_n_body
+    as_echo_n='sh -c $as_echo_n_body as_echo'
+  fi
+  export as_echo_body
+  as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
 fi
 
 
-# Work around bugs in pre-3.0 UWIN ksh.
-$as_unset ENV MAIL MAILPATH
+# IFS
+# We need space, tab and new line, in precisely that order.  Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" ""	$as_nl"
+
+# Find who we are.  Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+  *[\\/]* ) as_myself=$0 ;;
+  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+  done
+IFS=$as_save_IFS
+
+     ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+  as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+  exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there.  '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
 PS1='$ '
 PS2='> '
 PS4='+ '
 
 # NLS nuisances.
-for as_var in \
-  LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
-  LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
-  LC_TELEPHONE LC_TIME
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Use a proper internal environment variable to ensure we don't fall
+  # into an infinite loop, continuously re-executing ourselves.
+  if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+    _as_can_reexec=no; export _as_can_reexec;
+    # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+  *v*x* | *x*v* ) as_opts=-vx ;;
+  *v* ) as_opts=-v ;;
+  *x* ) as_opts=-x ;;
+  * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+  fi
+  # We don't want this to propagate to other subprocesses.
+          { _as_can_reexec=; unset _as_can_reexec;}
+if test "x$CONFIG_SHELL" = x; then
+  as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '\${1+\"\$@\"}'='\"\$@\"'
+  setopt NO_GLOB_SUBST
+else
+  case \`(set -o) 2>/dev/null\` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+"
+  as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+
+else
+  exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
+  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1"
+  if (eval "$as_required") 2>/dev/null; then :
+  as_have_required=yes
+else
+  as_have_required=no
+fi
+  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+
+else
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
 do
-  if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
-    eval $as_var=C; export $as_var
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  as_found=:
+  case $as_dir in #(
+	 /*)
+	   for as_base in sh bash ksh sh5; do
+	     # Try only shells that exist, to save several forks.
+	     as_shell=$as_dir/$as_base
+	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+		    { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+  CONFIG_SHELL=$as_shell as_have_required=yes
+		   if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+  break 2
+fi
+fi
+	   done;;
+       esac
+  as_found=false
+done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+	      { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+  CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
+IFS=$as_save_IFS
+
+
+      if test "x$CONFIG_SHELL" != x; then :
+  export CONFIG_SHELL
+             # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+  *v*x* | *x*v* ) as_opts=-vx ;;
+  *v* ) as_opts=-v ;;
+  *x* ) as_opts=-x ;;
+  * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+fi
+
+    if test x$as_have_required = xno; then :
+  $as_echo "$0: This script requires a shell more modern than all"
+  $as_echo "$0: the shells that I found on your system."
+  if test x${ZSH_VERSION+set} = xset ; then
+    $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+    $as_echo "$0: be upgraded to zsh 4.3.4 or later."
   else
-    $as_unset $as_var
+    $as_echo "$0: Please tell bug-autoconf at gnu.org about your system,
+$0: including any error possibly output before this
+$0: message. Then install a modern shell, or manually run
+$0: the script under such a shell if you do have one."
   fi
-done
+  exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+  { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+  return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+  set +e
+  as_fn_set_status $1
+  exit $1
+} # as_fn_exit
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+  case $as_dir in #(
+  -*) as_dir=./$as_dir;;
+  esac
+  test -d "$as_dir" || eval $as_mkdir_p || {
+    as_dirs=
+    while :; do
+      case $as_dir in #(
+      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+      *) as_qdir=$as_dir;;
+      esac
+      as_dirs="'$as_qdir' $as_dirs"
+      as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_dir" : 'X\(//\)[^/]' \| \
+	 X"$as_dir" : 'X\(//\)$' \| \
+	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      test -d "$as_dir" && break
+    done
+    test -z "$as_dirs" || eval "mkdir $as_dirs"
+  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+  test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+  eval 'as_fn_append ()
+  {
+    eval $1+=\$2
+  }'
+else
+  as_fn_append ()
+  {
+    eval $1=\$$1\$2
+  }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+  eval 'as_fn_arith ()
+  {
+    as_val=$(( $* ))
+  }'
+else
+  as_fn_arith ()
+  {
+    as_val=`expr "$@" || test $? -eq 1`
+  }
+fi # as_fn_arith
+
 
-# Required to use basename.
-if expr a : '\(a\)' >/dev/null 2>&1; then
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+  as_status=$1; test $as_status -eq 0 && as_status=1
+  if test "$4"; then
+    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+    $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+  fi
+  $as_echo "$as_me: error: $2" >&2
+  as_fn_exit $as_status
+} # as_fn_error
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+   test "X`expr 00001 : '.*\(...\)'`" = X001; then
   as_expr=expr
 else
   as_expr=false
 fi
 
-if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
   as_basename=basename
 else
   as_basename=false
 fi
 
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+  as_dirname=dirname
+else
+  as_dirname=false
+fi
 
-# Name of the executable.
-as_me=`$as_basename "$0" ||
+as_me=`$as_basename -- "$0" ||
 $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
 	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)$' \| \
-	 .     : '\(.\)' 2>/dev/null ||
-echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
-  	  /^X\/\(\/\/\)$/{ s//\1/; q; }
-  	  /^X\/\(\/\).*/{ s//\1/; q; }
-  	  s/.*/./; q'`
-
+	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
 
-# PATH needs CR, and LINENO needs CR and PATH.
 # Avoid depending upon Character Ranges.
 as_cr_letters='abcdefghijklmnopqrstuvwxyz'
 as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
@@ -83,146 +460,91 @@ as_cr_Letters=$as_cr_letters$as_cr_LETTERS
 as_cr_digits='0123456789'
 as_cr_alnum=$as_cr_Letters$as_cr_digits
 
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  echo "#! /bin/sh" >conf$$.sh
-  echo  "exit 0"   >>conf$$.sh
-  chmod +x conf$$.sh
-  if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
-    PATH_SEPARATOR=';'
-  else
-    PATH_SEPARATOR=:
-  fi
-  rm -f conf$$.sh
-fi
-
-
-  as_lineno_1=$LINENO
-  as_lineno_2=$LINENO
-  as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
-  test "x$as_lineno_1" != "x$as_lineno_2" &&
-  test "x$as_lineno_3"  = "x$as_lineno_2"  || {
-  # Find who we are.  Look in the path if we contain no path at all
-  # relative or not.
-  case $0 in
-    *[\\/]* ) as_myself=$0 ;;
-    *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-done
-
-       ;;
-  esac
-  # We did not find ourselves, most probably we were run as `sh COMMAND'
-  # in which case we are not to be found in the path.
-  if test "x$as_myself" = x; then
-    as_myself=$0
-  fi
-  if test ! -f "$as_myself"; then
-    { echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2
-   { (exit 1); exit 1; }; }
-  fi
-  case $CONFIG_SHELL in
-  '')
-    as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for as_base in sh bash ksh sh5; do
-	 case $as_dir in
-	 /*)
-	   if ("$as_dir/$as_base" -c '
-  as_lineno_1=$LINENO
-  as_lineno_2=$LINENO
-  as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
-  test "x$as_lineno_1" != "x$as_lineno_2" &&
-  test "x$as_lineno_3"  = "x$as_lineno_2" ') 2>/dev/null; then
-	     $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
-	     $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
-	     CONFIG_SHELL=$as_dir/$as_base
-	     export CONFIG_SHELL
-	     exec "$CONFIG_SHELL" "$0" ${1+"$@"}
-	   fi;;
-	 esac
-       done
-done
-;;
-  esac
 
-  # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
-  # uniformly replaced by the line number.  The first 'sed' inserts a
-  # line-number line before each line; the second 'sed' does the real
-  # work.  The second script uses 'N' to pair each line-number line
-  # with the numbered line, and appends trailing '-' during
-  # substitution so that $LINENO is not a special case at line end.
-  # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
-  # second 'sed' script.  Blame Lee E. McMahon for sed's syntax.  :-)
-  sed '=' <$as_myself |
+  as_lineno_1=$LINENO as_lineno_1a=$LINENO
+  as_lineno_2=$LINENO as_lineno_2a=$LINENO
+  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
+  sed -n '
+    p
+    /[$]LINENO/=
+  ' <$as_myself |
     sed '
+      s/[$]LINENO.*/&-/
+      t lineno
+      b
+      :lineno
       N
-      s,$,-,
-      : loop
-      s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
+      :loop
+      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
       t loop
-      s,-$,,
-      s,^['$as_cr_digits']*\n,,
+      s/-\n.*//
     ' >$as_me.lineno &&
-  chmod +x $as_me.lineno ||
-    { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
-   { (exit 1); exit 1; }; }
+  chmod +x "$as_me.lineno" ||
+    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
 
+  # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+  # already done that, so ensure we don't try to do so again and fall
+  # in an infinite loop.  This has already happened in practice.
+  _as_can_reexec=no; export _as_can_reexec
   # Don't try to exec as it changes $[0], causing all sort of problems
   # (the dirname of $[0] is not the place where we might find the
-  # original and so on.  Autoconf is especially sensible to this).
-  . ./$as_me.lineno
+  # original and so on.  Autoconf is especially sensitive to this).
+  . "./$as_me.lineno"
   # Exit status is that of the last command.
   exit
 }
 
-
-case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
-  *c*,-n*) ECHO_N= ECHO_C='
-' ECHO_T='	' ;;
-  *c*,*  ) ECHO_N=-n ECHO_C= ECHO_T= ;;
-  *)       ECHO_N= ECHO_C='\c' ECHO_T= ;;
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T='	';;
+  esac;;
+*)
+  ECHO_N='-n';;
 esac
 
-if expr a : '\(a\)' >/dev/null 2>&1; then
-  as_expr=expr
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
 else
-  as_expr=false
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
 fi
-
-rm -f conf$$ conf$$.exe conf$$.file
-echo >conf$$.file
-if ln -s conf$$.file conf$$ 2>/dev/null; then
-  # We could just check for DJGPP; but this test a) works b) is more generic
-  # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
-  if test -f conf$$.exe; then
-    # Don't use ln at all; we don't have any links
-    as_ln_s='cp -p'
-  else
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
     as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -pR'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -pR'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -pR'
   fi
-elif ln conf$$.file conf$$ 2>/dev/null; then
-  as_ln_s=ln
 else
-  as_ln_s='cp -p'
+  as_ln_s='cp -pR'
 fi
-rm -f conf$$ conf$$.exe conf$$.file
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
 
 if mkdir -p . 2>/dev/null; then
-  as_mkdir_p=:
+  as_mkdir_p='mkdir -p "$as_dir"'
 else
   test -d ./-p && rmdir ./-p
   as_mkdir_p=false
 fi
 
-as_executable_p="test -f"
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
 
 # Sed expression to map a string onto a valid CPP name.
 as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -231,38 +553,25 @@ as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
 as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
 
 
-# IFS
-# We need space, tab and new line, in precisely that order.
-as_nl='
-'
-IFS=" 	$as_nl"
-
-# CDPATH.
-$as_unset CDPATH
-
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
 
 # Name of the host.
-# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
 # so uname gets run too.
 ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
 
-exec 6>&1
-
 #
 # Initializations.
 #
 ac_default_prefix=/usr/local
+ac_clean_files=
 ac_config_libobj_dir=.
+LIBOBJS=
 cross_compiling=no
 subdirs=
 MFLAGS=
 MAKEFLAGS=
-SHELL=${CONFIG_SHELL-/bin/sh}
-
-# Maximum number of lines to put in a shell here document.
-# This variable seems obsolete.  It should probably be removed, and
-# only ac_max_sed_lines should be used.
-: ${ac_max_here_lines=38}
 
 # Identity of this package.
 PACKAGE_NAME=
@@ -270,14 +579,99 @@ PACKAGE_TARNAME=
 PACKAGE_VERSION=
 PACKAGE_STRING=
 PACKAGE_BUGREPORT=
+PACKAGE_URL=
 
 ac_unique_file="Makefile.in"
-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT CXX CXXFLAGS ac_ct_CXX RANLIB ac_ct_RANLIB CXX_PIC C_PIC LD_SHARED SO_EXT CXX_WFLAGS C_WFLAGS GDAL [...]
+ac_subst_vars='LTLIBOBJS
+LIBOBJS
+PQ_INCLUDE
+GRASS_GISBASE
+GRASS_INCLUDE
+AUTOLOAD_DIR
+GDAL_INC
+GDAL_CONFIG
+C_WFLAGS
+CXX_WFLAGS
+SO_EXT
+LD_SHARED
+C_PIC
+CXX_PIC
+RANLIB
+ac_ct_CXX
+CXXFLAGS
+CXX
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
 ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+with_ld_shared
+with_gdal
+with_autoload
+with_grass
+with_postgres_includes
+'
+      ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CXX
+CXXFLAGS
+CCC'
+
 
 # Initialize some variables set by options.
 ac_init_help=
 ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
 # The variables have the same names as the options, with
 # dashes changed to underlines.
 cache_file=/dev/null
@@ -300,34 +694,49 @@ x_libraries=NONE
 # and all the variables that are supposed to be based on exec_prefix
 # by default will actually change.
 # Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
 bindir='${exec_prefix}/bin'
 sbindir='${exec_prefix}/sbin'
 libexecdir='${exec_prefix}/libexec'
-datadir='${prefix}/share'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
 sysconfdir='${prefix}/etc'
 sharedstatedir='${prefix}/com'
 localstatedir='${prefix}/var'
-libdir='${exec_prefix}/lib'
 includedir='${prefix}/include'
 oldincludedir='/usr/include'
-infodir='${prefix}/info'
-mandir='${prefix}/man'
+docdir='${datarootdir}/doc/${PACKAGE}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
 
 ac_prev=
+ac_dashdash=
 for ac_option
 do
   # If the previous option needs an argument, assign it.
   if test -n "$ac_prev"; then
-    eval "$ac_prev=\$ac_option"
+    eval $ac_prev=\$ac_option
     ac_prev=
     continue
   fi
 
-  ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'`
+  case $ac_option in
+  *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+  *=)   ac_optarg= ;;
+  *)    ac_optarg=yes ;;
+  esac
 
   # Accept the important Cygnus configure options, so we can diagnose typos.
 
-  case $ac_option in
+  case $ac_dashdash$ac_option in
+  --)
+    ac_dashdash=yes ;;
 
   -bindir | --bindir | --bindi | --bind | --bin | --bi)
     ac_prev=bindir ;;
@@ -349,33 +758,59 @@ do
   --config-cache | -C)
     cache_file=config.cache ;;
 
-  -datadir | --datadir | --datadi | --datad | --data | --dat | --da)
+  -datadir | --datadir | --datadi | --datad)
     ac_prev=datadir ;;
-  -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \
-  | --da=*)
+  -datadir=* | --datadir=* | --datadi=* | --datad=*)
     datadir=$ac_optarg ;;
 
+  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+  | --dataroo | --dataro | --datar)
+    ac_prev=datarootdir ;;
+  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+    datarootdir=$ac_optarg ;;
+
   -disable-* | --disable-*)
-    ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
     # Reject names that are not valid shell variable names.
-    expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid feature name: $ac_feature" >&2
-   { (exit 1); exit 1; }; }
-    ac_feature=`echo $ac_feature | sed 's/-/_/g'`
-    eval "enable_$ac_feature=no" ;;
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid feature name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"enable_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval enable_$ac_useropt=no ;;
+
+  -docdir | --docdir | --docdi | --doc | --do)
+    ac_prev=docdir ;;
+  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+    docdir=$ac_optarg ;;
+
+  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+    ac_prev=dvidir ;;
+  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+    dvidir=$ac_optarg ;;
 
   -enable-* | --enable-*)
-    ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
     # Reject names that are not valid shell variable names.
-    expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid feature name: $ac_feature" >&2
-   { (exit 1); exit 1; }; }
-    ac_feature=`echo $ac_feature | sed 's/-/_/g'`
-    case $ac_option in
-      *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;;
-      *) ac_optarg=yes ;;
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid feature name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"enable_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
     esac
-    eval "enable_$ac_feature='$ac_optarg'" ;;
+    eval enable_$ac_useropt=\$ac_optarg ;;
 
   -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
   | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
@@ -402,6 +837,12 @@ do
   -host=* | --host=* | --hos=* | --ho=*)
     host_alias=$ac_optarg ;;
 
+  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+    ac_prev=htmldir ;;
+  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+  | --ht=*)
+    htmldir=$ac_optarg ;;
+
   -includedir | --includedir | --includedi | --included | --include \
   | --includ | --inclu | --incl | --inc)
     ac_prev=includedir ;;
@@ -426,13 +867,16 @@ do
   | --libexe=* | --libex=* | --libe=*)
     libexecdir=$ac_optarg ;;
 
+  -localedir | --localedir | --localedi | --localed | --locale)
+    ac_prev=localedir ;;
+  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+    localedir=$ac_optarg ;;
+
   -localstatedir | --localstatedir | --localstatedi | --localstated \
-  | --localstate | --localstat | --localsta | --localst \
-  | --locals | --local | --loca | --loc | --lo)
+  | --localstate | --localstat | --localsta | --localst | --locals)
     ac_prev=localstatedir ;;
   -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
-  | --localstate=* | --localstat=* | --localsta=* | --localst=* \
-  | --locals=* | --local=* | --loca=* | --loc=* | --lo=*)
+  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
     localstatedir=$ac_optarg ;;
 
   -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
@@ -497,6 +941,16 @@ do
   | --progr-tra=* | --program-tr=* | --program-t=*)
     program_transform_name=$ac_optarg ;;
 
+  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+    ac_prev=pdfdir ;;
+  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+    pdfdir=$ac_optarg ;;
+
+  -psdir | --psdir | --psdi | --psd | --ps)
+    ac_prev=psdir ;;
+  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+    psdir=$ac_optarg ;;
+
   -q | -quiet | --quiet | --quie | --qui | --qu | --q \
   | -silent | --silent | --silen | --sile | --sil)
     silent=yes ;;
@@ -547,26 +1001,36 @@ do
     ac_init_version=: ;;
 
   -with-* | --with-*)
-    ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
     # Reject names that are not valid shell variable names.
-    expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid package name: $ac_package" >&2
-   { (exit 1); exit 1; }; }
-    ac_package=`echo $ac_package| sed 's/-/_/g'`
-    case $ac_option in
-      *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;;
-      *) ac_optarg=yes ;;
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid package name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"with_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
     esac
-    eval "with_$ac_package='$ac_optarg'" ;;
+    eval with_$ac_useropt=\$ac_optarg ;;
 
   -without-* | --without-*)
-    ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
     # Reject names that are not valid shell variable names.
-    expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid package name: $ac_package" >&2
-   { (exit 1); exit 1; }; }
-    ac_package=`echo $ac_package | sed 's/-/_/g'`
-    eval "with_$ac_package=no" ;;
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid package name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"with_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval with_$ac_useropt=no ;;
 
   --x)
     # Obsolete; use --with-x.
@@ -586,27 +1050,26 @@ do
   | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
     x_libraries=$ac_optarg ;;
 
-  -*) { echo "$as_me: error: unrecognized option: $ac_option
-Try \`$0 --help' for more information." >&2
-   { (exit 1); exit 1; }; }
+  -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
     ;;
 
   *=*)
     ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
     # Reject names that are not valid shell variable names.
-    expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid variable name: $ac_envvar" >&2
-   { (exit 1); exit 1; }; }
-    ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`
-    eval "$ac_envvar='$ac_optarg'"
+    case $ac_envvar in #(
+      '' | [0-9]* | *[!_$as_cr_alnum]* )
+      as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+    esac
+    eval $ac_envvar=\$ac_optarg
     export $ac_envvar ;;
 
   *)
     # FIXME: should be removed in autoconf 3.0.
-    echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
     expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      echo "$as_me: WARNING: invalid host type: $ac_option" >&2
-    : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+    : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
     ;;
 
   esac
@@ -614,31 +1077,36 @@ done
 
 if test -n "$ac_prev"; then
   ac_option=--`echo $ac_prev | sed 's/_/-/g'`
-  { echo "$as_me: error: missing argument to $ac_option" >&2
-   { (exit 1); exit 1; }; }
+  as_fn_error $? "missing argument to $ac_option"
 fi
 
-# Be sure to have absolute paths.
-for ac_var in exec_prefix prefix
-do
-  eval ac_val=$`echo $ac_var`
-  case $ac_val in
-    [\\/$]* | ?:[\\/]* | NONE | '' ) ;;
-    *)  { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
-   { (exit 1); exit 1; }; };;
+if test -n "$ac_unrecognized_opts"; then
+  case $enable_option_checking in
+    no) ;;
+    fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
   esac
-done
+fi
 
-# Be sure to have absolute paths.
-for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \
-	      localstatedir libdir includedir oldincludedir infodir mandir
+# Check all directory arguments for consistency.
+for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
+		datadir sysconfdir sharedstatedir localstatedir includedir \
+		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+		libdir localedir mandir
 do
-  eval ac_val=$`echo $ac_var`
+  eval ac_val=\$$ac_var
+  # Remove trailing slashes.
   case $ac_val in
-    [\\/$]* | ?:[\\/]* ) ;;
-    *)  { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
-   { (exit 1); exit 1; }; };;
+    */ )
+      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+      eval $ac_var=\$ac_val;;
   esac
+  # Be sure to have absolute directory names.
+  case $ac_val in
+    [\\/$]* | ?:[\\/]* )  continue;;
+    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+  esac
+  as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
 done
 
 # There might be people who depend on the old broken behavior: `$host'
@@ -652,8 +1120,6 @@ target=$target_alias
 if test "x$host_alias" != x; then
   if test "x$build_alias" = x; then
     cross_compiling=maybe
-    echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
-    If a cross compiler is detected then cross compile mode will be used." >&2
   elif test "x$build_alias" != "x$host_alias"; then
     cross_compiling=yes
   fi
@@ -665,78 +1131,72 @@ test -n "$host_alias" && ac_tool_prefix=$host_alias-
 test "$silent" = yes && exec 6>/dev/null
 
 
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+  as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+  as_fn_error $? "pwd does not report name of working directory"
+
+
 # Find the source files, if location was not specified.
 if test -z "$srcdir"; then
   ac_srcdir_defaulted=yes
-  # Try the directory containing this script, then its parent.
-  ac_confdir=`(dirname "$0") 2>/dev/null ||
-$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$0" : 'X\(//\)[^/]' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| \
-	 .     : '\(.\)' 2>/dev/null ||
-echo X"$0" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
-  	  /^X\(\/\/\)[^/].*/{ s//\1/; q; }
-  	  /^X\(\/\/\)$/{ s//\1/; q; }
-  	  /^X\(\/\).*/{ s//\1/; q; }
-  	  s/.*/./; q'`
+  # Try the directory containing this script, then the parent directory.
+  ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_myself" : 'X\(//\)[^/]' \| \
+	 X"$as_myself" : 'X\(//\)$' \| \
+	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
   srcdir=$ac_confdir
-  if test ! -r $srcdir/$ac_unique_file; then
+  if test ! -r "$srcdir/$ac_unique_file"; then
     srcdir=..
   fi
 else
   ac_srcdir_defaulted=no
 fi
-if test ! -r $srcdir/$ac_unique_file; then
-  if test "$ac_srcdir_defaulted" = yes; then
-    { echo "$as_me: error: cannot find sources ($ac_unique_file) in $ac_confdir or .." >&2
-   { (exit 1); exit 1; }; }
-  else
-    { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
-   { (exit 1); exit 1; }; }
-  fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+  as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+	pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+  srcdir=.
 fi
-(cd $srcdir && test -r ./$ac_unique_file) 2>/dev/null ||
-  { echo "$as_me: error: sources are in $srcdir, but \`cd $srcdir' does not work" >&2
-   { (exit 1); exit 1; }; }
-srcdir=`echo "$srcdir" | sed 's%\([^\\/]\)[\\/]*$%\1%'`
-ac_env_build_alias_set=${build_alias+set}
-ac_env_build_alias_value=$build_alias
-ac_cv_env_build_alias_set=${build_alias+set}
-ac_cv_env_build_alias_value=$build_alias
-ac_env_host_alias_set=${host_alias+set}
-ac_env_host_alias_value=$host_alias
-ac_cv_env_host_alias_set=${host_alias+set}
-ac_cv_env_host_alias_value=$host_alias
-ac_env_target_alias_set=${target_alias+set}
-ac_env_target_alias_value=$target_alias
-ac_cv_env_target_alias_set=${target_alias+set}
-ac_cv_env_target_alias_value=$target_alias
-ac_env_CC_set=${CC+set}
-ac_env_CC_value=$CC
-ac_cv_env_CC_set=${CC+set}
-ac_cv_env_CC_value=$CC
-ac_env_CFLAGS_set=${CFLAGS+set}
-ac_env_CFLAGS_value=$CFLAGS
-ac_cv_env_CFLAGS_set=${CFLAGS+set}
-ac_cv_env_CFLAGS_value=$CFLAGS
-ac_env_LDFLAGS_set=${LDFLAGS+set}
-ac_env_LDFLAGS_value=$LDFLAGS
-ac_cv_env_LDFLAGS_set=${LDFLAGS+set}
-ac_cv_env_LDFLAGS_value=$LDFLAGS
-ac_env_CPPFLAGS_set=${CPPFLAGS+set}
-ac_env_CPPFLAGS_value=$CPPFLAGS
-ac_cv_env_CPPFLAGS_set=${CPPFLAGS+set}
-ac_cv_env_CPPFLAGS_value=$CPPFLAGS
-ac_env_CXX_set=${CXX+set}
-ac_env_CXX_value=$CXX
-ac_cv_env_CXX_set=${CXX+set}
-ac_cv_env_CXX_value=$CXX
-ac_env_CXXFLAGS_set=${CXXFLAGS+set}
-ac_env_CXXFLAGS_value=$CXXFLAGS
-ac_cv_env_CXXFLAGS_set=${CXXFLAGS+set}
-ac_cv_env_CXXFLAGS_value=$CXXFLAGS
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+  eval ac_env_${ac_var}_set=\${${ac_var}+set}
+  eval ac_env_${ac_var}_value=\$${ac_var}
+  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+  eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
 
 #
 # Report the --help message.
@@ -759,20 +1219,17 @@ Configuration:
       --help=short        display options specific to this package
       --help=recursive    display the short help of all the included packages
   -V, --version           display version information and exit
-  -q, --quiet, --silent   do not print \`checking...' messages
+  -q, --quiet, --silent   do not print \`checking ...' messages
       --cache-file=FILE   cache test results in FILE [disabled]
   -C, --config-cache      alias for \`--cache-file=config.cache'
   -n, --no-create         do not create output files
       --srcdir=DIR        find the sources in DIR [configure dir or \`..']
 
-_ACEOF
-
-  cat <<_ACEOF
 Installation directories:
   --prefix=PREFIX         install architecture-independent files in PREFIX
-			  [$ac_default_prefix]
+                          [$ac_default_prefix]
   --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
-			  [PREFIX]
+                          [PREFIX]
 
 By default, \`make install' will install all the files in
 \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
@@ -782,18 +1239,25 @@ for instance \`--prefix=\$HOME'.
 For better control, use the options below.
 
 Fine tuning of the installation directories:
-  --bindir=DIR           user executables [EPREFIX/bin]
-  --sbindir=DIR          system admin executables [EPREFIX/sbin]
-  --libexecdir=DIR       program executables [EPREFIX/libexec]
-  --datadir=DIR          read-only architecture-independent data [PREFIX/share]
-  --sysconfdir=DIR       read-only single-machine data [PREFIX/etc]
-  --sharedstatedir=DIR   modifiable architecture-independent data [PREFIX/com]
-  --localstatedir=DIR    modifiable single-machine data [PREFIX/var]
-  --libdir=DIR           object code libraries [EPREFIX/lib]
-  --includedir=DIR       C header files [PREFIX/include]
-  --oldincludedir=DIR    C header files for non-gcc [/usr/include]
-  --infodir=DIR          info documentation [PREFIX/info]
-  --mandir=DIR           man documentation [PREFIX/man]
+  --bindir=DIR            user executables [EPREFIX/bin]
+  --sbindir=DIR           system admin executables [EPREFIX/sbin]
+  --libexecdir=DIR        program executables [EPREFIX/libexec]
+  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --libdir=DIR            object code libraries [EPREFIX/lib]
+  --includedir=DIR        C header files [PREFIX/include]
+  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
+  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
+  --infodir=DIR           info documentation [DATAROOTDIR/info]
+  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
+  --mandir=DIR            man documentation [DATAROOTDIR/man]
+  --docdir=DIR            documentation root [DATAROOTDIR/doc/PACKAGE]
+  --htmldir=DIR           html documentation [DOCDIR]
+  --dvidir=DIR            dvi documentation [DOCDIR]
+  --pdfdir=DIR            pdf documentation [DOCDIR]
+  --psdir=DIR             ps documentation [DOCDIR]
 _ACEOF
 
   cat <<\_ACEOF
@@ -809,135 +1273,233 @@ Optional Packages:
   --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
   --with-ld-shared=cmd    provide shared library link
   --with-gdal=PATH        GDAL (PATH is path to gdal-config)
-  --with-autoload=DIR      Directory for autoload drivers
+  --with-autoload=DIR     Directory for autoload drivers
   --with-grass=ARG        Include GRASS support (ARG=GRASS install tree dir)
+  --with-postgres-includes=DIR     use PostgreSQL includes in DIR
 
 Some influential environment variables:
   CC          C compiler command
   CFLAGS      C compiler flags
   LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
               nonstandard directory <lib dir>
-  CPPFLAGS    C/C++ preprocessor flags, e.g. -I<include dir> if you have
-              headers in a nonstandard directory <include dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
   CXX         C++ compiler command
   CXXFLAGS    C++ compiler flags
 
 Use these variables to override the choices made by `configure' or to help
 it to find libraries and programs with nonstandard names/locations.
 
+Report bugs to the package provider.
 _ACEOF
+ac_status=$?
 fi
 
 if test "$ac_init_help" = "recursive"; then
   # If there are subdirs, report their specific --help.
-  ac_popdir=`pwd`
   for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
-    test -d $ac_dir || continue
+    test -d "$ac_dir" ||
+      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+      continue
     ac_builddir=.
 
-if test "$ac_dir" != .; then
-  ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
-  # A "../" for each directory in $ac_dir_suffix.
-  ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
-else
-  ac_dir_suffix= ac_top_builddir=
-fi
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
 
 case $srcdir in
-  .)  # No --srcdir option.  We are building in place.
+  .)  # We are building in place.
     ac_srcdir=.
-    if test -z "$ac_top_builddir"; then
-       ac_top_srcdir=.
-    else
-       ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
-    fi ;;
-  [\\/]* | ?:[\\/]* )  # Absolute path.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
     ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir ;;
-  *) # Relative path.
-    ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_builddir$srcdir ;;
-esac
-
-# Do not use `cd foo && pwd` to compute absolute paths, because
-# the directories may not exist.
-case `pwd` in
-.) ac_abs_builddir="$ac_dir";;
-*)
-  case "$ac_dir" in
-  .) ac_abs_builddir=`pwd`;;
-  [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";;
-  *) ac_abs_builddir=`pwd`/"$ac_dir";;
-  esac;;
-esac
-case $ac_abs_builddir in
-.) ac_abs_top_builddir=${ac_top_builddir}.;;
-*)
-  case ${ac_top_builddir}. in
-  .) ac_abs_top_builddir=$ac_abs_builddir;;
-  [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;;
-  *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;;
-  esac;;
-esac
-case $ac_abs_builddir in
-.) ac_abs_srcdir=$ac_srcdir;;
-*)
-  case $ac_srcdir in
-  .) ac_abs_srcdir=$ac_abs_builddir;;
-  [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;;
-  *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;;
-  esac;;
-esac
-case $ac_abs_builddir in
-.) ac_abs_top_srcdir=$ac_top_srcdir;;
-*)
-  case $ac_top_srcdir in
-  .) ac_abs_top_srcdir=$ac_abs_builddir;;
-  [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;;
-  *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;;
-  esac;;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
 esac
-
-    cd $ac_dir
-    # Check for guested configure; otherwise get Cygnus style configure.
-    if test -f $ac_srcdir/configure.gnu; then
-      echo
-      $SHELL $ac_srcdir/configure.gnu  --help=recursive
-    elif test -f $ac_srcdir/configure; then
-      echo
-      $SHELL $ac_srcdir/configure  --help=recursive
-    elif test -f $ac_srcdir/configure.ac ||
-	   test -f $ac_srcdir/configure.in; then
-      echo
-      $ac_configure --help
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+    cd "$ac_dir" || { ac_status=$?; continue; }
+    # Check for guested configure.
+    if test -f "$ac_srcdir/configure.gnu"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+    elif test -f "$ac_srcdir/configure"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure" --help=recursive
     else
-      echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
-    fi
-    cd $ac_popdir
+      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+    fi || ac_status=$?
+    cd "$ac_pwd" || { ac_status=$?; break; }
   done
 fi
 
-test -n "$ac_init_help" && exit 0
+test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
+configure
+generated by GNU Autoconf 2.69
 
-Copyright (C) 2003 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
 This configure script is free software; the Free Software Foundation
 gives unlimited permission to copy, distribute and modify it.
 _ACEOF
-  exit 0
+  exit
 fi
-exec 5>config.log
-cat >&5 <<_ACEOF
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
+
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 test -x conftest$ac_exeext
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
 It was created by $as_me, which was
-generated by GNU Autoconf 2.59.  Invocation command line was
+generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
 
 _ACEOF
+exec 5>>config.log
 {
 cat <<_ASUNAME
 ## --------- ##
@@ -956,7 +1518,7 @@ uname -v = `(uname -v) 2>/dev/null || echo unknown`
 /bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
 /usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
 /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
-hostinfo               = `(hostinfo) 2>/dev/null               || echo unknown`
+/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
 /bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
 /usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
 /bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
@@ -968,8 +1530,9 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  echo "PATH: $as_dir"
-done
+    $as_echo "PATH: $as_dir"
+  done
+IFS=$as_save_IFS
 
 } >&5
 
@@ -991,7 +1554,6 @@ _ACEOF
 ac_configure_args=
 ac_configure_args0=
 ac_configure_args1=
-ac_sep=
 ac_must_keep_next=false
 for ac_pass in 1 2
 do
@@ -1002,13 +1564,13 @@ do
     -q | -quiet | --quiet | --quie | --qui | --qu | --q \
     | -silent | --silent | --silen | --sile | --sil)
       continue ;;
-    *" "*|*"	"*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*)
-      ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    *\'*)
+      ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
     esac
     case $ac_pass in
-    1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
     2)
-      ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+      as_fn_append ac_configure_args1 " '$ac_arg'"
       if test $ac_must_keep_next = true; then
 	ac_must_keep_next=false # Got value, back to normal.
       else
@@ -1024,104 +1586,115 @@ do
 	  -* ) ac_must_keep_next=true ;;
 	esac
       fi
-      ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'"
-      # Get rid of the leading space.
-      ac_sep=" "
+      as_fn_append ac_configure_args " '$ac_arg'"
       ;;
     esac
   done
 done
-$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
-$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
 
 # When interrupted or exit'd, cleanup temporary files, and complete
 # config.log.  We remove comments because anyway the quotes in there
 # would cause problems or look ugly.
-# WARNING: Be sure not to use single quotes in there, as some shells,
-# such as our DU 5.0 friend, will then `close' the trap.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
 trap 'exit_status=$?
   # Save into config.log some information that might help in debugging.
   {
     echo
 
-    cat <<\_ASBOX
-## ---------------- ##
+    $as_echo "## ---------------- ##
 ## Cache variables. ##
-## ---------------- ##
-_ASBOX
+## ---------------- ##"
     echo
     # The following way of writing the cache mishandles newlines in values,
-{
+(
+  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+    eval ac_val=\$$ac_var
+    case $ac_val in #(
+    *${as_nl}*)
+      case $ac_var in #(
+      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+      esac
+      case $ac_var in #(
+      _ | IFS | as_nl) ;; #(
+      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+      *) { eval $ac_var=; unset $ac_var;} ;;
+      esac ;;
+    esac
+  done
   (set) 2>&1 |
-    case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in
-    *ac_space=\ *)
+    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+    *${as_nl}ac_space=\ *)
       sed -n \
-	"s/'"'"'/'"'"'\\\\'"'"''"'"'/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p"
-      ;;
+	"s/'\''/'\''\\\\'\'''\''/g;
+	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+      ;; #(
     *)
-      sed -n \
-	"s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p"
+      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
       ;;
-    esac;
-}
+    esac |
+    sort
+)
     echo
 
-    cat <<\_ASBOX
-## ----------------- ##
+    $as_echo "## ----------------- ##
 ## Output variables. ##
-## ----------------- ##
-_ASBOX
+## ----------------- ##"
     echo
     for ac_var in $ac_subst_vars
     do
-      eval ac_val=$`echo $ac_var`
-      echo "$ac_var='"'"'$ac_val'"'"'"
+      eval ac_val=\$$ac_var
+      case $ac_val in
+      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+      esac
+      $as_echo "$ac_var='\''$ac_val'\''"
     done | sort
     echo
 
     if test -n "$ac_subst_files"; then
-      cat <<\_ASBOX
-## ------------- ##
-## Output files. ##
-## ------------- ##
-_ASBOX
+      $as_echo "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
       echo
       for ac_var in $ac_subst_files
       do
-	eval ac_val=$`echo $ac_var`
-	echo "$ac_var='"'"'$ac_val'"'"'"
+	eval ac_val=\$$ac_var
+	case $ac_val in
+	*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+	esac
+	$as_echo "$ac_var='\''$ac_val'\''"
       done | sort
       echo
     fi
 
     if test -s confdefs.h; then
-      cat <<\_ASBOX
-## ----------- ##
+      $as_echo "## ----------- ##
 ## confdefs.h. ##
-## ----------- ##
-_ASBOX
+## ----------- ##"
       echo
-      sed "/^$/d" confdefs.h | sort
+      cat confdefs.h
       echo
     fi
     test "$ac_signal" != 0 &&
-      echo "$as_me: caught signal $ac_signal"
-    echo "$as_me: exit $exit_status"
+      $as_echo "$as_me: caught signal $ac_signal"
+    $as_echo "$as_me: exit $exit_status"
   } >&5
-  rm -f core *.core &&
-  rm -rf conftest* confdefs* conf$$* $ac_clean_files &&
+  rm -f core *.core core.conftest.* &&
+    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
     exit $exit_status
-     ' 0
+' 0
 for ac_signal in 1 2 13 15; do
-  trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
 done
 ac_signal=0
 
 # confdefs.h avoids OS command line length limits that DEFS can exceed.
-rm -rf conftest* confdefs.h
-# AIX cpp loses on an empty file, so make sure it contains at least a newline.
-echo >confdefs.h
+rm -f -r conftest* confdefs.h
+
+$as_echo "/* confdefs.h */" > confdefs.h
 
 # Predefined preprocessor variables.
 
@@ -1129,42 +1702,57 @@ cat >>confdefs.h <<_ACEOF
 #define PACKAGE_NAME "$PACKAGE_NAME"
 _ACEOF
 
-
 cat >>confdefs.h <<_ACEOF
 #define PACKAGE_TARNAME "$PACKAGE_TARNAME"
 _ACEOF
 
-
 cat >>confdefs.h <<_ACEOF
 #define PACKAGE_VERSION "$PACKAGE_VERSION"
 _ACEOF
 
-
 cat >>confdefs.h <<_ACEOF
 #define PACKAGE_STRING "$PACKAGE_STRING"
 _ACEOF
 
-
 cat >>confdefs.h <<_ACEOF
 #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
 _ACEOF
 
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
 
 # Let the site file select an alternate cache file if it wants to.
-# Prefer explicitly selected file to automatically selected ones.
-if test -z "$CONFIG_SITE"; then
-  if test "x$prefix" != xNONE; then
-    CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site"
-  else
-    CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
-  fi
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+  # We do not want a PATH search for config.site.
+  case $CONFIG_SITE in #((
+    -*)  ac_site_file1=./$CONFIG_SITE;;
+    */*) ac_site_file1=$CONFIG_SITE;;
+    *)   ac_site_file1=./$CONFIG_SITE;;
+  esac
+elif test "x$prefix" != xNONE; then
+  ac_site_file1=$prefix/share/config.site
+  ac_site_file2=$prefix/etc/config.site
+else
+  ac_site_file1=$ac_default_prefix/share/config.site
+  ac_site_file2=$ac_default_prefix/etc/config.site
 fi
-for ac_site_file in $CONFIG_SITE; do
-  if test -r "$ac_site_file"; then
-    { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
-echo "$as_me: loading site script $ac_site_file" >&6;}
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+  test "x$ac_site_file" = xNONE && continue
+  if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
     sed 's/^/| /' "$ac_site_file" >&5
-    . "$ac_site_file"
+    . "$ac_site_file" \
+      || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
   fi
 done
 
@@ -1172,53 +1760,63 @@ done
 # Check that the precious variables saved in the cache have kept the same
 # value.
 ac_cache_corrupted=false
-for ac_var in `(set) 2>&1 |
-	       sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do
+for ac_var in $ac_precious_vars; do
   eval ac_old_set=\$ac_cv_env_${ac_var}_set
   eval ac_new_set=\$ac_env_${ac_var}_set
-  eval ac_old_val="\$ac_cv_env_${ac_var}_value"
-  eval ac_new_val="\$ac_env_${ac_var}_value"
+  eval ac_old_val=\$ac_cv_env_${ac_var}_value
+  eval ac_new_val=\$ac_env_${ac_var}_value
   case $ac_old_set,$ac_new_set in
     set,)
-      { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
-echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
       ac_cache_corrupted=: ;;
     ,set)
-      { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
-echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
       ac_cache_corrupted=: ;;
     ,);;
     *)
       if test "x$ac_old_val" != "x$ac_new_val"; then
-	{ echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
-echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
-	{ echo "$as_me:$LINENO:   former value:  $ac_old_val" >&5
-echo "$as_me:   former value:  $ac_old_val" >&2;}
-	{ echo "$as_me:$LINENO:   current value: $ac_new_val" >&5
-echo "$as_me:   current value: $ac_new_val" >&2;}
-	ac_cache_corrupted=:
+	# differences in whitespace do not lead to failure.
+	ac_old_val_w=`echo x $ac_old_val`
+	ac_new_val_w=`echo x $ac_new_val`
+	if test "$ac_old_val_w" != "$ac_new_val_w"; then
+	  { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+	  ac_cache_corrupted=:
+	else
+	  { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+	  eval $ac_var=\$ac_old_val
+	fi
+	{ $as_echo "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
+$as_echo "$as_me:   former value:  \`$ac_old_val'" >&2;}
+	{ $as_echo "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
+$as_echo "$as_me:   current value: \`$ac_new_val'" >&2;}
       fi;;
   esac
   # Pass precious variables to config.status.
   if test "$ac_new_set" = set; then
     case $ac_new_val in
-    *" "*|*"	"*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*)
-      ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
     *) ac_arg=$ac_var=$ac_new_val ;;
     esac
     case " $ac_configure_args " in
       *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
-      *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
     esac
   fi
 done
 if $ac_cache_corrupted; then
-  { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
-echo "$as_me: error: changes in the environment can compromise the build" >&2;}
-  { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
-echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
-   { (exit 1); exit 1; }; }
+  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+  as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
 fi
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
 
 ac_ext=c
 ac_cpp='$CPP $CPPFLAGS'
@@ -1230,23 +1828,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
 
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
 ac_ext=c
 ac_cpp='$CPP $CPPFLAGS'
 ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -1255,10 +1836,10 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
 if test -n "$ac_tool_prefix"; then
   # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
 set dummy ${ac_tool_prefix}gcc; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$CC"; then
   ac_cv_prog_CC="$CC" # Let the user override the test.
@@ -1268,35 +1849,37 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_CC="${ac_tool_prefix}gcc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 fi
 fi
 CC=$ac_cv_prog_CC
 if test -n "$CC"; then
-  echo "$as_me:$LINENO: result: $CC" >&5
-echo "${ECHO_T}$CC" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
+
 fi
 if test -z "$ac_cv_prog_CC"; then
   ac_ct_CC=$CC
   # Extract the first word of "gcc", so it can be a program name with args.
 set dummy gcc; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$ac_ct_CC"; then
   ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
@@ -1306,39 +1889,50 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_ac_ct_CC="gcc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 fi
 fi
 ac_ct_CC=$ac_cv_prog_ac_ct_CC
 if test -n "$ac_ct_CC"; then
-  echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
-echo "${ECHO_T}$ac_ct_CC" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
-  CC=$ac_ct_CC
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
 else
   CC="$ac_cv_prog_CC"
 fi
 
 if test -z "$CC"; then
-  if test -n "$ac_tool_prefix"; then
-  # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+          if test -n "$ac_tool_prefix"; then
+    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
 set dummy ${ac_tool_prefix}cc; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$CC"; then
   ac_cv_prog_CC="$CC" # Let the user override the test.
@@ -1348,77 +1942,37 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_CC="${ac_tool_prefix}cc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 fi
 fi
 CC=$ac_cv_prog_CC
 if test -n "$CC"; then
-  echo "$as_me:$LINENO: result: $CC" >&5
-echo "${ECHO_T}$CC" >&6
-else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
-fi
-
-fi
-if test -z "$ac_cv_prog_CC"; then
-  ac_ct_CC=$CC
-  # Extract the first word of "cc", so it can be a program name with args.
-set dummy cc; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
-    ac_cv_prog_ac_ct_CC="cc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-done
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
-echo "${ECHO_T}$ac_ct_CC" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
-  CC=$ac_ct_CC
-else
-  CC="$ac_cv_prog_CC"
-fi
 
+  fi
 fi
 if test -z "$CC"; then
   # Extract the first word of "cc", so it can be a program name with args.
 set dummy cc; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$CC"; then
   ac_cv_prog_CC="$CC" # Let the user override the test.
@@ -1429,18 +1983,19 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
        ac_prog_rejected=yes
        continue
      fi
     ac_cv_prog_CC="cc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 if test $ac_prog_rejected = yes; then
   # We found a bogon in the path, so make sure we never use it.
@@ -1458,24 +2013,25 @@ fi
 fi
 CC=$ac_cv_prog_CC
 if test -n "$CC"; then
-  echo "$as_me:$LINENO: result: $CC" >&5
-echo "${ECHO_T}$CC" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
+
 fi
 if test -z "$CC"; then
   if test -n "$ac_tool_prefix"; then
-  for ac_prog in cl
+  for ac_prog in cl.exe
   do
     # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
 set dummy $ac_tool_prefix$ac_prog; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$CC"; then
   ac_cv_prog_CC="$CC" # Let the user override the test.
@@ -1485,39 +2041,41 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 fi
 fi
 CC=$ac_cv_prog_CC
 if test -n "$CC"; then
-  echo "$as_me:$LINENO: result: $CC" >&5
-echo "${ECHO_T}$CC" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
+
     test -n "$CC" && break
   done
 fi
 if test -z "$CC"; then
   ac_ct_CC=$CC
-  for ac_prog in cl
+  for ac_prog in cl.exe
 do
   # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$ac_ct_CC"; then
   ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
@@ -1527,66 +2085,78 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_ac_ct_CC="$ac_prog"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 fi
 fi
 ac_ct_CC=$ac_cv_prog_ac_ct_CC
 if test -n "$ac_ct_CC"; then
-  echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
-echo "${ECHO_T}$ac_ct_CC" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
+
   test -n "$ac_ct_CC" && break
 done
 
-  CC=$ac_ct_CC
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
 fi
 
 fi
 
 
-test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
-See \`config.log' for more details." >&5
-echo "$as_me: error: no acceptable C compiler found in \$PATH
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
 
 # Provide some information about the compiler.
-echo "$as_me:$LINENO:" \
-     "checking for C compiler version" >&5
-ac_compiler=`set X $ac_compile; echo $2`
-{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version </dev/null >&5\"") >&5
-  (eval $ac_compiler --version </dev/null >&5) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
-{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v </dev/null >&5\"") >&5
-  (eval $ac_compiler -v </dev/null >&5) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
-{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V </dev/null >&5\"") >&5
-  (eval $ac_compiler -V </dev/null >&5) 2>&5
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
   ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
 
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
 int
@@ -1598,112 +2168,108 @@ main ()
 }
 _ACEOF
 ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files a.out a.exe b.out"
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
 # Try to create an executable without -o first, disregard a.out.
 # It will help us diagnose broken compilers, and finding out an intuition
 # of exeext.
-echo "$as_me:$LINENO: checking for C compiler default output file name" >&5
-echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6
-ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
-if { (eval echo "$as_me:$LINENO: \"$ac_link_default\"") >&5
-  (eval $ac_link_default) 2>&5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+  esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link_default") 2>&5
   ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; then
-  # Find the output, starting from the most likely.  This scheme is
-# not robust to junk in `.', hence go to wildcards (a.*) only as a last
-# resort.
-
-# Be careful to initialize this variable, since it used to be cached.
-# Otherwise an old cache value of `no' led to `EXEEXT = no' in a Makefile.
-ac_cv_exeext=
-# b.out is created by i960 compilers.
-for ac_file in a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile.  We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
 do
   test -f "$ac_file" || continue
   case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj )
-	;;
-    conftest.$ac_ext )
-	# This is the source file.
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
 	;;
     [ab].out )
 	# We found the default executable, but exeext='' is most
 	# certainly right.
 	break;;
     *.* )
-	ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	# FIXME: I believe we export ac_cv_exeext for Libtool,
-	# but it would be cool to find out if it's true.  Does anybody
-	# maintain Libtool? --akim.
-	export ac_cv_exeext
+	if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+	then :; else
+	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+	fi
+	# We set ac_cv_exeext here because the later test for it is not
+	# safe: cross compilers may not add the suffix if given an `-o'
+	# argument, so we may need to know it at that point already.
+	# Even if this section looks crufty: it has the advantage of
+	# actually working.
 	break;;
     * )
 	break;;
   esac
 done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
 else
-  echo "$as_me: failed program was:" >&5
+  ac_file=''
+fi
+if test -z "$ac_file"; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-{ { echo "$as_me:$LINENO: error: C compiler cannot create executables
-See \`config.log' for more details." >&5
-echo "$as_me: error: C compiler cannot create executables
-See \`config.log' for more details." >&2;}
-   { (exit 77); exit 77; }; }
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
 fi
-
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
 ac_exeext=$ac_cv_exeext
-echo "$as_me:$LINENO: result: $ac_file" >&5
-echo "${ECHO_T}$ac_file" >&6
-
-# Check the compiler produces executables we can run.  If not, either
-# the compiler is broken, or we cross compile.
-echo "$as_me:$LINENO: checking whether the C compiler works" >&5
-echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6
-# FIXME: These cross compiler hacks should be removed for Autoconf 3.0
-# If not cross compiling, check that we can run a simple program.
-if test "$cross_compiling" != yes; then
-  if { ac_try='./$ac_file'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-    cross_compiling=no
-  else
-    if test "$cross_compiling" = maybe; then
-	cross_compiling=yes
-    else
-	{ { echo "$as_me:$LINENO: error: cannot run C compiled programs.
-If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." >&5
-echo "$as_me: error: cannot run C compiled programs.
-If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
-    fi
-  fi
-fi
-echo "$as_me:$LINENO: result: yes" >&5
-echo "${ECHO_T}yes" >&6
 
-rm -f a.out a.exe conftest$ac_cv_exeext b.out
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
 ac_clean_files=$ac_clean_files_save
-# Check the compiler produces executables we can run.  If not, either
-# the compiler is broken, or we cross compile.
-echo "$as_me:$LINENO: checking whether we are cross compiling" >&5
-echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6
-echo "$as_me:$LINENO: result: $cross_compiling" >&5
-echo "${ECHO_T}$cross_compiling" >&6
-
-echo "$as_me:$LINENO: checking for suffix of executables" >&5
-echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6
-if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
-  (eval $ac_link) 2>&5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
   ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; then
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
   # If both `conftest.exe' and `conftest' are `present' (well, observable)
 # catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
 # work properly (i.e., refer to `conftest.exe'), while it won't with
@@ -1711,38 +2277,90 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
 for ac_file in conftest.exe conftest conftest.*; do
   test -f "$ac_file" || continue
   case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;;
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
     *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	  export ac_cv_exeext
 	  break;;
     * ) break;;
   esac
 done
 else
-  { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." >&5
-echo "$as_me: error: cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
 fi
-
-rm -f conftest$ac_cv_exeext
-echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5
-echo "${ECHO_T}$ac_cv_exeext" >&6
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
 
 rm -f conftest.$ac_ext
 EXEEXT=$ac_cv_exeext
 ac_exeext=$EXEEXT
-echo "$as_me:$LINENO: checking for suffix of object files" >&5
-echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6
-if test "${ac_cv_objext+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+  ;
+  return 0;
+}
 _ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run.  If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+  { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+  if { ac_try='./conftest$ac_cv_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+    cross_compiling=no
+  else
+    if test "$cross_compiling" = maybe; then
+	cross_compiling=yes
+    else
+	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+    fi
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
 int
@@ -1754,45 +2372,46 @@ main ()
 }
 _ACEOF
 rm -f conftest.o conftest.obj
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>&5
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>&5
   ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; then
-  for ac_file in `(ls conftest.o conftest.obj; ls conftest.*) 2>/dev/null`; do
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  for ac_file in conftest.o conftest.obj conftest.*; do
+  test -f "$ac_file" || continue;
   case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg ) ;;
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
     *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
        break;;
   esac
 done
 else
-  echo "$as_me: failed program was:" >&5
+  $as_echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." >&5
-echo "$as_me: error: cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
 fi
-
 rm -f conftest.$ac_cv_objext conftest.$ac_ext
 fi
-echo "$as_me:$LINENO: result: $ac_cv_objext" >&5
-echo "${ECHO_T}$ac_cv_objext" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
 OBJEXT=$ac_cv_objext
 ac_objext=$OBJEXT
-echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
-echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6
-if test "${ac_cv_c_compiler_gnu+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
 int
@@ -1806,55 +2425,34 @@ main ()
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_c_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
+if ac_fn_c_try_compile "$LINENO"; then :
   ac_compiler_gnu=yes
 else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-ac_compiler_gnu=no
+  ac_compiler_gnu=no
 fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 ac_cv_c_compiler_gnu=$ac_compiler_gnu
 
 fi
-echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
-echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6
-GCC=`test $ac_compiler_gnu = yes && echo yes`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+  GCC=yes
+else
+  GCC=
+fi
 ac_test_CFLAGS=${CFLAGS+set}
 ac_save_CFLAGS=$CFLAGS
-CFLAGS="-g"
-echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
-echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6
-if test "${ac_cv_prog_cc_g+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_save_c_werror_flag=$ac_c_werror_flag
+   ac_c_werror_flag=yes
+   ac_cv_prog_cc_g=no
+   CFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
 int
@@ -1865,39 +2463,49 @@ main ()
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_c_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
+if ac_fn_c_try_compile "$LINENO"; then :
   ac_cv_prog_cc_g=yes
 else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
+  CFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+  ac_c_werror_flag=$ac_save_c_werror_flag
+	 CFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
 
-ac_cv_prog_cc_g=no
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 fi
-echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
-echo "${ECHO_T}$ac_cv_prog_cc_g" >&6
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
 if test "$ac_test_CFLAGS" = set; then
   CFLAGS=$ac_save_CFLAGS
 elif test $ac_cv_prog_cc_g = yes; then
@@ -1913,23 +2521,18 @@ else
     CFLAGS=
   fi
 fi
-echo "$as_me:$LINENO: checking for $CC option to accept ANSI C" >&5
-echo $ECHO_N "checking for $CC option to accept ANSI C... $ECHO_C" >&6
-if test "${ac_cv_prog_cc_stdc+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
-  ac_cv_prog_cc_stdc=no
+  ac_cv_prog_cc_c89=no
 ac_save_CC=$CC
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 #include <stdarg.h>
 #include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
+struct stat;
 /* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
 struct buf { int x; };
 FILE * (*rcsopen) (struct buf *, struct stat *, int);
@@ -1952,12 +2555,17 @@ static char *f (char * (*g) (char **, int), char **p, ...)
 /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
    function prototypes and stuff, but not '\xHH' hex character constants.
    These don't provoke an error unfortunately, instead are silently treated
-   as 'x'.  The following induces an error, until -std1 is added to get
+   as 'x'.  The following induces an error, until -std is added to get
    proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
    array size at least.  It's necessary to write '\x00'==0 to get something
-   that's true only with -std1.  */
+   that's true only with -std.  */
 int osf4_cc_array ['\x00' == 0 ? 1 : -1];
 
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+   inside strings and character constants.  */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
 int test (int i, double x);
 struct s1 {int (*f) (int a);};
 struct s2 {int (*f) (double a);};
@@ -1972,225 +2580,61 @@ return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
   return 0;
 }
 _ACEOF
-# Don't try gcc -ansi; that turns off useful extensions and
-# breaks some systems' header files.
-# AIX			-qlanglvl=ansi
-# Ultrix and OSF/1	-std1
-# HP-UX 10.20 and later	-Ae
-# HP-UX older versions	-Aa -D_HPUX_SOURCE
-# SVR4			-Xc -D__EXTENSIONS__
-for ac_arg in "" -qlanglvl=ansi -std1 -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
 do
   CC="$ac_save_CC $ac_arg"
-  rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_c_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  ac_cv_prog_cc_stdc=$ac_arg
-break
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
+  if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_c89=$ac_arg
 fi
-rm -f conftest.err conftest.$ac_objext
+rm -f core conftest.err conftest.$ac_objext
+  test "x$ac_cv_prog_cc_c89" != "xno" && break
 done
-rm -f conftest.$ac_ext conftest.$ac_objext
+rm -f conftest.$ac_ext
 CC=$ac_save_CC
 
 fi
-
-case "x$ac_cv_prog_cc_stdc" in
-  x|xno)
-    echo "$as_me:$LINENO: result: none needed" >&5
-echo "${ECHO_T}none needed" >&6 ;;
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+  x)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+  xno)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
   *)
-    echo "$as_me:$LINENO: result: $ac_cv_prog_cc_stdc" >&5
-echo "${ECHO_T}$ac_cv_prog_cc_stdc" >&6
-    CC="$CC $ac_cv_prog_cc_stdc" ;;
+    CC="$CC $ac_cv_prog_cc_c89"
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
 esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
 
-# Some people use a C++ compiler to compile C.  Since we use `exit',
-# in C++ we need to declare it.  In case someone uses the same compiler
-# for both compiling C and C++ we need to have the C++ compiler decide
-# the declaration of exit, since it's the most demanding environment.
-cat >conftest.$ac_ext <<_ACEOF
-#ifndef __cplusplus
-  choke me
-#endif
-_ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_c_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  for ac_declaration in \
-   '' \
-   'extern "C" void std::exit (int) throw (); using std::exit;' \
-   'extern "C" void std::exit (int); using std::exit;' \
-   'extern "C" void exit (int) throw ();' \
-   'extern "C" void exit (int);' \
-   'void exit (int);'
-do
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_declaration
-#include <stdlib.h>
-int
-main ()
-{
-exit (42);
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_c_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  :
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-continue
-fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_declaration
-int
-main ()
-{
-exit (42);
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_c_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  break
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
-done
-rm -f conftest*
-if test -n "$ac_declaration"; then
-  echo '#ifdef __cplusplus' >>confdefs.h
-  echo $ac_declaration      >>confdefs.h
-  echo '#endif'             >>confdefs.h
 fi
 
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
 ac_ext=c
 ac_cpp='$CPP $CPPFLAGS'
 ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
 ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
 ac_compiler_gnu=$ac_cv_c_compiler_gnu
 
-ac_ext=cc
+ac_ext=cpp
 ac_cpp='$CXXCPP $CPPFLAGS'
 ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
 ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
 ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
-if test -n "$ac_tool_prefix"; then
-  for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC
+if test -z "$CXX"; then
+  if test -n "$CCC"; then
+    CXX=$CCC
+  else
+    if test -n "$ac_tool_prefix"; then
+  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
   do
     # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
 set dummy $ac_tool_prefix$ac_prog; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_CXX+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$CXX"; then
   ac_cv_prog_CXX="$CXX" # Let the user override the test.
@@ -2200,39 +2644,41 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 fi
 fi
 CXX=$ac_cv_prog_CXX
 if test -n "$CXX"; then
-  echo "$as_me:$LINENO: result: $CXX" >&5
-echo "${ECHO_T}$CXX" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
+
     test -n "$CXX" && break
   done
 fi
 if test -z "$CXX"; then
   ac_ct_CXX=$CXX
-  for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC
+  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
 do
   # Extract the first word of "$ac_prog", so it can be a program name with args.
 set dummy $ac_prog; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$ac_ct_CXX"; then
   ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
@@ -2242,64 +2688,77 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_ac_ct_CXX="$ac_prog"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 fi
 fi
 ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
 if test -n "$ac_ct_CXX"; then
-  echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5
-echo "${ECHO_T}$ac_ct_CXX" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
+
   test -n "$ac_ct_CXX" && break
 done
-test -n "$ac_ct_CXX" || ac_ct_CXX="g++"
 
-  CXX=$ac_ct_CXX
+  if test "x$ac_ct_CXX" = x; then
+    CXX="g++"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CXX=$ac_ct_CXX
+  fi
 fi
 
-
-# Provide some information about the compiler.
-echo "$as_me:$LINENO:" \
-     "checking for C++ compiler version" >&5
-ac_compiler=`set X $ac_compile; echo $2`
-{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version </dev/null >&5\"") >&5
-  (eval $ac_compiler --version </dev/null >&5) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
-{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v </dev/null >&5\"") >&5
-  (eval $ac_compiler -v </dev/null >&5) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
-{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V </dev/null >&5\"") >&5
-  (eval $ac_compiler -V </dev/null >&5) 2>&5
+  fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
   ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
 
-echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5
-echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6
-if test "${ac_cv_cxx_compiler_gnu+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if ${ac_cv_cxx_compiler_gnu+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
 int
@@ -2313,55 +2772,34 @@ main ()
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_cxx_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
+if ac_fn_cxx_try_compile "$LINENO"; then :
   ac_compiler_gnu=yes
 else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-ac_compiler_gnu=no
+  ac_compiler_gnu=no
 fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
 
 fi
-echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5
-echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6
-GXX=`test $ac_compiler_gnu = yes && echo yes`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+  GXX=yes
+else
+  GXX=
+fi
 ac_test_CXXFLAGS=${CXXFLAGS+set}
 ac_save_CXXFLAGS=$CXXFLAGS
-CXXFLAGS="-g"
-echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5
-echo $ECHO_N "checking whether $CXX accepts -g... $ECHO_C" >&6
-if test "${ac_cv_prog_cxx_g+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if ${ac_cv_prog_cxx_g+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+   ac_cxx_werror_flag=yes
+   ac_cv_prog_cxx_g=no
+   CXXFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
 int
@@ -2372,160 +2810,64 @@ main ()
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_cxx_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
+if ac_fn_cxx_try_compile "$LINENO"; then :
   ac_cv_prog_cxx_g=yes
 else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-ac_cv_prog_cxx_g=no
-fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5
-echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6
-if test "$ac_test_CXXFLAGS" = set; then
-  CXXFLAGS=$ac_save_CXXFLAGS
-elif test $ac_cv_prog_cxx_g = yes; then
-  if test "$GXX" = yes; then
-    CXXFLAGS="-g -O2"
-  else
-    CXXFLAGS="-g"
-  fi
-else
-  if test "$GXX" = yes; then
-    CXXFLAGS="-O2"
-  else
-    CXXFLAGS=
-  fi
-fi
-for ac_declaration in \
-   '' \
-   'extern "C" void std::exit (int) throw (); using std::exit;' \
-   'extern "C" void std::exit (int); using std::exit;' \
-   'extern "C" void exit (int) throw ();' \
-   'extern "C" void exit (int);' \
-   'void exit (int);'
-do
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+  CXXFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
-$ac_declaration
-#include <stdlib.h>
+
 int
 main ()
 {
-exit (42);
+
   ;
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_cxx_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  :
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
+if ac_fn_cxx_try_compile "$LINENO"; then :
 
-continue
-fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+else
+  ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+	 CXXFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
-$ac_declaration
+
 int
 main ()
 {
-exit (42);
+
   ;
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext
-if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
-  (eval $ac_compile) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_cxx_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest.$ac_objext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  break
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_cv_prog_cxx_g=yes
 fi
-rm -f conftest.err conftest.$ac_objext conftest.$ac_ext
-done
-rm -f conftest*
-if test -n "$ac_declaration"; then
-  echo '#ifdef __cplusplus' >>confdefs.h
-  echo $ac_declaration      >>confdefs.h
-  echo '#endif'             >>confdefs.h
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+  CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+  if test "$GXX" = yes; then
+    CXXFLAGS="-g -O2"
+  else
+    CXXFLAGS="-g"
+  fi
+else
+  if test "$GXX" = yes; then
+    CXXFLAGS="-O2"
+  else
+    CXXFLAGS=
+  fi
 fi
-
 ac_ext=c
 ac_cpp='$CPP $CPPFLAGS'
 ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -2536,10 +2878,10 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
 if test -n "$ac_tool_prefix"; then
   # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
 set dummy ${ac_tool_prefix}ranlib; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_RANLIB+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_RANLIB+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$RANLIB"; then
   ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
@@ -2549,35 +2891,37 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
 fi
 fi
 RANLIB=$ac_cv_prog_RANLIB
 if test -n "$RANLIB"; then
-  echo "$as_me:$LINENO: result: $RANLIB" >&5
-echo "${ECHO_T}$RANLIB" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
+$as_echo "$RANLIB" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
+
 fi
 if test -z "$ac_cv_prog_RANLIB"; then
   ac_ct_RANLIB=$RANLIB
   # Extract the first word of "ranlib", so it can be a program name with args.
 set dummy ranlib; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   if test -n "$ac_ct_RANLIB"; then
   ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
@@ -2587,28 +2931,38 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_prog_ac_ct_RANLIB="ranlib"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
-  test -z "$ac_cv_prog_ac_ct_RANLIB" && ac_cv_prog_ac_ct_RANLIB=":"
 fi
 fi
 ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
 if test -n "$ac_ct_RANLIB"; then
-  echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5
-echo "${ECHO_T}$ac_ct_RANLIB" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
+$as_echo "$ac_ct_RANLIB" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
-  RANLIB=$ac_ct_RANLIB
+  if test "x$ac_ct_RANLIB" = x; then
+    RANLIB=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    RANLIB=$ac_ct_RANLIB
+  fi
 else
   RANLIB="$ac_cv_prog_RANLIB"
 fi
@@ -2649,11 +3003,11 @@ fi
   fi
 
 
-# Check whether --with-ld-shared or --without-ld-shared was given.
-if test "${with_ld_shared+set}" = set; then
-  withval="$with_ld_shared"
+# Check whether --with-ld-shared was given.
+if test "${with_ld_shared+set}" = set; then :
+  withval=$with_ld_shared;
+fi
 
-fi;
 
   if test "$with_ld_shared" != "" ; then
     if test "$with_ld_shared" = "no" ; then
@@ -2783,17 +3137,13 @@ fi;
 	if test "$GCC" = "yes"; then
 		C_WFLAGS="-Wall"
 
-cat >>confdefs.h <<\_ACEOF
-#define USE_GNUCC 1
-_ACEOF
+$as_echo "#define USE_GNUCC 1" >>confdefs.h
 
 	fi
 	if test "$GXX" = "yes"; then
 		CXX_WFLAGS="-Wall"
 
-cat >>confdefs.h <<\_ACEOF
-#define USE_GNUCC 1
-_ACEOF
+$as_echo "#define USE_GNUCC 1" >>confdefs.h
 
 	fi
 	CXX_WFLAGS=$CXX_WFLAGS
@@ -2804,11 +3154,11 @@ _ACEOF
 
 
 
-# Check whether --with-gdal or --without-gdal was given.
-if test "${with_gdal+set}" = set; then
-  withval="$with_gdal"
+# Check whether --with-gdal was given.
+if test "${with_gdal+set}" = set; then :
+  withval=$with_gdal;
+fi
 
-fi;
 
 if test "$with_gdal" = "yes" -o "$with_gdal" = "" ; then
 
@@ -2819,10 +3169,10 @@ if test "$with_gdal" = "yes" -o "$with_gdal" = "" ; then
   if test -z "$GDAL_CONFIG" ; then
     # Extract the first word of "gdal-config", so it can be a program name with args.
 set dummy gdal-config; ac_word=$2
-echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
-if test "${ac_cv_path_GDAL_CONFIG+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_GDAL_CONFIG+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   case $GDAL_CONFIG in
   [\\/]* | ?:[\\/]*)
@@ -2834,35 +3184,34 @@ for as_dir in $PATH
 do
   IFS=$as_save_IFS
   test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
     ac_cv_path_GDAL_CONFIG="$as_dir/$ac_word$ac_exec_ext"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
     break 2
   fi
 done
-done
+  done
+IFS=$as_save_IFS
 
   test -z "$ac_cv_path_GDAL_CONFIG" && ac_cv_path_GDAL_CONFIG="no"
   ;;
 esac
 fi
 GDAL_CONFIG=$ac_cv_path_GDAL_CONFIG
-
 if test -n "$GDAL_CONFIG"; then
-  echo "$as_me:$LINENO: result: $GDAL_CONFIG" >&5
-echo "${ECHO_T}$GDAL_CONFIG" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GDAL_CONFIG" >&5
+$as_echo "$GDAL_CONFIG" >&6; }
 else
-  echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
 fi
 
+
   fi
 
   if test "$GDAL_CONFIG" = "no" ; then
-    { { echo "$as_me:$LINENO: error: couldn't find gdal-config" >&5
-echo "$as_me: error: couldn't find gdal-config" >&2;}
-   { (exit 1); exit 1; }; }
+    as_fn_error $? "couldn't find gdal-config" "$LINENO" 5
   fi
 
 elif test -n "$with_gdal" -a "$with_gdal" != "no" ; then
@@ -2870,19 +3219,15 @@ elif test -n "$with_gdal" -a "$with_gdal" != "no" ; then
   GDAL_CONFIG=$with_gdal
 
   if test -f "$GDAL_CONFIG" -a -x "$GDAL_CONFIG" ; then
-    echo "$as_me:$LINENO: result: user supplied gdal-config ($GDAL_CONFIG)" >&5
-echo "${ECHO_T}user supplied gdal-config ($GDAL_CONFIG)" >&6
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: user supplied gdal-config ($GDAL_CONFIG)" >&5
+$as_echo "user supplied gdal-config ($GDAL_CONFIG)" >&6; }
   else
-    { { echo "$as_me:$LINENO: error: '$GDAL_CONFIG' is not an executable.  Make sure you use --with-gdal=/path/to/gdal-config" >&5
-echo "$as_me: error: '$GDAL_CONFIG' is not an executable.  Make sure you use --with-gdal=/path/to/gdal-config" >&2;}
-   { (exit 1); exit 1; }; }
+    as_fn_error $? "'$GDAL_CONFIG' is not an executable.  Make sure you use --with-gdal=/path/to/gdal-config" "$LINENO" 5
   fi
 
 else
 
-  { { echo "$as_me:$LINENO: error: gdal required to build GDAL GRASS driver" >&5
-echo "$as_me: error: gdal required to build GDAL GRASS driver" >&2;}
-   { (exit 1); exit 1; }; }
+  as_fn_error $? "gdal required to build GDAL GRASS driver" "$LINENO" 5
 
 fi
 
@@ -2893,11 +3238,11 @@ GDAL_INC=$GDAL_INC
 
 
 
-# Check whether --with-autoload or --without-autoload was given.
-if test "${with_autoload+set}" = set; then
-  withval="$with_autoload"
+# Check whether --with-autoload was given.
+if test "${with_autoload+set}" = set; then :
+  withval=$with_autoload;
+fi
 
-fi;
 
 if test "$with_autoload" != "" ; then
   AUTOLOAD_DIR=$with_autoload
@@ -2909,8 +3254,8 @@ else
   fi
 fi
 
-echo "$as_me:$LINENO: result: using $AUTOLOAD_DIR as GDAL shared library autoload directory" >&5
-echo "${ECHO_T}using $AUTOLOAD_DIR as GDAL shared library autoload directory" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: using $AUTOLOAD_DIR as GDAL shared library autoload directory" >&5
+$as_echo "using $AUTOLOAD_DIR as GDAL shared library autoload directory" >&6; }
 AUTOLOAD_DIR=$AUTOLOAD_DIR
 
 
@@ -2921,178 +3266,121 @@ GRASS_GISBASE=
 export GRASS_INCLUDE GRASS_SETTING GRASS_GISBASE
 
 
-# Check whether --with-grass or --without-grass was given.
-if test "${with_grass+set}" = set; then
-  withval="$with_grass"
+# Check whether --with-grass was given.
+if test "${with_grass+set}" = set; then :
+  withval=$with_grass;
+fi
 
-fi;
 
 if test "$with_grass" = "no" ; then
-  { { echo "$as_me:$LINENO: error: grass required for this driver, please install GRASS 5.7 or later and rebuild" >&5
-echo "$as_me: error: grass required for this driver, please install GRASS 5.7 or later and rebuild" >&2;}
-   { (exit 1); exit 1; }; }
+  as_fn_error $? "grass required for this driver, please install GRASS 5.7 or later and rebuild" "$LINENO" 5
 fi
 
 if test "$with_grass" != "yes" ; then
 
 
-echo "$as_me:$LINENO: checking for G_asprintf in -lgrass_gis" >&5
-echo $ECHO_N "checking for G_asprintf in -lgrass_gis... $ECHO_C" >&6
-if test "${ac_cv_lib_grass_gis_G_asprintf+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for G_is_initialized in -lgrass_gis" >&5
+$as_echo_n "checking for G_is_initialized in -lgrass_gis... " >&6; }
+if ${ac_cv_lib_grass_gis_G_is_initialized+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   ac_check_lib_save_LIBS=$LIBS
-LIBS="-lgrass_gis -L$with_grass/lib -lgrass_I -lgrass_vask -lgrass_gmath -lgrass_gis -lgrass_datetime -lgrass_gproj -lgrass_vect -lgrass_dbmibase -lgrass_dbmiclient -lgrass_dgl -lgrass_dig2 -lgrass_rtree -lgrass_linkm $LIBS"
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+LIBS="-lgrass_gis -L$with_grass/lib -lgrass_datetime $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
-/* Override any gcc2 internal prototype to avoid an error.  */
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
 #ifdef __cplusplus
 extern "C"
 #endif
-/* We use char because int might match the return type of a gcc2
-   builtin and then its argument prototype would still apply.  */
-char G_asprintf ();
+char G_is_initialized ();
 int
 main ()
 {
-G_asprintf ();
+return G_is_initialized ();
   ;
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
-  (eval $ac_link) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_c_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest$ac_exeext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  ac_cv_lib_grass_gis_G_asprintf=yes
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_grass_gis_G_is_initialized=yes
 else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-ac_cv_lib_grass_gis_G_asprintf=no
+  ac_cv_lib_grass_gis_G_is_initialized=no
 fi
-rm -f conftest.err conftest.$ac_objext \
-      conftest$ac_exeext conftest.$ac_ext
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
 LIBS=$ac_check_lib_save_LIBS
 fi
-echo "$as_me:$LINENO: result: $ac_cv_lib_grass_gis_G_asprintf" >&5
-echo "${ECHO_T}$ac_cv_lib_grass_gis_G_asprintf" >&6
-if test $ac_cv_lib_grass_gis_G_asprintf = yes; then
-  GRASS_SETTING=grass57+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_grass_gis_G_is_initialized" >&5
+$as_echo "$ac_cv_lib_grass_gis_G_is_initialized" >&6; }
+if test "x$ac_cv_lib_grass_gis_G_is_initialized" = xyes; then :
+  GRASS_SETTING=grass70+
 else
   GRASS_SETTING=no
 fi
 
-
-  if test "$GRASS_SETTING" = "grass57+" ; then
-    LIBS="-L$with_grass/lib -lgrass_I -lgrass_vask -lgrass_gmath -lgrass_gis -lgrass_datetime -lgrass_gproj -lgrass_vect -lgrass_dbmibase -lgrass_dbmiclient -lgrass_dgl -lgrass_dig2 -lgrass_rtree -lgrass_linkm $LIBS"
-    GRASS_INCLUDE="-I$with_grass/include"
-    GRASS_GISBASE="$with_grass"
-  else
-
-    # Check for GRASS >= 7.0
-    echo "$as_me:$LINENO: checking for G_putenv in -lgrass_gis.7.0.svn" >&5
-echo $ECHO_N "checking for G_putenv in -lgrass_gis.7.0.svn... $ECHO_C" >&6
-if test "${ac_cv_lib_grass_gis_7_0_svn_G_putenv+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+  if test "$GRASS_SETTING" = "no" ; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for G_asprintf in -lgrass_gis" >&5
+$as_echo_n "checking for G_asprintf in -lgrass_gis... " >&6; }
+if ${ac_cv_lib_grass_gis_G_asprintf+:} false; then :
+  $as_echo_n "(cached) " >&6
 else
   ac_check_lib_save_LIBS=$LIBS
-LIBS="-lgrass_gis.7.0.svn -L$with_grass/lib -lgrass_raster.7.0.svn -lgrass_gmath.7.0.svn -lgrass_gis.7.0.svn -lgrass_datetime.7.0.svn -lgrass_gproj.7.0.svn -lgrass_vector.7.0.svn -lgrass_dbmibase.7.0.svn -lgrass_dbmiclient.7.0.svn -lgrass_dgl.7.0.svn -lgrass_dig2.7.0.svn -lgrass_rtree.7.0.svn -lgrass_linkm.7.0.svn -lgrass_btree2.7.0.svn -lgrass_ccmath.7.0.svn $LIBS"
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
+LIBS="-lgrass_gis -L$with_grass/lib -lgrass_datetime $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
-/* Override any gcc2 internal prototype to avoid an error.  */
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
 #ifdef __cplusplus
 extern "C"
 #endif
-/* We use char because int might match the return type of a gcc2
-   builtin and then its argument prototype would still apply.  */
-char G_putenv ();
+char G_asprintf ();
 int
 main ()
 {
-G_putenv ();
+return G_asprintf ();
   ;
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
-  (eval $ac_link) 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } &&
-	 { ac_try='test -z "$ac_c_werror_flag"
-			 || test ! -s conftest.err'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; } &&
-	 { ac_try='test -s conftest$ac_exeext'
-  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  ac_cv_lib_grass_gis_7_0_svn_G_putenv=yes
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_grass_gis_G_asprintf=yes
 else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-ac_cv_lib_grass_gis_7_0_svn_G_putenv=no
+  ac_cv_lib_grass_gis_G_asprintf=no
 fi
-rm -f conftest.err conftest.$ac_objext \
-      conftest$ac_exeext conftest.$ac_ext
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
 LIBS=$ac_check_lib_save_LIBS
 fi
-echo "$as_me:$LINENO: result: $ac_cv_lib_grass_gis_7_0_svn_G_putenv" >&5
-echo "${ECHO_T}$ac_cv_lib_grass_gis_7_0_svn_G_putenv" >&6
-if test $ac_cv_lib_grass_gis_7_0_svn_G_putenv = yes; then
-  GRASS_SETTING=grass7+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_grass_gis_G_asprintf" >&5
+$as_echo "$ac_cv_lib_grass_gis_G_asprintf" >&6; }
+if test "x$ac_cv_lib_grass_gis_G_asprintf" = xyes; then :
+  GRASS_SETTING=grass57+
 else
   GRASS_SETTING=no
 fi
 
-    if test "$GRASS_SETTING" = "grass7+" ; then
-        LIBS="-L$with_grass/lib -lgrass_raster.7.0.svn -lgrass_gmath.7.0.svn -lgrass_gis.7.0.svn -lgrass_datetime.7.0.svn -lgrass_gproj.7.0.svn -lgrass_vector.7.0.svn -lgrass_dbmibase.7.0.svn -lgrass_dbmiclient.7.0.svn -lgrass_dgl.7.0.svn -lgrass_dig2.7.0.svn -lgrass_rtree.7.0.svn -lgrass_linkm.7.0.svn -lgrass_btree2.7.0.svn -lgrass_ccmath.7.0.svn $LIBS"
-        GRASS_INCLUDE="-I$with_grass/include"
-        GRASS_GISBASE="$with_grass"
+  fi
+
+  if test "$GRASS_SETTING" != "no" ; then
+    if test "$GRASS_SETTING" = "grass70+" ; then
+      G_RASTLIBS="-lgrass_raster -lgrass_imagery"
+      G_VECTLIBS="-lgrass_vector -lgrass_dig2 -lgrass_dgl -lgrass_rtree -lgrass_linkm -lgrass_dbmiclient -lgrass_dbmibase"
+      LIBS="-L$with_grass/lib $G_VECTLIBS $G_RASTLIBS -lgrass_gproj -lgrass_gmath -lgrass_gis -lgrass_datetime $LIBS"
     else
-        { { echo "$as_me:$LINENO: error: --with-grass=$with_grass requested, but libraries not found!  Perhaps you need to set LD_LIBRARY_PATH to include $with_grass/lib?" >&5
-echo "$as_me: error: --with-grass=$with_grass requested, but libraries not found!  Perhaps you need to set LD_LIBRARY_PATH to include $with_grass/lib?" >&2;}
-   { (exit 1); exit 1; }; }
+      G_RASTLIBS="-lgrass_I"
+      G_VECTLIBS="-lgrass_vect -lgrass_dig2 -lgrass_dgl -lgrass_rtree -lgrass_linkm -lgrass_dbmiclient -lgrass_dbmibase"
+      LIBS="-L$with_grass/lib $G_VECTLIBS $G_RASTLIBS -lgrass_gproj -lgrass_vask -lgrass_gmath -lgrass_gis -lgrass_datetime $LIBS"
     fi
+    GRASS_INCLUDE="-I$with_grass/include"
+    GRASS_GISBASE="$with_grass"
+    HAVE_GRASS=yes
+  else
+    as_fn_error $? "--with-grass=$with_grass requested, but libraries not found!" "$LINENO" 5
   fi
 fi
 
@@ -3102,72 +3390,84 @@ GRASS_GISBASE=$GRASS_GISBASE
 
 
 
+
+
+# Check whether --with-postgres_includes was given.
+if test "${with_postgres_includes+set}" = set; then :
+  withval=$with_postgres_includes; postgres_includes="$withval"
+else
+  postgres_includes=no
+fi
+
+
+PQ_INCLUDE=
+if test "x$postgres_includes" != "xno"; then
+# With PostgreSQL includes directory
+PQ_INCLUDE="-I$postgres_includes"
+fi
+
+
+
+
 rm -f conftest*
 
-          ac_config_files="$ac_config_files Makefile"
+ac_config_files="$ac_config_files Makefile"
+
 
 
 test "x$prefix" = xNONE && prefix=$ac_default_prefix
 # Let make expand exec_prefix.
 test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
 
-# VPATH may cause trouble with some makes, so we remove $(srcdir),
-# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
-# trailing colons and then remove the whole line if VPATH becomes empty
-# (actually we leave an empty line to preserve line numbers).
-if test "x$srcdir" = x.; then
-  ac_vpsub='/^[	 ]*VPATH[	 ]*=/{
-s/:*\$(srcdir):*/:/;
-s/:*\${srcdir}:*/:/;
-s/:*@srcdir@:*/:/;
-s/^\([^=]*=[	 ]*\):*/\1/;
-s/:*$//;
-s/^[^=]*=[	 ]*$//;
-}'
-fi
-
 # Transform confdefs.h into DEFS.
 # Protect against shell expansion while executing Makefile rules.
 # Protect against Makefile macro expansion.
 #
 # If the first sed substitution is executed (which looks for macros that
-# take arguments), then we branch to the quote section.  Otherwise,
+# take arguments), then branch to the quote section.  Otherwise,
 # look for a macro that doesn't take arguments.
-cat >confdef2opt.sed <<\_ACEOF
+ac_script='
+:mline
+/\\$/{
+ N
+ s,\\\n,,
+ b mline
+}
 t clear
-: clear
-s,^[	 ]*#[	 ]*define[	 ][	 ]*\([^	 (][^	 (]*([^)]*)\)[	 ]*\(.*\),-D\1=\2,g
+:clear
+s/^[	 ]*#[	 ]*define[	 ][	 ]*\([^	 (][^	 (]*([^)]*)\)[	 ]*\(.*\)/-D\1=\2/g
 t quote
-s,^[	 ]*#[	 ]*define[	 ][	 ]*\([^	 ][^	 ]*\)[	 ]*\(.*\),-D\1=\2,g
+s/^[	 ]*#[	 ]*define[	 ][	 ]*\([^	 ][^	 ]*\)[	 ]*\(.*\)/-D\1=\2/g
 t quote
-d
-: quote
-s,[	 `~#$^&*(){}\\|;'"<>?],\\&,g
-s,\[,\\&,g
-s,\],\\&,g
-s,\$,$$,g
-p
-_ACEOF
-# We use echo to avoid assuming a particular line-breaking character.
-# The extra dot is to prevent the shell from consuming trailing
-# line-breaks from the sub-command output.  A line-break within
-# single-quotes doesn't work because, if this script is created in a
-# platform that uses two characters for line-breaks (e.g., DOS), tr
-# would break.
-ac_LF_and_DOT=`echo; echo .`
-DEFS=`sed -n -f confdef2opt.sed confdefs.h | tr "$ac_LF_and_DOT" ' .'`
-rm -f confdef2opt.sed
+b any
+:quote
+s/[	 `~#$^&*(){}\\|;'\''"<>?]/\\&/g
+s/\[/\\&/g
+s/\]/\\&/g
+s/\$/$$/g
+H
+:any
+${
+	g
+	s/^\n//
+	s/\n/ /g
+	p
+}
+'
+DEFS=`sed -n "$ac_script" confdefs.h`
 
 
 ac_libobjs=
 ac_ltlibobjs=
+U=
 for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
   # 1. Remove the extension, and $U if already installed.
-  ac_i=`echo "$ac_i" |
-	 sed 's/\$U\././;s/\.o$//;s/\.obj$//'`
-  # 2. Add them.
-  ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext"
-  ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo'
+  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+  ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
+  #    will be set to the directory where LIBOBJS objects are built.
+  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
 done
 LIBOBJS=$ac_libobjs
 
@@ -3175,12 +3475,14 @@ LTLIBOBJS=$ac_ltlibobjs
 
 
 
-: ${CONFIG_STATUS=./config.status}
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
 ac_clean_files_save=$ac_clean_files
 ac_clean_files="$ac_clean_files $CONFIG_STATUS"
-{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
-echo "$as_me: creating $CONFIG_STATUS" >&6;}
-cat >$CONFIG_STATUS <<_ACEOF
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
 #! $SHELL
 # Generated by $as_me.
 # Run this file to recreate the current configuration.
@@ -3190,81 +3492,253 @@ cat >$CONFIG_STATUS <<_ACEOF
 debug=false
 ac_cs_recheck=false
 ac_cs_silent=false
-SHELL=\${CONFIG_SHELL-$SHELL}
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF
-## --------------------- ##
-## M4sh Initialization.  ##
-## --------------------- ##
 
-# Be Bourne compatible
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
   emulate sh
   NULLCMD=:
-  # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
   # is contrary to our usage.  Disable this feature.
   alias -g '${1+"$@"}'='"$@"'
-elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
-  set -o posix
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
 fi
-DUALCASE=1; export DUALCASE # for MKS sh
 
-# Support unset when possible.
-if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
-  as_unset=unset
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='print -r --'
+  as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='printf %s\n'
+  as_echo_n='printf %s'
 else
-  as_unset=false
+  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+    as_echo_n='/usr/ucb/echo -n'
+  else
+    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+    as_echo_n_body='eval
+      arg=$1;
+      case $arg in #(
+      *"$as_nl"*)
+	expr "X$arg" : "X\\(.*\\)$as_nl";
+	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+      esac;
+      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+    '
+    export as_echo_n_body
+    as_echo_n='sh -c $as_echo_n_body as_echo'
+  fi
+  export as_echo_body
+  as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
 fi
 
 
-# Work around bugs in pre-3.0 UWIN ksh.
-$as_unset ENV MAIL MAILPATH
+# IFS
+# We need space, tab and new line, in precisely that order.  Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" ""	$as_nl"
+
+# Find who we are.  Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+  *[\\/]* ) as_myself=$0 ;;
+  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+  done
+IFS=$as_save_IFS
+
+     ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+  as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+  exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there.  '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
 PS1='$ '
 PS2='> '
 PS4='+ '
 
 # NLS nuisances.
-for as_var in \
-  LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
-  LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
-  LC_TELEPHONE LC_TIME
-do
-  if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
-    eval $as_var=C; export $as_var
-  else
-    $as_unset $as_var
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+  as_status=$1; test $as_status -eq 0 && as_status=1
+  if test "$4"; then
+    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+    $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
   fi
-done
+  $as_echo "$as_me: error: $2" >&2
+  as_fn_exit $as_status
+} # as_fn_error
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+  return $1
+} # as_fn_set_status
 
-# Required to use basename.
-if expr a : '\(a\)' >/dev/null 2>&1; then
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+  set +e
+  as_fn_set_status $1
+  exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+  { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+  eval 'as_fn_append ()
+  {
+    eval $1+=\$2
+  }'
+else
+  as_fn_append ()
+  {
+    eval $1=\$$1\$2
+  }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+  eval 'as_fn_arith ()
+  {
+    as_val=$(( $* ))
+  }'
+else
+  as_fn_arith ()
+  {
+    as_val=`expr "$@" || test $? -eq 1`
+  }
+fi # as_fn_arith
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+   test "X`expr 00001 : '.*\(...\)'`" = X001; then
   as_expr=expr
 else
   as_expr=false
 fi
 
-if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
   as_basename=basename
 else
   as_basename=false
 fi
 
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+  as_dirname=dirname
+else
+  as_dirname=false
+fi
 
-# Name of the executable.
-as_me=`$as_basename "$0" ||
+as_me=`$as_basename -- "$0" ||
 $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
 	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)$' \| \
-	 .     : '\(.\)' 2>/dev/null ||
-echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
-  	  /^X\/\(\/\/\)$/{ s//\1/; q; }
-  	  /^X\/\(\/\).*/{ s//\1/; q; }
-  	  s/.*/./; q'`
+	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
 
-
-# PATH needs CR, and LINENO needs CR and PATH.
 # Avoid depending upon Character Ranges.
 as_cr_letters='abcdefghijklmnopqrstuvwxyz'
 as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
@@ -3272,148 +3746,111 @@ as_cr_Letters=$as_cr_letters$as_cr_LETTERS
 as_cr_digits='0123456789'
 as_cr_alnum=$as_cr_Letters$as_cr_digits
 
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  echo "#! /bin/sh" >conf$$.sh
-  echo  "exit 0"   >>conf$$.sh
-  chmod +x conf$$.sh
-  if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
-    PATH_SEPARATOR=';'
-  else
-    PATH_SEPARATOR=:
-  fi
-  rm -f conf$$.sh
-fi
-
-
-  as_lineno_1=$LINENO
-  as_lineno_2=$LINENO
-  as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
-  test "x$as_lineno_1" != "x$as_lineno_2" &&
-  test "x$as_lineno_3"  = "x$as_lineno_2"  || {
-  # Find who we are.  Look in the path if we contain no path at all
-  # relative or not.
-  case $0 in
-    *[\\/]* ) as_myself=$0 ;;
-    *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-done
-
-       ;;
-  esac
-  # We did not find ourselves, most probably we were run as `sh COMMAND'
-  # in which case we are not to be found in the path.
-  if test "x$as_myself" = x; then
-    as_myself=$0
-  fi
-  if test ! -f "$as_myself"; then
-    { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5
-echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;}
-   { (exit 1); exit 1; }; }
-  fi
-  case $CONFIG_SHELL in
-  '')
-    as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for as_base in sh bash ksh sh5; do
-	 case $as_dir in
-	 /*)
-	   if ("$as_dir/$as_base" -c '
-  as_lineno_1=$LINENO
-  as_lineno_2=$LINENO
-  as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
-  test "x$as_lineno_1" != "x$as_lineno_2" &&
-  test "x$as_lineno_3"  = "x$as_lineno_2" ') 2>/dev/null; then
-	     $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
-	     $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
-	     CONFIG_SHELL=$as_dir/$as_base
-	     export CONFIG_SHELL
-	     exec "$CONFIG_SHELL" "$0" ${1+"$@"}
-	   fi;;
-	 esac
-       done
-done
-;;
-  esac
-
-  # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
-  # uniformly replaced by the line number.  The first 'sed' inserts a
-  # line-number line before each line; the second 'sed' does the real
-  # work.  The second script uses 'N' to pair each line-number line
-  # with the numbered line, and appends trailing '-' during
-  # substitution so that $LINENO is not a special case at line end.
-  # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
-  # second 'sed' script.  Blame Lee E. McMahon for sed's syntax.  :-)
-  sed '=' <$as_myself |
-    sed '
-      N
-      s,$,-,
-      : loop
-      s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
-      t loop
-      s,-$,,
-      s,^['$as_cr_digits']*\n,,
-    ' >$as_me.lineno &&
-  chmod +x $as_me.lineno ||
-    { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5
-echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;}
-   { (exit 1); exit 1; }; }
-
-  # Don't try to exec as it changes $[0], causing all sort of problems
-  # (the dirname of $[0] is not the place where we might find the
-  # original and so on.  Autoconf is especially sensible to this).
-  . ./$as_me.lineno
-  # Exit status is that of the last command.
-  exit
-}
-
-
-case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
-  *c*,-n*) ECHO_N= ECHO_C='
-' ECHO_T='	' ;;
-  *c*,*  ) ECHO_N=-n ECHO_C= ECHO_T= ;;
-  *)       ECHO_N= ECHO_C='\c' ECHO_T= ;;
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T='	';;
+  esac;;
+*)
+  ECHO_N='-n';;
 esac
 
-if expr a : '\(a\)' >/dev/null 2>&1; then
-  as_expr=expr
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
 else
-  as_expr=false
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
 fi
-
-rm -f conf$$ conf$$.exe conf$$.file
-echo >conf$$.file
-if ln -s conf$$.file conf$$ 2>/dev/null; then
-  # We could just check for DJGPP; but this test a) works b) is more generic
-  # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
-  if test -f conf$$.exe; then
-    # Don't use ln at all; we don't have any links
-    as_ln_s='cp -p'
-  else
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
     as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -pR'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -pR'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -pR'
   fi
-elif ln conf$$.file conf$$ 2>/dev/null; then
-  as_ln_s=ln
 else
-  as_ln_s='cp -p'
+  as_ln_s='cp -pR'
 fi
-rm -f conf$$ conf$$.exe conf$$.file
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+  case $as_dir in #(
+  -*) as_dir=./$as_dir;;
+  esac
+  test -d "$as_dir" || eval $as_mkdir_p || {
+    as_dirs=
+    while :; do
+      case $as_dir in #(
+      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+      *) as_qdir=$as_dir;;
+      esac
+      as_dirs="'$as_qdir' $as_dirs"
+      as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_dir" : 'X\(//\)[^/]' \| \
+	 X"$as_dir" : 'X\(//\)$' \| \
+	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      test -d "$as_dir" && break
+    done
+    test -z "$as_dirs" || eval "mkdir $as_dirs"
+  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
 
+} # as_fn_mkdir_p
 if mkdir -p . 2>/dev/null; then
-  as_mkdir_p=:
+  as_mkdir_p='mkdir -p "$as_dir"'
 else
   test -d ./-p && rmdir ./-p
   as_mkdir_p=false
 fi
 
-as_executable_p="test -f"
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+  test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
 
 # Sed expression to map a string onto a valid CPP name.
 as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -3422,31 +3859,20 @@ as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
 as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
 
 
-# IFS
-# We need space, tab and new line, in precisely that order.
-as_nl='
-'
-IFS=" 	$as_nl"
-
-# CDPATH.
-$as_unset CDPATH
-
 exec 6>&1
-
-# Open the log real soon, to keep \$[0] and so on meaningful, and to
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
 # report actual input values of CONFIG_FILES etc. instead of their
-# values after options handling.  Logging --version etc. is OK.
-exec 5>>config.log
-{
-  echo
-  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
-## Running $as_me. ##
-_ASBOX
-} >&5
-cat >&5 <<_CSEOF
-
+# values after options handling.
+ac_log="
 This file was extended by $as_me, which was
-generated by GNU Autoconf 2.59.  Invocation command line was
+generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
   CONFIG_HEADERS  = $CONFIG_HEADERS
@@ -3454,124 +3880,116 @@ generated by GNU Autoconf 2.59.  Invocation command line was
   CONFIG_COMMANDS = $CONFIG_COMMANDS
   $ $0 $@
 
-_CSEOF
-echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5
-echo >&5
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
 _ACEOF
 
-# Files that config.status was made for.
-if test -n "$ac_config_files"; then
-  echo "config_files=\"$ac_config_files\"" >>$CONFIG_STATUS
-fi
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
 
-if test -n "$ac_config_headers"; then
-  echo "config_headers=\"$ac_config_headers\"" >>$CONFIG_STATUS
-fi
 
-if test -n "$ac_config_links"; then
-  echo "config_links=\"$ac_config_links\"" >>$CONFIG_STATUS
-fi
 
-if test -n "$ac_config_commands"; then
-  echo "config_commands=\"$ac_config_commands\"" >>$CONFIG_STATUS
-fi
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
 
-cat >>$CONFIG_STATUS <<\_ACEOF
+_ACEOF
 
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 ac_cs_usage="\
-\`$as_me' instantiates files from templates according to the
-current configuration.
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration.  Unless the files
+and actions are specified as TAGs, all are instantiated by default.
 
-Usage: $0 [OPTIONS] [FILE]...
+Usage: $0 [OPTION]... [TAG]...
 
   -h, --help       print this help, then exit
-  -V, --version    print version number, then exit
-  -q, --quiet      do not print progress messages
+  -V, --version    print version number and configuration settings, then exit
+      --config     print configuration, then exit
+  -q, --quiet, --silent
+                   do not print progress messages
   -d, --debug      don't remove temporary files
       --recheck    update $as_me by reconfiguring in the same conditions
-  --file=FILE[:TEMPLATE]
-		   instantiate the configuration file FILE
+      --file=FILE[:TEMPLATE]
+                   instantiate the configuration file FILE
 
 Configuration files:
 $config_files
 
-Report bugs to <bug-autoconf at gnu.org>."
-_ACEOF
+Report bugs to the package provider."
 
-cat >>$CONFIG_STATUS <<_ACEOF
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
 config.status
-configured by $0, generated by GNU Autoconf 2.59,
-  with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
+configured by $0, generated by GNU Autoconf 2.69,
+  with options \\"\$ac_cs_config\\"
 
-Copyright (C) 2003 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
 This config.status script is free software; the Free Software Foundation
 gives unlimited permission to copy, distribute and modify it."
-srcdir=$srcdir
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+test -n "\$AWK" || AWK=awk
 _ACEOF
 
-cat >>$CONFIG_STATUS <<\_ACEOF
-# If no file are specified by the user, then we need to provide default
-# value.  By we need to know if files were specified by the user.
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
 ac_need_defaults=:
 while test $# != 0
 do
   case $1 in
-  --*=*)
-    ac_option=`expr "x$1" : 'x\([^=]*\)='`
-    ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'`
+  --*=?*)
+    ac_option=`expr "X$1" : 'X\([^=]*\)='`
+    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
     ac_shift=:
     ;;
-  -*)
+  --*=)
+    ac_option=`expr "X$1" : 'X\([^=]*\)='`
+    ac_optarg=
+    ac_shift=:
+    ;;
+  *)
     ac_option=$1
     ac_optarg=$2
     ac_shift=shift
     ;;
-  *) # This is not an option, so the user has probably given explicit
-     # arguments.
-     ac_option=$1
-     ac_need_defaults=false;;
   esac
 
   case $ac_option in
   # Handling of the options.
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
   -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
     ac_cs_recheck=: ;;
-  --version | --vers* | -V )
-    echo "$ac_cs_version"; exit 0 ;;
-  --he | --h)
-    # Conflict between --help and --header
-    { { echo "$as_me:$LINENO: error: ambiguous option: $1
-Try \`$0 --help' for more information." >&5
-echo "$as_me: error: ambiguous option: $1
-Try \`$0 --help' for more information." >&2;}
-   { (exit 1); exit 1; }; };;
-  --help | --hel | -h )
-    echo "$ac_cs_usage"; exit 0 ;;
-  --debug | --d* | -d )
+  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+    $as_echo "$ac_cs_version"; exit ;;
+  --config | --confi | --conf | --con | --co | --c )
+    $as_echo "$ac_cs_config"; exit ;;
+  --debug | --debu | --deb | --de | --d | -d )
     debug=: ;;
   --file | --fil | --fi | --f )
     $ac_shift
-    CONFIG_FILES="$CONFIG_FILES $ac_optarg"
-    ac_need_defaults=false;;
-  --header | --heade | --head | --hea )
-    $ac_shift
-    CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
+    case $ac_optarg in
+    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    '') as_fn_error $? "missing file argument" ;;
+    esac
+    as_fn_append CONFIG_FILES " '$ac_optarg'"
     ac_need_defaults=false;;
+  --he | --h |  --help | --hel | -h )
+    $as_echo "$ac_cs_usage"; exit ;;
   -q | -quiet | --quiet | --quie | --qui | --qu | --q \
   | -silent | --silent | --silen | --sile | --sil | --si | --s)
     ac_cs_silent=: ;;
 
   # This is an error.
-  -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1
-Try \`$0 --help' for more information." >&5
-echo "$as_me: error: unrecognized option: $1
-Try \`$0 --help' for more information." >&2;}
-   { (exit 1); exit 1; }; } ;;
+  -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
 
-  *) ac_config_targets="$ac_config_targets $1" ;;
+  *) as_fn_append ac_config_targets " $1"
+     ac_need_defaults=false ;;
 
   esac
   shift
@@ -3585,30 +4003,44 @@ if $ac_cs_silent; then
 fi
 
 _ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 if \$ac_cs_recheck; then
-  echo "running $SHELL $0 " $ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
-  exec $SHELL $0 $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+  set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+  shift
+  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+  CONFIG_SHELL='$SHELL'
+  export CONFIG_SHELL
+  exec "\$@"
 fi
 
 _ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+  echo
+  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+  $as_echo "$ac_log"
+} >&5
 
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACEOF
 
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 
-
-
-cat >>$CONFIG_STATUS <<\_ACEOF
+# Handling of arguments.
 for ac_config_target in $ac_config_targets
 do
-  case "$ac_config_target" in
-  # Handling of arguments.
-  "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;;
-  *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
-echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
-   { (exit 1); exit 1; }; };;
+  case $ac_config_target in
+    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+
+  *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
   esac
 done
 
+
 # If the user did not use the arguments to specify the items to instantiate,
 # then the envvar interface is used.  Set only those that are not.
 # We use the long form for the default assignment because of an extremely
@@ -3618,340 +4050,414 @@ if $ac_need_defaults; then
 fi
 
 # Have a temporary directory for convenience.  Make it in the build tree
-# simply because there is no reason to put it here, and in addition,
+# simply because there is no reason against having it here, and in addition,
 # creating and moving files from /tmp can sometimes cause problems.
-# Create a temporary directory, and hook for its removal unless debugging.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
 $debug ||
 {
-  trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0
-  trap '{ (exit 1); exit 1; }' 1 2 13 15
+  tmp= ac_tmp=
+  trap 'exit_status=$?
+  : "${ac_tmp:=$tmp}"
+  { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
+' 0
+  trap 'as_fn_exit 1' 1 2 13 15
 }
-
 # Create a (secure) tmp directory for tmp files.
 
 {
-  tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` &&
-  test -n "$tmp" && test -d "$tmp"
+  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+  test -d "$tmp"
 }  ||
 {
-  tmp=./confstat$$-$RANDOM
-  (umask 077 && mkdir $tmp)
-} ||
+  tmp=./conf$$-$RANDOM
+  (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+  eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+  ac_cs_awk_cr='\\r'
+else
+  ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+  echo "cat >conf$$subs.awk <<_ACEOF" &&
+  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+  echo "_ACEOF"
+} >conf$$subs.sh ||
+  as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+  . ./conf$$subs.sh ||
+    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+  if test $ac_delim_n = $ac_delim_num; then
+    break
+  elif $ac_last_try; then
+    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+  else
+    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+  fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+  N
+  s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+  for (key in S) S_is_set[key] = 1
+  FS = ""
+
+}
 {
-   echo "$me: cannot create a temporary directory in ." >&2
-   { (exit 1); exit 1; }
+  line = $ 0
+  nfields = split(line, field, "@")
+  substed = 0
+  len = length(field[1])
+  for (i = 2; i < nfields; i++) {
+    key = field[i]
+    keylen = length(key)
+    if (S_is_set[key]) {
+      value = S[key]
+      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+      len += length(value) + length(field[++i])
+      substed = 1
+    } else
+      len += 1 + keylen
+  }
+
+  print line
 }
 
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+  cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+  || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
 _ACEOF
 
-cat >>$CONFIG_STATUS <<_ACEOF
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+  ac_vpsub='/^[	 ]*VPATH[	 ]*=[	 ]*/{
+h
+s///
+s/^/:/
+s/[	 ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[	 ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[	 ]*$//
+}'
+fi
 
-#
-# CONFIG_FILES section.
-#
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
 
-# No need to generate the scripts if there are no CONFIG_FILES.
-# This happens for instance when ./config.status config.h
-if test -n "\$CONFIG_FILES"; then
-  # Protect against being on the right side of a sed subst in config.status.
-  sed 's/,@/@@/; s/@,/@@/; s/,;t t\$/@;t t/; /@;t t\$/s/[\\\\&,]/\\\\&/g;
-   s/@@/,@/; s/@@/@,/; s/@;t t\$/,;t t/' >\$tmp/subs.sed <<\\CEOF
-s, at SHELL@,$SHELL,;t t
-s, at PATH_SEPARATOR@,$PATH_SEPARATOR,;t t
-s, at PACKAGE_NAME@,$PACKAGE_NAME,;t t
-s, at PACKAGE_TARNAME@,$PACKAGE_TARNAME,;t t
-s, at PACKAGE_VERSION@,$PACKAGE_VERSION,;t t
-s, at PACKAGE_STRING@,$PACKAGE_STRING,;t t
-s, at PACKAGE_BUGREPORT@,$PACKAGE_BUGREPORT,;t t
-s, at exec_prefix@,$exec_prefix,;t t
-s, at prefix@,$prefix,;t t
-s, at program_transform_name@,$program_transform_name,;t t
-s, at bindir@,$bindir,;t t
-s, at sbindir@,$sbindir,;t t
-s, at libexecdir@,$libexecdir,;t t
-s, at datadir@,$datadir,;t t
-s, at sysconfdir@,$sysconfdir,;t t
-s, at sharedstatedir@,$sharedstatedir,;t t
-s, at localstatedir@,$localstatedir,;t t
-s, at libdir@,$libdir,;t t
-s, at includedir@,$includedir,;t t
-s, at oldincludedir@,$oldincludedir,;t t
-s, at infodir@,$infodir,;t t
-s, at mandir@,$mandir,;t t
-s, at build_alias@,$build_alias,;t t
-s, at host_alias@,$host_alias,;t t
-s, at target_alias@,$target_alias,;t t
-s, at DEFS@,$DEFS,;t t
-s, at ECHO_C@,$ECHO_C,;t t
-s, at ECHO_N@,$ECHO_N,;t t
-s, at ECHO_T@,$ECHO_T,;t t
-s, at LIBS@,$LIBS,;t t
-s, at CC@,$CC,;t t
-s, at CFLAGS@,$CFLAGS,;t t
-s, at LDFLAGS@,$LDFLAGS,;t t
-s, at CPPFLAGS@,$CPPFLAGS,;t t
-s, at ac_ct_CC@,$ac_ct_CC,;t t
-s, at EXEEXT@,$EXEEXT,;t t
-s, at OBJEXT@,$OBJEXT,;t t
-s, at CXX@,$CXX,;t t
-s, at CXXFLAGS@,$CXXFLAGS,;t t
-s, at ac_ct_CXX@,$ac_ct_CXX,;t t
-s, at RANLIB@,$RANLIB,;t t
-s, at ac_ct_RANLIB@,$ac_ct_RANLIB,;t t
-s, at CXX_PIC@,$CXX_PIC,;t t
-s, at C_PIC@,$C_PIC,;t t
-s, at LD_SHARED@,$LD_SHARED,;t t
-s, at SO_EXT@,$SO_EXT,;t t
-s, at CXX_WFLAGS@,$CXX_WFLAGS,;t t
-s, at C_WFLAGS@,$C_WFLAGS,;t t
-s, at GDAL_CONFIG@,$GDAL_CONFIG,;t t
-s, at GDAL_INC@,$GDAL_INC,;t t
-s, at AUTOLOAD_DIR@,$AUTOLOAD_DIR,;t t
-s, at GRASS_INCLUDE@,$GRASS_INCLUDE,;t t
-s, at GRASS_GISBASE@,$GRASS_GISBASE,;t t
-s, at LIBOBJS@,$LIBOBJS,;t t
-s, at LTLIBOBJS@,$LTLIBOBJS,;t t
-CEOF
 
-_ACEOF
+eval set X "  :F $CONFIG_FILES      "
+shift
+for ac_tag
+do
+  case $ac_tag in
+  :[FHLC]) ac_mode=$ac_tag; continue;;
+  esac
+  case $ac_mode$ac_tag in
+  :[FHL]*:*);;
+  :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
+  :[FH]-) ac_tag=-:-;;
+  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+  esac
+  ac_save_IFS=$IFS
+  IFS=:
+  set x $ac_tag
+  IFS=$ac_save_IFS
+  shift
+  ac_file=$1
+  shift
 
-  cat >>$CONFIG_STATUS <<\_ACEOF
-  # Split the substitutions into bite-sized pieces for seds with
-  # small command number limits, like on Digital OSF/1 and HP-UX.
-  ac_max_sed_lines=48
-  ac_sed_frag=1 # Number of current file.
-  ac_beg=1 # First line for current file.
-  ac_end=$ac_max_sed_lines # Line after last line for current file.
-  ac_more_lines=:
-  ac_sed_cmds=
-  while $ac_more_lines; do
-    if test $ac_beg -gt 1; then
-      sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
-    else
-      sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
-    fi
-    if test ! -s $tmp/subs.frag; then
-      ac_more_lines=false
-    else
-      # The purpose of the label and of the branching condition is to
-      # speed up the sed processing (if there are no `@' at all, there
-      # is no need to browse any of the substitutions).
-      # These are the two extra sed commands mentioned above.
-      (echo ':t
-  /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed
-      if test -z "$ac_sed_cmds"; then
-	ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed"
-      else
-	ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed"
-      fi
-      ac_sed_frag=`expr $ac_sed_frag + 1`
-      ac_beg=$ac_end
-      ac_end=`expr $ac_end + $ac_max_sed_lines`
+  case $ac_mode in
+  :L) ac_source=$1;;
+  :[FH])
+    ac_file_inputs=
+    for ac_f
+    do
+      case $ac_f in
+      -) ac_f="$ac_tmp/stdin";;
+      *) # Look for the file first in the build tree, then in the source tree
+	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
+	 # because $ac_f cannot contain `:'.
+	 test -f "$ac_f" ||
+	   case $ac_f in
+	   [\\/$]*) false;;
+	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+	   esac ||
+	   as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+      esac
+      case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+      as_fn_append ac_file_inputs " '$ac_f'"
+    done
+
+    # Let's still pretend it is `configure' which instantiates (i.e., don't
+    # use $as_me), people would be surprised to read:
+    #    /* config.h.  Generated by config.status.  */
+    configure_input='Generated from '`
+	  $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+	`' by configure.'
+    if test x"$ac_file" != x-; then
+      configure_input="$ac_file.  $configure_input"
+      { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
     fi
-  done
-  if test -z "$ac_sed_cmds"; then
-    ac_sed_cmds=cat
-  fi
-fi # test -n "$CONFIG_FILES"
+    # Neutralize special characters interpreted by sed in replacement strings.
+    case $configure_input in #(
+    *\&* | *\|* | *\\* )
+       ac_sed_conf_input=`$as_echo "$configure_input" |
+       sed 's/[\\\\&|]/\\\\&/g'`;; #(
+    *) ac_sed_conf_input=$configure_input;;
+    esac
 
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
-for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue
-  # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in".
-  case $ac_file in
-  - | *:- | *:-:* ) # input from stdin
-	cat >$tmp/stdin
-	ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
-	ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
-  *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
-	ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
-  * )   ac_file_in=$ac_file.in ;;
+    case $ac_tag in
+    *:-:* | *:-) cat >"$ac_tmp/stdin" \
+      || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+    esac
+    ;;
   esac
 
-  # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories.
-  ac_dir=`(dirname "$ac_file") 2>/dev/null ||
+  ac_dir=`$as_dirname -- "$ac_file" ||
 $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
 	 X"$ac_file" : 'X\(//\)[^/]' \| \
 	 X"$ac_file" : 'X\(//\)$' \| \
-	 X"$ac_file" : 'X\(/\)' \| \
-	 .     : '\(.\)' 2>/dev/null ||
-echo X"$ac_file" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
-  	  /^X\(\/\/\)[^/].*/{ s//\1/; q; }
-  	  /^X\(\/\/\)$/{ s//\1/; q; }
-  	  /^X\(\/\).*/{ s//\1/; q; }
-  	  s/.*/./; q'`
-  { if $as_mkdir_p; then
-    mkdir -p "$ac_dir"
-  else
-    as_dir="$ac_dir"
-    as_dirs=
-    while test ! -d "$as_dir"; do
-      as_dirs="$as_dir $as_dirs"
-      as_dir=`(dirname "$as_dir") 2>/dev/null ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_dir" : 'X\(//\)[^/]' \| \
-	 X"$as_dir" : 'X\(//\)$' \| \
-	 X"$as_dir" : 'X\(/\)' \| \
-	 .     : '\(.\)' 2>/dev/null ||
-echo X"$as_dir" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
-  	  /^X\(\/\/\)[^/].*/{ s//\1/; q; }
-  	  /^X\(\/\/\)$/{ s//\1/; q; }
-  	  /^X\(\/\).*/{ s//\1/; q; }
-  	  s/.*/./; q'`
-    done
-    test ! -n "$as_dirs" || mkdir $as_dirs
-  fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5
-echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;}
-   { (exit 1); exit 1; }; }; }
-
+	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+  as_dir="$ac_dir"; as_fn_mkdir_p
   ac_builddir=.
 
-if test "$ac_dir" != .; then
-  ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
-  # A "../" for each directory in $ac_dir_suffix.
-  ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
-else
-  ac_dir_suffix= ac_top_builddir=
-fi
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
 
 case $srcdir in
-  .)  # No --srcdir option.  We are building in place.
+  .)  # We are building in place.
     ac_srcdir=.
-    if test -z "$ac_top_builddir"; then
-       ac_top_srcdir=.
-    else
-       ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
-    fi ;;
-  [\\/]* | ?:[\\/]* )  # Absolute path.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
     ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir ;;
-  *) # Relative path.
-    ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_builddir$srcdir ;;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
 esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
 
-# Do not use `cd foo && pwd` to compute absolute paths, because
-# the directories may not exist.
-case `pwd` in
-.) ac_abs_builddir="$ac_dir";;
-*)
-  case "$ac_dir" in
-  .) ac_abs_builddir=`pwd`;;
-  [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";;
-  *) ac_abs_builddir=`pwd`/"$ac_dir";;
-  esac;;
-esac
-case $ac_abs_builddir in
-.) ac_abs_top_builddir=${ac_top_builddir}.;;
-*)
-  case ${ac_top_builddir}. in
-  .) ac_abs_top_builddir=$ac_abs_builddir;;
-  [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;;
-  *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;;
-  esac;;
-esac
-case $ac_abs_builddir in
-.) ac_abs_srcdir=$ac_srcdir;;
-*)
-  case $ac_srcdir in
-  .) ac_abs_srcdir=$ac_abs_builddir;;
-  [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;;
-  *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;;
-  esac;;
-esac
-case $ac_abs_builddir in
-.) ac_abs_top_srcdir=$ac_top_srcdir;;
-*)
-  case $ac_top_srcdir in
-  .) ac_abs_top_srcdir=$ac_abs_builddir;;
-  [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;;
-  *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;;
-  esac;;
-esac
 
+  case $ac_mode in
+  :F)
+  #
+  # CONFIG_FILE
+  #
 
+_ACEOF
 
-  if test x"$ac_file" != x-; then
-    { echo "$as_me:$LINENO: creating $ac_file" >&5
-echo "$as_me: creating $ac_file" >&6;}
-    rm -f "$ac_file"
-  fi
-  # Let's still pretend it is `configure' which instantiates (i.e., don't
-  # use $as_me), people would be surprised to read:
-  #    /* config.h.  Generated by config.status.  */
-  if test x"$ac_file" = x-; then
-    configure_input=
-  else
-    configure_input="$ac_file.  "
-  fi
-  configure_input=$configure_input"Generated from `echo $ac_file_in |
-				     sed 's,.*/,,'` by configure."
-
-  # First look for the input files in the build tree, otherwise in the
-  # src tree.
-  ac_file_inputs=`IFS=:
-    for f in $ac_file_in; do
-      case $f in
-      -) echo $tmp/stdin ;;
-      [\\/$]*)
-	 # Absolute (can't be DOS-style, as IFS=:)
-	 test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
-echo "$as_me: error: cannot find input file: $f" >&2;}
-   { (exit 1); exit 1; }; }
-	 echo "$f";;
-      *) # Relative
-	 if test -f "$f"; then
-	   # Build tree
-	   echo "$f"
-	 elif test -f "$srcdir/$f"; then
-	   # Source tree
-	   echo "$srcdir/$f"
-	 else
-	   # /dev/null tree
-	   { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
-echo "$as_me: error: cannot find input file: $f" >&2;}
-   { (exit 1); exit 1; }; }
-	 fi;;
-      esac
-    done` || { (exit 1); exit 1; }
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+  p
+  q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+  ac_datarootdir_hack='
+  s&@datadir@&$datadir&g
+  s&@docdir@&$docdir&g
+  s&@infodir@&$infodir&g
+  s&@localedir@&$localedir&g
+  s&@mandir@&$mandir&g
+  s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
 _ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
-  sed "$ac_vpsub
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
 $extrasub
 _ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 :t
 /@[a-zA-Z_][a-zA-Z_0-9]*@/!b
-s, at configure_input@,$configure_input,;t t
-s, at srcdir@,$ac_srcdir,;t t
-s, at abs_srcdir@,$ac_abs_srcdir,;t t
-s, at top_srcdir@,$ac_top_srcdir,;t t
-s, at abs_top_srcdir@,$ac_abs_top_srcdir,;t t
-s, at builddir@,$ac_builddir,;t t
-s, at abs_builddir@,$ac_abs_builddir,;t t
-s, at top_builddir@,$ac_top_builddir,;t t
-s, at abs_top_builddir@,$ac_abs_top_builddir,;t t
-" $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out
-  rm -f $tmp/stdin
-  if test x"$ac_file" != x-; then
-    mv $tmp/out $ac_file
-  else
-    cat $tmp/out
-    rm -f $tmp/out
-  fi
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+  >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+  { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' \
+      "$ac_tmp/out"`; test -z "$ac_out"; } &&
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined.  Please make sure it is defined" >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined.  Please make sure it is defined" >&2;}
+
+  rm -f "$ac_tmp/stdin"
+  case $ac_file in
+  -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+  *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+  esac \
+  || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
 
-done
-_ACEOF
 
-cat >>$CONFIG_STATUS <<\_ACEOF
 
-{ (exit 0); exit 0; }
+  esac
+
+done # for ac_tag
+
+
+as_fn_exit 0
 _ACEOF
-chmod +x $CONFIG_STATUS
 ac_clean_files=$ac_clean_files_save
 
+test $ac_write_fail = 0 ||
+  as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
 
 # configure is writing to config.log, and then calls config.status.
 # config.status does its own redirection, appending to config.log.
@@ -3971,7 +4477,11 @@ if test "$no_create" != yes; then
   exec 5>>config.log
   # Use ||, not &&, to avoid exiting from the if with $? = 1, which
   # would make configure fail if this is the last instruction.
-  $ac_cs_success || { (exit 1); exit 1; }
+  $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
 fi
 
 
diff --git a/frmts/grass/pkg/configure.in b/frmts/grass/pkg/configure.in
index 099a9e1..919c87e 100644
--- a/frmts/grass/pkg/configure.in
+++ b/frmts/grass/pkg/configure.in
@@ -1,5 +1,5 @@
 dnl ***************************************************************************
-dnl $Id: configure.in 20157 2010-07-28 19:32:04Z rouault $
+dnl $Id: configure.in 28712 2015-03-10 11:38:03Z martinl $
 dnl
 dnl Project:  GDAL GRASS Plugin
 dnl Purpose:  Configure source file.
@@ -91,7 +91,7 @@ AC_SUBST(GDAL_INC,    $GDAL_INC)
 dnl ---------------------------------------------------------------------------
 dnl Where to put driver?
 dnl ---------------------------------------------------------------------------
-AC_ARG_WITH(autoload,[  --with-autoload[=DIR]      Directory for autoload drivers],,)
+AC_ARG_WITH(autoload,[  --with-autoload[=DIR]     Directory for autoload drivers],,)
 
 if test "$with_autoload" != "" ; then
   AUTOLOAD_DIR=$with_autoload
@@ -123,23 +123,26 @@ fi
 
 if test "$with_grass" != "yes" ; then
 
-  AC_CHECK_LIB(grass_gis,G_asprintf,GRASS_SETTING=grass57+,GRASS_SETTING=no,-L$with_grass/lib -lgrass_I -lgrass_vask -lgrass_gmath -lgrass_gis -lgrass_datetime -lgrass_gproj -lgrass_vect -lgrass_dbmibase -lgrass_dbmiclient -lgrass_dgl -lgrass_dig2 -lgrass_rtree -lgrass_linkm)
+  AC_CHECK_LIB(grass_gis,G_is_initialized,GRASS_SETTING=grass70+,GRASS_SETTING=no,-L$with_grass/lib -lgrass_datetime)
+  if test "$GRASS_SETTING" = "no" ; then
+    AC_CHECK_LIB(grass_gis,G_asprintf,GRASS_SETTING=grass57+,GRASS_SETTING=no,-L$with_grass/lib -lgrass_datetime)
+  fi
    
-  if test "$GRASS_SETTING" = "grass57+" ; then   
-    LIBS="-L$with_grass/lib -lgrass_I -lgrass_vask -lgrass_gmath -lgrass_gis -lgrass_datetime -lgrass_gproj -lgrass_vect -lgrass_dbmibase -lgrass_dbmiclient -lgrass_dgl -lgrass_dig2 -lgrass_rtree -lgrass_linkm $LIBS"
+  if test "$GRASS_SETTING" != "no" ; then   
+    if test "$GRASS_SETTING" = "grass70+" ; then   
+      G_RASTLIBS="-lgrass_raster -lgrass_imagery"
+      G_VECTLIBS="-lgrass_vector -lgrass_dig2 -lgrass_dgl -lgrass_rtree -lgrass_linkm -lgrass_dbmiclient -lgrass_dbmibase"
+      LIBS="-L$with_grass/lib $G_VECTLIBS $G_RASTLIBS -lgrass_gproj -lgrass_gmath -lgrass_gis -lgrass_datetime $LIBS"
+    else
+      G_RASTLIBS="-lgrass_I"
+      G_VECTLIBS="-lgrass_vect -lgrass_dig2 -lgrass_dgl -lgrass_rtree -lgrass_linkm -lgrass_dbmiclient -lgrass_dbmibase"
+      LIBS="-L$with_grass/lib $G_VECTLIBS $G_RASTLIBS -lgrass_gproj -lgrass_vask -lgrass_gmath -lgrass_gis -lgrass_datetime $LIBS"
+    fi
     GRASS_INCLUDE="-I$with_grass/include"
     GRASS_GISBASE="$with_grass"
+    HAVE_GRASS=yes
   else
-
-    # Check for GRASS >= 7.0
-    AC_CHECK_LIB(grass_gis.7.0.svn,G_putenv,GRASS_SETTING=grass7+,GRASS_SETTING=no,-L$with_grass/lib -lgrass_raster.7.0.svn -lgrass_gmath.7.0.svn -lgrass_gis.7.0.svn -lgrass_datetime.7.0.svn -lgrass_gproj.7.0.svn -lgrass_vector.7.0.svn -lgrass_dbmibase.7.0.svn -lgrass_dbmiclient.7.0.svn -lgrass_dgl.7.0.svn -lgrass_dig2.7.0.svn -lgrass_rtree.7.0.svn -lgrass_linkm.7.0.svn -lgrass_btree2.7.0.svn -lgrass_ccmath.7.0.svn)
-    if test "$GRASS_SETTING" = "grass7+" ; then
-        LIBS="-L$with_grass/lib -lgrass_raster.7.0.svn -lgrass_gmath.7.0.svn -lgrass_gis.7.0.svn -lgrass_datetime.7.0.svn -lgrass_gproj.7.0.svn -lgrass_vector.7.0.svn -lgrass_dbmibase.7.0.svn -lgrass_dbmiclient.7.0.svn -lgrass_dgl.7.0.svn -lgrass_dig2.7.0.svn -lgrass_rtree.7.0.svn -lgrass_linkm.7.0.svn -lgrass_btree2.7.0.svn -lgrass_ccmath.7.0.svn $LIBS"
-        GRASS_INCLUDE="-I$with_grass/include"
-        GRASS_GISBASE="$with_grass"
-    else
-        AC_MSG_ERROR([--with-grass=$with_grass requested, but libraries not found!  Perhaps you need to set LD_LIBRARY_PATH to include $with_grass/lib?])
-    fi
+    AC_MSG_ERROR([--with-grass=$with_grass requested, but libraries not found!])
   fi
 fi
 
@@ -148,6 +151,22 @@ AC_SUBST(GRASS_GISBASE,$GRASS_GISBASE)
 
 dnl ---------------------------------------------------------------------------
 
+dnl ---------------------------------------------------------------------------
+dnl Find PostgreSQL (GRASS optional dependency)
+dnl ---------------------------------------------------------------------------
+
+AC_ARG_WITH(postgres_includes,[  --with-postgres-includes=DIR     use PostgreSQL includes in DIR], postgres_includes="$withval", postgres_includes=no)
+
+PQ_INCLUDE=
+if test "x$postgres_includes" != "xno"; then
+# With PostgreSQL includes directory
+PQ_INCLUDE="-I$postgres_includes"
+fi
+
+AC_SUBST(PQ_INCLUDE)
+
+dnl ---------------------------------------------------------------------------
+
 rm -f conftest*
 
 AC_OUTPUT(Makefile)
diff --git a/frmts/gtiff/geotiff.cpp b/frmts/gtiff/geotiff.cpp
index 48111e1..1d1a1c8 100644
--- a/frmts/gtiff/geotiff.cpp
+++ b/frmts/gtiff/geotiff.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: geotiff.cpp 28419 2015-02-06 00:28:50Z rouault $
+ * $Id: geotiff.cpp 29356 2015-06-15 09:27:49Z rouault $
  *
  * Project:  GeoTIFF Driver
  * Purpose:  GDAL GeoTIFF support.
@@ -60,7 +60,7 @@
 #include "tiffiop.h"
 #endif
 
-CPL_CVSID("$Id: geotiff.cpp 28419 2015-02-06 00:28:50Z rouault $");
+CPL_CVSID("$Id: geotiff.cpp 29356 2015-06-15 09:27:49Z rouault $");
 
 #if SIZEOF_VOIDP == 4
 static int bGlobalStripIntegerOverflow = FALSE;
@@ -3845,7 +3845,9 @@ int GTiffDataset::WriteEncodedTile(uint32 tile, GByte *pabyData,
     /*
     ** Perform tile fill if needed.
     */
-    if( bNeedTileFill )
+    // TODO: we should also handle the case of nBitsPerSample == 12
+    // but this is more involved...
+    if( bNeedTileFill && nBitsPerSample == 8 )
     {
         int nRightPixelsToFill = 0;
         int nBottomPixelsToFill = 0;
@@ -4306,10 +4308,16 @@ int GTiffDataset::IsBlockAvailable( int nBlockId )
             vsi_l_offset nCurOffset = VSIFTellL(fp);
             if( ~(hTIFF->tif_dir.td_stripoffset[nBlockId]) == 0 )
             {
+                vsi_l_offset nDirOffset;
+                if( hTIFF->tif_flags&TIFF_BIGTIFF )
+                    nDirOffset = hTIFF->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8;
+                else
+                    nDirOffset = hTIFF->tif_dir.td_stripoffset_entry.tdir_offset.toff_long;
+
                 if( hTIFF->tif_dir.td_stripoffset_entry.tdir_type == TIFF_LONG )
                 {
                     GTiffCacheOffsetOrCount4(fp,
-                                             hTIFF->tif_dir.td_stripoffset_entry.tdir_offset.toff_long,
+                                             nDirOffset,
                                              nBlockId,
                                              hTIFF->tif_dir.td_nstrips,
                                              hTIFF->tif_dir.td_stripoffset);
@@ -4317,7 +4325,7 @@ int GTiffDataset::IsBlockAvailable( int nBlockId )
                 else
                 {
                     GTiffCacheOffsetOrCount8(fp,
-                                             hTIFF->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8,
+                                             nDirOffset,
                                              nBlockId,
                                              hTIFF->tif_dir.td_nstrips,
                                              hTIFF->tif_dir.td_stripoffset);
@@ -4326,10 +4334,16 @@ int GTiffDataset::IsBlockAvailable( int nBlockId )
 
             if( ~(hTIFF->tif_dir.td_stripbytecount[nBlockId]) == 0 )
             {
+                vsi_l_offset nDirOffset;
+                if( hTIFF->tif_flags&TIFF_BIGTIFF )
+                    nDirOffset = hTIFF->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8;
+                else
+                    nDirOffset = hTIFF->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long;
+
                 if( hTIFF->tif_dir.td_stripbytecount_entry.tdir_type == TIFF_LONG )
                 {
                     GTiffCacheOffsetOrCount4(fp,
-                                             hTIFF->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long,
+                                             nDirOffset,
                                              nBlockId,
                                              hTIFF->tif_dir.td_nstrips,
                                              hTIFF->tif_dir.td_stripbytecount);
@@ -4337,7 +4351,7 @@ int GTiffDataset::IsBlockAvailable( int nBlockId )
                 else
                 {
                     GTiffCacheOffsetOrCount8(fp,
-                                             hTIFF->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8,
+                                             nDirOffset,
                                              nBlockId,
                                              hTIFF->tif_dir.td_nstrips,
                                              hTIFF->tif_dir.td_stripbytecount);
diff --git a/frmts/hdf4/hdf4dataset.cpp b/frmts/hdf4/hdf4dataset.cpp
index b6bc1a6..e290c70 100644
--- a/frmts/hdf4/hdf4dataset.cpp
+++ b/frmts/hdf4/hdf4dataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: hdf4dataset.cpp 28365 2015-01-27 10:39:30Z rouault $
+ * $Id: hdf4dataset.cpp 29209 2015-05-19 13:56:01Z rouault $
  *
  * Project:  Hierarchical Data Format Release 4 (HDF4)
  * Purpose:  HDF4 Datasets. Open HDF4 file, fetch metadata and list of
@@ -42,7 +42,7 @@
 #include "hdf4compat.h"
 #include "hdf4dataset.h"
 
-CPL_CVSID("$Id: hdf4dataset.cpp 28365 2015-01-27 10:39:30Z rouault $");
+CPL_CVSID("$Id: hdf4dataset.cpp 29209 2015-05-19 13:56:01Z rouault $");
 
 CPL_C_START
 void	GDALRegister_HDF4(void);
@@ -298,11 +298,11 @@ double HDF4Dataset::AnyTypeToDouble( int32 iNumType, void *pData )
         case DFNT_UINT16:
             return (double)*(unsigned short *)pData;
         case DFNT_INT32:
-            return (double)*(long *)pData;
+            return (double)*(int *)pData;
         case DFNT_UINT32:
-            return (double)*(unsigned long *)pData;
+            return (double)*(unsigned int *)pData;
         case DFNT_INT64:
-            return (double)*(char *)pData;
+            return (double)*(char *)pData; // highly suspicious ! Should be GIntBig. But cannot verify
         case DFNT_UINT64:
             return (double)*(GIntBig *)pData;
         case DFNT_FLOAT32:
diff --git a/frmts/hf2/hf2dataset.cpp b/frmts/hf2/hf2dataset.cpp
index 3b4c414..d8cd232 100644
--- a/frmts/hf2/hf2dataset.cpp
+++ b/frmts/hf2/hf2dataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: hf2dataset.cpp 27044 2014-03-16 23:41:27Z rouault $
+ * $Id: hf2dataset.cpp 29773 2015-08-24 09:49:09Z rouault $
  *
  * Project:  HF2 driver
  * Purpose:  GDALDataset driver for HF2/HFZ dataset.
@@ -31,7 +31,7 @@
 #include "gdal_pam.h"
 #include "ogr_spatialref.h"
 
-CPL_CVSID("$Id: hf2dataset.cpp 27044 2014-03-16 23:41:27Z rouault $");
+CPL_CVSID("$Id: hf2dataset.cpp 29773 2015-08-24 09:49:09Z rouault $");
 
 CPL_C_START
 void    GDALRegister_HF2(void);
@@ -198,7 +198,7 @@ CPLErr HF2RasterBand::IReadBlock( int nBlockXOff, int nLineYOff,
                 for(i=1;i<nTileWidth;i++)
                 {
                     if (nWordSize == 1)
-                        nVal += ((char*)pabyData)[i-1];
+                        nVal += ((signed char*)pabyData)[i-1];
                     else if (nWordSize == 2)
                         nVal += ((GInt16*)pabyData)[i-1];
                     else
@@ -960,7 +960,7 @@ GDALDataset* HF2Dataset::CreateCopy( const char * pszFilename,
                         if (nWordSize == 1)
                         {
                             CPLAssert(nDiff >= -128 && nDiff <= 127);
-                            char chDiff = (char)nDiff;
+                            signed char chDiff = (signed char)nDiff;
                             VSIFWriteL(&chDiff, 1, 1, fp);
                         }
                         else if (nWordSize == 2)
@@ -1032,7 +1032,7 @@ GDALDataset* HF2Dataset::CreateCopy( const char * pszFilename,
                         if (nWordSize == 1)
                         {
                             CPLAssert(nDiff >= -128 && nDiff <= 127);
-                            char chDiff = (char)nDiff;
+                            signed char chDiff = (signed char)nDiff;
                             VSIFWriteL(&chDiff, 1, 1, fp);
                         }
                         else if (nWordSize == 2)
diff --git a/frmts/netcdf/netcdfdataset.cpp b/frmts/netcdf/netcdfdataset.cpp
index 5c7d7a8..62c67e1 100644
--- a/frmts/netcdf/netcdfdataset.cpp
+++ b/frmts/netcdf/netcdfdataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: netcdfdataset.cpp 28365 2015-01-27 10:39:30Z rouault $
+ * $Id: netcdfdataset.cpp 29201 2015-05-15 18:06:42Z rouault $
  *
  * Project:  netCDF read/write Driver
  * Purpose:  GDAL bindings over netCDF library.
@@ -33,7 +33,7 @@
 #include "cpl_error.h"
 #include "cpl_multiproc.h"
 
-CPL_CVSID("$Id: netcdfdataset.cpp 28365 2015-01-27 10:39:30Z rouault $");
+CPL_CVSID("$Id: netcdfdataset.cpp 29201 2015-05-15 18:06:42Z rouault $");
 
 #include <map> //for NCDFWriteProjAttribs()
 
@@ -1991,7 +1991,7 @@ void netCDFDataset::SetProjectionFromVar( int nVarId )
                             dfSemiMajorAxis = dfEarthRadius;
                         //set inv_flat using semi_minor/major
                         dfInverseFlattening = 
-                            1.0 / ( dfSemiMajorAxis - dfSemiMinorAxis ) / dfSemiMajorAxis;
+                            1.0 / (( dfSemiMajorAxis - dfSemiMinorAxis ) / dfSemiMajorAxis);
                         oSRS.SetGeogCS( "unknown", 
                                         NULL, 
                                         "Spheroid", 
@@ -4604,7 +4604,8 @@ GDALDataset *netCDFDataset::Open( GDALOpenInfo * poOpenInfo )
             if ( papszTokens) CSLDestroy( papszTokens );
             CPLFree( pszTemp );
         }
-        if ( NCDFGetAttr( cdfid, j, "bounds", &pszTemp ) == CE_None ) { 
+        if ( NCDFGetAttr( cdfid, j, "bounds", &pszTemp ) == CE_None &&
+             pszTemp != NULL ) { 
             if ( !EQUAL( pszTemp, "" ) )
                 papszIgnoreVars = CSLAddString( papszIgnoreVars, pszTemp );
             CPLFree( pszTemp );
@@ -6829,7 +6830,7 @@ int NCDFDoesVarContainAttribVal( int nCdfId,
 
     for( int i=0; !bFound && i<CSLCount((char**)papszAttribNames); i++ ) {
         if ( NCDFGetAttr( nCdfId, nVarId, papszAttribNames[i], &pszTemp ) 
-             == CE_None ) { 
+             == CE_None && pszTemp != NULL ) { 
             if ( bStrict ) {
                 if ( EQUAL( pszTemp, papszAttribValues[i] ) )
                     bFound=TRUE;
@@ -6860,7 +6861,7 @@ int NCDFDoesVarContainAttribVal2( int nCdfId,
     if ( nVarId == -1 ) return -1;
 
     if ( NCDFGetAttr( nCdfId, nVarId, papszAttribName, &pszTemp ) 
-         != CE_None ) return FALSE;
+         != CE_None || pszTemp == NULL ) return FALSE;
 
     for( int i=0; !bFound && i<CSLCount((char**)papszAttribValues); i++ ) {
         if ( bStrict ) {
diff --git a/frmts/nitf/ecrgtocdataset.cpp b/frmts/nitf/ecrgtocdataset.cpp
index 3b7f455..0d9c54c 100644
--- a/frmts/nitf/ecrgtocdataset.cpp
+++ b/frmts/nitf/ecrgtocdataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ecrgtocdataset.cpp 27044 2014-03-16 23:41:27Z rouault $
+ * $Id: ecrgtocdataset.cpp 29779 2015-08-25 09:03:00Z rouault $
  *
  * Project:  ECRG TOC read Translator
  * Purpose:  Implementation of ECRGTOCDataset and ECRGTOCSubDataset.
@@ -35,7 +35,7 @@
 #include "cpl_minixml.h"
 #include <vector>
 
-CPL_CVSID("$Id: ecrgtocdataset.cpp 27044 2014-03-16 23:41:27Z rouault $");
+CPL_CVSID("$Id: ecrgtocdataset.cpp 29779 2015-08-25 09:03:00Z rouault $");
 
 /** Overview of used classes :
    - ECRGTOCDataset : lists the different subdatasets, listed in the .xml,
@@ -85,7 +85,8 @@ class ECRGTOCDataset : public GDALPamDataset
 
     void                AddSubDataset(const char* pszFilename,
                                       const char* pszProductTitle,
-                                      const char* pszDiscId);
+                                      const char* pszDiscId,
+                                      const char* pszScale);
 
     virtual CPLErr GetGeoTransform( double * padfGeoTransform)
     {
@@ -102,6 +103,7 @@ class ECRGTOCDataset : public GDALPamDataset
                                 CPLXMLNode* psXML,
                                 CPLString osProduct,
                                 CPLString osDiscId,
+                                CPLString osScale,
                                 const char* pszFilename);
     
     static int Identify( GDALOpenInfo * poOpenInfo );
@@ -153,12 +155,28 @@ class ECRGTOCSubDataset : public VRTDataset
 };
 
 /************************************************************************/
+/*                           LaunderString()                            */
+/************************************************************************/
+
+static CPLString LaunderString(const char* pszStr)
+{
+    CPLString osRet(pszStr);
+    for(size_t i=0;i<osRet.size();i++)
+    {
+        if( osRet[i] == ':' || osRet[i] == ' ' )
+            osRet[i] = '_';
+    }
+    return osRet;
+}
+
+/************************************************************************/
 /*                           AddSubDataset()                            */
 /************************************************************************/
 
 void ECRGTOCDataset::AddSubDataset( const char* pszFilename,
                                     const char* pszProductTitle,
-                                    const char* pszDiscId )
+                                    const char* pszDiscId,
+                                    const char* pszScale)
 
 {
     char	szName[80];
@@ -167,13 +185,15 @@ void ECRGTOCDataset::AddSubDataset( const char* pszFilename,
     sprintf( szName, "SUBDATASET_%d_NAME", nCount+1 );
     papszSubDatasets = 
         CSLSetNameValue( papszSubDatasets, szName, 
-              CPLSPrintf( "ECRG_TOC_ENTRY:%s:%s:%s",
-                          pszProductTitle, pszDiscId, pszFilename ) );
+              CPLSPrintf( "ECRG_TOC_ENTRY:%s:%s:%s:%s",
+                          LaunderString(pszProductTitle).c_str(),
+                          LaunderString(pszDiscId).c_str(),
+                          LaunderString(pszScale).c_str(), pszFilename ) );
 
     sprintf( szName, "SUBDATASET_%d_DESC", nCount+1 );
     papszSubDatasets =
         CSLSetNameValue( papszSubDatasets, szName,
-            CPLSPrintf( "%s:%s", pszProductTitle, pszDiscId));
+            CPLSPrintf( "Product %s, disc %s, scale %s", pszProductTitle, pszDiscId, pszScale));
 }
 
 /************************************************************************/
@@ -638,6 +658,7 @@ GDALDataset* ECRGTOCDataset::Build(const char* pszTOCFilename,
                                    CPLXMLNode* psXML,
                                    CPLString osProduct,
                                    CPLString osDiscId,
+                                   CPLString osScale,
                                    const char* pszOpenInfoFilename)
 {
     CPLXMLNode* psTOC = CPLGetXMLNode(psXML, "=Table_of_Contents");
@@ -679,7 +700,7 @@ GDALDataset* ECRGTOCDataset::Build(const char* pszTOCFilename,
             continue;
         }
 
-        if (bLookForSubDataset && strcmp(pszProductTitle, osProduct.c_str()) != 0)
+        if (bLookForSubDataset && strcmp(LaunderString(pszProductTitle), osProduct.c_str()) != 0)
             continue;
 
         for(CPLXMLNode* psIter2 = psIter1->psChild;
@@ -698,11 +719,9 @@ GDALDataset* ECRGTOCDataset::Build(const char* pszTOCFilename,
                 continue;
             }
 
-            if (bLookForSubDataset && strcmp(pszDiscId, osDiscId.c_str()) != 0)
+            if (bLookForSubDataset && strcmp(LaunderString(pszDiscId), osDiscId.c_str()) != 0)
                 continue;
 
-            nCountSubDataset ++;
-
             CPLXMLNode* psFrameList = CPLGetXMLNode(psIter2, "frame_list");
             if (psFrameList == NULL)
             {
@@ -711,12 +730,6 @@ GDALDataset* ECRGTOCDataset::Build(const char* pszTOCFilename,
                 continue;
             }
 
-            int nValidFrames = 0;
-
-            std::vector<FrameDesc> aosFrameDesc;
-
-            int nSubDatasetScale = -1;
-
             for(CPLXMLNode* psIter3 = psFrameList->psChild;
                             psIter3 != NULL;
                             psIter3 = psIter3->psNext)
@@ -742,10 +755,43 @@ GDALDataset* ECRGTOCDataset::Build(const char* pszTOCFilename,
                     continue;
                 }
 
-                if (nValidFrames == 0)
-                    nSubDatasetScale = nScale;
-                else
-                    nSubDatasetScale = -1;
+                if( bLookForSubDataset )
+                {
+                    if( osScale.size() )
+                    {
+                        if( strcmp(LaunderString(pszSize), osScale.c_str()) != 0 )
+                        {
+                            continue;
+                        }
+                    }
+                    else
+                    {
+                        int nCountScales = 0;
+                        for(CPLXMLNode* psIter4 = psFrameList->psChild;
+                                psIter4 != NULL;
+                                psIter4 = psIter4->psNext)
+                        {
+                            if (!(psIter4->eType == CXT_Element &&
+                                psIter4->pszValue != NULL &&
+                                strcmp(psIter4->pszValue, "scale") == 0))
+                                continue;
+                            nCountScales ++;
+                        }
+                        if( nCountScales > 1 )
+                        {
+                            CPLError(CE_Failure, CPLE_AppDefined,
+                                     "Scale should be mentionned in subdatasets "
+                                     "syntax since this disk contains several scales");
+                            delete poDS;
+                            return NULL;
+                        }
+                    }
+                }
+
+                nCountSubDataset ++;
+
+                std::vector<FrameDesc> aosFrameDesc;
+                int nValidFrames = 0;
 
                 for(CPLXMLNode* psIter4 = psIter3->psChild;
                                 psIter4 != NULL;
@@ -860,6 +906,8 @@ GDALDataset* ECRGTOCDataset::Build(const char* pszTOCFilename,
                             dfGlobalPixelYSize = dfPixelYSize;
                     }
 
+                    nValidFrames ++;
+
                     if (bLookForSubDataset)
                     {
                         FrameDesc frameDesc;
@@ -870,32 +918,32 @@ GDALDataset* ECRGTOCDataset::Build(const char* pszTOCFilename,
                         aosFrameDesc.push_back(frameDesc);
                     }
                 }
-            }
 
-            if (bLookForSubDataset)
-            {
-                delete poDS;
-                if (nValidFrames == 0)
-                    return NULL;
-                return ECRGTOCSubDataset::Build(pszProductTitle,
-                                                pszDiscId,
-                                                nSubDatasetScale,
-                                                nCountSubDataset,
-                                                pszTOCFilename,
-                                                aosFrameDesc,
-                                                dfGlobalMinX,
-                                                dfGlobalMinY,
-                                                dfGlobalMaxX,
-                                                dfGlobalMaxY,
-                                                dfGlobalPixelXSize,
-                                                dfGlobalPixelYSize);
-            }
+                if (bLookForSubDataset)
+                {
+                    delete poDS;
+                    if (nValidFrames == 0)
+                        return NULL;
+                    return ECRGTOCSubDataset::Build(pszProductTitle,
+                                                    pszDiscId,
+                                                    nScale,
+                                                    nCountSubDataset,
+                                                    pszTOCFilename,
+                                                    aosFrameDesc,
+                                                    dfGlobalMinX,
+                                                    dfGlobalMinY,
+                                                    dfGlobalMaxX,
+                                                    dfGlobalMaxY,
+                                                    dfGlobalPixelXSize,
+                                                    dfGlobalPixelYSize);
+                }
 
-            if (nValidFrames)
-            {
-                poDS->AddSubDataset(pszOpenInfoFilename,
-                                    pszProductTitle, pszDiscId);
-                nSubDatasets ++;
+                if (nValidFrames)
+                {
+                    poDS->AddSubDataset(pszOpenInfoFilename,
+                                        pszProductTitle, pszDiscId, pszSize);
+                    nSubDatasets ++;
+                }
             }
         }
     }
@@ -959,7 +1007,7 @@ int ECRGTOCDataset::Identify( GDALOpenInfo * poOpenInfo )
     if( pabyHeader == NULL )
         return FALSE;
 
-    if ( strstr(pabyHeader, "<Table_of_Contents>") != NULL &&
+    if ( strstr(pabyHeader, "<Table_of_Contents") != NULL &&
          strstr(pabyHeader, "<file_header ") != NULL)
         return TRUE;
 
@@ -977,7 +1025,8 @@ GDALDataset *ECRGTOCDataset::Open( GDALOpenInfo * poOpenInfo )
 
 {
     const char *pszFilename = poOpenInfo->pszFilename;
-    CPLString osProduct, osDiscId;
+    CPLString osFilename;
+    CPLString osProduct, osDiscId, osScale;
 
     if( !Identify( poOpenInfo ) )
         return NULL;
@@ -985,20 +1034,56 @@ GDALDataset *ECRGTOCDataset::Open( GDALOpenInfo * poOpenInfo )
     if( EQUALN(pszFilename, "ECRG_TOC_ENTRY:",strlen("ECRG_TOC_ENTRY:")))
     {
         pszFilename += strlen("ECRG_TOC_ENTRY:");
-        osProduct = pszFilename;
-        size_t iPos = osProduct.find(":");
-        if (iPos == std::string::npos)
+        
+        /* PRODUCT:DISK:SCALE:FILENAME (or PRODUCT:DISK:FILENAME historically) */
+        /* with FILENAME potentially C:\BLA... */
+        char** papszTokens = CSLTokenizeString2(pszFilename, ":", 0);
+        int nTokens = CSLCount(papszTokens);
+        if( nTokens != 3 && nTokens != 4 && nTokens != 5 )
+        {
+            CSLDestroy(papszTokens);
             return NULL;
-        osProduct.resize(iPos);
+        }
+        
+        osProduct = papszTokens[0];
+        osDiscId = papszTokens[1];
 
-        pszFilename += iPos + 1;
-        osDiscId = pszFilename;
-        iPos = osDiscId.find(":");
-        if (iPos == std::string::npos)
+        if( nTokens == 3 )
+            osFilename = papszTokens[2];
+        else if( nTokens == 4 )
+        {
+            if( strlen(papszTokens[2]) == 1 &&
+                (papszTokens[3][0] == '\\' ||
+                 papszTokens[3][0] == '/') )
+            {
+                osFilename = papszTokens[2];
+                osFilename += ":";
+                osFilename = papszTokens[3];
+            }
+            else
+            {
+                osScale = papszTokens[2];
+                osFilename = papszTokens[3];
+            }
+        }
+        else if( nTokens == 5 &&
+                strlen(papszTokens[3]) == 1 &&
+                (papszTokens[4][0] == '\\' ||
+                 papszTokens[4][0] == '/') )
+        {
+            osScale = papszTokens[2];
+            osFilename = papszTokens[3];
+            osFilename += ":";
+            osFilename = papszTokens[4];
+        }
+        else
+        {
+            CSLDestroy(papszTokens);
             return NULL;
-        osDiscId.resize(iPos);
+        }
 
-        pszFilename += iPos + 1;
+        CSLDestroy(papszTokens);
+        pszFilename = osFilename.c_str();
     }
 
 /* -------------------------------------------------------------------- */
@@ -1011,6 +1096,7 @@ GDALDataset *ECRGTOCDataset::Open( GDALOpenInfo * poOpenInfo )
     }
 
     GDALDataset* poDS = Build( pszFilename, psXML, osProduct, osDiscId,
+                               osScale,
                                poOpenInfo->pszFilename);
     CPLDestroyXMLNode(psXML);
 
diff --git a/frmts/nitf/rpftocfile.cpp b/frmts/nitf/rpftocfile.cpp
index 464d2f6..bbf7042 100644
--- a/frmts/nitf/rpftocfile.cpp
+++ b/frmts/nitf/rpftocfile.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: rpftocfile.cpp 27044 2014-03-16 23:41:27Z rouault $
+ * $Id: rpftocfile.cpp 29261 2015-05-29 09:15:58Z rouault $
  *
  * Project:  RPF A.TOC read Library
  * Purpose:  Module responsible for opening a RPF TOC file, populating RPFToc
@@ -49,7 +49,7 @@
 #include "cpl_conv.h"
 #include "cpl_string.h"
 
-CPL_CVSID("$Id: rpftocfile.cpp 27044 2014-03-16 23:41:27Z rouault $");
+CPL_CVSID("$Id: rpftocfile.cpp 29261 2015-05-29 09:15:58Z rouault $");
 
 /************************************************************************/
 /*                        RPFTOCTrim()                                    */
@@ -497,10 +497,22 @@ RPFToc* RPFTOCReadFromBuffer(const char* pszFilename, VSILFILE* fp, const char*
         frameEntry->directory[pathLength] = 0;
         if (pathLength > 0 && frameEntry->directory[pathLength-1] == '/')
             frameEntry->directory[pathLength-1] = 0;
-        
+
         if (frameEntry->directory[0] == '.' && frameEntry->directory[1] == '/')
+        {
             memmove(frameEntry->directory, frameEntry->directory+2, strlen(frameEntry->directory+2)+1);
-        
+
+            // Some A.TOC have subdirectory names like ".//X/" ... (#5979)
+            // Check if it wasn't intended to be "./X/" instead
+            VSIStatBufL sStatBuf;
+            if( frameEntry->directory[0] == '/' &&
+                VSIStatL(CPLFormFilename(CPLGetDirname(pszFilename), frameEntry->directory+1, NULL), &sStatBuf) == 0 &&
+                VSI_ISDIR(sStatBuf.st_mode) )
+            {
+                memmove(frameEntry->directory, frameEntry->directory+1, strlen(frameEntry->directory+1)+1);
+            }
+        }
+
         {
             char* baseDir = CPLStrdup(CPLGetDirname(pszFilename));
             VSIStatBufL sStatBuf;
diff --git a/frmts/northwood/grcdataset.cpp b/frmts/northwood/grcdataset.cpp
index e012e37..1924684 100644
--- a/frmts/northwood/grcdataset.cpp
+++ b/frmts/northwood/grcdataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: grcdataset.cpp 27729 2014-09-24 00:40:16Z goatbar $
+ * $Id: grcdataset.cpp 28503 2015-02-16 16:56:48Z rouault $
  *
  * Project:  GRC Reader
  * Purpose:  GDAL driver for Northwood Classified Format
@@ -83,9 +83,6 @@ class NWT_GRCDataset : public GDALPamDataset
 class NWT_GRCRasterBand : public GDALPamRasterBand
 {
   friend class NWT_GRCDataset;
-    int bHaveOffsetScale;
-    double dfOffset;
-    double dfScale;
 
   public:
 
@@ -95,11 +92,6 @@ class NWT_GRCRasterBand : public GDALPamRasterBand
     virtual CPLErr IReadBlock( int, int, void * );
     virtual double GetNoDataValue( int *pbSuccess );
 
-    virtual double GetOffset( int *pbSuccess = NULL );
-    virtual CPLErr SetOffset( double dfNewValue );
-    virtual double GetScale( int *pbSuccess = NULL );
-    virtual CPLErr SetScale( double dfNewValue );
-
     virtual GDALColorInterp GetColorInterpretation();
     virtual char **GetCategoryNames();
     virtual GDALColorTable *GetColorTable();
@@ -116,9 +108,6 @@ NWT_GRCRasterBand::NWT_GRCRasterBand( NWT_GRCDataset * poDS, int nBand )
     this->nBand = nBand;
     NWT_GRCDataset *poGDS =( NWT_GRCDataset * ) poDS;
 
-    bHaveOffsetScale = FALSE;
-    dfOffset = 0;
-    dfScale = 1.0;
     if( poGDS->pGrd->nBitsPerPixel == 8 )
         eDataType = GDT_Byte;
     else if( poGDS->pGrd->nBitsPerPixel == 16 )
@@ -249,49 +238,6 @@ CPLErr NWT_GRCRasterBand::IReadBlock( CPL_UNUSED int nBlockXOff, int nBlockYOff,
     return CE_None;
 }
 
-
-/************************************************************************/
-/*                             GetOffset()                              */
-/************************************************************************/
-double NWT_GRCRasterBand::GetOffset( int *pbSuccess )
-{
-    if( pbSuccess )
-        *pbSuccess = bHaveOffsetScale;
-    return dfOffset;
-}
-
-/************************************************************************/
-/*                             SetOffset()                              */
-/************************************************************************/
-CPLErr NWT_GRCRasterBand::SetOffset( double dfNewValue )
-{
-    //poGDS->bMetadataChanged = TRUE;
-
-    bHaveOffsetScale = TRUE;
-    dfOffset = dfNewValue;
-    return CE_None;
-}
-
-/************************************************************************/
-/*                              GetScale()                              */
-/************************************************************************/
-double NWT_GRCRasterBand::GetScale( int *pbSuccess )
-{
-    if( pbSuccess )
-        *pbSuccess = bHaveOffsetScale;
-    return dfScale;
-}
-
-/************************************************************************/
-/*                              SetScale()                              */
-/************************************************************************/
-CPLErr NWT_GRCRasterBand::SetScale( double dfNewValue )
-{
-    bHaveOffsetScale = TRUE;
-    dfScale = dfNewValue;
-    return CE_None;
-}
-
 /************************************************************************/
 /* ==================================================================== */
 /*                          NWT_GRCDataset                              */
diff --git a/frmts/northwood/grddataset.cpp b/frmts/northwood/grddataset.cpp
index 6ba9d88..d32596a 100644
--- a/frmts/northwood/grddataset.cpp
+++ b/frmts/northwood/grddataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: grddataset.cpp 27729 2014-09-24 00:40:16Z goatbar $
+ * $Id: grddataset.cpp 28503 2015-02-16 16:56:48Z rouault $
  *
  * Project:  GRD Reader
  * Purpose:  GDAL driver for Northwood Grid Format
@@ -91,13 +91,6 @@ class NWT_GRDRasterBand:public GDALPamRasterBand
     virtual CPLErr IReadBlock( int, int, void * );
     virtual double GetNoDataValue( int *pbSuccess );
 
-    /* FIXME. I don't believe it is correct to advertize offset and */
-    /* scale because IReadBlock() already apply them. */
-    virtual double GetOffset( int *pbSuccess = NULL );
-    virtual CPLErr SetOffset( double dfNewValue );
-    virtual double GetScale( int *pbSuccess = NULL );
-    virtual CPLErr SetScale( double dfNewValue );
-
     virtual GDALColorInterp GetColorInterpretation();
 };
 
@@ -246,48 +239,6 @@ CPLErr NWT_GRDRasterBand::IReadBlock( CPL_UNUSED int nBlockXOff, int nBlockYOff,
 }
 
 /************************************************************************/
-/*                             GetOffset()                              */
-/************************************************************************/
-double NWT_GRDRasterBand::GetOffset( int *pbSuccess )
-{
-    if( pbSuccess )
-        *pbSuccess = bHaveOffsetScale;
-    return dfOffset;
-}
-
-/************************************************************************/
-/*                             SetOffset()                              */
-/************************************************************************/
-CPLErr NWT_GRDRasterBand::SetOffset( double dfNewValue )
-{
-    //poGDS->bMetadataChanged = TRUE;
-
-    bHaveOffsetScale = TRUE;
-    dfOffset = dfNewValue;
-    return CE_None;
-}
-
-/************************************************************************/
-/*                              GetScale()                              */
-/************************************************************************/
-double NWT_GRDRasterBand::GetScale( int *pbSuccess )
-{
-    if( pbSuccess )
-        *pbSuccess = bHaveOffsetScale;
-    return dfScale;
-}
-
-/************************************************************************/
-/*                              SetScale()                              */
-/************************************************************************/
-CPLErr NWT_GRDRasterBand::SetScale( double dfNewValue )
-{
-    bHaveOffsetScale = TRUE;
-    dfScale = dfNewValue;
-    return CE_None;
-}
-
-/************************************************************************/
 /* ==================================================================== */
 /*                             NWT_GRDDataset                           */
 /* ==================================================================== */
diff --git a/frmts/northwood/northwood.cpp b/frmts/northwood/northwood.cpp
index c72806b..d91d40f 100644
--- a/frmts/northwood/northwood.cpp
+++ b/frmts/northwood/northwood.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: northwood.cpp 27739 2014-09-25 18:49:52Z goatbar $
+ * $Id: northwood.cpp 30005 2015-09-01 08:17:50Z rouault $
  *
  * Project:  GRC/GRD Reader
  * Purpose:  Northwood Format basic implementation
@@ -387,9 +387,9 @@ void createIP( int index, unsigned char r, unsigned char g, unsigned char b,
     float bslope = (float)(b - map[wm].b) / (float)(index - wm);
     for( i = wm + 1; i < index; i++)
     {
-        map[i].r = map[wm].r + (unsigned char)(((i - wm) * rslope) + 0.5);
-        map[i].g = map[wm].g + (unsigned char)(((i - wm) * gslope) + 0.5);
-        map[i].b = map[wm].b + (unsigned char)(((i - wm) * bslope) + 0.5);
+        map[i].r = (unsigned char)(map[wm].r + ((i - wm) * rslope) + 0.5);
+        map[i].g = (unsigned char)(map[wm].g + ((i - wm) * gslope) + 0.5);
+        map[i].b = (unsigned char)(map[wm].b + ((i - wm) * bslope) + 0.5);
     }
     map[index].r = r;
     map[index].g = g;
diff --git a/frmts/pcidsk/sdk/channel/cpcidskchannel.cpp b/frmts/pcidsk/sdk/channel/cpcidskchannel.cpp
index b8e4f53..b32ba48 100644
--- a/frmts/pcidsk/sdk/channel/cpcidskchannel.cpp
+++ b/frmts/pcidsk/sdk/channel/cpcidskchannel.cpp
@@ -200,7 +200,7 @@ PCIDSKChannel *CPCIDSKChannel::GetOverview( int overview_index )
     EstablishOverviewInfo();
 
     if( overview_index < 0 || overview_index >= (int) overview_infos.size() )
-        ThrowPCIDSKException( "Non existant overview (%d) requested.", 
+        ThrowPCIDSKException( "Non existent overview (%d) requested.", 
                               overview_index );
 
     if( overview_bands[overview_index] == NULL )
@@ -231,7 +231,7 @@ bool CPCIDSKChannel::IsOverviewValid( int overview_index )
     EstablishOverviewInfo();
 
     if( overview_index < 0 || overview_index >= (int) overview_infos.size() )
-        ThrowPCIDSKException( "Non existant overview (%d) requested.", 
+        ThrowPCIDSKException( "Non existent overview (%d) requested.", 
                               overview_index );
 
     int sis_id, validity=0;
@@ -252,7 +252,7 @@ std::string CPCIDSKChannel::GetOverviewResampling( int overview_index )
     EstablishOverviewInfo();
 
     if( overview_index < 0 || overview_index >= (int) overview_infos.size() )
-        ThrowPCIDSKException( "Non existant overview (%d) requested.", 
+        ThrowPCIDSKException( "Non existent overview (%d) requested.", 
                               overview_index );
 
     int sis_id, validity=0;
@@ -275,7 +275,7 @@ void CPCIDSKChannel::SetOverviewValidity( int overview_index,
     EstablishOverviewInfo();
 
     if( overview_index < 0 || overview_index >= (int) overview_infos.size() )
-        ThrowPCIDSKException( "Non existant overview (%d) requested.", 
+        ThrowPCIDSKException( "Non existent overview (%d) requested.", 
                               overview_index );
 
     int sis_id, validity=0;
diff --git a/frmts/pcidsk/sdk/channel/ctiledchannel.cpp b/frmts/pcidsk/sdk/channel/ctiledchannel.cpp
index fcf2429..a06fe5f 100644
--- a/frmts/pcidsk/sdk/channel/ctiledchannel.cpp
+++ b/frmts/pcidsk/sdk/channel/ctiledchannel.cpp
@@ -359,7 +359,7 @@ int CTiledChannel::ReadBlock( int block_index, void *buffer,
 
     if( block_index < 0 || block_index >= tile_count )
     {
-        ThrowPCIDSKException( "Requested non-existant block (%d)", 
+        ThrowPCIDSKException( "Requested non-existent block (%d)", 
                               block_index );
     }
 
@@ -517,7 +517,7 @@ int CTiledChannel::WriteBlock( int block_index, void *buffer )
 
     if( block_index < 0 || block_index >= tile_count )
     {
-        ThrowPCIDSKException( "Requested non-existant block (%d)", 
+        ThrowPCIDSKException( "Requested non-existent block (%d)", 
                               block_index );
     }
 
diff --git a/frmts/pcidsk/sdk/segment/cpcidskbitmap.cpp b/frmts/pcidsk/sdk/segment/cpcidskbitmap.cpp
index e30ebc3..89013a5 100644
--- a/frmts/pcidsk/sdk/segment/cpcidskbitmap.cpp
+++ b/frmts/pcidsk/sdk/segment/cpcidskbitmap.cpp
@@ -238,7 +238,7 @@ int CPCIDSKBitmap::ReadBlock( int block_index, void *buffer,
 
     if( block_index < 0 || block_index >= GetBlockCount() )
     {
-        ThrowPCIDSKException( "Requested non-existant block (%d)", 
+        ThrowPCIDSKException( "Requested non-existent block (%d)", 
                               block_index );
     }
 /* -------------------------------------------------------------------- */
@@ -343,7 +343,7 @@ int CPCIDSKBitmap::GetOverviewCount()
 PCIDSKChannel *CPCIDSKBitmap::GetOverview( CPL_UNUSED int i )
 {
     // The %d is ignored in the exception.
-    ThrowPCIDSKException("Non-existant overview %d requested on bitmap segment.");
+    ThrowPCIDSKException("Non-existent overview %d requested on bitmap segment.");
     return NULL;
 }
 
diff --git a/frmts/raw/btdataset.cpp b/frmts/raw/btdataset.cpp
index 13041bb..3c7baa7 100644
--- a/frmts/raw/btdataset.cpp
+++ b/frmts/raw/btdataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: btdataset.cpp 27729 2014-09-24 00:40:16Z goatbar $
+ * $Id: btdataset.cpp 30390 2015-09-15 13:14:09Z rouault $
  *
  * Project:  VTP .bt Driver
  * Purpose:  Implementation of VTP .bt elevation format read/write support.
@@ -32,7 +32,7 @@
 #include "rawdataset.h"
 #include "ogr_spatialref.h"
 
-CPL_CVSID("$Id: btdataset.cpp 27729 2014-09-24 00:40:16Z goatbar $");
+CPL_CVSID("$Id: btdataset.cpp 30390 2015-09-15 13:14:09Z rouault $");
 
 CPL_C_START
 void    GDALRegister_BT(void);
diff --git a/frmts/til/tildataset.cpp b/frmts/til/tildataset.cpp
index 7451835..035dd41 100644
--- a/frmts/til/tildataset.cpp
+++ b/frmts/til/tildataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: tildataset.cpp 27044 2014-03-16 23:41:27Z rouault $
+ * $Id: tildataset.cpp 29199 2015-05-15 08:46:03Z rouault $
  *
  * Project:  EarthWatch .TIL Driver
  * Purpose:  Implementation of the TILDataset class.
@@ -36,7 +36,7 @@
 #include "cpl_multiproc.h"
 #include "cplkeywordparser.h"
 
-CPL_CVSID("$Id: tildataset.cpp 27044 2014-03-16 23:41:27Z rouault $");
+CPL_CVSID("$Id: tildataset.cpp 29199 2015-05-15 08:46:03Z rouault $");
 
 /************************************************************************/
 /* ==================================================================== */
@@ -343,8 +343,10 @@ GDALDataset *TILDataset::Open( GDALOpenInfo * poOpenInfo )
     double      adfGeoTransform[6];
     if( poTemplateDS->GetGeoTransform( adfGeoTransform ) == CE_None )
     {
-        adfGeoTransform[0] = CPLAtof(CSLFetchNameValueDef(papszIMD,"MAP_PROJECTED_PRODUCT.ULX","0"));
-        adfGeoTransform[3] = CPLAtof(CSLFetchNameValueDef(papszIMD,"MAP_PROJECTED_PRODUCT.ULY","0"));
+        // According to https://www.digitalglobe.com/sites/default/files/ISD_External.pdf, ulx=originX and 
+        // is "Easting of the center of the upper left pixel of the image."
+        adfGeoTransform[0] = CPLAtof(CSLFetchNameValueDef(papszIMD,"MAP_PROJECTED_PRODUCT.ULX","0")) - adfGeoTransform[1] / 2;
+        adfGeoTransform[3] = CPLAtof(CSLFetchNameValueDef(papszIMD,"MAP_PROJECTED_PRODUCT.ULY","0")) - adfGeoTransform[5] / 2;
         poDS->SetGeoTransform(adfGeoTransform);
     }
 
diff --git a/frmts/vrt/vrtdataset.cpp b/frmts/vrt/vrtdataset.cpp
index eeb9c18..965ac13 100644
--- a/frmts/vrt/vrtdataset.cpp
+++ b/frmts/vrt/vrtdataset.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: vrtdataset.cpp 27739 2014-09-25 18:49:52Z goatbar $
+ * $Id: vrtdataset.cpp 29193 2015-05-14 10:08:11Z rouault $
  *
  * Project:  Virtual GDAL Datasets
  * Purpose:  Implementation of VRTDataset
@@ -33,7 +33,7 @@
 #include "cpl_minixml.h"
 #include "ogr_spatialref.h"
 
-CPL_CVSID("$Id: vrtdataset.cpp 27739 2014-09-25 18:49:52Z goatbar $");
+CPL_CVSID("$Id: vrtdataset.cpp 29193 2015-05-14 10:08:11Z rouault $");
 
 /************************************************************************/
 /*                            VRTDataset()                             */
@@ -1189,9 +1189,15 @@ int VRTDataset::CheckCompatibleForDatasetIO()
                 VRTSimpleSource* poSource = (VRTSimpleSource* )papoSources[iSource];
                 if (!EQUAL(poSource->GetType(), "SimpleSource"))
                     return FALSE;
-                if (poSource->GetBand() == NULL)
+
+                GDALRasterBand *srcband = poSource->GetBand();
+                if (srcband == NULL)
+                    return FALSE;
+                if (srcband->GetDataset() == NULL)
+                    return FALSE;
+                if (srcband->GetDataset()->GetRasterCount() <= iBand)
                     return FALSE;
-                if (poSource->GetBand()->GetBand() != iBand + 1)
+                if (srcband->GetDataset()->GetRasterBand(iBand + 1) != srcband)
                     return FALSE;
             }
         }
@@ -1209,9 +1215,15 @@ int VRTDataset::CheckCompatibleForDatasetIO()
                     return FALSE;
                 if (!poSource->IsSameExceptBandNumber(poRefSource))
                     return FALSE;
-                if (poSource->GetBand() == NULL)
+
+                GDALRasterBand *srcband = poSource->GetBand();
+                if (srcband == NULL)
+                    return FALSE;
+                if (srcband->GetDataset() == NULL)
+                    return FALSE;
+                if (srcband->GetDataset()->GetRasterCount() <= iBand)
                     return FALSE;
-                if (poSource->GetBand()->GetBand() != iBand + 1)
+                if (srcband->GetDataset()->GetRasterBand(iBand + 1) != srcband)
                     return FALSE;
             }
         }
diff --git a/frmts/vrt/vrtsources.cpp b/frmts/vrt/vrtsources.cpp
index 1968120..b430ae4 100644
--- a/frmts/vrt/vrtsources.cpp
+++ b/frmts/vrt/vrtsources.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: vrtsources.cpp 27957 2014-11-12 23:09:34Z rouault $
+ * $Id: vrtsources.cpp 28735 2015-03-15 03:30:21Z rouault $
  *
  * Project:  Virtual GDAL Datasets
  * Purpose:  Implementation of VRTSimpleSource, VRTFuncSource and 
@@ -36,7 +36,7 @@
 
 #include <algorithm>
 
-CPL_CVSID("$Id: vrtsources.cpp 27957 2014-11-12 23:09:34Z rouault $");
+CPL_CVSID("$Id: vrtsources.cpp 28735 2015-03-15 03:30:21Z rouault $");
 
 /************************************************************************/
 /* ==================================================================== */
@@ -839,7 +839,7 @@ VRTSimpleSource::GetSrcDstWindow( int nXOff, int nYOff, int nXSize, int nYSize,
         dfScaleWinToBufX = nBufXSize / (double) nXSize;
 
         *pnOutXOff = (int) ((dfDstULX - nXOff) * dfScaleWinToBufX+0.001);
-        *pnOutXSize = (int) ((dfDstLRX - nXOff) * dfScaleWinToBufX+0.001) 
+        *pnOutXSize = (int) ((dfDstLRX - nXOff) * dfScaleWinToBufX+0.5) 
             - *pnOutXOff;
 
         *pnOutXOff = MAX(0,*pnOutXOff);
@@ -852,7 +852,7 @@ VRTSimpleSource::GetSrcDstWindow( int nXOff, int nYOff, int nXSize, int nYSize,
         dfScaleWinToBufY = nBufYSize / (double) nYSize;
 
         *pnOutYOff = (int) ((dfDstULY - nYOff) * dfScaleWinToBufY+0.001);
-        *pnOutYSize = (int) ((dfDstLRY - nYOff) * dfScaleWinToBufY+0.001) 
+        *pnOutYSize = (int) ((dfDstLRY - nYOff) * dfScaleWinToBufY+0.5) 
             - *pnOutYOff;
 
         *pnOutYOff = MAX(0,*pnOutYOff);
diff --git a/gcore/gdal_version.h b/gcore/gdal_version.h
index e2d2fdd..b84e277 100644
--- a/gcore/gdal_version.h
+++ b/gcore/gdal_version.h
@@ -6,7 +6,7 @@
 #ifndef GDAL_VERSION_MAJOR
 #  define GDAL_VERSION_MAJOR    1
 #  define GDAL_VERSION_MINOR    11
-#  define GDAL_VERSION_REV      2
+#  define GDAL_VERSION_REV      3
 #  define GDAL_VERSION_BUILD    0
 #endif
 
@@ -22,8 +22,8 @@
 #endif
 
 #ifndef GDAL_RELEASE_DATE
-#  define GDAL_RELEASE_DATE     20150210
+#  define GDAL_RELEASE_DATE     20150915
 #endif
 #ifndef GDAL_RELEASE_NAME
-#  define GDAL_RELEASE_NAME     "1.11.2"
+#  define GDAL_RELEASE_NAME     "1.11.3"
 #endif
diff --git a/gcore/gdaljp2metadata.cpp b/gcore/gdaljp2metadata.cpp
index 0ad28f2..4de555b 100644
--- a/gcore/gdaljp2metadata.cpp
+++ b/gcore/gdaljp2metadata.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: gdaljp2metadata.cpp 27182 2014-04-14 20:03:08Z rouault $
+ * $Id: gdaljp2metadata.cpp 29189 2015-05-13 14:40:17Z rouault $
  *
  * Project:  GDAL 
  * Purpose:  GDALJP2Metadata - Read GeoTIFF and/or GML georef info.
@@ -36,7 +36,7 @@
 #include "ogr_api.h"
 #include "gt_wkt_srs_for_gdal.h"
 
-CPL_CVSID("$Id: gdaljp2metadata.cpp 27182 2014-04-14 20:03:08Z rouault $");
+CPL_CVSID("$Id: gdaljp2metadata.cpp 29189 2015-05-13 14:40:17Z rouault $");
 
 static const unsigned char msi_uuid2[16] =
 {0xb1,0x4b,0xf8,0xbd,0x08,0x3d,0x4b,0x43,
@@ -862,9 +862,14 @@ int GDALJP2Metadata::ParseGMLCoverageDesc()
             if( oSRS.SetFromUserInput( pszSRSName ) == OGRERR_NONE )
                 oSRS.exportToWkt( &pszProjection );
         }
-        else if( EQUALN(pszSRSName,"urn:",4) 
+        else if( (EQUALN(pszSRSName,"urn:",4) 
                  && strstr(pszSRSName,":def:") != NULL
-                 && oSRS.importFromURN(pszSRSName) == OGRERR_NONE )
+                 && oSRS.importFromURN(pszSRSName) == OGRERR_NONE) ||
+                 /* GMLJP2 v2.0 uses CRS URL instead of URN */
+                 /* See e.g. http://schemas.opengis.net/gmljp2/2.0/examples/minimalInstance.xml */
+                 (EQUALN(pszSRSName,"http://www.opengis.net/def/crs/",
+                         strlen("http://www.opengis.net/def/crs/")) 
+                 && oSRS.importFromCRSURL(pszSRSName) == OGRERR_NONE) )
         {
             oSRS.exportToWkt( &pszProjection );
 
@@ -889,9 +894,6 @@ int GDALJP2Metadata::ParseGMLCoverageDesc()
                   "Got projection from GML box: %s", 
                  pszProjection );
 
-    CPLDestroyXMLNode( psXML );
-    psXML = NULL;
-
 /* -------------------------------------------------------------------- */
 /*      Do we need to flip the axes?                                    */
 /* -------------------------------------------------------------------- */
@@ -902,6 +904,43 @@ int GDALJP2Metadata::ParseGMLCoverageDesc()
         bNeedAxisFlip = FALSE;
         CPLDebug( "GMLJP2", "Supressed axis flipping based on GDAL_IGNORE_AXIS_ORIENTATION." );
     }
+    
+    /* Some Pleiades files have explicit <gml:axisName>Easting</gml:axisName> */
+    /* <gml:axisName>Northing</gml:axisName> to override default EPSG order */
+    if( bNeedAxisFlip && psRG != NULL )
+    {
+        int nAxisCount = 0;
+        int bFirstAxisIsEastOrLong = FALSE, bSecondAxisIsNorthOrLat = FALSE;
+        for(CPLXMLNode* psIter = psRG->psChild; psIter != NULL; psIter = psIter->psNext )
+        {
+            if( psIter->eType == CXT_Element && strcmp(psIter->pszValue, "axisName") == 0 &&
+                psIter->psChild != NULL && psIter->psChild->eType == CXT_Text )
+            {
+                if( nAxisCount == 0 && 
+                    (EQUALN(psIter->psChild->pszValue, "EAST", 4) ||
+                     EQUALN(psIter->psChild->pszValue, "LONG", 4) ) )
+                {
+                    bFirstAxisIsEastOrLong = TRUE;
+                }
+                else if( nAxisCount == 1 &&
+                         (EQUALN(psIter->psChild->pszValue, "NORTH", 5) ||
+                          EQUALN(psIter->psChild->pszValue, "LAT", 3)) )
+                {
+                    bSecondAxisIsNorthOrLat = TRUE;
+                }
+                nAxisCount ++;
+            }
+        }
+        if( bFirstAxisIsEastOrLong && bSecondAxisIsNorthOrLat )
+        {
+            CPLDebug( "GMLJP2", "Disable axis flip because of explicit axisName disabling it" );
+            bNeedAxisFlip = FALSE;
+        }
+    }
+
+    CPLDestroyXMLNode( psXML );
+    psXML = NULL;
+    psRG = NULL;
 
     if( bNeedAxisFlip )
     {
@@ -1197,6 +1236,7 @@ GDALJP2Box *GDALJP2Metadata::CreateGMLJP2( int nXSize, int nYSize )
 "          </gml:rectifiedGridDomain>\n"
 "          <gml:rangeSet>\n"
 "            <gml:File>\n"
+"              <gml:rangeParameters/>\n"
 "              <gml:fileName>gmljp2://codestream/0</gml:fileName>\n"
 "              <gml:fileStructure>Record Interleaved</gml:fileStructure>\n"
 "            </gml:File>\n"
diff --git a/gcore/gdalrasterband.cpp b/gcore/gdalrasterband.cpp
index a3c945c..b8bcede 100644
--- a/gcore/gdalrasterband.cpp
+++ b/gcore/gdalrasterband.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: gdalrasterband.cpp 27858 2014-10-15 08:41:23Z rouault $
+ * $Id: gdalrasterband.cpp 29036 2015-04-27 14:15:16Z rouault $
  *
  * Project:  GDAL Core
  * Purpose:  Base class for format specific band class implementation.  This
@@ -37,7 +37,7 @@
 #define TO_SUBBLOCK(x) ((x) >> 6)
 #define WITHIN_SUBBLOCK(x) ((x) & 0x3f)
 
-CPL_CVSID("$Id: gdalrasterband.cpp 27858 2014-10-15 08:41:23Z rouault $");
+CPL_CVSID("$Id: gdalrasterband.cpp 29036 2015-04-27 14:15:16Z rouault $");
 
 /************************************************************************/
 /*                           GDALRasterBand()                           */
diff --git a/man/man1/gdal-config.1 b/man/man1/gdal-config.1
index a841b26..7ce23f0 100644
--- a/man/man1/gdal-config.1
+++ b/man/man1/gdal-config.1
@@ -1,12 +1,9 @@
-.TH "gdal-config" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal-config" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal-config \- .TH "gdal-config" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal-config \- determines various information about a GDAL installation
+gdal-config \- gdal-config 
+determines various information about a GDAL installation
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -24,19 +21,19 @@ Options:
 .PP
 .SH "DESCRIPTION"
 .PP
-This utility script (available on Unix systems) can be used to determine various information about a GDAL installation. It is normally just used by configure scripts for applications using GDAL but can be queried by an end user.
+This utility script (available on Unix systems) can be used to determine various information about a GDAL installation\&. It is normally just used by configure scripts for applications using GDAL but can be queried by an end user\&.
 .PP
 .IP "\fB\fB--prefix\fP:\fP" 1c
-the top level directory for the GDAL installation. 
+the top level directory for the GDAL installation\&. 
 .IP "\fB\fB--libs\fP:\fP" 1c
-The libraries and link directives required to use GDAL. 
+The libraries and link directives required to use GDAL\&. 
 .IP "\fB\fB--cflags\fP:\fP" 1c
-The include and macro definition required to compiled modules using GDAL. 
+The include and macro definition required to compiled modules using GDAL\&. 
 .IP "\fB\fB--version\fP:\fP" 1c
-Reports the GDAL version. 
+Reports the GDAL version\&. 
 .IP "\fB\fB--ogr-enabled\fP:\fP" 1c
-Reports 'yes' or 'no' to standard output depending on whether OGR is built into GDAL. 
+Reports 'yes' or 'no' to standard output depending on whether OGR is built into GDAL\&. 
 .IP "\fB\fB--formats\fP:\fP" 1c
-Reports which formats are configured into GDAL to stdout.  
+Reports which formats are configured into GDAL to stdout\&.  
 .PP
 
diff --git a/man/man1/gdal2tiles.1 b/man/man1/gdal2tiles.1
index 4cde8bc..ad30d7a 100644
--- a/man/man1/gdal2tiles.1
+++ b/man/man1/gdal2tiles.1
@@ -1,12 +1,9 @@
-.TH "gdal2tiles" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal2tiles" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal2tiles \- .TH "gdal2tiles" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal2tiles \- generates directory with TMS tiles, KMLs and simple web viewers
+gdal2tiles \- gdal2tiles\&.py 
+generates directory with TMS tiles, KMLs and simple web viewers
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -20,60 +17,60 @@ gdal2tiles.py [-p profile] [-r resampling] [-s srs] [-z zoom]
 .PP
 .SH "DESCRIPTION"
 .PP
-This utility generates a directory with small tiles and metadata, following the OSGeo Tile Map Service Specification. Simple web pages with viewers based on Google Maps and OpenLayers are generated as well - so anybody can comfortably explore your maps on-line and you do not need to install or configure any special software (like MapServer) and the map displays very fast in the web browser. You only need to upload the generated directory onto a web server.
+This utility generates a directory with small tiles and metadata, following the OSGeo Tile Map Service Specification\&. Simple web pages with viewers based on Google Maps and OpenLayers are generated as well - so anybody can comfortably explore your maps on-line and you do not need to install or configure any special software (like MapServer) and the map displays very fast in the web browser\&. You only need to upload the generated directory onto a web server\&.
 .PP
-GDAL2Tiles also creates the necessary metadata for Google Earth (KML SuperOverlay), in case the supplied map uses EPSG:4326 projection.
+GDAL2Tiles also creates the necessary metadata for Google Earth (KML SuperOverlay), in case the supplied map uses EPSG:4326 projection\&.
 .PP
-World files and embedded georeferencing is used during tile generation, but you can publish a picture without proper georeferencing too.
+World files and embedded georeferencing is used during tile generation, but you can publish a picture without proper georeferencing too\&.
 .PP
 .IP "\fB\fB-p\fP \fIPROFILE\fP, --profile=\fIPROFILE\fP: \fP" 1c
-Tile cutting profile (mercator,geodetic,raster) - default 'mercator' (Google Maps compatible). 
+Tile cutting profile (mercator,geodetic,raster) - default 'mercator' (Google Maps compatible)\&. 
 .IP "\fB\fB-r\fP \fIRESAMPLING\fP, --resampling=\fIRESAMPLING\fP: \fP" 1c
-Resampling method (average,near,bilinear,cubic,cubicspline,lanczos,antialias) - default 'average'. 
+Resampling method (average,near,bilinear,cubic,cubicspline,lanczos,antialias) - default 'average'\&. 
 .IP "\fB\fB-s\fP \fISRS\fP, --s_srs=\fISRS\fP: \fP" 1c
-The spatial reference system used for the source input data. 
+The spatial reference system used for the source input data\&. 
 .IP "\fB\fB-z\fP \fIZOOM\fP, --zoom=\fIZOOM\fP: \fP" 1c
-Zoom levels to render (format:'2-5' or '10'). 
+Zoom levels to render (format:'2-5' or '10')\&. 
 .IP "\fB\fB-e\fP, --resume: \fP" 1c
-Resume mode. Generate only missing files. 
+Resume mode\&. Generate only missing files\&. 
 .IP "\fB\fB-a\fP \fINODATA\fP, --srcnodata=\fINODATA\fP: \fP" 1c
-NODATA transparency value to assign to the input data. 
+NODATA transparency value to assign to the input data\&. 
 .IP "\fB\fB-v, --verbose\fP \fP" 1c
-Generate verbose output of tile generation. 
+Generate verbose output of tile generation\&. 
 .IP "\fB\fB-h, --help\fP \fP" 1c
-Show help message and exit. 
+Show help message and exit\&. 
 .IP "\fB\fB--version\fP \fP" 1c
-Show program's version number and exit. 
+Show program's version number and exit\&. 
 .PP
 .PP
 \fBKML (Google Earth) options:\fP
 .PP
 Options for generated Google Earth SuperOverlay metadata 
 .IP "\fB\fB-k, --force-kml\fP \fP" 1c
-Generate KML for Google Earth - default for 'geodetic' profile and 'raster' in EPSG:4326. For a dataset with different projection use with caution! 
+Generate KML for Google Earth - default for 'geodetic' profile and 'raster' in EPSG:4326\&. For a dataset with different projection use with caution! 
 .IP "\fB\fB-n, --no-kml\fP: \fP" 1c
-Avoid automatic generation of KML files for EPSG:4326. 
+Avoid automatic generation of KML files for EPSG:4326\&. 
 .IP "\fB\fB-u\fP \fIURL\fP, --url=\fIURL\fP: \fP" 1c
-URL address where the generated tiles are going to be published. 
+URL address where the generated tiles are going to be published\&. 
 .PP
 .PP
 \fBWeb viewer options:\fP
 .PP
 Options for generated HTML viewers a la Google Maps 
 .IP "\fB\fB-w\fP \fIWEBVIEWER\fP, --webviewer=\fIWEBVIEWER\fP: \fP" 1c
-Web viewer to generate (all,google,openlayers,none) - default 'all'. 
+Web viewer to generate (all,google,openlayers,none) - default 'all'\&. 
 .IP "\fB\fB-t\fP \fITITLE\fP, --title=\fITITLE\fP: \fP" 1c
-Title of the map. 
+Title of the map\&. 
 .IP "\fB\fB-c\fP \fICOPYRIGHT\fP, --copyright=\fICOPYRIGHT\fP: \fP" 1c
-Copyright for the map. 
+Copyright for the map\&. 
 .IP "\fB\fB-g\fP \fIGOOGLEKEY\fP, --googlekey=\fIGOOGLEKEY\fP: \fP" 1c
-Google Maps API key from http://code.google.com/apis/maps/signup.html. 
+Google Maps API key from http://code.google.com/apis/maps/signup.html\&. 
 .IP "\fB\fB-b\fP \fIBINGKEY\fP, --bingkey=\fIBINGKEY\fP: \fP" 1c
 Bing Maps API key from https://www.bingmapsportal.com/
 .PP
 .PP
 .PP
-NOTE: gdal2tiles.py is a Python script that needs to be run against 'new generation' Python GDAL binding.
+NOTE: gdal2tiles\&.py is a Python script that needs to be run against 'new generation' Python GDAL binding\&.
 .SH "AUTHORS"
 .PP
-Klokan Petr Pridal <klokan at klokan.cz> as a Google SoC 2007 Project. 
+Klokan Petr Pridal <klokan at klokan.cz> as a Google SoC 2007 Project\&. 
diff --git a/man/man1/gdal_calc.1 b/man/man1/gdal_calc.1
index 330f091..e9bb80e 100644
--- a/man/man1/gdal_calc.1
+++ b/man/man1/gdal_calc.1
@@ -1,12 +1,9 @@
-.TH "gdal_calc" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_calc" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_calc \- .TH "gdal_calc" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_calc \- Command line raster calculator with numpy syntax
+gdal_calc \- gdal_calc\&.py 
+Command line raster calculator with numpy syntax
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -40,7 +37,7 @@ Options:
 .PP
 .SH "DESCRIPTION"
 .PP
-Command line raster calculator with numpy syntax. Use any basic arithmetic supported by numpy arrays such as +-*\\ along with logical operators such as >. Note that all files must have the same dimensions, but no projection checking is performed.
+Command line raster calculator with numpy syntax\&. Use any basic arithmetic supported by numpy arrays such as +-*\\ along with logical operators such as >\&. Note that all files must have the same dimensions, but no projection checking is performed\&.
 .SH "EXAMPLE"
 .PP
 add two files together 
diff --git a/man/man1/gdal_contour.1 b/man/man1/gdal_contour.1
index cf4ebea..89b5301 100644
--- a/man/man1/gdal_contour.1
+++ b/man/man1/gdal_contour.1
@@ -1,12 +1,9 @@
-.TH "gdal_contour" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_contour" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_contour \- .TH "gdal_contour" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_contour \- builds vector contour lines from a raster elevation model
+gdal_contour \- gdal_contour 
+builds vector contour lines from a raster elevation model
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -22,26 +19,26 @@ Usage: gdal_contour [-b <band>] [-a <attribute_name>] [-3d] [-inodata]
 .PP
 .SH "DESCRIPTION"
 .PP
-This program generates a vector contour file from the input raster elevation model (DEM).
+This program generates a vector contour file from the input raster elevation model (DEM)\&.
 .PP
-Starting from version 1.7 the contour line-strings will be oriented consistently. The high side will be on the right, i.e. a line string goes clockwise around a top.
+Starting from version 1\&.7 the contour line-strings will be oriented consistently\&. The high side will be on the right, i\&.e\&. a line string goes clockwise around a top\&.
 .PP
 .IP "\fB\fB-b\fP \fIband\fP:\fP" 1c
-picks a particular band to get the DEM from. Defaults to band 1.
+picks a particular band to get the DEM from\&. Defaults to band 1\&.
 .PP
 .IP "\fB\fB-a\fP \fIname\fP:\fP" 1c
-provides a name for the attribute in which to put the elevation. If not provided no elevation attribute is attached.  
+provides a name for the attribute in which to put the elevation\&. If not provided no elevation attribute is attached\&.  
 .IP "\fB\fB-3d\fP: \fP" 1c
-Force production of 3D vectors instead of 2D. Includes elevation at every vertex.
+Force production of 3D vectors instead of 2D\&. Includes elevation at every vertex\&.
 .PP
 .IP "\fB\fB-inodata\fP: \fP" 1c
-Ignore any nodata value implied in the dataset - treat all values as valid.
+Ignore any nodata value implied in the dataset - treat all values as valid\&.
 .PP
 .IP "\fB\fB-snodata\fP \fIvalue\fP:\fP" 1c
-Input pixel value to treat as 'nodata'. 
+Input pixel value to treat as 'nodata'\&. 
 .PP
 .IP "\fB\fB-f\fP \fIformat\fP: \fP" 1c
-create output in a particular format, default is shapefiles.
+create output in a particular format, default is shapefiles\&.
 .PP
 .IP "\fB\fB-dsco\fP \fINAME=VALUE\fP:\fP" 1c
 Dataset creation option (format specific) 
@@ -49,19 +46,19 @@ Dataset creation option (format specific)
 Layer creation option (format specific)
 .PP
 .IP "\fB\fB-i\fP \fIinterval\fP:\fP" 1c
-elevation interval between contours.
+elevation interval between contours\&.
 .PP
 .IP "\fB\fB-off\fP \fIoffset\fP:\fP" 1c
-Offset from zero relative to which to interpret intervals.
+Offset from zero relative to which to interpret intervals\&.
 .PP
 .IP "\fB\fB-fl\fP \fIlevel\fP: \fP" 1c
-Name one or more 'fixed levels' to extract. 
+Name one or more 'fixed levels' to extract\&. 
 .IP "\fB\fB-nln\fP \fIoutlayername\fP: \fP" 1c
-Provide a name for the output vector layer. Defaults to 'contour'. 
+Provide a name for the output vector layer\&. Defaults to 'contour'\&. 
 .PP
 .SH "EXAMPLE"
 .PP
-This would create 10meter contours from the DEM data in dem.tif and produce a shapefile in contour.shp/shx/dbf with the contour elevations in the 'elev' attribute.
+This would create 10meter contours from the DEM data in dem\&.tif and produce a shapefile in contour\&.shp/shx/dbf with the contour elevations in the 'elev' attribute\&.
 .PP
 .PP
 .nf
diff --git a/man/man1/gdal_edit.1 b/man/man1/gdal_edit.1
index 987e24d..81ba552 100644
--- a/man/man1/gdal_edit.1
+++ b/man/man1/gdal_edit.1
@@ -1,12 +1,9 @@
-.TH "gdal_edit" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_edit" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_edit \- .TH "gdal_edit" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_edit \- Edit in place various information of an existing GDAL dataset
+gdal_edit \- gdal_edit\&.py 
+Edit in place various information of an existing GDAL dataset
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -20,40 +17,40 @@ gdal_edit [--help-general] [-ro] [-a_srs srs_def] [-a_ullr ulx uly lrx lry]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdal_edit.py script can be used to allows to edit in place various information of an existing GDAL dataset (projection, geotransform, nodata, metadata).
+The gdal_edit\&.py script can be used to allows to edit in place various information of an existing GDAL dataset (projection, geotransform, nodata, metadata)\&.
 .PP
-It works only with raster formats that support update access to existing datasets. 
+It works only with raster formats that support update access to existing datasets\&. 
 .IP "\fB\fB--help-general\fP:\fP" 1c
-Gives a brief usage message for the generic GDAL commandline options and exit. 
+Gives a brief usage message for the generic GDAL commandline options and exit\&. 
 .PP
 .IP "\fB\fB-ro\fP:\fP" 1c
-(GDAL >= 1.11) Open the dataset in read-only. Might be usefull for drivers refusing to use the dataset in update-mode. In which case, updated information will go into PAM .aux.xml files.
+(GDAL >= 1\&.11) Open the dataset in read-only\&. Might be usefull for drivers refusing to use the dataset in update-mode\&. In which case, updated information will go into PAM \&.aux\&.xml files\&.
 .PP
 .IP "\fB\fB-a_srs\fP \fIsrs_def\fP:\fP" 1c
 .PP
-Defines the target coordinate system. This coordinate system will be written to the dataset.
+Defines the target coordinate system\&. This coordinate system will be written to the dataset\&.
 .PP
 .IP "\fB\fB-a_ullr\fP \fIulx uly lrx lry\fP:\fP" 1c
-Assign/override the georeferenced bounds of the dataset.
+Assign/override the georeferenced bounds of the dataset\&.
 .PP
 .IP "\fB\fB-tr\fP xres yres :\fP" 1c
-Set target resolution. The values must be expressed in georeferenced units. Both must be positive values.
+Set target resolution\&. The values must be expressed in georeferenced units\&. Both must be positive values\&.
 .PP
 .IP "\fB\fB-unsetgt\fP:\fP" 1c
-Remove the georeference information.
+Remove the georeference information\&.
 .PP
 .IP "\fB\fB-a_nodata\fP \fIvalue\fP:\fP" 1c
-Assign a specified nodata value to output bands.
+Assign a specified nodata value to output bands\&.
 .PP
 .IP "\fB\fB-gcp\fP \fIpixel line easting northing [elevation]\fP:\fP" 1c
-Add the indicated ground control point to the dataset. This option may be provided multiple times to provide a set of GCPs. 
+Add the indicated ground control point to the dataset\&. This option may be provided multiple times to provide a set of GCPs\&. 
 .PP
 .IP "\fB\fB-mo\fP \fI'META-TAG=VALUE'\fP:\fP" 1c
-Passes a metadata key and value to set on the output dataset if possible.
+Passes a metadata key and value to set on the output dataset if possible\&.
 .PP
 .PP
 .PP
--a_ullr, -tr and -unsetgt options are exclusive.
+-a_ullr, -tr and -unsetgt options are exclusive\&.
 .SH "EXAMPLE"
 .PP
 .PP
diff --git a/man/man1/gdal_fillnodata.1 b/man/man1/gdal_fillnodata.1
index 32cb11e..f0c0866 100644
--- a/man/man1/gdal_fillnodata.1
+++ b/man/man1/gdal_fillnodata.1
@@ -1,12 +1,9 @@
-.TH "gdal_fillnodata" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_fillnodata" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_fillnodata \- .TH "gdal_fillnodata" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_fillnodata \- fill raster regions by interpolation from edges
+gdal_fillnodata \- gdal_fillnodata\&.py 
+fill raster regions by interpolation from edges
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -19,39 +16,39 @@ gdal_fillnodata.py [-q] [-md max_distance] [-si smooth_iterations]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdal_fillnodata.py script fills selection regions (usually nodata areas) by interpolating from valid pixels around the edges of the area.
+The gdal_fillnodata\&.py script fills selection regions (usually nodata areas) by interpolating from valid pixels around the edges of the area\&.
 .PP
-Additional details on the algorithm are available in the GDALFillNodata() docs.
+Additional details on the algorithm are available in the GDALFillNodata() docs\&.
 .PP
 .IP "\fB\fB-q\fP:\fP" 1c
-The script runs in quiet mode. The progress monitor is supressed and routine messages are not displayed.
+The script runs in quiet mode\&. The progress monitor is supressed and routine messages are not displayed\&.
 .PP
 .IP "\fB\fB-md\fP \fImax_distance\fP:\fP" 1c
-The maximum distance (in pixels) that the algorithm will search out for values to interpolate.
+The maximum distance (in pixels) that the algorithm will search out for values to interpolate\&.
 .PP
 .IP "\fB\fB-si\fP \fIsmooth_iterations\fP:\fP" 1c
-The number of 3x3 average filter smoothing iterations to run after the interpolation to dampen artifacts. The default is zero smoothing iterations.
+The number of 3x3 average filter smoothing iterations to run after the interpolation to dampen artifacts\&. The default is zero smoothing iterations\&.
 .PP
 .IP "\fB\fB-o\fP \fIname=value\fP:\fP" 1c
-Specify a special argument to the algorithm. Currently none are supported. 
+Specify a special argument to the algorithm\&. Currently none are supported\&. 
 .PP
 .IP "\fB\fB-b\fP \fIband\fP:\fP" 1c
-The band to operate on, by default the first band is operated on. 
+The band to operate on, by default the first band is operated on\&. 
 .PP
 .IP "\fB\fIsrcfile\fP\fP" 1c
-The source raster file used to identify target pixels. Only one band is used.
+The source raster file used to identify target pixels\&. Only one band is used\&.
 .PP
 .IP "\fB\fB-nomask\fP:\fP" 1c
-Do not use the default validity mask for the input band (such as nodata, or alpha masks). 
+Do not use the default validity mask for the input band (such as nodata, or alpha masks)\&. 
 .PP
 .IP "\fB\fB-mask\fP \fIfilename\fP:\fP" 1c
-Use the first band of the specified file as a validity mask (zero is invalid, non-zero is valid). 
+Use the first band of the specified file as a validity mask (zero is invalid, non-zero is valid)\&. 
 .PP
 .IP "\fB\fIdstfile\fP\fP" 1c
-The new file to create with the interpolated result. If not provided, the source band is updated in place.
+The new file to create with the interpolated result\&. If not provided, the source band is updated in place\&.
 .PP
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Select the output format. The default is GeoTIFF (GTiff). Use the short format name.
+Select the output format\&. The default is GeoTIFF (GTiff)\&. Use the short format name\&.
 .PP
 .PP
 .SH "AUTHORS"
diff --git a/man/man1/gdal_grid.1 b/man/man1/gdal_grid.1
index 1ff0d3e..ac14d60 100644
--- a/man/man1/gdal_grid.1
+++ b/man/man1/gdal_grid.1
@@ -1,12 +1,9 @@
-.TH "gdal_grid" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_grid" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_grid \- .TH "gdal_grid" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_grid \- creates regular grid from the scattered data
+gdal_grid \- gdal_grid 
+creates regular grid from the scattered data
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -28,171 +25,171 @@ gdal_grid [-ot {Byte/Int16/UInt16/UInt32/Int32/Float32/Float64/
 .PP
 .SH "DESCRIPTION"
 .PP
-This program creates regular grid (raster) from the scattered data read from the OGR datasource. Input data will be interpolated to fill grid nodes with values, you can choose from various interpolation methods.
+This program creates regular grid (raster) from the scattered data read from the OGR datasource\&. Input data will be interpolated to fill grid nodes with values, you can choose from various interpolation methods\&.
 .PP
-Starting with GDAL 1.10, it is possible to set the \fBGDAL_NUM_THREADS\fP configuration option to parallelize the processing. The value to specify is the number of worker threads, or \fIALL_CPUS\fP to use all the cores/CPUs of the computer.
+Starting with GDAL 1\&.10, it is possible to set the \fBGDAL_NUM_THREADS\fP configuration option to parallelize the processing\&. The value to specify is the number of worker threads, or \fIALL_CPUS\fP to use all the cores/CPUs of the computer\&.
 .PP
 .IP "\fB\fB-ot\fP \fItype\fP:\fP" 1c
-For the output bands to be of the indicated data type.
+For the output bands to be of the indicated data type\&.
 .PP
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Select the output format. The default is GeoTIFF (GTiff). Use the short format name.
+Select the output format\&. The default is GeoTIFF (GTiff)\&. Use the short format name\&.
 .PP
 .IP "\fB\fB-txe\fP \fIxmin xmax\fP:\fP" 1c
-Set georeferenced X extents of output file to be created.
+Set georeferenced X extents of output file to be created\&.
 .PP
 .IP "\fB\fB-tye\fP \fIymin ymax\fP:\fP" 1c
-Set georeferenced Y extents of output file to be created.
+Set georeferenced Y extents of output file to be created\&.
 .PP
 .IP "\fB\fB-outsize\fP \fIxsize ysize\fP:\fP" 1c
-Set the size of the output file in pixels and lines.
+Set the size of the output file in pixels and lines\&.
 .PP
 .IP "\fB\fB-a_srs\fP \fIsrs_def\fP:\fP" 1c
-Override the projection for the output file. The \fIsrs_def\fP may be any of the usual GDAL/OGR forms, complete WKT, PROJ.4, EPSG:n or a file containing the WKT. 
+Override the projection for the output file\&. The \fIsrs_def\fP may be any of the usual GDAL/OGR forms, complete WKT, PROJ\&.4, EPSG:n or a file containing the WKT\&. 
 .PP
 .IP "\fB\fB-zfield\fP \fIfield_name\fP:\fP" 1c
-Identifies an attribute field on the features to be used to get a Z value from. This value overrides Z value read from feature geometry record (naturally, if you have a Z value in geometry, otherwise you have no choice and should specify a field name containing Z value).
+Identifies an attribute field on the features to be used to get a Z value from\&. This value overrides Z value read from feature geometry record (naturally, if you have a Z value in geometry, otherwise you have no choice and should specify a field name containing Z value)\&.
 .PP
 .IP "\fB\fB-z_increase\fP \fIincrease_value\fP:\fP" 1c
-Addition to the attribute field on the features to be used to get a Z value from. The addition should be the same unit as Z value. The result value will be Z value + Z increase value. The default value is 0.
+Addition to the attribute field on the features to be used to get a Z value from\&. The addition should be the same unit as Z value\&. The result value will be Z value + Z increase value\&. The default value is 0\&.
 .PP
 .IP "\fB\fB-z_multiply\fP \fImultiply_value\fP:\fP" 1c
-This is multiplication ratio for Z field. This can be used for shift from e.g. foot to meters or from elevation to deep. The result value will be (Z value + Z increase value) * Z multiply value. The default value is 1.
+This is multiplication ratio for Z field\&. This can be used for shift from e\&.g\&. foot to meters or from elevation to deep\&. The result value will be (Z value + Z increase value) * Z multiply value\&. The default value is 1\&.
 .PP
-.IP "\fB\fB-a\fP \fI[algorithm[:parameter1=value1][:parameter2=value2]...]\fP: \fP" 1c
-Set the interpolation algorithm or data metric name and (optionally) its parameters. See \fBINTERPOLATION ALGORITHMS\fP and \fBDATA METRICS\fP sections for further discussion of available options.
+.IP "\fB\fB-a\fP \fI[algorithm[:parameter1=value1][:parameter2=value2]\&.\&.\&.]\fP: \fP" 1c
+Set the interpolation algorithm or data metric name and (optionally) its parameters\&. See \fBINTERPOLATION ALGORITHMS\fP and \fBDATA METRICS\fP sections for further discussion of available options\&.
 .PP
 .IP "\fB\fB-spat\fP \fIxmin ymin xmax ymax\fP:\fP" 1c
-Adds a spatial filter to select only features contained within the bounding box described by (xmin, ymin) - (xmax, ymax).
+Adds a spatial filter to select only features contained within the bounding box described by (xmin, ymin) - (xmax, ymax)\&.
 .PP
 .IP "\fB\fB-clipsrc\fP\fI [xmin ymin xmax ymax]|WKT|datasource|spat_extent\fP: \fP" 1c
-Adds a spatial filter to select only features contained within the specified bounding box (expressed in source SRS), WKT geometry (POLYGON or MULTIPOLYGON), from a datasource or to the spatial extent of the \fB-spat\fP option if you use the \fIspat_extent\fP keyword. When specifying a datasource, you will generally want to use it in combination of the \fB-clipsrclayer\fP, \fB-clipsrcwhere\fP or \fB-clipsrcsql\fP options.
+Adds a spatial filter to select only features contained within the specified bounding box (expressed in source SRS), WKT geometry (POLYGON or MULTIPOLYGON), from a datasource or to the spatial extent of the \fB-spat\fP option if you use the \fIspat_extent\fP keyword\&. When specifying a datasource, you will generally want to use it in combination of the \fB-clipsrclayer\fP, \fB-clipsrcwhere\fP or \fB-clipsrcsql\fP options\&.
 .PP
 .IP "\fB\fB-clipsrcsql\fP \fIsql_statement\fP:\fP" 1c
-Select desired geometries using an SQL query instead.
+Select desired geometries using an SQL query instead\&.
 .PP
 .IP "\fB\fB-clipsrclayer\fP \fIlayername\fP:\fP" 1c
-Select the named layer from the source clip datasource.
+Select the named layer from the source clip datasource\&.
 .PP
 .IP "\fB\fB-clipsrcwhere\fP \fIexpression\fP:\fP" 1c
-Restrict desired geometries based on attribute query.
+Restrict desired geometries based on attribute query\&.
 .PP
 .IP "\fB\fB-l\fP \fIlayername\fP: \fP" 1c
-Indicates the layer(s) from the datasource that will be used for input features. May be specified multiple times, but at least one layer name or a \fB-sql\fP option must be specified.
+Indicates the layer(s) from the datasource that will be used for input features\&. May be specified multiple times, but at least one layer name or a \fB-sql\fP option must be specified\&.
 .PP
 .IP "\fB\fB-where\fP \fIexpression\fP: \fP" 1c
-An optional SQL WHERE style query expression to be applied to select features to process from the input layer(s). 
+An optional SQL WHERE style query expression to be applied to select features to process from the input layer(s)\&. 
 .PP
 .IP "\fB\fB-sql\fP \fIselect_statement\fP: \fP" 1c
-An SQL statement to be evaluated against the datasource to produce a virtual layer of features to be processed.
+An SQL statement to be evaluated against the datasource to produce a virtual layer of features to be processed\&.
 .PP
 .IP "\fB\fB-co\fP \fI'NAME=VALUE'\fP:\fP" 1c
-Passes a creation option to the output format driver. Multiple \fB-co\fP options may be listed. See format specific documentation for legal creation options for each format.
+Passes a creation option to the output format driver\&. Multiple \fB-co\fP options may be listed\&. See format specific documentation for legal creation options for each format\&.
 .PP
 .IP "\fB\fB-q\fP:\fP" 1c
-Suppress progress monitor and other non-error output.
+Suppress progress monitor and other non-error output\&.
 .PP
 .IP "\fB\fIsrc_datasource\fP: \fP" 1c
-Any OGR supported readable datasource.
+Any OGR supported readable datasource\&.
 .PP
 .IP "\fB\fIdst_filename\fP: \fP" 1c
-The GDAL supported output file.
+The GDAL supported output file\&.
 .PP
 .PP
 .SH "INTERPOLATION ALGORITHMS"
 .PP
-There are number of interpolation algorithms to choose from.
+There are number of interpolation algorithms to choose from\&.
 .SS "invdist"
-Inverse distance to a power. This is default algorithm. It has following parameters:
+Inverse distance to a power\&. This is default algorithm\&. It has following parameters:
 .PP
 .IP "\fB\fIpower\fP: \fP" 1c
-Weighting power (default 2.0). 
+Weighting power (default 2\&.0)\&. 
 .IP "\fB\fIsmoothing\fP: \fP" 1c
-Smoothing parameter (default 0.0). 
+Smoothing parameter (default 0\&.0)\&. 
 .IP "\fB\fIradius1\fP: \fP" 1c
-The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0. 
+The first radius (X axis if rotation angle is 0) of search ellipse\&. Set this parameter to zero to use whole point array\&. Default is 0\&.0\&. 
 .IP "\fB\fIradius2\fP: \fP" 1c
-The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0. 
+The second radius (Y axis if rotation angle is 0) of search ellipse\&. Set this parameter to zero to use whole point array\&. Default is 0\&.0\&. 
 .IP "\fB\fIangle\fP: \fP" 1c
-Angle of search ellipse rotation in degrees (counter clockwise, default 0.0). 
+Angle of search ellipse rotation in degrees (counter clockwise, default 0\&.0)\&. 
 .IP "\fB\fImax_points\fP: \fP" 1c
-Maximum number of data points to use. Do not search for more points than this number. This is only used if search ellipse is set (both radii are non-zero). Zero means that all found points should be used. Default is 0. 
+Maximum number of data points to use\&. Do not search for more points than this number\&. This is only used if search ellipse is set (both radii are non-zero)\&. Zero means that all found points should be used\&. Default is 0\&. 
 .IP "\fB\fImin_points\fP: \fP" 1c
-Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. This is only used if search ellipse is set (both radii are non-zero). Default is 0. 
+Minimum number of data points to use\&. If less amount of points found the grid node considered empty and will be filled with NODATA marker\&. This is only used if search ellipse is set (both radii are non-zero)\&. Default is 0\&. 
 .IP "\fB\fInodata\fP: \fP" 1c
-NODATA marker to fill empty points (default 0.0). 
+NODATA marker to fill empty points (default 0\&.0)\&. 
 .PP
 .SS "average"
-Moving average algorithm. It has following parameters:
+Moving average algorithm\&. It has following parameters:
 .PP
 .IP "\fB\fIradius1\fP: \fP" 1c
-The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0. 
+The first radius (X axis if rotation angle is 0) of search ellipse\&. Set this parameter to zero to use whole point array\&. Default is 0\&.0\&. 
 .IP "\fB\fIradius2\fP: \fP" 1c
-The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0. 
+The second radius (Y axis if rotation angle is 0) of search ellipse\&. Set this parameter to zero to use whole point array\&. Default is 0\&.0\&. 
 .IP "\fB\fIangle\fP: \fP" 1c
-Angle of search ellipse rotation in degrees (counter clockwise, default 0.0). 
+Angle of search ellipse rotation in degrees (counter clockwise, default 0\&.0)\&. 
 .IP "\fB\fImin_points\fP: \fP" 1c
-Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. Default is 0. 
+Minimum number of data points to use\&. If less amount of points found the grid node considered empty and will be filled with NODATA marker\&. Default is 0\&. 
 .IP "\fB\fInodata\fP: \fP" 1c
-NODATA marker to fill empty points (default 0.0). 
+NODATA marker to fill empty points (default 0\&.0)\&. 
 .PP
 .PP
-Note, that it is essential to set search ellipse for moving average method. It is a window that will be averaged when computing grid nodes values.
+Note, that it is essential to set search ellipse for moving average method\&. It is a window that will be averaged when computing grid nodes values\&.
 .SS "nearest"
-Nearest neighbor algorithm. It has following parameters:
+Nearest neighbor algorithm\&. It has following parameters:
 .PP
 .IP "\fB\fIradius1\fP: \fP" 1c
-The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0. 
+The first radius (X axis if rotation angle is 0) of search ellipse\&. Set this parameter to zero to use whole point array\&. Default is 0\&.0\&. 
 .IP "\fB\fIradius2\fP: \fP" 1c
-The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0. 
+The second radius (Y axis if rotation angle is 0) of search ellipse\&. Set this parameter to zero to use whole point array\&. Default is 0\&.0\&. 
 .IP "\fB\fIangle\fP: \fP" 1c
-Angle of search ellipse rotation in degrees (counter clockwise, default 0.0). 
+Angle of search ellipse rotation in degrees (counter clockwise, default 0\&.0)\&. 
 .IP "\fB\fInodata\fP: \fP" 1c
-NODATA marker to fill empty points (default 0.0). 
+NODATA marker to fill empty points (default 0\&.0)\&. 
 .PP
 .SH "DATA METRICS"
 .PP
-Besides the interpolation functionality \fBgdal_grid\fP can be used to compute some data metrics using the specified window and output grid geometry. These metrics are:
+Besides the interpolation functionality \fBgdal_grid\fP can be used to compute some data metrics using the specified window and output grid geometry\&. These metrics are:
 .PP
 .IP "\fB\fIminimum\fP: \fP" 1c
-Minimum value found in grid node search ellipse.
+Minimum value found in grid node search ellipse\&.
 .PP
 .IP "\fB\fImaximum\fP: \fP" 1c
-Maximum value found in grid node search ellipse.
+Maximum value found in grid node search ellipse\&.
 .PP
 .IP "\fB\fIrange\fP: \fP" 1c
-A difference between the minimum and maximum values found in grid node search ellipse.
+A difference between the minimum and maximum values found in grid node search ellipse\&.
 .PP
 .IP "\fB\fIcount\fP: \fP" 1c
-A number of data points found in grid node search ellipse.
+A number of data points found in grid node search ellipse\&.
 .PP
 .IP "\fB\fIaverage_distance\fP: \fP" 1c
-An average distance between the grid node (center of the search ellipse) and all of the data points found in grid node search ellipse.
+An average distance between the grid node (center of the search ellipse) and all of the data points found in grid node search ellipse\&.
 .PP
 .IP "\fB\fIaverage_distance_pts\fP: \fP" 1c
-An average distance between the data points found in grid node search ellipse. The distance between each pair of points within ellipse is calculated and average of all distances is set as a grid node value.
+An average distance between the data points found in grid node search ellipse\&. The distance between each pair of points within ellipse is calculated and average of all distances is set as a grid node value\&.
 .PP
 .PP
 .PP
 All the metrics have the same set of options:
 .PP
 .IP "\fB\fIradius1\fP: \fP" 1c
-The first radius (X axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0. 
+The first radius (X axis if rotation angle is 0) of search ellipse\&. Set this parameter to zero to use whole point array\&. Default is 0\&.0\&. 
 .IP "\fB\fIradius2\fP: \fP" 1c
-The second radius (Y axis if rotation angle is 0) of search ellipse. Set this parameter to zero to use whole point array. Default is 0.0. 
+The second radius (Y axis if rotation angle is 0) of search ellipse\&. Set this parameter to zero to use whole point array\&. Default is 0\&.0\&. 
 .IP "\fB\fIangle\fP: \fP" 1c
-Angle of search ellipse rotation in degrees (counter clockwise, default 0.0). 
+Angle of search ellipse rotation in degrees (counter clockwise, default 0\&.0)\&. 
 .IP "\fB\fImin_points\fP: \fP" 1c
-Minimum number of data points to use. If less amount of points found the grid node considered empty and will be filled with NODATA marker. This is only used if search ellipse is set (both radii are non-zero). Default is 0. 
+Minimum number of data points to use\&. If less amount of points found the grid node considered empty and will be filled with NODATA marker\&. This is only used if search ellipse is set (both radii are non-zero)\&. Default is 0\&. 
 .IP "\fB\fInodata\fP: \fP" 1c
-NODATA marker to fill empty points (default 0.0).
+NODATA marker to fill empty points (default 0\&.0)\&.
 .PP
 .PP
 .SH "READING COMMA SEPARATED VALUES"
 .PP
-Often you have a text file with a list of comma separated XYZ values to work with (so called CSV file). You can easily use that kind of data source in \fBgdal_grid\fP. All you need is create a virtual dataset header (VRT) for you CSV file and use it as input datasource for \fBgdal_grid\fP. You can find details on VRT format at \fCVirtual Format\fP description page.
+Often you have a text file with a list of comma separated XYZ values to work with (so called CSV file)\&. You can easily use that kind of data source in \fBgdal_grid\fP\&. All you need is create a virtual dataset header (VRT) for you CSV file and use it as input datasource for \fBgdal_grid\fP\&. You can find details on VRT format at \fCVirtual Format\fP description page\&.
 .PP
-Here is a small example. Let we have a CSV file called \fIdem.csv\fP containing
+Here is a small example\&. Let we have a CSV file called \fIdem\&.csv\fP containing
 .PP
 .PP
 .nf
@@ -206,7 +203,7 @@ Easting,Northing,Elevation
 .fi
 .PP
 .PP
-For above data we will create \fIdem.vrt\fP header with the following content:
+For above data we will create \fIdem\&.vrt\fP header with the following content:
 .PP
 .PP
 .nf
@@ -221,7 +218,7 @@ For above data we will create \fIdem.vrt\fP header with the following content:
 .fi
 .PP
 .PP
-This description specifies so called 2.5D geometry with three coordinates X, Y and Z. Z value will be used for interpolation. Now you can use \fIdem.vrt\fP with all OGR programs (start with \fBogrinfo\fP to test that everything works fine). The datasource will contain single layer called \fI'dem'\fP filled with point features constructed from values in CSV file. Using this technique you can handle CSV files with more than three columns, switch columns, etc.
+This description specifies so called 2\&.5D geometry with three coordinates X, Y and Z\&. Z value will be used for interpolation\&. Now you can use \fIdem\&.vrt\fP with all OGR programs (start with \fBogrinfo\fP to test that everything works fine)\&. The datasource will contain single layer called \fI'dem'\fP filled with point features constructed from values in CSV file\&. Using this technique you can handle CSV files with more than three columns, switch columns, etc\&.
 .PP
 If your CSV file does not contain column headers then it can be handled in the following way:
 .PP
@@ -232,10 +229,10 @@ If your CSV file does not contain column headers then it can be handled in the f
 .fi
 .PP
 .PP
-\fCComma Separated Value\fP description page contains details on CSV format supported by GDAL/OGR.
+\fCComma Separated Value\fP description page contains details on CSV format supported by GDAL/OGR\&.
 .SH "EXAMPLE"
 .PP
-The following would create raster TIFF file from VRT datasource described in \fBREADING COMMA SEPARATED VALUES\fP section using the inverse distance to a power method. Values to interpolate will be read from Z value of geometry record.
+The following would create raster TIFF file from VRT datasource described in \fBREADING COMMA SEPARATED VALUES\fP section using the inverse distance to a power method\&. Values to interpolate will be read from Z value of geometry record\&.
 .PP
 .PP
 .nf
@@ -244,7 +241,7 @@ gdal_grid -a invdist:power=2.0:smoothing=1.0 -txe 85000 89000 -tye 894000 890000
 .fi
 .PP
 .PP
-The next command does the same thing as the previous one, but reads values to interpolate from the attribute field specified with \fB-zfield\fP option instead of geometry record. So in this case X and Y coordinates are being taken from geometry and Z is being taken from the \fI'Elevation'\fP field. The GDAL_NUM_THREADS is also set to parallelize the computation.
+The next command does the same thing as the previous one, but reads values to interpolate from the attribute field specified with \fB-zfield\fP option instead of geometry record\&. So in this case X and Y coordinates are being taken from geometry and Z is being taken from the \fI'Elevation'\fP field\&. The GDAL_NUM_THREADS is also set to parallelize the computation\&.
 .PP
 .PP
 .nf
diff --git a/man/man1/gdal_merge.1 b/man/man1/gdal_merge.1
index a85dec8..2cdbc45 100644
--- a/man/man1/gdal_merge.1
+++ b/man/man1/gdal_merge.1
@@ -1,12 +1,9 @@
-.TH "gdal_merge" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_merge" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_merge \- .TH "gdal_merge" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_merge \- mosaics a set of images
+gdal_merge \- gdal_merge\&.py 
+mosaics a set of images
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -20,45 +17,45 @@ gdal_merge.py [-o out_filename] [-of out_format] [-co NAME=VALUE]*
 .PP
 .SH "DESCRIPTION"
 .PP
-This utility will automatically mosaic a set of images. All the images must be in the same coordinate system and have a matching number of bands, but they may be overlapping, and at different resolutions. In areas of overlap, the last image will be copied over earlier ones.
+This utility will automatically mosaic a set of images\&. All the images must be in the same coordinate system and have a matching number of bands, but they may be overlapping, and at different resolutions\&. In areas of overlap, the last image will be copied over earlier ones\&.
 .PP
 .IP "\fB\fB-o\fP \fIout_filename\fP:\fP" 1c
-The name of the output file, which will be created if it does not already exist (defaults to 'out.tif'). 
+The name of the output file, which will be created if it does not already exist (defaults to 'out\&.tif')\&. 
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Output format, defaults to GeoTIFF (GTiff).  
+Output format, defaults to GeoTIFF (GTiff)\&.  
 .IP "\fB\fB-co\fP \fINAME=VALUE\fP:\fP" 1c
-Creation option for output file. Multiple options can be specified.  
+Creation option for output file\&. Multiple options can be specified\&.  
 .IP "\fB\fB-ot\fP \fIdatatype\fP:\fP" 1c
-Force the output image bands to have a specific type. Use type names (ie. Byte, Int16,...)  
+Force the output image bands to have a specific type\&. Use type names (ie\&. Byte, Int16,\&.\&.\&.)  
 .IP "\fB\fB-ps\fP \fIpixelsize_x pixelsize_y\fP:\fP" 1c
-Pixel size to be used for the output file. If not specified the resolution of the first input file will be used.
+Pixel size to be used for the output file\&. If not specified the resolution of the first input file will be used\&.
 .PP
 .IP "\fB\fB-tap\fP:\fP" 1c
-(GDAL >= 1.8.0) (target aligned pixels) align the coordinates of the extent of the output file to the values of the -tr, such that the aligned extent includes the minimum extent.
+(GDAL >= 1\&.8\&.0) (target aligned pixels) align the coordinates of the extent of the output file to the values of the -tr, such that the aligned extent includes the minimum extent\&.
 .PP
 .IP "\fB\fB-ul_lr\fP \fIulx uly lrx lry\fP:\fP" 1c
-The extents of the output file. If not specified the aggregate extents of all input files will be used. 
+The extents of the output file\&. If not specified the aggregate extents of all input files will be used\&. 
 .IP "\fB\fP" 1c
 .IP "\fB\fB-v\fP:\fP" 1c
-Generate verbose output of mosaicing operations as they are done. 
+Generate verbose output of mosaicing operations as they are done\&. 
 .IP "\fB\fB-separate\fP:\fP" 1c
-Place each input file into a separate \fIstacked\fP band.  
+Place each input file into a separate \fIstacked\fP band\&.  
 .IP "\fB\fB-pct\fP:\fP" 1c
-Grab a pseudocolor table from the first input image, and use it for the output. Merging pseudocolored images this way assumes that all input files use the same color table.  
+Grab a pseudocolor table from the first input image, and use it for the output\&. Merging pseudocolored images this way assumes that all input files use the same color table\&.  
 .IP "\fB\fB-n\fP \fInodata_value\fP:\fP" 1c
-Ignore pixels from files being merged in with this pixel value.  
+Ignore pixels from files being merged in with this pixel value\&.  
 .IP "\fB\fB-a_nodata\fP \fIoutput_nodata_value\fP:\fP" 1c
-(GDAL >= 1.9.0) Assign a specified nodata value to output bands. 
+(GDAL >= 1\&.9\&.0) Assign a specified nodata value to output bands\&. 
 .IP "\fB\fB-init\fP \fI'value(s)'\fP:\fP" 1c
-Pre-initialize the output image bands with these values. However, it is not marked as the nodata value in the output file. If only one value is given, the same value is used in all the bands.  
+Pre-initialize the output image bands with these values\&. However, it is not marked as the nodata value in the output file\&. If only one value is given, the same value is used in all the bands\&.  
 .IP "\fB\fB-createonly\fP:\fP" 1c
-The output file is created (and potentially pre-initialized) but no input image data is copied into it.  
+The output file is created (and potentially pre-initialized) but no input image data is copied into it\&.  
 .PP
 .PP
-NOTE: gdal_merge.py is a Python script, and will only work if GDAL was built with Python support.
+NOTE: gdal_merge\&.py is a Python script, and will only work if GDAL was built with Python support\&.
 .SH "EXAMPLE"
 .PP
-Create an image with the pixels in all bands initialized to 255.
+Create an image with the pixels in all bands initialized to 255\&.
 .PP
 .PP
 .nf
@@ -67,7 +64,7 @@ Create an image with the pixels in all bands initialized to 255.
 .fi
 .PP
 .PP
-Create an RGB image that shows blue in pixels with no data. The first two bands will be initialized to 0 and the third band will be initalized to 255.
+Create an RGB image that shows blue in pixels with no data\&. The first two bands will be initialized to 0 and the third band will be initialized to 255\&.
 .PP
 .PP
 .nf
diff --git a/man/man1/gdal_polygonize.1 b/man/man1/gdal_polygonize.1
index 36b72c2..076a37e 100644
--- a/man/man1/gdal_polygonize.1
+++ b/man/man1/gdal_polygonize.1
@@ -1,12 +1,9 @@
-.TH "gdal_polygonize" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_polygonize" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_polygonize \- .TH "gdal_polygonize" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_polygonize \- produces a polygon feature layer from a raster
+gdal_polygonize \- gdal_polygonize\&.py 
+produces a polygon feature layer from a raster
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -18,41 +15,41 @@ gdal_polygonize.py [-8] [-nomask] [-mask filename] raster_file [-b band]
 .PP
 .SH "DESCRIPTION"
 .PP
-This utility creates vector polygons for all connected regions of pixels in the raster sharing a common pixel value. Each polygon is created with an attribute indicating the pixel value of that polygon. A raster mask may also be provided to determine which pixels are eligible for processing.
+This utility creates vector polygons for all connected regions of pixels in the raster sharing a common pixel value\&. Each polygon is created with an attribute indicating the pixel value of that polygon\&. A raster mask may also be provided to determine which pixels are eligible for processing\&.
 .PP
-The utility will create the output vector datasource if it does not already exist, defaulting to GML format.
+The utility will create the output vector datasource if it does not already exist, defaulting to GML format\&.
 .PP
-The utility is based on the GDALPolygonize() function which has additional details on the algorithm.
+The utility is based on the GDALPolygonize() function which has additional details on the algorithm\&.
 .PP
 .IP "\fB\fB-8\fP:\fP" 1c
-(GDAL >= 1.10) Use 8 connectedness. Default is 4 connectedness. 
+(GDAL >= 1\&.10) Use 8 connectedness\&. Default is 4 connectedness\&. 
 .PP
 .IP "\fB\fB-nomask\fP:\fP" 1c
-Do not use the default validity mask for the input band (such as nodata, or alpha masks). 
+Do not use the default validity mask for the input band (such as nodata, or alpha masks)\&. 
 .PP
 .IP "\fB\fB-mask\fP \fIfilename\fP:\fP" 1c
-Use the first band of the specified file as a validity mask (zero is invalid, non-zero is valid). 
+Use the first band of the specified file as a validity mask (zero is invalid, non-zero is valid)\&. 
 .PP
 .IP "\fB\fIraster_file\fP\fP" 1c
-The source raster file from which polygons are derived.
+The source raster file from which polygons are derived\&.
 .PP
 .IP "\fB\fB-b\fP \fIband\fP: \fP" 1c
-The band on \fIraster_file\fP to build the polygons from. 
+The band on \fIraster_file\fP to build the polygons from\&. 
 .PP
 .IP "\fB\fB-f\fP \fIogr_format\fP\fP" 1c
-Select the output format of the file to be created. Default is GML. 
+Select the output format of the file to be created\&. Default is GML\&. 
 .PP
 .IP "\fB\fIout_file\fP\fP" 1c
-The destination vector file to which the polygons will be written. 
+The destination vector file to which the polygons will be written\&. 
 .PP
 .IP "\fB\fIlayer\fP\fP" 1c
-The name of the layer created to hold the polygon features. 
+The name of the layer created to hold the polygon features\&. 
 .PP
 .IP "\fB\fIfieldname\fP\fP" 1c
-The name of the field to create (defaults to 'DN'). 
+The name of the field to create (defaults to 'DN')\&. 
 .PP
 .IP "\fB\fB-q\fP:\fP" 1c
-The script runs in quiet mode. The progress monitor is supressed and routine messages are not displayed. 
+The script runs in quiet mode\&. The progress monitor is suppressed and routine messages are not displayed\&. 
 .PP
 .PP
 .PP
diff --git a/man/man1/gdal_proximity.1 b/man/man1/gdal_proximity.1
index 94abf12..df177db 100644
--- a/man/man1/gdal_proximity.1
+++ b/man/man1/gdal_proximity.1
@@ -1,12 +1,9 @@
-.TH "gdal_proximity" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_proximity" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_proximity \- .TH "gdal_proximity" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_proximity \- produces a raster proximity map
+gdal_proximity \- gdal_proximity\&.py 
+produces a raster proximity map
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -21,43 +18,43 @@ gdal_proximity.py srcfile dstfile [-srcband n] [-dstband n]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdal_proximity.py script generates a raster proximity map indicating the distance from the center of each pixel to the center of the nearest pixel identified as a target pixel. Target pixels are those in the source raster for which the raster pixel value is in the set of target pixel values.
+The gdal_proximity\&.py script generates a raster proximity map indicating the distance from the center of each pixel to the center of the nearest pixel identified as a target pixel\&. Target pixels are those in the source raster for which the raster pixel value is in the set of target pixel values\&.
 .PP
 .IP "\fB\fIsrcfile\fP\fP" 1c
-The source raster file used to identify target pixels.
+The source raster file used to identify target pixels\&.
 .PP
 .IP "\fB\fIdstfile\fP\fP" 1c
-The destination raster file to which the proximity map will be written. It may be a pre-existing file of the same size as srcfile. If it does not exist it will be created.
+The destination raster file to which the proximity map will be written\&. It may be a pre-existing file of the same size as srcfile\&. If it does not exist it will be created\&.
 .PP
 .IP "\fB\fB-srcband\fP \fIn\fP\fP" 1c
-Identifies the band in the source file to use (default is 1).
+Identifies the band in the source file to use (default is 1)\&.
 .PP
 .IP "\fB\fB-dstband\fP \fIn\fP\fP" 1c
-Identifies the band in the destination file to use (default is 1).
+Identifies the band in the destination file to use (default is 1)\&.
 .PP
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Select the output format. The default is GeoTIFF (GTiff). Use the short format name. 
+Select the output format\&. The default is GeoTIFF (GTiff)\&. Use the short format name\&. 
 .PP
 .IP "\fB\fB-co\fP \fI'NAME=VALUE'\fP:\fP" 1c
-passes a creation option to the output format driver. Multiple \fB-co\fP options may be listed. See format specific documentation for legal creation options for each format. 
+passes a creation option to the output format driver\&. Multiple \fB-co\fP options may be listed\&. See format specific documentation for legal creation options for each format\&. 
 .PP
 .IP "\fB\fB-ot\fP \fIdatatype\fP:\fP" 1c
-Force the output image bands to have a specific type. Use type names (ie. Byte, Int16,...) 
+Force the output image bands to have a specific type\&. Use type names (ie\&. Byte, Int16,\&.\&.\&.) 
 .PP
 .IP "\fB\fB-values\fP \fIn,n,n\fP:\fP" 1c
-A list of target pixel values in the source image to be considered target pixels. If not specified, all non-zero pixels will be considered target pixels. 
+A list of target pixel values in the source image to be considered target pixels\&. If not specified, all non-zero pixels will be considered target pixels\&. 
 .PP
 .IP "\fB\fB-distunits\fP \fIPIXEL/GEO\fP:\fP" 1c
-Indicate whether distances generated should be in pixel or georeferenced coordinates (default PIXEL). 
+Indicate whether distances generated should be in pixel or georeferenced coordinates (default PIXEL)\&. 
 .PP
 .IP "\fB\fB-maxdist\fP \fIn\fP:\fP" 1c
-The maximum distance to be generated. All pixels beyond this distance will be assigned either the nodata value, or 65535. Distance is interpreted in pixels unless -distunits GEO is specified. 
+The maximum distance to be generated\&. All pixels beyond this distance will be assigned either the nodata value, or 65535\&. Distance is interpreted in pixels unless -distunits GEO is specified\&. 
 .PP
 .IP "\fB\fB-nodata\fP \fIn\fP:\fP" 1c
-Specify a nodata value to use for the destination proximity raster. 
+Specify a nodata value to use for the destination proximity raster\&. 
 .PP
 .IP "\fB\fB-fixed-buf-val\fP \fIn\fP:\fP" 1c
-Specify a value to be applied to all pixels that are within the -maxdist of target pixels (including the target pixels) instead of a distance value. 
+Specify a value to be applied to all pixels that are within the -maxdist of target pixels (including the target pixels) instead of a distance value\&. 
 .PP
 .PP
 .SH "AUTHORS"
diff --git a/man/man1/gdal_rasterize.1 b/man/man1/gdal_rasterize.1
index 0a9528f..d1201db 100644
--- a/man/man1/gdal_rasterize.1
+++ b/man/man1/gdal_rasterize.1
@@ -1,12 +1,9 @@
-.TH "gdal_rasterize" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_rasterize" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_rasterize \- .TH "gdal_rasterize" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_rasterize \- burns vector geometries into a raster
+gdal_rasterize \- gdal_rasterize 
+burns vector geometries into a raster
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -25,82 +22,82 @@ Usage: gdal_rasterize [-b band]* [-i] [-at]
 .PP
 .SH "DESCRIPTION"
 .PP
-This program burns vector geometries (points, lines and polygons) into the raster band(s) of a raster image. Vectors are read from OGR supported vector formats.
+This program burns vector geometries (points, lines and polygons) into the raster band(s) of a raster image\&. Vectors are read from OGR supported vector formats\&.
 .PP
-Note that the vector data must in the same coordinate system as the raster data; on the fly reprojection is not provided.
+Note that the vector data must be in the same coordinate system as the raster data; on the fly reprojection is not provided\&.
 .PP
-Since GDAL 1.8.0, the target GDAL file can be created by gdal_rasterize. One of -tr or -ts option must be used in that case.
+Since GDAL 1\&.8\&.0, the target GDAL file can be created by gdal_rasterize\&. One of -tr or -ts option must be used in that case\&.
 .PP
 .IP "\fB\fB-b\fP \fIband\fP: \fP" 1c
-The band(s) to burn values into. Multiple -b arguments may be used to burn into a list of bands. The default is to burn into band 1.
+The band(s) to burn values into\&. Multiple -b arguments may be used to burn into a list of bands\&. The default is to burn into band 1\&.
 .PP
 .IP "\fB\fB-i\fP: \fP" 1c
-Invert rasterization. Burn the fixed burn value, or the burn value associated with the first feature into all parts of the image \fInot\fP inside the provided a polygon.
+Invert rasterization\&. Burn the fixed burn value, or the burn value associated with the first feature into all parts of the image \fInot\fP inside the provided a polygon\&.
 .PP
 .IP "\fB\fB-at\fP: \fP" 1c
-Enables the ALL_TOUCHED rasterization option so that all pixels touched by lines or polygons will be updated not just those one the line render path, or whose center point is within the polygon. Defaults to disabled for normal rendering rules.
+Enables the ALL_TOUCHED rasterization option so that all pixels touched by lines or polygons will be updated not just those on the line render path, or whose center point is within the polygon\&. Defaults to disabled for normal rendering rules\&.
 .PP
 .IP "\fB\fB-burn\fP \fIvalue\fP: \fP" 1c
-A fixed value to burn into a band for all objects. A list of -burn options can be supplied, one per band being written to.
+A fixed value to burn into a band for all objects\&. A list of -burn options can be supplied, one per band being written to\&.
 .PP
 .IP "\fB\fB-a\fP \fIattribute_name\fP: \fP" 1c
-Identifies an attribute field on the features to be used for a burn in value. The value will be burned into all output bands.
+Identifies an attribute field on the features to be used for a burn in value\&. The value will be burned into all output bands\&.
 .PP
 .IP "\fB\fB-3d\fP: \fP" 1c
-Indicates that a burn value should be extracted from the 'Z' values of the feature. These values are adjusted by the burn value given by '-burn value' or '-a attribute_name' if provided. As of now, only points and lines are drawn in 3D.
+Indicates that a burn value should be extracted from the 'Z' values of the feature\&. These values are adjusted by the burn value given by '-burn value' or '-a attribute_name' if provided\&. As of now, only points and lines are drawn in 3D\&.
 .PP
 .IP "\fB\fB-l\fP \fIlayername\fP: \fP" 1c
-Indicates the layer(s) from the datasource that will be used for input features. May be specified multiple times, but at least one layer name or a -sql option must be specified.
+Indicates the layer(s) from the datasource that will be used for input features\&. May be specified multiple times, but at least one layer name or a -sql option must be specified\&.
 .PP
 .IP "\fB\fB-where\fP \fIexpression\fP: \fP" 1c
-An optional SQL WHERE style query expression to be applied to select features to burn in from the input layer(s). 
+An optional SQL WHERE style query expression to be applied to select features to burn in from the input layer(s)\&. 
 .PP
 .IP "\fB\fB-sql\fP \fIselect_statement\fP: \fP" 1c
-An SQL statement to be evaluated against the datasource to produce a virtual layer of features to be burned in.
+An SQL statement to be evaluated against the datasource to produce a virtual layer of features to be burned in\&.
 .PP
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-(GDAL >= 1.8.0) Select the output format. The default is GeoTIFF (GTiff). Use the short format name.
+(GDAL >= 1\&.8\&.0) Select the output format\&. The default is GeoTIFF (GTiff)\&. Use the short format name\&.
 .PP
 .IP "\fB\fB-a_nodata\fP \fIvalue\fP:\fP" 1c
-(GDAL >= 1.8.0) Assign a specified nodata value to output bands.
+(GDAL >= 1\&.8\&.0) Assign a specified nodata value to output bands\&.
 .PP
 .IP "\fB\fB-init\fP \fIvalue\fP:\fP" 1c
-(GDAL >= 1.8.0) Pre-initialize the output image bands with these values. However, it is not marked as the nodata value in the output file. If only one value is given, the same value is used in all the bands.
+(GDAL >= 1\&.8\&.0) Pre-initialize the output image bands with these values\&. However, it is not marked as the nodata value in the output file\&. If only one value is given, the same value is used in all the bands\&.
 .PP
 .IP "\fB\fB-a_srs\fP \fIsrs_def\fP:\fP" 1c
-(GDAL >= 1.8.0) Override the projection for the output file. If not specified, the projection of the input vector file will be used if available. If incompatible projections between input and output files, no attempt will be made to reproject features. The \fIsrs_def\fP may be any of the usual GDAL/OGR forms, complete WKT, PROJ.4, EPSG:n or a file containing the WKT. 
+(GDAL >= 1\&.8\&.0) Override the projection for the output file\&. If not specified, the projection of the input vector file will be used if available\&. If incompatible projections between input and output files, no attempt will be made to reproject features\&. The \fIsrs_def\fP may be any of the usual GDAL/OGR forms, complete WKT, PROJ\&.4, EPSG:n or a file containing the WKT\&. 
 .PP
 .IP "\fB\fB-co\fP \fI'NAME=VALUE'\fP:\fP" 1c
-(GDAL >= 1.8.0) Passes a creation option to the output format driver. Multiple \fB-co\fP options may be listed. See format specific documentation for legal creation options for each format.
+(GDAL >= 1\&.8\&.0) Passes a creation option to the output format driver\&. Multiple \fB-co\fP options may be listed\&. See format specific documentation for legal creation options for each format\&.
 .PP
 .IP "\fB\fB-te\fP \fIxmin ymin xmax ymax\fP :\fP" 1c
-(GDAL >= 1.8.0) set georeferenced extents. The values must be expressed in georeferenced units. If not specified, the extent of the output file will be the extent of the vector layers. 
+(GDAL >= 1\&.8\&.0) set georeferenced extents\&. The values must be expressed in georeferenced units\&. If not specified, the extent of the output file will be the extent of the vector layers\&. 
 .PP
 .IP "\fB\fB-tr\fP \fIxres yres\fP :\fP" 1c
-(GDAL >= 1.8.0) set target resolution. The values must be expressed in georeferenced units. Both must be positive values. 
+(GDAL >= 1\&.8\&.0) set target resolution\&. The values must be expressed in georeferenced units\&. Both must be positive values\&. 
 .PP
 .IP "\fB\fB-tap\fP:\fP" 1c
-(GDAL >= 1.8.0) (target aligned pixels) align the coordinates of the extent of the output file to the values of the -tr, such that the aligned extent includes the minimum extent.
+(GDAL >= 1\&.8\&.0) (target aligned pixels) align the coordinates of the extent of the output file to the values of the -tr, such that the aligned extent includes the minimum extent\&.
 .PP
 .IP "\fB\fB-ts\fP \fIwidth height\fP:\fP" 1c
-(GDAL >= 1.8.0) set output file size in pixels and lines. Note that -ts cannot be used with -tr
+(GDAL >= 1\&.8\&.0) set output file size in pixels and lines\&. Note that -ts cannot be used with -tr
 .PP
 .IP "\fB\fB-ot\fP \fItype\fP:\fP" 1c
-(GDAL >= 1.8.0) For the output bands to be of the indicated data type. Defaults to Float64
+(GDAL >= 1\&.8\&.0) For the output bands to be of the indicated data type\&. Defaults to Float64
 .PP
 .IP "\fB\fB-q\fP:\fP" 1c
-(GDAL >= 1.8.0) Suppress progress monitor and other non-error output.
+(GDAL >= 1\&.8\&.0) Suppress progress monitor and other non-error output\&.
 .PP
 .IP "\fB\fIsrc_datasource\fP: \fP" 1c
-Any OGR supported readable datasource.
+Any OGR supported readable datasource\&.
 .PP
 .IP "\fB\fIdst_filename\fP: \fP" 1c
-The GDAL supported output file. Must support update mode access. Before GDAL 1.8.0, gdal_rasterize could not create new output files.
+The GDAL supported output file\&. Must support update mode access\&. Before GDAL 1\&.8\&.0, gdal_rasterize could not create new output files\&.
 .PP
 .PP
 .SH "EXAMPLE"
 .PP
-The following would burn all polygons from mask.shp into the RGB TIFF file work.tif with the color red (RGB = 255,0,0).
+The following would burn all polygons from mask\&.shp into the RGB TIFF file work\&.tif with the color red (RGB = 255,0,0)\&.
 .PP
 .PP
 .nf
@@ -109,7 +106,7 @@ gdal_rasterize -b 1 -b 2 -b 3 -burn 255 -burn 0 -burn 0 -l mask mask.shp work.ti
 .fi
 .PP
 .PP
-The following would burn all 'class A' buildings into the output elevation file, pulling the top elevation from the ROOF_H attribute.
+The following would burn all 'class A' buildings into the output elevation file, pulling the top elevation from the ROOF_H attribute\&.
 .PP
 .PP
 .nf
diff --git a/man/man1/gdal_retile.1 b/man/man1/gdal_retile.1
index 7434f8b..802653d 100644
--- a/man/man1/gdal_retile.1
+++ b/man/man1/gdal_retile.1
@@ -1,12 +1,9 @@
-.TH "gdal_retile" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_retile" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_retile \- .TH "gdal_retile" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_retile \- gdal_retile.py retiles a set of tiles and/or build tiled pyramid levels
+gdal_retile \- gdal_retile\&.py 
+gdal_retile\&.py retiles a set of tiles and/or build tiled pyramid levels
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -30,43 +27,43 @@ gdal_retile.py [-v] [-co NAME=VALUE]* [-of out_format] [-ps pixelWidth pixelHeig
  
 .SH "DESCRIPTION"
 .PP
-This utility will retile a set of input tile(s). All the input tile(s) must be georeferenced in the same coordinate system and have a matching number of bands. Optionally pyramid levels are generated. It is possible to generate shape file(s) for the tiled output.
+This utility will retile a set of input tile(s)\&. All the input tile(s) must be georeferenced in the same coordinate system and have a matching number of bands\&. Optionally pyramid levels are generated\&. It is possible to generate shape file(s) for the tiled output\&.
 .PP
 If your number of input tiles exhausts the command line buffer, use the general --optfile option
 .PP
 .IP "\fB\fB-targetDir\fP \fIdirectory\fP:\fP" 1c
-The directory where the tile result is created. Pyramids are stored in subdirectories numbered from 1. Created tile names have a numbering schema and contain the name of the source tiles(s)  
+The directory where the tile result is created\&. Pyramids are stored in subdirectories numbered from 1\&. Created tile names have a numbering schema and contain the name of the source tiles(s)  
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Output format, defaults to GeoTIFF (GTiff).  
+Output format, defaults to GeoTIFF (GTiff)\&.  
 .IP "\fB\fB-co\fP \fINAME=VALUE\fP:\fP" 1c
-Creation option for output file. Multiple options can be specified.  
+Creation option for output file\&. Multiple options can be specified\&.  
 .IP "\fB\fB-ot\fP \fIdatatype\fP:\fP" 1c
-Force the output image bands to have a specific type. Use type names (ie. Byte, Int16,...)  
+Force the output image bands to have a specific type\&. Use type names (ie\&. Byte, Int16,\&.\&.\&.)  
 .IP "\fB\fB-ps\fP \fIpixelsize_x pixelsize_y\fP:\fP" 1c
-Pixel size to be used for the output file. If not specified, 256 x 256 is the default  
+Pixel size to be used for the output file\&. If not specified, 256 x 256 is the default  
 .IP "\fB\fB-levels\fP \fInumberOfLevels\fP:\fP" 1c
-Number of pyramids levels to build.  
+Number of pyramids levels to build\&.  
 .IP "\fB\fB-v\fP:\fP" 1c
-Generate verbose output of tile operations as they are done.  
+Generate verbose output of tile operations as they are done\&.  
 .IP "\fB\fB-pyramidOnly\fP:\fP" 1c
 No retiling, build only the pyramids  
 .IP "\fB\fB-r\fP \fIalgorithm\fP:\fP" 1c
 Resampling algorithm, default is near  
 .IP "\fB\fB-s_srs\fP \fIsrs_def\fP:\fP" 1c
-Source spatial reference to use. The coordinate systems that can be passed are anything supported by the OGRSpatialReference.SetFro‐mUserInput() call, which includes EPSG PCS and GCSes (ie.EPSG:4296), PROJ.4 declarations (as above), or the name of a .prf file containing well known text. If no srs_def is given, the srs_def of the source tiles is used (if there is any). The srs_def will be propagated to created tiles (if possible) and to the optional shape file(s)  
+Source spatial reference to use\&. The coordinate systems that can be passed are anything supported by the OGRSpatialReference\&.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie\&.EPSG:4296), PROJ\&.4 declarations (as above), or the name of a \&.prf file containing well known text\&. If no srs_def is given, the srs_def of the source tiles is used (if there is any)\&. The srs_def will be propagated to created tiles (if possible) and to the optional shape file(s)  
 .IP "\fB\fB-tileIndex\fP \fItileIndexName\fP:\fP" 1c
 The name of shape file containing the result tile(s) index  
 .IP "\fB\fB-tileIndexField\fP \fItileIndexFieldName\fP:\fP" 1c
 The name of the attribute containing the tile name  
 .IP "\fB\fB-csv\fP \fIcsvFileName\fP:\fP" 1c
-The name of the csv file containing the tile(s) georeferencing information. The file contains 5 columns: tilename,minx,maxx,miny,maxy  
+The name of the csv file containing the tile(s) georeferencing information\&. The file contains 5 columns: tilename,minx,maxx,miny,maxy  
 .IP "\fB\fB-csvDelim\fP \fIcolumn delimiter\fP:\fP" 1c
 The column delimter used in the csv file, default value is a semicolon ';'  
 .IP "\fB\fB-useDirForEachRow\fP:\fP" 1c
-Normally the tiles of the base image are stored as described in \fB-targetDir\fP. For large images, some file systems have performance problems if the number of files in a directory is to big, causing gdal_retile not to finish in reasonable time. Using this parameter creates a different output structure. The tiles of the base image are stored in a subdirectory called 0, the pyramids in subdirectories numbered 1,2,.... Within each of these directories another level of subdirectories is cr [...]
+Normally the tiles of the base image are stored as described in \fB-targetDir\fP\&. For large images, some file systems have performance problems if the number of files in a directory is to big, causing gdal_retile not to finish in reasonable time\&. Using this parameter creates a different output structure\&. The tiles of the base image are stored in a subdirectory called 0, the pyramids in subdirectories numbered 1,2,\&.\&.\&.\&. Within each of these directories another level of subdir [...]
 .PP
 .PP
-NOTE: gdal_retile.py is a Python script, and will only work if GDAL was built with Python support.
+NOTE: gdal_retile\&.py is a Python script, and will only work if GDAL was built with Python support\&.
 .SH "AUTHORS"
 .PP
 Christian Mueller <christian.mueller at nvoe.at> 
diff --git a/man/man1/gdal_sieve.1 b/man/man1/gdal_sieve.1
index eca889c..c67c594 100644
--- a/man/man1/gdal_sieve.1
+++ b/man/man1/gdal_sieve.1
@@ -1,12 +1,9 @@
-.TH "gdal_sieve" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_sieve" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_sieve \- .TH "gdal_sieve" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_sieve \- removes small raster polygons
+gdal_sieve \- gdal_sieve\&.py 
+removes small raster polygons
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -18,39 +15,39 @@ gdal_sieve.py [-q] [-st threshold] [-4] [-8] [-o name=value]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdal_sieve.py script removes raster polygons smaller than a provided threshold size (in pixels) and replaces replaces them with the pixel value of the largest neighbour polygon. The result can be written back to the existing raster band, or copied into a new file.
+The gdal_sieve\&.py script removes raster polygons smaller than a provided threshold size (in pixels) and replaces them with the pixel value of the largest neighbour polygon\&. The result can be written back to the existing raster band, or copied into a new file\&.
 .PP
-Additional details on the algorithm are available in the GDALSieveFilter() docs.
+Additional details on the algorithm are available in the GDALSieveFilter() docs\&.
 .PP
 .IP "\fB\fB-q\fP:\fP" 1c
-The script runs in quiet mode. The progress monitor is supressed and routine messages are not displayed.
+The script runs in quiet mode\&. The progress monitor is suppressed and routine messages are not displayed\&.
 .PP
 .IP "\fB\fB-st\fP \fIthreshold\fP:\fP" 1c
-Set the size threshold in pixels. Only raster polygons smaller than this size will be removed.
+Set the size threshold in pixels\&. Only raster polygons smaller than this size will be removed\&.
 .PP
 .IP "\fB\fB-o\fP \fIname=value\fP:\fP" 1c
-Specify a special argument to the algorithm. Currently none are supported. 
+Specify a special argument to the algorithm\&. Currently none are supported\&. 
 .PP
 .IP "\fB\fB-4\fP:\fP" 1c
-Four connectedness should be used when determining polygons. That is diagonal pixels are not considered directly connected. This is the default. 
+Four connectedness should be used when determining polygons\&. That is diagonal pixels are not considered directly connected\&. This is the default\&. 
 .PP
 .IP "\fB\fB-8\fP:\fP" 1c
-Eight connectedness should be used when determining polygons. That is diagonal pixels are considered directly connected. 
+Eight connectedness should be used when determining polygons\&. That is diagonal pixels are considered directly connected\&. 
 .PP
 .IP "\fB\fIsrcfile\fP\fP" 1c
-The source raster file used to identify target pixels. Only the first band is used.
+The source raster file used to identify target pixels\&. Only the first band is used\&.
 .PP
 .IP "\fB\fB-nomask\fP:\fP" 1c
-Do not use the default validity mask for the input band (such as nodata, or alpha masks). 
+Do not use the default validity mask for the input band (such as nodata, or alpha masks)\&. 
 .PP
 .IP "\fB\fB-mask\fP \fIfilename\fP:\fP" 1c
-Use the first band of the specified file as a validity mask (zero is invalid, non-zero is valid). 
+Use the first band of the specified file as a validity mask (zero is invalid, non-zero is valid)\&. 
 .PP
 .IP "\fB\fIdstfile\fP\fP" 1c
-The new file to create with the filtered result. If not provided, the source band is updated in place.
+The new file to create with the filtered result\&. If not provided, the source band is updated in place\&.
 .PP
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Select the output format. The default is GeoTIFF (GTiff). Use the short format name. 
+Select the output format\&. The default is GeoTIFF (GTiff)\&. Use the short format name\&. 
 .PP
 .PP
 .SH "AUTHORS"
diff --git a/man/man1/gdal_translate.1 b/man/man1/gdal_translate.1
index 4b7ba64..9b02a38 100644
--- a/man/man1/gdal_translate.1
+++ b/man/man1/gdal_translate.1
@@ -1,12 +1,9 @@
-.TH "gdal_translate" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_translate" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_translate \- .TH "gdal_translate" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_translate \- converts raster data between different formats
+gdal_translate \- gdal_translate 
+converts raster data between different formats
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -28,60 +25,60 @@ gdal_translate [--help-general]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdal_translate utility can be used to convert raster data between different formats, potentially performing some operations like subsettings, resampling, and rescaling pixels in the process.
+The gdal_translate utility can be used to convert raster data between different formats, potentially performing some operations like subsettings, resampling, and rescaling pixels in the process\&.
 .PP
 .IP "\fB\fB-ot\fP: \fItype\fP\fP" 1c
-For the output bands to be of the indicated data type. 
+For the output bands to be of the indicated data type\&. 
 .IP "\fB\fB-strict\fP:\fP" 1c
-Don't be forgiving of mismatches and lost data when translating to the output format. 
+Don't be forgiving of mismatches and lost data when translating to the output format\&. 
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Select the output format. The default is GeoTIFF (GTiff). Use the short format name. 
+Select the output format\&. The default is GeoTIFF (GTiff)\&. Use the short format name\&. 
 .IP "\fB\fB-b\fP \fIband\fP:\fP" 1c
-Select an input band \fIband\fP for output. Bands are numbered from 1. Multiple \fB-b\fP switches may be used to select a set of input bands to write to the output file, or to reorder bands. Starting with GDAL 1.8.0, \fIband\fP can also be set to 'mask,1' (or just 'mask') to mean the mask band of the first band of the input dataset. 
+Select an input band \fIband\fP for output\&. Bands are numbered from 1\&. Multiple \fB-b\fP switches may be used to select a set of input bands to write to the output file, or to reorder bands\&. Starting with GDAL 1\&.8\&.0, \fIband\fP can also be set to 'mask,1' (or just 'mask') to mean the mask band of the first band of the input dataset\&. 
 .IP "\fB\fB-mask\fP \fIband\fP:\fP" 1c
-(GDAL >= 1.8.0) Select an input band \fIband\fP to create output dataset mask band. Bands are numbered from 1. \fIband\fP can be set to 'none' to avoid copying the global mask of the input dataset if it exists. Otherwise it is copied by default ('auto'), unless the mask is an alpha channel, or if it is explicitly used to be a regular band of the output dataset ('-b mask'). \fIband\fP can also be set to 'mask,1' (or just 'mask') to mean the mask band of the 1st band of the input dataset. 
+(GDAL >= 1\&.8\&.0) Select an input band \fIband\fP to create output dataset mask band\&. Bands are numbered from 1\&. \fIband\fP can be set to 'none' to avoid copying the global mask of the input dataset if it exists\&. Otherwise it is copied by default ('auto'), unless the mask is an alpha channel, or if it is explicitly used to be a regular band of the output dataset ('-b mask')\&. \fIband\fP can also be set to 'mask,1' (or just 'mask') to mean the mask band of the 1st band of the inp [...]
 .IP "\fB\fB-expand\fP \fIgray|rgb|rgba\fP:\fP" 1c
-(From GDAL 1.6.0) To expose a dataset with 1 band with a color table as a dataset with 3 (RGB) or 4 (RGBA) bands. Useful for output drivers such as JPEG, JPEG2000, MrSID, ECW that don't support color indexed datasets. The 'gray' value (from GDAL 1.7.0) enables to expand a dataset with a color table that only contains gray levels to a gray indexed dataset. 
+(From GDAL 1\&.6\&.0) To expose a dataset with 1 band with a color table as a dataset with 3 (RGB) or 4 (RGBA) bands\&. Useful for output drivers such as JPEG, JPEG2000, MrSID, ECW that don't support color indexed datasets\&. The 'gray' value (from GDAL 1\&.7\&.0) enables to expand a dataset with a color table that only contains gray levels to a gray indexed dataset\&. 
 .IP "\fB\fB-outsize\fP \fIxsize[%] ysize[%]\fP:\fP" 1c
-Set the size of the output file. Outsize is in pixels and lines unless '%' is attached in which case it is as a fraction of the input image size. 
+Set the size of the output file\&. Outsize is in pixels and lines unless '%' is attached in which case it is as a fraction of the input image size\&. 
 .IP "\fB\fB-scale\fP \fI[src_min src_max [dst_min dst_max]]\fP:\fP" 1c
-Rescale the input pixels values from the range \fIsrc_min\fP to \fIsrc_max\fP to the range \fIdst_min\fP to \fIdst_max\fP. If omitted the output range is 0 to 255. If omitted the input range is automatically computed from the source data. Before GDAL 1.11, it can be specified only once, and in that case, it applies to all bands of the output dataset. Starting with GDAL 1.11, -scale can be repeated several times (if specified only once, it also applies to all bands of the output dataset), [...]
+Rescale the input pixels values from the range \fIsrc_min\fP to \fIsrc_max\fP to the range \fIdst_min\fP to \fIdst_max\fP\&. If omitted the output range is 0 to 255\&. If omitted the input range is automatically computed from the source data\&. Before GDAL 1\&.11, it can be specified only once, and in that case, it applies to all bands of the output dataset\&. Starting with GDAL 1\&.11, -scale can be repeated several times (if specified only once, it also applies to all bands of the outp [...]
 .IP "\fB\fB-exponent\fP \fI exp_val\fP:\fP" 1c
-(From GDAL 1.11) To apply non-linear scaling with a power function. exp_val is the exponent of the power function (must be postive). This option must be used with the -scale option. If specified only once, -exponent applies to all bands of the output image. It can be repeated several times so as to specify per band parameters. It is also possible to use the '-exponent_bn' syntax where bn is a band number (e.g. '-exponent_2' for the 2nd band of the output dataset) to specify the parameter [...]
+(From GDAL 1\&.11) To apply non-linear scaling with a power function\&. exp_val is the exponent of the power function (must be positive)\&. This option must be used with the -scale option\&. If specified only once, -exponent applies to all bands of the output image\&. It can be repeated several times so as to specify per band parameters\&. It is also possible to use the '-exponent_bn' syntax where bn is a band number (e\&.g\&. '-exponent_2' for the 2nd band of the output dataset) to speci [...]
 .IP "\fB\fB-unscale\fP:\fP" 1c
-Apply the scale/offset metadata for the bands to convert scaled values to unscaled values. It is also often necessary to reset the output datatype with the \fB-ot\fP switch. 
+Apply the scale/offset metadata for the bands to convert scaled values to unscaled values\&. It is also often necessary to reset the output datatype with the \fB-ot\fP switch\&. 
 .IP "\fB\fB-srcwin\fP \fIxoff yoff xsize ysize\fP:\fP" 1c
-Selects a subwindow from the source image for copying based on pixel/line location.  
+Selects a subwindow from the source image for copying based on pixel/line location\&.  
 .IP "\fB\fB-projwin\fP \fIulx uly lrx lry\fP:\fP" 1c
-Selects a subwindow from the source image for copying (like \fB-srcwin\fP) but with the corners given in georeferenced coordinates.  
+Selects a subwindow from the source image for copying (like \fB-srcwin\fP) but with the corners given in georeferenced coordinates\&.  
 .IP "\fB\fB-epo\fP: (Error when Partially Outside)\fP" 1c
-(GDAL >= 1.10) If this option is set, \fB-srcwin\fP or \fB-projwin\fP values that falls partially outside the source raster extent will be considered as an error. The default behaviour starting with GDAL 1.10 is to accept such requests, when they were considered as an error before. 
+(GDAL >= 1\&.10) If this option is set, \fB-srcwin\fP or \fB-projwin\fP values that fall partially outside the source raster extent will be considered as an error\&. The default behaviour starting with GDAL 1\&.10 is to accept such requests, when they were considered as an error before\&. 
 .IP "\fB\fB-eco\fP: (Error when Completely Outside)\fP" 1c
-(GDAL >= 1.10) Same as \fB-epo\fP, except that the criterion for erroring out is when the request falls completely outside the source raster extent. 
+(GDAL >= 1\&.10) Same as \fB-epo\fP, except that the criterion for erroring out is when the request falls completely outside the source raster extent\&. 
 .IP "\fB\fB-a_srs\fP \fIsrs_def\fP:\fP" 1c
-Override the projection for the output file. The \fIsrs_def\fP may be any of the usual GDAL/OGR forms, complete WKT, PROJ.4, EPSG:n or a file containing the WKT.  
+Override the projection for the output file\&. The \fIsrs_def\fP may be any of the usual GDAL/OGR forms, complete WKT, PROJ\&.4, EPSG:n or a file containing the WKT\&.  
 .IP "\fB\fB-a_ullr\fP \fIulx uly lrx lry\fP:\fP" 1c
-Assign/override the georeferenced bounds of the output file. This assigns georeferenced bounds to the output file, ignoring what would have been derived from the source file. 
+Assign/override the georeferenced bounds of the output file\&. This assigns georeferenced bounds to the output file, ignoring what would have been derived from the source file\&. 
 .IP "\fB\fB-a_nodata\fP \fIvalue\fP:\fP" 1c
-Assign a specified nodata value to output bands. Starting with GDAL 1.8.0, can be set to \fInone\fP to avoid setting a nodata value to the output file if one exists for the source file 
+Assign a specified nodata value to output bands\&. Starting with GDAL 1\&.8\&.0, can be set to \fInone\fP to avoid setting a nodata value to the output file if one exists for the source file 
 .IP "\fB\fB-mo\fP \fI'META-TAG=VALUE'\fP:\fP" 1c
-Passes a metadata key and value to set on the output dataset if possible. 
+Passes a metadata key and value to set on the output dataset if possible\&. 
 .IP "\fB\fB-co\fP \fI'NAME=VALUE'\fP:\fP" 1c
-Passes a creation option to the output format driver. Multiple \fB-co\fP options may be listed. See format specific documentation for legal creation options for each format. 
+Passes a creation option to the output format driver\&. Multiple \fB-co\fP options may be listed\&. See format specific documentation for legal creation options for each format\&. 
 .IP "\fB\fB-gcp\fP \fIpixel line easting northing elevation\fP:\fP" 1c
-Add the indicated ground control point to the output dataset. This option may be provided multiple times to provide a set of GCPs.  
+Add the indicated ground control point to the output dataset\&. This option may be provided multiple times to provide a set of GCPs\&.  
 .IP "\fB\fB-q\fP:\fP" 1c
-Suppress progress monitor and other non-error output. 
+Suppress progress monitor and other non-error output\&. 
 .IP "\fB\fB-sds\fP:\fP" 1c
-Copy all subdatasets of this file to individual output files. Use with formats like HDF or OGDI that have subdatasets. The output file naming scheme has changed in GDAL 1.11 (e.g. ofile_1.tif, ofile_2.tif). 
+Copy all subdatasets of this file to individual output files\&. Use with formats like HDF or OGDI that have subdatasets\&. The output file naming scheme has changed in GDAL 1\&.11 (e\&.g\&. ofile_1\&.tif, ofile_2\&.tif)\&. 
 .IP "\fB\fB-stats\fP:\fP" 1c
-(GDAL >= 1.8.0) Force (re)computation of statistics. 
+(GDAL >= 1\&.8\&.0) Force (re)computation of statistics\&. 
 .IP "\fB\fB-norat\fP\fP" 1c
-(GDAL >= 1.11) Do not copy source RAT into destination dataset. 
+(GDAL >= 1\&.11) Do not copy source RAT into destination dataset\&. 
 .IP "\fB\fIsrc_dataset\fP:\fP" 1c
-The source dataset name. It can be either file name, URL of data source or subdataset name for multi-dataset files. 
+The source dataset name\&. It can be either file name, URL of data source or subdataset name for multi-dataset files\&. 
 .IP "\fB\fIdst_dataset\fP:\fP" 1c
-The destination file name. 
+The destination file name\&. 
 .PP
 .SH "EXAMPLE"
 .PP
@@ -92,7 +89,7 @@ gdal_translate -of GTiff -co "TILED=YES" utm.tif utm_tiled.tif
 .fi
 .PP
 .PP
-Starting with GDAL 1.8.0, to create a JPEG-compressed TIFF with internal mask from a RGBA dataset : 
+Starting with GDAL 1\&.8\&.0, to create a JPEG-compressed TIFF with internal mask from a RGBA dataset : 
 .PP
 .nf
 
@@ -101,7 +98,7 @@ gdal_translate rgba.tif withmask.tif -b 1 -b 2 -b 3 -mask 4 -co COMPRESS=JPEG -c
 .fi
 .PP
 .PP
-Starting with GDAL 1.8.0, to create a RGBA dataset from a RGB dataset with a mask : 
+Starting with GDAL 1\&.8\&.0, to create a RGBA dataset from a RGB dataset with a mask : 
 .PP
 .nf
 
diff --git a/man/man1/gdal_utilities.1 b/man/man1/gdal_utilities.1
index f073f26..b8b289d 100644
--- a/man/man1/gdal_utilities.1
+++ b/man/man1/gdal_utilities.1
@@ -1,125 +1,122 @@
-.TH "gdal_utilities" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdal_utilities" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdal_utilities \- .TH "gdal_utilities" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdal_utilities \- The following utility programs are distributed with GDAL.
+gdal_utilities \- GDAL Utilities 
+The following utility programs are distributed with GDAL\&.
 .PP
 .PD 0
 .IP "\(bu" 2
-\fBgdalinfo\fP - report information about a file. 
+\fBgdalinfo\fP - report information about a file\&. 
 .IP "\(bu" 2
-\fBgdal_translate\fP - Copy a raster file, with control of output format. 
+\fBgdal_translate\fP - Copy a raster file, with control of output format\&. 
 .IP "\(bu" 2
-\fBgdaladdo\fP - Add overviews to a file. 
+\fBgdaladdo\fP - Add overviews to a file\&. 
 .IP "\(bu" 2
-\fBgdalwarp\fP - Warp an image into a new coordinate system. 
+\fBgdalwarp\fP - Warp an image into a new coordinate system\&. 
 .IP "\(bu" 2
-\fBgdaltindex\fP - Build a MapServer raster tileindex. 
+\fBgdaltindex\fP - Build a MapServer raster tileindex\&. 
 .IP "\(bu" 2
-\fBgdalbuildvrt\fP - Build a VRT from a list of datasets. 
+\fBgdalbuildvrt\fP - Build a VRT from a list of datasets\&. 
 .IP "\(bu" 2
-\fBgdal_contour\fP - Contours from DEM. 
+\fBgdal_contour\fP - Contours from DEM\&. 
 .IP "\(bu" 2
-\fBgdaldem\fP - Tools to analyze and visualize DEMs. 
+\fBgdaldem\fP - Tools to analyze and visualize DEMs\&. 
 .IP "\(bu" 2
-\fBrgb2pct.py\fP - Convert a 24bit RGB image to 8bit paletted. 
+\fBrgb2pct\&.py\fP - Convert a 24bit RGB image to 8bit paletted\&. 
 .IP "\(bu" 2
-\fBpct2rgb.py\fP - Convert an 8bit paletted image to 24bit RGB. 
+\fBpct2rgb\&.py\fP - Convert an 8bit paletted image to 24bit RGB\&. 
 .IP "\(bu" 2
-\fBgdal_merge.py\fP - Build a quick mosaic from a set of images. 
+\fBgdal_merge\&.py\fP - Build a quick mosaic from a set of images\&. 
 .IP "\(bu" 2
-\fBgdal2tiles.py\fP - Create a TMS tile structure, KML and simple web viewer. 
+\fBgdal2tiles\&.py\fP - Create a TMS tile structure, KML and simple web viewer\&. 
 .IP "\(bu" 2
-\fBgdal_rasterize\fP - Rasterize vectors into raster file. 
+\fBgdal_rasterize\fP - Rasterize vectors into raster file\&. 
 .IP "\(bu" 2
-\fBgdaltransform\fP - Transform coordinates. 
+\fBgdaltransform\fP - Transform coordinates\&. 
 .IP "\(bu" 2
-\fBnearblack\fP - Convert nearly black/white borders to exact value. 
+\fBnearblack\fP - Convert nearly black/white borders to exact value\&. 
 .IP "\(bu" 2
-\fBgdal_retile.py\fP - Retiles a set of tiles and/or build tiled pyramid levels. 
+\fBgdal_retile\&.py\fP - Retiles a set of tiles and/or build tiled pyramid levels\&. 
 .IP "\(bu" 2
-\fBgdal_grid\fP - Create raster from the scattered data. 
+\fBgdal_grid\fP - Create raster from the scattered data\&. 
 .IP "\(bu" 2
-\fBgdal_proximity.py\fP - Compute a raster proximity map. 
+\fBgdal_proximity\&.py\fP - Compute a raster proximity map\&. 
 .IP "\(bu" 2
-\fBgdal_polygonize.py\fP - Generate polygons from raster. 
+\fBgdal_polygonize\&.py\fP - Generate polygons from raster\&. 
 .IP "\(bu" 2
-\fBgdal_sieve.py\fP - Raster Sieve filter. 
+\fBgdal_sieve\&.py\fP - Raster Sieve filter\&. 
 .IP "\(bu" 2
-\fBgdal_fillnodata.py\fP - Interpolate in nodata regions. 
+\fBgdal_fillnodata\&.py\fP - Interpolate in nodata regions\&. 
 .IP "\(bu" 2
-\fBgdallocationinfo\fP - Query raster at a location. 
+\fBgdallocationinfo\fP - Query raster at a location\&. 
 .IP "\(bu" 2
-\fBgdalsrsinfo\fP - Report a given SRS in different formats. (GDAL >= 1.9.0) 
+\fBgdalsrsinfo\fP - Report a given SRS in different formats\&. (GDAL >= 1\&.9\&.0) 
 .IP "\(bu" 2
-\fBgdalmove.py\fP - Transform the coordinate system of a file (GDAL >= 1.10) 
+\fBgdalmove\&.py\fP - Transform the coordinate system of a file (GDAL >= 1\&.10) 
 .IP "\(bu" 2
-\fBgdal_edit.py\fP - Edit in place various information of an existing GDAL dataset (projection, geotransform, nodata, metadata) 
+\fBgdal_edit\&.py\fP - Edit in place various information of an existing GDAL dataset (projection, geotransform, nodata, metadata) 
 .IP "\(bu" 2
-\fBgdal_calc.py\fP - Command line raster calculator with numpy syntax 
+\fBgdal_calc\&.py\fP - Command line raster calculator with numpy syntax 
 .IP "\(bu" 2
-\fBgdal-config\fP - Get options required to build software using GDAL. 
+\fBgdal-config\fP - Get options required to build software using GDAL\&. 
 .IP "\(bu" 2
-\fBgdalmanage\fP - Identify, copy, rename and delete raster. 
+\fBgdalmanage\fP - Identify, copy, rename and delete raster\&. 
 .IP "\(bu" 2
-\fBgdalcompare.py\fP - Compare two images and report on differences. 
+\fBgdalcompare\&.py\fP - Compare two images and report on differences\&. 
 .PP
 .SH "Creating New Files"
 .PP
-Access an existing file to read it is generally quite simple. Just indicate the name of the file or dataset on the commandline. However, creating a file is more complicated. It may be necessary to indicate the the format to create, various creation options affecting how it will be created and perhaps a coordinate system to be assigned. Many of these options are handled similarly by different GDAL utilities, and are introduced here. 
+Access an existing file to read it is generally quite simple\&. Just indicate the name of the file or dataset on the commandline\&. However, creating a file is more complicated\&. It may be necessary to indicate the format to create, various creation options affecting how it will be created and perhaps a coordinate system to be assigned\&. Many of these options are handled similarly by different GDAL utilities, and are introduced here\&. 
 .PP
 .IP "\fB\fB-of\fP \fIformat\fP\fP" 1c
-Select the format to create the new file as. The formats are assigned short names such as GTiff (for GeoTIFF) or HFA (for Erdas Imagine). The list of all format codes can be listed with the \fB--formats\fP switch. Only formats list as '(rw)' (read-write) can be written.
+Select the format to create the new file as\&. The formats are assigned short names such as GTiff (for GeoTIFF) or HFA (for Erdas Imagine)\&. The list of all format codes can be listed with the \fB--formats\fP switch\&. Only formats listed as '(rw)' (read-write) can be written\&.
 .PP
-Many utilities default to creating GeoTIFF files if a format is not specified. File extensions are not used to guess output format, nor are extensions generally added by GDAL if not indicated in the filename by the user. 
+Many utilities default to creating GeoTIFF files if a format is not specified\&. File extensions are not used to guess output format, nor are extensions generally added by GDAL if not indicated in the filename by the user\&. 
 .PP
 .IP "\fB\fB-co\fP \fINAME=VALUE\fP\fP" 1c
-Many formats have one or more optional creation options that can be used to control particulars about the file created. For instance, the GeoTIFF driver supports creation options to control compression, and whether the file should be tiled.
+Many formats have one or more optional creation options that can be used to control particulars about the file created\&. For instance, the GeoTIFF driver supports creation options to control compression, and whether the file should be tiled\&.
 .PP
-The creation options available vary by format driver, and some simple formats have no creation options at all. A list of options supported for a format can be listed with the '--format <format>' commandline option but the web page for the format is the definitive source of information on driver creation options.
+The creation options available vary by format driver, and some simple formats have no creation options at all\&. A list of options supported for a format can be listed with the '--format <format>' commandline option but the web page for the format is the definitive source of information on driver creation options\&.
 .PP
 .PP
 .IP "\fB\fB-a_srs\fP \fISRS\fP\fP" 1c
-Several utilities, (gdal_translate and gdalwarp) include the ability to specify coordinate systems with commandline options like \fB-a_srs\fP (assign SRS to output), \fB-s_srs\fP (source SRS) and \fB-t_srs\fP (target SRS).
+Several utilities, (gdal_translate and gdalwarp) include the ability to specify coordinate systems with commandline options like \fB-a_srs\fP (assign SRS to output), \fB-s_srs\fP (source SRS) and \fB-t_srs\fP (target SRS)\&.
 .PP
-These utilities allow the coordinate system (SRS = spatial reference system) to be assigned in a variety of formats.
+These utilities allow the coordinate system (SRS = spatial reference system) to be assigned in a variety of formats\&.
 .PP
 .PP
 .PD 0
 .IP "\(bu" 2
-\fBNAD27\fP/\fBNAD83\fP/\fBWGS84\fP/\fBWGS72\fP: These common geographic (lat/long) coordinate systems can be used directly by these names.
+\fBNAD27\fP/\fBNAD83\fP/\fBWGS84\fP/\fBWGS72\fP: These common geographic (lat/long) coordinate systems can be used directly by these names\&.
 .PP
 
 .IP "\(bu" 2
-\fBEPSG:\fP\fIn\fP: Coordinate systems (projected or geographic) can be selected based on their EPSG codes, for instance EPSG:27700 is the British National Grid. A list of EPSG coordinate systems can be found in the GDAL data files gcs.csv and pcs.csv.
+\fBEPSG:\fP\fIn\fP: Coordinate systems (projected or geographic) can be selected based on their EPSG codes, for instance EPSG:27700 is the British National Grid\&. A list of EPSG coordinate systems can be found in the GDAL data files gcs\&.csv and pcs\&.csv\&.
 .PP
 
 .IP "\(bu" 2
-\fIPROJ.4 Definitions\fP: A PROJ.4 definition string can be used as a coordinate system. For instance '+proj=utm +zone=11 +datum=WGS84'. Take care to keep the proj.4 string together as a single argument to the command (usually by double quoting). 
+\fIPROJ\&.4 Definitions\fP: A PROJ\&.4 definition string can be used as a coordinate system\&. For instance '+proj=utm +zone=11 +datum=WGS84'\&. Take care to keep the proj\&.4 string together as a single argument to the command (usually by double quoting)\&. 
 .PP
 .PP
 
 .IP "\(bu" 2
-\fIOpenGIS Well Known Text\fP: The Open GIS Consortium has defined a textual format for describing coordinate systems as part of the Simple Features specifications. This format is the internal working format for coordinate systems used in GDAL. The name of a file containing a WKT coordinate system definition may be used a coordinate system argument, or the entire coordinate system itself may be used as a commandline option (though escaping all the quotes in WKT is quite challenging). 
+\fIOpenGIS Well Known Text\fP: The Open GIS Consortium has defined a textual format for describing coordinate systems as part of the Simple Features specifications\&. This format is the internal working format for coordinate systems used in GDAL\&. The name of a file containing a WKT coordinate system definition may be used as a coordinate system argument, or the entire coordinate system itself may be used as a commandline option (though escaping all the quotes in WKT is quite challenging)\&. 
 .PP
 .PP
 
 .IP "\(bu" 2
-\fIESRI Well Known Text\fP: ESRI uses a slight variation on OGC WKT format in their ArcGIS product (ArcGIS .prj files), and these may be used in a similar manner to WKT files, but the filename should be prefixed with \fBESRI::\fP. For example \fB'ESRI::NAD 1927 StatePlane Wyoming West FIPS 4904.prj'\fP. 
+\fIESRI Well Known Text\fP: ESRI uses a slight variation on OGC WKT format in their ArcGIS product (ArcGIS \&.prj files), and these may be used in a similar manner to WKT files, but the filename should be prefixed with \fBESRI::\fP\&. For example \fB'ESRI::NAD 1927 StatePlane Wyoming West FIPS 4904\&.prj'\fP\&. 
 .PP
 .PP
 
 .IP "\(bu" 2
-\fISpatial References from URLs\fP: For example http://spatialreference.org/ref/user/north-pacific-albers-conic-equal-area/.
+\fISpatial References from URLs\fP: For example http://spatialreference.org/ref/user/north-pacific-albers-conic-equal-area/\&.
 .PP
 .PP
 
 .IP "\(bu" 2
-\fIfilename\fP: The name of a file containing WKT, PROJ.4 strings, or XML/GML coordinate system definitions can be provided. 
+\fIfilename\fP: The name of a file containing WKT, PROJ\&.4 strings, or XML/GML coordinate system definitions can be provided\&. 
 .PP
 .PP
 
@@ -127,28 +124,28 @@ These utilities allow the coordinate system (SRS = spatial reference system) to
 .PP
 .SH "General Command Line Switches"
 .PP
-All GDAL command line utility programs support the following 'general' options.
+All GDAL command line utility programs support the following 'general' options\&.
 .PP
 .IP "\fB\fB--version\fP\fP" 1c
-Report the version of GDAL and exit.
+Report the version of GDAL and exit\&.
 .PP
 .IP "\fB\fB--formats\fP\fP" 1c
-List all raster formats supported by this GDAL build (read-only and read-write) and exit. The format support is indicated as follows: 'ro' is read-only driver; 'rw' is read or write (ie. supports CreateCopy); 'rw+' is read, write and update (ie. supports Create). A 'v' is appended for formats supporting virtual IO (/vsimem, /vsigzip, /vsizip, etc). A 's' is appended for formats supporting subdatasets. Note: The valid formats for the output of gdalwarp are formats that support the Create( [...]
+List all raster formats supported by this GDAL build (read-only and read-write) and exit\&. The format support is indicated as follows: 'ro' is read-only driver; 'rw' is read or write (ie\&. supports CreateCopy); 'rw+' is read, write and update (ie\&. supports Create)\&. A 'v' is appended for formats supporting virtual IO (/vsimem, /vsigzip, /vsizip, etc)\&. A 's' is appended for formats supporting subdatasets\&. Note: The valid formats for the output of gdalwarp are formats that support [...]
 .PP
 .IP "\fB\fB--format\fP \fIformat\fP\fP" 1c
-List detailed information about a single format driver. The \fIformat\fP should be the short name reported in the \fB--formats\fP list, such as GTiff.
+List detailed information about a single format driver\&. The \fIformat\fP should be the short name reported in the \fB--formats\fP list, such as GTiff\&.
 .PP
 .IP "\fB\fB--optfile\fP \fIfile\fP\fP" 1c
-Read the named file and substitute the contents into the commandline options list. Lines beginning with # will be ignored. Multi-word arguments may be kept together with double quotes. 
+Read the named file and substitute the contents into the commandline options list\&. Lines beginning with # will be ignored\&. Multi-word arguments may be kept together with double quotes\&. 
 .PP
 .IP "\fB\fB--config\fP \fIkey value\fP\fP" 1c
-Sets the named \fCconfiguration keyword\fP to the given value, as opposed to setting them as environment variables. Some common configuration keywords are GDAL_CACHEMAX (memory used internally for caching in megabytes) and GDAL_DATA (path of the GDAL 'data' directory). Individual drivers may be influenced by other configuration options. 
+Sets the named \fCconfiguration keyword\fP to the given value, as opposed to setting them as environment variables\&. Some common configuration keywords are GDAL_CACHEMAX (memory used internally for caching in megabytes) and GDAL_DATA (path of the GDAL 'data' directory)\&. Individual drivers may be influenced by other configuration options\&. 
 .PP
 .IP "\fB\fB--debug\fP \fIvalue\fP\fP" 1c
-Control what debugging messages are emitted. A value of \fION\fP will enable all debug messages. A value of \fIOFF\fP will disable all debug messages. Another value will select only debug messages containing that string in the debug prefix code. 
+Control what debugging messages are emitted\&. A value of \fION\fP will enable all debug messages\&. A value of \fIOFF\fP will disable all debug messages\&. Another value will select only debug messages containing that string in the debug prefix code\&. 
 .PP
 .IP "\fB\fB--help-general\fP\fP" 1c
-Gives a brief usage message for the generic GDAL commandline options and exit. 
+Gives a brief usage message for the generic GDAL commandline options and exit\&. 
 .PP
 .PP
 .PP
diff --git a/man/man1/gdaladdo.1 b/man/man1/gdaladdo.1
index 0667339..01e8f8c 100644
--- a/man/man1/gdaladdo.1
+++ b/man/man1/gdaladdo.1
@@ -1,12 +1,9 @@
-.TH "gdaladdo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdaladdo" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdaladdo \- .TH "gdaladdo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdaladdo \- builds or rebuilds overview images
+gdaladdo \- gdaladdo 
+builds or rebuilds overview images
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -19,40 +16,40 @@ gdaladdo [-r {nearest,average,gauss,cubic,average_mp,average_magphase,mode}]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdaladdo utility can be used to build or rebuild overview images for most supported file formats with one of several downsampling algorithms.
+The gdaladdo utility can be used to build or rebuild overview images for most supported file formats with one of several downsampling algorithms\&.
 .PP
 .IP "\fB\fB-r\fP \fI{nearest (default),average,gauss,cubic,average_mp,average_magphase,mode}\fP:\fP" 1c
-Select a resampling algorithm. 
+Select a resampling algorithm\&. 
 .IP "\fB\fB-b\fP \fIband\fP:\fP" 1c
-(available from GDAL 1.10) Select an input band \fIband\fP for overview generation. Band numbering starts from 1. Multiple \fB-b\fP switches may be used to select a set of input bands to generate overviews. 
+(available from GDAL 1\&.10) Select an input band \fIband\fP for overview generation\&. Band numbering starts from 1\&. Multiple \fB-b\fP switches may be used to select a set of input bands to generate overviews\&. 
 .IP "\fB\fB-ro\fP:\fP" 1c
-(available from GDAL 1.6.0) open the dataset in read-only mode, in order to generate external overview (for GeoTIFF especially).  
+(available from GDAL 1\&.6\&.0) open the dataset in read-only mode, in order to generate external overview (for GeoTIFF especially)\&.  
 .IP "\fB\fB-clean\fP:\fP" 1c
-(available from GDAL 1.7.0) remove all overviews.  
+(available from GDAL 1\&.7\&.0) remove all overviews\&.  
 .IP "\fB\fIfilename\fP:\fP" 1c
-The file to build overviews for (or whose overviews must be removed).  
+The file to build overviews for (or whose overviews must be removed)\&.  
 .IP "\fB\fIlevels\fP:\fP" 1c
-A list of integral overview levels to build. Ignored with -clean option. 
+A list of integral overview levels to build\&. Ignored with -clean option\&. 
 .PP
 .PP
-\fIMode\fP (available from GDAL 1.6.0) selects the value which appears most often of all the sampled points. \fIaverage_mp\fP is unsuitable for use. \fIAverage_magphase\fP averages complex data in mag/phase space. \fINearest\fP and \fIaverage\fP are applicable to normal image data. \fINearest\fP applies a nearest neighbour (simple sampling) resampler, while \fIaverage\fP computes the average of all non-NODATA contributing pixels. \fICubic\fP resampling (available from GDAL 1.7.0) applies [...]
+\fIMode\fP (available from GDAL 1\&.6\&.0) selects the value which appears most often of all the sampled points\&. \fIaverage_mp\fP is unsuitable for use\&. \fIAverage_magphase\fP averages complex data in mag/phase space\&. \fINearest\fP and \fIaverage\fP are applicable to normal image data\&. \fINearest\fP applies a nearest neighbour (simple sampling) resampler, while \fIaverage\fP computes the average of all non-NODATA contributing pixels\&. \fICubic\fP resampling (available from GDAL  [...]
 .PP
-gdaladdo will honour properly NODATA_VALUES tuples (special dataset metadata) so that only a given RGB triplet (in case of a RGB image) will be considered as the nodata value and not each value of the triplet independently per band.
+gdaladdo will honour properly NODATA_VALUES tuples (special dataset metadata) so that only a given RGB triplet (in case of a RGB image) will be considered as the nodata value and not each value of the triplet independently per band\&.
 .PP
-Selecting a level value like \fI2\fP causes an overview level that is 1/2 the resolution (in each dimension) of the base layer to be computed. If the file has existing overview levels at a level selected, those levels will be recomputed and rewritten in place.
+Selecting a level value like \fI2\fP causes an overview level that is 1/2 the resolution (in each dimension) of the base layer to be computed\&. If the file has existing overview levels at a level selected, those levels will be recomputed and rewritten in place\&.
 .PP
-For internal GeoTIFF overviews (or external overviews in GeoTIFF format), note that -clean does not shrink the file. A later run of gdaladdo with overview levels will cause the file to be expanded, rather than reusing the space of the previously deleted overviews. If you just want to change the resampling method on a file that already has overviews computed, you don't need to clean the existing overviews.
+For internal GeoTIFF overviews (or external overviews in GeoTIFF format), note that -clean does not shrink the file\&. A later run of gdaladdo with overview levels will cause the file to be expanded, rather than reusing the space of the previously deleted overviews\&. If you just want to change the resampling method on a file that already has overviews computed, you don't need to clean the existing overviews\&.
 .PP
-Some format drivers do not support overviews at all. Many format drivers store overviews in a secondary file with the extension .ovr that is actually in TIFF format. By default, the GeoTIFF driver stores overviews internally to the file operated on (if it is writeable), unless the -ro flag is specified.
+Some format drivers do not support overviews at all\&. Many format drivers store overviews in a secondary file with the extension \&.ovr that is actually in TIFF format\&. By default, the GeoTIFF driver stores overviews internally to the file operated on (if it is writeable), unless the -ro flag is specified\&.
 .PP
-Most drivers also support an alternate overview format using Erdas Imagine format. To trigger this use the USE_RRD=YES configuration option. This will place the overviews in an associated .aux file suitable for direct use with Imagine or ArcGIS as well as GDAL applications. (eg --config USE_RRD YES)
+Most drivers also support an alternate overview format using Erdas Imagine format\&. To trigger this use the USE_RRD=YES configuration option\&. This will place the overviews in an associated \&.aux file suitable for direct use with Imagine or ArcGIS as well as GDAL applications\&. (eg --config USE_RRD YES)
 .SH "External overviews in GeoTIFF format"
 .PP
-External overviews created in TIFF format may be compressed using the COMPRESS_OVERVIEW configuration option. All compression methods, supported by the GeoTIFF driver, are available here. (eg --config COMPRESS_OVERVIEW DEFLATE). The photometric interpretation can be set with --config PHOTOMETRIC_OVERVIEW {RGB,YCBCR,...}, and the interleaving with --config INTERLEAVE_OVERVIEW {PIXEL|BAND}.
+External overviews created in TIFF format may be compressed using the COMPRESS_OVERVIEW configuration option\&. All compression methods, supported by the GeoTIFF driver, are available here\&. (eg --config COMPRESS_OVERVIEW DEFLATE)\&. The photometric interpretation can be set with --config PHOTOMETRIC_OVERVIEW {RGB,YCBCR,\&.\&.\&.}, and the interleaving with --config INTERLEAVE_OVERVIEW {PIXEL|BAND}\&.
 .PP
-For JPEG compressed external overviews, the JPEG quality can be set with '--config JPEG_QUALITY_OVERVIEW value' (GDAL 1.7.0 or later).
+For JPEG compressed external overviews, the JPEG quality can be set with '--config JPEG_QUALITY_OVERVIEW value' (GDAL 1\&.7\&.0 or later)\&.
 .PP
-For LZW or DEFLATE compressed external overviews, the predictor value can be set with '--config PREDICTOR_OVERVIEW 1|2|3' (GDAL 1.8.0 or later).
+For LZW or DEFLATE compressed external overviews, the predictor value can be set with '--config PREDICTOR_OVERVIEW 1|2|3' (GDAL 1\&.8\&.0 or later)\&.
 .PP
 To produce the smallest possible JPEG-In-TIFF overviews, you should use : 
 .PP
@@ -63,22 +60,22 @@ To produce the smallest possible JPEG-In-TIFF overviews, you should use :
 .fi
 .PP
 .PP
-Starting with GDAL 1.7.0, external overviews can be created in the BigTIFF format by using the BIGTIFF_OVERVIEW configuration option : --config BIGTIFF_OVERVIEW {IF_NEEDED|IF_SAFER|YES|NO}. The default value is IF_NEEDED. The behaviour of this option is exactly the same as the BIGTIFF creation option documented in the GeoTIFF driver documentation. 
+Starting with GDAL 1\&.7\&.0, external overviews can be created in the BigTIFF format by using the BIGTIFF_OVERVIEW configuration option : --config BIGTIFF_OVERVIEW {IF_NEEDED|IF_SAFER|YES|NO}\&. The default value is IF_NEEDED\&. The behaviour of this option is exactly the same as the BIGTIFF creation option documented in the GeoTIFF driver documentation\&. 
 .PD 0
 
 .IP "\(bu" 2
-YES forces BigTIFF. 
+YES forces BigTIFF\&. 
 .IP "\(bu" 2
-NO forces classic TIFF. 
+NO forces classic TIFF\&. 
 .IP "\(bu" 2
-IF_NEEDED will only create a BigTIFF if it is clearly needed (uncompressed, and overviews larger than 4GB). 
+IF_NEEDED will only create a BigTIFF if it is clearly needed (uncompressed, and overviews larger than 4GB)\&. 
 .IP "\(bu" 2
-IF_SAFER will create BigTIFF if the resulting file *might* exceed 4GB. 
+IF_SAFER will create BigTIFF if the resulting file *might* exceed 4GB\&. 
 .PP
 
 .br
 .PP
-See the documentation of the GeoTIFF driver for further explanations on all those options.
+See the documentation of the GeoTIFF driver for further explanations on all those options\&.
 .SH "EXAMPLE"
 .PP
 .PP
@@ -91,7 +88,7 @@ gdaladdo -r average abc.tif 2 4 8 16
 .fi
 .PP
 .PP
-Create an external compressed GeoTIFF overview file from the ERDAS .IMG file:
+Create an external compressed GeoTIFF overview file from the ERDAS \&.IMG file:
 .PP
 .PP
 .nf
diff --git a/man/man1/gdalbuildvrt.1 b/man/man1/gdalbuildvrt.1
index b41c3d0..c75b31f 100644
--- a/man/man1/gdalbuildvrt.1
+++ b/man/man1/gdalbuildvrt.1
@@ -1,12 +1,9 @@
-.TH "gdalbuildvrt" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdalbuildvrt" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdalbuildvrt \- .TH "gdalbuildvrt" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdalbuildvrt \- Builds a VRT from a list of datasets. (compiled by default since GDAL 1.6.1)
+gdalbuildvrt \- gdalbuildvrt 
+Builds a VRT from a list of datasets\&. (compiled by default since GDAL 1\&.6\&.1)
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -25,68 +22,68 @@ gdalbuildvrt [-tileindex field_name]
 .PP
 .SH "DESCRIPTION"
 .PP
-This program builds a VRT (Virtual Dataset) that is a mosaic of the list of input GDAL datasets. The list of input GDAL datasets can be specified at the end of the command line, or put in a text file (one filename per line) for very long lists, or it can be a MapServer tileindex (see \fBgdaltindex\fP utility). In the later case, all entries in the tile index will be added to the VRT.
+This program builds a VRT (Virtual Dataset) that is a mosaic of the list of input GDAL datasets\&. The list of input GDAL datasets can be specified at the end of the command line, or put in a text file (one filename per line) for very long lists, or it can be a MapServer tileindex (see \fBgdaltindex\fP utility)\&. In the latter case, all entries in the tile index will be added to the VRT\&.
 .PP
-With -separate, each files goes into a separate \fIstacked\fP band in the VRT band. Otherwise, the files are considered as tiles of a larger mosaic and the VRT file has as many bands as one of the input files.
+With -separate, each file goes into a separate \fIstacked\fP band in the VRT band\&. Otherwise, the files are considered as tiles of a larger mosaic and the VRT file has as many bands as one of the input files\&.
 .PP
-If one GDAL dataset is made of several subdatasets and has 0 raster bands, all the subdatasets will be added to the VRT rather than the dataset itself.
+If one GDAL dataset is made of several subdatasets and has 0 raster bands, all the subdatasets will be added to the VRT rather than the dataset itself\&.
 .PP
-gdalbuildvrt does some amount of checks to assure that all files that will be put in the resulting VRT have similar characteristics : number of bands, projection, color interpretation... If not, files that do not match the common characteristics will be skipped. (This is only true in the default mode, and not when using the -separate option)
+gdalbuildvrt does some amount of checks to assure that all files that will be put in the resulting VRT have similar characteristics : number of bands, projection, color interpretation\&.\&.\&. If not, files that do not match the common characteristics will be skipped\&. (This is only true in the default mode, and not when using the -separate option)
 .PP
-If there is some amount of spatial overlapping between files, the order may depend on the order they are inserted in the VRT file, but this behaviour should not be relied on.
+If there is some amount of spatial overlapping between files, the order may depend on the order they are inserted in the VRT file, but this behaviour should not be relied on\&.
 .PP
-This utility is somehow equivalent to the gdal_vrtmerge.py utility and is build by default in GDAL 1.6.1.
+This utility is somewhat equivalent to the gdal_vrtmerge\&.py utility and is built by default in GDAL 1\&.6\&.1\&.
 .PP
 .IP "\fB\fB-tileindex\fP:\fP" 1c
-Use the specified value as the tile index field, instead of the default value with is 'location'. 
+Use the specified value as the tile index field, instead of the default value which is 'location'\&. 
 .PP
 .IP "\fB\fB-resolution\fP {highest|lowest|average|user}:\fP" 1c
-In case the resolution of all input files is not the same, the -resolution flag enables the user to control the way the output resolution is computed. 'average' is the default. 'highest' will pick the smallest values of pixel dimensions within the set of source rasters. 'lowest' will pick the largest values of pixel dimensions within the set of source rasters. 'average' will compute an average of pixel dimensions within the set of source rasters. 'user' is new in GDAL 1.7.0 and must be u [...]
+In case the resolution of all input files is not the same, the -resolution flag enables the user to control the way the output resolution is computed\&. 'average' is the default\&. 'highest' will pick the smallest values of pixel dimensions within the set of source rasters\&. 'lowest' will pick the largest values of pixel dimensions within the set of source rasters\&. 'average' will compute an average of pixel dimensions within the set of source rasters\&. 'user' is new in GDAL 1\&.7\&.0 [...]
 .PP
 .IP "\fB\fB-tr\fP xres yres :\fP" 1c
-(starting with GDAL 1.7.0) set target resolution. The values must be expressed in georeferenced units. Both must be positive values. Specifying those values is of course incompatible with highest|lowest|average values for -resolution option. 
+(starting with GDAL 1\&.7\&.0) set target resolution\&. The values must be expressed in georeferenced units\&. Both must be positive values\&. Specifying those values is of course incompatible with highest|lowest|average values for -resolution option\&. 
 .PP
 .IP "\fB\fB-tap\fP:\fP" 1c
-(GDAL >= 1.8.0) (target aligned pixels) align the coordinates of the extent of the output file to the values of the -tr, such that the aligned extent includes the minimum extent.
+(GDAL >= 1\&.8\&.0) (target aligned pixels) align the coordinates of the extent of the output file to the values of the -tr, such that the aligned extent includes the minimum extent\&.
 .PP
 .IP "\fB\fB-te\fP xmin ymin xmax ymax :\fP" 1c
-(starting with GDAL 1.7.0) set georeferenced extents of VRT file. The values must be expressed in georeferenced units. If not specified, the extent of the VRT is the minimum bounding box of the set of source rasters. 
+(starting with GDAL 1\&.7\&.0) set georeferenced extents of VRT file\&. The values must be expressed in georeferenced units\&. If not specified, the extent of the VRT is the minimum bounding box of the set of source rasters\&. 
 .PP
 .IP "\fB\fB-addalpha\fP:\fP" 1c
-(starting with GDAL 1.7.0) Adds an alpha mask band to the VRT when the source raster have none. Mainly useful for RGB sources (or grey-level sources). The alpha band is filled on-the-fly with the value 0 in areas without any source raster, and with value 255 in areas with source raster. The effect is that a RGBA viewer will render the areas without source rasters as transparent and areas with source rasters as opaque. This option is not compatible with -separate.
+(starting with GDAL 1\&.7\&.0) Adds an alpha mask band to the VRT when the source rasters have none\&. Mainly useful for RGB sources (or grey-level sources)\&. The alpha band is filled on-the-fly with the value 0 in areas without any source raster, and with value 255 in areas with source raster\&. The effect is that a RGBA viewer will render the areas without source rasters as transparent and areas with source rasters as opaque\&. This option is not compatible with -separate\&.
 .PP
 .IP "\fB\fB-hidenodata\fP:\fP" 1c
-(starting with GDAL 1.7.0) Even if any band contains nodata value, giving this option makes the VRT band not report the NoData. Useful when you want to control the background color of the dataset. By using along with the -addalpha option, you can prepare a dataset which doesn't report nodata value but is transparent in areas with no data.
+(starting with GDAL 1\&.7\&.0) Even if any band contains nodata value, giving this option makes the VRT band not report the NoData\&. Useful when you want to control the background color of the dataset\&. By using along with the -addalpha option, you can prepare a dataset which doesn't report nodata value but is transparent in areas with no data\&.
 .PP
-.IP "\fB\fB-srcnodata\fP \fIvalue [value...]\fP:\fP" 1c
-(starting with GDAL 1.7.0) Set nodata values for input bands (different values can be supplied for each band). If more than one value is supplied all values should be quoted to keep them together as a single operating system argument. If the option is not specified, the intrinsic nodata settings on the source datasets will be used (if they exist). The value set by this option is written in the NODATA element of each ComplexSource element. Use a value of \fCNone\fP to ignore intrinsic nod [...]
+.IP "\fB\fB-srcnodata\fP \fIvalue [value\&.\&.\&.]\fP:\fP" 1c
+(starting with GDAL 1\&.7\&.0) Set nodata values for input bands (different values can be supplied for each band)\&. If more than one value is supplied all values should be quoted to keep them together as a single operating system argument\&. If the option is not specified, the intrinsic nodata settings on the source datasets will be used (if they exist)\&. The value set by this option is written in the NODATA element of each ComplexSource element\&. Use a value of \fCNone\fP to ignore i [...]
 .PP
 .IP "\fB\fB-b\fP \fIband\fP:\fP" 1c
-(GDAL >= 1.10.0) Select an input \fIband\fP to be processed. Bands are numbered from 1. If input bands not set all bands will be added to vrt
+(GDAL >= 1\&.10\&.0) Select an input \fIband\fP to be processed\&. Bands are numbered from 1\&. If input bands not set all bands will be added to vrt
 .PP
 .IP "\fB\fB-sd\fP \fIsubdataset\fP\fP" 1c
-(GDAL >= 1.10.0) If the input dataset contains several subdatasets use a subdataset with the specified number (starting from 1). This is an alternative of giving the full subdataset name as an input.
+(GDAL >= 1\&.10\&.0) If the input dataset contains several subdatasets use a subdataset with the specified number (starting from 1)\&. This is an alternative of giving the full subdataset name as an input\&.
 .PP
-.IP "\fB\fB-vrtnodata\fP \fIvalue [value...]\fP:\fP" 1c
-(starting with GDAL 1.7.0) Set nodata values at the VRT band level (different values can be supplied for each band). If more than one value is supplied all values should be quoted to keep them together as a single operating system argument. If the option is not specified, intrinsic nodata settings on the first dataset will be used (if they exist). The value set by this option is written in the NoDataValue element of each VRTRasterBand element. Use a value of \fCNone\fP to ignore intrinsi [...]
+.IP "\fB\fB-vrtnodata\fP \fIvalue [value\&.\&.\&.]\fP:\fP" 1c
+(starting with GDAL 1\&.7\&.0) Set nodata values at the VRT band level (different values can be supplied for each band)\&. If more than one value is supplied all values should be quoted to keep them together as a single operating system argument\&. If the option is not specified, intrinsic nodata settings on the first dataset will be used (if they exist)\&. The value set by this option is written in the NoDataValue element of each VRTRasterBand element\&. Use a value of \fCNone\fP to ign [...]
 .PP
 .IP "\fB\fB-separate\fP:\fP" 1c
-(starting with GDAL 1.7.0) Place each input file into a separate \fIstacked\fP band. In that case, only the first band of each dataset will be placed into a new band. Contrary to the default mode, it is not required that all bands have the same datatype. 
+(starting with GDAL 1\&.7\&.0) Place each input file into a separate \fIstacked\fP band\&. In that case, only the first band of each dataset will be placed into a new band\&. Contrary to the default mode, it is not required that all bands have the same datatype\&. 
 .PP
 .IP "\fB\fB-allow_projection_difference\fP:\fP" 1c
-(starting with GDAL 1.7.0) When this option is specified, the utility will accept to make a VRT even if the input datasets have not the same projection. Note: this does not mean that they will be reprojected. Their projection will just be ignored. 
+(starting with GDAL 1\&.7\&.0) When this option is specified, the utility will accept to make a VRT even if the input datasets have not the same projection\&. Note: this does not mean that they will be reprojected\&. Their projection will just be ignored\&. 
 .PP
 .IP "\fB\fB-a_srs\fP \fIsrs_def\fP:\fP" 1c
-(starting with GDAL 1.10) Override the projection for the output file. The \fIsrs_def\fP may be any of the usual GDAL/OGR forms, complete WKT, PROJ.4, EPSG:n or a file containing the WKT. 
+(starting with GDAL 1\&.10) Override the projection for the output file\&. The \fIsrs_def\fP may be any of the usual GDAL/OGR forms, complete WKT, PROJ\&.4, EPSG:n or a file containing the WKT\&. 
 .PP
 .IP "\fB\fB-input_file_list\fP:\fP" 1c
 To specify a text file with an input filename on each line 
 .PP
 .IP "\fB\fB-q\fP:\fP" 1c
-(starting with GDAL 1.7.0) To disable the progress bar on the console 
+(starting with GDAL 1\&.7\&.0) To disable the progress bar on the console 
 .PP
 .IP "\fB\fB-overwrite\fP:\fP" 1c
-Overwrite the VRT if it already exists.
+Overwrite the VRT if it already exists\&.
 .PP
 .PP
 .SH "EXAMPLE"
diff --git a/man/man1/gdalcompare.1 b/man/man1/gdalcompare.1
index 3625889..37d307f 100644
--- a/man/man1/gdalcompare.1
+++ b/man/man1/gdalcompare.1
@@ -1,12 +1,9 @@
-.TH "gdalcompare" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdalcompare" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdalcompare \- .TH "gdalcompare" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdalcompare \- compare two images
+gdalcompare \- gdalcompare\&.py 
+compare two images
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -17,22 +14,22 @@ gdalcompare.py [-sds] golden_file new_file
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdalcompare.py script compares two GDAL supported datasets and reports the differences. In addition to reporting differences to the standard out the script will also return the difference count in it's exit value.
+The gdalcompare\&.py script compares two GDAL supported datasets and reports the differences\&. In addition to reporting differences to the standard out the script will also return the difference count in its exit value\&.
 .PP
-Image pixels, and various metadata are checked. There is also a byte by byte comparison done which will count as one difference. So if it is only important that the GDAL visible data is identical a difference count of 1 (the binary difference) should be considered acceptable.
+Image pixels, and various metadata are checked\&. There is also a byte by byte comparison done which will count as one difference\&. So if it is only important that the GDAL visible data is identical a difference count of 1 (the binary difference) should be considered acceptable\&.
 .PP
 .IP "\fB\fB-sds\fP:\fP" 1c
-If this flag is passed the script will compare all subdatasets that are part of the dataset, otherwise subdatasets are ignored.
+If this flag is passed the script will compare all subdatasets that are part of the dataset, otherwise subdatasets are ignored\&.
 .PP
 .IP "\fB\fIgolden_file\fP:\fP" 1c
-The file that is considered correct, referred to as the golden file.
+The file that is considered correct, referred to as the golden file\&.
 .PP
 .IP "\fB\fInew_file\fP:\fP" 1c
-The file being compared to the golden file, referred to as the new file.
+The file being compared to the golden file, referred to as the new file\&.
 .PP
 .PP
 .PP
-Note that the gdalcompare.py script can also be called as a library from python code though it is not typically in the python path for including. The primary entry point is gdalcompare.compare() which takes a golden gdal.Dataset and a new gdal.Dataset as arguments and returns a difference count (excluding the binary comparison). The gdalcompare.compare_sds() entry point can be used to compare subdatasets.
+Note that the gdalcompare\&.py script can also be called as a library from python code though it is not typically in the python path for including\&. The primary entry point is gdalcompare\&.compare() which takes a golden gdal\&.Dataset and a new gdal\&.Dataset as arguments and returns a difference count (excluding the binary comparison)\&. The gdalcompare\&.compare_sds() entry point can be used to compare subdatasets\&.
 .SH "AUTHORS"
 .PP
 Frank Warmerdam <warmerdam at pobox.com> 
diff --git a/man/man1/gdaldem.1 b/man/man1/gdaldem.1
index 8c8f062..9002539 100644
--- a/man/man1/gdaldem.1
+++ b/man/man1/gdaldem.1
@@ -1,12 +1,9 @@
-.TH "gdaldem" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdaldem" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdaldem \- .TH "gdaldem" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdaldem \- Tools to analyze and visualize DEMs. (since GDAL 1.7.0)
+gdaldem \- gdaldem 
+Tools to analyze and visualize DEMs\&. (since GDAL 1\&.7\&.0)
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -90,63 +87,63 @@ The input DEM raster to be processed
 .IP "\fB\fIoutput_xxx_map\fP:\fP" 1c
 The output raster produced 
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Select the output format. The default is GeoTIFF (GTiff). Use the short format name. 
+Select the output format\&. The default is GeoTIFF (GTiff)\&. Use the short format name\&. 
 .IP "\fB\fB-compute_edges\fP:\fP" 1c
-(GDAL >= 1.8.0) Do the computation at raster edges and near nodata values 
+(GDAL >= 1\&.8\&.0) Do the computation at raster edges and near nodata values 
 .IP "\fB\fB-alg\fP \fIZevenbergenThorne\fP:\fP" 1c
-(GDAL >= 1.8.0) Use Zevenbergen & Thorne formula, instead of Horn's formula, to compute slope & aspect. The litterature suggests Zevenbergen & Thorne to be more suited to smooth landscapes, whereas Horn's formula to perform better on rougher terrain. 
+(GDAL >= 1\&.8\&.0) Use Zevenbergen & Thorne formula, instead of Horn's formula, to compute slope & aspect\&. The literature suggests Zevenbergen & Thorne to be more suited to smooth landscapes, whereas Horn's formula to perform better on rougher terrain\&. 
 .IP "\fB\fB-b\fP \fIband\fP:\fP" 1c
-Select an input \fIband\fP to be processed. Bands are numbered from 1. 
+Select an input \fIband\fP to be processed\&. Bands are numbered from 1\&. 
 .IP "\fB\fB-co\fP \fI'NAME=VALUE'\fP:\fP" 1c
-Passes a creation option to the output format driver. Multiple \fB-co\fP options may be listed. See format specific documentation for legal creation options for each format. 
+Passes a creation option to the output format driver\&. Multiple \fB-co\fP options may be listed\&. See format specific documentation for legal creation options for each format\&. 
 .IP "\fB\fB-q\fP:\fP" 1c
-Suppress progress monitor and other non-error output. 
+Suppress progress monitor and other non-error output\&. 
 .PP
 .PP
-For all algorithms, except color-relief, a nodata value in the target dataset will be emitted if at least one pixel set to the nodata value is found in the 3x3 window centered around each source pixel. The consequence is that there will be a 1-pixel border around each image set with nodata value. From GDAL 1.8.0, if -compute_edges is specified, gdaldem will compute values at image edges or if a nodata value is found in the 3x3 window, by interpolating missing values.
+For all algorithms, except color-relief, a nodata value in the target dataset will be emitted if at least one pixel set to the nodata value is found in the 3x3 window centered around each source pixel\&. The consequence is that there will be a 1-pixel border around each image set with nodata value\&. From GDAL 1\&.8\&.0, if -compute_edges is specified, gdaldem will compute values at image edges or if a nodata value is found in the 3x3 window, by interpolating missing values\&.
 .SH "Modes"
 .PP
 .SS "hillshade"
-This command outputs an 8-bit raster with a nice shaded relief effect. It’s very useful for visualizing the terrain. You can optionally specify the azimuth and altitude of the light source, a vertical exaggeration factor and a scaling factor to account for differences between vertical and horizontal units.
+This command outputs an 8-bit raster with a nice shaded relief effect\&. It’s very useful for visualizing the terrain\&. You can optionally specify the azimuth and altitude of the light source, a vertical exaggeration factor and a scaling factor to account for differences between vertical and horizontal units\&.
 .PP
-The value 0 is used as the output nodata value.
+The value 0 is used as the output nodata value\&.
 .PP
 The following specific options are available : 
 .IP "\fB\fB-z\fP \fIzFactor\fP:\fP" 1c
 vertical exaggeration used to pre-multiply the elevations 
 .IP "\fB\fB-s\fP \fIscale\fP:\fP" 1c
-ratio of vertical units to horizontal. If the horizontal unit of the source DEM is degrees (e.g Lat/Long WGS84 projection), you can use scale=111120 if the vertical units are meters (or scale=370400 if they are in feet) 
+ratio of vertical units to horizontal\&. If the horizontal unit of the source DEM is degrees (e\&.g Lat/Long WGS84 projection), you can use scale=111120 if the vertical units are meters (or scale=370400 if they are in feet) 
 .IP "\fB\fB-az\fP \fIazimuth\fP:\fP" 1c
-azimuth of the light, in degrees. 0 if it comes from the top of the raster, 90 from the east, ... The default value, 315, should rarely be changed as it is the value generally used to generate shaded maps. 
+azimuth of the light, in degrees\&. 0 if it comes from the top of the raster, 90 from the east, \&.\&.\&. The default value, 315, should rarely be changed as it is the value generally used to generate shaded maps\&. 
 .IP "\fB\fB-alt\fP \fIaltitude\fP:\fP" 1c
-altitude of the light, in degrees. 90 if the light comes from above the DEM, 0 if it is raking light. 
+altitude of the light, in degrees\&. 90 if the light comes from above the DEM, 0 if it is raking light\&. 
 .IP "\fB\fB-combined\fP \fIcombined shading\fP:\fP" 1c
-(starting with GDAL 1.10) a combination of slope and oblique shading. 
+(starting with GDAL 1\&.10) a combination of slope and oblique shading\&. 
 .PP
 .SS "slope"
-This command will take a DEM raster and output a 32-bit float raster with slope values. You have the option of specifying the type of slope value you want: degrees or percent slope. In cases where the horizontal units differ from the vertical units, you can also supply a scaling factor.
+This command will take a DEM raster and output a 32-bit float raster with slope values\&. You have the option of specifying the type of slope value you want: degrees or percent slope\&. In cases where the horizontal units differ from the vertical units, you can also supply a scaling factor\&.
 .PP
-The value -9999 is used as the output nodata value.
+The value -9999 is used as the output nodata value\&.
 .PP
 The following specific options are available : 
 .IP "\fB\fB-p\fP :\fP" 1c
-if specified, the slope will be expressed as percent slope. Otherwise, it is expressed as degrees 
+if specified, the slope will be expressed as percent slope\&. Otherwise, it is expressed as degrees 
 .IP "\fB\fB-s\fP \fIscale\fP:\fP" 1c
-ratio of vertical units to horizontal. If the horizontal unit of the source DEM is degrees (e.g Lat/Long WGS84 projection), you can use scale=111120 if the vertical units are meters (or scale=370400 if they are in feet) 
+ratio of vertical units to horizontal\&. If the horizontal unit of the source DEM is degrees (e\&.g Lat/Long WGS84 projection), you can use scale=111120 if the vertical units are meters (or scale=370400 if they are in feet) 
 .PP
 .SS "aspect"
-This command outputs a 32-bit float raster with values between 0° and 360° representing the azimuth that slopes are facing. The definition of the azimuth is such that : 0° means that the slope is facing the North, 90° it's facing the East, 180° it's facing the South and 270° it's facing the West (provided that the top of your input raster is north oriented). The aspect value -9999 is used as the nodata value to indicate undefined aspect in flat areas with slope=0.
+This command outputs a 32-bit float raster with values between 0° and 360° representing the azimuth that slopes are facing\&. The definition of the azimuth is such that : 0° means that the slope is facing the North, 90° it's facing the East, 180° it's facing the South and 270° it's facing the West (provided that the top of your input raster is north oriented)\&. The aspect value -9999 is used as the nodata value to indicate undefined aspect in flat areas with slope=0\&.
 .PP
 The following specifics options are available : 
 .IP "\fB\fB-trigonometric\fP:\fP" 1c
-return trigonometric angle instead of azimuth. Thus 0° means East, 90° North, 180° West, 270° South 
+return trigonometric angle instead of azimuth\&. Thus 0° means East, 90° North, 180° West, 270° South 
 .IP "\fB\fB-zero_for_flat\fP:\fP" 1c
 return 0 for flat areas with slope=0, instead of -9999 
 .PP
 .PP
-By using those 2 options, the aspect returned by gdaldem aspect should be identical to the one of GRASS r.slope.aspect. Otherwise, it's identical to the one of Matthew Perry's aspect.cpp utility.
+By using those 2 options, the aspect returned by gdaldem aspect should be identical to the one of GRASS r\&.slope\&.aspect\&. Otherwise, it's identical to the one of Matthew Perry's aspect\&.cpp utility\&.
 .SS "color-relief"
-This command outputs a 3-band (RGB) or 4-band (RGBA) raster with values are computed from the elevation and a text-based color configuration file, containing the association between various elevation values and the corresponding wished color. By default, the colors between the given elevation values are blended smoothly and the result is a nice colorized DEM. The -exact_color_entry or -nearest_color_entry options can be used to avoid that linear interpolation for values that don't match  [...]
+This command outputs a 3-band (RGB) or 4-band (RGBA) raster with values are computed from the elevation and a text-based color configuration file, containing the association between various elevation values and the corresponding wished color\&. By default, the colors between the given elevation values are blended smoothly and the result is a nice colorized DEM\&. The -exact_color_entry or -nearest_color_entry options can be used to avoid that linear interpolation for values that don't ma [...]
 .PP
 The following specifics options are available : 
 .IP "\fB\fIcolor_text_file\fP:\fP" 1c
@@ -154,24 +151,24 @@ text-based color configuration file
 .IP "\fB\fB-alpha\fP :\fP" 1c
 add an alpha channel to the output raster 
 .IP "\fB\fB-exact_color_entry\fP :\fP" 1c
-use strict matching when searching in the color configuration file. If none matching color entry is found, the '0,0,0,0' RGBA quadruplet will be used 
+use strict matching when searching in the color configuration file\&. If no matching color entry is found, the '0,0,0,0' RGBA quadruplet will be used 
 .IP "\fB\fB-nearest_color_entry\fP :\fP" 1c
-use the RGBA quadruplet corresponding to the closest entry in the color configuration file. 
+use the RGBA quadruplet corresponding to the closest entry in the color configuration file\&. 
 .PP
 .PP
-The color-relief mode is the only mode that supports VRT as output format. In that case, it will translate the color configuration file into appropriate LUT elements. Note that elevations specified as percentage will be translated as absolute values, which must be taken into account when the statistics of the source raster differ from the one that was used when building the VRT.
+The color-relief mode is the only mode that supports VRT as output format\&. In that case, it will translate the color configuration file into appropriate LUT elements\&. Note that elevations specified as percentage will be translated as absolute values, which must be taken into account when the statistics of the source raster differ from the one that was used when building the VRT\&.
 .PP
-The text-based color configuration file generally contains 4 columns per line : the elevation value and the corresponding Red, Green, Blue component (between 0 and 255). The elevation value can be any floating point value, or the \fInv\fP keyword for the nodata value.. The elevation can also be expressed as a percentage : 0% being the minimum value found in the raster, 100% the maximum value.
+The text-based color configuration file generally contains 4 columns per line : the elevation value and the corresponding Red, Green, Blue component (between 0 and 255)\&. The elevation value can be any floating point value, or the \fInv\fP keyword for the nodata value\&. The elevation can also be expressed as a percentage : 0% being the minimum value found in the raster, 100% the maximum value\&.
 .PP
-An extra column can be optionally added for the alpha component. If it is not specified, full opacity (255) is assumed.
+An extra column can be optionally added for the alpha component\&. If it is not specified, full opacity (255) is assumed\&.
 .PP
-Various field separators are accepted : comma, tabulation, spaces, ':'.
+Various field separators are accepted : comma, tabulation, spaces, ':'\&.
 .PP
-Common colors used by GRASS can also be specified by using their name, instead of the RGB triplet. The supported list is : white, black, red, green, blue, yellow, magenta, cyan, aqua, grey/gray, orange, brown, purple/violet and indigo.
+Common colors used by GRASS can also be specified by using their name, instead of the RGB triplet\&. The supported list is : white, black, red, green, blue, yellow, magenta, cyan, aqua, grey/gray, orange, brown, purple/violet and indigo\&.
 .PP
-Since GDAL 1.8.0, GMT .cpt palette files are also supported (COLOR_MODEL = RGB only).
+Since GDAL 1\&.8\&.0, GMT \&.cpt palette files are also supported (COLOR_MODEL = RGB only)\&.
 .PP
-Note: the syntax of the color configuration file is derived from the one supported by GRASS r.colors utility. ESRI HDR color table files (.clr) also match that syntax. The alpha component and the support of tab and comma as separators are GDAL specific extensions.
+Note: the syntax of the color configuration file is derived from the one supported by GRASS r\&.colors utility\&. ESRI HDR color table files (\&.clr) also match that syntax\&. The alpha component and the support of tab and comma as separators are GDAL specific extensions\&.
 .PP
 For example : 
 .PP
@@ -187,28 +184,28 @@ nv     0   0   0   0
 .fi
 .PP
 .SS "TRI"
-This command outputs a single-band raster with values computed from the elevation. TRI stands for Terrain Ruggedness Index, which is defined as the mean difference between a central pixel and its surrounding cells (see Wilson et al 2007, Marine Geodesy 30:3-35).
+This command outputs a single-band raster with values computed from the elevation\&. TRI stands for Terrain Ruggedness Index, which is defined as the mean difference between a central pixel and its surrounding cells (see Wilson et al 2007, Marine Geodesy 30:3-35)\&.
 .PP
-The value -9999 is used as the output nodata value.
+The value -9999 is used as the output nodata value\&.
 .PP
-There are no specific options.
+There are no specific options\&.
 .SS "TPI"
-This command outputs a single-band raster with values computed from the elevation. TPI stands for Topographic Position Index, which is defined as the difference between a central pixel and the mean of its surrounding cells (see Wilson et al 2007, Marine Geodesy 30:3-35).
+This command outputs a single-band raster with values computed from the elevation\&. TPI stands for Topographic Position Index, which is defined as the difference between a central pixel and the mean of its surrounding cells (see Wilson et al 2007, Marine Geodesy 30:3-35)\&.
 .PP
-The value -9999 is used as the output nodata value.
+The value -9999 is used as the output nodata value\&.
 .PP
-There are no specific options.
+There are no specific options\&.
 .SS "roughness"
-This command outputs a single-band raster with values computed from the elevation. Roughness is the largest inter-cell difference of a central pixel and its surrounding cell, as defined in Wilson et al (2007, Marine Geodesy 30:3-35).
+This command outputs a single-band raster with values computed from the elevation\&. Roughness is the largest inter-cell difference of a central pixel and its surrounding cell, as defined in Wilson et al (2007, Marine Geodesy 30:3-35)\&.
 .PP
-The value -9999 is used as the output nodata value.
+The value -9999 is used as the output nodata value\&.
 .PP
-There are no specific options.
+There are no specific options\&.
 .SH "AUTHORS"
 .PP
 Matthew Perry <perrygeo at gmail.com>, Even Rouault <even.rouault at mines-paris.org>, Howard Butler <hobu.inc at gmail.com>, Chris Yesson <chris.yesson at ioz.ac.uk>
 .PP
-Derived from code by Michael Shapiro, Olga Waupotitsch, Marjorie Larson, Jim Westervelt : U.S. Army CERL, 1993. GRASS 4.1 Reference Manual. U.S. Army Corps of Engineers, Construction Engineering Research Laboratories, Champaign, Illinois, 1-425.
+Derived from code by Michael Shapiro, Olga Waupotitsch, Marjorie Larson, Jim Westervelt : U\&.S\&. Army CERL, 1993\&. GRASS 4\&.1 Reference Manual\&. U\&.S\&. Army Corps of Engineers, Construction Engineering Research Laboratories, Champaign, Illinois, 1-425\&.
 .SH "See also"
 .PP
 Documentation of related GRASS utilities :
diff --git a/man/man1/gdalinfo.1 b/man/man1/gdalinfo.1
index 486cbd1..aec5ea5 100644
--- a/man/man1/gdalinfo.1
+++ b/man/man1/gdalinfo.1
@@ -1,12 +1,9 @@
-.TH "gdalinfo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdalinfo" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdalinfo \- .TH "gdalinfo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdalinfo \- lists information about a raster dataset
+gdalinfo \- gdalinfo 
+lists information about a raster dataset
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -20,74 +17,74 @@ gdalinfo [--help-general] [-mm] [-stats] [-hist] [-nogcp] [-nomd]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdalinfo program lists various information about a GDAL supported raster dataset. 
+The gdalinfo program lists various information about a GDAL supported raster dataset\&. 
 .IP "\fB\fB-mm\fP\fP" 1c
-Force computation of the actual min/max values for each band in the dataset. 
+Force computation of the actual min/max values for each band in the dataset\&. 
 .IP "\fB\fB-stats\fP\fP" 1c
-Read and display image statistics. Force computation if no statistics are stored in an image. 
+Read and display image statistics\&. Force computation if no statistics are stored in an image\&. 
 .IP "\fB\fB-approx_stats\fP\fP" 1c
-Read and display image statistics. Force computation if no statistics are stored in an image. However, they may be computed based on overviews or a subset of all tiles. Useful if you are in a hurry and don't want precise stats. 
+Read and display image statistics\&. Force computation if no statistics are stored in an image\&. However, they may be computed based on overviews or a subset of all tiles\&. Useful if you are in a hurry and don't want precise stats\&. 
 .IP "\fB\fB-hist\fP\fP" 1c
-Report histogram information for all bands. 
+Report histogram information for all bands\&. 
 .IP "\fB\fB-nogcp\fP\fP" 1c
-Suppress ground control points list printing. It may be useful for datasets with huge amount of GCPs, such as L1B AVHRR or HDF4 MODIS which contain thousands of them. 
+Suppress ground control points list printing\&. It may be useful for datasets with huge amount of GCPs, such as L1B AVHRR or HDF4 MODIS which contain thousands of them\&. 
 .IP "\fB\fB-nomd\fP\fP" 1c
-Suppress metadata printing. Some datasets may contain a lot of metadata strings. 
+Suppress metadata printing\&. Some datasets may contain a lot of metadata strings\&. 
 .IP "\fB\fB-nrat\fP\fP" 1c
-Suppress printing of raster attribute table. 
+Suppress printing of raster attribute table\&. 
 .IP "\fB\fB-noct\fP\fP" 1c
-Suppress printing of color table. 
+Suppress printing of color table\&. 
 .IP "\fB\fB-checksum\fP\fP" 1c
-Force computation of the checksum for each band in the dataset. 
+Force computation of the checksum for each band in the dataset\&. 
 .IP "\fB\fB-listmdd\fP\fP" 1c
-(GDAL >= 1.11) List all metadata domains available for the dataset. 
+(GDAL >= 1\&.11) List all metadata domains available for the dataset\&. 
 .IP "\fB\fB-mdd domain\fP\fP" 1c
-Report metadata for the specified domain. Starting with GDAL 1.11, 'all' can be used to report metadata in all domains 
+Report metadata for the specified domain\&. Starting with GDAL 1\&.11, 'all' can be used to report metadata in all domains 
 .IP "\fB\fB-nofl\fP\fP" 1c
-(GDAL >= 1.9.0) Only display the first file of the file list. 
+(GDAL >= 1\&.9\&.0) Only display the first file of the file list\&. 
 .IP "\fB\fB-sd\fP \fIsubdataset\fP\fP" 1c
-(GDAL >= 1.9.0) If the input dataset contains several subdatasets read and display a subdataset with specified number (starting from 1). This is an alternative of giving the full subdataset name. 
+(GDAL >= 1\&.9\&.0) If the input dataset contains several subdatasets read and display a subdataset with specified number (starting from 1)\&. This is an alternative of giving the full subdataset name\&. 
 .IP "\fB\fB-proj4\fP\fP" 1c
-(GDAL >= 1.9.0) Report a PROJ.4 string corresponding to the file's coordinate system. 
+(GDAL >= 1\&.9\&.0) Report a PROJ\&.4 string corresponding to the file's coordinate system\&. 
 .PP
 .PP
 The gdalinfo will report all of the following (if known):
 .PP
 .PD 0
 .IP "\(bu" 2
-The format driver used to access the file. 
+The format driver used to access the file\&. 
 .IP "\(bu" 2
-Raster size (in pixels and lines). 
+Raster size (in pixels and lines)\&. 
 .IP "\(bu" 2
-The coordinate system for the file (in OGC WKT). 
+The coordinate system for the file (in OGC WKT)\&. 
 .IP "\(bu" 2
-The geotransform associated with the file (rotational coefficients are currently not reported). 
+The geotransform associated with the file (rotational coefficients are currently not reported)\&. 
 .IP "\(bu" 2
-Corner coordinates in georeferenced, and if possible lat/long based on the full geotransform (but not GCPs). 
+Corner coordinates in georeferenced, and if possible lat/long based on the full geotransform (but not GCPs)\&. 
 .IP "\(bu" 2
-Ground control points. 
+Ground control points\&. 
 .IP "\(bu" 2
-File wide (including subdatasets) metadata. 
+File wide (including subdatasets) metadata\&. 
 .IP "\(bu" 2
-Band data types. 
+Band data types\&. 
 .IP "\(bu" 2
-Band color interpretations. 
+Band color interpretations\&. 
 .IP "\(bu" 2
-Band block size. 
+Band block size\&. 
 .IP "\(bu" 2
-Band descriptions. 
+Band descriptions\&. 
 .IP "\(bu" 2
-Band min/max values (internally known and possibly computed). 
+Band min/max values (internally known and possibly computed)\&. 
 .IP "\(bu" 2
-Band checksum (if computation asked). 
+Band checksum (if computation asked)\&. 
 .IP "\(bu" 2
-Band NODATA value. 
+Band NODATA value\&. 
 .IP "\(bu" 2
-Band overview resolutions available. 
+Band overview resolutions available\&. 
 .IP "\(bu" 2
-Band unit type (i.e.. 'meters' or 'feet' for elevation bands). 
+Band unit type (i\&.e\&. 'meters' or 'feet' for elevation bands)\&. 
 .IP "\(bu" 2
-Band pseudo-color tables. 
+Band pseudo-color tables\&. 
 .PP
 .SH "EXAMPLE"
 .PP
diff --git a/man/man1/gdallocationinfo.1 b/man/man1/gdallocationinfo.1
index c84888b..69c76eb 100644
--- a/man/man1/gdallocationinfo.1
+++ b/man/man1/gdallocationinfo.1
@@ -1,12 +1,9 @@
-.TH "gdallocationinfo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdallocationinfo" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdallocationinfo \- .TH "gdallocationinfo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdallocationinfo \- raster query tool
+gdallocationinfo \- gdallocationinfo 
+raster query tool
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -21,66 +18,66 @@ Usage: gdallocationinfo [--help-general] [-xml] [-lifonly] [-valonly]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdallocationinfo utility provide a mechanism to query information about a pixel given it's location in one of a variety of coordinate systems. Several reporting options are provided.
+The gdallocationinfo utility provides a mechanism to query information about a pixel given its location in one of a variety of coordinate systems\&. Several reporting options are provided\&.
 .PP
 .IP "\fB\fB-xml\fP: \fP" 1c
-The output report will be XML formatted for convenient post processing.
+The output report will be XML formatted for convenient post processing\&.
 .PP
 .IP "\fB\fB-lifonly\fP: \fP" 1c
-The only output is filenames production from the LocationInfo request against the database (ie. for identifying impacted file from VRT).
+The only output is filenames production from the LocationInfo request against the database (ie\&. for identifying impacted file from VRT)\&.
 .PP
 .IP "\fB\fB-valonly\fP: \fP" 1c
-The only output is the pixel values of the selected pixel on each of the selected bands.
+The only output is the pixel values of the selected pixel on each of the selected bands\&.
 .PP
 .IP "\fB\fB-b\fP \fIband\fP: \fP" 1c
-Selects a band to query. Multiple bands can be listed. By default all bands are queried.
+Selects a band to query\&. Multiple bands can be listed\&. By default all bands are queried\&.
 .PP
 .IP "\fB\fB-overview\fP \fIoverview_level\fP: \fP" 1c
-Query the (overview_level)th overview (overview_level=1 is the 1st overview), instead of the base band. Note that the x,y location (if the coordinate system is pixel/line) must still be given with respect to the base band.
+Query the (overview_level)th overview (overview_level=1 is the 1st overview), instead of the base band\&. Note that the x,y location (if the coordinate system is pixel/line) must still be given with respect to the base band\&.
 .PP
 .IP "\fB\fB-l_srs\fP \fIsrs def\fP: \fP" 1c
-The coordinate system of the input x, y location.
+The coordinate system of the input x, y location\&.
 .PP
 .IP "\fB\fB-geoloc\fP: \fP" 1c
-Indicates input x,y points are in the georeferencing system of the image.
+Indicates input x,y points are in the georeferencing system of the image\&.
 .PP
 .IP "\fB\fB-wgs84\fP: \fP" 1c
-Indicates input x,y points are WGS84 long, lat.
+Indicates input x,y points are WGS84 long, lat\&.
 .PP
 .IP "\fB\fIsrcfile\fP:\fP" 1c
-The source GDAL raster datasource name.
+The source GDAL raster datasource name\&.
 .PP
 .IP "\fB\fIx\fP:\fP" 1c
-X location of target pixel. By default the coordinate system is pixel/line unless -l_srs, -wgs84 or -geoloc supplied. 
+X location of target pixel\&. By default the coordinate system is pixel/line unless -l_srs, -wgs84 or -geoloc supplied\&. 
 .PP
 .IP "\fB\fIy\fP:\fP" 1c
-Y location of target pixel. By default the coordinate system is pixel/line unless -l_srs, -wgs84 or -geoloc supplied. 
+Y location of target pixel\&. By default the coordinate system is pixel/line unless -l_srs, -wgs84 or -geoloc supplied\&. 
 .PP
 .PP
 .PP
-This utility is intended to provide a variety of information about a pixel. Currently it reports three things:
+This utility is intended to provide a variety of information about a pixel\&. Currently it reports three things:
 .PP
 .PD 0
 .IP "\(bu" 2
-The location of the pixel in pixel/line space. 
+The location of the pixel in pixel/line space\&. 
 .IP "\(bu" 2
-The result of a LocationInfo metadata query against the datasource - currently this is only implemented for VRT files which will report the file(s) used to satisfy requests for that pixel. 
+The result of a LocationInfo metadata query against the datasource - currently this is only implemented for VRT files which will report the file(s) used to satisfy requests for that pixel\&. 
 .IP "\(bu" 2
-The raster pixel value of that pixel for all or a subset of the bands. 
+The raster pixel value of that pixel for all or a subset of the bands\&. 
 .IP "\(bu" 2
-The unscaled pixel value if a Scale and/or Offset apply to the band. 
+The unscaled pixel value if a Scale and/or Offset apply to the band\&. 
 .PP
 .PP
-The pixel selected is requested by x/y coordinate on the commandline, or read from stdin. More than one coordinate pair can be supplied when reading coordinatesis from stdin. By default pixel/line coordinates are expected. However with use of the -geoloc, -wgs84, or -l_srs switches it is possible to specify the location in other coordinate systems.
+The pixel selected is requested by x/y coordinate on the commandline, or read from stdin\&. More than one coordinate pair can be supplied when reading coordinates is from stdin\&. By default pixel/line coordinates are expected\&. However with use of the -geoloc, -wgs84, or -l_srs switches it is possible to specify the location in other coordinate systems\&.
 .PP
-The default report is in a human readable text format. It is possible to instead request xml output with the -xml switch.
+The default report is in a human readable text format\&. It is possible to instead request xml output with the -xml switch\&.
 .PP
-For scripting purposes, the -valonly and -lifonly switches are provided to restrict output to the actual pixel values, or the LocationInfo files identified for the pixel.
+For scripting purposes, the -valonly and -lifonly switches are provided to restrict output to the actual pixel values, or the LocationInfo files identified for the pixel\&.
 .PP
-It is anticipated that additional reporting capabilities will be added to gdallocationinfo in the future.
+It is anticipated that additional reporting capabilities will be added to gdallocationinfo in the future\&.
 .SH "EXAMPLE"
 .PP
-Simple example reporting on pixel (256,256) on the file utm.tif.
+Simple example reporting on pixel (256,256) on the file utm\&.tif\&.
 .PP
 .PP
 .nf
@@ -93,7 +90,7 @@ Report:
 .fi
 .PP
 .PP
-Query a VRT file providing the location in WGS84, and getting the result in xml.
+Query a VRT file providing the location in WGS84, and getting the result in xml\&.
 .PP
 .PP
 .nf
diff --git a/man/man1/gdalmanage.1 b/man/man1/gdalmanage.1
index 79a1bd7..3313f27 100644
--- a/man/man1/gdalmanage.1
+++ b/man/man1/gdalmanage.1
@@ -1,12 +1,9 @@
-.TH "gdalmanage" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdalmanage" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdalmanage \- .TH "gdalmanage" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdalmanage \- Identify, delete, rename and copy raster data files
+gdalmanage \- gdalmanage 
+Identify, delete, rename and copy raster data files
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -18,31 +15,31 @@ Usage: gdalmanage mode [-r] [-u] [-f format]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdalmanage program can perform various operations on raster data files, depending on the chosen \fImode\fP. This includes identifying raster data types and deleting, renaming or copying the files.
+The gdalmanage program can perform various operations on raster data files, depending on the chosen \fImode\fP\&. This includes identifying raster data types and deleting, renaming or copying the files\&.
 .PP
 .IP "\fB\fImode\fP:\fP" 1c
 Mode of operation 
 .IP "\fB\fBidentify\fP \fIdatasetname\fP:\fP" 1c
-List data format of file. 
+List data format of file\&. 
 .IP "\fB\fBcopy\fP \fIdatasetname newdatasetname\fP:\fP" 1c
-Create a copy of the raster file with a new name. 
+Create a copy of the raster file with a new name\&. 
 .IP "\fB\fBrename\fP \fIdatasetname newdatasetname\fP:\fP" 1c
-Change the name of the raster file. 
+Change the name of the raster file\&. 
 .IP "\fB\fBdelete\fP \fIdatasetname\fP:\fP" 1c
-Delete raster file. 
+Delete raster file\&. 
 .PP
 .PP
 .IP "\fB\fB-r\fP:\fP" 1c
-Recursively scan files/folders for raster files. 
+Recursively scan files/folders for raster files\&. 
 .IP "\fB\fB-u\fP:\fP" 1c
-Report failures if file type is unidentified. 
+Report failures if file type is unidentified\&. 
 .IP "\fB\fB-f\fP \fIformat\fP:\fP" 1c
-Specify format of raster file if unknown by the application. Uses short data format name (e.g. \fIGTiff\fP).
+Specify format of raster file if unknown by the application\&. Uses short data format name (e\&.g\&. \fIGTiff\fP)\&.
 .PP
 .IP "\fB\fIdatasetname\fP:\fP" 1c
-Raster file to operate on. 
+Raster file to operate on\&. 
 .IP "\fB\fInewdatasetname\fP:\fP" 1c
-For copy and rename modes, you provide a \fIsource\fP filename and a \fItarget\fP filename, just like copy and move commands in an operating system. 
+For copy and rename modes, you provide a \fIsource\fP filename and a \fItarget\fP filename, just like copy and move commands in an operating system\&. 
 .PP
 .SH "EXAMPLES"
 .PP
diff --git a/man/man1/gdalmove.1 b/man/man1/gdalmove.1
index 526174c..7debfe6 100644
--- a/man/man1/gdalmove.1
+++ b/man/man1/gdalmove.1
@@ -1,12 +1,9 @@
-.TH "gdalmove" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdalmove" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdalmove \- .TH "gdalmove" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdalmove \- Transform georeferencing of raster file in place
+gdalmove \- gdalmove\&.py 
+Transform georeferencing of raster file in place
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -18,26 +15,26 @@ gdalmove.py [-s_srs <srs_defn>] -t_srs <srs_defn>
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdalmove.py script transforms the bounds of a raster file from one coordinate system to another, and then updates the coordinate system and geotransform of the file. This is done without altering pixel values at all. It is loosely similar to using gdalwarp to transform an image but avoiding the resampling step in order to avoid image damage. It is generally only suitable for transformations that are effectively linear in the area of the file.
+The gdalmove\&.py script transforms the bounds of a raster file from one coordinate system to another, and then updates the coordinate system and geotransform of the file\&. This is done without altering pixel values at all\&. It is loosely similar to using gdalwarp to transform an image but avoiding the resampling step in order to avoid image damage\&. It is generally only suitable for transformations that are effectively linear in the area of the file\&.
 .PP
-If no error threshold value (-et) is provided then the file is not actually updated, but the errors that would be incurred are reported. If -et is provided then the file is only modify if the apparent error being introduced is less than the indicate threshold (in pixels).
+If no error threshold value (-et) is provided then the file is not actually updated, but the errors that would be incurred are reported\&. If -et is provided then the file is only modified if the apparent error being introduced is less than the indicated threshold (in pixels)\&.
 .PP
-Currently the transformed geotransform is computed based on the transformation of the top left, top right, and bottom left corners. A reduced overall error could be produced using a least squares fit of at least all four corner points.
+Currently the transformed geotransform is computed based on the transformation of the top left, top right, and bottom left corners\&. A reduced overall error could be produced using a least squares fit of at least all four corner points\&.
 .PP
 .IP "\fB\fB-s_srs\fP \fIsrs_defn\fP:\fP" 1c
 .PP
-Override the coordinate system of the file with the indicated coordinate system definition. Optional. If not provided the source coordinate system is read from the source file.
+Override the coordinate system of the file with the indicated coordinate system definition\&. Optional\&. If not provided the source coordinate system is read from the source file\&.
 .PP
 .IP "\fB\fB-t_srs\fP \fIsrs_defn\fP:\fP" 1c
 .PP
-Defines the target coordinate system. This coordinate system will be written to the file after an update.
+Defines the target coordinate system\&. This coordinate system will be written to the file after an update\&.
 .PP
 .IP "\fB\fB-et\fP \fImax_pixel_err\fP:\fP" 1c
 .PP
-The error threshold (in pixels) beyond which the file will not be updated. If not provided no update will be applied to the file, but errors will be reported.
+The error threshold (in pixels) beyond which the file will not be updated\&. If not provided no update will be applied to the file, but errors will be reported\&.
 .PP
 .IP "\fB\fItarget_file\fP\fP" 1c
-The file to be operated on. To update this must be a file format that supports in place updates of the geotransform and SRS.
+The file to be operated on\&. To update this must be a file format that supports in place updates of the geotransform and SRS\&.
 .PP
 .PP
 .SH "AUTHORS"
diff --git a/man/man1/gdalsrsinfo.1 b/man/man1/gdalsrsinfo.1
index b4a6cc7..6ca154b 100644
--- a/man/man1/gdalsrsinfo.1
+++ b/man/man1/gdalsrsinfo.1
@@ -1,12 +1,9 @@
-.TH "gdalsrsinfo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdalsrsinfo" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdalsrsinfo \- .TH "gdalsrsinfo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdalsrsinfo \- lists info about a given SRS in number of formats (WKT, PROJ.4, etc.)
+gdalsrsinfo \- gdalsrsinfo 
+lists info about a given SRS in number of formats (WKT, PROJ\&.4, etc\&.)
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -34,7 +31,7 @@ The gdalsrsinfo utility reports information about a given SRS from one of the fo
 .IP "\(bu" 2
 The filename of a dataset supported by GDAL/OGR which contains SRS information
 .IP "\(bu" 2
-Any of the usual GDAL/OGR forms (complete WKT, PROJ.4, EPSG:n or a file containing the SRS)
+Any of the usual GDAL/OGR forms (complete WKT, PROJ\&.4, EPSG:n or a file containing the SRS)
 .PP
 .PP
 Output types:
@@ -46,7 +43,7 @@ Output types:
 .IP "\(bu" 2
 \fBwkt_all\fP   all wkt options available
 .IP "\(bu" 2
-\fBproj4\fP   PROJ.4 string
+\fBproj4\fP   PROJ\&.4 string
 .IP "\(bu" 2
 \fBwkt\fP   OGC WKT format (full)
 .IP "\(bu" 2
@@ -193,4 +190,4 @@ GEOGCS["SAD69",
 .PP
 .SH "AUTHORS"
 .PP
-Frank Warmerdam <warmerdam at pobox.com>, Etienne Tourigny <etourigny.dev-at-gmail-dot-com> 
+Frank Warmerdam <warmerdam at pobox.com>, Etienne Tourigny <etourigny\&.dev-at-gmail-dot-com> 
diff --git a/man/man1/gdaltindex.1 b/man/man1/gdaltindex.1
index 6b25fa9..511f96d 100644
--- a/man/man1/gdaltindex.1
+++ b/man/man1/gdaltindex.1
@@ -1,12 +1,9 @@
-.TH "gdaltindex" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdaltindex" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdaltindex \- .TH "gdaltindex" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdaltindex \- Builds a shapefile as a raster tileindex
+gdaltindex \- gdaltindex 
+Builds a shapefile as a raster tileindex
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -20,46 +17,46 @@ gdaltindex [-f format] [-tileindex field_name] [-write_absolute_path]
 .PP
 .SH "DESCRIPTION"
 .PP
-This program builds a shapefile with a record for each input raster file, an attribute containing the filename, and a polygon geometry outlining the raster. This output is suitable for use with \fCMapServer\fP as a raster tileindex.
+This program builds a shapefile with a record for each input raster file, an attribute containing the filename, and a polygon geometry outlining the raster\&. This output is suitable for use with \fCMapServer\fP as a raster tileindex\&.
 .PP
 .IP "\fB\fB-f\fP format:\fP" 1c
-(GDAL >= 1.11) 
+(GDAL >= 1\&.11) 
 .PP
-The OGR format of the output tile index file. Default is Esri Shapefile.  
+The OGR format of the output tile index file\&. Default is Esri Shapefile\&.  
 .IP "\fB\fB-tileindex\fP field_name: \fP" 1c
 .PP
-The output field name to hold the file path/location to the indexed rasters. The default tile index field name is \fClocation\fP.  
+The output field name to hold the file path/location to the indexed rasters\&. The default tile index field name is \fClocation\fP\&.  
 .IP "\fB\fB-write_absolute_path\fP: \fP" 1c
 .PP
-The absolute path to the raster files is stored in the tile index file. By default the raster filenames will be put in the file exactly as they are specified on the command line.  
+The absolute path to the raster files is stored in the tile index file\&. By default the raster filenames will be put in the file exactly as they are specified on the command line\&.  
 .IP "\fB\fB-skip_different_projection\fP: \fP" 1c
 .PP
-Only files with same projection as files already inserted in the tileindex will be inserted (unless \fC-t_srs\fP is specified). Default does not check projection and accepts all inputs.  
+Only files with same projection as files already inserted in the tileindex will be inserted (unless \fC-t_srs\fP is specified)\&. Default does not check projection and accepts all inputs\&.  
 .IP "\fB\fB-t_srs\fP target_srs: \fP" 1c
 .PP
-Geometries of input files will be transformed to the desired target coordinate reference system. Using this option generates files that are not compatible with MapServer < 6.4. Default creates simple rectangular polygons in the same coordinate reference system as the input rasters.  
+Geometries of input files will be transformed to the desired target coordinate reference system\&. Using this option generates files that are not compatible with MapServer < 6\&.4\&. Default creates simple rectangular polygons in the same coordinate reference system as the input rasters\&.  
 .IP "\fB\fB-src_srs_name\fP field_name:\fP" 1c
-(GDAL >= 1.11) 
+(GDAL >= 1\&.11) 
 .PP
-The name of the field to store the SRS of each tile. This field name can be used as the value of the TILESRS keyword in MapServer >= 6.4.  
+The name of the field to store the SRS of each tile\&. This field name can be used as the value of the TILESRS keyword in MapServer >= 6\&.4\&.  
 .IP "\fB\fB-src_srs_format\fP type:\fP" 1c
-(GDAL >= 1.11) 
+(GDAL >= 1\&.11) 
 .PP
-The format in which the SRS of each tile must be written. Types can be AUTO, WKT, EPSG, PROJ.  
+The format in which the SRS of each tile must be written\&. Types can be AUTO, WKT, EPSG, PROJ\&.  
 .IP "\fB\fB-lyr_name\fP name: \fP" 1c
 .PP
-Layer name to create/append to in the output tile index file.  
+Layer name to create/append to in the output tile index file\&.  
 .IP "\fB\fBindex_file\fP: \fP" 1c
 .PP
-The name of the output file to create/append to. The default shapefile will be created if it doesn't already exist, otherwise it will append to the existing file.  
+The name of the output file to create/append to\&. The default shapefile will be created if it doesn't already exist, otherwise it will append to the existing file\&.  
 .IP "\fB\fBgdal_file\fP: \fP" 1c
 .PP
-The input GDAL raster files, can be multiple files separated by spaces. Wildcards my also be used. Stores the file locations in the same style as specified here, unless \fC-write_absolute_path\fP option is also used.  
+The input GDAL raster files, can be multiple files separated by spaces\&. Wildcards may also be used\&. Stores the file locations in the same style as specified here, unless \fC-write_absolute_path\fP option is also used\&.  
 .PP
 .SH "EXAMPLES"
 .PP
 .PP
-Produce a shapefile (\fCdoq_index.shp\fP) with a record for every image that the utility found in the \fCdoq\fP folder. Each record holds information that points to the location of the image and also a bounding rectangle shape showing the bounds of the image:
+Produce a shapefile (\fCdoq_index\&.shp\fP) with a record for every image that the utility found in the \fCdoq\fP folder\&. Each record holds information that points to the location of the image and also a bounding rectangle shape showing the bounds of the image:
 .PP
 .PP
 .nf
diff --git a/man/man1/gdaltransform.1 b/man/man1/gdaltransform.1
index 4adc1c8..f3eaf4b 100644
--- a/man/man1/gdaltransform.1
+++ b/man/man1/gdaltransform.1
@@ -1,12 +1,9 @@
-.TH "gdaltransform" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdaltransform" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdaltransform \- .TH "gdaltransform" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdaltransform \- transforms coordinates
+gdaltransform \- gdaltransform 
+transforms coordinates
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -21,37 +18,37 @@ gdaltransform [--help-general]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdaltransform utility reprojects a list of coordinates into any supported projection,including GCP-based transformations.
+The gdaltransform utility reprojects a list of coordinates into any supported projection,including GCP-based transformations\&.
 .PP
 .IP "\fB\fB-s_srs\fP \fIsrs def\fP:\fP" 1c
-source spatial reference set. The coordinate systems that can be passed are anything supported by the OGRSpatialReference.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie. EPSG:4296), PROJ.4 declarations (as above), or the name of a .prf file containing well known text. 
+source spatial reference set\&. The coordinate systems that can be passed are anything supported by the OGRSpatialReference\&.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie\&. EPSG:4296), PROJ\&.4 declarations (as above), or the name of a \&.prf file containing well known text\&. 
 .IP "\fB\fB-t_srs\fP \fIsrs_def\fP:\fP" 1c
-target spatial reference set. The coordinate systems that can be passed are anything supported by the OGRSpatialReference.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie. EPSG:4296), PROJ.4 declarations (as above), or the name of a .prf file containing well known text. 
+target spatial reference set\&. The coordinate systems that can be passed are anything supported by the OGRSpatialReference\&.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie\&. EPSG:4296), PROJ\&.4 declarations (as above), or the name of a \&.prf file containing well known text\&. 
 .IP "\fB\fB-to\fP \fINAME=VALUE\fP:\fP" 1c
-set a transformer option suitable to pass to GDALCreateGenImgProjTransformer2().  
+set a transformer option suitable to pass to GDALCreateGenImgProjTransformer2()\&.  
 .IP "\fB\fB-order\fP \fIn\fP:\fP" 1c
-order of polynomial used for warping (1 to 3). The default is to select a polynomial order based on the number of GCPs. 
+order of polynomial used for warping (1 to 3)\&. The default is to select a polynomial order based on the number of GCPs\&. 
 .IP "\fB\fB-tps\fP:\fP" 1c
-Force use of thin plate spline transformer based on available GCPs. 
+Force use of thin plate spline transformer based on available GCPs\&. 
 .IP "\fB\fB-rpc\fP: \fP" 1c
-Force use of RPCs. 
+Force use of RPCs\&. 
 .IP "\fB\fB-geoloc\fP:\fP" 1c
-Force use of Geolocation Arrays. 
+Force use of Geolocation Arrays\&. 
 .IP "\fB\fB-i\fP\fP" 1c
-Inverse transformation: from destination to source. 
+Inverse transformation: from destination to source\&. 
 .IP "\fB\fB-gcp\fP\fIpixel line easting northing [elevation]\fP: \fP" 1c
 Provide a GCP to be used for transformation (generally three or more are required) 
 .IP "\fB\fIsrcfile\fP:\fP" 1c
-File with source projection definition or GCP's. If not given, source projection is read from the command-line -s_srs or -gcp parameters  
+File with source projection definition or GCP's\&. If not given, source projection is read from the command-line -s_srs or -gcp parameters  
 .IP "\fB\fIdstfile\fP:\fP" 1c
-File with destination projection definition.  
+File with destination projection definition\&.  
 .PP
 .PP
-Coordinates are read as pairs (or triples) of numbers per line from standard input, transformed, and written out to standard output in the same way. All transformations offered by gdalwarp are handled, including gcp-based ones.
+Coordinates are read as pairs (or triples) of numbers per line from standard input, transformed, and written out to standard output in the same way\&. All transformations offered by gdalwarp are handled, including gcp-based ones\&.
 .PP
-Note that input and output must always be in decimal form. There is currently no support for DMS input or output.
+Note that input and output must always be in decimal form\&. There is currently no support for DMS input or output\&.
 .PP
-If an input image file is provided, input is in pixel/line coordinates on that image. If an output file is provided, output is in pixel/line coordinates on that image.
+If an input image file is provided, input is in pixel/line coordinates on that image\&. If an output file is provided, output is in pixel/line coordinates on that image\&.
 .SH "Reprojection Example"
 .PP
 Simple reprojection from one projected coordinate system to another:
@@ -75,7 +72,7 @@ Produces the following output in meters in the 'Belge 1972 / Belgian Lambert
 .PP
 .SH "Reprojection Example"
 .PP
-The following command requests an RPC based transformation using the RPC model associated with the named file. Because the -i (inverse) flag is used, the transformation is from output georeferenced (WGS84) coordinates back to image coordinates.
+The following command requests an RPC based transformation using the RPC model associated with the named file\&. Because the -i (inverse) flag is used, the transformation is from output georeferenced (WGS84) coordinates back to image coordinates\&.
 .PP
 .PP
 .nf
diff --git a/man/man1/gdalwarp.1 b/man/man1/gdalwarp.1
index 944583c..f592f7d 100644
--- a/man/man1/gdalwarp.1
+++ b/man/man1/gdalwarp.1
@@ -1,12 +1,9 @@
-.TH "gdalwarp" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "gdalwarp" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-gdalwarp \- .TH "gdalwarp" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-gdalwarp \- image reprojection and warping utility
+gdalwarp \- gdalwarp 
+image reprojection and warping utility
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -30,104 +27,104 @@ gdalwarp [--help-general] [--formats]
 .PP
 .SH "DESCRIPTION"
 .PP
-The gdalwarp utility is an image mosaicing, reprojection and warping utility. The program can reproject to any supported projection, and can also apply GCPs stored with the image if the image is 'raw' with control information.
+The gdalwarp utility is an image mosaicing, reprojection and warping utility\&. The program can reproject to any supported projection, and can also apply GCPs stored with the image if the image is 'raw' with control information\&.
 .PP
 .IP "\fB\fB-s_srs\fP \fIsrs def\fP:\fP" 1c
-source spatial reference set. The coordinate systems that can be passed are anything supported by the OGRSpatialReference.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie. EPSG:4296), PROJ.4 declarations (as above), or the name of a .prf file containing well known text. 
+source spatial reference set\&. The coordinate systems that can be passed are anything supported by the OGRSpatialReference\&.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie\&. EPSG:4296), PROJ\&.4 declarations (as above), or the name of a \&.prf file containing well known text\&. 
 .IP "\fB\fB-t_srs\fP \fIsrs_def\fP:\fP" 1c
-target spatial reference set. The coordinate systems that can be passed are anything supported by the OGRSpatialReference.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie. EPSG:4296), PROJ.4 declarations (as above), or the name of a .prf file containing well known text. 
+target spatial reference set\&. The coordinate systems that can be passed are anything supported by the OGRSpatialReference\&.SetFromUserInput() call, which includes EPSG PCS and GCSes (ie\&. EPSG:4296), PROJ\&.4 declarations (as above), or the name of a \&.prf file containing well known text\&. 
 .IP "\fB\fB-to\fP \fINAME=VALUE\fP:\fP" 1c
-set a transformer option suitable to pass to GDALCreateGenImgProjTransformer2().  
+set a transformer option suitable to pass to GDALCreateGenImgProjTransformer2()\&.  
 .IP "\fB\fB-order\fP \fIn\fP:\fP" 1c
-order of polynomial used for warping (1 to 3). The default is to select a polynomial order based on the number of GCPs. 
+order of polynomial used for warping (1 to 3)\&. The default is to select a polynomial order based on the number of GCPs\&. 
 .IP "\fB\fB-tps\fP:\fP" 1c
-Force use of thin plate spline transformer based on available GCPs. 
+Force use of thin plate spline transformer based on available GCPs\&. 
 .IP "\fB\fB-rpc\fP: \fP" 1c
-Force use of RPCs. 
+Force use of RPCs\&. 
 .IP "\fB\fB-geoloc\fP:\fP" 1c
-Force use of Geolocation Arrays. 
+Force use of Geolocation Arrays\&. 
 .IP "\fB\fB-et\fP \fIerr_threshold\fP:\fP" 1c
-error threshold for transformation approximation (in pixel units - defaults to 0.125). 
+error threshold for transformation approximation (in pixel units - defaults to 0\&.125)\&. 
 .IP "\fB\fB-refine_gcps\fP \fItolerance minimum_gcps\fP:\fP" 1c
-(GDAL >= 1.9.0) refines the GCPs by automatically eliminating outliers. Outliers will be eliminated until minimum_gcps are left or when no outliers can be detected. The tolerance is passed to adjust when a GCP will be eliminated. Not that GCP refinement only works with polynomial interpolation. The tolerance is in pixel units if no projection is available, otherwise it is in SRS units. If minimum_gcps is not provided, the minimum GCPs according to the polynomial model is used. 
+(GDAL >= 1\&.9\&.0) refines the GCPs by automatically eliminating outliers\&. Outliers will be eliminated until minimum_gcps are left or when no outliers can be detected\&. The tolerance is passed to adjust when a GCP will be eliminated\&. Note that GCP refinement only works with polynomial interpolation\&. The tolerance is in pixel units if no projection is available, otherwise it is in SRS units\&. If minimum_gcps is not provided, the minimum GCPs according to the polynomial model is used\&. 
 .IP "\fB\fB-te\fP \fIxmin ymin xmax ymax\fP:\fP" 1c
-set georeferenced extents of output file to be created (in target SRS). 
+set georeferenced extents of output file to be created (in target SRS)\&. 
 .IP "\fB\fB-tr\fP \fIxres yres\fP:\fP" 1c
 set output file resolution (in target georeferenced units) 
 .IP "\fB\fB-tap\fP:\fP" 1c
-(GDAL >= 1.8.0) (target aligned pixels) align the coordinates of the extent of the output file to the values of the -tr, such that the aligned extent includes the minimum extent. 
+(GDAL >= 1\&.8\&.0) (target aligned pixels) align the coordinates of the extent of the output file to the values of the -tr, such that the aligned extent includes the minimum extent\&. 
 .IP "\fB\fB-ts\fP \fIwidth height\fP:\fP" 1c
-set output file size in pixels and lines. If width or height is set to 0, the other dimension will be guessed from the computed resolution. Note that -ts cannot be used with -tr 
+set output file size in pixels and lines\&. If width or height is set to 0, the other dimension will be guessed from the computed resolution\&. Note that -ts cannot be used with -tr 
 .IP "\fB\fB-wo\fP \fI'NAME=VALUE'\fP:\fP" 1c
-Set a warp options. The GDALWarpOptions::papszWarpOptions docs show all options. Multiple \fB-wo\fP options may be listed. 
+Set a warp option\&. The GDALWarpOptions::papszWarpOptions docs show all options\&. Multiple \fB-wo\fP options may be listed\&. 
 .IP "\fB\fB-ot\fP \fItype\fP:\fP" 1c
-For the output bands to be of the indicated data type. 
+For the output bands to be of the indicated data type\&. 
 .IP "\fB\fB-wt\fP \fItype\fP:\fP" 1c
-Working pixel data type. The data type of pixels in the source image and destination image buffers. 
+Working pixel data type\&. The data type of pixels in the source image and destination image buffers\&. 
 .IP "\fB\fB-r\fP \fIresampling_method\fP:\fP" 1c
-Resampling method to use. Available methods are: 
+Resampling method to use\&. Available methods are: 
 .IP "\fB\fBnear\fP: \fP" 1c
-nearest neighbour resampling (default, fastest algorithm, worst interpolation quality). 
+nearest neighbour resampling (default, fastest algorithm, worst interpolation quality)\&. 
 .IP "\fB\fBbilinear\fP: \fP" 1c
-bilinear resampling. 
+bilinear resampling\&. 
 .IP "\fB\fBcubic\fP: \fP" 1c
-cubic resampling. 
+cubic resampling\&. 
 .IP "\fB\fBcubicspline\fP: \fP" 1c
-cubic spline resampling. 
+cubic spline resampling\&. 
 .IP "\fB\fBlanczos\fP: \fP" 1c
-Lanczos windowed sinc resampling. 
+Lanczos windowed sinc resampling\&. 
 .IP "\fB\fBaverage\fP: \fP" 1c
-average resampling, computes the average of all non-NODATA contributing pixels. (GDAL >= 1.10.0) 
+average resampling, computes the average of all non-NODATA contributing pixels\&. (GDAL >= 1\&.10\&.0) 
 .IP "\fB\fBmode\fP: \fP" 1c
-mode resampling, selects the value which appears most often of all the sampled points. (GDAL >= 1.10.0) 
+mode resampling, selects the value which appears most often of all the sampled points\&. (GDAL >= 1\&.10\&.0) 
 .PP
-.IP "\fB\fB-srcnodata\fP \fIvalue [value...]\fP:\fP" 1c
-Set nodata masking values for input bands (different values can be supplied for each band). If more than one value is supplied all values should be quoted to keep them together as a single operating system argument. Masked values will not be used in interpolation. Use a value of \fCNone\fP to ignore intrinsic nodata settings on the source dataset. 
-.IP "\fB\fB-dstnodata\fP \fIvalue [value...]\fP:\fP" 1c
-Set nodata values for output bands (different values can be supplied for each band). If more than one value is supplied all values should be quoted to keep them together as a single operating system argument. New files will be initialized to this value and if possible the nodata value will be recorded in the output file. Use a value of \fCNone\fP to ensure that nodata is not defined (GDAL>=2.0). If this argument is not used then nodata values will be copied from the source dataset (GDAL>=2.0). 
+.IP "\fB\fB-srcnodata\fP \fIvalue [value\&.\&.\&.]\fP:\fP" 1c
+Set nodata masking values for input bands (different values can be supplied for each band)\&. If more than one value is supplied all values should be quoted to keep them together as a single operating system argument\&. Masked values will not be used in interpolation\&. Use a value of \fCNone\fP to ignore intrinsic nodata settings on the source dataset\&. 
+.IP "\fB\fB-dstnodata\fP \fIvalue [value\&.\&.\&.]\fP:\fP" 1c
+Set nodata values for output bands (different values can be supplied for each band)\&. If more than one value is supplied all values should be quoted to keep them together as a single operating system argument\&. New files will be initialized to this value and if possible the nodata value will be recorded in the output file\&. Use a value of \fCNone\fP to ensure that nodata is not defined (GDAL>=2\&.0)\&. If this argument is not used then nodata values will be copied from the source data [...]
 .IP "\fB\fB-dstalpha\fP:\fP" 1c
-Create an output alpha band to identify nodata (unset/transparent) pixels.  
+Create an output alpha band to identify nodata (unset/transparent) pixels\&.  
 .IP "\fB\fB-wm\fP \fImemory_in_mb\fP:\fP" 1c
-Set the amount of memory (in megabytes) that the warp API is allowed to use for caching. 
+Set the amount of memory (in megabytes) that the warp API is allowed to use for caching\&. 
 .IP "\fB\fB-multi\fP:\fP" 1c
-Use multithreaded warping implementation. Multiple threads will be used to process chunks of image and perform input/output operation simultaneously. 
+Use multithreaded warping implementation\&. Multiple threads will be used to process chunks of image and perform input/output operation simultaneously\&. 
 .IP "\fB\fB-q\fP:\fP" 1c
-Be quiet. 
+Be quiet\&. 
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Select the output format. The default is GeoTIFF (GTiff). Use the short format name.  
+Select the output format\&. The default is GeoTIFF (GTiff)\&. Use the short format name\&.  
 .IP "\fB\fB-co\fP \fI'NAME=VALUE'\fP:\fP" 1c
-passes a creation option to the output format driver. Multiple \fB-co\fP options may be listed. See format specific documentation for legal creation options for each format. 
+passes a creation option to the output format driver\&. Multiple \fB-co\fP options may be listed\&. See format specific documentation for legal creation options for each format\&. 
 .PP
 .IP "\fB\fB-cutline\fP \fIdatasource\fP:\fP" 1c
-Enable use of a blend cutline from the name OGR support datasource. 
+Enable use of a blend cutline from the name OGR support datasource\&. 
 .IP "\fB\fB-cl\fP \fIlayername\fP:\fP" 1c
-Select the named layer from the cutline datasource. 
+Select the named layer from the cutline datasource\&. 
 .IP "\fB\fB-cwhere\fP \fIexpression\fP:\fP" 1c
-Restrict desired cutline features based on attribute query. 
+Restrict desired cutline features based on attribute query\&. 
 .IP "\fB\fB-csql\fP \fIquery\fP:\fP" 1c
-Select cutline features using an SQL query instead of from a layer with -cl. 
+Select cutline features using an SQL query instead of from a layer with -cl\&. 
 .IP "\fB\fB-cblend\fP \fIdistance\fP:\fP" 1c
-Set a blend distance to use to blend over cutlines (in pixels). 
+Set a blend distance to use to blend over cutlines (in pixels)\&. 
 .IP "\fB\fB-crop_to_cutline\fP:\fP" 1c
-(GDAL >= 1.8.0) Crop the extent of the target dataset to the extent of the cutline. 
+(GDAL >= 1\&.8\&.0) Crop the extent of the target dataset to the extent of the cutline\&. 
 .IP "\fB\fB-overwrite\fP:\fP" 1c
-(GDAL >= 1.8.0) Overwrite the target dataset if it already exists. 
+(GDAL >= 1\&.8\&.0) Overwrite the target dataset if it already exists\&. 
 .IP "\fB\fB-nomd\fP:\fP" 1c
-(GDAL >= 1.10.0) Do not copy metadata. Without this option, dataset and band metadata (as well as some band information) will be copied from the first source dataset. Items that differ between source datasets will be set to * (see -cvmd option). 
+(GDAL >= 1\&.10\&.0) Do not copy metadata\&. Without this option, dataset and band metadata (as well as some band information) will be copied from the first source dataset\&. Items that differ between source datasets will be set to * (see -cvmd option)\&. 
 .IP "\fB\fB-cvmd\fP \fImeta_conflict_value\fP:\fP" 1c
-(GDAL >= 1.10.0) Value to set metadata items that conflict between source datasets (default is '*'). Use '' to remove conflicting items.  
+(GDAL >= 1\&.10\&.0) Value to set metadata items that conflict between source datasets (default is '*')\&. Use '' to remove conflicting items\&.  
 .IP "\fB\fB-setci\fP:\fP" 1c
-(GDAL >= 1.10.0) Set the color interpretation of the bands of the target dataset from the source dataset.
+(GDAL >= 1\&.10\&.0) Set the color interpretation of the bands of the target dataset from the source dataset\&.
 .PP
 .IP "\fB\fIsrcfile\fP:\fP" 1c
-The source file name(s).  
+The source file name(s)\&.  
 .IP "\fB\fIdstfile\fP:\fP" 1c
-The destination file name.  
+The destination file name\&.  
 .PP
 .PP
-Mosaicing into an existing output file is supported if the output file already exists. The spatial extent of the existing file will not be modified to accomodate new data, so you may have to remove it in that case, or use the -overwrite option.
+Mosaicing into an existing output file is supported if the output file already exists\&. The spatial extent of the existing file will not be modified to accommodate new data, so you may have to remove it in that case, or use the -overwrite option\&.
 .PP
-Polygon cutlines may be used as a mask to restrict the area of the destination file that may be updated, including blending. If the OGR layer containing the cutline features has no explicit SRS, the cutline features must be in the SRS of the destination file. When outputing to a not yet existing target dataset, its extent will be the one of the original raster unless -te or -crop_to_cutline are specified.
+Polygon cutlines may be used as a mask to restrict the area of the destination file that may be updated, including blending\&. If the OGR layer containing the cutline features has no explicit SRS, the cutline features must be in the SRS of the destination file\&. When outputting to a not yet existing target dataset, its extent will be the one of the original raster unless -te or -crop_to_cutline are specified\&.
 .SH "EXAMPLE"
 .PP
 For instance, an eight bit spot scene stored in GeoTIFF with control points mapping the corners to lat/long could be warped to a UTM projection with a command like this:
diff --git a/man/man1/nearblack.1 b/man/man1/nearblack.1
index fa0729a..5973c5d 100644
--- a/man/man1/nearblack.1
+++ b/man/man1/nearblack.1
@@ -1,12 +1,9 @@
-.TH "nearblack" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "nearblack" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-nearblack \- .TH "nearblack" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-nearblack \- convert nearly black/white borders to black
+nearblack \- nearblack 
+convert nearly black/white borders to black
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -18,37 +15,37 @@ nearblack [-of format] [-white | [-color c1,c2,c3...cn]*] [-near dist] [-nb non_
 .PP
 .SH "DESCRIPTION"
 .PP
-This utility will scan an image and try to set all pixels that are nearly or exactly black, white or one or more custom colors around the collar to black or white. This is often used to 'fix up' lossy compressed airphotos so that color pixels can be treated as transparent when mosaicking.
+This utility will scan an image and try to set all pixels that are nearly or exactly black, white or one or more custom colors around the collar to black or white\&. This is often used to 'fix up' lossy compressed airphotos so that color pixels can be treated as transparent when mosaicking\&.
 .PP
 .IP "\fB\fB-o\fP \fIoutfile\fP:\fP" 1c
-The name of the output file to be created. Newly created files are created with the HFA driver by default (Erdas Imagine - .img) 
+The name of the output file to be created\&. Newly created files are created with the HFA driver by default (Erdas Imagine - \&.img) 
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-(GDAL 1.8.0 or later) Select the output format. Use the short format name (GTiff for GeoTIFF for examle). 
+(GDAL 1\&.8\&.0 or later) Select the output format\&. Use the short format name (GTiff for GeoTIFF for example)\&. 
 .IP "\fB\fB-co\fP \fI'NAME=VALUE'\fP:\fP" 1c
-(GDAL 1.8.0 or later) Passes a creation option to the output format driver. Multiple \fB-co\fP options may be listed. See format specific documentation for legal creation options for each format. Only valid when creating a new file 
+(GDAL 1\&.8\&.0 or later) Passes a creation option to the output format driver\&. Multiple \fB-co\fP options may be listed\&. See format specific documentation for legal creation options for each format\&. Only valid when creating a new file 
 .IP "\fB\fB-white\fP:\fP" 1c
-Search for nearly white (255) pixels instead of nearly black pixels.  
-.IP "\fB\fB-color\fP \fIc1,c2,c3...cn\fP:\fP" 1c
-(GDAL >= 1.9.0) Search for pixels near the specified color. May be specified multiple times. When -color is specified, the pixels that are considered as the collar are set to 0.  
+Search for nearly white (255) pixels instead of nearly black pixels\&.  
+.IP "\fB\fB-color\fP \fIc1,c2,c3\&.\&.\&.cn\fP:\fP" 1c
+(GDAL >= 1\&.9\&.0) Search for pixels near the specified color\&. May be specified multiple times\&. When -color is specified, the pixels that are considered as the collar are set to 0\&.  
 .IP "\fB\fB-near\fP \fIdist\fP:\fP" 1c
-Select how far from black, white or custom colors the pixel values can be and still considered near black, white or custom color. Defaults to 15.  
+Select how far from black, white or custom colors the pixel values can be and still considered near black, white or custom color\&. Defaults to 15\&.  
 .IP "\fB\fB-nb\fP \fInon_black_pixels\fP:\fP" 1c
-number of non-black pixels that can be encountered before the giving up search inwards. Defaults to 2.  
+number of non-black pixels that can be encountered before the giving up search inwards\&. Defaults to 2\&.  
 .IP "\fB\fB-setalpha\fP:\fP" 1c
-(GDAL 1.8.0 or later) Adds an alpha band if the output file is specified and the input file has 3 bands, or sets the alpha band of the output file if it is specified and the input file has 4 bands, or sets the alpha band of the input file if it has 4 bands and no output file is specified. The alpha band is set to 0 in the image collar and to 255 elsewhere.  
+(GDAL 1\&.8\&.0 or later) Adds an alpha band if the output file is specified and the input file has 3 bands, or sets the alpha band of the output file if it is specified and the input file has 4 bands, or sets the alpha band of the input file if it has 4 bands and no output file is specified\&. The alpha band is set to 0 in the image collar and to 255 elsewhere\&.  
 .IP "\fB\fB-setmask\fP:\fP" 1c
-(GDAL 1.8.0 or later) Adds a mask band to the output file, or adds a mask band to the input file if it does not already have one and no output file is specified. The mask band is set to 0 in the image collar and to 255 elsewhere.  
+(GDAL 1\&.8\&.0 or later) Adds a mask band to the output file, or adds a mask band to the input file if it does not already have one and no output file is specified\&. The mask band is set to 0 in the image collar and to 255 elsewhere\&.  
 .IP "\fB\fB-q\fP:\fP" 1c
-(GDAL 1.8.0 or later) Suppress progress monitor and other non-error output. 
+(GDAL 1\&.8\&.0 or later) Suppress progress monitor and other non-error output\&. 
 .IP "\fB\fIinfile\fP:\fP" 1c
-The input file. Any GDAL supported format, any number of bands, normally 8bit Byte bands.  
+The input file\&. Any GDAL supported format, any number of bands, normally 8bit Byte bands\&.  
 .PP
 .PP
-The algorithm processes the image one scanline at a time. A scan 'in' is done from either end setting pixels to black or white until at least 'non_black_pixels' pixels that are more than 'dist' gray levels away from black, white or custom colors have been encountered at which point the scan stops. The nearly black, white or custom color pixels are set to black or white. The algorithm also scans from top to bottom and from bottom to top to identify indentations in the top or bottom.
+The algorithm processes the image one scanline at a time\&. A scan 'in' is done from either end setting pixels to black or white until at least 'non_black_pixels' pixels that are more than 'dist' gray levels away from black, white or custom colors have been encountered at which point the scan stops\&. The nearly black, white or custom color pixels are set to black or white\&. The algorithm also scans from top to bottom and from bottom to top to identify indentations in the top or bottom\&.
 .PP
-The processing is all done in 8bit (Bytes).
+The processing is all done in 8bit (Bytes)\&.
 .PP
-If the output file is omitted, the processed results will be written back to the input file - which must support update.
+If the output file is omitted, the processed results will be written back to the input file - which must support update\&.
 .SH "AUTHORS"
 .PP
 Frank Warmerdam <warmerdam at pobox.com> 
diff --git a/man/man1/ogr2ogr.1 b/man/man1/ogr2ogr.1
index 885e4c4..6037f80 100644
--- a/man/man1/ogr2ogr.1
+++ b/man/man1/ogr2ogr.1
@@ -1,12 +1,9 @@
-.TH "ogr2ogr" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "ogr2ogr" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-ogr2ogr \- .TH "ogr2ogr" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-ogr2ogr \- converts simple features data between file formats
+ogr2ogr \- ogr2ogr 
+converts simple features data between file formats
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -44,7 +41,7 @@ Advanced options :
 .PP
 .SH "DESCRIPTION"
 .PP
-This program can be used to convert simple features data between file formats performing various operations during the process such as spatial or attribute selections, reducing the set of attributes, setting the output coordinate system or even reprojecting the features during translation.
+This program can be used to convert simple features data between file formats performing various operations during the process such as spatial or attribute selections, reducing the set of attributes, setting the output coordinate system or even reprojecting the features during translation\&.
 .PP
 .IP "\fB\fB -f\fP\fI format_name\fP:\fP" 1c
 output file format name (default is ESRI Shapefile), some possible values are: 
@@ -67,21 +64,21 @@ Delete the output layer and recreate it empty
 .IP "\fB\fB-update\fP:\fP" 1c
 Open existing output datasource in update mode rather than trying to create a new one 
 .IP "\fB\fB-select\fP\fI field_list\fP:\fP" 1c
-Comma-delimited list of fields from input layer to copy to the new layer. A field is skipped if mentioned previously in the list even if the input layer has duplicate field names. (Defaults to all; any field is skipped if a subsequent field with same name is found.) Starting with OGR 1.11, geometry fields can also be specified in the list. 
+Comma-delimited list of fields from input layer to copy to the new layer\&. A field is skipped if mentioned previously in the list even if the input layer has duplicate field names\&. (Defaults to all; any field is skipped if a subsequent field with same name is found\&.) Starting with OGR 1\&.11, geometry fields can also be specified in the list\&. 
 .IP "\fB\fB-progress\fP:\fP" 1c
-(starting with GDAL 1.7.0) Display progress on terminal. Only works if input layers have the 'fast feature count' capability. 
+(starting with GDAL 1\&.7\&.0) Display progress on terminal\&. Only works if input layers have the 'fast feature count' capability\&. 
 .IP "\fB\fB-sql\fP \fIsql_statement\fP:\fP" 1c
-SQL statement to execute. The resulting table/layer will be saved to the output. 
+SQL statement to execute\&. The resulting table/layer will be saved to the output\&. 
 .IP "\fB\fB-dialect\fP \fIdialect\fP:\fP" 1c
-SQL dialect. In some cases can be used to use (unoptimized) OGR SQL instead of the native SQL of an RDBMS by passing OGRSQL. Starting with GDAL 1.10, the 'SQLITE' dialect can also be used with any datasource. 
+SQL dialect\&. In some cases can be used to use (unoptimized) OGR SQL instead of the native SQL of an RDBMS by passing OGRSQL\&. Starting with GDAL 1\&.10, the 'SQLITE' dialect can also be used with any datasource\&. 
 .IP "\fB\fB-where\fP\fI restricted_where\fP:\fP" 1c
 Attribute query (like SQL WHERE) 
 .IP "\fB\fB-skipfailures\fP:\fP" 1c
-Continue after a failure, skipping the failed feature. 
+Continue after a failure, skipping the failed feature\&. 
 .IP "\fB\fB-spat\fP\fI xmin ymin xmax ymax\fP:\fP" 1c
-spatial query extents. Only features whose geometry intersects the extents will be selected. The geometries will not be clipped unless -clipsrc is specified 
+spatial query extents\&. Only features whose geometry intersects the extents will be selected\&. The geometries will not be clipped unless -clipsrc is specified 
 .IP "\fB\fB-geomfield\fP \fIfield\fP:\fP" 1c
-(OGR >= 1.11) Name of the geometry field on which the spatial filter operates on. 
+(OGR >= 1\&.11) Name of the geometry field on which the spatial filter operates on\&. 
 .IP "\fB\fB-dsco\fP \fINAME=VALUE\fP:\fP" 1c
 Dataset creation option (format specific) 
 .IP "\fB\fB-lco\fP\fI NAME=VALUE\fP:\fP" 1c
@@ -89,9 +86,9 @@ Layer creation option (format specific)
 .IP "\fB\fB-nln\fP\fI name\fP:\fP" 1c
 Assign an alternate name to the new layer 
 .IP "\fB\fB-nlt\fP\fI type\fP:\fP" 1c
-Define the geometry type for the created layer. One of NONE, GEOMETRY, POINT, LINESTRING, POLYGON, GEOMETRYCOLLECTION, MULTIPOINT, MULTIPOLYGON or MULTILINESTRING. Add '25D' to the name to get 2.5D versions. Starting with GDAL 1.10, PROMOTE_TO_MULTI can be used to automatically promote layers that mix polygon or multipolygons to multipolygons, and layers that mix linestrings or multilinestrings to multilinestrings. Can be usefull when converting shapefiles to PostGIS (and other target dr [...]
+Define the geometry type for the created layer\&. One of NONE, GEOMETRY, POINT, LINESTRING, POLYGON, GEOMETRYCOLLECTION, MULTIPOINT, MULTIPOLYGON or MULTILINESTRING\&. Add '25D' to the name to get 2\&.5D versions\&. Starting with GDAL 1\&.10, PROMOTE_TO_MULTI can be used to automatically promote layers that mix polygon or multipolygons to multipolygons, and layers that mix linestrings or multilinestrings to multilinestrings\&. Can be usefull when converting shapefiles to PostGIS (and oth [...]
 .IP "\fB\fB-dim\fP\fI val\fP:\fP" 1c
-(starting with GDAL 1.10) Force the coordinate dimension to val (valid values are 2 or 3). This affects both the layer geometry type, and feature geometries. Starting with GDAL 1.11, the value can be set to 'layer_dim' to instruct feature geometries to be promoted to the coordinate dimension declared by the layer.  
+(starting with GDAL 1\&.10) Force the coordinate dimension to val (valid values are 2 or 3)\&. This affects both the layer geometry type, and feature geometries\&. Starting with GDAL 1\&.11, the value can be set to 'layer_dim' to instruct feature geometries to be promoted to the coordinate dimension declared by the layer\&.  
 .IP "\fB\fB-a_srs\fP\fI srs_def\fP:\fP" 1c
 Assign an output SRS 
 .IP "\fB\fB-t_srs\fP\fI srs_def\fP:\fP" 1c
@@ -99,71 +96,71 @@ Reproject/transform to this SRS on output
 .IP "\fB\fB-s_srs\fP\fI srs_def\fP:\fP" 1c
 Override source SRS 
 .IP "\fB\fB-preserve_fid\fP:\fP" 1c
-Use the FID of the source features instead of letting the output driver to automatically assign a new one. 
+Use the FID of the source features instead of letting the output driver to automatically assign a new one\&. 
 .IP "\fB\fB-fid\fP \fIfid\fP:\fP" 1c
-If provided, only the feature with this feature id will be reported. Operates exclusive of the spatial or attribute queries. Note: if you want to select several features based on their feature id, you can also use the fact the 'fid' is a special field recognized by OGR SQL. So, '-where 'fid in (1,3,5)'' would select features 1, 3 and 5. 
+If provided, only the feature with this feature id will be reported\&. Operates exclusive of the spatial or attribute queries\&. Note: if you want to select several features based on their feature id, you can also use the fact the 'fid' is a special field recognized by OGR SQL\&. So, '-where 'fid in (1,3,5)'' would select features 1, 3 and 5\&. 
 .PP
 .PP
-Srs_def can be a full WKT definition (hard to escape properly), or a well known definition (ie. EPSG:4326) or a file with a WKT definition.
+Srs_def can be a full WKT definition (hard to escape properly), or a well known definition (ie\&. EPSG:4326) or a file with a WKT definition\&.
 .PP
 Advanced options :
 .PP
 .IP "\fB\fB-gt\fP \fIn\fP:\fP" 1c
-group \fIn\fP features per transaction (default 20000 in OGR 1.11, 200 in previous releases). Increase the value for better performance when writing into DBMS drivers that have transaction support. 
+group \fIn\fP features per transaction (default 20000 in OGR 1\&.11, 200 in previous releases)\&. Increase the value for better performance when writing into DBMS drivers that have transaction support\&. 
 .IP "\fB\fB-clipsrc\fP\fI [xmin ymin xmax ymax]|WKT|datasource|spat_extent\fP: \fP" 1c
-(starting with GDAL 1.7.0) clip geometries to the specified bounding box (expressed in source SRS), WKT geometry (POLYGON or MULTIPOLYGON), from a datasource or to the spatial extent of the \fB-spat\fP option if you use the \fIspat_extent\fP keyword. When specifying a datasource, you will generally want to use it in combination of the \fB-clipsrclayer\fP, \fB-clipsrcwhere\fP or \fB-clipsrcsql\fP options 
+(starting with GDAL 1\&.7\&.0) clip geometries to the specified bounding box (expressed in source SRS), WKT geometry (POLYGON or MULTIPOLYGON), from a datasource or to the spatial extent of the \fB-spat\fP option if you use the \fIspat_extent\fP keyword\&. When specifying a datasource, you will generally want to use it in combination of the \fB-clipsrclayer\fP, \fB-clipsrcwhere\fP or \fB-clipsrcsql\fP options 
 .IP "\fB\fB-clipsrcsql\fP \fIsql_statement\fP:\fP" 1c
-Select desired geometries using an SQL query instead. 
+Select desired geometries using an SQL query instead\&. 
 .IP "\fB\fB-clipsrclayer\fP \fIlayername\fP:\fP" 1c
-Select the named layer from the source clip datasource. 
+Select the named layer from the source clip datasource\&. 
 .IP "\fB\fB-clipsrcwhere\fP \fIexpression\fP:\fP" 1c
-Restrict desired geometries based on attribute query. 
+Restrict desired geometries based on attribute query\&. 
 .IP "\fB\fB-clipdst\fP\fI xmin ymin xmax ymax\fP:\fP" 1c
-(starting with GDAL 1.7.0) clip geometries after reprojection to the specified bounding box (expressed in dest SRS), WKT geometry (POLYGON or MULTIPOLYGON) or from a datasource. When specifying a datasource, you will generally want to use it in combination of the -clipdstlayer, -clipdstwhere or -clipdstsql options 
+(starting with GDAL 1\&.7\&.0) clip geometries after reprojection to the specified bounding box (expressed in dest SRS), WKT geometry (POLYGON or MULTIPOLYGON) or from a datasource\&. When specifying a datasource, you will generally want to use it in combination of the -clipdstlayer, -clipdstwhere or -clipdstsql options 
 .IP "\fB\fB-clipdstsql\fP \fIsql_statement\fP:\fP" 1c
-Select desired geometries using an SQL query instead. 
+Select desired geometries using an SQL query instead\&. 
 .IP "\fB\fB-clipdstlayer\fP \fIlayername\fP:\fP" 1c
-Select the named layer from the destination clip datasource. 
+Select the named layer from the destination clip datasource\&. 
 .IP "\fB\fB-clipdstwhere\fP \fIexpression\fP:\fP" 1c
-Restrict desired geometries based on attribute query. 
+Restrict desired geometries based on attribute query\&. 
 .IP "\fB\fB-wrapdateline\fP:\fP" 1c
-(starting with GDAL 1.7.0) split geometries crossing the dateline meridian (long. = +/- 180deg) 
+(starting with GDAL 1\&.7\&.0) split geometries crossing the dateline meridian (long\&. = +/- 180deg) 
 .IP "\fB\fB-datelineoffset\fP:\fP" 1c
-(starting with GDAL 1.10) offset from dateline in degrees (default long. = +/- 10deg, geometries within 170deg to -170deg will be splited) 
+(starting with GDAL 1\&.10) offset from dateline in degrees (default long\&. = +/- 10deg, geometries within 170deg to -170deg will be splited) 
 .IP "\fB\fB-simplify\fP\fI tolerance\fP:\fP" 1c
-(starting with GDAL 1.9.0) distance tolerance for simplification. Note: the algorithm used preserves topology per feature, in particular for polygon geometries, but not for a whole layer. 
+(starting with GDAL 1\&.9\&.0) distance tolerance for simplification\&. Note: the algorithm used preserves topology per feature, in particular for polygon geometries, but not for a whole layer\&. 
 .IP "\fB\fB-segmentize\fP\fI max_dist\fP:\fP" 1c
-(starting with GDAL 1.6.0) maximum distance between 2 nodes. Used to create intermediate points 
-.IP "\fB\fB-fieldTypeToString\fP\fI type1, ...\fP:\fP" 1c
-(starting with GDAL 1.7.0) converts any field of the specified type to a field of type string in the destination layer. Valid types are : Integer, Real, String, Date, Time, DateTime, Binary, IntegerList, RealList, StringList. Special value \fBAll\fP can be used to convert all fields to strings. This is an alternate way to using the CAST operator of OGR SQL, that may avoid typing a long SQL query. 
+(starting with GDAL 1\&.6\&.0) maximum distance between 2 nodes\&. Used to create intermediate points 
+.IP "\fB\fB-fieldTypeToString\fP\fI type1, \&.\&.\&.\fP:\fP" 1c
+(starting with GDAL 1\&.7\&.0) converts any field of the specified type to a field of type string in the destination layer\&. Valid types are : Integer, Real, String, Date, Time, DateTime, Binary, IntegerList, RealList, StringList\&. Special value \fBAll\fP can be used to convert all fields to strings\&. This is an alternate way to using the CAST operator of OGR SQL, that may avoid typing a long SQL query\&. 
 .IP "\fB\fB-unsetFieldWidth\fP:\fP" 1c
-(starting with GDAL 1.11) set field width and precision to 0. 
+(starting with GDAL 1\&.11) set field width and precision to 0\&. 
 .IP "\fB\fB-splitlistfields\fP:\fP" 1c
-(starting with GDAL 1.8.0) split fields of type StringList, RealList or IntegerList into as many fields of type String, Real or Integer as necessary. 
+(starting with GDAL 1\&.8\&.0) split fields of type StringList, RealList or IntegerList into as many fields of type String, Real or Integer as necessary\&. 
 .IP "\fB\fB-maxsubfields\fP \fIval\fP:\fP" 1c
-To be combined with -splitlistfields to limit the number of subfields created for each split field. 
+To be combined with -splitlistfields to limit the number of subfields created for each split field\&. 
 .IP "\fB\fB-explodecollections\fP:\fP" 1c
-(starting with GDAL 1.8.0) produce one feature for each geometry in any kind of geometry collection in the source file 
+(starting with GDAL 1\&.8\&.0) produce one feature for each geometry in any kind of geometry collection in the source file 
 .IP "\fB\fB-zfield\fP \fIfield_name\fP:\fP" 1c
-(starting with GDAL 1.8.0) Uses the specified field to fill the Z coordinate of geometries 
+(starting with GDAL 1\&.8\&.0) Uses the specified field to fill the Z coordinate of geometries 
 .IP "\fB\fB-gcp\fP \fIungeoref_x ungeoref_y georef_x georef_y elevation\fP:\fP" 1c
-(starting with GDAL 1.10.0) Add the indicated ground control point. This option may be provided multiple times to provide a set of GCPs.  
+(starting with GDAL 1\&.10\&.0) Add the indicated ground control point\&. This option may be provided multiple times to provide a set of GCPs\&.  
 .IP "\fB\fB-order\fP \fIn\fP:\fP" 1c
-(starting with GDAL 1.10.0) order of polynomial used for warping (1 to 3). The default is to select a polynomial order based on the number of GCPs. 
+(starting with GDAL 1\&.10\&.0) order of polynomial used for warping (1 to 3)\&. The default is to select a polynomial order based on the number of GCPs\&. 
 .IP "\fB\fB-tps\fP:\fP" 1c
-(starting with GDAL 1.10.0) Force use of thin plate spline transformer based on available GCPs. 
+(starting with GDAL 1\&.10\&.0) Force use of thin plate spline transformer based on available GCPs\&. 
 .IP "\fB\fB-fieldmap\fP:\fP" 1c
-(starting with GDAL 1.10.0) Specifies the list of field indexes to be copied from the source to the destination. The (n)th value specified in the list is the index of the field in the target layer definition in which the n(th) field of the source layer must be copied. Index count starts at zero. There must be exactly as many values in the list as the count of the fields in the source layer. We can use the 'identity' setting to specify that the fields should be transferred by using the sa [...]
+(starting with GDAL 1\&.10\&.0) Specifies the list of field indexes to be copied from the source to the destination\&. The (n)th value specified in the list is the index of the field in the target layer definition in which the n(th) field of the source layer must be copied\&. Index count starts at zero\&. There must be exactly as many values in the list as the count of the fields in the source layer\&. We can use the 'identity' setting to specify that the fields should be transferred by  [...]
 .IP "\fB\fB-addfields\fP:\fP" 1c
-(starting with GDAL 1.11) This is a specialized version of -append. Contrary to -append, -addfields has the effect of adding, to existing target layers, the new fields found in source layers. This option is usefull when merging files that have non-strictly identical structures. This might not work for output formats that don't support adding fields to existing non-empty layers. 
+(starting with GDAL 1\&.11) This is a specialized version of -append\&. Contrary to -append, -addfields has the effect of adding, to existing target layers, the new fields found in source layers\&. This option is usefull when merging files that have non-strictly identical structures\&. This might not work for output formats that don't support adding fields to existing non-empty layers\&. 
 .PP
 .SH "PERFORMANCE HINTS"
 .PP
-When writing into transactional DBMS (SQLite/PostgreSQL,MySQL, etc...), it might be beneficial to increase the number of INSERT statements executed between BEGIN TRANSACTION and COMMIT TRANSACTION statements. This number is specified with the -gt option. For example, for SQLite, explicitly defining \fB-gt 65536\fP ensures optimal performance while populating some table containing many hundredth thousand or million rows. However, note that if there are failed insertions, the scope of -ski [...]
+When writing into transactional DBMS (SQLite/PostgreSQL,MySQL, etc\&.\&.\&.), it might be beneficial to increase the number of INSERT statements executed between BEGIN TRANSACTION and COMMIT TRANSACTION statements\&. This number is specified with the -gt option\&. For example, for SQLite, explicitly defining \fB-gt 65536\fP ensures optimal performance while populating some table containing many hundredth thousand or million rows\&. However, note that if there are failed insertions, the s [...]
 .PP
-For PostgreSQL, the PG_USE_COPY config option can be set to YES for significantly insertion performance boot. See the PG driver documentation page.
+For PostgreSQL, the PG_USE_COPY config option can be set to YES for significantly insertion performance boot\&. See the PG driver documentation page\&.
 .PP
-More generally, consult the documentation page of the input and output drivers for performance hints.
+More generally, consult the documentation page of the input and output drivers for performance hints\&.
 .SH "EXAMPLE"
 .PP
 Example appending to an existing layer (both flags need to be used):
@@ -184,7 +181,7 @@ Example reprojecting from ETRS_1989_LAEA_52N_10E to EPSG:4326 and clipping to a
 .fi
 .PP
 .PP
-Example for using the -fieldmap setting. The first field of the source layer is used to fill the third field (index 2 = third field) of the target layer, the second field of the source layer is ignored, the third field of the source layer used to fill the fifth field of the target layer.
+Example for using the -fieldmap setting\&. The first field of the source layer is used to fill the third field (index 2 = third field) of the target layer, the second field of the source layer is ignored, the third field of the source layer used to fill the fifth field of the target layer\&.
 .PP
 .PP
 .nf
@@ -193,7 +190,7 @@ Example for using the -fieldmap setting. The first field of the source layer is
 .fi
 .PP
 .PP
-More examples are given in the individual format pages.
+More examples are given in the individual format pages\&.
 .SH "AUTHOR"
 .PP
 Frank Warmerdam <warmerdam at pobox.com>, Silke Reimer <silke at intevation.de> 
diff --git a/man/man1/ogr_utilities.1 b/man/man1/ogr_utilities.1
index 7b3daf1..5778516 100644
--- a/man/man1/ogr_utilities.1
+++ b/man/man1/ogr_utilities.1
@@ -1,12 +1,9 @@
-.TH "ogr_utilities" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "ogr_utilities" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-ogr_utilities \- .TH "ogr_utilities" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-ogr_utilities \- The following utilities are distributed as part of the OGR Simple Features toolkit:
+ogr_utilities \- OGR Utility Programs 
+The following utilities are distributed as part of the OGR Simple Features toolkit:
 .PP
 .PD 0
 .IP "\(bu" 2
diff --git a/man/man1/ogrinfo.1 b/man/man1/ogrinfo.1
index 8398f67..f60c4fa 100644
--- a/man/man1/ogrinfo.1
+++ b/man/man1/ogrinfo.1
@@ -1,12 +1,9 @@
-.TH "ogrinfo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "ogrinfo" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-ogrinfo \- .TH "ogrinfo" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-ogrinfo \- lists information about an OGR supported data source
+ogrinfo \- ogrinfo 
+lists information about an OGR supported data source
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -23,43 +20,43 @@ ogrinfo [--help-general] [-ro] [-q] [-where restricted_where]
 .PP
 .SH "DESCRIPTION"
 .PP
-The ogrinfo program lists various information about an OGR supported data source to stdout (the terminal).
+The ogrinfo program lists various information about an OGR supported data source to stdout (the terminal)\&.
 .PP
 .IP "\fB\fB-ro\fP:\fP" 1c
-Open the data source in read-only mode.  
+Open the data source in read-only mode\&.  
 .IP "\fB\fB-al\fP:\fP" 1c
-List all features of all layers (used instead of having to give layer names as arguments). 
+List all features of all layers (used instead of having to give layer names as arguments)\&. 
 .IP "\fB\fB-so\fP:\fP" 1c
-Summary Only: supress listing of features, show only the summary information like projection, schema, feature count and extents. 
+Summary Only: supress listing of features, show only the summary information like projection, schema, feature count and extents\&. 
 .IP "\fB\fB-q\fP:\fP" 1c
-Quiet verbose reporting of various information, including coordinate system, layer schema, extents, and feature count.  
+Quiet verbose reporting of various information, including coordinate system, layer schema, extents, and feature count\&.  
 .IP "\fB\fB-where\fP \fIrestricted_where\fP:\fP" 1c
-An attribute query in a restricted form of the queries used in the SQL WHERE statement. Only features matching the attribute query will be reported. 
+An attribute query in a restricted form of the queries used in the SQL WHERE statement\&. Only features matching the attribute query will be reported\&. 
 .IP "\fB\fB-sql\fP \fIstatement\fP:\fP" 1c
-Execute the indicated SQL statement and return the result. 
+Execute the indicated SQL statement and return the result\&. 
 .IP "\fB\fB-dialect\fP \fIdialect\fP:\fP" 1c
-SQL dialect. In some cases can be used to use (unoptimized) OGR SQL instead of the native SQL of an RDBMS by passing OGRSQL. Starting with GDAL 1.10, the 'SQLITE' dialect can also be used with any datasource. 
+SQL dialect\&. In some cases can be used to use (unoptimized) OGR SQL instead of the native SQL of an RDBMS by passing OGRSQL\&. Starting with GDAL 1\&.10, the 'SQLITE' dialect can also be used with any datasource\&. 
 .IP "\fB\fB-spat\fP \fIxmin ymin xmax ymax\fP:\fP" 1c
-The area of interest. Only features within the rectangle will be reported. 
+The area of interest\&. Only features within the rectangle will be reported\&. 
 .IP "\fB\fB-geomfield\fP \fIfield\fP:\fP" 1c
-(OGR >= 1.11) Name of the geometry field on which the spatial filter operates on. 
+(OGR >= 1\&.11) Name of the geometry field on which the spatial filter operates on\&. 
 .IP "\fB\fB-fid\fP \fIfid\fP:\fP" 1c
-If provided, only the feature with this feature id will be reported. Operates exclusive of the spatial or attribute queries. Note: if you want to select several features based on their feature id, you can also use the fact the 'fid' is a special field recognized by OGR SQL. So, '-where 'fid in (1,3,5)'' would select features 1, 3 and 5. 
+If provided, only the feature with this feature id will be reported\&. Operates exclusive of the spatial or attribute queries\&. Note: if you want to select several features based on their feature id, you can also use the fact the 'fid' is a special field recognized by OGR SQL\&. So, '-where 'fid in (1,3,5)'' would select features 1, 3 and 5\&. 
 .IP "\fB\fB-fields\fP={YES/NO}:\fP" 1c
-(starting with GDAL 1.6.0) If set to NO, the feature dump will not display field values. Default value is YES. 
+(starting with GDAL 1\&.6\&.0) If set to NO, the feature dump will not display field values\&. Default value is YES\&. 
 .IP "\fB\fB-geom\fP={YES/NO/SUMMARY}:\fP" 1c
-(starting with GDAL 1.6.0) If set to NO, the feature dump will not display the geometry. If set to SUMMARY, only a summary of the geometry will be displayed. If set to YES, the geometry will be reported in full OGC WKT format. Default value is YES. 
+(starting with GDAL 1\&.6\&.0) If set to NO, the feature dump will not display the geometry\&. If set to SUMMARY, only a summary of the geometry will be displayed\&. If set to YES, the geometry will be reported in full OGC WKT format\&. Default value is YES\&. 
 .IP "\fB\fB--formats\fP:\fP" 1c
-List the format drivers that are enabled. 
+List the format drivers that are enabled\&. 
 .IP "\fB\fIdatasource_name\fP:\fP" 1c
-The data source to open. May be a filename, directory or other virtual name. See the \fCOGR Vector Formats\fP list for supported datasources. 
+The data source to open\&. May be a filename, directory or other virtual name\&. See the \fCOGR Vector Formats\fP list for supported datasources\&. 
 .IP "\fB\fIlayer\fP:\fP" 1c
-One or more layer names may be reported. 
+One or more layer names may be reported\&. 
 .PP
 .PP
-If no layer names are passed then ogrinfo will report a list of available layers (and their layerwide geometry type). If layer name(s) are given then their extents, coordinate system, feature count, geometry type, schema and all features matching query parameters will be reported to the terminal. If no query parameters are provided, all features are reported.
+If no layer names are passed then ogrinfo will report a list of available layers (and their layerwide geometry type)\&. If layer name(s) are given then their extents, coordinate system, feature count, geometry type, schema and all features matching query parameters will be reported to the terminal\&. If no query parameters are provided, all features are reported\&.
 .PP
-Geometries are reported in OGC WKT format.
+Geometries are reported in OGC WKT format\&.
 .SH "EXAMPLE"
 .PP
 Example reporting all layers in an NTF file: 
diff --git a/man/man1/ogrlineref.1 b/man/man1/ogrlineref.1
index 062c486..dc5f3ff 100644
--- a/man/man1/ogrlineref.1
+++ b/man/man1/ogrlineref.1
@@ -1,12 +1,9 @@
-.TH "ogrlineref" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "ogrlineref" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-ogrlineref \- .TH "ogrlineref" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-ogrlineref \- The utility can be used for:
+ogrlineref \- ogrlineref 
+The utility can be used for:
 .IP "\(bu" 2
 create linear reference file from input data
 .IP "\(bu" 2
@@ -36,34 +33,34 @@ ogrlineref [--help-general] [-progress] [-quiet]
 .PP
 .SH "DESCRIPTION"
 .PP
-The ogrlineref program can be used to create a linear reference - a file containing a segments of special length (e.g. 1 km in reference units) and get coordinates, linear referenced distances or sublines (subpaths) from this file. The utility not required the M or Z values in geometry. The results can be stored in any OGR supported format. Also some information writed to the stdout.
+The ogrlineref program can be used to create a linear reference - a file containing a segments of special length (e\&.g\&. 1 km in reference units) and get coordinates, linear referenced distances or sublines (subpaths) from this file\&. The utility not required the M or Z values in geometry\&. The results can be stored in any OGR supported format\&. Also some information writed to the stdout\&.
 .PP
 .IP "\fB\fB--help-general\fP:\fP" 1c
-Show the usage. 
+Show the usage\&. 
 .IP "\fB\fB-progress\fP:\fP" 1c
-Show progress. 
+Show progress\&. 
 .IP "\fB\fB-quiet\fP:\fP" 1c
-Supress all messages except errors and results. 
+Supress all messages except errors and results\&. 
 .IP "\fB\fB-f\fP \fIformat_name\fP:\fP" 1c
-Select an output format name. The default is to create a shapefile. 
+Select an output format name\&. The default is to create a shapefile\&. 
 .IP "\fB\fB-dsco\fP \fINAME=VALUE\fP:\fP" 1c
 Dataset creation option (format specific) 
 .IP "\fB\fB-lco\fP\fI NAME=VALUE\fP:\fP" 1c
 Layer creation option (format specific) 
 .IP "\fB\fB-create\fP:\fP" 1c
-Create the linear reference file (linestring of parts). 
+Create the linear reference file (linestring of parts)\&. 
 .IP "\fB\fB-l\fP\fIsrc_line_datasource_name\fP:\fP" 1c
-The path to input linestring datasource (e.g. the road) 
+The path to input linestring datasource (e\&.g\&. the road) 
 .IP "\fB\fB-ln\fP\fIlayer_name\fP:\fP" 1c
 The layer name in datasource 
 .IP "\fB\fB-lf\fP\fIfield_name\fP:\fP" 1c
-The field name of uniq values to separate the input lines (e.g. the set of roads) 
+The field name of uniq values to separate the input lines (e\&.g\&. the set of roads) 
 .IP "\fB\fB-p\fP\fIsrc_repers_datasource_name\fP:\fP" 1c
-The path to linear references points (e.g. the road mile-stones) 
+The path to linear references points (e\&.g\&. the road mile-stones) 
 .IP "\fB\fB-pn\fP\fIlayer_name\fP:\fP" 1c
 The layer name in datasource 
 .IP "\fB\fB-pm\fP\fIpos_field_name\fP:\fP" 1c
-The field name of distances along path (e.g. mile-stones values) 
+The field name of distances along path (e\&.g\&. mile-stones values) 
 .IP "\fB\fB-pf\fP\fIfield_name\fP:\fP" 1c
 The field name of uniq values to map input reference points to lines 
 .IP "\fB\fB-r\fP\fIsrc_parts_datasource_name\fP:\fP" 1c
@@ -97,7 +94,7 @@ The input end linear distance
 .PP
 .SH "EXAMPLE"
 .PP
-This example would create a shapefile (parts.shp) containing a data needed for linear referencing (1 km parts): 
+This example would create a shapefile (parts\&.shp) containing a data needed for linear referencing (1 km parts): 
 .PP
 .nf
 
diff --git a/man/man1/ogrtindex.1 b/man/man1/ogrtindex.1
index a40c7e9..3e24f32 100644
--- a/man/man1/ogrtindex.1
+++ b/man/man1/ogrtindex.1
@@ -1,12 +1,9 @@
-.TH "ogrtindex" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "ogrtindex" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-ogrtindex \- .TH "ogrtindex" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-ogrtindex \- creates a tileindex
+ogrtindex \- ogrtindex 
+creates a tileindex
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -20,30 +17,30 @@ ogrtindex [-lnum n]... [-lname name]... [-f output_format]
 .PP
 .SH "DESCRIPTION"
 .PP
-The ogrtindex program can be used to create a tileindex - a file containing a list of the identities of a bunch of other files along with there spatial extents. This is primarily intended to be used with \fCMapServer\fP for tiled access to layers using the OGR connection type.
+The ogrtindex program can be used to create a tileindex - a file containing a list of the identities of a bunch of other files along with there spatial extents\&. This is primarily intended to be used with \fCMapServer\fP for tiled access to layers using the OGR connection type\&.
 .PP
 .IP "\fB\fB-lnum\fP \fIn\fP:\fP" 1c
-Add layer number 'n' from each source file in the tile index. 
+Add layer number 'n' from each source file in the tile index\&. 
 .IP "\fB\fB-lname\fP \fIname\fP:\fP" 1c
-Add the layer named 'name' from each source file in the tile index. 
+Add the layer named 'name' from each source file in the tile index\&. 
 .IP "\fB\fB-f\fP \fIoutput_format\fP:\fP" 1c
-Select an output format name. The default is to create a shapefile. 
+Select an output format name\&. The default is to create a shapefile\&. 
 .IP "\fB\fB-tileindex\fP \fIfield_name\fP:\fP" 1c
-The name to use for the dataset name. Defaults to LOCATION. 
+The name to use for the dataset name\&. Defaults to LOCATION\&. 
 .IP "\fB\fB-write_absolute_path\fP:\fP" 1c
 Filenames are written with absolute paths 
 .IP "\fB\fB-skip_different_projection\fP:\fP" 1c
-Only layers with same projection ref as layers already inserted in the tileindex will be inserted. 
+Only layers with same projection ref as layers already inserted in the tileindex will be inserted\&. 
 .PP
 .PP
-If no -lnum or -lname arguments are given it is assumed that all layers in source datasets should be added to the tile index as independent records.
+If no -lnum or -lname arguments are given it is assumed that all layers in source datasets should be added to the tile index as independent records\&.
 .PP
-If the tile index already exists it will be appended to, otherwise it will be created.
+If the tile index already exists it will be appended to, otherwise it will be created\&.
 .PP
-It is a flaw of the current ogrtindex program that no attempt is made to copy the coordinate system definition from the source datasets to the tile index (as is expected by MapServer when PROJECTION AUTO is in use).
+It is a flaw of the current ogrtindex program that no attempt is made to copy the coordinate system definition from the source datasets to the tile index (as is expected by MapServer when PROJECTION AUTO is in use)\&.
 .SH "EXAMPLE"
 .PP
-This example would create a shapefile (tindex.shp) containing a tile index of the BL2000_LINK layers in all the NTF files in the wrk directory: 
+This example would create a shapefile (tindex\&.shp) containing a tile index of the BL2000_LINK layers in all the NTF files in the wrk directory: 
 .PP
 .nf
 
diff --git a/man/man1/pct2rgb.1 b/man/man1/pct2rgb.1
index 84c3faa..9817f45 100644
--- a/man/man1/pct2rgb.1
+++ b/man/man1/pct2rgb.1
@@ -1,12 +1,9 @@
-.TH "pct2rgb" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "pct2rgb" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-pct2rgb \- .TH "pct2rgb" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-pct2rgb \- Convert an 8bit paletted image to 24bit RGB
+pct2rgb \- pct2rgb\&.py 
+Convert an 8bit paletted image to 24bit RGB
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -18,23 +15,23 @@ pct2rgb.py [-of format] [-b band] [-rgba] source_file dest_file
 .PP
 .SH "DESCRIPTION"
 .PP
-This utility will convert a pseudocolor band on the input file into an output RGB file of the desired format.
+This utility will convert a pseudocolor band on the input file into an output RGB file of the desired format\&.
 .PP
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Format to generated (defaults to GeoTIFF). 
+Format to generated (defaults to GeoTIFF)\&. 
 .IP "\fB\fB-b\fP \fIband\fP:\fP" 1c
-Band to convert to RGB, defaults to 1. 
+Band to convert to RGB, defaults to 1\&. 
 .IP "\fB\fB-rgba:\fP\fP" 1c
-Generate a RGBA file (instead of a RGB file by default). 
+Generate a RGBA file (instead of a RGB file by default)\&. 
 .IP "\fB\fIsource_file\fP:\fP" 1c
-The input file.  
+The input file\&.  
 .IP "\fB\fIdest_file\fP:\fP" 1c
-The output RGB file that will be created. 
+The output RGB file that will be created\&. 
 .PP
 .PP
-NOTE: pct2rgb.py is a Python script, and will only work if GDAL was built with Python support.
+NOTE: pct2rgb\&.py is a Python script, and will only work if GDAL was built with Python support\&.
 .PP
-The new '-expand rgb|rgba' option of gdal_translate obsoletes that utility.
+The new '-expand rgb|rgba' option of gdal_translate obsoletes that utility\&.
 .SH "AUTHORS"
 .PP
 Frank Warmerdam <warmerdam at pobox.com>, Silke Reimer <silke at intevation.de> 
diff --git a/man/man1/rgb2pct.1 b/man/man1/rgb2pct.1
index fdb9ce6..a464633 100644
--- a/man/man1/rgb2pct.1
+++ b/man/man1/rgb2pct.1
@@ -1,12 +1,9 @@
-.TH "rgb2pct" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
+.TH "rgb2pct" 1 "Tue Sep 15 2015" "GDAL" \" -*- nroff -*-
 .ad l
 .nh
 .SH NAME
-rgb2pct \- .TH "rgb2pct" 1 "Tue Feb 10 2015" "GDAL" \" -*- nroff -*-
-.ad l
-.nh
-.SH NAME
-rgb2pct \- Convert a 24bit RGB image to 8bit paletted
+rgb2pct \- rgb2pct\&.py 
+Convert a 24bit RGB image to 8bit paletted
 .SH "SYNOPSIS"
 .PP
 .PP
@@ -17,24 +14,24 @@ rgb2pct.py [-n colors | -pct palette_file] [-of format] source_file dest_file
 .PP
 .SH "DESCRIPTION"
 .PP
-This utility will compute an optimal pseudo-color table for a given RGB image using a median cut algorithm on a downsampled RGB histogram. Then it converts the image into a pseudo-colored image using the color table. This conversion utilizes Floyd-Steinberg dithering (error diffusion) to maximize output image visual quality.
+This utility will compute an optimal pseudo-color table for a given RGB image using a median cut algorithm on a downsampled RGB histogram\&. Then it converts the image into a pseudo-colored image using the color table\&. This conversion utilizes Floyd-Steinberg dithering (error diffusion) to maximize output image visual quality\&.
 .PP
 .IP "\fB\fB-n\fP \fIcolors\fP:\fP" 1c
-Select the number of colors in the generated color table. Defaults to 256. Must be between 2 and 256.  
+Select the number of colors in the generated color table\&. Defaults to 256\&. Must be between 2 and 256\&.  
 .IP "\fB\fB-pct\fP \fIpalette_file\fP:\fP" 1c
-Extract the color table from \fIpalette_file\fP instead of computing it. Can be used to have a consistent color table for multiple files. The \fIpalette_file\fP must be a raster file in a GDAL supported format with a palette. 
+Extract the color table from \fIpalette_file\fP instead of computing it\&. Can be used to have a consistent color table for multiple files\&. The \fIpalette_file\fP must be a raster file in a GDAL supported format with a palette\&. 
 .IP "\fB\fB-of\fP \fIformat\fP:\fP" 1c
-Format to generated (defaults to GeoTIFF). Same semantics as the \fB-of\fP flag for gdal_translate. Only output formats supporting pseudocolor tables should be used.  
+Format to generated (defaults to GeoTIFF)\&. Same semantics as the \fB-of\fP flag for gdal_translate\&. Only output formats supporting pseudocolor tables should be used\&.  
 .IP "\fB\fIsource_file\fP:\fP" 1c
-The input RGB file.  
+The input RGB file\&.  
 .IP "\fB\fIdest_file\fP:\fP" 1c
-The output pseudo-colored file that will be created. 
+The output pseudo-colored file that will be created\&. 
 .PP
 .PP
-NOTE: rgb2pct.py is a Python script, and will only work if GDAL was built with Python support.
+NOTE: rgb2pct\&.py is a Python script, and will only work if GDAL was built with Python support\&.
 .SH "EXAMPLE"
 .PP
-If it is desired to hand create the palette, likely the simplest text format is the GDAL VRT format. In the following example a VRT was created in a text editor with a small 4 color palette with the RGBA colors 238/238/238/255, 237/237/237/255, 236/236/236/255 and 229/229/229/255.
+If it is desired to hand create the palette, likely the simplest text format is the GDAL VRT format\&. In the following example a VRT was created in a text editor with a small 4 color palette with the RGBA colors 238/238/238/255, 237/237/237/255, 236/236/236/255 and 229/229/229/255\&.
 .PP
 .PP
 .nf
diff --git a/ogr/ogrsf_frmts/edigeo/ogredigeodatasource.cpp b/ogr/ogrsf_frmts/edigeo/ogredigeodatasource.cpp
index b4669e5..69003ea 100644
--- a/ogr/ogrsf_frmts/edigeo/ogredigeodatasource.cpp
+++ b/ogr/ogrsf_frmts/edigeo/ogredigeodatasource.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogredigeodatasource.cpp 27729 2014-09-24 00:40:16Z goatbar $
+ * $Id: ogredigeodatasource.cpp 29641 2015-08-13 20:17:52Z rouault $
  *
  * Project:  EDIGEO Translator
  * Purpose:  Implements OGREDIGEODataSource class
@@ -31,7 +31,7 @@
 #include "cpl_conv.h"
 #include "cpl_string.h"
 
-CPL_CVSID("$Id: ogredigeodatasource.cpp 27729 2014-09-24 00:40:16Z goatbar $");
+CPL_CVSID("$Id: ogredigeodatasource.cpp 29641 2015-08-13 20:17:52Z rouault $");
 
 #ifndef M_PI
 # define M_PI  3.1415926535897932384626433832795
@@ -61,7 +61,7 @@ OGREDIGEODataSource::OGREDIGEODataSource()
 
     iATR = iDI3 = iDI4 = iHEI = iFON = -1;
     iATR_VAL = iANGLE = iSIZE = iOBJ_LNK = iOBJ_LNK_LAYER = -1;
-    dfSizeFactor = atof(CPLGetConfigOption("OGR_EDIGEO_FONT_SIZE_FACTOR", "2"));
+    dfSizeFactor = CPLAtof(CPLGetConfigOption("OGR_EDIGEO_FONT_SIZE_FACTOR", "2"));
     if (dfSizeFactor <= 0 || dfSizeFactor >= 100)
         dfSizeFactor = 2;
 
@@ -316,10 +316,10 @@ int OGREDIGEODataSource::ReadGEN()
     if (CSLCount(papszTokens1) == 2 && CSLCount(papszTokens2) == 2)
     {
         bExtentValid = TRUE;
-        dfMinX = atof(papszTokens1[0]);
-        dfMinY = atof(papszTokens1[1]);
-        dfMaxX = atof(papszTokens2[0]);
-        dfMaxY = atof(papszTokens2[1]);
+        dfMinX = CPLAtof(papszTokens1[0]);
+        dfMinY = CPLAtof(papszTokens1[1]);
+        dfMaxX = CPLAtof(papszTokens2[0]);
+        dfMaxY = CPLAtof(papszTokens2[1]);
     }
     CSLDestroy(papszTokens1);
     CSLDestroy(papszTokens2);
@@ -771,8 +771,8 @@ skip_read_next_line:
             const char* pszY = strchr(pszLine+8, ';');
             if (pszY)
             {
-                double dfX = atof(pszLine + 8);
-                double dfY = atof(pszY + 1);
+                double dfX = CPLAtof(pszLine + 8);
+                double dfY = CPLAtof(pszY + 1);
                 aXY.push_back(xyPairType (dfX, dfY));
             }
         }
diff --git a/ogr/ogrsf_frmts/geojson/ogrgeojsondatasource.cpp b/ogr/ogrsf_frmts/geojson/ogrgeojsondatasource.cpp
index 46b2b4d..76b7d79 100644
--- a/ogr/ogrsf_frmts/geojson/ogrgeojsondatasource.cpp
+++ b/ogr/ogrsf_frmts/geojson/ogrgeojsondatasource.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogrgeojsondatasource.cpp 27044 2014-03-16 23:41:27Z rouault $
+ * $Id: ogrgeojsondatasource.cpp 28492 2015-02-15 14:16:35Z rouault $
  *
  * Project:  OpenGIS Simple Features Reference Implementation
  * Purpose:  Implementation of OGRGeoJSONDataSource class (OGR GeoJSON Driver).
@@ -217,7 +217,7 @@ OGRLayer* OGRGeoJSONDataSource::CreateLayer( const char* pszName_,
         const char* pszAuthority = poSRS->GetAuthorityName(NULL);
         const char* pszAuthorityCode = poSRS->GetAuthorityCode(NULL);
         if (pszAuthority != NULL && pszAuthorityCode != NULL &&
-            strcmp(pszAuthority, "EPSG") == 0)
+            EQUAL(pszAuthority, "EPSG"))
         {
             json_object* poObjCRS = json_object_new_object();
             json_object_object_add(poObjCRS, "type",
diff --git a/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp b/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
index 55c3f4e..be877be 100644
--- a/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
+++ b/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogrgeojsonreader.cpp 27741 2014-09-26 19:20:02Z goatbar $
+ * $Id: ogrgeojsonreader.cpp 28749 2015-03-20 13:42:01Z rouault $
  *
  * Project:  OpenGIS Simple Features Reference Implementation
  * Purpose:  Implementation of OGRGeoJSONReader class (OGR GeoJSON Driver).
@@ -702,7 +702,7 @@ OGRFeature* OGRGeoJSONReader::ReadFeature( OGRGeoJSONLayer* poLayer, json_object
             }
             else if( OFTReal == eType )
             {
-                poFeature->SetField( nField, CPLAtof(json_object_get_string(it.val)) );
+                poFeature->SetField( nField, json_object_get_double(it.val) );
             }
             else if( OFTIntegerList == eType )
             {
@@ -728,7 +728,7 @@ OGRFeature* OGRGeoJSONReader::ReadFeature( OGRGeoJSONLayer* poLayer, json_object
                     for(int i=0;i<nLength;i++)
                     {
                         json_object* poRow = json_object_array_get_idx(it.val, i);
-                        padfVal[i] = CPLAtof(json_object_get_string(poRow));
+                        padfVal[i] = json_object_get_double(poRow);
                     }
                     poFeature->SetField( nField, nLength, padfVal );
                     CPLFree(padfVal);
diff --git a/ogr/ogrsf_frmts/geojson/ogrtopojsonreader.cpp b/ogr/ogrsf_frmts/geojson/ogrtopojsonreader.cpp
index b558faa..4298774 100644
--- a/ogr/ogrsf_frmts/geojson/ogrtopojsonreader.cpp
+++ b/ogr/ogrsf_frmts/geojson/ogrtopojsonreader.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogrtopojsonreader.cpp 27268 2014-05-01 10:46:20Z rouault $
+ * $Id: ogrtopojsonreader.cpp 28887 2015-04-12 23:10:28Z rouault $
  *
  * Project:  OpenGIS Simple Features Reference Implementation
  * Purpose:  Implementation of OGRTopoJSONReader class
@@ -274,6 +274,8 @@ static void ParseObject(const char* pszId,
                         json_object* poArcsDB, ScalingParams* psParams)
 {
     json_object* poType = OGRGeoJSONFindMemberByName(poObj, "type");
+    if( poType == NULL || json_object_get_type(poType) != json_type_string )
+        return;
     const char* pszType = json_object_get_string(poType);
 
     json_object* poArcsObj = OGRGeoJSONFindMemberByName(poObj, "arcs");
diff --git a/ogr/ogrsf_frmts/gml/gmlhandler.cpp b/ogr/ogrsf_frmts/gml/gmlhandler.cpp
index bbbc117..78a336b 100644
--- a/ogr/ogrsf_frmts/gml/gmlhandler.cpp
+++ b/ogr/ogrsf_frmts/gml/gmlhandler.cpp
@@ -1,5 +1,5 @@
 /**********************************************************************
- * $Id: gmlhandler.cpp 27741 2014-09-26 19:20:02Z goatbar $
+ * $Id: gmlhandler.cpp 29218 2015-05-21 09:09:31Z rouault $
  *
  * Project:  GML Reader
  * Purpose:  Implementation of GMLHandler class.
@@ -788,25 +788,22 @@ void GMLHandler::DealWithAttributes(const char *pszName, int nLenName, void* att
         /* Hard-coded historic cases */
         else if( strcmp(pszAttrKey, "xlink:href") == 0 )
         {
-            if (m_bReportHref)
+            if( (m_bReportHref || m_poReader->ReportAllAttributes()) && m_bInCurField )
             {
-                if( m_bInCurField )
-                {
-                    CPLFree(m_pszHref);
-                    m_pszHref = pszAttrVal;
-                    pszAttrVal = NULL;
-                }
-                else if( !poClass->IsSchemaLocked() ||
-                         (nAttrIndex =
-                            m_poReader->GetAttributeElementIndex( CPLSPrintf("%s_href", pszName ),
-                                                      nLenName + 5 )) != -1 )
-                {
-                    poState->PushPath( pszName, nLenName );
-                    CPLString osPropNameHref = poState->osPath + "_href";
-                    poState->PopPath();
-                    m_poReader->SetFeaturePropertyDirectly( osPropNameHref, pszAttrVal, nAttrIndex );
-                    pszAttrVal = NULL;
-                }
+                CPLFree(m_pszHref);
+                m_pszHref = pszAttrVal;
+                pszAttrVal = NULL;
+            }
+            else if( (!poClass->IsSchemaLocked() && (m_bReportHref || m_poReader->ReportAllAttributes())) ||
+                        (poClass->IsSchemaLocked() && (nAttrIndex =
+                        m_poReader->GetAttributeElementIndex( CPLSPrintf("%s_href", pszName ),
+                                                    nLenName + 5 )) != -1) )
+            {
+                poState->PushPath( pszName, nLenName );
+                CPLString osPropNameHref = poState->osPath + "_href";
+                poState->PopPath();
+                m_poReader->SetFeaturePropertyDirectly( osPropNameHref, pszAttrVal, nAttrIndex );
+                pszAttrVal = NULL;
             }
         }
         else if( strcmp(pszAttrKey, "uom") == 0 )
@@ -834,8 +831,12 @@ void GMLHandler::DealWithAttributes(const char *pszName, int nLenName, void* att
         /* Should we report all attributes ? */
         else if( m_poReader->ReportAllAttributes() && !poClass->IsSchemaLocked() )
         {
+            poState->PushPath( pszName, nLenName );
+            CPLString osPropName = poState->osPath;
+            poState->PopPath();
+
             m_poReader->SetFeaturePropertyDirectly(
-                CPLSPrintf("%s@%s", pszName, pszAttrKeyNoNS ? pszAttrKeyNoNS : pszAttrKey),
+                CPLSPrintf("%s@%s", osPropName.c_str(), pszAttrKeyNoNS ? pszAttrKeyNoNS : pszAttrKey),
                 pszAttrVal, -1 );
             pszAttrVal = NULL;
         }
diff --git a/ogr/ogrsf_frmts/gml/parsexsd.cpp b/ogr/ogrsf_frmts/gml/parsexsd.cpp
index 6a7fd7a..030fba8 100644
--- a/ogr/ogrsf_frmts/gml/parsexsd.cpp
+++ b/ogr/ogrsf_frmts/gml/parsexsd.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: parsexsd.cpp 27132 2014-04-05 21:48:58Z rouault $
+ * $Id: parsexsd.cpp 29256 2015-05-27 20:45:11Z rouault $
  *
  * Project:  GML Reader
  * Purpose:  Implementation of GMLParseXSD()
@@ -322,6 +322,54 @@ GMLFeatureClass* GMLParseFeatureType(CPLXMLNode *psSchemaNode,
             delete poClass;
             return NULL;
         }
+        
+        /* Parse stuff like :
+        <xs:choice>
+            <xs:element ref="gml:polygonProperty"/>
+            <xs:element ref="gml:multiPolygonProperty"/>
+        </xs:choice>
+        as found in https://downloadagiv.blob.core.windows.net/overstromingsgebieden-en-oeverzones/2014_01/Overstromingsgebieden_en_oeverzones_2014_01_GML.zip
+        */
+        if( strcmp(psAttrDef->pszValue,"choice") == 0 )
+        {
+            CPLXMLNode* psChild = psAttrDef->psChild;
+            int bPolygon = FALSE;
+            int bMultiPolygon = FALSE;
+            for( ; psChild; psChild = psChild->psNext )
+            {
+                if( psChild->eType != CXT_Element )
+                    continue;
+                if( strcmp(psChild->pszValue,"element") == 0 )
+                {
+                    const char* pszRef = CPLGetXMLValue( psChild, "ref", NULL );
+                    if( pszRef != NULL )
+                    {
+                        if( strcmp(pszRef, "gml:polygonProperty") == 0 )
+                            bPolygon = TRUE;
+                        else if( strcmp(pszRef, "gml:multiPolygonProperty") == 0 )
+                            bMultiPolygon = TRUE;
+                        else
+                        {
+                            delete poClass;
+                            return NULL;
+                        }
+                    }
+                    else
+                    {
+                        delete poClass;
+                        return NULL;
+                    }
+                }
+            }
+            if( bPolygon && bMultiPolygon )
+            {
+                poClass->AddGeometryProperty( new GMLGeometryPropertyDefn(
+                    "", "", wkbMultiPolygon, nAttributeIndex ) );
+
+                nAttributeIndex ++;
+            }
+            continue;
+        }
 
         if( !EQUAL(psAttrDef->pszValue,"element") )
             continue;
diff --git a/ogr/ogrsf_frmts/gpkg/ogrgeopackagelayer.cpp b/ogr/ogrsf_frmts/gpkg/ogrgeopackagelayer.cpp
index 6886bbb..508584d 100644
--- a/ogr/ogrsf_frmts/gpkg/ogrgeopackagelayer.cpp
+++ b/ogr/ogrsf_frmts/gpkg/ogrgeopackagelayer.cpp
@@ -95,19 +95,22 @@ OGRErr OGRGeoPackageLayer::BuildColumns()
 
     /* Always start with a primary key */
     CPLString soColumns = m_pszFidColumn;
+    CPLString soColumn;
 
     /* Add a geometry column if there is one (just one) */
     if ( m_poFeatureDefn->GetGeomFieldCount() )
     {
         soColumns += ", ";
-        soColumns += m_poFeatureDefn->GetGeomFieldDefn(0)->GetNameRef();
+        soColumn.Printf("\"%s\"", m_poFeatureDefn->GetGeomFieldDefn(0)->GetNameRef());
+        soColumns += soColumn;
     }
 
     /* Add all the attribute columns */
     for( int i = 0; i < m_poFeatureDefn->GetFieldCount(); i++ )
     {
         soColumns += ", ";
-        soColumns += m_poFeatureDefn->GetFieldDefn(i)->GetNameRef();
+        soColumn.Printf("\"%s\"", m_poFeatureDefn->GetFieldDefn(i)->GetNameRef());
+        soColumns += soColumn;
     }
 
     m_soColumns = soColumns;    
@@ -366,9 +369,12 @@ CPLString OGRGeoPackageLayer::FeatureGenerateInsertSQL( OGRFeature *poFeature )
     CPLString osSQLBack;
     osSQLBack = ") VALUES (";
     
+    CPLString osSQLColumn;
+    
     if ( poFeatureDefn->GetGeomFieldCount() )
     {
-        osSQLFront += poFeatureDefn->GetGeomFieldDefn(0)->GetNameRef();
+        osSQLColumn.Printf("\"%s\"", poFeatureDefn->GetGeomFieldDefn(0)->GetNameRef());
+        osSQLFront += osSQLColumn;
         osSQLBack += "?";
         bNeedComma = TRUE;
     }
@@ -386,7 +392,8 @@ CPLString OGRGeoPackageLayer::FeatureGenerateInsertSQL( OGRFeature *poFeature )
             osSQLBack += ", ";
         }
 
-        osSQLFront += poFeatureDefn->GetFieldDefn(i)->GetNameRef();
+        osSQLColumn.Printf("\"%s\"", poFeatureDefn->GetFieldDefn(i)->GetNameRef());
+        osSQLFront += osSQLColumn;
         osSQLBack += "?";        
     }
     
@@ -416,9 +423,12 @@ CPLString OGRGeoPackageLayer::FeatureGenerateUpdateSQL( OGRFeature *poFeature )
     CPLString osUpdate;
     osUpdate.Printf("UPDATE %s SET ", m_pszTableName);
     
+    CPLString osSQLColumn;
+    
     if ( poFeatureDefn->GetGeomFieldCount() > 0 )
     {
-        osUpdate += poFeatureDefn->GetGeomFieldDefn(0)->GetNameRef();
+        osSQLColumn.Printf("\"%s\"", poFeatureDefn->GetGeomFieldDefn(0)->GetNameRef());
+        osUpdate += osSQLColumn;
         osUpdate += "=?";
         bNeedComma = TRUE;
     }
@@ -431,7 +441,8 @@ CPLString OGRGeoPackageLayer::FeatureGenerateUpdateSQL( OGRFeature *poFeature )
         else 
             osUpdate += ", ";
 
-        osUpdate += poFeatureDefn->GetFieldDefn(i)->GetNameRef();
+        osSQLColumn.Printf("\"%s\"", poFeatureDefn->GetFieldDefn(i)->GetNameRef());
+        osUpdate += osSQLColumn;
         osUpdate += "=?";
     }
     
diff --git a/ogr/ogrsf_frmts/gpkg/ogrgeopackageutility.cpp b/ogr/ogrsf_frmts/gpkg/ogrgeopackageutility.cpp
index 94d84ac..632f1d5 100644
--- a/ogr/ogrsf_frmts/gpkg/ogrgeopackageutility.cpp
+++ b/ogr/ogrsf_frmts/gpkg/ogrgeopackageutility.cpp
@@ -207,7 +207,9 @@ OGRwkbGeometryType GPkgGeometryTypeToWKB(const char *pszGpkgType, int bHasZ)
         oType =  wkbMultiLineString;
     else if ( EQUAL("MultiPolygon", pszGpkgType) )
         oType =  wkbMultiPolygon;
-    else if ( EQUAL("GeometryCollection", pszGpkgType) )
+    /* The 1.0 spec is not completely clear on what should be used... */
+    else if ( EQUAL("GeomCollection", pszGpkgType) ||
+              EQUAL("GeometryCollection", pszGpkgType) )
         oType =  wkbGeometryCollection;
     else
         oType =  wkbNone;
diff --git a/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlspatialtablelayer.cpp b/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlspatialtablelayer.cpp
index 952c497..469d700 100644
--- a/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlspatialtablelayer.cpp
+++ b/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlspatialtablelayer.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogrmssqlspatialtablelayer.cpp 28397 2015-01-31 22:34:06Z tamas $
+ * $Id: ogrmssqlspatialtablelayer.cpp 29186 2015-05-12 12:20:35Z tamas $
  *
  * Project:  MSSQL Spatial driver
  * Purpose:  Implements OGRMSSQLSpatialTableLayer class, access to an existing table.
@@ -31,7 +31,7 @@
 #include "cpl_conv.h"
 #include "ogr_mssqlspatial.h"
 
-CPL_CVSID("$Id: ogrmssqlspatialtablelayer.cpp 28397 2015-01-31 22:34:06Z tamas $");
+CPL_CVSID("$Id: ogrmssqlspatialtablelayer.cpp 29186 2015-05-12 12:20:35Z tamas $");
 
 /************************************************************************/
 /*                         OGRMSSQLAppendEscaped( )                     */
@@ -169,7 +169,7 @@ OGRFeatureDefn* OGRMSSQLSpatialTableLayer::GetLayerDefn()
     if (eGeomType != wkbNone)
         poFeatureDefn->SetGeomType(eGeomType);
     
-    if ( GetSpatialRef() && poFeatureDefn->GetGeomFieldCount() == 1)
+    if ( GetSpatialRef() && poFeatureDefn->GetGeomFieldCount() == 1)
         poFeatureDefn->GetGeomFieldDefn(0)->SetSpatialRef( poSRS );
 
     if( poFeatureDefn->GetFieldCount() == 0 &&
@@ -1041,7 +1041,7 @@ OGRErr OGRMSSQLSpatialTableLayer::CreateFeature( OGRFeature *poFeature )
 /*      Form the INSERT command.                                        */
 /* -------------------------------------------------------------------- */
 
-    oStatement.Appendf( "INSERT INTO [%s].[%s] (", pszSchemaName, pszTableName );
+    oStatement.Appendf( "INSERT INTO [%s].[%s] ", pszSchemaName, pszTableName );
 
     OGRMSSQLGeometryValidator oValidator(poFeature->GetGeometryRef());
     OGRGeometry *poGeom = oValidator.GetValidGeometryRef();
@@ -1056,7 +1056,9 @@ OGRErr OGRMSSQLSpatialTableLayer::CreateFeature( OGRFeature *poFeature )
 
     if (poGeom != NULL && pszGeomColumn != NULL)
     {
+        oStatement.Append("([");
         oStatement.Append( pszGeomColumn );
+        oStatement.Append("]");
         bNeedComma = TRUE;
     }
 
@@ -1066,7 +1068,7 @@ OGRErr OGRMSSQLSpatialTableLayer::CreateFeature( OGRFeature *poFeature )
             oStatement.Appendf( ", [%s]", pszFIDColumn );
         else
         {
-            oStatement.Appendf( "[%s]", pszFIDColumn );
+            oStatement.Appendf( "([%s]", pszFIDColumn );
             bNeedComma = TRUE;
         }
     }
@@ -1082,72 +1084,80 @@ OGRErr OGRMSSQLSpatialTableLayer::CreateFeature( OGRFeature *poFeature )
             oStatement.Appendf( ", [%s]", poFeatureDefn->GetFieldDefn(i)->GetNameRef() );
         else
         {
-            oStatement.Appendf( "[%s]", poFeatureDefn->GetFieldDefn(i)->GetNameRef() );
+            oStatement.Appendf( "([%s]", poFeatureDefn->GetFieldDefn(i)->GetNameRef() );
             bNeedComma = TRUE;
         }
     }
 
-    oStatement.Appendf( ") VALUES (" );
-
-    /* Set the geometry */
-    bNeedComma = FALSE;
-    if(poGeom != NULL && pszGeomColumn != NULL)
+    if (oStatement.GetCommand()[strlen(oStatement.GetCommand()) - 1] != ']')
     {
-        char    *pszWKT = NULL;
+        /* no fields were added */
+        oStatement.Appendf( "DEFAULT VALUES;" );
+    }
+    else
+    {
+        oStatement.Appendf( ") VALUES (" );
+
+        /* Set the geometry */
+        bNeedComma = FALSE;
+        if(poGeom != NULL && pszGeomColumn != NULL)
+        {
+            char    *pszWKT = NULL;
     
-        //poGeom->setCoordinateDimension( nCoordDimension );
+            //poGeom->setCoordinateDimension( nCoordDimension );
 
-        poGeom->exportToWkt( &pszWKT );
+            poGeom->exportToWkt( &pszWKT );
 
-        if( pszWKT != NULL && (nGeomColumnType == MSSQLCOLTYPE_GEOMETRY 
-            || nGeomColumnType == MSSQLCOLTYPE_GEOGRAPHY))
-        {
-            if (nGeomColumnType == MSSQLCOLTYPE_GEOGRAPHY)
+            if( pszWKT != NULL && (nGeomColumnType == MSSQLCOLTYPE_GEOMETRY 
+                || nGeomColumnType == MSSQLCOLTYPE_GEOGRAPHY))
             {
-                oStatement.Append( "geography::STGeomFromText(" );
-                OGRMSSQLAppendEscaped(&oStatement, pszWKT);
-                oStatement.Appendf(",%d)", nSRSId );
+                if (nGeomColumnType == MSSQLCOLTYPE_GEOGRAPHY)
+                {
+                    oStatement.Append( "geography::STGeomFromText(" );
+                    OGRMSSQLAppendEscaped(&oStatement, pszWKT);
+                    oStatement.Appendf(",%d)", nSRSId );
+                }
+                else
+                {
+                    oStatement.Append( "geometry::STGeomFromText(" );
+                    OGRMSSQLAppendEscaped(&oStatement, pszWKT);
+                    oStatement.Appendf(",%d).MakeValid()", nSRSId );
+                }     
             }
             else
-            {
-                oStatement.Append( "geometry::STGeomFromText(" );
-                OGRMSSQLAppendEscaped(&oStatement, pszWKT);
-                oStatement.Appendf(",%d).MakeValid()", nSRSId );
-            }     
-        }
-        else
-            oStatement.Append( "null" );
+                oStatement.Append( "null" );
 
-        bNeedComma = TRUE;
-        CPLFree(pszWKT);
-    }
+            bNeedComma = TRUE;
+            CPLFree(pszWKT);
+        }
 
-    /* Set the FID */
-    if( poFeature->GetFID() != OGRNullFID && pszFIDColumn != NULL )
-    {
-        if (bNeedComma)
-            oStatement.Appendf( ", %ld", poFeature->GetFID() );
-        else
+        /* Set the FID */
+        if( poFeature->GetFID() != OGRNullFID && pszFIDColumn != NULL )
         {
-            oStatement.Appendf( "%ld", poFeature->GetFID() );
-            bNeedComma = TRUE;
+            if (bNeedComma)
+                oStatement.Appendf( ", %ld", poFeature->GetFID() );
+            else
+            {
+                oStatement.Appendf( "%ld", poFeature->GetFID() );
+                bNeedComma = TRUE;
+            }
         }
-    }
 
-    for( i = 0; i < nFieldCount; i++ )
-    {
-        if( !poFeature->IsFieldSet( i ) )
-            continue;
+        for( i = 0; i < nFieldCount; i++ )
+        {
+            if( !poFeature->IsFieldSet( i ) )
+                continue;
 
-        if (bNeedComma)
-            oStatement.Append( ", " );
-        else
-            bNeedComma = TRUE;
+            if (bNeedComma)
+                oStatement.Append( ", " );
+            else
+                bNeedComma = TRUE;
 
-        AppendFieldValue(&oStatement, poFeature, i);
-    }
+            AppendFieldValue(&oStatement, poFeature, i);
+        }
 
-    oStatement.Append( ");" );
+        oStatement.Append( ");" );
+    }
 
     if( poFeature->GetFID() != OGRNullFID && pszFIDColumn != NULL && bIsIdentityFid )
         oStatement.Appendf("SET IDENTITY_INSERT [%s].[%s] OFF;", pszSchemaName, pszTableName );
diff --git a/ogr/ogrsf_frmts/openfilegdb/ogropenfilegdbdatasource.cpp b/ogr/ogrsf_frmts/openfilegdb/ogropenfilegdbdatasource.cpp
index ae243cb..ebfe766 100644
--- a/ogr/ogrsf_frmts/openfilegdb/ogropenfilegdbdatasource.cpp
+++ b/ogr/ogrsf_frmts/openfilegdb/ogropenfilegdbdatasource.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogropenfilegdbdatasource.cpp 27771 2014-09-30 22:45:12Z rouault $
+ * $Id: ogropenfilegdbdatasource.cpp 28733 2015-03-15 03:06:35Z rouault $
  *
  * Project:  OpenGIS Simple Features Reference Implementation
  * Purpose:  Implements Open FileGDB OGR driver.
@@ -426,6 +426,7 @@ int OGROpenFileGDBDataSource::OpenFileGDBv9(int iGDBFeatureClasses,
                 /* Is it a non-spatial table ? */
                 if( strcmp(psField->String, "{7A566981-C114-11D2-8A28-006097AFF44E}") == 0 )
                 {
+                    aosName.push_back( "" );
                     AddLayer( osName, nInterestTable, nCandidateLayers, nLayersSDC,
                               "", "", NULL, wkbNone );
                 }
diff --git a/ogr/ogrsf_frmts/pg/ogrpgtablelayer.cpp b/ogr/ogrsf_frmts/pg/ogrpgtablelayer.cpp
index 5013e80..9ab20af 100644
--- a/ogr/ogrsf_frmts/pg/ogrpgtablelayer.cpp
+++ b/ogr/ogrsf_frmts/pg/ogrpgtablelayer.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogrpgtablelayer.cpp 27741 2014-09-26 19:20:02Z goatbar $
+ * $Id: ogrpgtablelayer.cpp 28541 2015-02-23 11:56:10Z rouault $
 
  *
  * Project:  OpenGIS Simple Features Reference Implementation
@@ -37,7 +37,7 @@
 
 #define PQexec this_is_an_error
 
-CPL_CVSID("$Id: ogrpgtablelayer.cpp 27741 2014-09-26 19:20:02Z goatbar $");
+CPL_CVSID("$Id: ogrpgtablelayer.cpp 28541 2015-02-23 11:56:10Z rouault $");
 
 
 #define USE_COPY_UNSET  -10
@@ -1535,13 +1535,19 @@ CPLString OGRPGEscapeString(PGconn *hPGConn,
         CPLDebug( "PG",
                   "Truncated %s.%s field value '%s' to %d characters.",
                   pszTableName, pszFieldName, pszStrValue, nMaxLength );
-        nSrcLen = nSrcLen * nMaxLength / nSrcLenUTF;
 
-        
-        while( nSrcLen > 0 && ((unsigned char *) pszStrValue)[nSrcLen-1] > 127 )
+        int iUTF8Char = 0;
+        for(int iChar = 0; iChar < nSrcLen; iChar++ )
         {
-            CPLDebug( "PG", "Backup to start of multi-byte character." );
-            nSrcLen--;
+            if( (((unsigned char *) pszStrValue)[iChar] & 0xc0) != 0x80 )
+            {
+                if( iUTF8Char == nMaxLength )
+                {
+                    nSrcLen = iChar;
+                    break;
+                }
+                iUTF8Char ++;
+            }
         }
     }
 
@@ -2056,25 +2062,25 @@ OGRErr OGRPGTableLayer::CreateFeatureViaCopy( OGRFeature *poFeature )
         {
             int         iChar;
             int         iUTFChar = 0;
+            int         nMaxWidth = poFeatureDefn->GetFieldDefn(i)->GetWidth();
 
             for( iChar = 0; pszStrValue[iChar] != '\0'; iChar++ )
             {
-
-                if( poFeatureDefn->GetFieldDefn(i)->GetWidth() > 0
-                    && iUTFChar == poFeatureDefn->GetFieldDefn(i)->GetWidth() )
-                {
-                    CPLDebug( "PG",
-                              "Truncated %s.%s field value '%s' to %d characters.",
-                              poFeatureDefn->GetName(),
-                              poFeatureDefn->GetFieldDefn(i)->GetNameRef(),
-                              pszStrValue,
-                              poFeatureDefn->GetFieldDefn(i)->GetWidth() );
-                    break;
-                }
-
                 //count of utf chars
                 if ((pszStrValue[iChar] & 0xc0) != 0x80) 
+                {
+                    if( nMaxWidth > 0 && iUTFChar == nMaxWidth )
+                    {
+                        CPLDebug( "PG",
+                                "Truncated %s.%s field value '%s' to %d characters.",
+                                poFeatureDefn->GetName(),
+                                poFeatureDefn->GetFieldDefn(i)->GetNameRef(),
+                                pszStrValue,
+                                poFeatureDefn->GetFieldDefn(i)->GetWidth() );
+                        break;
+                    }
                     iUTFChar++;
+                }
 
                 /* Escape embedded \, \t, \n, \r since they will cause COPY
                    to misinterpret a line of text and thus abort */
diff --git a/ogr/ogrsf_frmts/pgdump/ogrpgdumplayer.cpp b/ogr/ogrsf_frmts/pgdump/ogrpgdumplayer.cpp
index 178ab74..0df8541 100644
--- a/ogr/ogrsf_frmts/pgdump/ogrpgdumplayer.cpp
+++ b/ogr/ogrsf_frmts/pgdump/ogrpgdumplayer.cpp
@@ -1,12 +1,12 @@
 /******************************************************************************
- * $Id: ogrpgdumplayer.cpp 27729 2014-09-24 00:40:16Z goatbar $
+ * $Id: ogrpgdumplayer.cpp 28541 2015-02-23 11:56:10Z rouault $
  *
  * Project:  OpenGIS Simple Features Reference Implementation
  * Purpose:  Implements OGRPGDumpLayer class
  * Author:   Even Rouault, <even dot rouault at mines dash paris dot org>
  *
  ******************************************************************************
- * Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
+ * Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@
 #include "cpl_string.h"
 #include "ogr_p.h"
 
-CPL_CVSID("$Id: ogrpgdumplayer.cpp 27729 2014-09-24 00:40:16Z goatbar $");
+CPL_CVSID("$Id: ogrpgdumplayer.cpp 28541 2015-02-23 11:56:10Z rouault $");
 
 #define USE_COPY_UNSET -1
 
@@ -477,16 +477,22 @@ OGRErr OGRPGDumpLayer::CreateFeatureViaCopy( OGRFeature *poFeature )
             nOGRFieldType != OFTBinary )
         {
             int         iChar;
+            int         iUTFChar = 0;
+            int         nMaxWidth = poFeatureDefn->GetFieldDefn(i)->GetWidth();
 
             for( iChar = 0; pszStrValue[iChar] != '\0'; iChar++ )
             {
-                if( poFeatureDefn->GetFieldDefn(i)->GetWidth() > 0
-                    && iChar == poFeatureDefn->GetFieldDefn(i)->GetWidth() )
+                //count of utf chars
+                if ((pszStrValue[iChar] & 0xc0) != 0x80) 
                 {
-                    CPLDebug( "PG",
-                              "Truncated %s field value, it was too long.",
-                              poFeatureDefn->GetFieldDefn(i)->GetNameRef() );
-                    break;
+                    if( nMaxWidth > 0 && iUTFChar == nMaxWidth )
+                    {
+                        CPLDebug( "PG",
+                                "Truncated %s field value, it was too long.",
+                                poFeatureDefn->GetFieldDefn(i)->GetNameRef() );
+                        break;
+                    }
+                    iUTFChar++;
                 }
 
                 /* Escape embedded \, \t, \n, \r since they will cause COPY
@@ -657,17 +663,26 @@ CPLString OGRPGDumpEscapeString(
     osCommand += "'";
 
     int nSrcLen = strlen(pszStrValue);
-    if (nMaxLength > 0 && nSrcLen > nMaxLength)
+    int nSrcLenUTF = CPLStrlenUTF8(pszStrValue);
+
+    if (nMaxLength > 0 && nSrcLenUTF > nMaxLength)
     {
         CPLDebug( "PG",
                   "Truncated %s field value, it was too long.",
                   pszFieldName );
-        nSrcLen = nMaxLength;
-        
-        while( nSrcLen > 0 && ((unsigned char *) pszStrValue)[nSrcLen-1] > 127 )
+
+        int iUTF8Char = 0;
+        for(int iChar = 0; iChar < nSrcLen; iChar++ )
         {
-            CPLDebug( "PG", "Backup to start of multi-byte character." );
-            nSrcLen--;
+            if( (((unsigned char *) pszStrValue)[iChar] & 0xc0) != 0x80 )
+            {
+                if( iUTF8Char == nMaxLength )
+                {
+                    nSrcLen = iChar;
+                    break;
+                }
+                iUTF8Char ++;
+            }
         }
     }
 
diff --git a/ogr/ogrsf_frmts/shape/shape2ogr.cpp b/ogr/ogrsf_frmts/shape/shape2ogr.cpp
index 93f98e0..ed2c1b8 100644
--- a/ogr/ogrsf_frmts/shape/shape2ogr.cpp
+++ b/ogr/ogrsf_frmts/shape/shape2ogr.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: shape2ogr.cpp 27741 2014-09-26 19:20:02Z goatbar $
+ * $Id: shape2ogr.cpp 29235 2015-05-22 19:28:41Z rouault $
  *
  * Project:  OpenGIS Simple Features Reference Implementation
  * Purpose:  Implements translation of Shapefile shapes into OGR
@@ -32,7 +32,7 @@
 #include "ogrshape.h"
 #include "cpl_conv.h"
 
-CPL_CVSID("$Id: shape2ogr.cpp 27741 2014-09-26 19:20:02Z goatbar $");
+CPL_CVSID("$Id: shape2ogr.cpp 29235 2015-05-22 19:28:41Z rouault $");
 
 /************************************************************************/
 /*                        RingStartEnd                                  */
@@ -1002,6 +1002,8 @@ OGRFeature *SHPReadOGRFeature( SHPHandle hSHP, DBFHandle hDBF,
         CPLError( CE_Failure, CPLE_AppDefined, 
                   "Attempt to read shape with feature id (%d), but it is marked deleted.",
                   iShape );
+        if( psShape != NULL )
+            SHPDestroyObject(psShape);
         return NULL;
     }
 
diff --git a/ogr/ogrsf_frmts/sqlite/ogrsqliteselectlayer.cpp b/ogr/ogrsf_frmts/sqlite/ogrsqliteselectlayer.cpp
index 5a8d36a..84a3c99 100644
--- a/ogr/ogrsf_frmts/sqlite/ogrsqliteselectlayer.cpp
+++ b/ogr/ogrsf_frmts/sqlite/ogrsqliteselectlayer.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: ogrsqliteselectlayer.cpp 27044 2014-03-16 23:41:27Z rouault $
+ * $Id: ogrsqliteselectlayer.cpp 30273 2015-09-11 07:59:52Z rouault $
  *
  * Project:  OpenGIS Simple Features Reference Implementation
  * Purpose:  Implements OGRSQLiteSelectLayer class, layer access to the results
@@ -34,7 +34,7 @@
 #include "swq.h"
 #include "ogr_p.h"
 
-CPL_CVSID("$Id: ogrsqliteselectlayer.cpp 27044 2014-03-16 23:41:27Z rouault $");
+CPL_CVSID("$Id: ogrsqliteselectlayer.cpp 30273 2015-09-11 07:59:52Z rouault $");
 /************************************************************************/
 /*                        OGRSQLiteSelectLayer()                        */
 /************************************************************************/
@@ -373,22 +373,26 @@ OGRSQLiteLayer* OGRSQLiteSelectLayer::GetBaseLayer(size_t& i)
         return NULL;
     }
 
-    char chQuote = osSQLBase[nFromPos + 6];
-    int bInQuotes = (chQuote == '\'' || chQuote == '"' );
+    /* Remove potential quotes around layer name */
+    char chFirst = osSQLBase[nFromPos + 6];
+    int bInQuotes = (chFirst == '\'' || chFirst == '"' );
     CPLString osBaseLayerName;
     for( i = nFromPos + 6 + (bInQuotes ? 1 : 0);
          i < osSQLBase.size(); i++ )
     {
-        if (osSQLBase[i] == chQuote && i + 1 < osSQLBase.size() &&
-            osSQLBase[i + 1] == chQuote )
+        if (osSQLBase[i] == chFirst && bInQuotes )
         {
-            osBaseLayerName += osSQLBase[i];
-            i++;
-        }
-        else if (osSQLBase[i] == chQuote && bInQuotes)
-        {
-            i++;
-            break;
+            if( i + 1 < osSQLBase.size() &&
+                osSQLBase[i + 1] == chFirst )
+            {
+                osBaseLayerName += osSQLBase[i];
+                i++;
+            }
+            else
+            {
+                i++;
+                break;
+            }
         }
         else if (osSQLBase[i] == ' ' && !bInQuotes)
             break;
diff --git a/port/cpl_vsil_tar.cpp b/port/cpl_vsil_tar.cpp
index a0bb0b5..826b0af 100644
--- a/port/cpl_vsil_tar.cpp
+++ b/port/cpl_vsil_tar.cpp
@@ -1,5 +1,5 @@
 /******************************************************************************
- * $Id: cpl_vsil_tar.cpp 27044 2014-03-16 23:41:27Z rouault $
+ * $Id: cpl_vsil_tar.cpp 28589 2015-03-01 20:52:45Z rouault $
  *
  * Project:  CPL - Common Portability Library
  * Purpose:  Implement VSI large file api for tar files (.tar).
@@ -29,7 +29,7 @@
 
 #include "cpl_vsi_virtual.h"
 
-CPL_CVSID("$Id: cpl_vsil_tar.cpp 27044 2014-03-16 23:41:27Z rouault $");
+CPL_CVSID("$Id: cpl_vsil_tar.cpp 28589 2015-03-01 20:52:45Z rouault $");
 
 
 /************************************************************************/
@@ -130,12 +130,12 @@ int VSITarReader::GotoNextFile()
         abyHeader[115] != '\0' ||
         abyHeader[123] != '\0' ||
         (abyHeader[135] != '\0' && abyHeader[135] != ' ') ||
-        (abyHeader[147] != '\0' && abyHeader[147] != ' ') ||
-        abyHeader[154] != '\0' ||
-        abyHeader[155] != ' ')
+        (abyHeader[147] != '\0' && abyHeader[147] != ' '))
     {
         return FALSE;
     }
+    if( abyHeader[124] < '0' || abyHeader[124] > '7' )
+        return FALSE;
 
     osNextFileName = abyHeader;
     nNextFileSize = 0;
diff --git a/swig/include/perl/gdal_perl.i b/swig/include/perl/gdal_perl.i
index d2db8da..42feda2 100644
--- a/swig/include/perl/gdal_perl.i
+++ b/swig/include/perl/gdal_perl.i
@@ -116,8 +116,8 @@ ALTERED_DESTROY(GDALRasterAttributeTableShadow, GDALc, delete_RasterAttributeTab
     # etc.  GDAL 2.0 should then get VERSION 2.000 and 2.1 should get
     # 2.001 etc.
 
-    our $VERSION = '1.9922';
-    our $GDAL_VERSION = '1.11.2';
+    our $VERSION = '1.9923';
+    our $GDAL_VERSION = '1.11.3';
     use vars qw/
 	%TYPE_STRING2INT %TYPE_INT2STRING
 	%ACCESS_STRING2INT %ACCESS_INT2STRING
diff --git a/swig/include/perl/ogr_perl.i b/swig/include/perl/ogr_perl.i
index 79d6fbc..02550ee 100644
--- a/swig/include/perl/ogr_perl.i
+++ b/swig/include/perl/ogr_perl.i
@@ -879,23 +879,26 @@ ALTERED_DESTROY(OGRGeometryShadow, OGRc, delete_Geometry)
 	}
 	sub create {
 	    my $pkg = shift;
+            # documented forms of this constructor are
+            # 1) create($name)
+            # 2) create($name, $type)
+            # 3) create(%named_parameters)
+            # let us assume the case 3 is true only if @_ >= 4 and @_ % 2 == 0;
 	    my %param = ( Name => 'unnamed', Type => 'String' );
 	    if (@_ == 0) {
 	    } elsif (@_ == 1) {
 		$param{Name} = shift;
+            } elsif (@_ == 2) {
+                $param{Name} = shift;
+                $param{Type} = shift;
+            } elsif (@_ >= 4 and @_ % 2 == 0) {
+                my %p = @_;
+                for my $k (keys %p) {
+                    $param{$k} = $p{$k};
+                }
 	    } else {
-		my %known = map {$_ => 1} qw/Index Name Type Justify Width Precision/;
-		unless ($known{$_[0]}) {
-		    $param{Name} = shift;
-		    $param{Type} = shift;
-		} else {
-		    my %p = @_;
-		    for my $k (keys %known) {
-			$param{$k} = $p{$k} if exists $p{$k};
-		    }
-		}
-	    }
-	    croak "usage: Geo::OGR::FieldDefn->create(%params)" if ref($param{Name});
+                croak "usage: Geo::OGR::FieldDefn->create(\$name | \$name, \$type | \%named_params)";
+            }
 	    $param{Type} = $TYPE_STRING2INT{$param{Type}} 
 	    if defined $param{Type} and exists $TYPE_STRING2INT{$param{Type}};
 	    $param{Justify} = $JUSTIFY_STRING2INT{$param{Justify}} 
diff --git a/swig/perl/lib/Geo/GDAL.pm b/swig/perl/lib/Geo/GDAL.pm
index 8814901..0efce1f 100644
--- a/swig/perl/lib/Geo/GDAL.pm
+++ b/swig/perl/lib/Geo/GDAL.pm
@@ -612,8 +612,8 @@ package Geo::GDAL;
     # etc.  GDAL 2.0 should then get VERSION 2.000 and 2.1 should get
     # 2.001 etc.
 
-    our $VERSION = '1.9922';
-    our $GDAL_VERSION = '1.11.2';
+    our $VERSION = '1.9923';
+    our $GDAL_VERSION = '1.11.3';
     use vars qw/
 	%TYPE_STRING2INT %TYPE_INT2STRING
 	%ACCESS_STRING2INT %ACCESS_INT2STRING
diff --git a/swig/perl/lib/Geo/OGR.dox b/swig/perl/lib/Geo/OGR.dox
index 29cf3ab..148abfb 100644
--- a/swig/perl/lib/Geo/OGR.dox
+++ b/swig/perl/lib/Geo/OGR.dox
@@ -841,7 +841,7 @@
 # @return a new Geo::OGR::FieldDefn object
 
 ## @cmethod Geo::OGR::FieldDefn create(%parameters)
-# @param parameters named parameters: Name, Type, Justify, Width,
+# @param parameters two or more named parameters: Name, Type, Justify, Width,
 # Precision
 #
 # Usage:
diff --git a/swig/perl/lib/Geo/OGR.pm b/swig/perl/lib/Geo/OGR.pm
index 1f01802..e57ca0e 100644
--- a/swig/perl/lib/Geo/OGR.pm
+++ b/swig/perl/lib/Geo/OGR.pm
@@ -1449,23 +1449,26 @@ package Geo::OGR;
 	}
 	sub create {
 	    my $pkg = shift;
+            # documented forms of this constructor are
+            # 1) create($name)
+            # 2) create($name, $type)
+            # 3) create(%named_parameters)
+            # let us assume the case 3 is true only if @_ >= 4 and @_ % 2 == 0;
 	    my %param = ( Name => 'unnamed', Type => 'String' );
 	    if (@_ == 0) {
 	    } elsif (@_ == 1) {
 		$param{Name} = shift;
+            } elsif (@_ == 2) {
+                $param{Name} = shift;
+                $param{Type} = shift;
+            } elsif (@_ >= 4 and @_ % 2 == 0) {
+                my %p = @_;
+                for my $k (keys %p) {
+                    $param{$k} = $p{$k};
+                }
 	    } else {
-		my %known = map {$_ => 1} qw/Index Name Type Justify Width Precision/;
-		unless ($known{$_[0]}) {
-		    $param{Name} = shift;
-		    $param{Type} = shift;
-		} else {
-		    my %p = @_;
-		    for my $k (keys %known) {
-			$param{$k} = $p{$k} if exists $p{$k};
-		    }
-		}
-	    }
-	    croak "usage: Geo::OGR::FieldDefn->create(%params)" if ref($param{Name});
+                croak "usage: Geo::OGR::FieldDefn->create(\$name | \$name, \$type | \%named_params)";
+            }
 	    $param{Type} = $TYPE_STRING2INT{$param{Type}} 
 	    if defined $param{Type} and exists $TYPE_STRING2INT{$param{Type}};
 	    $param{Justify} = $JUSTIFY_STRING2INT{$param{Justify}} 
diff --git a/swig/perl/t/gdal.t b/swig/perl/t/gdal.t
index 0322c49..fbd7cb2 100644
--- a/swig/perl/t/gdal.t
+++ b/swig/perl/t/gdal.t
@@ -1,5 +1,6 @@
 use Test::More qw(no_plan);
 BEGIN { use_ok('Geo::GDAL') };
+Geo::GDAL::PushFinderLocation('../../data');
 
 use vars qw/%available_driver %test_driver $loaded $verbose @types @fails @tested_drivers/;
 
@@ -205,21 +206,20 @@ if (0) {
     my $n2 = @t * @u;
     ok($n == $n2, "create rat column");
     $r->SetRowCount(scalar(@t));
-    my $i = 0;
-    my $c = 0;
-    for (@t) {
-	if (/Integer/) {
-	    my $v = $r->Value($i, $c, 12);
-	    ok($v == 12, "rat int");
-	} elsif (/Real/) {
-	    my $v = $r->Value($i, $c, 1.23);
-	    ok($v == 1.23, "rat int");
-	} elsif (/String/) {
-	    my $v = $r->Value($i, $c, "abc");
-	    ok($v eq 'abc', "rat str");
+    for (my $i = 0; $i < 10; $i++) {
+    	for (my $c = 0; $c < $n; $c++) {
+	    my $t = $r->GetTypeOfCol($c);
+	    if ($t =~ /Integer/) {
+		my $v = $r->Value($i, $c, 12);
+		ok($v == 12, "rat int");
+	    } elsif ($t =~ /Real/) {
+		my $v = $r->Value($i, $c, 1.23);
+		ok($v == 1.23, "$n ($i,$c) rat real '$v'");
+	    } elsif ($t =~ /String/) {
+		my $v = $r->Value($i, $c, "abc");
+		ok($v eq 'abc', "$n ($i,$c) rat str '$v'");
+	    }
 	}
-	$i++;
-	$c++;
     }
 }
 
diff --git a/swig/perl/t/ogr.t b/swig/perl/t/ogr.t
index f2e6a7d..19cb2a6 100644
--- a/swig/perl/t/ogr.t
+++ b/swig/perl/t/ogr.t
@@ -1,5 +1,6 @@
 use Test::More qw(no_plan);
 BEGIN { use_ok('Geo::GDAL') };
+Geo::GDAL::PushFinderLocation('../../data');
 
 use strict;
 use vars qw/%available_driver %test_driver $loaded $verbose @types %pack_types @fails @tested_drivers/;
@@ -185,7 +186,7 @@ system "rm -rf tmp_ds_*" unless $^O eq 'MSWin32';
     my $g2;
     {
 	my $d = Geo::OGR::FeatureDefn->new;
-	$d->Schema(Fields=>[Geo::OGR::FieldDefn->create(Name=>'Foo')]);
+	$d->Schema(Fields=>[Geo::OGR::FieldDefn->create('Foo')]);
 	my $f = Geo::OGR::Feature->new($d);
 	my $g = Geo::OGR::Geometry->create('Point');
 	$f->SetGeometry($g);
diff --git a/swig/perl/t/osr.t b/swig/perl/t/osr.t
index c05cccd..531574e 100644
--- a/swig/perl/t/osr.t
+++ b/swig/perl/t/osr.t
@@ -1,5 +1,6 @@
 use Test::More qw(no_plan);
 BEGIN { use_ok('Geo::GDAL') };
+Geo::GDAL::PushFinderLocation('../../data');
 
 $srs1 = Geo::OSR::SpatialReference->create(EPSG=>2936);
 $srs2 = Geo::OSR::SpatialReference->create(Text=>$srs1->AsText);
diff --git a/swig/python/samples/ogr_layer_algebra.py b/swig/python/samples/ogr_layer_algebra.py
index 59d6f1f..6e1343e 100644
--- a/swig/python/samples/ogr_layer_algebra.py
+++ b/swig/python/samples/ogr_layer_algebra.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 #******************************************************************************
-#  $Id: ogr_layer_algebra.py 27044 2014-03-16 23:41:27Z rouault $
+#  $Id: ogr_layer_algebra.py 29255 2015-05-27 12:46:09Z rouault $
 # 
 #  Project:  GDAL Python Interface
 #  Purpose:  Application for executing OGR layer algebra operations
@@ -131,7 +131,7 @@ def main(argv = None):
     opt = []
     overwrite = False
     input_fields = 'ALL'
-    method_fields = 'ALL'
+    method_fields = None
     geom_type = ogr.wkbUnknown
     srs_name = None
     srs = None
@@ -287,6 +287,12 @@ def main(argv = None):
        op_str is None:
            return Usage()
 
+    if method_fields is None:
+        if op_str in ( 'Update', 'Clip', 'Erase' ):
+            method_fields = 'NONE'
+        else:
+            method_fields = 'ALL'
+
     if input_fields == 'NONE' and method_fields == 'NONE':
         print('Warning: -input_fields NONE and -method_fields NONE results in all fields being added')
 
diff --git a/swig/python/scripts/gdal_merge.py b/swig/python/scripts/gdal_merge.py
index 3844886..5faabc1 100755
--- a/swig/python/scripts/gdal_merge.py
+++ b/swig/python/scripts/gdal_merge.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 ###############################################################################
-# $Id: gdal_merge.py 27044 2014-03-16 23:41:27Z rouault $
+# $Id: gdal_merge.py 30390 2015-09-15 13:14:09Z rouault $
 #
 # Project:  InSAR Peppers
 # Purpose:  Module to extract data from many rasters into one output.
@@ -306,9 +306,6 @@ def main( argv=None ):
         elif arg == '-separate':
             separate = 1
 
-        elif arg == '-seperate':
-            separate = 1
-
         elif arg == '-pct':
             copy_pct = 1
 
diff --git a/swig/python/setup.py b/swig/python/setup.py
index bb97c7e..dfeffc1 100644
--- a/swig/python/setup.py
+++ b/swig/python/setup.py
@@ -7,7 +7,7 @@
 # Howard Butler hobu.inc at gmail.com
 
 
-gdal_version = '1.11.2'
+gdal_version = '1.11.3'
 
 import sys
 import os

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-grass/gdal.git



More information about the Pkg-grass-devel mailing list