[pktools] 01/06: Imported Upstream version 2.5.3
Bas Couwenberg
sebastic at xs4all.nl
Tue Aug 12 12:24:16 UTC 2014
This is an automated email from the git hooks/post-receive script.
sebastic-guest pushed a commit to branch master
in repository pktools.
commit 736e5957f548e3ed32f8ba9a8e8e95897f418f56
Author: Bas Couwenberg <sebastic at xs4all.nl>
Date: Tue Aug 12 13:28:18 2014 +0200
Imported Upstream version 2.5.3
---
ChangeLog | 15 +
INSTALL | 6 +
configure | 22 +-
configure.ac | 4 +-
pktools.pc.in | 2 +-
src/algorithms/ConfusionMatrix.h | 2 +-
src/algorithms/CostFactory.h | 60 +++
src/algorithms/FeatureSelector.h | 62 +--
src/algorithms/Filter.cc | 45 ++
src/algorithms/Filter.h | 8 +-
src/algorithms/Filter2d.cc | 13 +-
src/algorithms/Filter2d.h | 82 ++--
src/algorithms/ImgRegression.cc | 91 ++++
src/algorithms/ImgRegression.h | 42 ++
src/algorithms/Makefile.am | 2 +-
src/algorithms/Makefile.in | 14 +-
src/algorithms/StatFactory.h | 31 +-
src/apps/Makefile.am | 14 +-
src/apps/Makefile.in | 59 ++-
src/apps/pkann.cc | 25 +-
src/apps/pkcomposite.cc | 50 +-
src/apps/pkcrop.cc | 4 +-
src/apps/pkdiff.cc | 90 ++--
src/apps/pkextract.cc | 899 +++++++++++++++++++---------------
src/apps/pkfillnodata.cc | 4 +-
src/apps/pkfilter.cc | 19 +-
src/apps/pkfilterdem.cc | 75 +--
src/apps/pkfsann.cc | 247 +++++-----
src/apps/pkfsann.h | 46 ++
src/apps/pkfssvm.cc | 261 +++++-----
src/apps/pkfssvm.h | 57 +++
src/apps/pkkalman.cc | 1004 ++++++++++++++++++++++++++++++++++++++
src/apps/pklas2img.cc | 5 +-
src/apps/pkoptsvm.cc | 18 +-
src/apps/pksieve.cc | 2 +-
src/apps/pkstatascii.cc | 34 +-
src/apps/pkstatogr.cc | 244 ++++-----
src/apps/pksvm.cc | 148 +++---
src/imageclasses/ImgReaderGdal.h | 3 +
src/imageclasses/ImgWriterGdal.h | 1 +
40 files changed, 2755 insertions(+), 1055 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 16d6767..e0a265c 100755
--- a/ChangeLog
+++ b/ChangeLog
@@ -267,6 +267,8 @@ version 2.5.2
- configure script: GDAL>=1.10.0 is required for pkdiff
- programs ported to windows and GUI with Qt
removed underscore for QProcess in Windows
+ - algorithms
+ ConfusionMatrix.h: must find exact match in getClassIndex
- pkinfo
distinct long options
- pkclassify_svm -> pksvm
@@ -284,9 +286,22 @@ version 2.5.2
- pkmosaic -> pkcomposite
name was confusing as also compositing is supported (unlike gdal_merge.py and gdalwarp)
resample option similar to pkcrop
+ option cb|cband (composite band) instead of rb|rband (ruleband)
- version control for libraries
thanks to suggestion of Francesco Paolo Lovergine
- subdirectory pktools for include headers
thanks to suggestion of Francesco Paolo Lovergine
- pklas2img
support for compressed point cloud (LAZ) files
+ - pkextract
+ support for median rule and pointOnSurface
+ redesign to optimize vector polygon processing
+ removed option for masking, introduced srcnodata and bndnodata
+ (more testing is needed)
+version 2.5.3
+ - pklas2img
+ libLAS 1.8.0 support (ticket #42951)
+
+Next versions:
+ - todo for API: ImgReaderGdal (ImgWriterGdal) open in update mode (check gdal_edit.py: http://searchcode.com/codesearch/view/18938404)
+
diff --git a/INSTALL b/INSTALL
index d564d13..15bab20 100644
--- a/INSTALL
+++ b/INSTALL
@@ -9,6 +9,12 @@ The simplest way to compile this package is:
`sh ./configure' instead to prevent `csh' from trying to execute
`configure' itself.
+ You might need to execute:
+
+ autoreconf -i
+
+ before you are able to configure (in case there is no configure file)
+
configure options (use ./configure --help for help info)
--with-gdal=<path to gdal-config file>
diff --git a/configure b/configure
index e8cb6b9..9884298 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for pktools 2.5.2.
+# Generated by GNU Autoconf 2.69 for pktools 2.5.3.
#
# Report bugs to <kempenep@gmail.com>.
#
@@ -590,8 +590,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='pktools'
PACKAGE_TARNAME='pktools'
-PACKAGE_VERSION='2.5.2'
-PACKAGE_STRING='pktools 2.5.2'
+PACKAGE_VERSION='2.5.3'
+PACKAGE_STRING='pktools 2.5.3'
PACKAGE_BUGREPORT='kempenep@gmail.com'
PACKAGE_URL=''
@@ -1361,7 +1361,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures pktools 2.5.2 to adapt to many kinds of systems.
+\`configure' configures pktools 2.5.3 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1431,7 +1431,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of pktools 2.5.2:";;
+ short | recursive ) echo "Configuration of pktools 2.5.3:";;
esac
cat <<\_ACEOF
@@ -1553,7 +1553,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-pktools configure 2.5.2
+pktools configure 2.5.3
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2314,7 +2314,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by pktools $as_me 2.5.2, which was
+It was created by pktools $as_me 2.5.3, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -3130,7 +3130,7 @@ fi
# Define the identity of the package.
PACKAGE='pktools'
- VERSION='2.5.2'
+ VERSION='2.5.3'
cat >>confdefs.h <<_ACEOF
@@ -18754,7 +18754,7 @@ $as_echo "$GDAL_OGR_ENABLED" >&6; }
fi
- gdal_version_req=1.10.0
+ gdal_version_req=1.9.0
if test "$found_gdal" = "yes" -a -n "$gdal_version_req"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if GDAL version is >= $gdal_version_req" >&5
@@ -19989,7 +19989,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by pktools $as_me 2.5.2, which was
+This file was extended by pktools $as_me 2.5.3, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -20055,7 +20055,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-pktools config.status 2.5.2
+pktools config.status 2.5.3
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
diff --git a/configure.ac b/configure.ac
index 30ec14a..99895c2 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,4 @@
-AC_INIT([pktools], [2.5.2], [kempenep@gmail.com])
+AC_INIT([pktools], [2.5.3], [kempenep@gmail.com])
#AM_INIT_AUTOMAKE([-Wall -Werror foreign])
AM_INIT_AUTOMAKE([-Wall -Wno-extra-portability foreign])
AC_CONFIG_MACRO_DIR([m4])
@@ -21,7 +21,7 @@ LT_INIT
# check if the source folder is correct
AC_CONFIG_SRCDIR([src/apps/pkinfo.cc])
-AX_LIB_GDAL([1.10.0]) dnl uncomment if gdal version 1.10.0 is required
+AX_LIB_GDAL([1.9.0]) dnl uncomment if gdal version 1.10.0 is required
AC_CHECK_HEADERS([gdal.h])
diff --git a/pktools.pc.in b/pktools.pc.in
index 0731529..55ff1c3 100644
--- a/pktools.pc.in
+++ b/pktools.pc.in
@@ -7,5 +7,5 @@ Name: pktools
Description: API library for pktools
Requires: gdal gsl
Version: @PACKAGE_VERSION@
-Libs: -L${libdir} -lpktools
+Libs: -L${libdir} -lbase -lalgorithms -limageClasses -lfileClasses -llasClasses
Cflags: -I${includedir}/pktools
\ No newline at end of file
diff --git a/src/algorithms/ConfusionMatrix.h b/src/algorithms/ConfusionMatrix.h
index 4a2b22d..db11864 100644
--- a/src/algorithms/ConfusionMatrix.h
+++ b/src/algorithms/ConfusionMatrix.h
@@ -48,7 +48,7 @@ public:
int getClassIndex(std::string className) const {
int index=0;
for(index=0;index<m_classes.size();++index){
- if(m_classes[index].find(className)!=std::string::npos)
+ if(m_classes[index]==className)
break;
}
if(index>=m_classes.size())
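
The getClassIndex() fix above replaces the substring test with an exact string comparison. A minimal standalone sketch (class names invented for the example, not taken from pktools) shows why the substring test can return the wrong index when one class name contains another:

//why substring matching in getClassIndex was unreliable: "crop" also
//matches "cropland", so the old test stops at the wrong class
#include <iostream>
#include <string>
#include <vector>

int main(){
  std::vector<std::string> classes;
  classes.push_back("cropland");
  classes.push_back("crop");
  std::string query="crop";
  int substringIndex=-1;
  int exactIndex=-1;
  for(int i=0;i<static_cast<int>(classes.size());++i){
    if(substringIndex<0&&classes[i].find(query)!=std::string::npos)
      substringIndex=i;//old behaviour: matches "cropland" first
    if(exactIndex<0&&classes[i]==query)
      exactIndex=i;//new behaviour: requires an exact match
  }
  std::cout << "substring: " << substringIndex << ", exact: " << exactIndex << std::endl;//prints 0 and 1
  return 0;
}
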
diff --git a/src/algorithms/CostFactory.h b/src/algorithms/CostFactory.h
new file mode 100644
index 0000000..82d3a12
--- /dev/null
+++ b/src/algorithms/CostFactory.h
@@ -0,0 +1,60 @@
+/**********************************************************************
+CostFactory.h: select features, typical use: feature selection for classification
+Copyright (C) 2008-2012 Pieter Kempeneers
+
+This file is part of pktools
+
+pktools is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+pktools is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with pktools. If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+#ifndef _COSTFACTORY_H_
+#define _COSTFACTORY_H_
+
+#include <math.h>
+#include <vector>
+#include <map>
+#include "ConfusionMatrix.h"
+#include "base/Vector2d.h"
+
+
+class CostFactory{
+public:
+ CostFactory(void){};
+ CostFactory(unsigned short cv, short verbose) : m_cv(cv), m_verbose(verbose){};
+
+ virtual ~CostFactory(void){};
+ void setCv(unsigned short cv){m_cv=cv;};
+ void setClassValueMap(const std::string& classname, short classvalue){ m_classValueMap[classname]=classvalue;};
+ std::map<std::string,short> getClassValueMap(){return m_classValueMap;};
+ std::vector<std::string> getNameVector(){return m_nameVector;};
+ void setNameVector(std::vector<std::string>& nameVector){m_nameVector=nameVector;};
+ unsigned short getClassIndex(std::string classname) const {return m_cm.getClassIndex(classname);};
+ void pushBackClassName(std::string classname){m_cm.pushBackClassName(classname,true);};//doSort=true
+ void pushBackName(std::string classname){m_nameVector.push_back(classname);};
+ void setNcTraining(const std::vector<unsigned int> nctraining){m_nctraining=nctraining;};
+ void setNcTest(const std::vector<unsigned int> nctest){m_nctest=nctest;};
+ //getCost needs to be implemented case by case (e.g., SVM, ANN)
+ virtual double getCost(const std::vector<Vector2d<float> > &trainingFeatures)=0;
+
+protected:
+ ConfusionMatrix m_cm;
+ std::map<std::string,short> m_classValueMap;
+ std::vector<std::string> m_nameVector;
+ std::vector<unsigned int> m_nctraining;
+ std::vector<unsigned int> m_nctest;
+ unsigned short m_cv;
+ std::string m_classname;
+ short m_classvalue;
+ short m_verbose;
+};
+#endif /* _COSTFACTORY_H_ */
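
CostFactory only fixes the interface: getCost() is pure virtual and, as the comment says, is implemented per classifier (see pkfsann.h and pkfssvm.h further down). A minimal illustrative subclass, assuming the pktools headers are on the include path (-I${includedir}/pktools as in pktools.pc above); the trivial cost below is invented for the sketch and is not the ANN/SVM cross-validation cost used by the tools:

#include <vector>
#include "algorithms/CostFactory.h"

class DummyCostFactory : public CostFactory{
public:
  DummyCostFactory(unsigned short cv, short verbose) : CostFactory(cv,verbose){};
  virtual ~DummyCostFactory(void){};
  //one Vector2d per class; this dummy cost simply grows with the number of selected features
  virtual double getCost(const std::vector<Vector2d<float> > &trainingFeatures){
    if(trainingFeatures.empty())
      return 0;
    return static_cast<double>(trainingFeatures[0][0].size());
  };
};
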
diff --git a/src/algorithms/FeatureSelector.h b/src/algorithms/FeatureSelector.h
index b57771e..52959d7 100644
--- a/src/algorithms/FeatureSelector.h
+++ b/src/algorithms/FeatureSelector.h
@@ -26,28 +26,30 @@ along with pktools. If not, see <http://www.gnu.org/licenses/>.
#include <algorithm>
#include <iostream>
#include <iomanip>
+#include "ConfusionMatrix.h"
#include "base/IndexValue.h"
#include "base/Vector2d.h"
#include "gsl/gsl_combination.h"
+#include "CostFactory.h"
class FeatureSelector
{
public:
FeatureSelector(){};
~FeatureSelector(){};
- template<class T> double forwardUnivariate(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int maxFeatures, short verbose=0);
- template<class T> double forward(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int maxFeatures, short verbose=0);
- template<class T> double backward(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int minFeatures, short verbose=0);
- template<class T> double floating(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int maxFeatures=0, short verbose=0);
- template<class T> double bruteForce(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int maxFeatures=0, short verbose=0);
+ template<class T> double forward(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int maxFeatures, short verbose=0);
+ template<class T> double backward(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int minFeatures, short verbose=0);
+ template<class T> double floating(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int maxFeatures=0, double epsilon=0.001, short verbose=0);
+ template<class T> double bruteForce(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int maxFeatures=0, short verbose=0);
private:
- template<class T> double addFeature(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, short verbose=0);
- template<class T> double removeFeature(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int& r, short verbose=0);
+ template<class T> double addFeature(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, short verbose=0);
+ template<class T> double removeFeature(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int& r, short verbose=0);
+ template<class T> double forwardUnivariate(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int maxFeatures, short verbose=0);
};
//sequential forward selection Univariate (N single best features)
-template<class T> double FeatureSelector::forwardUnivariate(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int maxFeatures=0, short verbose){
+template<class T> double FeatureSelector::forwardUnivariate(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int maxFeatures=0, short verbose){
int maxLevels=v[0][0].size();
if(!maxFeatures)
maxFeatures=maxLevels;
@@ -67,7 +69,7 @@ template<class T> double FeatureSelector::forwardUnivariate(std::vector< Vector2
try{
IndexValue pv;
pv.position=ilevel;
- pv.value=getCost(tmp);
+ pv.value=theCostFactory.getCost(tmp);
cost[ilevel]=pv;
}
catch(...){
@@ -95,7 +97,7 @@ template<class T> double FeatureSelector::forwardUnivariate(std::vector< Vector2
v[iclass].selectCols(subset,tmp[iclass]);
}
try{
- maxCost=getCost(tmp);
+ maxCost=theCostFactory.getCost(tmp);
}
catch(...){
subset.pop_back();
@@ -106,14 +108,14 @@ template<class T> double FeatureSelector::forwardUnivariate(std::vector< Vector2
}
//sequential forward selection Multivariate (Combination of N best features)
-template<class T> double FeatureSelector::forward(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int maxFeatures=0, short verbose){
+template<class T> double FeatureSelector::forward(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int maxFeatures=0, short verbose){
//Select feature with the best value (get maximal cost for 1 feature)
double maxCost=0;
int maxLevels=v[0][0].size();
if(!maxFeatures)
maxFeatures=maxLevels;
while(subset.size()<maxFeatures){
- maxCost=addFeature(v,*getCost,subset,verbose);
+ maxCost=addFeature(v,theCostFactory,subset,verbose);
if(verbose){
for(std::list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
std::cout << *lit << " ";
@@ -125,7 +127,7 @@ template<class T> double FeatureSelector::forward(std::vector< Vector2d<T> >& v,
}
//sequential backward selection
-template<class T> double FeatureSelector::backward(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int minFeatures, short verbose){
+template<class T> double FeatureSelector::backward(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int minFeatures, short verbose){
//Select features with least effect on cost when removed (obtain minFeatures eventually)
double maxCost=0;
int removedFeature;
@@ -134,9 +136,9 @@ template<class T> double FeatureSelector::backward(std::vector< Vector2d<T> >& v
subset.push_back(iFeature);
}
if(subset.size()==minFeatures)
- maxCost=getCost(v);
+ maxCost=theCostFactory.getCost(v);
while(subset.size()>minFeatures){
- maxCost=removeFeature(v,*getCost,subset,removedFeature,verbose);
+ maxCost=removeFeature(v,theCostFactory,subset,removedFeature,verbose);
if(verbose){
for(std::list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
std::cout << *lit << " ";
@@ -148,8 +150,7 @@ template<class T> double FeatureSelector::backward(std::vector< Vector2d<T> >& v
}
//floating search
-template<class T> double FeatureSelector::floating(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int maxFeatures, short verbose){
- double epsilon=0.001;
+template<class T> double FeatureSelector::floating(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int maxFeatures, double epsilon, short verbose){
std::vector<T> cost;
int maxLevels=v[0][0].size();
if(maxFeatures<1)
@@ -157,21 +158,22 @@ template<class T> double FeatureSelector::floating(std::vector< Vector2d<T> >& v
int k=subset.size();
if(k>=maxFeatures)
return -1;
- cost.push_back(-1);//init 0 features as cost -1
- cost.push_back(addFeature(v,*getCost,subset,verbose));
+ while(cost.size()<subset.size())
+ cost.push_back(1);//init original features as cost 1
+ cost.push_back(addFeature(v,theCostFactory,subset,verbose));
++k;
if(verbose>1)
- std::cout << "added " << subset.back() << ", " << cost.size()-1 << "/" << maxFeatures << " features selected with cost: " << cost.back() << std::endl;
+ std::cout << "added " << subset.back() << ", " << subset.size() << "/" << maxFeatures << " features selected with cost: " << cost.back() << std::endl;
else if(verbose){
for(std::list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
std::cout << *lit << " ";
std::cout << std::endl;
}
while(k<maxFeatures){
- cost.push_back(addFeature(v,*getCost,subset,verbose));
+ cost.push_back(addFeature(v,theCostFactory,subset,verbose));
++k;
if(verbose>1)
- std::cout << "added " << subset.back() << ", " << cost.size()-1 << "/" << maxFeatures << " features selected with cost: " << cost.back() << std::endl;
+ std::cout << "added " << subset.back() << ", " << subset.size() << "/" << maxFeatures << " features selected with cost: " << cost.back() << std::endl;
else if(verbose){
for(std::list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
std::cout << *lit << " ";
@@ -180,13 +182,13 @@ template<class T> double FeatureSelector::floating(std::vector< Vector2d<T> >& v
while(k>1){
int x_r;
- double cost_r=removeFeature(v,*getCost,subset,x_r,verbose);
+ double cost_r=removeFeature(v,theCostFactory,subset,x_r,verbose);
if(cost_r>cost[k-1]+epsilon){
--k;
cost[k]=cost_r;
cost.pop_back();
if(verbose>1)
- std::cout << "removed " << x_r << ", " << cost.size()-1 << "/" << maxFeatures << " features remain with cost: " << cost_r << std::endl;
+ std::cout << "removed " << x_r << ", " << subset.size() << "/" << maxFeatures << " features remain with cost: " << cost_r << std::endl;
else if(verbose){
for(std::list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
std::cout << *lit << " ";
@@ -207,7 +209,7 @@ template<class T> double FeatureSelector::floating(std::vector< Vector2d<T> >& v
}
//brute force search (search for all possible combinations and select best)
-template<class T> double FeatureSelector::bruteForce(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int maxFeatures, short verbose){
+template<class T> double FeatureSelector::bruteForce(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int maxFeatures, short verbose){
int maxLevels=v[0][0].size();
if(maxFeatures<1)
maxFeatures=maxLevels;
@@ -233,7 +235,7 @@ template<class T> double FeatureSelector::bruteForce(std::vector< Vector2d<T> >&
for(int iclass=0;iclass<v.size();++iclass)
v[iclass].selectCols(tmpset,tmpv[iclass]);
try{
- cost=getCost(tmpv);
+ cost=theCostFactory.getCost(tmpv);
}
catch(...){ //singular matrix encountered
catchset=tmpset;//this tmpset resulted in failure of getCost
@@ -263,7 +265,7 @@ template<class T> double FeatureSelector::bruteForce(std::vector< Vector2d<T> >&
return maxCost;
}
-template<class T> double FeatureSelector::addFeature(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, short verbose){
+template<class T> double FeatureSelector::addFeature(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, short verbose){
//select feature with the best value (get maximal cost for 1 feature)
std::list<int> tmpset=subset;//temporary set of selected features (levels)
std::vector< Vector2d<T> > tmp(v.size());
@@ -283,7 +285,7 @@ template<class T> double FeatureSelector::addFeature(std::vector< Vector2d<T> >&
v[iclass].selectCols(tmpset,tmp[iclass]);
}
try{
- cost=getCost(tmp);
+ cost=theCostFactory.getCost(tmp);
}
catch(...){
catchset=tmpset;//this tmpset resulted in singular matrix
@@ -308,7 +310,7 @@ template<class T> double FeatureSelector::addFeature(std::vector< Vector2d<T> >&
return maxCost;
}
-template<class T> double FeatureSelector::removeFeature(std::vector< Vector2d<T> >& v, double (*getCost)(const std::vector< Vector2d<T> >&), std::list<int>& subset, int& r, short verbose){
+template<class T> double FeatureSelector::removeFeature(std::vector< Vector2d<T> >& v, CostFactory& theCostFactory, std::list<int>& subset, int& r, short verbose){
//find the feature that has the least effect on the cost when it is removed from subset
std::list<int> tmpset=subset;//temporary set of selected features (levels)
std::vector< Vector2d<T> > tmp(v.size());
@@ -331,7 +333,7 @@ template<class T> double FeatureSelector::removeFeature(std::vector< Vector2d<T>
v[iclass].selectCols(tmpset,tmp[iclass]);
}
try{
- cost=getCost(tmp);
+ cost=theCostFactory.getCost(tmp);
}
catch(...){
catchset=tmpset;//this tmpset resulted in singular matrix
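
With the CostFactory-based interface above, the feature selection entry points no longer take a free getCost() function pointer; they take any CostFactory implementation by reference. A short calling sketch (the function and variable names are invented for the example):

#include <list>
#include <vector>
#include "algorithms/CostFactory.h"
#include "algorithms/FeatureSelector.h"

//select up to maxFeatures features with sequential forward selection;
//backward(), floating() and bruteForce() take the same CostFactory reference
double selectFeatures(std::vector< Vector2d<float> >& trainingFeatures, CostFactory& costFactory, int maxFeatures, short verbose){
  FeatureSelector selector;
  std::list<int> subset;//holds the indices of the selected features on return
  return selector.forward(trainingFeatures,costFactory,subset,maxFeatures,verbose);
}
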
diff --git a/src/algorithms/Filter.cc b/src/algorithms/Filter.cc
index 6644631..eb2ddb4 100644
--- a/src/algorithms/Filter.cc
+++ b/src/algorithms/Filter.cc
@@ -142,6 +142,42 @@ void filter::Filter::dwtCut(const ImgReaderGdal& input, ImgWriterGdal& output, c
}
}
+void filter::Filter::dwtCutFrom(const ImgReaderGdal& input, ImgWriterGdal& output, const std::string& wavelet_type, int family, int band){
+ const char* pszMessage;
+ void* pProgressArg=NULL;
+ GDALProgressFunc pfnProgress=GDALTermProgress;
+ double progress=0;
+ pfnProgress(progress,pszMessage,pProgressArg);
+ Vector2d<double> lineInput(input.nrOfBand(),input.nrOfCol());
+ Vector2d<double> lineOutput(input.nrOfBand(),input.nrOfCol());
+ for(int y=0;y<input.nrOfRow();++y){
+ for(int iband=0;iband<input.nrOfBand();++iband)
+ input.readData(lineInput[iband],GDT_Float64,y,iband);
+ vector<double> pixelInput(input.nrOfBand());
+ for(int x=0;x<input.nrOfCol();++x){
+ pixelInput=lineInput.selectCol(x);
+ dwtForward(pixelInput,wavelet_type,family);
+ for(int iband=0;iband<input.nrOfBand();++iband){
+ if(iband>=band)
+ pixelInput[iband]=0;
+ }
+ dwtInverse(pixelInput,wavelet_type,family);
+ for(int iband=0;iband<input.nrOfBand();++iband)
+ lineOutput[iband][x]=pixelInput[iband];
+ }
+ for(int iband=0;iband<input.nrOfBand();++iband){
+ try{
+ output.writeData(lineOutput[iband],GDT_Float64,y,iband);
+ }
+ catch(string errorstring){
+ cerr << errorstring << "in band " << iband << ", line " << y << endl;
+ }
+ }
+ progress=(1.0+y)/output.nrOfRow();
+ pfnProgress(progress,pszMessage,pProgressArg);
+ }
+}
+
void filter::Filter::dwtForward(std::vector<double>& data, const std::string& wavelet_type, int family){
int origsize=data.size();
//make sure data size is power of 2
@@ -248,6 +284,15 @@ void filter::Filter::smooth(const ImgReaderGdal& input, ImgWriterGdal& output, s
filter(input,output,down,offset);
}
+// void filter::Filter::smoothnodata(const ImgReaderGdal& input, ImgWriterGdal& output, short dim, short down, int offset)
+// {
+// assert(dim>0);
+// m_taps.resize(dim);
+// for(int itap=0;itap<dim;++itap)
+// m_taps[itap]=1.0/dim;
+// filter(input,output,down,offset);
+// }
+
void filter::Filter::filter(const ImgReaderGdal& input, ImgWriterGdal& output, short down, int offset)
{
Vector2d<double> lineInput(input.nrOfBand(),input.nrOfCol());
diff --git a/src/algorithms/Filter.h b/src/algorithms/Filter.h
index 3cdab8e..dc4cc12 100644
--- a/src/algorithms/Filter.h
+++ b/src/algorithms/Filter.h
@@ -33,7 +33,7 @@ extern "C" {
namespace filter
{
- enum FILTER_TYPE { median=0, var=1 , min=2, max=3, sum=4, mean=5, minmax=6, dilate=7, erode=8, close=9, open=10, homog=11, sobelx=12, sobely=13, sobelxy=14, sobelyx=-14, smooth=15, density=16, majority=17, mixed=18, smoothnodata=19, threshold=20, ismin=21, ismax=22, heterog=23, order=24, stdev=25, dwt=26, dwti=27, dwt_cut=28};
+ enum FILTER_TYPE { median=0, var=1 , min=2, max=3, sum=4, mean=5, minmax=6, dilate=7, erode=8, close=9, open=10, homog=11, sobelx=12, sobely=13, sobelxy=14, sobelyx=-14, smooth=15, density=16, majority=17, mixed=18, smoothnodata=19, threshold=20, ismin=21, ismax=22, heterog=23, order=24, stdev=25, dwt=26, dwti=27, dwt_cut=28, dwt_cut_from=29};
class Filter
{
@@ -77,6 +77,7 @@ public:
void dwtForward(std::vector<double>& data, const std::string& wavelet_type, int family);
void dwtInverse(std::vector<double>& data, const std::string& wavelet_type, int family);
void dwtCut(std::vector<double>& data, const std::string& wavelet_type, int family, double cut);
+ void dwtCutFrom(const ImgReaderGdal& input, ImgWriterGdal& output, const std::string& wavelet_type, int family, int band);
private:
@@ -85,6 +86,7 @@ private:
m_filterMap["dwt"]=filter::dwt;
m_filterMap["dwti"]=filter::dwti;
m_filterMap["dwt_cut"]=filter::dwt_cut;
+ m_filterMap["dwt_cut_from"]=filter::dwt_cut_from;
m_filterMap["stdev"]=filter::stdev;
m_filterMap["var"]=filter::var;
m_filterMap["min"]=filter::min;
@@ -417,6 +419,7 @@ template<class T> void Filter::filter(const std::vector<T>& input, std::vector<T
for(i=offset;i<m_taps.size()/2;++i){
if((i-offset)%down)
continue;
+ //todo:introduce nodata
output[(i-offset+down-1)/down]=m_taps[m_taps.size()/2]*input[i];
for(int t=1;t<=m_taps.size()/2;++t)
output[(i-offset+down-1)/down]+=(m_taps[m_taps.size()/2+t]+m_taps[m_taps.size()/2-t])*input[i+t];
@@ -425,6 +428,7 @@ template<class T> void Filter::filter(const std::vector<T>& input, std::vector<T
for(i=offset+m_taps.size()/2;i<input.size()-m_taps.size()/2;++i){
if((i-offset)%down)
continue;
+ //todo:introduce nodata
T leaveOut=(*(m_taps.begin()))*input[i-m_taps.size()/2];
T include=(m_taps.back())*input[i+m_taps.size()/2];
output[(i-offset+down-1)/down]=0;
@@ -435,7 +439,9 @@ template<class T> void Filter::filter(const std::vector<T>& input, std::vector<T
for(i=input.size()-m_taps.size()/2;i<input.size();++i){
if((i-offset)%down)
continue;
+ //todo:introduce nodata
output[(i-offset+down-1)/down]=m_taps[m_taps.size()/2]*input[i];
+ //todo:introduce nodata
for(int t=1;t<=m_taps.size()/2;++t)
output[(i-offset+down-1)/down]+=(m_taps[m_taps.size()/2+t]+m_taps[m_taps.size()/2-t])*input[i-t];
}
diff --git a/src/algorithms/Filter2d.cc b/src/algorithms/Filter2d.cc
index 8cf0db0..0fc643a 100644
--- a/src/algorithms/Filter2d.cc
+++ b/src/algorithms/Filter2d.cc
@@ -407,7 +407,7 @@ void filter2d::Filter2d::doit(const ImgReaderGdal& input, ImgWriterGdal& output,
continue;
outBuffer[x/down]=0;
std::vector<double> windowBuffer;
- std::map<int,int> occurrence;
+ std::map<long int,int> occurrence;
int centre=dimX*(dimY-1)/2+(dimX-1)/2;
for(int j=-(dimY-1)/2;j<=dimY/2;++j){
for(int i=-(dimX-1)/2;i<=dimX/2;++i){
@@ -574,10 +574,17 @@ void filter2d::Filter2d::doit(const ImgReaderGdal& input, ImgWriterGdal& output,
outBuffer[x/down]=(m_noDataValues.size())? m_noDataValues[0] : 0;
break;
}
+ case(filter2d::countid):{
+ if(windowBuffer.size())
+ outBuffer[x/down]=occurrence.size();
+ else
+ outBuffer[x/down]=(m_noDataValues.size())? m_noDataValues[0] : 0;
+ break;
+ }
case(filter2d::majority):{
if(occurrence.size()){
- std::map<int,int>::const_iterator maxit=occurrence.begin();
- for(std::map<int,int>::const_iterator mit=occurrence.begin();mit!=occurrence.end();++mit){
+ std::map<long int,int>::const_iterator maxit=occurrence.begin();
+ for(std::map<long int,int>::const_iterator mit=occurrence.begin();mit!=occurrence.end();++mit){
if(mit->second>maxit->second)
maxit=mit;
}
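
The new countid rule above writes the number of distinct (integer) values observed in the moving window; with the occurrence map widened to std::map<long int,int>, that count is simply occurrence.size(). A toy standalone illustration with invented window values:

#include <iostream>
#include <map>
#include <vector>

int main(){
  //hypothetical 2x3 window containing three distinct class ids
  double window[]={3,3,7,7,7,12};
  std::vector<double> windowBuffer(window,window+6);
  std::map<long int,int> occurrence;
  for(unsigned int i=0;i<windowBuffer.size();++i)
    ++occurrence[static_cast<long int>(windowBuffer[i])];
  std::cout << "distinct ids in window: " << occurrence.size() << std::endl;//prints 3
  return 0;
}
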
diff --git a/src/algorithms/Filter2d.h b/src/algorithms/Filter2d.h
index 54aeee0..deb27d8 100644
--- a/src/algorithms/Filter2d.h
+++ b/src/algorithms/Filter2d.h
@@ -58,7 +58,7 @@ extern "C" {
namespace filter2d
{
- enum FILTER_TYPE { median=100, var=101 , min=102, max=103, sum=104, mean=105, minmax=106, dilate=107, erode=108, close=109, open=110, homog=111, sobelx=112, sobely=113, sobelxy=114, sobelyx=115, smooth=116, density=117, majority=118, mixed=119, threshold=120, ismin=121, ismax=122, heterog=123, order=124, stdev=125, mrf=126, dwt=127, dwti=128, dwt_cut=129, scramble=130, shift=131, linearfeature=132, smoothnodata=133};
+ enum FILTER_TYPE { median=100, var=101 , min=102, max=103, sum=104, mean=105, minmax=106, dilate=107, erode=108, close=109, open=110, homog=111, sobelx=112, sobely=113, sobelxy=114, sobelyx=115, smooth=116, density=117, majority=118, mixed=119, threshold=120, ismin=121, ismax=122, heterog=123, order=124, stdev=125, mrf=126, dwt=127, dwti=128, dwt_cut=129, scramble=130, shift=131, linearfeature=132, smoothnodata=133, countid=134, dwt_cut_from=135};
enum RESAMPLE { NEAR = 0, BILINEAR = 1, BICUBIC = 2 };//bicubic not supported yet...
@@ -162,9 +162,11 @@ private:
m_filterMap["dwt"]=filter2d::dwt;
m_filterMap["dwti"]=filter2d::dwti;
m_filterMap["dwt_cut"]=filter2d::dwt_cut;
+ m_filterMap["dwt_cut_from"]=filter2d::dwt_cut_from;
m_filterMap["scramble"]=filter2d::scramble;
m_filterMap["shift"]=filter2d::shift;
m_filterMap["linearfeature"]=filter2d::linearfeature;
+ m_filterMap["countid"]=filter2d::countid;
}
Vector2d<double> m_taps;
@@ -825,14 +827,17 @@ template<class T> unsigned long int Filter2d::morphology(const Vector2d<T>& inpu
++nmasked;
}
}
- if(nmasked<nlimit){
+ if(nmasked<=nlimit){
++nchange;
//reset pixel in outputMask
outputMask[y][x]=0;
}
else{
//reset pixel height in tmpDSM
- inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors);
+ sort(neighbors.begin(),neighbors.end());
+ assert(neighbors.size()>1);
+ inBuffer[(dimY-1)/2][x]=neighbors[1];
+ /* inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors); */
}
}
progress=(1.0+y);
@@ -916,14 +921,17 @@ template<class T> unsigned long int Filter2d::morphology(const Vector2d<T>& inpu
++nmasked;
}
}
- if(nmasked<nlimit){
+ if(nmasked<=nlimit){
++nchange;
//reset pixel in outputMask
outputMask[y][x]=0;
}
else{
//reset pixel height in tmpDSM
- inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors);
+ sort(neighbors.begin(),neighbors.end());
+ assert(neighbors.size()>1);
+ inBuffer[(dimY-1)/2][x]=neighbors[1];
+ /* inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors); */
}
}
progress=(1.0+y);
@@ -956,30 +964,26 @@ template<class T> unsigned long int Filter2d::morphology(const Vector2d<T>& inpu
if(outputMask.size()!=inputDSM.nRows())
outputMask.resize(inputDSM.nRows());
int indexI=0;
- int indexJ=0;
- //initialize last half of inBuffer
- for(int j=-(dimY-1)/2;j<=dimY/2;++j){
+ int indexJ=inputDSM.nRows()-1;
+ //initialize first half of inBuffer
+ for(int j=inputDSM.nRows()-dimY/2;j<inputDSM.nRows();--j){
for(int i=0;i<inputDSM.nCols();++i)
inBuffer[indexJ][i]=tmpDSM[abs(j)][i];
++indexJ;
}
for(int y=tmpDSM.nRows()-1;y>=0;--y){
- if(y){//inBuffer already initialized for y=0
- //erase first line from inBuffer
- inBuffer.erase(inBuffer.begin());
- //read extra line and push back to inBuffer if not out of bounds
- if(y+dimY/2<tmpDSM.nRows()){
+ if(y<tmpDSM.nRows()-1){//inBuffer already initialized for y=tmpDSM.nRows()-1
+ //erase last line from inBuffer
+ inBuffer.erase(inBuffer.end()-1);
+ //read extra line and insert to inBuffer if not out of bounds
+ if(y-dimY/2>0){
//allocate buffer
- inBuffer.push_back(inBuffer.back());
+ inBuffer.insert(inBuffer.begin(),inBuffer.back());
for(int i=0;i<tmpDSM.nCols();++i)
- inBuffer[inBuffer.size()-1][i]=tmpDSM[y+dimY/2][i];
+ inBuffer[0][i]=tmpDSM[y-dimY/2][i];
}
else{
- int over=y+dimY/2-tmpDSM.nRows();
- int index=(inBuffer.size()-1)-over;
- assert(index>=0);
- assert(index<inBuffer.size());
- inBuffer.push_back(inBuffer[index]);
+ inBuffer.insert(inBuffer.begin(),inBuffer[abs(y-dimY/2)]);
}
}
for(int x=tmpDSM.nCols()-1;x>=0;--x){
@@ -1007,14 +1011,17 @@ template<class T> unsigned long int Filter2d::morphology(const Vector2d<T>& inpu
++nmasked;
}
}
- if(nmasked<nlimit){
+ if(nmasked<=nlimit){
++nchange;
//reset pixel in outputMask
outputMask[y][x]=0;
}
else{
//reset pixel height in tmpDSM
- inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors);
+ sort(neighbors.begin(),neighbors.end());
+ assert(neighbors.size()>1);
+ inBuffer[(dimY-1)/2][x]=neighbors[1];
+ /* inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors); */
}
}
progress=(1.0+y);
@@ -1048,29 +1055,25 @@ template<class T> unsigned long int Filter2d::morphology(const Vector2d<T>& inpu
outputMask.resize(inputDSM.nRows());
int indexI=0;
int indexJ=0;
- //initialize last half of inBuffer
- for(int j=-(dimY-1)/2;j<=dimY/2;++j){
+ //initialize first half of inBuffer
+ for(int j=inputDSM.nRows()-dimY/2;j<inputDSM.nRows();--j){
for(int i=0;i<inputDSM.nCols();++i)
inBuffer[indexJ][i]=tmpDSM[abs(j)][i];
++indexJ;
}
for(int y=tmpDSM.nRows()-1;y>=0;--y){
- if(y){//inBuffer already initialized for y=0
- //erase first line from inBuffer
- inBuffer.erase(inBuffer.begin());
- //read extra line and push back to inBuffer if not out of bounds
- if(y+dimY/2<tmpDSM.nRows()){
+ if(y<tmpDSM.nRows()-1){//inBuffer already initialized for y=tmpDSM.nRows()-1
+ //erase last line from inBuffer
+ inBuffer.erase(inBuffer.end()-1);
+ //read extra line and insert to inBuffer if not out of bounds
+ if(y-dimY/2>0){
//allocate buffer
- inBuffer.push_back(inBuffer.back());
+ inBuffer.insert(inBuffer.begin(),inBuffer.back());
for(int i=0;i<tmpDSM.nCols();++i)
- inBuffer[inBuffer.size()-1][i]=tmpDSM[y+dimY/2][i];
+ inBuffer[0][i]=tmpDSM[y-dimY/2][i];
}
else{
- int over=y+dimY/2-tmpDSM.nRows();
- int index=(inBuffer.size()-1)-over;
- assert(index>=0);
- assert(index<inBuffer.size());
- inBuffer.push_back(inBuffer[index]);
+ inBuffer.insert(inBuffer.begin(),inBuffer[abs(y-dimY/2)]);
}
}
for(int x=0;x<tmpDSM.nCols();++x){
@@ -1098,14 +1101,17 @@ template<class T> unsigned long int Filter2d::morphology(const Vector2d<T>& inpu
++nmasked;
}
}
- if(nmasked<nlimit){
+ if(nmasked<=nlimit){
++nchange;
//reset pixel in outputMask
outputMask[y][x]=0;
}
else{
//reset pixel height in tmpDSM
- inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors);
+ sort(neighbors.begin(),neighbors.end());
+ assert(neighbors.size()>1);
+ inBuffer[(dimY-1)/2][x]=neighbors[1];
+ /* inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors); */
}
}
progress=(1.0+y);
diff --git a/src/algorithms/ImgRegression.cc b/src/algorithms/ImgRegression.cc
new file mode 100644
index 0000000..55b3c2f
--- /dev/null
+++ b/src/algorithms/ImgRegression.cc
@@ -0,0 +1,91 @@
+/**********************************************************************
+ImgRegression.cc: class to calculate regression between two raster datasets
+Copyright (C) 2008-2014 Pieter Kempeneers
+
+This file is part of pktools
+
+pktools is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+pktools is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with pktools. If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+#include "ImgRegression.h"
+#include <iostream>
+
+using namespace imgregression;
+
+ImgRegression::ImgRegression(void)
+: m_threshold(0), m_down(1)
+{}
+
+ImgRegression::~ImgRegression(void)
+{}
+
+double ImgRegression::getRMSE(const ImgReaderGdal& imgReader1, const ImgReaderGdal& imgReader2, double& c0, double& c1, short verbose) const{
+ c0=0;
+ c1=1;
+ int icol1=0,irow1=0;
+ std::vector<double> rowBuffer1(imgReader1.nrOfCol());
+ std::vector<double> rowBuffer2(imgReader2.nrOfCol());
+ std::vector<double> buffer1;
+ std::vector<double> buffer2;
+
+ srand(time(NULL));
+ for(irow1=0;irow1<imgReader1.nrOfRow();++irow1){
+ if(irow1%m_down)
+ continue;
+ icol1=0;
+ double icol2=0,irow2=0;
+ double geox=0,geoy=0;
+ imgReader1.readData(rowBuffer1,GDT_Float64,irow1);
+ imgReader1.image2geo(icol1,irow1,geox,geoy);
+ imgReader2.geo2image(geox,geoy,icol2,irow2);
+ icol2=static_cast<int>(icol2);
+ irow2=static_cast<int>(irow2);
+ if(irow2<0||irow2>=imgReader2.nrOfRow())
+ continue;
+ imgReader2.readData(rowBuffer2,GDT_Float64,irow2);
+ for(icol1=0;icol1<imgReader1.nrOfCol();++icol1){
+ if(icol1%m_down)
+ continue;
+ if(m_threshold>0){//percentual value
+ double p=static_cast<double>(rand())/(RAND_MAX);
+ p*=100.0;
+ if(p>m_threshold)
+ continue;//do not select for now, go to next column
+ }
+ imgReader1.image2geo(icol1,irow1,geox,geoy);
+ imgReader2.geo2image(geox,geoy,icol2,irow2);
+ if(icol2<0||icol2>=imgReader2.nrOfCol())
+ continue;
+ icol2=static_cast<int>(icol2);
+ irow2=static_cast<int>(irow2);
+ //check for nodata
+ double value1=rowBuffer1[icol1];
+ double value2=rowBuffer2[icol2];
+ if(imgReader1.isNoData(value1)||imgReader2.isNoData(value2))
+ continue;
+
+ buffer1.push_back(value1);
+ buffer2.push_back(value2);
+ if(verbose>1)
+ std::cout << geox << " " << geoy << " " << icol1 << " " << irow1 << " " << icol2 << " " << irow2 << " " << buffer1.back() << " " << buffer2.back() << std::endl;
+ }
+ }
+ double err=0;
+ if(buffer1.size()||buffer2.size()){
+ statfactory::StatFactory stat;
+ err=stat.linear_regression_err(buffer1,buffer2,c0,c1);
+ }
+ if(verbose)
+ std::cout << "linear regression based on " << buffer1.size() << " points: " << c0 << "+" << c1 << " * x " << " with rmse: " << err << std::endl;
+ return err;
+}
diff --git a/src/algorithms/ImgRegression.h b/src/algorithms/ImgRegression.h
new file mode 100644
index 0000000..12ba62d
--- /dev/null
+++ b/src/algorithms/ImgRegression.h
@@ -0,0 +1,42 @@
+/**********************************************************************
+ImgRegression.h: class to calculate regression between two raster datasets
+Copyright (C) 2008-2014 Pieter Kempeneers
+
+This file is part of pktools
+
+pktools is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+pktools is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with pktools. If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+#ifndef _IMGREGRESSION_H_
+#define _IMGREGRESSION_H_
+
+#include <vector>
+#include "imageclasses/ImgReaderGdal.h"
+#include "imageclasses/ImgWriterGdal.h"
+#include "StatFactory.h"
+
+namespace imgregression
+{
+ class ImgRegression{
+ public:
+ ImgRegression(void);
+ ~ImgRegression(void);
+ double getRMSE(const ImgReaderGdal& imgReader1, const ImgReaderGdal& imgReader2, double &c0, double &c1, short verbose=0) const;
+ void setThreshold(double theThreshold){m_threshold=theThreshold;};
+ void setDown(int theDown){m_down=theDown;};
+ private:
+ int m_down;
+ double m_threshold;
+ };
+}
+#endif //_IMGREGRESSION_H_
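
ImgRegression fits a linear model between two rasters on a subsample of pixels and returns the RMSE of that fit. A usage sketch of the new class, assuming two already opened ImgReaderGdal objects; the down and threshold values below are invented for the example (setDown(10) samples every 10th row and column, setThreshold(5) randomly keeps about 5% of the remaining pixels):

#include <iostream>
#include "algorithms/ImgRegression.h"

//report the regression imgReader2 ~ c0 + c1 * imgReader1 and its rmse
double reportRegression(const ImgReaderGdal& imgReader1, const ImgReaderGdal& imgReader2){
  imgregression::ImgRegression regressor;
  regressor.setDown(10);//use every 10th row and column
  regressor.setThreshold(5);//randomly keep about 5% of the sampled pixels
  double c0=0;
  double c1=1;
  double rmse=regressor.getRMSE(imgReader1,imgReader2,c0,c1);
  std::cout << "c0=" << c0 << " c1=" << c1 << " rmse=" << rmse << std::endl;
  return rmse;
}
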
diff --git a/src/algorithms/Makefile.am b/src/algorithms/Makefile.am
index f06c8ff..ab568ab 100644
--- a/src/algorithms/Makefile.am
+++ b/src/algorithms/Makefile.am
@@ -25,7 +25,7 @@ libalgorithms_ladir = $(includedir)/pktools/algorithms
libalgorithms_la_LDFLAGS = -version-info $(PKTOOLS_SO_VERSION) $(AM_LDFLAGS)
# the list of header files that belong to the library (to be installed later)
-libalgorithms_la_HEADERS = Egcs.h Filter2d.h Filter.h StatFactory.h ConfusionMatrix.h svm.h FeatureSelector.h
+libalgorithms_la_HEADERS = Egcs.h Filter2d.h Filter.h StatFactory.h ConfusionMatrix.h svm.h CostFactory.h FeatureSelector.h
if USE_FANN
libalgorithms_la_HEADERS += myfann_cpp.h
diff --git a/src/algorithms/Makefile.in b/src/algorithms/Makefile.in
index 4e47a6d..a524535 100644
--- a/src/algorithms/Makefile.in
+++ b/src/algorithms/Makefile.in
@@ -100,9 +100,9 @@ am__installdirs = "$(DESTDIR)$(libdir)" \
LTLIBRARIES = $(lib_LTLIBRARIES)
libalgorithms_la_LIBADD =
am__libalgorithms_la_SOURCES_DIST = Egcs.h Filter2d.h Filter.h \
- StatFactory.h ConfusionMatrix.h svm.h FeatureSelector.h \
- myfann_cpp.h OptFactory.h Egcs.cc Filter2d.cc Filter.cc \
- ConfusionMatrix.cc svm.cpp
+ StatFactory.h ConfusionMatrix.h svm.h CostFactory.h \
+ FeatureSelector.h myfann_cpp.h OptFactory.h Egcs.cc \
+ Filter2d.cc Filter.cc ConfusionMatrix.cc svm.cpp
am__objects_1 =
am__objects_2 = $(am__objects_1) $(am__objects_1)
am_libalgorithms_la_OBJECTS = $(am__objects_2) Egcs.lo Filter2d.lo \
@@ -141,8 +141,8 @@ am__can_run_installinfo = \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__libalgorithms_la_HEADERS_DIST = Egcs.h Filter2d.h Filter.h \
- StatFactory.h ConfusionMatrix.h svm.h FeatureSelector.h \
- myfann_cpp.h OptFactory.h
+ StatFactory.h ConfusionMatrix.h svm.h CostFactory.h \
+ FeatureSelector.h myfann_cpp.h OptFactory.h
HEADERS = $(libalgorithms_la_HEADERS)
ETAGS = etags
CTAGS = ctags
@@ -309,8 +309,8 @@ libalgorithms_la_LDFLAGS = -version-info $(PKTOOLS_SO_VERSION) $(AM_LDFLAGS)
# the list of header files that belong to the library (to be installed later)
libalgorithms_la_HEADERS = Egcs.h Filter2d.h Filter.h StatFactory.h \
- ConfusionMatrix.h svm.h FeatureSelector.h $(am__append_1) \
- $(am__append_2)
+ ConfusionMatrix.h svm.h CostFactory.h FeatureSelector.h \
+ $(am__append_1) $(am__append_2)
# the sources to add to the library and to add to the source distribution
libalgorithms_la_SOURCES = $(libalgorithms_la_HEADERS) Egcs.cc Filter2d.cc Filter.cc ConfusionMatrix.cc svm.cpp
diff --git a/src/algorithms/StatFactory.h b/src/algorithms/StatFactory.h
index e6c410b..abf7640 100644
--- a/src/algorithms/StatFactory.h
+++ b/src/algorithms/StatFactory.h
@@ -178,11 +178,13 @@ public:
template<class T> double correlation(const std::vector<T>& x, const std::vector<T>& y, int delay=0) const;
template<class T> double cross_correlation(const std::vector<T>& x, const std::vector<T>& y, int maxdelay, std::vector<T>& z) const;
template<class T> double linear_regression(const std::vector<T>& x, const std::vector<T>& y, double &c0, double &c1) const;
+ template<class T> double linear_regression_err(const std::vector<T>& x, const std::vector<T>& y, double &c0, double &c1) const;
template<class T> void interpolateUp(const std::vector<double>& wavelengthIn, const std::vector<T>& input, const std::vector<double>& wavelengthOut, const std::string& type, std::vector<T>& output, bool verbose=false) const;
template<class T> void interpolateUp(const std::vector<double>& wavelengthIn, const std::vector< std::vector<T> >& input, const std::vector<double>& wavelengthOut, const std::string& type, std::vector< std::vector<T> >& output, bool verbose=false) const;
// template<class T> void interpolateUp(const std::vector< std::vector<T> >& input, std::vector< std::vector<T> >& output, double start, double end, double step, const gsl_interp_type* type);
// template<class T> void interpolateUp(const std::vector< std::vector<T> >& input, const std::vector<double>& wavelengthIn, std::vector< std::vector<T> >& output, std::vector<double>& wavelengthOut, double start, double end, double step, const gsl_interp_type* type);
template<class T> void interpolateUp(const std::vector<T>& input, std::vector<T>& output, int nbin) const;
+ template<class T> void nearUp(const std::vector<T>& input, std::vector<T>& output) const;
template<class T> void interpolateUp(double* input, int dim, std::vector<T>& output, int nbin);
template<class T> void interpolateDown(const std::vector<T>& input, std::vector<T>& output, int nbin) const;
template<class T> void interpolateDown(double* input, int dim, std::vector<T>& output, int nbin);
@@ -871,7 +873,7 @@ template<class T> double StatFactory::cross_correlation(const std::vector<T>& x,
return sumCorrelation;
}
- template<class T> double StatFactory::linear_regression(const std::vector<T>& x, const std::vector<T>& y, double &c0, double &c1) const{
+template<class T> double StatFactory::linear_regression(const std::vector<T>& x, const std::vector<T>& y, double &c0, double &c1) const{
assert(x.size()==y.size());
assert(x.size());
double cov00;
@@ -882,6 +884,17 @@ template<class T> double StatFactory::cross_correlation(const std::vector<T>& x,
return (1-sumsq/var(y)/(y.size()-1));
}
+template<class T> double StatFactory::linear_regression_err(const std::vector<T>& x, const std::vector<T>& y, double &c0, double &c1) const{
+ assert(x.size()==y.size());
+ assert(x.size());
+ double cov00;
+ double cov01;
+ double cov11;
+ double sumsq;
+ gsl_fit_linear(&(x[0]),1,&(y[0]),1,x.size(),&c0,&c1,&cov00,&cov01,&cov11,&sumsq);
+ return sqrt((sumsq)/(y.size()));
+}
+
//alternatively: use GNU scientific library:
// gsl_stats_correlation (const double data1[], const size_t stride1, const double data2[], const size_t stride2, const size_t n)
@@ -977,6 +990,22 @@ template<class T> void StatFactory::interpolateUp(const std::vector<T>& input, s
}
}
+template<class T> void StatFactory::nearUp(const std::vector<T>& input, std::vector<T>& output) const
+{
+ assert(input.size());
+ assert(output.size()>=input.size());
+ int dimInput=input.size();
+ int dimOutput=output.size();
+
+ for(int iin=0;iin<dimInput;++iin){
+ for(int iout=0;iout<dimOutput/dimInput;++iout){
+ int indexOutput=iin*dimOutput/dimInput+iout;
+ assert(indexOutput<output.size());
+ output[indexOutput]=input[iin];
+ }
+ }
+}
+
template<class T> void StatFactory::interpolateUp(double* input, int dim, std::vector<T>& output, int nbin)
{
assert(nbin);
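
Two helpers are added to StatFactory in this version: linear_regression_err(), which runs the same gsl_fit_linear fit as linear_regression() but returns the root mean square error sqrt(sumsq/n) instead of the goodness of fit, and nearUp(), which repeats each input sample output.size()/input.size() times (nearest-neighbour upsampling). A small sketch of the RMSE variant with invented data (link against GSL, as the library already requires):

#include <iostream>
#include <vector>
#include "algorithms/StatFactory.h"

int main(){
  std::vector<double> x;
  std::vector<double> y;
  for(int i=0;i<10;++i){
    x.push_back(i);
    y.push_back(2.0*i+1.0+((i%2)?0.1:-0.1));//roughly y = 1 + 2x with small noise
  }
  statfactory::StatFactory stat;
  double c0=0;
  double c1=0;
  double rmse=stat.linear_regression_err(x,y,c0,c1);
  std::cout << "y ~ " << c0 << " + " << c1 << " * x, rmse: " << rmse << std::endl;
  return 0;
}
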
diff --git a/src/apps/Makefile.am b/src/apps/Makefile.am
index d8ff05b..6aeeeae 100644
--- a/src/apps/Makefile.am
+++ b/src/apps/Makefile.am
@@ -6,7 +6,7 @@ LDADD = $(GSL_LIBS) $(GDAL_LDFLAGS) $(top_builddir)/src/algorithms/libalgorithms
###############################################################################
# the program to build and install (the names of the final binaries)
-bin_PROGRAMS = pkinfo pkcrop pkreclass pkgetmask pksetmask pkcreatect pkdumpimg pkdumpogr pksieve pkstatascii pkstatogr pkegcs pkextract pkfillnodata pkfilter pkfilterdem pkenhance pkfilterascii pkdsm2shadow pkcomposite pkndvi pkpolygonize pkascii2img pkdiff pksvm pkfssvm pkascii2ogr pkeditogr
+bin_PROGRAMS = pkinfo pkcrop pkreclass pkdiff pkgetmask pksetmask pkcreatect pkdumpimg pkdumpogr pksieve pkstatascii pkstatogr pkegcs pkextract pkfillnodata pkfilter pkkalman pkfilterdem pkenhance pkfilterascii pkdsm2shadow pkcomposite pkndvi pkpolygonize pkascii2img pksvm pkfssvm pkascii2ogr pkeditogr
# the program to build but not install (the names of the final binaries)
#noinst_PROGRAMS = pkxcorimg pkgeom
@@ -16,9 +16,9 @@ bin_PROGRAMS += pkann pkfsann pkregann
pkann_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h pkann.cc
pkann_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base $(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
pkann_LDADD = $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
-pkfsann_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h pkfsann.cc
+pkfsann_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h $(top_srcdir)/src/algorithms/CostFactory.h pkfsann.h pkfsann.cc
pkfsann_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base $(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
-pkfsann_LDADD = $(GSL_LIBS) $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
+pkfsann_LDADD = $(GSL_LIBS) $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS) -lalgorithms
pkregann_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h pkregann.cc
pkregann_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base $(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
pkregann_LDADD = $(GSL_LIBS) $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
@@ -38,6 +38,7 @@ endif
pkinfo_SOURCES = pkinfo.cc
pkcrop_SOURCES = pkcrop.cc
pkreclass_SOURCES = pkreclass.cc
+pkdiff_SOURCES = pkdiff.cc
pkgetmask_SOURCES = pkgetmask.cc
pksetmask_SOURCES = pksetmask.cc
pkcreatect_SOURCES = pkcreatect.cc
@@ -53,6 +54,8 @@ pkextract_SOURCES = pkextract.cc
pkfillnodata_SOURCES = pkfillnodata.cc
pkfilter_SOURCES = pkfilter.cc
pkfilter_LDADD = $(GSL_LIBS) $(AM_LDFLAGS) -lgsl -lgdal
+pkkalman_SOURCES = pkkalman.cc $(top_srcdir)/src/algorithms/ImgRegression.h $(top_srcdir)/src/algorithms/ImgRegression.cc
+pkkalman_LDADD = -lalgorithms $(GSL_LIBS) $(AM_LDFLAGS) -lalgorithms
pkfilterdem_SOURCES = pkfilterdem.cc
pkenhance_SOURCES = pkenhance.cc
pkenhance_LDADD = $(AM_LDFLAGS) -lgdal
@@ -62,10 +65,9 @@ pkdsm2shadow_SOURCES = pkdsm2shadow.cc
pkcomposite_SOURCES = pkcomposite.cc
pkndvi_SOURCES = pkndvi.cc
pkpolygonize_SOURCES = pkpolygonize.cc
-pkdiff_SOURCES = pkdiff.cc
pksvm_SOURCES = $(top_srcdir)/src/algorithms/svm.h $(top_srcdir)/src/algorithms/svm.cpp pksvm.cc
-pkfssvm_SOURCES = $(top_srcdir)/src/algorithms/svm.h $(top_srcdir)/src/algorithms/svm.cpp pkfssvm.cc
-pkfssvm_LDADD = $(GSL_LIBS) $(AM_LDFLAGS)
+pkfssvm_SOURCES = $(top_srcdir)/src/algorithms/svm.h $(top_srcdir)/src/algorithms/FeatureSelector.h $(top_srcdir)/src/algorithms/CostFactory.h $(top_srcdir)/src/algorithms/svm.cpp pkfssvm.h pkfssvm.cc
+pkfssvm_LDADD = $(GSL_LIBS) $(AM_LDFLAGS) -lalgorithms
pkascii2img_SOURCES = pkascii2img.cc
pkascii2ogr_SOURCES = pkascii2ogr.cc
pkeditogr_SOURCES = pkeditogr.cc
diff --git a/src/apps/Makefile.in b/src/apps/Makefile.in
index 63a5c4c..734ad41 100644
--- a/src/apps/Makefile.in
+++ b/src/apps/Makefile.in
@@ -52,16 +52,16 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
bin_PROGRAMS = pkinfo$(EXEEXT) pkcrop$(EXEEXT) pkreclass$(EXEEXT) \
- pkgetmask$(EXEEXT) pksetmask$(EXEEXT) pkcreatect$(EXEEXT) \
- pkdumpimg$(EXEEXT) pkdumpogr$(EXEEXT) pksieve$(EXEEXT) \
- pkstatascii$(EXEEXT) pkstatogr$(EXEEXT) pkegcs$(EXEEXT) \
- pkextract$(EXEEXT) pkfillnodata$(EXEEXT) pkfilter$(EXEEXT) \
- pkfilterdem$(EXEEXT) pkenhance$(EXEEXT) pkfilterascii$(EXEEXT) \
+ pkdiff$(EXEEXT) pkgetmask$(EXEEXT) pksetmask$(EXEEXT) \
+ pkcreatect$(EXEEXT) pkdumpimg$(EXEEXT) pkdumpogr$(EXEEXT) \
+ pksieve$(EXEEXT) pkstatascii$(EXEEXT) pkstatogr$(EXEEXT) \
+ pkegcs$(EXEEXT) pkextract$(EXEEXT) pkfillnodata$(EXEEXT) \
+ pkfilter$(EXEEXT) pkkalman$(EXEEXT) pkfilterdem$(EXEEXT) \
+ pkenhance$(EXEEXT) pkfilterascii$(EXEEXT) \
pkdsm2shadow$(EXEEXT) pkcomposite$(EXEEXT) pkndvi$(EXEEXT) \
- pkpolygonize$(EXEEXT) pkascii2img$(EXEEXT) pkdiff$(EXEEXT) \
- pksvm$(EXEEXT) pkfssvm$(EXEEXT) pkascii2ogr$(EXEEXT) \
- pkeditogr$(EXEEXT) $(am__EXEEXT_1) $(am__EXEEXT_2) \
- $(am__EXEEXT_3)
+ pkpolygonize$(EXEEXT) pkascii2img$(EXEEXT) pksvm$(EXEEXT) \
+ pkfssvm$(EXEEXT) pkascii2ogr$(EXEEXT) pkeditogr$(EXEEXT) \
+ $(am__EXEEXT_1) $(am__EXEEXT_2) $(am__EXEEXT_3)
# the program to build but not install (the names of the final binaries)
#noinst_PROGRAMS = pkxcorimg pkgeom
@@ -217,6 +217,7 @@ pkfilterdem_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(top_builddir)/src/fileclasses/libfileClasses.la \
$(top_builddir)/src/base/libbase.la
am__pkfsann_SOURCES_DIST = $(top_srcdir)/src/algorithms/myfann_cpp.h \
+ $(top_srcdir)/src/algorithms/CostFactory.h pkfsann.h \
pkfsann.cc
@USE_FANN_TRUE@am_pkfsann_OBJECTS = pkfsann-pkfsann.$(OBJEXT)
pkfsann_OBJECTS = $(am_pkfsann_OBJECTS)
@@ -245,6 +246,9 @@ pkinfo_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(top_builddir)/src/imageclasses/libimageClasses.la \
$(top_builddir)/src/fileclasses/libfileClasses.la \
$(top_builddir)/src/base/libbase.la
+am_pkkalman_OBJECTS = pkkalman.$(OBJEXT) ImgRegression.$(OBJEXT)
+pkkalman_OBJECTS = $(am_pkkalman_OBJECTS)
+pkkalman_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_2)
am__pklas2img_SOURCES_DIST = pklas2img.cc
@USE_LAS_TRUE@am_pklas2img_OBJECTS = pklas2img.$(OBJEXT)
pklas2img_OBJECTS = $(am_pklas2img_OBJECTS)
@@ -358,8 +362,8 @@ SOURCES = $(pkann_SOURCES) $(pkascii2img_SOURCES) \
$(pkfillnodata_SOURCES) $(pkfilter_SOURCES) \
$(pkfilterascii_SOURCES) $(pkfilterdem_SOURCES) \
$(pkfsann_SOURCES) $(pkfssvm_SOURCES) $(pkgetmask_SOURCES) \
- $(pkinfo_SOURCES) $(pklas2img_SOURCES) $(pkndvi_SOURCES) \
- $(pkoptsvm_SOURCES) $(pkpolygonize_SOURCES) \
+ $(pkinfo_SOURCES) $(pkkalman_SOURCES) $(pklas2img_SOURCES) \
+ $(pkndvi_SOURCES) $(pkoptsvm_SOURCES) $(pkpolygonize_SOURCES) \
$(pkreclass_SOURCES) $(pkregann_SOURCES) $(pksetmask_SOURCES) \
$(pksieve_SOURCES) $(pkstatascii_SOURCES) $(pkstatogr_SOURCES) \
$(pksvm_SOURCES)
@@ -372,7 +376,7 @@ DIST_SOURCES = $(am__pkann_SOURCES_DIST) $(pkascii2img_SOURCES) \
$(pkfillnodata_SOURCES) $(pkfilter_SOURCES) \
$(pkfilterascii_SOURCES) $(pkfilterdem_SOURCES) \
$(am__pkfsann_SOURCES_DIST) $(pkfssvm_SOURCES) \
- $(pkgetmask_SOURCES) $(pkinfo_SOURCES) \
+ $(pkgetmask_SOURCES) $(pkinfo_SOURCES) $(pkkalman_SOURCES) \
$(am__pklas2img_SOURCES_DIST) $(pkndvi_SOURCES) \
$(am__pkoptsvm_SOURCES_DIST) $(pkpolygonize_SOURCES) \
$(pkreclass_SOURCES) $(am__pkregann_SOURCES_DIST) \
@@ -530,9 +534,9 @@ LDADD = $(GSL_LIBS) $(GDAL_LDFLAGS) $(top_builddir)/src/algorithms/libalgorithms
@USE_FANN_TRUE@pkann_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h pkann.cc
@USE_FANN_TRUE@pkann_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base $(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
@USE_FANN_TRUE@pkann_LDADD = $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
-@USE_FANN_TRUE@pkfsann_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h pkfsann.cc
+@USE_FANN_TRUE@pkfsann_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h $(top_srcdir)/src/algorithms/CostFactory.h pkfsann.h pkfsann.cc
@USE_FANN_TRUE@pkfsann_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base $(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
-@USE_FANN_TRUE@pkfsann_LDADD = $(GSL_LIBS) $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
+@USE_FANN_TRUE@pkfsann_LDADD = $(GSL_LIBS) $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS) -lalgorithms
@USE_FANN_TRUE@pkregann_SOURCES = $(top_srcdir)/src/algorithms/myfann_cpp.h pkregann.cc
@USE_FANN_TRUE@pkregann_CXXFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/src/base $(FANN_CFLAGS) -I$(top_srcdir)/src/algorithms $(AM_CXXFLAGS)
@USE_FANN_TRUE@pkregann_LDADD = $(GSL_LIBS) $(FANN_LIBS) $(FANN_CFLAGS) $(AM_LDFLAGS)
@@ -544,6 +548,7 @@ LDADD = $(GSL_LIBS) $(GDAL_LDFLAGS) $(top_builddir)/src/algorithms/libalgorithms
pkinfo_SOURCES = pkinfo.cc
pkcrop_SOURCES = pkcrop.cc
pkreclass_SOURCES = pkreclass.cc
+pkdiff_SOURCES = pkdiff.cc
pkgetmask_SOURCES = pkgetmask.cc
pksetmask_SOURCES = pksetmask.cc
pkcreatect_SOURCES = pkcreatect.cc
@@ -559,6 +564,8 @@ pkextract_SOURCES = pkextract.cc
pkfillnodata_SOURCES = pkfillnodata.cc
pkfilter_SOURCES = pkfilter.cc
pkfilter_LDADD = $(GSL_LIBS) $(AM_LDFLAGS) -lgsl -lgdal
+pkkalman_SOURCES = pkkalman.cc $(top_srcdir)/src/algorithms/ImgRegression.h $(top_srcdir)/src/algorithms/ImgRegression.cc
+pkkalman_LDADD = -lalgorithms $(GSL_LIBS) $(AM_LDFLAGS) -lalgorithms
pkfilterdem_SOURCES = pkfilterdem.cc
pkenhance_SOURCES = pkenhance.cc
pkenhance_LDADD = $(AM_LDFLAGS) -lgdal
@@ -568,10 +575,9 @@ pkdsm2shadow_SOURCES = pkdsm2shadow.cc
pkcomposite_SOURCES = pkcomposite.cc
pkndvi_SOURCES = pkndvi.cc
pkpolygonize_SOURCES = pkpolygonize.cc
-pkdiff_SOURCES = pkdiff.cc
pksvm_SOURCES = $(top_srcdir)/src/algorithms/svm.h $(top_srcdir)/src/algorithms/svm.cpp pksvm.cc
-pkfssvm_SOURCES = $(top_srcdir)/src/algorithms/svm.h $(top_srcdir)/src/algorithms/svm.cpp pkfssvm.cc
-pkfssvm_LDADD = $(GSL_LIBS) $(AM_LDFLAGS)
+pkfssvm_SOURCES = $(top_srcdir)/src/algorithms/svm.h $(top_srcdir)/src/algorithms/FeatureSelector.h $(top_srcdir)/src/algorithms/CostFactory.h $(top_srcdir)/src/algorithms/svm.cpp pkfssvm.h pkfssvm.cc
+pkfssvm_LDADD = $(GSL_LIBS) $(AM_LDFLAGS) -lalgorithms
pkascii2img_SOURCES = pkascii2img.cc
pkascii2ogr_SOURCES = pkascii2ogr.cc
pkeditogr_SOURCES = pkeditogr.cc
@@ -722,6 +728,9 @@ pkgetmask$(EXEEXT): $(pkgetmask_OBJECTS) $(pkgetmask_DEPENDENCIES) $(EXTRA_pkget
pkinfo$(EXEEXT): $(pkinfo_OBJECTS) $(pkinfo_DEPENDENCIES) $(EXTRA_pkinfo_DEPENDENCIES)
@rm -f pkinfo$(EXEEXT)
$(CXXLINK) $(pkinfo_OBJECTS) $(pkinfo_LDADD) $(LIBS)
+pkkalman$(EXEEXT): $(pkkalman_OBJECTS) $(pkkalman_DEPENDENCIES) $(EXTRA_pkkalman_DEPENDENCIES)
+ @rm -f pkkalman$(EXEEXT)
+ $(CXXLINK) $(pkkalman_OBJECTS) $(pkkalman_LDADD) $(LIBS)
pklas2img$(EXEEXT): $(pklas2img_OBJECTS) $(pklas2img_DEPENDENCIES) $(EXTRA_pklas2img_DEPENDENCIES)
@rm -f pklas2img$(EXEEXT)
$(CXXLINK) $(pklas2img_OBJECTS) $(pklas2img_LDADD) $(LIBS)
@@ -762,6 +771,7 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ImgRegression.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkann-pkann.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkascii2img.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkascii2ogr.Po@am__quote@
@@ -784,6 +794,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkfssvm.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkgetmask.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkinfo.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkkalman.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pklas2img.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkndvi.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pkoptsvm.Po@am__quote@
@@ -860,6 +871,20 @@ svm.obj: $(top_srcdir)/src/algorithms/svm.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o svm.obj `if test -f '$(top_srcdir)/src/algorithms/svm.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/algorithms/svm.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/algorithms/svm.cpp'; fi`
+ImgRegression.o: $(top_srcdir)/src/algorithms/ImgRegression.cc
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ImgRegression.o -MD -MP -MF $(DEPDIR)/ImgRegression.Tpo -c -o ImgRegression.o `test -f '$(top_srcdir)/src/algorithms/ImgRegression.cc' || echo '$(srcdir)/'`$(top_srcdir)/src/algorithms/ImgRegression.cc
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ImgRegression.Tpo $(DEPDIR)/ImgRegression.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/algorithms/ImgRegression.cc' object='ImgRegression.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ImgRegression.o `test -f '$(top_srcdir)/src/algorithms/ImgRegression.cc' || echo '$(srcdir)/'`$(top_srcdir)/src/algorithms/ImgRegression.cc
+
+ImgRegression.obj: $(top_srcdir)/src/algorithms/ImgRegression.cc
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ImgRegression.obj -MD -MP -MF $(DEPDIR)/ImgRegression.Tpo -c -o ImgRegression.obj `if test -f '$(top_srcdir)/src/algorithms/ImgRegression.cc'; then $(CYGPATH_W) '$(top_srcdir)/src/algorithms/ImgRegression.cc'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/algorithms/ImgRegression.cc'; fi`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/ImgRegression.Tpo $(DEPDIR)/ImgRegression.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$(top_srcdir)/src/algorithms/ImgRegression.cc' object='ImgRegression.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ImgRegression.obj `if test -f '$(top_srcdir)/src/algorithms/ImgRegression.cc'; then $(CYGPATH_W) '$(top_srcdir)/src/algorithms/ImgRegression.cc'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/algorithms/ImgRegression.cc'; fi`
+
pkregann-pkregann.o: pkregann.cc
@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(pkregann_CXXFLAGS) $(CXXFLAGS) -MT pkregann-pkregann.o -MD -MP -MF $(DEPDIR)/pkregann-pkregann.Tpo -c -o pkregann-pkregann.o `test -f 'pkregann.cc' || echo '$(srcdir)/'`pkregann.cc
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/pkregann-pkregann.Tpo $(DEPDIR)/pkregann-pkregann.Po
diff --git a/src/apps/pkann.cc b/src/apps/pkann.cc
index b199b6d..bccd66d 100644
--- a/src/apps/pkann.cc
+++ b/src/apps/pkann.cc
@@ -43,7 +43,7 @@ int main(int argc, char *argv[])
Optionpk<string> tlayer_opt("tln", "tln", "training layer name(s)");
Optionpk<string> label_opt("label", "label", "identifier for class label in training vector file.","label");
Optionpk<unsigned int> balance_opt("bal", "balance", "balance the input data to this number of samples for each class", 0);
- Optionpk<bool> random_opt("random", "random", "in case of balance, randomize input data", true);
+ Optionpk<bool> random_opt("random", "random", "in case of balance, randomize input data", true,2);
Optionpk<int> minSize_opt("min", "min", "if number of training pixels is less then min, do not take this class into account (0: consider all classes)", 0);
Optionpk<double> start_opt("s", "start", "start band sequence number",0);
Optionpk<double> end_opt("e", "end", "end band sequence number (set to 0 to include bands)", 0);
@@ -52,7 +52,7 @@ int main(int argc, char *argv[])
Optionpk<double> scale_opt("\0", "scale", "scale value for each spectral band input features: refl=(DN[band]-offset[band])/scale[band] (use 0 if scale min and max in each band to -1.0 and 1.0)", 0.0);
Optionpk<unsigned short> aggreg_opt("a", "aggreg", "how to combine aggregated classifiers, see also rc option (1: sum rule, 2: max rule).",1);
Optionpk<double> priors_opt("p", "prior", "prior probabilities for each class (e.g., -p 0.3 -p 0.3 -p 0.2 )", 0.0);
- Optionpk<string> priorimg_opt("pim", "priorimg", "prior probability image (multi-band img with band for each class");
+ Optionpk<string> priorimg_opt("pim", "priorimg", "prior probability image (multi-band img with band for each class","",2);
Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",0);
Optionpk<unsigned int> nneuron_opt("n", "nneuron", "number of neurons in hidden layers in neural network (multiple hidden layers are set by defining multiple number of neurons: -n 15 -n 1, default is one hidden layer with 5 neurons)", 5);
Optionpk<float> connection_opt("\0", "connection", "connection rate (default: 1.0 for a fully connected network)", 1.0);
@@ -63,18 +63,18 @@ int main(int argc, char *argv[])
Optionpk<unsigned short> bag_opt("bag", "bag", "Number of bootstrap aggregations (default is no bagging: 1)", 1);
Optionpk<int> bagSize_opt("bs", "bsize", "Percentage of features used from available training features for each bootstrap aggregation (one size for all classes, or a different size for each class respectively", 100);
Optionpk<string> classBag_opt("cb", "classbag", "output for each individual bootstrap aggregation (default is blank)");
- Optionpk<string> mask_opt("m", "mask", "mask image (support for single mask only, see also msknodata option)");
+ Optionpk<string> mask_opt("m", "mask", "Use the first band of the specified file as a validity mask. Nodata values can be set with the option msknodata.");
Optionpk<short> msknodata_opt("msknodata", "msknodata", "mask value(s) not to consider for classification (use negative values if only these values should be taken into account). Values will be taken over in classification image. Default is 0", 0);
Optionpk<unsigned short> nodata_opt("nodata", "nodata", "nodata value to put where image is masked as nodata", 0);
Optionpk<string> output_opt("o", "output", "output classification image");
Optionpk<string> otype_opt("ot", "otype", "Data type for output image ({Byte/Int16/UInt16/UInt32/Int32/Float32/Float64/CInt16/CInt32/CFloat32/CFloat64}). Empty string: inherit type from input image");
Optionpk<string> oformat_opt("of", "oformat", "Output image format (see also gdal_translate). Empty string: inherit from input image");
Optionpk<string> option_opt("co", "co", "Creation option for output file. Multiple options can be specified.");
- Optionpk<string> colorTable_opt("ct", "ct", "colour table in ascii format having 5 columns: id R G B ALFA (0: transparent, 255: solid)");
+ Optionpk<string> colorTable_opt("ct", "ct", "colour table in ASCII format having 5 columns: id R G B ALFA (0: transparent, 255: solid)");
Optionpk<string> prob_opt("\0", "prob", "probability image. Default is no probability image");
- Optionpk<string> entropy_opt("entropy", "entropy", "entropy image (measure for uncertainty of classifier output");
- Optionpk<string> active_opt("active", "active", "ogr output for active training sample.");
- Optionpk<string> ogrformat_opt("f", "f", "Output ogr format for active training sample","ESRI Shapefile");
+ Optionpk<string> entropy_opt("entropy", "entropy", "entropy image (measure for uncertainty of classifier output","",2);
+ Optionpk<string> active_opt("active", "active", "ogr output for active training sample.","",2);
+ Optionpk<string> ogrformat_opt("f", "f", "Output ogr format for active training sample","SQLite");
Optionpk<unsigned int> nactive_opt("na", "nactive", "number of active training points",1);
Optionpk<string> classname_opt("c", "class", "list of class names.");
Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use same order as in class opt).");
@@ -133,6 +133,13 @@ int main(int argc, char *argv[])
exit(0);//help was invoked, stop processing
}
+ if(entropy_opt[0]=="")
+ entropy_opt.clear();
+ if(active_opt[0]=="")
+ active_opt.clear();
+ if(priorimg_opt[0]=="")
+ priorimg_opt.clear();
+
if(verbose_opt[0]>=1){
if(input_opt.size())
cout << "image filename: " << input_opt[0] << endl;
@@ -372,7 +379,7 @@ int main(int argc, char *argv[])
++mapit;
}
if(classname_opt.empty()){
- std::cerr << "Warning: no class name and value pair provided for all " << nclass << " classes, using string2type<int> instead!" << std::endl;
+ //std::cerr << "Warning: no class name and value pair provided for all " << nclass << " classes, using string2type<int> instead!" << std::endl;
for(int iclass=0;iclass<nclass;++iclass){
if(verbose_opt[0])
std::cout << iclass << " " << cm.getClass(iclass) << " -> " << string2type<short>(cm.getClass(iclass)) << std::endl;
@@ -659,7 +666,7 @@ int main(int argc, char *argv[])
if(verbose_opt[0]>=1)
cout << "opening class image for writing output " << output_opt[0] << endl;
if(classBag_opt.size()){
- classImageBag.open(output_opt[0],ncol,nrow,nbag,GDT_Byte,imageType,option_opt);
+ classImageBag.open(classBag_opt[0],ncol,nrow,nbag,GDT_Byte,imageType,option_opt);
classImageBag.GDALSetNoDataValue(nodata_opt[0]);
classImageBag.copyGeoTransform(testImage);
classImageBag.setProjection(testImage.getProjection());
diff --git a/src/apps/pkcomposite.cc b/src/apps/pkcomposite.cc
index 7fc6511..f39ca56 100644
--- a/src/apps/pkcomposite.cc
+++ b/src/apps/pkcomposite.cc
@@ -38,62 +38,62 @@ int main(int argc, char *argv[])
{
Optionpk<string> input_opt("i", "input", "Input image file(s). If input contains multiple images, a multi-band output is created");
Optionpk<string> output_opt("o", "output", "Output image file");
- Optionpk<string> projection_opt("a_srs", "a_srs", "Override the spatial reference for the output file (leave blank to copy from input file, use epsg:3035 to use European projection and force to European grid");
+ Optionpk<int> band_opt("b", "band", "band index(es) to crop (leave empty if all bands must be retained)");
+ Optionpk<double> dx_opt("dx", "dx", "Output resolution in x (in meter) (empty: keep original resolution)");
+ Optionpk<double> dy_opt("dy", "dy", "Output resolution in y (in meter) (empty: keep original resolution)");
Optionpk<string> extent_opt("e", "extent", "get boundary from extent from polygons in vector file");
Optionpk<double> ulx_opt("ulx", "ulx", "Upper left x value bounding box", 0.0);
Optionpk<double> uly_opt("uly", "uly", "Upper left y value bounding box", 0.0);
Optionpk<double> lrx_opt("lrx", "lrx", "Lower right x value bounding box", 0.0);
Optionpk<double> lry_opt("lry", "lry", "Lower right y value bounding box", 0.0);
- Optionpk<double> dx_opt("dx", "dx", "Output resolution in x (in meter) (empty: keep original resolution)");
- Optionpk<double> dy_opt("dy", "dy", "Output resolution in y (in meter) (empty: keep original resolution)");
- Optionpk<int> band_opt("b", "band", "band index(es) to crop (leave empty if all bands must be retained)");
- Optionpk<string> otype_opt("ot", "otype", "Data type for output image ({Byte/Int16/UInt16/UInt32/Int32/Float32/Float64/CInt16/CInt32/CFloat32/CFloat64}). Empty string: inherit type from input image", "");
- Optionpk<string> oformat_opt("of", "oformat", "Output image format (see also gdal_translate). Empty string: inherit from input image");
- Optionpk<string> colorTable_opt("ct", "ct", "color table (file with 5 columns: id R G B ALFA (0: transparent, 255: solid)");
- Optionpk<string> option_opt("co", "co", "Creation option for output file. Multiple options can be specified.");
- Optionpk<string> resample_opt("r", "resampling-method", "Resampling method (near: nearest neighbour, bilinear: bi-linear interpolation).", "near");
- Optionpk<string> description_opt("d", "description", "Set image description");
Optionpk<string> crule_opt("cr", "crule", "Composite rule for mosaic (overwrite, maxndvi, maxband, minband, mean, mode (only for byte images), median, sum", "overwrite");
- Optionpk<int> ruleBand_opt("rb", "rband", "band index used for the rule (for ndvi, use --ruleBand=redBand --ruleBand=nirBand", 0);
+ Optionpk<int> ruleBand_opt("cb", "cband", "band index used for the composite rule (e.g., for ndvi, use --cband=0 --cband=1 with 0 and 1 indices for red and nir band respectively", 0);
Optionpk<double> srcnodata_opt("srcnodata", "srcnodata", "invalid value for input image", 0);
Optionpk<int> bndnodata_opt("bndnodata", "bndnodata", "Bands in input image to check if pixel is valid (used for srcnodata, min and max options)", 0);
- Optionpk<double> dstnodata_opt("dstnodata", "dstnodata", "nodata value to put in output image if not valid or out of bounds.", 0);
Optionpk<double> minValue_opt("min", "min", "flag values smaller or equal to this value as invalid.");
Optionpk<double> maxValue_opt("max", "max", "flag values larger or equal to this value as invalid.");
+ Optionpk<double> dstnodata_opt("dstnodata", "dstnodata", "nodata value to put in output image if not valid or out of bounds.", 0);
+ Optionpk<string> resample_opt("r", "resampling-method", "Resampling method (near: nearest neighbor, bilinear: bi-linear interpolation).", "near");
+ Optionpk<string> otype_opt("ot", "otype", "Data type for output image ({Byte/Int16/UInt16/UInt32/Int32/Float32/Float64/CInt16/CInt32/CFloat32/CFloat64}). Empty string: inherit type from input image", "");
+ Optionpk<string> oformat_opt("of", "oformat", "Output image format (see also gdal_translate). Empty string: inherit from input image");
+ Optionpk<string> option_opt("co", "co", "Creation option for output file. Multiple options can be specified.");
+ Optionpk<string> projection_opt("a_srs", "a_srs", "Override the spatial reference for the output file (leave blank to copy from input file, use epsg:3035 to use European projection and force to European grid");
Optionpk<bool> file_opt("file", "file", "write number of observations for each pixels as additional layer in mosaic", false);
Optionpk<short> weight_opt("w", "weight", "Weights (type: short) for the mosaic, use one weight for each input file in same order as input files are provided). Use value 1 for equal weights.", 1);
Optionpk<short> class_opt("c", "class", "classes for multi-band output image: each band represents the number of observations for one specific class. Use value 0 for no multi-band output image).", 0);
+ Optionpk<string> colorTable_opt("ct", "ct", "color table (file with 5 columns: id R G B ALFA (0: transparent, 255: solid)");
+ Optionpk<string> description_opt("d", "description", "Set image description");
Optionpk<bool> verbose_opt("v", "verbose", "verbose", false);
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
doProcess=input_opt.retrieveOption(argc,argv);
output_opt.retrieveOption(argc,argv);
- projection_opt.retrieveOption(argc,argv);
+ band_opt.retrieveOption(argc,argv);
+ dx_opt.retrieveOption(argc,argv);
+ dy_opt.retrieveOption(argc,argv);
extent_opt.retrieveOption(argc,argv);
ulx_opt.retrieveOption(argc,argv);
uly_opt.retrieveOption(argc,argv);
lrx_opt.retrieveOption(argc,argv);
lry_opt.retrieveOption(argc,argv);
- band_opt.retrieveOption(argc,argv);
- otype_opt.retrieveOption(argc,argv);
- oformat_opt.retrieveOption(argc,argv);
- colorTable_opt.retrieveOption(argc,argv);
- dx_opt.retrieveOption(argc,argv);
- dy_opt.retrieveOption(argc,argv);
- option_opt.retrieveOption(argc,argv);
- dstnodata_opt.retrieveOption(argc,argv);
- resample_opt.retrieveOption(argc,argv);
- description_opt.retrieveOption(argc,argv);
crule_opt.retrieveOption(argc,argv);
ruleBand_opt.retrieveOption(argc,argv);
- bndnodata_opt.retrieveOption(argc,argv);
srcnodata_opt.retrieveOption(argc,argv);
+ bndnodata_opt.retrieveOption(argc,argv);
minValue_opt.retrieveOption(argc,argv);
maxValue_opt.retrieveOption(argc,argv);
+ dstnodata_opt.retrieveOption(argc,argv);
+ resample_opt.retrieveOption(argc,argv);
+ otype_opt.retrieveOption(argc,argv);
+ oformat_opt.retrieveOption(argc,argv);
+ option_opt.retrieveOption(argc,argv);
+ projection_opt.retrieveOption(argc,argv);
file_opt.retrieveOption(argc,argv);
weight_opt.retrieveOption(argc,argv);
class_opt.retrieveOption(argc,argv);
+ colorTable_opt.retrieveOption(argc,argv);
+ description_opt.retrieveOption(argc,argv);
verbose_opt.retrieveOption(argc,argv);
}
catch(string predefinedString){
@@ -139,7 +139,7 @@ int main(int argc, char *argv[])
if(resample_opt[0]=="near"){
theResample=NEAR;
if(verbose_opt[0])
- cout << "resampling: nearest neighbour" << endl;
+ cout << "resampling: nearest neighbor" << endl;
}
else if(resample_opt[0]=="bilinear"){
theResample=BILINEAR;
diff --git a/src/apps/pkcrop.cc b/src/apps/pkcrop.cc
index e7466a5..2684cd8 100644
--- a/src/apps/pkcrop.cc
+++ b/src/apps/pkcrop.cc
@@ -59,7 +59,7 @@ int main(int argc, char *argv[])
Optionpk<string> option_opt("co", "co", "Creation option for output file. Multiple options can be specified.");
Optionpk<string> colorTable_opt("ct", "ct", "color table (file with 5 columns: id R G B ALFA (0: transparent, 255: solid)");
Optionpk<double> nodata_opt("nodata", "nodata", "Nodata value to put in image if out of bounds.");
- Optionpk<string> resample_opt("r", "resampling-method", "Resampling method (near: nearest neighbour, bilinear: bi-linear interpolation).", "near");
+ Optionpk<string> resample_opt("r", "resampling-method", "Resampling method (near: nearest neighbor, bilinear: bi-linear interpolation).", "near");
Optionpk<string> description_opt("d", "description", "Set image description");
Optionpk<bool> verbose_opt("v", "verbose", "verbose", false);
@@ -117,7 +117,7 @@ int main(int argc, char *argv[])
if(resample_opt[0]=="near"){
theResample=NEAR;
if(verbose_opt[0])
- cout << "resampling: nearest neighbour" << endl;
+ cout << "resampling: nearest neighbor" << endl;
}
else if(resample_opt[0]=="bilinear"){
theResample=BILINEAR;
diff --git a/src/apps/pkdiff.cc b/src/apps/pkdiff.cc
index 56415b8..bb370be 100644
--- a/src/apps/pkdiff.cc
+++ b/src/apps/pkdiff.cc
@@ -29,53 +29,55 @@ using namespace std;
int main(int argc, char *argv[])
{
- Optionpk<string> input_opt("i", "input", "Input image file.");
- Optionpk<string> reference_opt("ref", "reference", "Reference image file");
- Optionpk<string> output_opt("o", "output", "Output image file. Default is empty: no output image, only report difference or identical.");
- Optionpk<string> ogrformat_opt("f", "f", "Output sample file format","ESRI Shapefile");
- Optionpk<string> mask_opt("m", "mask", "Mask image file. A single mask is supported only, but several mask values can be used. See also msknodata option. (default is empty)");
+ Optionpk<string> input_opt("i", "input", "Input raster dataset.");
+ Optionpk<string> reference_opt("ref", "reference", "Reference (raster or vector) dataset");
+ Optionpk<string> layer_opt("ln", "ln", "Layer name(s) in sample. Leave empty to select all (for vector reference datasets only)");
+ Optionpk<string> output_opt("o", "output", "Output dataset (optional)");
+ Optionpk<string> ogrformat_opt("f", "f", "OGR format for output vector (for vector reference datasets only)","SQLite");
+ Optionpk<string> mask_opt("m", "mask", "Use the first band of the specified file as a validity mask. Nodata values can be set with the option msknodata.");
Optionpk<int> masknodata_opt("msknodata", "msknodata", "Mask value(s) where image is invalid. Use negative value for valid data (example: use -t -1: if only -1 is valid value)", 0);
- Optionpk<string> colorTable_opt("ct", "ct", "color table (file with 5 columns: id R G B ALFA (0: transparent, 255: solid)");
- Optionpk<short> valueE_opt("\0", "correct", "Value for correct pixels (0)", 0,1);
- Optionpk<short> valueO_opt("\0", "omission", "Value for omission errors: input label > reference label (default value is 1)", 1,1);
- Optionpk<short> valueC_opt("\0", "commission", "Value for commission errors: input label < reference label (default value is 2)", 2,1);
- Optionpk<short> nodata_opt("nodata", "nodata", "No value flag(s)");
- Optionpk<short> band_opt("b", "band", "Band to extract (0)", 0);
- Optionpk<bool> confusion_opt("cm", "confusion", "create confusion matrix (to std out) (default value is 0)", false);
- Optionpk<string> labelref_opt("lr", "lref", "name of the reference label in case reference is shape file(default is label)", "label");
- Optionpk<string> labelclass_opt("lc", "lclass", "name of the classified label in case output is shape file (default is class)", "class");
- Optionpk<short> boundary_opt("bnd", "boundary", "boundary for selecting the sample (default: 1)", 1,1);
- Optionpk<bool> disc_opt("circ", "circular", "use circular disc kernel boundary)", false,1);
- Optionpk<bool> homogeneous_opt("hom", "homogeneous", "only take homogeneous regions into account", false,1);
+ Optionpk<short> valueE_opt("\0", "correct", "Value for correct pixels", 0,2);
+ Optionpk<short> valueO_opt("\0", "omission", "Value for omission errors: input label > reference label", 1,2);
+ Optionpk<short> valueC_opt("\0", "commission", "Value for commission errors: input label < reference label", 2,1);
+ Optionpk<short> nodata_opt("nodata", "nodata", "No data value(s) in input or reference dataset are ignored");
+ Optionpk<short> band_opt("b", "band", "Input raster band", 0);
+ Optionpk<bool> confusion_opt("cm", "confusion", "Create confusion matrix (to std out)", false);
+ Optionpk<string> labelref_opt("lr", "lref", "Attribute name of the reference label (for vector reference datasets only)", "label");
+ Optionpk<string> labelclass_opt("lc", "lclass", "Attribute name of the classified label (for vector reference datasets only)", "class");
+ Optionpk<short> boundary_opt("bnd", "boundary", "Boundary for selecting the sample (for vector reference datasets only)", 1,1);
+ Optionpk<bool> homogeneous_opt("hom", "homogeneous", "Only take regions with homogeneous boundary into account (for reference datasets only)", false,1);
+ Optionpk<bool> disc_opt("circ", "circular", "Use circular boundary (for vector reference datasets only)", false,1);
+ Optionpk<string> classname_opt("c", "class", "List of class names.");
+ Optionpk<short> classvalue_opt("r", "reclass", "List of class values (use same order as in classname option).");
+ Optionpk<string> colorTable_opt("ct", "ct", "Color table in ASCII format having 5 columns: id R G B ALFA (0: transparent, 255: solid).");
Optionpk<string> option_opt("co", "co", "Creation option for output file. Multiple options can be specified.");
- Optionpk<string> classname_opt("c", "class", "list of class names.");
- Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use same order as in classname opt.");
- Optionpk<short> verbose_opt("v", "verbose", "verbose (default value is 0)", 0);
+ Optionpk<short> verbose_opt("v", "verbose", "Verbose level", 0);
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
doProcess=input_opt.retrieveOption(argc,argv);
- output_opt.retrieveOption(argc,argv);
- ogrformat_opt.retrieveOption(argc,argv);
- option_opt.retrieveOption(argc,argv);
reference_opt.retrieveOption(argc,argv);
- mask_opt.retrieveOption(argc,argv);
- colorTable_opt.retrieveOption(argc,argv);
- valueE_opt.retrieveOption(argc,argv);
- valueO_opt.retrieveOption(argc,argv);
- valueC_opt.retrieveOption(argc,argv);
- nodata_opt.retrieveOption(argc,argv);
- masknodata_opt.retrieveOption(argc,argv);
+ layer_opt.retrieveOption(argc,argv);
band_opt.retrieveOption(argc,argv);
confusion_opt.retrieveOption(argc,argv);
labelref_opt.retrieveOption(argc,argv);
+ classname_opt.retrieveOption(argc,argv);
+ classvalue_opt.retrieveOption(argc,argv);
+ nodata_opt.retrieveOption(argc,argv);
+ mask_opt.retrieveOption(argc,argv);
+ masknodata_opt.retrieveOption(argc,argv);
+ output_opt.retrieveOption(argc,argv);
+ ogrformat_opt.retrieveOption(argc,argv);
labelclass_opt.retrieveOption(argc,argv);
- // class_opt.retrieveOption(argc,argv);
+ valueE_opt.retrieveOption(argc,argv);
+ valueO_opt.retrieveOption(argc,argv);
+ valueC_opt.retrieveOption(argc,argv);
boundary_opt.retrieveOption(argc,argv);
- disc_opt.retrieveOption(argc,argv);
homogeneous_opt.retrieveOption(argc,argv);
- classname_opt.retrieveOption(argc,argv);
- classvalue_opt.retrieveOption(argc,argv);
+ disc_opt.retrieveOption(argc,argv);
+ colorTable_opt.retrieveOption(argc,argv);
+ option_opt.retrieveOption(argc,argv);
+ // class_opt.retrieveOption(argc,argv);
verbose_opt.retrieveOption(argc,argv);
}
catch(string predefinedString){
@@ -206,7 +208,8 @@ int main(int argc, char *argv[])
// if(reference_opt[0].find(".shp")!=string::npos){
if(!refIsRaster){
for(int iinput=0;iinput<input_opt.size();++iinput){
- cout << "Processing input " << input_opt[iinput] << endl;
+ if(verbose_opt[0])
+ cout << "Processing input " << input_opt[iinput] << endl;
if(output_opt.size())
assert(reference_opt.size()==output_opt.size());
for(int iref=0;iref<reference_opt.size();++iref){
@@ -241,11 +244,17 @@ int main(int argc, char *argv[])
int nlayer=referenceReaderOgr.getDataSource()->GetLayerCount();
for(int ilayer=0;ilayer<nlayer;++ilayer){
progress=0;
- OGRLayer *readLayer;
- readLayer = referenceReaderOgr.getDataSource()->GetLayer(ilayer);
- cout << "processing layer " << readLayer->GetName() << endl;
+ OGRLayer *readLayer=referenceReaderOgr.getLayer(ilayer);
+ // readLayer = referenceReaderOgr.getDataSource()->GetLayer(ilayer);
+ string currentLayername=readLayer->GetName();
+ if(layer_opt.size())
+ if(find(layer_opt.begin(),layer_opt.end(),currentLayername)==layer_opt.end())
+ continue;
if(!verbose_opt[0])
pfnProgress(progress,pszMessage,pProgressArg);
+ else
+ cout << "processing layer " << readLayer->GetName() << endl;
+
readLayer->ResetReading();
OGRLayer *writeLayer;
if(output_opt.size()){
@@ -332,6 +341,7 @@ int main(int argc, char *argv[])
referenceValue=readFeature->GetFieldAsInteger(readFeature->GetFieldIndex(labelref_opt[0].c_str()));
if(verbose_opt[0])
cout << "reference value: " << referenceValue << endl;
+
bool pixelFlagged=false;
bool maskFlagged=false;
for(int iflag=0;iflag<nodata_opt.size();++iflag){
@@ -356,6 +366,7 @@ int main(int argc, char *argv[])
//check if i_centre is out of bounds
if(static_cast<int>(i_centre)<0||static_cast<int>(i_centre)>=inputReader.nrOfCol())
continue;
+
if(output_opt.size()){
writeFeature = OGRFeature::CreateFeature(writeLayer->GetLayerDefn());
assert(readFeature);
@@ -433,6 +444,7 @@ int main(int argc, char *argv[])
}
}
//at this point we know the values for the entire window
+
if(homogeneous_opt[0]){//only centre pixel
int j=j_centre;
int i=i_centre;
@@ -539,7 +551,7 @@ int main(int argc, char *argv[])
if(verbose_opt[0])
cout << "creating feature" << endl;
if(writeLayer->CreateFeature( writeFeature ) != OGRERR_NONE ){
- string errorString="Failed to create feature in shapefile";
+ string errorString="Failed to create feature in OGR vector file";
throw(errorString);
}
}
diff --git a/src/apps/pkextract.cc b/src/apps/pkextract.cc
index 2a42071..51646cc 100644
--- a/src/apps/pkextract.cc
+++ b/src/apps/pkextract.cc
@@ -35,69 +35,71 @@ along with pktools. If not, see <http://www.gnu.org/licenses/>.
#endif
namespace rule{
- enum RULE_TYPE {point=0, mean=1, proportion=2, custom=3, minimum=4, maximum=5, maxvote=6, centroid=7, sum=8, pointOnSurface=9, median=10};
+ enum RULE_TYPE {point=0, mean=1, proportion=2, custom=3, minimum=4, maximum=5, maxvote=6, centroid=7, sum=8, median=9};
}
using namespace std;
int main(int argc, char *argv[])
{
- Optionpk<string> image_opt("i", "input", "Input image file");
- Optionpk<string> sample_opt("s", "sample", "Input sample vector file or class file (e.g. Corine CLC) if class option is set");
- Optionpk<string> layer_opt("ln", "ln", "layer name(s) in sample (leave empty to select all)");
- Optionpk<string> mask_opt("m", "mask", "Mask image file");
- Optionpk<int> msknodata_opt("msknodata", "msknodata", "Mask value where image is invalid. If a single mask is used, more nodata values can be set. If more masks are used, use one value for each mask.", 1);
- Optionpk<int> class_opt("c", "class", "Class(es) to extract from input sample image. Leave empty to extract all valid data pixels from sample file. Make sure to set classes if rule is set to maxvote or proportion");
+ Optionpk<string> image_opt("i", "input", "Raster input dataset containing band information");
+ Optionpk<string> sample_opt("s", "sample", "OGR vector file with features to be extracted from input data. Output will contain features with input band information included. Sample image can also be GDAL raster dataset.");
+ Optionpk<string> layer_opt("ln", "ln", "Layer name(s) in sample (leave empty to select all)");
Optionpk<string> output_opt("o", "output", "Output sample file (image file)");
- Optionpk<string> ogrformat_opt("f", "f", "Output sample file format","ESRI Shapefile");
- Optionpk<string> test_opt("test", "test", "Test sample file (use this option in combination with threshold<100 to create a training (output) and test set");
+ Optionpk<int> class_opt("c", "class", "Class(es) to extract from input sample image. Leave empty to extract all valid data pixels from sample file. Make sure to set classes if rule is set to maxvote or proportion");
+ Optionpk<float> threshold_opt("t", "threshold", "Probability threshold for selecting samples (randomly). Provide probability in percentage (>0) or absolute (<0). Use a single threshold for vector sample files. If using raster land cover maps as a sample file, you can provide a threshold value for each class (e.g. -t 80 -t 60). Use value 100 to select all pixels for selected class(es)", 100);
+ Optionpk<string> ogrformat_opt("f", "f", "Output sample file format","SQLite");
+ Optionpk<string> ftype_opt("ft", "ftype", "Field type (only Real or Integer)", "Real");
+ Optionpk<string> ltype_opt("lt", "ltype", "Label type: In16 or String", "Integer");
+ Optionpk<bool> polygon_opt("polygon", "polygon", "Create OGRPolygon as geometry instead of OGRPoint. Only valid if sample features are polygons.", false);
+ Optionpk<int> band_opt("b", "band", "Band index(es) to extract. Use -1 to use all bands)", -1);
+ Optionpk<string> rule_opt("r", "rule", "Rule how to report image information per feature (only for vector sample). point (value at each point or at centroid if polygon), centroid, mean (of polygon), median (of polygon), proportion, minimum (of polygon), maximum (of polygon), maxvote, sum.", "point");
+ Optionpk<double> srcnodata_opt("srcnodata", "srcnodata", "Invalid value(s) for input image");
+ Optionpk<int> bndnodata_opt("bndnodata", "bndnodata", "Band(s) in input image to check if pixel is valid (used for srcnodata)", 0);
+ // Optionpk<string> mask_opt("m", "mask", "Mask image file");
+ // Optionpk<int> msknodata_opt("msknodata", "msknodata", "Mask value where image is invalid. If a single mask is used, more nodata values can be set. If more masks are used, use one value for each mask.", 1);
// Optionpk<string> bufferOutput_opt("bu", "bu", "Buffer output shape file");
- Optionpk<short> geo_opt("g", "geo", "geo coordinates", 1);
- Optionpk<short> down_opt("down", "down", "down sampling factor. Can be used to create grid points", 1);
- Optionpk<float> threshold_opt("t", "threshold", "threshold for selecting samples (randomly). Provide probability in percentage (>0) or absolute (<0). Use a single threshold for vector sample files. If using raster land cover maps as a sample file, you can provide a threshold value for each class (e.g. -t 80 -t 60). Use value 100 to select all pixels for selected class(es)", 100);
Optionpk<float> polythreshold_opt("tp", "thresholdPolygon", "(absolute) threshold for selecting samples in each polygon");
- Optionpk<double> min_opt("min", "min", "minimum number of samples to select (0)", 0);
- Optionpk<short> boundary_opt("bo", "boundary", "boundary for selecting the sample", 1);
+ Optionpk<string> test_opt("test", "test", "Test sample file (use this option in combination with threshold<100 to create a training (output) and test set");
+ Optionpk<string> fieldname_opt("bn", "bname", "For single band input data, this extra attribute name will correspond to the raster values. For multi-band input data, multiple attributes with this prefix will be added (e.g. b0, b1, b2, etc.)", "b");
+ Optionpk<string> label_opt("cn", "cname", "Name of the class label in the output vector file", "label");
+ Optionpk<short> geo_opt("g", "geo", "Use geo coordinates (set to 0 to use image coordinates)", 1);
+ Optionpk<short> down_opt("down", "down", "Down sampling factor (for raster sample datasets only). Can be used to create grid points", 1);
+ Optionpk<short> boundary_opt("bo", "boundary", "Boundary for selecting the sample (for vector sample datasets only) ", 1);
+ Optionpk<short> disc_opt("circ", "circular", "Circular disc kernel boundary (for vector sample datasets only, use in combination with boundary option)", 0);
// Optionpk<short> rbox_opt("rb", "rbox", "rectangular boundary box (total width in m) to draw around the selected pixel. Can not combined with class option. Use multiple rbox options for multiple boundary boxes. Use value 0 for no box)", 0);
// Optionpk<short> cbox_opt("cbox", "cbox", "circular boundary (diameter in m) to draw around the selected pixel. Can not combined with class option. Use multiple cbox options for multiple boundary boxes. Use value 0 for no box)", 0);
- Optionpk<short> disc_opt("circ", "circular", "circular disc kernel boundary", 0);
- Optionpk<string> ftype_opt("ft", "ftype", "Field type (only Real or Integer)", "Real");
- Optionpk<string> ltype_opt("lt", "ltype", "Label type: In16 or String", "Integer");
- Optionpk<string> fieldname_opt("bn", "bname", "Attribute field name of extracted raster band", "B");
- Optionpk<string> label_opt("cn", "cname", "name of the class label in the output vector file", "label");
- Optionpk<bool> polygon_opt("polygon", "polygon", "create OGRPolygon as geometry instead of OGRPoint. Only if sample option is also of polygon type.", false);
- Optionpk<int> band_opt("b", "band", "band index to crop. Use -1 to use all bands)", -1);
- Optionpk<string> rule_opt("r", "rule", "rule how to report image information per feature. point (value at each point or at centroid if polygon), pointOnSurface, centroid, mean (of polygon), median (of polygon), proportion, minimum (of polygon), maximum (of polygon), maxvote, sum.", "point");
- Optionpk<short> verbose_opt("v", "verbose", "verbose mode if > 0", 0);
+ Optionpk<short> verbose_opt("v", "verbose", "Verbose mode if > 0", 0);
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
doProcess=image_opt.retrieveOption(argc,argv);
sample_opt.retrieveOption(argc,argv);
layer_opt.retrieveOption(argc,argv);
- mask_opt.retrieveOption(argc,argv);
- msknodata_opt.retrieveOption(argc,argv);
- class_opt.retrieveOption(argc,argv);
output_opt.retrieveOption(argc,argv);
+ class_opt.retrieveOption(argc,argv);
+ threshold_opt.retrieveOption(argc,argv);
ogrformat_opt.retrieveOption(argc,argv);
- test_opt.retrieveOption(argc,argv);
+ ftype_opt.retrieveOption(argc,argv);
+ ltype_opt.retrieveOption(argc,argv);
+ polygon_opt.retrieveOption(argc,argv);
+ band_opt.retrieveOption(argc,argv);
+ rule_opt.retrieveOption(argc,argv);
+ bndnodata_opt.retrieveOption(argc,argv);
+ srcnodata_opt.retrieveOption(argc,argv);
+ polythreshold_opt.retrieveOption(argc,argv);
+ // mask_opt.retrieveOption(argc,argv);
+ // msknodata_opt.retrieveOption(argc,argv);
// bufferOutput_opt.retrieveOption(argc,argv);
+ test_opt.retrieveOption(argc,argv);
+ fieldname_opt.retrieveOption(argc,argv);
+ label_opt.retrieveOption(argc,argv);
geo_opt.retrieveOption(argc,argv);
down_opt.retrieveOption(argc,argv);
- threshold_opt.retrieveOption(argc,argv);
- polythreshold_opt.retrieveOption(argc,argv);
- min_opt.retrieveOption(argc,argv);
boundary_opt.retrieveOption(argc,argv);
// rbox_opt.retrieveOption(argc,argv);
// cbox_opt.retrieveOption(argc,argv);
disc_opt.retrieveOption(argc,argv);
- ftype_opt.retrieveOption(argc,argv);
- ltype_opt.retrieveOption(argc,argv);
- fieldname_opt.retrieveOption(argc,argv);
- label_opt.retrieveOption(argc,argv);
- polygon_opt.retrieveOption(argc,argv);
- band_opt.retrieveOption(argc,argv);
- rule_opt.retrieveOption(argc,argv);
verbose_opt.retrieveOption(argc,argv);
}
catch(string predefinedString){
@@ -112,7 +114,6 @@ int main(int argc, char *argv[])
std::map<std::string, rule::RULE_TYPE> ruleMap;
//initialize ruleMap
ruleMap["point"]=rule::point;
- ruleMap["pointOnSurface"]=rule::pointOnSurface;
ruleMap["centroid"]=rule::centroid;
ruleMap["mean"]=rule::mean;
ruleMap["median"]=rule::median;
@@ -123,6 +124,13 @@ int main(int argc, char *argv[])
ruleMap["maxvote"]=rule::maxvote;
ruleMap["sum"]=rule::sum;
+ if(srcnodata_opt.size()){
+ while(srcnodata_opt.size()<bndnodata_opt.size())
+ srcnodata_opt.push_back(srcnodata_opt[0]);
+ while(bndnodata_opt.size()<srcnodata_opt.size())
+ bndnodata_opt.push_back(bndnodata_opt[0]);
+ }
+
if(verbose_opt[0])
std::cout << class_opt << std::endl;
statfactory::StatFactory stat;
@@ -176,26 +184,26 @@ int main(int argc, char *argv[])
if(verbose_opt[0])
std::cout << fieldname_opt << std::endl;
vector<ImgReaderGdal> maskReader;
- if(mask_opt.size()){
- maskReader.resize(mask_opt.size());
- for(int imask=0;imask<mask_opt.size();++imask){
- if(verbose_opt[0]>1)
- std::cout << "opening mask image file " << mask_opt[imask] << std::endl;
- maskReader[imask].open(mask_opt[0]);
- if(imgReader.isGeoRef())
- assert(maskReader[imask].isGeoRef());
- }
- }
+ // if(mask_opt.size()){
+ // maskReader.resize(mask_opt.size());
+ // for(int imask=0;imask<mask_opt.size();++imask){
+ // if(verbose_opt[0]>1)
+ // std::cout << "opening mask image file " << mask_opt[imask] << std::endl;
+ // maskReader[imask].open(mask_opt[0]);
+ // if(imgReader.isGeoRef())
+ // assert(maskReader[imask].isGeoRef());
+ // }
+ // }
- Vector2d<int> maskBuffer;
- if(mask_opt.size()){
- maskBuffer.resize(mask_opt.size());
- for(int imask=0;imask<maskReader.size();++imask)
- maskBuffer[imask].resize(maskReader[imask].nrOfCol());
- }
- vector<double> oldmaskrow(mask_opt.size());
- for(int imask=0;imask<mask_opt.size();++imask)
- oldmaskrow[imask]=-1;
+ // Vector2d<int> maskBuffer;
+ // if(mask_opt.size()){
+ // maskBuffer.resize(mask_opt.size());
+ // for(int imask=0;imask<maskReader.size();++imask)
+ // maskBuffer[imask].resize(maskReader[imask].nrOfCol());
+ // }
+ // vector<double> oldmaskrow(mask_opt.size());
+ // for(int imask=0;imask<mask_opt.size();++imask)
+ // oldmaskrow[imask]=-1;
if(verbose_opt[0]>1)
std::cout << "Number of bands in input image: " << imgReader.nrOfBand() << std::endl;
@@ -346,76 +354,89 @@ int main(int argc, char *argv[])
}
if(static_cast<int>(jimg)<0||static_cast<int>(jimg)>=imgReader.nrOfRow())
continue;
+
+ bool valid=true;
+
if(static_cast<int>(jimg)!=static_cast<int>(oldimgrow)){
assert(imgBuffer.size()==nband);
for(int iband=0;iband<nband;++iband){
int theBand=(band_opt[0]<0)?iband:band_opt[iband];
imgReader.readData(imgBuffer[iband],GDT_Float64,static_cast<int>(jimg),theBand);
assert(imgBuffer[iband].size()==imgReader.nrOfCol());
- }
+ if(srcnodata_opt.size()){
+ vector<int>::const_iterator bndit=find(bndnodata_opt.begin(),bndnodata_opt.end(),theBand);
+ if(bndit!=bndnodata_opt.end()){
+ vector<int>::const_iterator bndit=find(bndnodata_opt.begin(),bndnodata_opt.end(),theBand);
+ if(bndit!=bndnodata_opt.end()){
+ if(imgBuffer[iband][static_cast<int>(iimg)]==srcnodata_opt[theBand])
+ valid=false;
+ }
+ }
+ }
+ }
oldimgrow=jimg;
- }
- // bool valid=true;
- for(int imask=0;imask<mask_opt.size();++imask){
- double colMask,rowMask;//image coordinates in mask image
- if(mask_opt.size()>1){//multiple masks
- if(geo_opt[0])
- maskReader[imask].geo2image(x,y,colMask,rowMask);
- else{
- colMask=icol;
- rowMask=irow;
- }
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
- continue;
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
- continue;
- else{
- maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[imask]=rowMask;
- }
- }
- int ivalue=0;
- if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[imask]);
- else//use same invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[0]);
- if(maskBuffer[imask][colMask]==ivalue){
- valid=false;
- break;
- }
- }
- else if(maskReader.size()){
- if(geo_opt[0])
- maskReader[0].geo2image(x,y,colMask,rowMask);
- else{
- colMask=icol;
- rowMask=irow;
- }
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol())
- continue;
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow())
- continue;
- else{
- maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[0]=rowMask;
- }
- }
- for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
- if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
- valid=false;
- break;
- }
- }
- }
- }
+ }
+
+ // for(int imask=0;imask<mask_opt.size();++imask){
+ // double colMask,rowMask;//image coordinates in mask image
+ // if(mask_opt.size()>1){//multiple masks
+ // if(geo_opt[0])
+ // maskReader[imask].geo2image(x,y,colMask,rowMask);
+ // else{
+ // colMask=icol;
+ // rowMask=irow;
+ // }
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
+ // continue;
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[imask]=rowMask;
+ // }
+ // }
+ // int ivalue=0;
+ // if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[imask]);
+ // else//use same invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[0]);
+ // if(maskBuffer[imask][colMask]==ivalue){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // else if(maskReader.size()){
+ // if(geo_opt[0])
+ // maskReader[0].geo2image(x,y,colMask,rowMask);
+ // else{
+ // colMask=icol;
+ // rowMask=irow;
+ // }
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol())
+ // continue;
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[0]=rowMask;
+ // }
+ // }
+ // for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
+ // if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // }
+ // }
if(valid){
for(int iband=0;iband<imgBuffer.size();++iband){
if(imgBuffer[iband].size()!=imgReader.nrOfCol()){
@@ -590,76 +611,85 @@ int main(int argc, char *argv[])
}
if(static_cast<int>(jimg)<0||static_cast<int>(jimg)>=imgReader.nrOfRow())
continue;
+
+ bool valid=true;
+
if(static_cast<int>(jimg)!=static_cast<int>(oldimgrow)){
assert(imgBuffer.size()==nband);
for(int iband=0;iband<nband;++iband){
int theBand=(band_opt[0]<0)?iband:band_opt[iband];
imgReader.readData(imgBuffer[iband],GDT_Float64,static_cast<int>(jimg),theBand);
assert(imgBuffer[iband].size()==imgReader.nrOfCol());
+ if(srcnodata_opt.size()){
+ vector<int>::const_iterator bndit=find(bndnodata_opt.begin(),bndnodata_opt.end(),theBand);
+ if(bndit!=bndnodata_opt.end()){
+ if(imgBuffer[iband][static_cast<int>(iimg)]==srcnodata_opt[theBand])
+ valid=false;
+ }
+ }
}
oldimgrow=jimg;
}
- bool valid=true;
- for(int imask=0;imask<mask_opt.size();++imask){
- double colMask,rowMask;//image coordinates in mask image
- if(mask_opt.size()>1){//multiple masks
- if(geo_opt[0])
- maskReader[imask].geo2image(x,y,colMask,rowMask);
- else{
- colMask=icol;
- rowMask=irow;
- }
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
- continue;
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
- continue;
- else{
- maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[imask]=rowMask;
- }
- }
- int ivalue=0;
- if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[imask]);
- else//use same invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[0]);
- if(maskBuffer[imask][colMask]==ivalue){
- valid=false;
- break;
- }
- }
- else if(maskReader.size()){
- if(geo_opt[0])
- maskReader[0].geo2image(x,y,colMask,rowMask);
- else{
- colMask=icol;
- rowMask=irow;
- }
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol())
- continue;
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow())
- continue;
- else{
- maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[0]=rowMask;
- }
- }
- for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
- if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
- valid=false;
- break;
- }
- }
- }
- }
+ // for(int imask=0;imask<mask_opt.size();++imask){
+ // double colMask,rowMask;//image coordinates in mask image
+ // if(mask_opt.size()>1){//multiple masks
+ // if(geo_opt[0])
+ // maskReader[imask].geo2image(x,y,colMask,rowMask);
+ // else{
+ // colMask=icol;
+ // rowMask=irow;
+ // }
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
+ // continue;
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[imask]=rowMask;
+ // }
+ // }
+ // int ivalue=0;
+ // if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[imask]);
+ // else//use same invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[0]);
+ // if(maskBuffer[imask][colMask]==ivalue){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // else if(maskReader.size()){
+ // if(geo_opt[0])
+ // maskReader[0].geo2image(x,y,colMask,rowMask);
+ // else{
+ // colMask=icol;
+ // rowMask=irow;
+ // }
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol())
+ // continue;
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[0]=rowMask;
+ // }
+ // }
+ // for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
+ // if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // }
+ // }
if(valid){
for(int iband=0;iband<imgBuffer.size();++iband){
if(imgBuffer[iband].size()!=imgReader.nrOfCol()){
@@ -822,6 +852,7 @@ int main(int argc, char *argv[])
// assert(fieldnames.size()==ogrWriter.getFieldCount(ilayerWrite));
// map<std::string,double> pointAttributes;
+ //todo: support multiple rules and write attribute for each rule...
if(class_opt.size()){
switch(ruleMap[rule_opt[0]]){
case(rule::proportion):{//proportion for each class
@@ -903,72 +934,70 @@ int main(int argc, char *argv[])
OGRPoint *poPoint = (OGRPoint *) poGeometry;
x=poPoint->getX();
y=poPoint->getY();
+
bool valid=true;
- for(int imask=0;imask<mask_opt.size();++imask){
- double colMask,rowMask;//image coordinates in mask image
- if(mask_opt.size()>1){//multiple masks
- maskReader[imask].geo2image(x,y,colMask,rowMask);
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
- continue;
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
- continue;
- else{
- maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[imask]=rowMask;
- assert(maskBuffer.size()==maskReader[imask].nrOfBand());
- }
- }
- // char ivalue=0;
- int ivalue=0;
- if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[imask]);
- else//use same invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[0]);
- if(maskBuffer[imask][colMask]==ivalue){
- valid=false;
- break;
- }
- }
- else if(maskReader.size()){
- maskReader[0].geo2image(x,y,colMask,rowMask);
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol()){
- continue;
- // cerr << colMask << " out of mask col range!" << std::endl;
- // cerr << x << " " << y << " " << colMask << " " << rowMask << std::endl;
- // assert(static_cast<int>(colMask)>=0&&static_cast<int>(colMask)<maskReader[0].nrOfCol());
- }
+
+ // for(int imask=0;imask<mask_opt.size();++imask){
+ // double colMask,rowMask;//image coordinates in mask image
+ // if(mask_opt.size()>1){//multiple masks
+ // maskReader[imask].geo2image(x,y,colMask,rowMask);
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
+ // continue;
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[imask]=rowMask;
+ // assert(maskBuffer.size()==maskReader[imask].nrOfBand());
+ // }
+ // }
+ // // char ivalue=0;
+ // int ivalue=0;
+ // if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[imask]);
+ // else//use same invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[0]);
+ // if(maskBuffer[imask][colMask]==ivalue){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // else if(maskReader.size()){
+ // maskReader[0].geo2image(x,y,colMask,rowMask);
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol()){
+ // continue;
+ // // cerr << colMask << " out of mask col range!" << std::endl;
+ // // cerr << x << " " << y << " " << colMask << " " << rowMask << std::endl;
+ // // assert(static_cast<int>(colMask)>=0&&static_cast<int>(colMask)<maskReader[0].nrOfCol());
+ // }
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow()){
- continue;
- // cerr << rowMask << " out of mask row range!" << std::endl;
- // cerr << x << " " << y << " " << colMask << " " << rowMask << std::endl;
- // assert(static_cast<int>(rowMask)>=0&&static_cast<int>(rowMask)<imgReader.nrOfRow());
- }
- else{
- maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[0]=rowMask;
- }
- }
- for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
- if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
- valid=false;
- break;
- }
- }
- }
- }
- if(!valid)
- continue;
- else
- validFeature=true;
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow()){
+ // continue;
+ // // cerr << rowMask << " out of mask row range!" << std::endl;
+ // // cerr << x << " " << y << " " << colMask << " " << rowMask << std::endl;
+ // // assert(static_cast<int>(rowMask)>=0&&static_cast<int>(rowMask)<imgReader.nrOfRow());
+ // }
+ // else{
+ // maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[0]=rowMask;
+ // }
+ // }
+ // for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
+ // if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // }
+ // }
double value;
double i_centre,j_centre;
@@ -1066,6 +1095,20 @@ int main(int argc, char *argv[])
if(verbose_opt[0]>1)
std::cout << "write feature has " << writeFeature->GetFieldCount() << " fields" << std::endl;
+ // //hiero
+ // for(int vband=0;vband<bndnodata_opt.size();++vband){
+ // value=((readValues[bndnodata_opt[vband]])[j-ulj])[i-uli];
+ // if(value==srcnodata_opt[vband]){
+ // valid=false;
+ // break;
+ // }
+ // }
+
+ // if(!valid)
+ // continue;
+ // else
+ // validFeature=true;
+
vector<double> windowBuffer;
for(int windowJ=-theDim/2;windowJ<(theDim+1)/2;++windowJ){
for(int windowI=-theDim/2;windowI<(theDim+1)/2;++windowI){
@@ -1084,6 +1127,15 @@ int main(int argc, char *argv[])
for(int iband=0;iband<nband;++iband){
int theBand=(band_opt[0]<0)?iband:band_opt[iband];
imgReader.readData(value,GDT_Float64,i,j,theBand);
+
+ if(srcnodata_opt.size()){
+ Optionpk<int>::const_iterator bndit=find(bndnodata_opt.begin(),bndnodata_opt.end(),theBand);
+ if(bndit!=bndnodata_opt.end()){
+ if(value==srcnodata_opt[theBand])
+ valid=false;
+ }
+ }
+
if(verbose_opt[0]>1)
std::cout << ": " << value << std::endl;
ostringstream fs;
@@ -1129,13 +1181,13 @@ int main(int argc, char *argv[])
std::cout << "creating point feature" << std::endl;
if(writeTest){
if(writeTestLayer->CreateFeature( writeFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create feature in shapefile";
+ std::string errorString="Failed to create feature in ogr vector file";
throw(errorString);
}
}
else{
if(writeLayer->CreateFeature( writeFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create feature in shapefile";
+ std::string errorString="Failed to create feature in ogr vector file";
throw(errorString);
}
}
@@ -1157,15 +1209,15 @@ int main(int argc, char *argv[])
readPolygon.closeRings();
if(verbose_opt[0]>1)
- std::cout << "get centroid point from polygon" << std::endl;
- if(ruleMap[rule_opt[0]]==rule::pointOnSurface)
- readPolygon.PointOnSurface(&writeCentroidPoint);
- else
+ std::cout << "get point on polygon" << std::endl;
+ if(ruleMap[rule_opt[0]]==rule::centroid)
readPolygon.Centroid(&writeCentroidPoint);
+ else
+ readPolygon.PointOnSurface(&writeCentroidPoint);
double ulx,uly,lrx,lry;
double uli,ulj,lri,lrj;
- if((polygon_opt[0]&&ruleMap[rule_opt[0]]==rule::point)||(ruleMap[rule_opt[0]]==rule::centroid)||(ruleMap[rule_opt[0]]==rule::pointOnSurface)){
+ if((polygon_opt[0]&&ruleMap[rule_opt[0]]==rule::point)||(ruleMap[rule_opt[0]]==rule::centroid)){
ulx=writeCentroidPoint.getX();
uly=writeCentroidPoint.getY();
lrx=ulx;
@@ -1202,8 +1254,24 @@ int main(int argc, char *argv[])
if(verbose_opt[0]>1)
std::cout << "bounding box for polygon feature " << ifeature << ": " << uli << " " << ulj << " " << lri << " " << lrj << std::endl;
- if(uli<0||lri>=imgReader.nrOfCol()||ulj<0||ulj>=imgReader.nrOfRow())
- continue;
+ if(uli<0)
+ uli=0;
+ if(lri<0)
+ lri=0;
+ if(uli>=imgReader.nrOfCol())
+ uli=imgReader.nrOfCol()-1;
+ if(lri>=imgReader.nrOfCol())
+ lri=imgReader.nrOfCol()-1;
+ if(ulj<0)
+ ulj=0;
+ if(lrj<0)
+ lrj=0;
+ if(ulj>=imgReader.nrOfRow())
+ ulj=imgReader.nrOfRow()-1;
+ if(lrj>=imgReader.nrOfRow())
+ lrj=imgReader.nrOfRow()-1;
+ // if(uli<0||lri>=imgReader.nrOfCol()||ulj<0||lrj>=imgReader.nrOfRow())
+ // continue;
int nPointPolygon=0;
@@ -1224,6 +1292,7 @@ int main(int argc, char *argv[])
vector<double> polyClassValues;
if(class_opt.size()){
+
polyClassValues.resize(class_opt.size());
//initialize
for(int iclass=0;iclass<class_opt.size();++iclass)
@@ -1234,9 +1303,18 @@ int main(int argc, char *argv[])
vector< Vector2d<double> > readValues(nband);
for(int iband=0;iband<nband;++iband){
int theBand=(band_opt[0]<0)?iband:band_opt[iband];
+ //test
+ assert(uli>=0);
+ assert(uli<imgReader.nrOfCol());
+ assert(lri>=0);
+ assert(lri<imgReader.nrOfCol());
+ assert(ulj>=0);
+ assert(ulj<imgReader.nrOfRow());
+ assert(lrj>=0);
+ assert(lrj<imgReader.nrOfRow());
imgReader.readDataBlock(readValues[iband],GDT_Float64,uli,lri,ulj,lrj,theBand);
}
- //todo: readDataBlock for maskReader...
+
OGRPoint thePoint;
for(int j=ulj;j<=lrj;++j){
for(int i=uli;i<=lri;++i){
@@ -1254,60 +1332,72 @@ int main(int argc, char *argv[])
if(ruleMap[rule_opt[0]]!=rule::centroid&&!readPolygon.Contains(&thePoint))
continue;
+
bool valid=true;
- for(int imask=0;imask<mask_opt.size();++imask){
- double colMask,rowMask;//image coordinates in mask image
- if(mask_opt.size()>1){//multiple masks
- maskReader[imask].geo2image(x,y,colMask,rowMask);
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
- continue;
-
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
- continue;
- else{
- maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[imask]=rowMask;
- assert(maskBuffer.size()==maskReader[imask].nrOfBand());
- }
- }
- int ivalue=0;
- if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[imask]);
- else//use same invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[0]);
- if(maskBuffer[imask][colMask]==ivalue){
+
+ if(srcnodata_opt.size()){
+ for(int vband=0;vband<bndnodata_opt.size();++vband){
+ double value=((readValues[bndnodata_opt[vband]])[j-ulj])[i-uli];
+ if(value==srcnodata_opt[vband]){
valid=false;
break;
}
}
- else if(maskReader.size()){
- maskReader[0].geo2image(x,y,colMask,rowMask);
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol())
- continue;
-
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow())
- continue;
- else{
- maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[0]=rowMask;
- }
- }
- for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
- if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
- valid=false;
- break;
- }
- }
- }
}
+
+ // for(int imask=0;imask<mask_opt.size();++imask){
+ // double colMask,rowMask;//image coordinates in mask image
+ // if(mask_opt.size()>1){//multiple masks
+ // maskReader[imask].geo2image(x,y,colMask,rowMask);
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
+ // continue;
+
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[imask]=rowMask;
+ // assert(maskBuffer.size()==maskReader[imask].nrOfBand());
+ // }
+ // }
+ // int ivalue=0;
+ // if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[imask]);
+ // else//use same invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[0]);
+ // if(maskBuffer[imask][colMask]==ivalue){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // else if(maskReader.size()){
+ // maskReader[0].geo2image(x,y,colMask,rowMask);
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol())
+ // continue;
+
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[0]=rowMask;
+ // }
+ // }
+ // for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
+ // if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // }
+ // }
if(!valid)
continue;
else
@@ -1324,7 +1414,7 @@ int main(int argc, char *argv[])
OGRFeature *writePointFeature;
if(!polygon_opt[0]){
//create feature
- if(ruleMap[rule_opt[0]]==rule::point){//do not create in case of mean, median, sum, pointOnSurface or centroid (only create point at centroid)
+ if(ruleMap[rule_opt[0]]==rule::point){//do not create in case of mean, median, sum or centroid (only create point at centroid)
if(writeTest)
writePointFeature = OGRFeature::CreateFeature(writeTestLayer->GetLayerDefn());
else
@@ -1397,13 +1487,13 @@ int main(int argc, char *argv[])
std::cout << "creating point feature" << std::endl;
if(writeTest){
if(writeTestLayer->CreateFeature( writePointFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create feature in test shapefile";
+ std::string errorString="Failed to create feature in test ogr vector file";
throw(errorString);
}
}
else{
if(writeLayer->CreateFeature( writePointFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create feature in shapefile";
+ std::string errorString="Failed to create feature in ogr vector file";
throw(errorString);
}
}
@@ -1523,7 +1613,7 @@ int main(int argc, char *argv[])
theValue=stat.mymax(polyValues[index]);
else if(ruleMap[rule_opt[0]]==rule::minimum)
theValue=stat.mymin(polyValues[index]);
- else{//rule::pointOnSurface or rule::centroid
+ else{//rule::centroid
if(verbose_opt[0])
std::cout << "number of points in polygon: " << nPointPolygon << std::endl;
assert(nPointPolygon<=1);
@@ -1629,7 +1719,10 @@ int main(int argc, char *argv[])
maxClass=class_opt[maxIndex];
if(verbose_opt[0]>0)
std::cout << "maxClass: " << maxClass << std::endl;
- writePolygonFeature->SetField(label_opt[0].c_str(),maxClass);
+ if(polygon_opt[0])
+ writePolygonFeature->SetField(label_opt[0].c_str(),maxClass);
+ else
+ writeCentroidFeature->SetField(label_opt[0].c_str(),maxClass);
}
}
if(polygon_opt[0]){
@@ -1637,13 +1730,13 @@ int main(int argc, char *argv[])
std::cout << "creating polygon feature" << std::endl;
if(writeTest){
if(writeTestLayer->CreateFeature( writePolygonFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create polygon feature in shapefile";
+ std::string errorString="Failed to create polygon feature in ogr vector file";
throw(errorString);
}
}
else{
if(writeLayer->CreateFeature( writePolygonFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create polygon feature in shapefile";
+ std::string errorString="Failed to create polygon feature in ogr vector file";
throw(errorString);
}
}
@@ -1657,7 +1750,7 @@ int main(int argc, char *argv[])
std::cout << "creating point feature in centroid" << std::endl;
if(writeTest){
if(writeTestLayer->CreateFeature( writeCentroidFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create point feature in shapefile";
+ std::string errorString="Failed to create point feature in ogr vector file";
throw(errorString);
}
}
@@ -1665,7 +1758,7 @@ int main(int argc, char *argv[])
//test
assert(validFeature);
if(writeLayer->CreateFeature( writeCentroidFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create point feature in shapefile";
+ std::string errorString="Failed to create point feature in ogr vector file";
throw(errorString);
}
}
@@ -1688,7 +1781,7 @@ int main(int argc, char *argv[])
if(verbose_opt[0]>1)
std::cout << "get centroid point from polygon" << std::endl;
- assert(ruleMap[rule_opt[0]]!=rule::pointOnSurface);//not supported for multipolygons
+
readPolygon.Centroid(&writeCentroidPoint);
double ulx,uly,lrx,lry;
@@ -1711,16 +1804,16 @@ int main(int argc, char *argv[])
lry=psEnvelope->MinY;
delete psEnvelope;
}
- if(geo_opt[0]){
+ // if(geo_opt[0]){
imgReader.geo2image(ulx,uly,uli,ulj);
imgReader.geo2image(lrx,lry,lri,lrj);
- }
- else{
- uli=ulx;
- ulj=uly;
- lri=lrx;
- lrj=lry;
- }
+ // }
+ // else{
+ // uli=ulx;
+ // ulj=uly;
+ // lri=lrx;
+ // lrj=lry;
+ // }
//nearest neighbour
ulj=static_cast<int>(ulj);
uli=static_cast<int>(uli);
@@ -1728,10 +1821,26 @@ int main(int argc, char *argv[])
lri=static_cast<int>(lri);
//iterate through all pixels
if(verbose_opt[0]>1)
- std::cout << "bounding box for feature " << ifeature << ": " << uli << " " << ulj << " " << lri << " " << lrj << std::endl;
+ std::cout << "bounding box for multipologon feature " << ifeature << ": " << uli << " " << ulj << " " << lri << " " << lrj << std::endl;
- if(uli<0||lri>=imgReader.nrOfCol()||ulj<0||ulj>=imgReader.nrOfRow())
- continue;
+ if(uli<0)
+ uli=0;
+ if(lri<0)
+ lri=0;
+ if(uli>=imgReader.nrOfCol())
+ uli=imgReader.nrOfCol()-1;
+ if(lri>=imgReader.nrOfCol())
+ lri=imgReader.nrOfCol()-1;
+ if(ulj<0)
+ ulj=0;
+ if(lrj<0)
+ lrj=0;
+ if(ulj>=imgReader.nrOfRow())
+ ulj=imgReader.nrOfRow()-1;
+ if(lrj>=imgReader.nrOfRow())
+ lrj=imgReader.nrOfRow()-1;
+ // if(uli<0||lri>=imgReader.nrOfCol()||ulj<0||lrj>=imgReader.nrOfRow())
+ // continue;
int nPointPolygon=0;
if(polygon_opt[0]){
@@ -1761,6 +1870,15 @@ int main(int argc, char *argv[])
vector< Vector2d<double> > readValues(nband);
for(int iband=0;iband<nband;++iband){
int theBand=(band_opt[0]<0)?iband:band_opt[iband];
+ //test
+ assert(uli>=0);
+ assert(uli<imgReader.nrOfCol());
+ assert(lri>=0);
+ assert(lri<imgReader.nrOfCol());
+ assert(ulj>=0);
+ assert(ulj<imgReader.nrOfRow());
+ assert(lrj>=0);
+ assert(lrj<imgReader.nrOfRow());
imgReader.readDataBlock(readValues[iband],GDT_Float64,uli,lri,ulj,lrj,theBand);
}
//todo: readDataBlock for maskReader...
@@ -1781,60 +1899,72 @@ int main(int argc, char *argv[])
if(ruleMap[rule_opt[0]]!=rule::centroid&&!readPolygon.Contains(&thePoint))
continue;
+
bool valid=true;
- for(int imask=0;imask<mask_opt.size();++imask){
- double colMask,rowMask;//image coordinates in mask image
- if(mask_opt.size()>1){//multiple masks
- maskReader[imask].geo2image(x,y,colMask,rowMask);
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
- continue;
-
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
- continue;
- else{
- maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[imask]=rowMask;
- assert(maskBuffer.size()==maskReader[imask].nrOfBand());
- }
- }
- int ivalue=0;
- if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[imask]);
- else//use same invalid value for each mask
- ivalue=static_cast<int>(msknodata_opt[0]);
- if(maskBuffer[imask][colMask]==ivalue){
- valid=false;
- break;
- }
- }
- else if(maskReader.size()){
- maskReader[0].geo2image(x,y,colMask,rowMask);
- //nearest neighbour
- rowMask=static_cast<int>(rowMask);
- colMask=static_cast<int>(colMask);
- if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol())
- continue;
-
- if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
- if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow())
- continue;
- else{
- maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
- oldmaskrow[0]=rowMask;
- }
- }
- for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
- if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
- valid=false;
- break;
- }
- }
+
+ if(srcnodata_opt.size()){
+ for(int vband=0;vband<bndnodata_opt.size();++vband){
+ double value=((readValues[bndnodata_opt[vband]])[j-ulj])[i-uli];
+ if(value==srcnodata_opt[vband]){
+ valid=false;
+ break;
}
}
+ }
+ // for(int imask=0;imask<mask_opt.size();++imask){
+ // double colMask,rowMask;//image coordinates in mask image
+ // if(mask_opt.size()>1){//multiple masks
+ // maskReader[imask].geo2image(x,y,colMask,rowMask);
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[imask].nrOfCol())
+ // continue;
+
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[imask])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[imask].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[imask].readData(maskBuffer[imask],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[imask]=rowMask;
+ // assert(maskBuffer.size()==maskReader[imask].nrOfBand());
+ // }
+ // }
+ // int ivalue=0;
+ // if(mask_opt.size()==msknodata_opt.size())//one invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[imask]);
+ // else//use same invalid value for each mask
+ // ivalue=static_cast<int>(msknodata_opt[0]);
+ // if(maskBuffer[imask][colMask]==ivalue){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // else if(maskReader.size()){
+ // maskReader[0].geo2image(x,y,colMask,rowMask);
+ // //nearest neighbour
+ // rowMask=static_cast<int>(rowMask);
+ // colMask=static_cast<int>(colMask);
+ // if(static_cast<int>(colMask)<0||static_cast<int>(colMask)>=maskReader[0].nrOfCol())
+ // continue;
+
+ // if(static_cast<int>(rowMask)!=static_cast<int>(oldmaskrow[0])){
+ // if(static_cast<int>(rowMask)<0||static_cast<int>(rowMask)>=maskReader[0].nrOfRow())
+ // continue;
+ // else{
+ // maskReader[0].readData(maskBuffer[0],GDT_Int32,static_cast<int>(rowMask));
+ // oldmaskrow[0]=rowMask;
+ // }
+ // }
+ // for(int ivalue=0;ivalue<msknodata_opt.size();++ivalue){
+ // if(maskBuffer[0][colMask]==static_cast<int>(msknodata_opt[ivalue])){
+ // valid=false;
+ // break;
+ // }
+ // }
+ // }
+ // }
+
if(!valid)
continue;
else
@@ -1924,13 +2054,13 @@ int main(int argc, char *argv[])
std::cout << "creating point feature" << std::endl;
if(writeTest){
if(writeTestLayer->CreateFeature( writePointFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create feature in shapefile";
+ std::string errorString="Failed to create feature in ogr vector file";
throw(errorString);
}
}
else{
if(writeLayer->CreateFeature( writePointFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create feature in shapefile";
+ std::string errorString="Failed to create feature in ogr vector file";
throw(errorString);
}
}
@@ -2051,7 +2181,7 @@ int main(int argc, char *argv[])
theValue=stat.mymax(polyValues[index]);
else if(ruleMap[rule_opt[0]]==rule::minimum)
theValue=stat.mymin(polyValues[index]);
- else{//rule::pointOnSurface or rule::centroid
+ else{//rule::centroid
if(verbose_opt[0])
std::cout << "number of points in polygon: " << nPointPolygon << std::endl;
assert(nPointPolygon<=1);
@@ -2157,7 +2287,10 @@ int main(int argc, char *argv[])
maxClass=class_opt[maxIndex];
if(verbose_opt[0]>0)
std::cout << "maxClass: " << maxClass << std::endl;
- writePolygonFeature->SetField(label_opt[0].c_str(),maxClass);
+ if(polygon_opt[0])
+ writePolygonFeature->SetField(label_opt[0].c_str(),maxClass);
+ else
+ writeCentroidFeature->SetField(label_opt[0].c_str(),maxClass);
}
}
@@ -2166,13 +2299,13 @@ int main(int argc, char *argv[])
std::cout << "creating polygon feature" << std::endl;
if(writeTest){
if(writeTestLayer->CreateFeature( writePolygonFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create polygon feature in shapefile";
+ std::string errorString="Failed to create polygon feature in ogr vector file";
throw(errorString);
}
}
else{
if(writeLayer->CreateFeature( writePolygonFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create polygon feature in shapefile";
+ std::string errorString="Failed to create polygon feature in ogr vector file";
throw(errorString);
}
}
@@ -2186,7 +2319,7 @@ int main(int argc, char *argv[])
std::cout << "creating point feature in centroid" << std::endl;
if(writeTest){
if(writeTestLayer->CreateFeature( writeCentroidFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create point feature in shapefile";
+ std::string errorString="Failed to create point feature in ogr vector file";
throw(errorString);
}
}
@@ -2194,7 +2327,7 @@ int main(int argc, char *argv[])
//test
assert(validFeature);
if(writeLayer->CreateFeature( writeCentroidFeature ) != OGRERR_NONE ){
- std::string errorString="Failed to create point feature in shapefile";
+ std::string errorString="Failed to create point feature in ogr vector file";
throw(errorString);
}
}
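
The pkextract hunks above replace the per-pixel mask lookups with a no-data test against the data block that is already in memory: a pixel only stays valid if none of the bands listed in bndnodata carry the corresponding srcnodata value. Below is a minimal, self-contained sketch of that test; the triple std::vector stands in for the Vector2d buffers of the real code, and the [band][row][col] indexing relative to the bounding-box upper-left corner (uli,ulj) follows the hunks.

#include <iostream>
#include <vector>

// isValidPixel: sketch of the validity test used in pkextract. bndnodata lists
// the bands to test, srcnodata the corresponding no-data values; (i,j) is the
// pixel and (uli,ulj) the upper-left corner of the block that was read.
bool isValidPixel(const std::vector<std::vector<std::vector<double> > >& readValues,
                  const std::vector<int>& bndnodata,
                  const std::vector<double>& srcnodata,
                  int i, int j, int uli, int ulj)
{
  if(srcnodata.empty())
    return true;                                   // no no-data values defined: always valid
  for(unsigned int vband=0; vband<bndnodata.size(); ++vband){
    double value=readValues[bndnodata[vband]][j-ulj][i-uli];
    if(value==srcnodata[vband])
      return false;                                // band carries its no-data value: skip pixel
  }
  return true;
}

int main(){
  std::vector<std::vector<std::vector<double> > > readValues(1,
    std::vector<std::vector<double> >(2, std::vector<double>(2,10)));
  readValues[0][1][1]=0;                           // pretend 0 is the no-data value of band 0
  std::vector<int> bndnodata(1,0);
  std::vector<double> srcnodata(1,0);
  std::cout << isValidPixel(readValues,bndnodata,srcnodata,0,0,0,0) << " "
            << isValidPixel(readValues,bndnodata,srcnodata,1,1,0,0) << std::endl;  // prints 1 0
  return 0;
}
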
diff --git a/src/apps/pkfillnodata.cc b/src/apps/pkfillnodata.cc
index ba6f97b..1f003a8 100644
--- a/src/apps/pkfillnodata.cc
+++ b/src/apps/pkfillnodata.cc
@@ -27,9 +27,9 @@ extern "C" {
#include "base/Optionpk.h"
int main(int argc,char **argv) {
- Optionpk<std::string> input_opt("i", "input", "Input image file");
+ Optionpk<std::string> input_opt("i", "input", "Input raster dataset");
Optionpk<int> band_opt("b", "band", "band(s) to process (Default is -1: process all bands)");
- Optionpk<std::string> mask_opt("m", "mask", "Mask band indicating pixels to be interpolated (zero valued) ");
+ Optionpk<std::string> mask_opt("m", "mask", "Mask raster dataset indicating pixels to be interpolated (zero valued) ");
Optionpk<std::string> output_opt("o", "output", "Output image file");
Optionpk<double> distance_opt("d", "distance", "Maximum number of pixels to search in all directions to find values to interpolate from", 0);
Optionpk<int> iteration_opt("it", "iteration", "Number of 3x3 smoothing filter passes to run (default 0)", 0);
diff --git a/src/apps/pkfilter.cc b/src/apps/pkfilter.cc
index 27d38bc..4ea7e06 100644
--- a/src/apps/pkfilter.cc
+++ b/src/apps/pkfilter.cc
@@ -42,7 +42,7 @@ int main(int argc,char **argv) {
Optionpk<std::string> tmpdir_opt("tmp", "tmp", "Temporary directory","/tmp",2);
Optionpk<bool> disc_opt("circ", "circular", "circular disc kernel for dilation and erosion", false);
// Optionpk<double> angle_opt("a", "angle", "angle used for directional filtering in dilation (North=0, East=90, South=180, West=270).");
- Optionpk<std::string> method_opt("f", "filter", "filter function (median, var, min, max, sum, mean, dilate, erode, close, open, homog (central pixel must be identical to all other pixels within window), heterog, sobelx (horizontal edge detection), sobely (vertical edge detection), sobelxy (diagonal edge detection NE-SW),sobelyx (diagonal edge detection NW-SE), smooth, density, majority voting (only for classes), smoothnodata (smooth nodata values only) values, threshold local filtering [...]
+ Optionpk<std::string> method_opt("f", "filter", "filter function (median, var, min, max, sum, mean, dilate, erode, close, open, homog (central pixel must be identical to all other pixels within window), heterog, sobelx (horizontal edge detection), sobely (vertical edge detection), sobelxy (diagonal edge detection NE-SW),sobelyx (diagonal edge detection NW-SE), smooth, density, countid, majority voting (only for classes), smoothnodata (smooth nodata values only) values, threshold local [...]
Optionpk<std::string> resample_opt("r", "resampling-method", "Resampling method for shifting operation (near: nearest neighbour, bilinear: bi-linear interpolation).", "near");
Optionpk<double> dimX_opt("dx", "dx", "filter kernel size in x, better use odd value to avoid image shift", 3);
Optionpk<double> dimY_opt("dy", "dy", "filter kernel size in y, better use odd value to avoid image shift", 3);
@@ -50,7 +50,7 @@ int main(int argc,char **argv) {
Optionpk<std::string> wavelet_type_opt("wt", "wavelet", "wavelet type: daubechies,daubechies_centered, haar, haar_centered, bspline, bspline_centered", "daubechies");
Optionpk<int> family_opt("wf", "family", "wavelet family (vanishing moment, see also http://www.gnu.org/software/gsl/manual/html_node/DWT-Initialization.html)", 4);
Optionpk<short> class_opt("class", "class", "class value(s) to use for density, erosion, dilation, openening and closing, thresholding");
- Optionpk<double> threshold_opt("t", "threshold", "threshold value(s) to use for threshold filter (one for each class), or threshold to cut for dwt_cut (use 0 to keep all), or sigma for shift", 0);
+ Optionpk<double> threshold_opt("t", "threshold", "threshold value(s) to use for threshold filter (one for each class), or threshold to cut for dwt_cut (use 0 to keep all) or dwt_cut_from, or sigma for shift", 0);
Optionpk<short> nodata_opt("nodata", "nodata", "nodata value(s) for smoothnodata filter");
Optionpk<std::string> tap_opt("tap", "tap", "text file containing taps used for spatial filtering (from ul to lr). Use dimX and dimY to specify tap dimensions in x and y. Leave empty for not using taps");
Optionpk<double> tapz_opt("tapz", "tapz", "taps used for spectral filtering");
@@ -684,6 +684,21 @@ int main(int argc,char **argv) {
else
filter2d.dwtCut(input, output, wavelet_type_opt[0], family_opt[0], threshold_opt[0]);
break;
+ case(filter2d::dwt_cut_from):
+ if(down_opt[0]!=1){
+ std::cerr << "Error: down option not supported for this filter" << std::endl;
+ exit(1);
+ }
+ if(dimZ_opt.size()){
+ if(verbose_opt[0])
+ std::cout<< "DWT approximation in spectral domain" << std::endl;
+ filter1d.dwtCutFrom(input, output, wavelet_type_opt[0], family_opt[0], static_cast<int>(threshold_opt[0]));
+ }
+ else{
+ std::cerr << "Error: this filter is not supported in 2D" << std::endl;
+ exit(1);
+ }
+ break;
case(filter2d::threshold):
filter2d.setThresholds(threshold_opt);//deliberate fall through
case(filter2d::density):
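
The new dwt_cut_from case above adds two guards and otherwise hands the integer-cast threshold to filter1d.dwtCutFrom: it refuses to run with a down-sampling factor and only works in the spectral (1D) domain. A self-contained sketch of just those guards; the option values are made up for the example, and the actual filtering call is left as a comment because the pktools Filter classes are not available here.

#include <iostream>
#include <vector>

int main(){
  int down=1;                    // value of the down-sampling option (assumed default)
  std::vector<double> dimZ(1,3); // spectral kernel size; an empty vector would mean 2D filtering
  double threshold=2;            // threshold option, interpreted as an integer by dwt_cut_from

  if(down!=1){                   // dwt_cut_from does not support down-sampling
    std::cerr << "Error: down option not supported for this filter" << std::endl;
    return 1;
  }
  if(dimZ.empty()){              // the filter only exists in the spectral domain
    std::cerr << "Error: this filter is not supported in 2D" << std::endl;
    return 1;
  }
  std::cout << "DWT approximation in spectral domain" << std::endl;
  // pkfilter now calls filter1d.dwtCutFrom(input, output, wavelet_type_opt[0],
  //                                        family_opt[0], static_cast<int>(threshold));
  return 0;
}
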
diff --git a/src/apps/pkfilterdem.cc b/src/apps/pkfilterdem.cc
index b96e06c..a62c598 100644
--- a/src/apps/pkfilterdem.cc
+++ b/src/apps/pkfilterdem.cc
@@ -161,11 +161,12 @@ int main(int argc,char **argv) {
unsigned long int nchange=1;
if(postFilter_opt[0]=="vito"){
//todo: fill empty pixels
- // hThreshold_opt.resize(3);
+ // hThreshold_opt.resize(4);
// hThreshold_opt[0]=0.7;
// hThreshold_opt[1]=0.3;
// hThreshold_opt[2]=0.1;
- vector<int> nlimit(3);
+ // hThreshold_opt[2]=-0.2;
+ vector<int> nlimit(4);
nlimit[0]=2;
nlimit[1]=3;
nlimit[2]=4;
@@ -184,40 +185,39 @@ int main(int argc,char **argv) {
tmpMask[irow][icol]=1;//1=surface, 0=terrain
if(verbose_opt[0])
cout << "filtering NWSE" << endl;
- theFilter.dsm2dtm_nwse(inputData,tmpMask,hThreshold_opt[iheight],nlimit[iheight],dim_opt[0]);
-
- // //from here
-
+ //from here
// Vector2d<double> tmpDSM(inputData);
- // double noDataValue=0;
-
- // unsigned long int nchange=0;
// int dimX=dim_opt[0];
// int dimY=dim_opt[0];
// assert(dimX);
// assert(dimY);
// statfactory::StatFactory stat;
// Vector2d<double> inBuffer(dimY,inputData.nCols());
- // // if(tmpMask.size()!=inputData.nRows())
- // // tmpMask.resize(inputData.nRows());
+ // if(tmpData.size()!=inputData.nRows())
+ // tmpData.resize(inputData.nRows());
// int indexI=0;
- // int indexJ=0;
+ // int indexJ=inputData.nRows()-1;
+ // // int indexJ=0;
// //initialize last half of inBuffer
// for(int j=-(dimY-1)/2;j<=dimY/2;++j){
// for(int i=0;i<inputData.nCols();++i)
// inBuffer[indexJ][i]=tmpDSM[abs(j)][i];
- // ++indexJ;
+ // --indexJ;
+ // // ++indexJ;
// }
- // for(int y=0;y<tmpDSM.nRows();++y){
+ // for(int y=tmpDSM.nRows()-1;y>=0;--y){
// if(y){//inBuffer already initialized for y=0
// //erase first line from inBuffer
- // inBuffer.erase(inBuffer.begin());
+ // inBuffer.erase(inBuffer.end()-1);
+ // // inBuffer.erase(inBuffer.begin());
// //read extra line and push back to inBuffer if not out of bounds
// if(y+dimY/2<tmpDSM.nRows()){
// //allocate buffer
- // inBuffer.push_back(inBuffer.back());
- // for(int i=0;i<tmpDSM.nCols();++i)
- // inBuffer[inBuffer.size()-1][i]=tmpDSM[y+dimY/2][i];
+ // // inBuffer.push_back(inBuffer.back());
+ // inBuffer.insert(inBuffer.begin(),*(inBuffer.begin()));
+ // for(int i=0;i<tmpDSM.nCols();++i)
+ // inBuffer[0][i]=tmpDSM[y-dimY/2][i];
+ // // inBuffer[inBuffer.size()-1][i]=tmpDSM[y+dimY/2][i];
// }
// else{
// int over=y+dimY/2-tmpDSM.nRows();
@@ -227,8 +227,10 @@ int main(int argc,char **argv) {
// inBuffer.push_back(inBuffer[index]);
// }
// }
- // for(int x=0;x<tmpDSM.nCols();++x){
+ // for(int x=tmpDSM.nCols()-1;x>=0;--x){
// double centerValue=inBuffer[(dimY-1)/2][x];
+ // //test
+ // cout << "pixel (" << x << "," << y << "): " << centerValue << endl;
// short nmasked=0;
// std::vector<double> neighbors;
// for(int j=-(dimY-1)/2;j<=dimY/2;++j){
@@ -246,41 +248,49 @@ int main(int argc,char **argv) {
// else
// indexJ=(dimY-1)/2+j;
// double difference=(centerValue-inBuffer[indexJ][indexI]);
+ // //test
+ // cout << "centerValue-inBuffer[" << indexJ << "][" << indexI << "]=" << centerValue << " - " << inBuffer[indexJ][indexI] << " = " << difference << endl;
// if(i||j)//skip centerValue
// neighbors.push_back(inBuffer[indexJ][indexI]);
// if(difference>hThreshold_opt[iheight])
// ++nmasked;
// }
// }
- // if(nmasked<nlimit[iheight]){
+ // //test
+ // cout << "pixel " << x << ", " << y << ": nmasked is " << nmasked << endl;
+ // if(nmasked<=nlimit[iheight]){
// ++nchange;
- // //reset pixel in tmpMask
- // tmpMask[y][x]=0;
+ // //reset pixel in outputMask
+ // tmpData[y][x]=0;
+ // //test
+ // cout << "pixel " << x << ", " << y << " is ground" << endl;
// }
// else{
// //reset pixel height in tmpDSM
- // inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors);
+ // sort(neighbors.begin(),neighbors.end());
+ // assert(neighbors.size()>1);
+ // inBuffer[(dimY-1)/2][x]=neighbors[1];
+ // //test
+ // cout << "pixel " << x << ", " << y << " is surface, reset DSM to " << neighbors[1] << endl;
+ // /* inBuffer[(dimY-1)/2][x]=stat.mymin(neighbors); */
// }
// }
// }
//to here
- tmpData.setMask(tmpMask,0,0);
+ theFilter.dsm2dtm_nwse(inputData,tmpData,hThreshold_opt[iheight],nlimit[iheight],dim_opt[0]);
if(verbose_opt[0])
cout << "filtering NESW" << endl;
- theFilter.dsm2dtm_nesw(inputData,tmpMask,hThreshold_opt[iheight],nlimit[iheight],dim_opt[0]);
- tmpData.setMask(tmpMask,0,0);
+ theFilter.dsm2dtm_nesw(inputData,tmpData,hThreshold_opt[iheight],nlimit[iheight],dim_opt[0]);
if(verbose_opt[0])
cout << "filtering SENW" << endl;
- theFilter.dsm2dtm_senw(inputData,tmpMask,hThreshold_opt[iheight],nlimit[iheight],dim_opt[0]);
- tmpData.setMask(tmpMask,0,0);
+ theFilter.dsm2dtm_senw(inputData,tmpData,hThreshold_opt[iheight],nlimit[iheight],dim_opt[0]);
if(verbose_opt[0])
cout << "filtering SWNE" << endl;
- theFilter.dsm2dtm_swne(inputData,tmpMask,hThreshold_opt[iheight],nlimit[iheight],dim_opt[0]);
- // set tmpMask to finalMask
- tmpData.setMask(tmpMask,0,0);
+ theFilter.dsm2dtm_swne(inputData,tmpData,hThreshold_opt[iheight],nlimit[iheight],dim_opt[0]);
}
outputData=tmpData;
+ //todo: interpolate
//outputData.setMask(tmpData,1,0);
}
else if(postFilter_opt[0]=="etew_min"){
@@ -322,8 +332,9 @@ int main(int argc,char **argv) {
hThreshold=hThreshold_opt[0]+maxSlope_opt[0]*(newdim-dim)*input.getDeltaX();
dim=newdim;
if(hThreshold_opt.size()>1){
- if(hThreshold>hThreshold_opt[1])
+ if(hThreshold>hThreshold_opt[1]){
hThreshold=hThreshold_opt[1];
+ }
}
std::cout << "iteration " << iteration << ": " << nchange << " pixels changed" << std::endl;
++iteration;
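
The last pkfilterdem hunk only adds braces, but it makes the height-threshold update explicit: the threshold grows with the window enlargement, scaled by the maximum slope and the pixel size, and is then clamped to the optional second hThreshold value. A small self-contained sketch of that update; the numeric values and the window growth are assumptions made for the example.

#include <iostream>
#include <vector>

// computeThreshold reproduces the update from the hunk above: grow the height
// threshold with the window enlargement and clamp it to the optional maximum.
double computeThreshold(const std::vector<double>& hThreshold_opt, double maxSlope,
                        double deltaX, int dim, int newdim)
{
  double hThreshold=hThreshold_opt[0]+maxSlope*(newdim-dim)*deltaX;
  if(hThreshold_opt.size()>1){
    if(hThreshold>hThreshold_opt[1])
      hThreshold=hThreshold_opt[1];                // never exceed the user-defined maximum
  }
  return hThreshold;
}

int main(){
  std::vector<double> hThreshold_opt(2);
  hThreshold_opt[0]=0.2;                           // initial threshold (assumed value)
  hThreshold_opt[1]=2.5;                           // maximum threshold (assumed value)
  double maxSlope=0.3;                             // maximum terrain slope (assumed value)
  double deltaX=1.0;                               // pixel size in x (assumed value)
  int dim=3;
  std::cout << computeThreshold(hThreshold_opt,maxSlope,deltaX,dim,dim+2) << std::endl;   // 0.8
  std::cout << computeThreshold(hThreshold_opt,maxSlope,deltaX,dim,dim+20) << std::endl;  // clamped to 2.5
  return 0;
}
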
diff --git a/src/apps/pkfsann.cc b/src/apps/pkfsann.cc
index 94fbd3c..a28faa1 100644
--- a/src/apps/pkfsann.cc
+++ b/src/apps/pkfsann.cc
@@ -25,80 +25,67 @@ along with pktools. If not, see <http://www.gnu.org/licenses/>.
#include "base/Optionpk.h"
#include "imageclasses/ImgReaderOgr.h"
#include "algorithms/ConfusionMatrix.h"
+#include "algorithms/CostFactory.h"
#include "algorithms/FeatureSelector.h"
#include "floatfann.h"
#include "algorithms/myfann_cpp.h"
+#include "pkfsann.h"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
-enum SelectorValue { NA=0, SFFS=1, SFS=2, SBS=3, BFS=4 };
-
using namespace std;
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
+CostFactoryANN::CostFactoryANN(const vector<unsigned int>& nneuron, float connection, const std::vector<float> weights, float learning, unsigned int maxit, unsigned short cv, bool verbose)
+ : CostFactory(cv,verbose), m_nneuron(nneuron), m_connection(connection), m_weights(weights), m_learning(learning), m_maxit(maxit){};
+
+CostFactoryANN::~CostFactoryANN(){
+}
-//global parameters used in cost function getCost
-map<string,short> classValueMap;
-vector<std::string> nameVector;
-vector<unsigned int> nctraining;
-vector<unsigned int> nctest;
-Optionpk<unsigned int> nneuron_opt("\0", "nneuron", "number of neurons in hidden layers in neural network (multiple hidden layers are set by defining multiple number of neurons: -n 15 -n 1, default is one hidden layer with 5 neurons)", 5);
-Optionpk<float> connection_opt("\0", "connection", "connection reate (default: 1.0 for a fully connected network)", 1.0);
-Optionpk<float> weights_opt("w", "weights", "weights for neural network. Apply to fully connected network only, starting from first input neuron to last output neuron, including the bias neurons (last neuron in each but last layer)", 0.0);
-Optionpk<float> learning_opt("l", "learning", "learning rate (default: 0.7)", 0.7);
-Optionpk<unsigned int> maxit_opt("\0", "maxit", "number of maximum iterations (epoch) (default: 500)", 500);
-// Optionpk<bool> weight_opt("wi", "wi", "set the parameter C of class i to weight*C, for C-SVC",true);
-Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",2);
-Optionpk<string> classname_opt("c", "class", "list of class names.");
-Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use same order as in classname opt.");
-Optionpk<short> verbose_opt("v", "verbose", "set to: 0 (results only), 1 (confusion matrix), 2 (debug)",0);
-
-double getCost(const vector<Vector2d<float> > &trainingFeatures)
+double CostFactoryANN::getCost(const vector<Vector2d<float> > &trainingFeatures)
{
unsigned short nclass=trainingFeatures.size();
unsigned int ntraining=0;
unsigned int ntest=0;
for(int iclass=0;iclass<nclass;++iclass){
- ntraining+=nctraining[iclass];
- ntest+=nctest[iclass];
+ ntraining+=m_nctraining[iclass];
+ ntest+=m_nctest[iclass];
}
if(ntest)
- assert(!cv_opt[0]);
- if(!cv_opt[0])
+ assert(!m_cv);
+ if(!m_cv)
assert(ntest);
unsigned short nFeatures=trainingFeatures[0][0].size();
FANN::neural_net net;//the neural network
- const unsigned int num_layers = nneuron_opt.size()+2;
+ const unsigned int num_layers = m_nneuron.size()+2;
const float desired_error = 0.0003;
- const unsigned int iterations_between_reports = (verbose_opt[0])?maxit_opt[0]+1:0;
- if(verbose_opt[0]>1){
- cout << "creating artificial neural network with " << nneuron_opt.size() << " hidden layer, having " << endl;
- for(int ilayer=0;ilayer<nneuron_opt.size();++ilayer)
- cout << nneuron_opt[ilayer] << " ";
+ const unsigned int iterations_between_reports = (m_verbose) ? m_maxit+1:0;
+ if(m_verbose>1){
+ cout << "creating artificial neural network with " << m_nneuron.size() << " hidden layer, having " << endl;
+ for(int ilayer=0;ilayer<m_nneuron.size();++ilayer)
+ cout << m_nneuron[ilayer] << " ";
cout << "neurons" << endl;
}
switch(num_layers){
case(3):{
unsigned int layers[3];
layers[0]=nFeatures;
- layers[1]=nneuron_opt[0];
+ layers[1]=m_nneuron[0];
layers[2]=nclass;
- net.create_sparse_array(connection_opt[0],num_layers,layers);
- // net.create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nclass);
+ net.create_sparse_array(m_connection,num_layers,layers);
break;
}
case(4):{
unsigned int layers[4];
layers[0]=nFeatures;
- layers[1]=nneuron_opt[0];
- layers[2]=nneuron_opt[1];
+ layers[1]=m_nneuron[0];
+ layers[2]=m_nneuron[1];
layers[3]=nclass;
- net.create_sparse_array(connection_opt[0],num_layers,layers);
- // net.create_sparse(connection_opt[0],num_layers, nFeatures, nneuron_opt[0], nneuron_opt[1], nclass);
+ net.create_sparse_array(m_connection,num_layers,layers);
break;
}
default:
@@ -107,70 +94,58 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures)
break;
}
- net.set_learning_rate(learning_opt[0]);
- // net.set_activation_steepness_hidden(1.0);
- // net.set_activation_steepness_output(1.0);
+ net.set_learning_rate(m_learning);
net.set_activation_function_hidden(FANN::SIGMOID_SYMMETRIC_STEPWISE);
net.set_activation_function_output(FANN::SIGMOID_SYMMETRIC_STEPWISE);
- // Set additional properties such as the training algorithm
- // net.set_training_algorithm(FANN::TRAIN_QUICKPROP);
-
vector<unsigned short> referenceVector;
vector<unsigned short> outputVector;
float rmse=0;
- ConfusionMatrix cm;
- //set names in confusion matrix using nameVector
- for(int iname=0;iname<nameVector.size();++iname){
- if(classValueMap.empty())
- cm.pushBackClassName(nameVector[iname]);
- else if(cm.getClassIndex(type2string<short>(classValueMap[nameVector[iname]]))<0)
- cm.pushBackClassName(type2string<short>(classValueMap[nameVector[iname]]));
- }
vector<Vector2d<float> > tmpFeatures(nclass);
for(int iclass=0;iclass<nclass;++iclass){
tmpFeatures[iclass].resize(trainingFeatures[iclass].size(),nFeatures);
- for(unsigned int isample=0;isample<nctraining[iclass];++isample){
+ for(unsigned int isample=0;isample<m_nctraining[iclass];++isample){
for(int ifeature=0;ifeature<nFeatures;++ifeature){
tmpFeatures[iclass][isample][ifeature]=trainingFeatures[iclass][isample][ifeature];
}
}
}
- if(cv_opt[0]>0){
+ m_cm.clearResults();
+ if(m_cv>0){
rmse=net.cross_validation(tmpFeatures,
ntraining,
- cv_opt[0],
- maxit_opt[0],
+ m_cv,
+ m_maxit,
desired_error,
referenceVector,
outputVector,
- verbose_opt[0]);
+ m_verbose);
for(int isample=0;isample<referenceVector.size();++isample){
- string refClassName=nameVector[referenceVector[isample]];
- string className=nameVector[outputVector[isample]];
- if(classValueMap.size())
- cm.incrementResult(type2string<short>(classValueMap[refClassName]),type2string<short>(classValueMap[className]),1.0);
+ string refClassName=m_nameVector[referenceVector[isample]];
+ string className=m_nameVector[outputVector[isample]];
+ if(m_classValueMap.size())
+ m_cm.incrementResult(type2string<short>(m_classValueMap[refClassName]),type2string<short>(m_classValueMap[className]),1.0);
else
- cm.incrementResult(cm.getClass(referenceVector[isample]),cm.getClass(outputVector[isample]),1.0);
+ m_cm.incrementResult(m_cm.getClass(referenceVector[isample]),m_cm.getClass(outputVector[isample]),1.0);
}
}
else{//not working yet. please repair...
- assert(cv_opt[0]>0);
+ assert(m_cv>0);
bool initWeights=true;
- net.train_on_data(tmpFeatures,ntraining,initWeights, maxit_opt[0],
+ net.train_on_data(tmpFeatures,ntraining,initWeights, m_maxit,
iterations_between_reports, desired_error);
vector<Vector2d<float> > testFeatures(nclass);
vector<float> result(nclass);
int maxClass=-1;
for(int iclass=0;iclass<nclass;++iclass){
- testFeatures.resize(nctest[iclass],nFeatures);
- for(unsigned int isample=0;isample<nctraining[iclass];++isample){
+ testFeatures.resize(m_nctest[iclass],nFeatures);
+ for(unsigned int isample=0;isample<m_nctraining[iclass];++isample){
for(int ifeature=0;ifeature<nFeatures;++ifeature){
- testFeatures[iclass][isample][ifeature]=trainingFeatures[iclass][nctraining[iclass]+isample][ifeature];
+ testFeatures[iclass][isample][ifeature]=trainingFeatures[iclass][m_nctraining[iclass]+isample][ifeature];
}
result=net.run(testFeatures[iclass][isample]);
- string refClassName=nameVector[iclass];
+ string refClassName=m_nameVector[iclass];
float maxP=-1;
for(int ic=0;ic<nclass;++ic){
float pv=(result[ic]+1.0)/2.0;//bring back to scale [0,1]
@@ -179,16 +154,16 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures)
maxClass=ic;
}
}
- string className=nameVector[maxClass];
- if(classValueMap.size())
- cm.incrementResult(type2string<short>(classValueMap[refClassName]),type2string<short>(classValueMap[className]),1.0);
+ string className=m_nameVector[maxClass];
+ if(m_classValueMap.size())
+ m_cm.incrementResult(type2string<short>(m_classValueMap[refClassName]),type2string<short>(m_classValueMap[className]),1.0);
else
- cm.incrementResult(cm.getClass(referenceVector[isample]),cm.getClass(outputVector[isample]),1.0);
+ m_cm.incrementResult(m_cm.getClass(referenceVector[isample]),m_cm.getClass(outputVector[isample]),1.0);
}
}
}
- assert(cm.nReference());
- return(cm.kappa());
+ assert(m_cm.nReference());
+ return(m_cm.kappa());
}
int main(int argc, char *argv[])
@@ -213,6 +188,15 @@ int main(int argc, char *argv[])
// Optionpk<double> priors_opt("p", "prior", "prior probabilities for each class (e.g., -p 0.3 -p 0.3 -p 0.2 )", 0.0);
   Optionpk<string> selector_opt("sm", "sm", "feature selection method (sffs=sequential floating forward search, sfs=sequential forward search, sbs=sequential backward search, bfs=brute force search)","sffs");
Optionpk<float> epsilon_cost_opt("ecost", "ecost", "epsilon for stopping criterion in cost function to determine optimal number of features",0.001);
+ Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",2);
+ Optionpk<string> classname_opt("c", "class", "list of class names.");
+ Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use same order as in classname option).");
+ Optionpk<unsigned int> nneuron_opt("n", "nneuron", "number of neurons in hidden layers in neural network (multiple hidden layers are set by defining multiple number of neurons: -n 15 -n 1, default is one hidden layer with 5 neurons)", 5);
+ Optionpk<float> connection_opt("\0", "connection", "connection rate (default: 1.0 for a fully connected network)", 1.0);
+ Optionpk<float> weights_opt("w", "weights", "weights for neural network. Apply to fully connected network only, starting from first input neuron to last output neuron, including the bias neurons (last neuron in each but last layer)", 0.0);
+ Optionpk<float> learning_opt("l", "learning", "learning rate (default: 0.7)", 0.7);
+ Optionpk<unsigned int> maxit_opt("\0", "maxit", "number of maximum iterations (epoch) (default: 500)", 500);
+ Optionpk<short> verbose_opt("v", "verbose", "set to: 0 (results only), 1 (confusion matrix), 2 (debug)",0);
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
@@ -231,16 +215,16 @@ int main(int argc, char *argv[])
scale_opt.retrieveOption(argc,argv);
aggreg_opt.retrieveOption(argc,argv);
// priors_opt.retrieveOption(argc,argv);
+ selector_opt.retrieveOption(argc,argv);
+ epsilon_cost_opt.retrieveOption(argc,argv);
+ cv_opt.retrieveOption(argc,argv);
+ classname_opt.retrieveOption(argc,argv);
+ classvalue_opt.retrieveOption(argc,argv);
nneuron_opt.retrieveOption(argc,argv);
connection_opt.retrieveOption(argc,argv);
weights_opt.retrieveOption(argc,argv);
learning_opt.retrieveOption(argc,argv);
maxit_opt.retrieveOption(argc,argv);
- cv_opt.retrieveOption(argc,argv);
- selector_opt.retrieveOption(argc,argv);
- epsilon_cost_opt.retrieveOption(argc,argv);
- classname_opt.retrieveOption(argc,argv);
- classvalue_opt.retrieveOption(argc,argv);
verbose_opt.retrieveOption(argc,argv);
}
catch(string predefinedString){
@@ -252,6 +236,20 @@ int main(int argc, char *argv[])
exit(0);//help was invoked, stop processing
}
+ CostFactoryANN costfactory(nneuron_opt, connection_opt[0], weights_opt, learning_opt[0], maxit_opt[0], cv_opt[0], verbose_opt[0]);
+
+ assert(training_opt.size());
+ if(input_opt.size())
+ costfactory.setCv(0);
+ if(verbose_opt[0]>=1){
+ if(input_opt.size())
+ std::cout << "input filename: " << input_opt[0] << std::endl;
+ std::cout << "training vector file: " << std::endl;
+ for(int ifile=0;ifile<training_opt.size();++ifile)
+ std::cout << training_opt[ifile] << std::endl;
+ std::cout << "verbose: " << verbose_opt[0] << std::endl;
+ }
+
static std::map<std::string, SelectorValue> selMap;
//initialize selMap
selMap["sffs"]=SFFS;
@@ -272,11 +270,6 @@ int main(int argc, char *argv[])
int nband=0;
int startBand=2;//first two bands represent X and Y pos
- vector<double> offset;
- vector<double> scale;
- vector< Vector2d<float> > trainingPixels;//[class][sample][band]
- vector< Vector2d<float> > testPixels;//[class][sample][band]
-
// if(priors_opt.size()>1){//priors from argument list
// priors.resize(priors_opt.size());
// double normPrior=0;
@@ -297,14 +290,19 @@ int main(int argc, char *argv[])
if(classname_opt.size()){
assert(classname_opt.size()==classvalue_opt.size());
for(int iclass=0;iclass<classname_opt.size();++iclass)
- classValueMap[classname_opt[iclass]]=classvalue_opt[iclass];
+ costfactory.setClassValueMap(classname_opt[iclass],classvalue_opt[iclass]);
}
//----------------------------------- Training -------------------------------
+ vector<double> offset;
+ vector<double> scale;
+ vector< Vector2d<float> > trainingPixels;//[class][sample][band]
+ vector< Vector2d<float> > testPixels;//[class][sample][band]
+ map<string,Vector2d<float> > trainingMap;
+ map<string,Vector2d<float> > testMap;
vector<string> fields;
+
//organize training data
trainingPixels.clear();
- map<string,Vector2d<float> > trainingMap;
- map<string,Vector2d<float> > testMap;
if(verbose_opt[0]>=1)
std::cout << "reading imageVector file " << training_opt[0] << std::endl;
try{
@@ -339,6 +337,12 @@ int main(int argc, char *argv[])
cerr << error << std::endl;
exit(1);
}
+ catch(std::exception& e){
+ std::cerr << "Error: ";
+ std::cerr << e.what() << std::endl;
+ std::cerr << CPLGetLastErrorMsg() << std::endl;
+ exit(1);
+ }
catch(...){
cerr << "error catched" << std::endl;
exit(1);
@@ -354,23 +358,22 @@ int main(int argc, char *argv[])
std::cout << "training pixels: " << std::endl;
map<string,Vector2d<float> >::iterator mapit=trainingMap.begin();
while(mapit!=trainingMap.end()){
- if(classValueMap.size()){
- //check if name in training is covered by classname_opt (values can not be 0)
- if(classValueMap[mapit->first]>0){
- if(verbose_opt[0])
- std::cout << mapit->first << " -> " << classValueMap[mapit->first] << std::endl;
- }
- else{
- std::cerr << "Error: names in classname option are not complete, please check names in training vector and make sure classvalue is > 0" << std::endl;
- exit(1);
- }
- }
+ // if(classValueMap.size()){
+ // //check if name in training is covered by classname_opt (values can not be 0)
+ // if(classValueMap[mapit->first]>0){
+ // if(verbose_opt[0])
+ // std::cout << mapit->first << " -> " << classValueMap[mapit->first] << std::endl;
+ // }
+ // else{
+ // std::cerr << "Error: names in classname option are not complete, please check names in training vector and make sure classvalue is > 0" << std::endl;
+ // exit(1);
+ // }
+ // }
//delete small classes
if((mapit->second).size()<minSize_opt[0]){
trainingMap.erase(mapit);
continue;
}
- nameVector.push_back(mapit->first);
trainingPixels.push_back(mapit->second);
if(verbose_opt[0]>1)
std::cout << mapit->first << ": " << (mapit->second).size() << " samples" << std::endl;
@@ -383,16 +386,6 @@ int main(int argc, char *argv[])
mapit=testMap.begin();
while(mapit!=testMap.end()){
- if(classValueMap.size()){
- //check if name in test is covered by classname_opt (values can not be 0)
- if(classValueMap[mapit->first]>0){
- ;//ok, no need to print to std::cout
- }
- else{
- std::cerr << "Error: names in classname option are not complete, please check names in test vector and make sure classvalue is > 0" << std::endl;
- exit(1);
- }
- }
//no need to delete small classes for test sample
testPixels.push_back(mapit->second);
if(verbose_opt[0]>1)
@@ -480,7 +473,28 @@ int main(int argc, char *argv[])
// std::cout << std::endl;
}
+ //set names in confusion matrix using nameVector
+ vector<string> nameVector=costfactory.getNameVector();
+ for(int iname=0;iname<nameVector.size();++iname){
+ if(costfactory.getClassValueMap().empty())
+ costfactory.pushBackClassName(nameVector[iname]);
+ // cm.pushBackClassName(nameVector[iname]);
+ else if(costfactory.getClassIndex(type2string<short>((costfactory.getClassValueMap())[nameVector[iname]]))<0)
+ costfactory.pushBackClassName(type2string<short>((costfactory.getClassValueMap())[nameVector[iname]]));
+ }
+
+ // if(classname_opt.empty()){
+ // for(int iclass=0;iclass<nclass;++iclass){
+ // if(verbose_opt[0])
+ // std::cout << iclass << " " << cm.getClass(iclass) << " -> " << string2type<short>(cm.getClass(iclass)) << std::endl;
+ // classValueMap[cm.getClass(iclass)]=string2type<short>(cm.getClass(iclass));
+ // }
+ // }
+
   //Calculate features of training set
+
+ vector<unsigned int> nctraining;
+ vector<unsigned int> nctest;
nctraining.resize(nclass);
nctest.resize(nclass);
vector< Vector2d<float> > trainingFeatures(nclass);
@@ -526,6 +540,8 @@ int main(int argc, char *argv[])
assert(trainingFeatures[iclass].size()==nctraining[iclass]+nctest[iclass]);
}
+ costfactory.setNcTraining(nctraining);
+ costfactory.setNcTest(nctest);
int nFeatures=trainingFeatures[0][0].size();
int maxFeatures=(maxFeatures_opt[0])? maxFeatures_opt[0] : 1;
double previousCost=-1;
@@ -533,36 +549,36 @@ int main(int argc, char *argv[])
list<int> subset;//set of selected features (levels) for each class combination
FeatureSelector selector;
try{
- if(maxFeatures==nFeatures){
+ if(maxFeatures>=nFeatures){
subset.clear();
for(int ifeature=0;ifeature<nFeatures;++ifeature)
subset.push_back(ifeature);
- cost=getCost(trainingFeatures);
+ cost=costfactory.getCost(trainingFeatures);
}
else{
- while(fabs(cost-previousCost)>epsilon_cost_opt[0]){
+ while(fabs(cost-previousCost)>=epsilon_cost_opt[0]){
previousCost=cost;
switch(selMap[selector_opt[0]]){
case(SFFS):
subset.clear();//needed to clear in case of floating and brute force search
- cost=selector.floating(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+ cost=selector.floating(trainingFeatures,costfactory,subset,maxFeatures,epsilon_cost_opt[0],verbose_opt[0]);
break;
case(SFS):
- cost=selector.forward(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+ cost=selector.forward(trainingFeatures,costfactory,subset,maxFeatures,verbose_opt[0]);
break;
case(SBS):
- cost=selector.backward(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+ cost=selector.backward(trainingFeatures,costfactory,subset,maxFeatures,verbose_opt[0]);
break;
case(BFS):
subset.clear();//needed to clear in case of floating and brute force search
- cost=selector.bruteForce(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+ cost=selector.bruteForce(trainingFeatures,costfactory,subset,maxFeatures,verbose_opt[0]);
break;
default:
std::cout << "Error: selector not supported, please use sffs, sfs, sbs or bfs" << std::endl;
exit(1);
break;
}
- if(verbose_opt[0]){
+ if(verbose_opt[0]>1){
std::cout << "cost: " << cost << std::endl;
std::cout << "previousCost: " << previousCost << std::endl;
std::cout << std::setprecision(12) << "cost-previousCost: " << cost - previousCost << " ( " << epsilon_cost_opt[0] << ")" << std::endl;
@@ -581,6 +597,7 @@ int main(int argc, char *argv[])
if(verbose_opt[0])
cout <<"cost: " << cost << endl;
+ subset.sort();
for(list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
std::cout << " -b " << *lit;
std::cout << std::endl;
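
The feature-selection driver above now keeps iterating while the cost (the kappa returned by CostFactoryANN::getCost) improves by at least ecost, and the comparison changes from > to >=. A tiny, self-contained illustration of that stopping criterion; the cost sequence is invented for the example, whereas in pkfsann each round goes through FeatureSelector and the cost factory.

#include <cmath>
#include <cstdio>

int main(){
  double costs[]={0.60,0.71,0.78,0.781,0.7812};  // hypothetical kappa after each selection round
  double ecost=0.001;                            // epsilon_cost option
  double previousCost=-1;
  double cost=0;
  int round=0;
  while(std::fabs(cost-previousCost)>=ecost && round<5){
    previousCost=cost;
    cost=costs[round++];                         // one feature-selection round
    std::printf("round %d: cost=%g, improvement=%g\n",round,cost,cost-previousCost);
  }
  return 0;                                      // stops once the improvement drops below ecost
}
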
diff --git a/src/apps/pkfsann.h b/src/apps/pkfsann.h
new file mode 100644
index 0000000..6db587a
--- /dev/null
+++ b/src/apps/pkfsann.h
@@ -0,0 +1,46 @@
+/**********************************************************************
+pkfsann.h: feature selection for ann classifier
+Copyright (C) 2008-2014 Pieter Kempeneers
+
+This file is part of pktools
+
+pktools is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+pktools is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with pktools. If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+#include <string>
+#include <vector>
+#include "base/Vector2d.h"
+
+#ifndef _PKFSANNH_H_
+#define _PKFSANNH_H_
+
+enum SelectorValue { NA=0, SFFS=1, SFS=2, SBS=3, BFS=4};
+
+class CostFactoryANN : public CostFactory
+{
+ public:
+ CostFactoryANN();
+ CostFactoryANN(const std::vector<unsigned int>& nneuron, float connection, const std::vector<float> weights, float learning, unsigned int maxit, unsigned short cv, bool verbose);
+ ~CostFactoryANN();
+ double getCost(const std::vector<Vector2d<float> > &trainingFeatures);
+
+ private:
+ std::vector<unsigned int> m_nneuron;
+ float m_connection;
+ const std::vector<float> m_weights;
+ float m_learning;
+ unsigned int m_maxit;
+};
+
+
+#endif
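
pkfsann.h declares CostFactoryANN against the CostFactory base class included from algorithms/CostFactory.h, whose content is not part of the hunks shown here. The stub below sketches what such a base class needs for the pattern used in pkfsann and pkfssvm (a virtual getCost plus the bookkeeping the applications set); it is an assumption reconstructed from the usage above, not the actual header, and the ToyCost class is purely hypothetical.

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for base/Vector2d.h; a vector of rows is enough for this sketch.
template<class T> class Vector2d : public std::vector<std::vector<T> > {};

class CostFactory{
 public:
  CostFactory(unsigned short cv, short verbose) : m_cv(cv), m_verbose(verbose) {}
  virtual ~CostFactory() {}
  virtual double getCost(const std::vector<Vector2d<float> >& trainingFeatures)=0;
  void setCv(unsigned short cv) {m_cv=cv;}
  void setClassValueMap(const std::string& name, short value) {m_classValueMap[name]=value;}
 protected:
  unsigned short m_cv;                           // cross-validation folds (0 disables cv)
  short m_verbose;
  std::map<std::string,short> m_classValueMap;
  std::vector<unsigned int> m_nctraining;        // training samples per class
  std::vector<unsigned int> m_nctest;            // test samples per class
};

// Hypothetical cost: fraction of non-zero feature values, only to show how a
// derived class plugs in (CostFactoryANN returns the kappa of a trained ANN).
class ToyCost : public CostFactory{
 public:
  ToyCost() : CostFactory(2,0) {}
  double getCost(const std::vector<Vector2d<float> >& f){
    unsigned int nonzero=0, total=0;
    for(unsigned int c=0;c<f.size();++c)
      for(unsigned int s=0;s<f[c].size();++s)
        for(unsigned int b=0;b<f[c][s].size();++b){
          if(f[c][s][b]!=0) ++nonzero;
          ++total;
        }
    return total ? static_cast<double>(nonzero)/total : 0;
  }
};

int main(){
  std::vector<Vector2d<float> > features(1);
  features[0].resize(2, std::vector<float>(3,1.0f));  // one class, two samples, three features
  features[0][1][2]=0;
  ToyCost cost;
  std::cout << "toy cost: " << cost.getCost(features) << std::endl;  // a selector calls getCost repeatedly
  return 0;
}
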
diff --git a/src/apps/pkfssvm.cc b/src/apps/pkfssvm.cc
index a314c76..7d5fb50 100644
--- a/src/apps/pkfssvm.cc
+++ b/src/apps/pkfssvm.cc
@@ -24,50 +24,38 @@ along with pktools. If not, see <http://www.gnu.org/licenses/>.
#include <algorithm>
#include "base/Optionpk.h"
#include "algorithms/ConfusionMatrix.h"
+#include "algorithms/CostFactory.h"
#include "algorithms/FeatureSelector.h"
#include "algorithms/svm.h"
#include "imageclasses/ImgReaderOgr.h"
+#include "pkfssvm.h"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
-namespace svm{
- enum SVM_TYPE {C_SVC=0, nu_SVC=1,one_class=2, epsilon_SVR=3, nu_SVR=4};
- enum KERNEL_TYPE {linear=0,polynomial=1,radial=2,sigmoid=3};
-}
-
-enum SelectorValue { NA=0, SFFS=1, SFS=2, SBS=3, BFS=4 };
-
using namespace std;
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
//global parameters used in cost function getCost
-map<string,short> classValueMap;
-vector<std::string> nameVector;
-vector<unsigned int> nctraining;
-vector<unsigned int> nctest;
-Optionpk<std::string> svm_type_opt("svmt", "svmtype", "type of SVM (C_SVC, nu_SVC,one_class, epsilon_SVR, nu_SVR)","C_SVC");
-Optionpk<std::string> kernel_type_opt("kt", "kerneltype", "type of kernel function (linear,polynomial,radial,sigmoid) ","radial");
-Optionpk<unsigned short> kernel_degree_opt("kd", "kd", "degree in kernel function",3);
-Optionpk<float> gamma_opt("g", "gamma", "gamma in kernel function",0);
-Optionpk<float> coef0_opt("c0", "coef0", "coef0 in kernel function",0);
-Optionpk<float> ccost_opt("cc", "ccost", "the parameter C of C-SVC, epsilon-SVR, and nu-SVR",1);
-Optionpk<float> nu_opt("nu", "nu", "the parameter nu of nu-SVC, one-class SVM, and nu-SVR",0.5);
-Optionpk<float> epsilon_loss_opt("eloss", "eloss", "the epsilon in loss function of epsilon-SVR",0.1);
-Optionpk<int> cache_opt("cache", "cache", "cache memory size in MB",100);
-Optionpk<float> epsilon_tol_opt("etol", "etol", "the tolerance of termination criterion",0.001);
-Optionpk<bool> shrinking_opt("shrink", "shrink", "whether to use the shrinking heuristics",false);
-Optionpk<bool> prob_est_opt("pe", "probest", "whether to train a SVC or SVR model for probability estimates",true,2);
-// Optionpk<bool> weight_opt("wi", "wi", "set the parameter C of class i to weight*C, for C-SVC",true);
-Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",2);
-Optionpk<string> classname_opt("c", "class", "list of class names.");
-Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use same order as in classname opt.");
-Optionpk<short> verbose_opt("v", "verbose", "set to: 0 (results only), 1 (confusion matrix), 2 (debug)",0);
-
-double getCost(const vector<Vector2d<float> > &trainingFeatures)
-{
+// ConfusionMatrix cm;
+// map<string,short> classValueMap;
+// vector<std::string> nameVector;
+// vector<unsigned int> nctraining;
+// vector<unsigned int> nctest;
+
+CostFactorySVM::CostFactorySVM()
+ : CostFactory(2,0), m_svm_type("C_SVC"), m_kernel_type("radial"), m_kernel_degree(3), m_gamma(1.0), m_coef0(0), m_ccost(1000), m_nu(0.5), m_epsilon_loss(100), m_cache(100), m_epsilon_tol(0.001), m_shrinking(false), m_prob_est(true){
+}
+
+CostFactorySVM::~CostFactorySVM(){
+}
+
+CostFactorySVM::CostFactorySVM(std::string svm_type, std::string kernel_type, unsigned short kernel_degree, float gamma, float coef0, float ccost, float nu, float epsilon_loss, int cache, float epsilon_tol, bool shrinking, bool prob_est, unsigned short cv, bool verbose)
+ : CostFactory(cv,verbose), m_svm_type(svm_type), m_kernel_type(kernel_type), m_kernel_degree(kernel_degree), m_gamma(gamma), m_coef0(coef0), m_ccost(ccost), m_nu(nu), m_epsilon_loss(epsilon_loss), m_cache(cache), m_epsilon_tol(epsilon_tol), m_shrinking(shrinking), m_prob_est(prob_est){};
+
+double CostFactorySVM::getCost(const vector<Vector2d<float> > &trainingFeatures){
std::map<std::string, svm::SVM_TYPE> svmMap;
svmMap["C_SVC"]=svm::C_SVC;
@@ -87,32 +75,32 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures)
unsigned int ntraining=0;
unsigned int ntest=0;
for(int iclass=0;iclass<nclass;++iclass){
- ntraining+=nctraining[iclass];
- ntest+=nctest[iclass];
+ ntraining+=m_nctraining[iclass];
+ ntest+=m_nctest[iclass];
}
if(ntest)
- assert(!cv_opt[0]);
- if(!cv_opt[0])
+ assert(!m_cv);
+ if(!m_cv)
assert(ntest);
unsigned short nFeatures=trainingFeatures[0][0].size();
struct svm_parameter param;
- param.svm_type = svmMap[svm_type_opt[0]];
- param.kernel_type = kernelMap[kernel_type_opt[0]];
- param.degree = kernel_degree_opt[0];
- param.gamma = (gamma_opt[0]>0)? gamma_opt[0] : 1.0/nFeatures;
- param.coef0 = coef0_opt[0];
- param.nu = nu_opt[0];
- param.cache_size = cache_opt[0];
- param.C = ccost_opt[0];
- param.eps = epsilon_tol_opt[0];
- param.p = epsilon_loss_opt[0];
- param.shrinking = (shrinking_opt[0])? 1 : 0;
- param.probability = (prob_est_opt[0])? 1 : 0;
+ param.svm_type = svmMap[m_svm_type];
+ param.kernel_type = kernelMap[m_kernel_type];
+ param.degree = m_kernel_degree;
+ param.gamma = (m_gamma>0)? m_gamma : 1.0/nFeatures;
+ param.coef0 = m_coef0;
+ param.nu = m_nu;
+ param.cache_size = m_cache;
+ param.C = m_ccost;
+ param.eps = m_epsilon_tol;
+ param.p = m_epsilon_loss;
+ param.shrinking = (m_shrinking)? 1 : 0;
+ param.probability = (m_prob_est)? 1 : 0;
param.nr_weight = 0;//not used: I use priors and balancing
param.weight_label = NULL;
param.weight = NULL;
- param.verbose=(verbose_opt[0]>1)? true:false;
+ param.verbose=(m_verbose>1)? true:false;
struct svm_model* svm;
struct svm_problem prob;
struct svm_node* x_space;
@@ -125,7 +113,7 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures)
int lIndex=0;
for(int iclass=0;iclass<nclass;++iclass){
// for(int isample=0;isample<trainingFeatures[iclass].size();++isample){
- for(int isample=0;isample<nctraining[iclass];++isample){
+ for(int isample=0;isample<m_nctraining[iclass];++isample){
prob.x[lIndex]=&(x_space[spaceIndex]);
for(int ifeature=0;ifeature<nFeatures;++ifeature){
x_space[spaceIndex].index=ifeature+1;
@@ -139,69 +127,65 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures)
}
assert(lIndex==prob.l);
- if(verbose_opt[0]>2)
+ if(m_verbose>2)
std::cout << "checking parameters" << std::endl;
  svm_check_parameter(&prob,&param);
- if(verbose_opt[0]>2)
+ if(m_verbose>2)
std::cout << "parameters ok, training" << std::endl;
  svm=svm_train(&prob,&param);
- if(verbose_opt[0]>2)
+ if(m_verbose>2)
std::cout << "SVM is now trained" << std::endl;
- ConfusionMatrix cm;
- //set names in confusion matrix using nameVector
- for(int iname=0;iname<nameVector.size();++iname){
- if(classValueMap.empty())
- cm.pushBackClassName(nameVector[iname]);
- else if(cm.getClassIndex(type2string<short>(classValueMap[nameVector[iname]]))<0)
- cm.pushBackClassName(type2string<short>(classValueMap[nameVector[iname]]));
- }
- if(cv_opt[0]>1){
+ m_cm.clearResults();
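+  // Descriptive note (as read from the code below): if m_cv>1 the accuracy is
+  // estimated with n-fold cross-validation via svm_cross_validation; otherwise
+  // the held-out test samples (m_nctest per class) are classified one by one.
+  // Either way the results accumulate in m_cm and getCost() returns its kappa.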
+ if(m_cv>1){
double *target = Malloc(double,prob.l);
-    svm_cross_validation(&prob,&param,cv_opt[0],target);
+    svm_cross_validation(&prob,&param,m_cv,target);
     assert(param.svm_type != EPSILON_SVR&&param.svm_type != NU_SVR);//only for regression
for(int i=0;i<prob.l;i++){
- string refClassName=nameVector[prob.y[i]];
- string className=nameVector[target[i]];
- if(classValueMap.size())
- cm.incrementResult(type2string<short>(classValueMap[refClassName]),type2string<short>(classValueMap[className]),1.0);
+ string refClassName=m_nameVector[prob.y[i]];
+ string className=m_nameVector[target[i]];
+ if(m_classValueMap.size())
+ m_cm.incrementResult(type2string<short>(m_classValueMap[refClassName]),type2string<short>(m_classValueMap[className]),1.0);
else
- cm.incrementResult(cm.getClass(prob.y[i]),cm.getClass(target[i]),1.0);
+ m_cm.incrementResult(m_cm.getClass(prob.y[i]),m_cm.getClass(target[i]),1.0);
}
free(target);
}
else{
struct svm_node *x_test;
+ vector<double> result(nclass);
x_test = Malloc(struct svm_node,(nFeatures+1));
for(int iclass=0;iclass<nclass;++iclass){
- for(int isample=0;isample<nctest[iclass];++isample){
+ for(int isample=0;isample<m_nctest[iclass];++isample){
for(int ifeature=0;ifeature<nFeatures;++ifeature){
x_test[ifeature].index=ifeature+1;
- x_test[ifeature].value=trainingFeatures[iclass][nctraining[iclass]+isample][ifeature];
+ x_test[ifeature].value=trainingFeatures[iclass][m_nctraining[iclass]+isample][ifeature];
}
x_test[nFeatures].index=-1;
double predict_label=0;
- //todo: make distinction between svm_predict and svm_predict_probability?
- predict_label = svm_predict(svm,x_test);
- string refClassName=nameVector[iclass];
- string className=nameVector[static_cast<short>(predict_label)];
- if(classValueMap.size())
- cm.incrementResult(type2string<short>(classValueMap[refClassName]),type2string<short>(classValueMap[className]),1.0);
+ assert(svm_check_probability_model(svm));
+ predict_label = svm_predict_probability(svm,x_test,&(result[0]));
+ // predict_label = svm_predict(svm,x_test);
+ string refClassName=m_nameVector[iclass];
+ string className=m_nameVector[static_cast<short>(predict_label)];
+ if(m_classValueMap.size())
+ m_cm.incrementResult(type2string<short>(m_classValueMap[refClassName]),type2string<short>(m_classValueMap[className]),1.0);
else
- cm.incrementResult(refClassName,className,1.0);
+ m_cm.incrementResult(refClassName,className,1.0);
}
}
free(x_test);
}
- if(verbose_opt[0]>1)
- std::cout << cm << std::endl;
- assert(cm.nReference());
- // if(verbose_opt[0])
- // std::cout << cm << std::endl;
- // std::cout << "Kappa: " << cm.kappa() << std::endl;
+ if(m_verbose>1)
+ std::cout << m_cm << std::endl;
+ assert(m_cm.nReference());
+ // if(m_verbose)
+
+ // std::cout << m_cm << std::endl;
+ // std::cout << "Kappa: " << m_cm.kappa() << std::endl;
// double se95_oa=0;
// double doa=0;
- // doa=cm.oa_pct(&se95_oa);
+ // doa=m_cm.oa_pct(&se95_oa);
// std::cout << "Overall Accuracy: " << doa << " (" << se95_oa << ")" << std::endl;
// *NOTE* Because svm_model contains pointers to svm_problem, you can
@@ -212,7 +196,8 @@ double getCost(const vector<Vector2d<float> > &trainingFeatures)
free(prob.x);
free(x_space);
svm_free_and_destroy_model(&(svm));
- return(cm.kappa());
+
+ return(m_cm.kappa());
}
int main(int argc, char *argv[])
@@ -236,6 +221,23 @@ int main(int argc, char *argv[])
  Optionpk<string> selector_opt("sm", "sm", "feature selection method (sffs=sequential floating forward search, sfs=sequential forward search, sbs=sequential backward search, bfs=brute force search)","sffs");
Optionpk<float> epsilon_cost_opt("ecost", "ecost", "epsilon for stopping criterion in cost function to determine optimal number of features",0.001);
+ Optionpk<std::string> svm_type_opt("svmt", "svmtype", "type of SVM (C_SVC, nu_SVC,one_class, epsilon_SVR, nu_SVR)","C_SVC");
+ Optionpk<std::string> kernel_type_opt("kt", "kerneltype", "type of kernel function (linear,polynomial,radial,sigmoid) ","radial");
+ Optionpk<unsigned short> kernel_degree_opt("kd", "kd", "degree in kernel function",3);
+ Optionpk<float> gamma_opt("g", "gamma", "gamma in kernel function",1.0);
+ Optionpk<float> coef0_opt("c0", "coef0", "coef0 in kernel function",0);
+ Optionpk<float> ccost_opt("cc", "ccost", "the parameter C of C-SVC, epsilon-SVR, and nu-SVR",1000);
+ Optionpk<float> nu_opt("nu", "nu", "the parameter nu of nu-SVC, one-class SVM, and nu-SVR",0.5);
+ Optionpk<float> epsilon_loss_opt("eloss", "eloss", "the epsilon in loss function of epsilon-SVR",0.1);
+ Optionpk<int> cache_opt("cache", "cache", "cache memory size in MB",100);
+ Optionpk<float> epsilon_tol_opt("etol", "etol", "the tolerance of termination criterion",0.001);
+ Optionpk<bool> shrinking_opt("shrink", "shrink", "whether to use the shrinking heuristics",false);
+ Optionpk<bool> prob_est_opt("pe", "probest", "whether to train a SVC or SVR model for probability estimates",true,2);
+ Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",2);
+ Optionpk<string> classname_opt("c", "class", "list of class names.");
+  Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use the same order as in the classname option).");
+ Optionpk<short> verbose_opt("v", "verbose", "set to: 0 (results only), 1 (confusion matrix), 2 (debug)",0);
+
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
doProcess=input_opt.retrieveOption(argc,argv);
@@ -264,9 +266,9 @@ int main(int argc, char *argv[])
epsilon_tol_opt.retrieveOption(argc,argv);
shrinking_opt.retrieveOption(argc,argv);
prob_est_opt.retrieveOption(argc,argv);
- cv_opt.retrieveOption(argc,argv);
selector_opt.retrieveOption(argc,argv);
epsilon_cost_opt.retrieveOption(argc,argv);
+ cv_opt.retrieveOption(argc,argv);
classname_opt.retrieveOption(argc,argv);
classvalue_opt.retrieveOption(argc,argv);
verbose_opt.retrieveOption(argc,argv);
@@ -280,10 +282,11 @@ int main(int argc, char *argv[])
exit(0);//help was invoked, stop processing
}
+ CostFactorySVM costfactory(svm_type_opt[0], kernel_type_opt[0], kernel_degree_opt[0], gamma_opt[0], coef0_opt[0], ccost_opt[0], nu_opt[0], epsilon_loss_opt[0], cache_opt[0], epsilon_tol_opt[0], shrinking_opt[0], prob_est_opt[0], cv_opt[0], verbose_opt[0]);
+
assert(training_opt.size());
if(input_opt.size())
- cv_opt[0]=0;
-
+ costfactory.setCv(0);
if(verbose_opt[0]>=1){
if(input_opt.size())
std::cout << "input filename: " << input_opt[0] << std::endl;
@@ -307,11 +310,6 @@ int main(int argc, char *argv[])
int nband=0;
int startBand=2;//first two bands represent X and Y pos
- vector<double> offset;
- vector<double> scale;
- vector< Vector2d<float> > trainingPixels;//[class][sample][band]
- vector< Vector2d<float> > testPixels;//[class][sample][band]
-
// if(priors_opt.size()>1){//priors from argument list
// priors.resize(priors_opt.size());
// double normPrior=0;
@@ -328,21 +326,25 @@ int main(int argc, char *argv[])
if(band_opt.size())
std::sort(band_opt.begin(),band_opt.end());
- // map<string,short> classValueMap;//global variable for now (due to getCost)
if(classname_opt.size()){
assert(classname_opt.size()==classvalue_opt.size());
for(int iclass=0;iclass<classname_opt.size();++iclass)
- classValueMap[classname_opt[iclass]]=classvalue_opt[iclass];
+ costfactory.setClassValueMap(classname_opt[iclass],classvalue_opt[iclass]);
}
//----------------------------------- Training -------------------------------
- struct svm_problem prob;
+ vector<double> offset;
+ vector<double> scale;
+ vector< Vector2d<float> > trainingPixels;//[class][sample][band]
+ vector< Vector2d<float> > testPixels;//[class][sample][band]
+ map<string,Vector2d<float> > trainingMap;
+ map<string,Vector2d<float> > testMap;
vector<string> fields;
+
+ struct svm_problem prob;
//organize training data
trainingPixels.clear();
testPixels.clear();
- map<string,Vector2d<float> > trainingMap;
- map<string,Vector2d<float> > testMap;
if(verbose_opt[0]>=1)
std::cout << "reading training file " << training_opt[0] << std::endl;
try{
@@ -377,6 +379,12 @@ int main(int argc, char *argv[])
cerr << error << std::endl;
exit(1);
}
+ catch(std::exception& e){
+ std::cerr << "Error: ";
+ std::cerr << e.what() << std::endl;
+ std::cerr << CPLGetLastErrorMsg() << std::endl;
+ exit(1);
+ }
catch(...){
cerr << "error catched" << std::endl;
exit(1);
@@ -391,23 +399,12 @@ int main(int argc, char *argv[])
std::cout << "training pixels: " << std::endl;
map<string,Vector2d<float> >::iterator mapit=trainingMap.begin();
while(mapit!=trainingMap.end()){
- if(classValueMap.size()){
- //check if name in training is covered by classname_opt (values can not be 0)
- if(classValueMap[mapit->first]>0){
- if(verbose_opt[0])
- std::cout << mapit->first << " -> " << classValueMap[mapit->first] << std::endl;
- }
- else{
- std::cerr << "Error: names in classname option are not complete, please check names in training vector and make sure classvalue is > 0" << std::endl;
- exit(1);
- }
- }
//delete small classes
if((mapit->second).size()<minSize_opt[0]){
trainingMap.erase(mapit);
continue;
}
- nameVector.push_back(mapit->first);
+ costfactory.pushBackName(mapit->first);
trainingPixels.push_back(mapit->second);
if(verbose_opt[0]>1)
std::cout << mapit->first << ": " << (mapit->second).size() << " samples" << std::endl;
@@ -420,14 +417,15 @@ int main(int argc, char *argv[])
mapit=testMap.begin();
while(mapit!=testMap.end()){
- if(classValueMap.size()){
+ if(costfactory.getClassValueMap().size()){
+ // if(classValueMap.size()){
//check if name in test is covered by classname_opt (values can not be 0)
- if(classValueMap[mapit->first]>0){
- ;//ok, no need to print to std::cout
+ if((costfactory.getClassValueMap())[mapit->first]>0){
+ ;//ok, no need to print to std::cout
}
else{
- std::cerr << "Error: names in classname option are not complete, please check names in test vector and make sure classvalue is > 0" << std::endl;
- exit(1);
+ std::cerr << "Error: names in classname option are not complete, please check names in test vector and make sure classvalue is > 0" << std::endl;
+ exit(1);
}
}
//no need to delete small classes for test sample
@@ -518,7 +516,21 @@ int main(int argc, char *argv[])
// std::cout << std::endl;
}
+ //set names in confusion matrix using nameVector
+ vector<string> nameVector=costfactory.getNameVector();
+ for(int iname=0;iname<nameVector.size();++iname){
+ if(costfactory.getClassValueMap().empty())
+ costfactory.pushBackClassName(nameVector[iname]);
+ // cm.pushBackClassName(nameVector[iname]);
+ else if(costfactory.getClassIndex(type2string<short>((costfactory.getClassValueMap())[nameVector[iname]]))<0)
+ costfactory.pushBackClassName(type2string<short>((costfactory.getClassValueMap())[nameVector[iname]]));
+ }
+
+
//Calculate features of training (and test) set
+
+ vector<unsigned int> nctraining;
+ vector<unsigned int> nctest;
nctraining.resize(nclass);
nctest.resize(nclass);
vector< Vector2d<float> > trainingFeatures(nclass);
@@ -563,7 +575,9 @@ int main(int argc, char *argv[])
}
assert(trainingFeatures[iclass].size()==nctraining[iclass]+nctest[iclass]);
}
-
+
+ costfactory.setNcTraining(nctraining);
+ costfactory.setNcTest(nctest);
int nFeatures=trainingFeatures[0][0].size();
int maxFeatures=(maxFeatures_opt[0])? maxFeatures_opt[0] : 1;
double previousCost=-1;
@@ -571,36 +585,36 @@ int main(int argc, char *argv[])
list<int> subset;//set of selected features (levels) for each class combination
FeatureSelector selector;
try{
- if(maxFeatures==nFeatures){
+ if(maxFeatures>=nFeatures){
subset.clear();
for(int ifeature=0;ifeature<nFeatures;++ifeature)
subset.push_back(ifeature);
- cost=getCost(trainingFeatures);
+ cost=costfactory.getCost(trainingFeatures);
}
else{
- while(fabs(cost-previousCost)>epsilon_cost_opt[0]){
+ while(fabs(cost-previousCost)>=epsilon_cost_opt[0]){
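+        // Descriptive note: this loop re-runs the selected search strategy (sffs,
+        // sfs, sbs or bfs) with CostFactorySVM as cost function until the change
+        // in cost (kappa) drops below epsilon_cost_opt[0].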
previousCost=cost;
switch(selMap[selector_opt[0]]){
case(SFFS):
subset.clear();//needed to clear in case of floating and brute force search
- cost=selector.floating(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+ cost=selector.floating(trainingFeatures,costfactory,subset,maxFeatures,epsilon_cost_opt[0],verbose_opt[0]);
break;
case(SFS):
- cost=selector.forward(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+ cost=selector.forward(trainingFeatures,costfactory,subset,maxFeatures,verbose_opt[0]);
break;
case(SBS):
- cost=selector.backward(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+ cost=selector.backward(trainingFeatures,costfactory,subset,maxFeatures,verbose_opt[0]);
break;
case(BFS):
subset.clear();//needed to clear in case of floating and brute force search
- cost=selector.bruteForce(trainingFeatures,&getCost,subset,maxFeatures,verbose_opt[0]);
+ cost=selector.bruteForce(trainingFeatures,costfactory,subset,maxFeatures,verbose_opt[0]);
break;
default:
std::cout << "Error: selector not supported, please use sffs, sfs, sbs or bfs" << std::endl;
exit(1);
break;
}
- if(verbose_opt[0]){
+ if(verbose_opt[0]>1){
std::cout << "cost: " << cost << std::endl;
std::cout << "previousCost: " << previousCost << std::endl;
std::cout << std::setprecision(12) << "cost-previousCost: " << cost - previousCost << " ( " << epsilon_cost_opt[0] << ")" << std::endl;
@@ -619,6 +633,7 @@ int main(int argc, char *argv[])
if(verbose_opt[0])
cout <<"cost: " << cost << endl;
+ subset.sort();
for(list<int>::const_iterator lit=subset.begin();lit!=subset.end();++lit)
std::cout << " -b " << *lit;
std::cout << std::endl;
diff --git a/src/apps/pkfssvm.h b/src/apps/pkfssvm.h
new file mode 100644
index 0000000..c4d3f72
--- /dev/null
+++ b/src/apps/pkfssvm.h
@@ -0,0 +1,57 @@
+/**********************************************************************
+pkfssvm.h: feature selection for svm classifier
+Copyright (C) 2008-2014 Pieter Kempeneers
+
+This file is part of pktools
+
+pktools is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+pktools is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with pktools. If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+#include <string>
+#include <vector>
+#include "base/Vector2d.h"
+#include "algorithms/CostFactory.h"
+
+#ifndef _PKFSSVMH_H_
+#define _PKFSSVMH_H_
+namespace svm{
+ enum SVM_TYPE {C_SVC=0, nu_SVC=1,one_class=2, epsilon_SVR=3, nu_SVR=4};
+ enum KERNEL_TYPE {linear=0,polynomial=1,radial=2,sigmoid=3};
+}
+
+enum SelectorValue { NA=0, SFFS=1, SFS=2, SBS=3, BFS=4};
+
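+// Descriptive note: CostFactorySVM wraps libsvm training for the feature
+// selection in pkfssvm.cc. getCost() trains an SVM on the currently selected
+// features (n-fold cross-validation when cv>1, otherwise a held-out test set)
+// and returns the kappa statistic of the resulting confusion matrix as cost.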
+class CostFactorySVM : public CostFactory
+{
+public:
+CostFactorySVM();
+CostFactorySVM(std::string svm_type, std::string kernel_type, unsigned short kernel_degree, float gamma, float coef0, float ccost, float nu, float epsilon_loss, int cache, float epsilon_tol, bool shrinking, bool prob_est, unsigned short cv, bool verbose);
+~CostFactorySVM();
+double getCost(const std::vector<Vector2d<float> > &trainingFeatures);
+
+private:
+std::string m_svm_type;
+std::string m_kernel_type;
+unsigned short m_kernel_degree;
+float m_gamma;
+float m_coef0;
+float m_ccost;
+float m_nu;
+float m_epsilon_loss;
+int m_cache;
+float m_epsilon_tol;
+bool m_shrinking;
+bool m_prob_est;
+};
+
+
+#endif
diff --git a/src/apps/pkkalman.cc b/src/apps/pkkalman.cc
new file mode 100644
index 0000000..860b90b
--- /dev/null
+++ b/src/apps/pkkalman.cc
@@ -0,0 +1,1004 @@
+/**********************************************************************
+pkkalman.cc: program to assimilate a time series of model and observation raster datasets using a Kalman filter (forward, backward and smoothing passes)
+Copyright (C) 2008-2014 Pieter Kempeneers
+
+This file is part of pktools
+
+pktools is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+pktools is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with pktools. If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+#include <sstream>
+#include <vector>
+#include <algorithm>
+#include "base/Optionpk.h"
+#include "base/Vector2d.h"
+#include "imageclasses/ImgReaderGdal.h"
+#include "imageclasses/ImgWriterGdal.h"
+#include "algorithms/StatFactory.h"
+#include "algorithms/ImgRegression.h"
+
+using namespace std;
+/*------------------
+ Main procedure
+ ----------------*/
+int main(int argc,char **argv) {
+ Optionpk<string> direction_opt("dir","direction","direction to run model (forward|backward|smooth)","forward");
+  Optionpk<string> model_opt("mod","model","model input datasets, e.g., MODIS (use: -mod model1 -mod model2 etc.)");
+  Optionpk<string> observation_opt("obs","observation","observation input datasets, e.g., landsat (use: -obs obs1 -obs obs2 etc.)");
+  Optionpk<int> tmodel_opt("tmod","tmodel","time sequence of model input. Sequence must have exactly the same length as model input. Leave empty to have default sequence 0,1,2,etc.");
+  Optionpk<int> tobservation_opt("tobs","tobservation","time sequence of observation input. Sequence must have exactly the same length as observation input.");
+  Optionpk<string> projection_opt("a_srs", "a_srs", "Override the projection for the output file (leave blank to copy from input file, use epsg:3035 to use European projection and force to European grid)");
+ Optionpk<string> outputfw_opt("ofw", "outputfw", "Output raster dataset for forward model");
+ Optionpk<string> outputbw_opt("obw", "outputbw", "Output raster dataset for backward model");
+ Optionpk<string> outputfb_opt("ofb", "outputfb", "Output raster dataset for smooth model");
+ Optionpk<float> threshold_opt("th", "threshold", "threshold for selecting samples (randomly). Provide probability in percentage (>0) or absolute (<0).", 0);
+ Optionpk<double> modnodata_opt("modnodata", "modnodata", "invalid value for model input", 0);
+ Optionpk<double> obsnodata_opt("obsnodata", "obsnodata", "invalid value for observation input", 0);
+  Optionpk<double> modoffset_opt("modoffset", "modoffset", "offset used to read model input dataset (value=offset+scale*readValue)", 0);
+  Optionpk<double> obsoffset_opt("obsoffset", "obsoffset", "offset used to read observation input dataset (value=offset+scale*readValue)", 0);
+  Optionpk<double> modscale_opt("modscale", "modscale", "scale used to read model input dataset (value=offset+scale*readValue)", 1);
+  Optionpk<double> obsscale_opt("obsscale", "obsscale", "scale used to read observation input dataset (value=offset+scale*readValue)", 1);
+  Optionpk<double> eps_opt("eps", "eps", "epsilon to avoid division by zero", 0.00001);
+  Optionpk<double> uncertModel_opt("um", "uncertmodel", "Multiply this value by the standard deviation of the first model image to obtain the model uncertainty",2);
+ Optionpk<double> uncertObs_opt("uo", "uncertobs", "Uncertainty of valid observations",0);
+ Optionpk<double> uncertNodata_opt("unodata", "uncertnodata", "Uncertainty in case of no-data values in observation", 10000);
+ // Optionpk<double> regTime_opt("rt", "regtime", "Relative Weight for regression in time series", 1.0);
+ // Optionpk<double> regSensor_opt("rs", "regsensor", "Relative Weight for regression in sensor series", 1.0);
+ Optionpk<int> down_opt("down", "down", "Downsampling factor for reading model data to calculate regression", 9);
+ Optionpk<string> oformat_opt("of", "oformat", "Output image format (see also gdal_translate). Empty string: inherit from input image","GTiff",2);
+ Optionpk<string> option_opt("co", "co", "Creation option for output file. Multiple options can be specified.");
+ Optionpk<short> verbose_opt("v", "verbose", "verbose mode when positive", 0);
+
+ bool doProcess;//stop process when program was invoked with help option (-h --help)
+ try{
+ doProcess=direction_opt.retrieveOption(argc,argv);
+ model_opt.retrieveOption(argc,argv);
+ observation_opt.retrieveOption(argc,argv);
+ tmodel_opt.retrieveOption(argc,argv);
+ tobservation_opt.retrieveOption(argc,argv);
+ projection_opt.retrieveOption(argc,argv);
+ outputfw_opt.retrieveOption(argc,argv);
+ outputbw_opt.retrieveOption(argc,argv);
+ outputfb_opt.retrieveOption(argc,argv);
+ threshold_opt.retrieveOption(argc,argv);
+ modnodata_opt.retrieveOption(argc,argv);
+ obsnodata_opt.retrieveOption(argc,argv);
+ modoffset_opt.retrieveOption(argc,argv);
+ modscale_opt.retrieveOption(argc,argv);
+ obsoffset_opt.retrieveOption(argc,argv);
+ obsscale_opt.retrieveOption(argc,argv);
+ eps_opt.retrieveOption(argc,argv);
+ uncertModel_opt.retrieveOption(argc,argv);
+ uncertObs_opt.retrieveOption(argc,argv);
+ uncertNodata_opt.retrieveOption(argc,argv);
+ // regTime_opt.retrieveOption(argc,argv);
+ // regSensor_opt.retrieveOption(argc,argv);
+ down_opt.retrieveOption(argc,argv);
+ oformat_opt.retrieveOption(argc,argv);
+ option_opt.retrieveOption(argc,argv);
+ verbose_opt.retrieveOption(argc,argv);
+ }
+ catch(string predefinedString){
+ std::cout << predefinedString << std::endl;
+ exit(0);
+ }
+ if(!doProcess){
+ std::cout << "short option -h shows basic options only, use long option --help to show all options" << std::endl;
+ exit(0);//help was invoked, stop processing
+ }
+
+ try{
+ ostringstream errorStream;
+ if(model_opt.size()<2){
+      errorStream << "Error: at least two model datasets are required, use option -mod" << endl;
+ throw(errorStream.str());
+ }
+ if(observation_opt.size()<1){
+ errorStream << "Error: no observation dataset selected, use option -obs" << endl;
+ throw(errorStream.str());
+ }
+ if(direction_opt[0]=="smooth"){
+ if(outputfw_opt.empty()){
+ errorStream << "Error: output forward datasets is not provided, use option -ofw" << endl;
+ throw(errorStream.str());
+ }
+ if(outputbw_opt.empty()){
+ errorStream << "Error: output backward datasets is not provided, use option -obw" << endl;
+ throw(errorStream.str());
+ }
+ if(outputfb_opt.empty()){
+ errorStream << "Error: output smooth datasets is not provided, use option -ofb" << endl;
+ throw(errorStream.str());
+ }
+ }
+ else{
+ if(direction_opt[0]=="forward"&&outputfw_opt.empty()){
+ errorStream << "Error: output forward datasets is not provided, use option -ofw" << endl;
+ throw(errorStream.str());
+ }
+ else if(direction_opt[0]=="backward"&&outputbw_opt.empty()){
+ errorStream << "Error: output backward datasets is not provided, use option -obw" << endl;
+ throw(errorStream.str());
+ }
+
+ if(model_opt.size()<observation_opt.size()){
+      errorStream << "Error: number of model datasets must be at least the number of observation datasets" << endl;
+ throw(errorStream.str());
+ }
+ if(tmodel_opt.size()!=model_opt.size()){
+ if(tmodel_opt.empty())
+ cout << "Warning: time sequence is not provided, self generating time sequence from 0 to " << model_opt.size() << endl;
+ else
+ cout << "Warning: time sequence provided (" << tmodel_opt.size() << ") does not match number of model raster datasets (" << model_opt.size() << ")" << endl;
+ tmodel_opt.clear();
+ for(int tindex=0;tindex<model_opt.size();++tindex)
+ tmodel_opt.push_back(tindex);
+ }
+ if(tobservation_opt.size()!=observation_opt.size()){
+ errorStream << "Error: time sequence for observation must match size of observation dataset" << endl;
+ throw(errorStream.str());
+ }
+ }
+ }
+ catch(string errorString){
+ std::cout << errorString << std::endl;
+ exit(1);
+ }
+
+ statfactory::StatFactory stat;
+ imgregression::ImgRegression imgreg;
+ // vector<ImgReaderGdal> imgReaderModel(model_opt.size());
+ // vector<ImgReaderGdal> imgReaderObs(observation_opt.size());
+ ImgReaderGdal imgReaderModel1;
+ ImgReaderGdal imgReaderModel2;
+ ImgReaderGdal imgReaderObs;
+ ImgWriterGdal imgWriterEst;
+
+ imgReaderObs.open(observation_opt[0]);
+
+ int ncol=imgReaderObs.nrOfCol();
+ int nrow=imgReaderObs.nrOfRow();
+ if(projection_opt.empty())
+ projection_opt.push_back(imgReaderObs.getProjection());
+ double geotransform[6];
+ imgReaderObs.getGeoTransform(geotransform);
+
+ string imageType=imgReaderObs.getImageType();
+ if(oformat_opt.size())//default
+ imageType=oformat_opt[0];
+ if(option_opt.findSubstring("INTERLEAVE=")==option_opt.end()){
+ string theInterleave="INTERLEAVE=";
+ theInterleave+=imgReaderObs.getInterleave();
+ option_opt.push_back(theInterleave);
+ }
+
+ imgReaderObs.close();
+
+ int obsindex=0;
+
+ const char* pszMessage;
+ void* pProgressArg=NULL;
+ GDALProgressFunc pfnProgress=GDALTermProgress;
+ double progress=0;
+
+ imgreg.setDown(down_opt[0]);
+ imgreg.setThreshold(threshold_opt[0]);
+
+ double c0obs=0;
+ double c1obs=0;
+ double errObs=uncertNodata_opt[0];//start with high initial value in case we do not have first observation at time 0
+
+ vector<int> relobsindex;
+ // cout << tmodel_opt << endl;
+ // cout << tobservation_opt << endl;
+
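+  // relobsindex maps each observation to a model time step: for every
+  // observation time, lower_bound returns the index of the first model time
+  // that is not smaller, so that observation triggers an update at that step
+  // (explanatory comment, behaviour inferred from the loop below).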
+ for(int tindex=0;tindex<tobservation_opt.size();++tindex){
+ vector<int>::iterator modit;
+ modit=lower_bound(tmodel_opt.begin(),tmodel_opt.end(),tobservation_opt[tindex]);
+ int relpos=modit-tmodel_opt.begin();
+ // if(relpos<0)
+ // relpos=0;
+ relobsindex.push_back(relpos);
+ if(verbose_opt[0])
+ cout << "tobservation_opt[tindex] " << tobservation_opt[tindex] << " " << relobsindex.back() << " " << observation_opt[tindex] << " " << model_opt[relpos] << endl;
+ // if(verbose_opt[0])
+ // cout << "tobservation_opt[tindex] " << tobservation_opt[tindex] << " " << relobsindex.back() << endl;
+ }
+
+ if(find(direction_opt.begin(),direction_opt.end(),"forward")!=direction_opt.end()){
+ ///////////////////////////// forward model /////////////////////////
+ obsindex=0;
+ if(verbose_opt[0])
+ cout << "Running forward model" << endl;
+ //initialization
+ string output;
+ if(outputfw_opt.size()==model_opt.size())
+ output=outputfw_opt[0];
+ else{
+ ostringstream outputstream;
+ outputstream << outputfw_opt[0] << "_" << tmodel_opt[0] << ".tif";
+ output=outputstream.str();
+ }
+ if(verbose_opt[0])
+ cout << "Opening image " << output << " for writing " << endl;
+ imgWriterEst.open(output,ncol,nrow,2,GDT_Float32,imageType,option_opt);
+ imgWriterEst.setProjectionProj4(projection_opt[0]);
+ imgWriterEst.setGeoTransform(geotransform);
+ imgWriterEst.GDALSetNoDataValue(obsnodata_opt[0]);
+
+ if(verbose_opt[0]){
+ cout << "processing time " << tmodel_opt[0] << endl;
+ if(obsindex<relobsindex.size())
+ cout << "next observation " << tmodel_opt[relobsindex[obsindex]] << endl;
+ else
+ cout << "There is no next observation" << endl;
+ }
+
+ try{
+ imgReaderModel1.open(model_opt[0]);
+ imgReaderModel1.setNoData(modnodata_opt);
+ imgReaderModel1.setOffset(modoffset_opt[0]);
+ imgReaderModel1.setScale(modscale_opt[0]);
+ }
+ catch(string errorString){
+ cerr << errorString << endl;
+ }
+ catch(...){
+ cerr << "Error opening file " << model_opt[0] << endl;
+ }
+
+ //calculate standard deviation of image to serve as model uncertainty
+ GDALRasterBand* rasterBand;
+ rasterBand=imgReaderModel1.getRasterBand(0);
+ double minValue, maxValue, meanValue, stdDev;
+    void* pProgressData=NULL;
+ rasterBand->ComputeStatistics(0,&minValue,&maxValue,&meanValue,&stdDev,pfnProgress,pProgressData);
+ double x=0;
+ double y=0;
+ double modRow=0;
+ double modCol=0;
+    if(relobsindex[0]>0){//no observation at time 0: initialize first output as model[0]
+ //write first model as output
+ if(verbose_opt[0])
+ cout << "write first model as output" << endl;
+ for(int irow=0;irow<nrow;++irow){
+ vector<double> estReadBuffer;
+ vector<double> estWriteBuffer(ncol);
+ vector<double> uncertWriteBuffer(ncol);
+ imgWriterEst.image2geo(0,irow,x,y);
+ imgReaderModel1.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel1.nrOfRow());
+ try{
+ imgReaderModel1.readData(estReadBuffer,GDT_Float64,modRow);
+ //simple nearest neighbor
+ stat.nearUp(estReadBuffer,estWriteBuffer);
+ imgWriterEst.writeData(estWriteBuffer,GDT_Float64,irow,0);
+ for(int icol=0;icol<ncol;++icol)
+ uncertWriteBuffer[icol]=uncertModel_opt[0]*stdDev;
+ imgWriterEst.writeData(uncertWriteBuffer,GDT_Float64,irow,1);
+ }
+ catch(string errorString){
+ cerr << errorString << endl;
+ }
+ catch(...){
+ cerr << "Error writing file " << imgWriterEst.getFileName() << endl;
+ }
+ }
+ }
+ else{//we have an observation at time 0
+ if(verbose_opt[0])
+ cout << "we have an observation at time 0" << endl;
+ imgReaderObs.open(observation_opt[0]);
+ imgReaderObs.getGeoTransform(geotransform);
+ imgReaderObs.setNoData(obsnodata_opt);
+ imgReaderObs.setOffset(obsoffset_opt[0]);
+ imgReaderObs.setScale(obsscale_opt[0]);
+ for(int irow=0;irow<nrow;++irow){
+ vector<double> estReadBuffer;
+ imgWriterEst.image2geo(0,irow,x,y);
+ imgReaderModel1.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel1.nrOfRow());
+ imgReaderModel1.readData(estReadBuffer,GDT_Float64,modRow);
+ vector<double> estWriteBuffer(ncol);
+ vector<double> uncertWriteBuffer(ncol);
+ vector<double> uncertObsBuffer;
+ imgReaderObs.readData(estWriteBuffer,GDT_Float64,irow,0);
+ if(imgReaderObs.nrOfBand()>1)
+ imgReaderObs.readData(uncertObsBuffer,GDT_Float64,irow,1);
+ for(int icol=0;icol<ncol;++icol){
+ if(imgReaderObs.isNoData(estWriteBuffer[icol])){
+ imgWriterEst.image2geo(icol,irow,x,y);
+ imgReaderModel1.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel1.nrOfRow());
+ if(imgReaderModel1.isNoData(estReadBuffer[modCol]))//if both obs and model are no-data, set obs to nodata
+ estWriteBuffer[icol]=obsnodata_opt[0];
+ else
+ estWriteBuffer[icol]=estReadBuffer[modCol];
+ uncertWriteBuffer[icol]=uncertModel_opt[0]*stdDev;
+ }
+ else{
+ double uncertObs=uncertObs_opt[0];
+ if(uncertObsBuffer.size()>icol)
+ uncertObs=uncertObsBuffer[icol];
+ if(uncertModel_opt[0]*stdDev+uncertObs>eps_opt[0]){
+ imgWriterEst.image2geo(icol,irow,x,y);
+ imgReaderModel1.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel1.nrOfRow());
+ // double noemer=uncertObs*uncertObs+stdDev*stdDev;//todo: multiply stdDev with uncertModel_opt[0]
+ // estWriteBuffer[icol]*=uncertModel_opt[0]*stdDev*stdDev/noemer;
+ // estWriteBuffer[icol]+=uncertModel_opt[0]*uncertObs*uncertObs/noemer;//todo:check! error?
+ double kalmanGain=1;
+ if(!imgReaderModel1.isNoData(estReadBuffer[modCol])){
+ //if model is no-data, retain observation value
+ kalmanGain=uncertModel_opt[0]*stdDev/(uncertModel_opt[0]*stdDev+uncertObs);
+ estWriteBuffer[icol]=estReadBuffer[modCol]+kalmanGain*(estWriteBuffer[icol]-estReadBuffer[modCol]);
+ }
+ uncertWriteBuffer[icol]=uncertModel_opt[0]*stdDev*(1-kalmanGain);
+ }
+ else{
+	      //no need to fill write buffer (already done in imgReaderObs.readData)
+ uncertWriteBuffer[icol]=uncertObs;
+ }
+ }
+ }
+ imgWriterEst.writeData(estWriteBuffer,GDT_Float64,irow,0);
+ imgWriterEst.writeData(uncertWriteBuffer,GDT_Float64,irow,1);
+ }
+ imgReaderObs.close();
+ ++obsindex;
+ }
+ imgReaderModel1.close();
+ imgWriterEst.close();
+
+ for(int modindex=1;modindex<model_opt.size();++modindex){
+ if(verbose_opt[0]){
+ cout << "processing time " << tmodel_opt[modindex] << endl;
+ if(obsindex<relobsindex.size())
+ cout << "next observation " << tmodel_opt[relobsindex[obsindex]] << endl;
+ else
+ cout << "There is no next observation" << endl;
+ }
+ string output;
+ if(outputfw_opt.size()==model_opt.size())
+ output=outputfw_opt[modindex];
+ else{
+ ostringstream outputstream;
+ outputstream << outputfw_opt[0] << "_" << tmodel_opt[modindex] << ".tif";
+ // outputstream << output_opt[0] << "_" << modindex+1 << ".tif";
+ output=outputstream.str();
+ }
+
+ //two band output band0=estimation, band1=uncertainty
+ imgWriterEst.open(output,ncol,nrow,2,GDT_Float32,imageType,option_opt);
+ imgWriterEst.setProjectionProj4(projection_opt[0]);
+ imgWriterEst.setGeoTransform(geotransform);
+ imgWriterEst.GDALSetNoDataValue(obsnodata_opt[0]);
+
+ //calculate regression between two subsequence model inputs
+ imgReaderModel1.open(model_opt[modindex-1]);
+ imgReaderModel1.setNoData(modnodata_opt);
+ imgReaderModel1.setOffset(modoffset_opt[0]);
+ imgReaderModel1.setScale(modscale_opt[0]);
+ imgReaderModel2.open(model_opt[modindex]);
+ imgReaderModel2.setNoData(modnodata_opt);
+ imgReaderModel2.setOffset(modoffset_opt[0]);
+ imgReaderModel2.setScale(modscale_opt[0]);
+ //calculate regression
+ //we could re-use the points from second image from last run, but
+ //to keep it general, we must redo it (overlap might have changed)
+
+ pfnProgress(progress,pszMessage,pProgressArg);
+ double c0mod=0;
+ double c1mod=0;
+
+ if(verbose_opt[0])
+ cout << "Calculating regression for " << imgReaderModel1.getFileName() << " " << imgReaderModel2.getFileName() << endl;
+ double errMod=imgreg.getRMSE(imgReaderModel1,imgReaderModel2,c0mod,c1mod);
+ // double errMod=imgreg.getRMSE(imgReaderModel1,imgReaderModel2,c0mod,c1mod,verbose_opt[0]);
+
+ bool update=false;
+ if(obsindex<relobsindex.size()){
+ update=(relobsindex[obsindex]==modindex);
+ }
+ if(update){
+ if(verbose_opt[0])
+ cout << "***update " << relobsindex[obsindex] << " = " << modindex << " " << observation_opt[obsindex] << " ***" << endl;
+
+ imgReaderObs.open(observation_opt[obsindex]);
+ imgReaderObs.getGeoTransform(geotransform);
+ imgReaderObs.setNoData(obsnodata_opt);
+ imgReaderObs.setOffset(obsoffset_opt[0]);
+ imgReaderObs.setScale(obsscale_opt[0]);
+ //calculate regression between model and observation
+ if(verbose_opt[0])
+ cout << "Calculating regression for " << imgReaderModel2.getFileName() << " " << imgReaderObs.getFileName() << endl;
+ errObs=imgreg.getRMSE(imgReaderModel2,imgReaderObs,c0obs,c1obs,verbose_opt[0]);
+ if(verbose_opt[0])
+ cout << "c0obs, c1obs: " << c0obs << ", " << c1obs << endl;
+ }
+ //prediction (also to fill cloudy pixels in update mode)
+ string input;
+ if(outputfw_opt.size()==model_opt.size())
+ input=outputfw_opt[modindex-1];
+ else{
+ ostringstream outputstream;
+ outputstream << outputfw_opt[0] << "_" << tmodel_opt[modindex-1] << ".tif";
+ input=outputstream.str();
+ }
+ ImgReaderGdal imgReaderEst(input);
+ imgReaderEst.setNoData(obsnodata_opt);
+ imgReaderEst.setOffset(obsoffset_opt[0]);
+ imgReaderEst.setScale(obsscale_opt[0]);
+
+ vector<double> obsBuffer;
+ vector<double> modelBuffer;
+ vector<double> uncertObsBuffer;
+ vector<double> estReadBuffer;
+ vector<double> uncertReadBuffer;
+ vector<double> estWriteBuffer(ncol);
+ vector<double> uncertWriteBuffer(ncol);
+
+ for(int irow=0;irow<imgWriterEst.nrOfRow();++irow){
+ assert(irow<imgReaderEst.nrOfRow());
+ imgReaderEst.readData(estReadBuffer,GDT_Float64,irow,0);
+ imgReaderEst.readData(uncertReadBuffer,GDT_Float64,irow,1);
+ //read model2 in case current estimate is nodata
+ imgReaderEst.image2geo(0,irow,x,y);
+ imgReaderModel2.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel2.nrOfRow());
+ imgReaderModel2.readData(modelBuffer,GDT_Float64,modRow,0);
+ if(update){
+ imgReaderObs.readData(obsBuffer,GDT_Float64,irow,0);
+ if(imgReaderObs.nrOfBand()>1)
+ imgReaderObs.readData(uncertObsBuffer,GDT_Float64,irow,1);
+ }
+ for(int icol=0;icol<imgWriterEst.nrOfCol();++icol){
+ double estValue=estReadBuffer[icol];
+ //time update
+ if(imgReaderEst.isNoData(estValue)){
+ //pk: in case we have not found any valid data yet, better here to take the current model value
+ imgReaderEst.image2geo(icol,irow,x,y);
+ imgReaderModel2.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel2.nrOfRow());
+ if(imgReaderModel2.isNoData(modelBuffer[modCol])){//if both estimate and model are no-data, set obs to nodata
+ estWriteBuffer[icol]=obsnodata_opt[0];
+ uncertWriteBuffer[icol]=uncertNodata_opt[0];
+ }
+ else{
+ estWriteBuffer[icol]=modelBuffer[modCol];
+ uncertWriteBuffer[icol]=uncertModel_opt[0]*stdDev;
+ }
+ }
+ else{
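+	    // Time update (prediction), as read from the code: two linear predictors,
+	    //   regTime   = c0mod + c1mod*x  (regression between consecutive model images)
+	    //   regSensor = c0obs + c1obs*x  (regression between model and latest observation)
+	    // are blended with inverse-variance weights errObs^2/(errMod^2+errObs^2) and
+	    // errMod^2/(errMod^2+errObs^2); the combined uncertainty
+	    // sqrt(1/(1/errMod^2+1/errObs^2)) is added to the propagated uncertainty.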
+ double certNorm=(errMod*errMod+errObs*errObs);
+ double certMod=errObs*errObs/certNorm;
+ double certObs=errMod*errMod/certNorm;
+ double regTime=(c0mod+c1mod*estValue)*certMod;
+ double regSensor=(c0obs+c1obs*estValue)*certObs;
+ estWriteBuffer[icol]=regTime+regSensor;
+ double totalUncertainty=0;
+ if(errMod<eps_opt[0])
+ totalUncertainty=errObs;
+ else if(errObs<eps_opt[0])
+ totalUncertainty=errMod;
+ else{
+ totalUncertainty=1.0/errMod/errMod+1/errObs/errObs;
+ totalUncertainty=sqrt(1.0/totalUncertainty);
+ }
+ uncertWriteBuffer[icol]=totalUncertainty+uncertReadBuffer[icol];
+ }
+ //observation update
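+	  // Observation update: a scalar Kalman-style correction per pixel (sketch,
+	  // assuming the stored uncertainties play the role of variances):
+	  //   K = P_pred / (P_pred + R_obs)
+	  //   x = x_pred + K * (z_obs - x_pred)
+	  //   P = (1 - K) * P_pred
+	  // with P_pred = uncertWriteBuffer[icol] and R_obs = uncertObs.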
+ if(update&&!imgReaderObs.isNoData(obsBuffer[icol])){
+ double kalmanGain=1;
+ double uncertObs=uncertObs_opt[0];
+ if(uncertObsBuffer.size()>icol)
+ uncertObs=uncertObsBuffer[icol];
+ if((uncertWriteBuffer[icol]+uncertObs)>eps_opt[0])
+ kalmanGain=uncertWriteBuffer[icol]/(uncertWriteBuffer[icol]+uncertObs);
+ assert(kalmanGain<=1);
+ estWriteBuffer[icol]+=kalmanGain*(obsBuffer[icol]-estWriteBuffer[icol]);
+ uncertWriteBuffer[icol]*=(1-kalmanGain);
+ }
+ }
+ imgWriterEst.writeData(estWriteBuffer,GDT_Float64,irow,0);
+ imgWriterEst.writeData(uncertWriteBuffer,GDT_Float64,irow,1);
+ progress=static_cast<float>((irow+1.0)/imgWriterEst.nrOfRow());
+ pfnProgress(progress,pszMessage,pProgressArg);
+ }
+
+ imgWriterEst.close();
+ imgReaderEst.close();
+
+ if(update){
+ imgReaderObs.close();
+ ++obsindex;
+ }
+ imgReaderModel1.close();
+ imgReaderModel2.close();
+ }
+ }
+ if(find(direction_opt.begin(),direction_opt.end(),"backward")!=direction_opt.end()){
+ ///////////////////////////// backward model /////////////////////////
+ obsindex=relobsindex.size()-1;
+ if(verbose_opt[0])
+ cout << "Running backward model" << endl;
+ //initialization
+ string output;
+ if(outputbw_opt.size()==model_opt.size())
+ output=outputbw_opt.back();
+ else{
+ ostringstream outputstream;
+ outputstream << outputbw_opt[0] << "_" << tmodel_opt.back() << ".tif";
+ output=outputstream.str();
+ }
+ if(verbose_opt[0])
+ cout << "Opening image " << output << " for writing " << endl;
+ imgWriterEst.open(output,ncol,nrow,2,GDT_Float32,imageType,option_opt);
+ imgWriterEst.setProjectionProj4(projection_opt[0]);
+ imgWriterEst.setGeoTransform(geotransform);
+ imgWriterEst.GDALSetNoDataValue(obsnodata_opt[0]);
+
+ if(verbose_opt[0]){
+ cout << "processing time " << tmodel_opt.back() << endl;
+ if(obsindex<relobsindex.size())
+ cout << "next observation " << tmodel_opt[relobsindex[obsindex]] << endl;
+ else
+ cout << "There is no next observation" << endl;
+ }
+
+ try{
+ imgReaderModel1.open(model_opt.back());
+ imgReaderModel1.setNoData(modnodata_opt);
+ imgReaderModel1.setOffset(modoffset_opt[0]);
+ imgReaderModel1.setScale(modscale_opt[0]);
+ }
+ catch(string errorString){
+ cerr << errorString << endl;
+ }
+ catch(...){
+ cerr << "Error opening file " << model_opt[0] << endl;
+ }
+
+ //calculate standard deviation of image to serve as model uncertainty
+ GDALRasterBand* rasterBand;
+ rasterBand=imgReaderModel1.getRasterBand(0);
+ double minValue, maxValue, meanValue, stdDev;
+    void* pProgressData=NULL;
+ rasterBand->ComputeStatistics(0,&minValue,&maxValue,&meanValue,&stdDev,pfnProgress,pProgressData);
+ double x=0;
+ double y=0;
+ double modRow=0;
+ double modCol=0;
+    if(relobsindex.back()<model_opt.size()-1){//no observation at end time: initialize last output as model.back()
+ //write last model as output
+ if(verbose_opt[0])
+ cout << "write last model as output" << endl;
+ // for(int irow=0;irow<imgWriterEst.nrOfRow();++irow){
+ for(int irow=0;irow<nrow;++irow){
+ vector<double> estReadBuffer;
+ vector<double> estWriteBuffer(ncol);
+ vector<double> uncertWriteBuffer(ncol);
+ imgWriterEst.image2geo(0,irow,x,y);
+ imgReaderModel1.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel1.nrOfRow());
+ try{
+ imgReaderModel1.readData(estReadBuffer,GDT_Float64,modRow);
+ //simple nearest neighbor
+ stat.nearUp(estReadBuffer,estWriteBuffer);
+ imgWriterEst.writeData(estWriteBuffer,GDT_Float64,irow,0);
+ for(int icol=0;icol<imgWriterEst.nrOfCol();++icol)
+ uncertWriteBuffer[icol]=uncertModel_opt[0]*stdDev;
+ imgWriterEst.writeData(uncertWriteBuffer,GDT_Float64,irow,1);
+ }
+ catch(string errorString){
+ cerr << errorString << endl;
+ }
+ catch(...){
+ cerr << "Error writing file " << imgWriterEst.getFileName() << endl;
+ }
+ }
+ }
+ else{//we have an observation at end time
+ if(verbose_opt[0])
+ cout << "we have an observation at end time" << endl;
+ imgReaderObs.open(observation_opt.back());
+ imgReaderObs.getGeoTransform(geotransform);
+ imgReaderObs.setNoData(obsnodata_opt);
+ imgReaderObs.setOffset(obsoffset_opt[0]);
+ imgReaderObs.setScale(obsscale_opt[0]);
+ for(int irow=0;irow<nrow;++irow){
+ vector<double> estReadBuffer;
+ imgWriterEst.image2geo(0,irow,x,y);
+ imgReaderModel1.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel1.nrOfRow());
+ imgReaderModel1.readData(estReadBuffer,GDT_Float64,modRow);
+ vector<double> estWriteBuffer(ncol);
+ vector<double> uncertWriteBuffer(ncol);
+ vector<double> uncertObsBuffer;
+ imgReaderObs.readData(estWriteBuffer,GDT_Float64,irow,0);
+ if(imgReaderObs.nrOfBand()>1)
+ imgReaderObs.readData(uncertObsBuffer,GDT_Float64,irow,1);
+ for(int icol=0;icol<imgWriterEst.nrOfCol();++icol){
+ if(imgReaderObs.isNoData(estWriteBuffer[icol])){
+ imgWriterEst.image2geo(icol,irow,x,y);
+ imgReaderModel1.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel1.nrOfRow());
+ if(imgReaderModel1.isNoData(estReadBuffer[modCol]))//if both obs and model are no-data, set obs to nodata
+ estWriteBuffer[icol]=obsnodata_opt[0];
+ else
+ estWriteBuffer[icol]=estReadBuffer[modCol];
+ uncertWriteBuffer[icol]=uncertModel_opt[0]*stdDev;
+ }
+ else{
+ double uncertObs=uncertObs_opt[0];
+ if(uncertObsBuffer.size()>icol)
+ uncertObs=uncertObsBuffer[icol];
+ if(uncertModel_opt[0]*stdDev+uncertObs>eps_opt[0]){
+ imgWriterEst.image2geo(icol,irow,x,y);
+ imgReaderModel1.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel1.nrOfRow());
+ // double noemer=uncertObs*uncertObs+stdDev*stdDev;//todo: multiply stdDev with uncertModel_opt[0]
+ // estWriteBuffer[icol]*=uncertModel_opt[0]*stdDev*stdDev/noemer;
+ // estWriteBuffer[icol]+=uncertModel_opt[0]*uncertObs*uncertObs/noemer;//todo:check! error?
+ double kalmanGain=1;
+ if(!imgReaderModel1.isNoData(estReadBuffer[modCol])){
+ //if model is no-data, retain observation value
+ kalmanGain=uncertModel_opt[0]*stdDev/(uncertModel_opt[0]*stdDev+uncertObs);
+ estWriteBuffer[icol]=estReadBuffer[modCol]+kalmanGain*(estWriteBuffer[icol]-estReadBuffer[modCol]);
+ }
+ uncertWriteBuffer[icol]=uncertModel_opt[0]*stdDev*(1-kalmanGain);
+ }
+ else{
+	      //no need to fill write buffer (already done in imgReaderObs.readData)
+ uncertWriteBuffer[icol]=uncertObs;
+ }
+ }
+ }
+ imgWriterEst.writeData(estWriteBuffer,GDT_Float64,irow,0);
+ imgWriterEst.writeData(uncertWriteBuffer,GDT_Float64,irow,1);
+ }
+ imgReaderObs.close();
+ --obsindex;
+ }
+ imgReaderModel1.close();
+ imgWriterEst.close();
+
+ for(int modindex=model_opt.size()-2;modindex>=0;--modindex){
+ if(verbose_opt[0]){
+ cout << "processing time " << tmodel_opt[modindex] << endl;
+ if(obsindex<relobsindex.size())
+ cout << "next observation " << tmodel_opt[relobsindex[obsindex]] << endl;
+ else
+ cout << "There is no next observation" << endl;
+ }
+ string output;
+ if(outputbw_opt.size()==model_opt.size())
+ output=outputbw_opt[modindex];
+ else{
+ ostringstream outputstream;
+ outputstream << outputbw_opt[0] << "_" << tmodel_opt[modindex] << ".tif";
+ // outputstream << output_opt[0] << "_" << modindex+1 << ".tif";
+ output=outputstream.str();
+ }
+
+ //two band output band0=estimation, band1=uncertainty
+ imgWriterEst.open(output,ncol,nrow,2,GDT_Float32,imageType,option_opt);
+ imgWriterEst.setProjectionProj4(projection_opt[0]);
+ imgWriterEst.setGeoTransform(geotransform);
+ imgWriterEst.GDALSetNoDataValue(obsnodata_opt[0]);
+
+ //calculate regression between two subsequence model inputs
+ imgReaderModel1.open(model_opt[modindex+1]);
+ imgReaderModel1.setNoData(modnodata_opt);
+ imgReaderModel1.setOffset(modoffset_opt[0]);
+ imgReaderModel1.setScale(modscale_opt[0]);
+ imgReaderModel2.open(model_opt[modindex]);
+ imgReaderModel2.setNoData(modnodata_opt);
+ imgReaderModel2.setOffset(modoffset_opt[0]);
+ imgReaderModel2.setScale(modscale_opt[0]);
+ //calculate regression
+ //we could re-use the points from second image from last run, but
+ //to keep it general, we must redo it (overlap might have changed)
+
+ pfnProgress(progress,pszMessage,pProgressArg);
+ double c0mod=0;
+ double c1mod=0;
+
+ if(verbose_opt[0])
+ cout << "Calculating regression for " << imgReaderModel1.getFileName() << " " << imgReaderModel2.getFileName() << endl;
+ double errMod=imgreg.getRMSE(imgReaderModel1,imgReaderModel2,c0mod,c1mod);
+ // double errMod=imgreg.getRMSE(imgReaderModel1,imgReaderModel2,c0mod,c1mod,verbose_opt[0]);
+
+ bool update=false;
+ if(obsindex<relobsindex.size()){
+ update=(relobsindex[obsindex]==modindex);
+ }
+ if(update){
+ if(verbose_opt[0])
+ cout << "***update " << relobsindex[obsindex] << " = " << modindex << " " << observation_opt[obsindex] << " ***" << endl;
+
+ imgReaderObs.open(observation_opt[obsindex]);
+ imgReaderObs.getGeoTransform(geotransform);
+ imgReaderObs.setNoData(obsnodata_opt);
+ imgReaderObs.setOffset(obsoffset_opt[0]);
+ imgReaderObs.setScale(obsscale_opt[0]);
+ //calculate regression between model and observation
+ if(verbose_opt[0])
+ cout << "Calculating regression for " << imgReaderModel2.getFileName() << " " << imgReaderObs.getFileName() << endl;
+ errObs=imgreg.getRMSE(imgReaderModel2,imgReaderObs,c0obs,c1obs,verbose_opt[0]);
+ }
+ //prediction (also to fill cloudy pixels in update mode)
+ string input;
+ if(outputbw_opt.size()==model_opt.size())
+ input=outputbw_opt[modindex+1];
+ else{
+ ostringstream outputstream;
+ outputstream << outputbw_opt[0] << "_" << tmodel_opt[modindex+1] << ".tif";
+ input=outputstream.str();
+ }
+ ImgReaderGdal imgReaderEst(input);
+ imgReaderEst.setNoData(obsnodata_opt);
+ imgReaderEst.setOffset(obsoffset_opt[0]);
+ imgReaderEst.setScale(obsscale_opt[0]);
+
+ vector<double> obsBuffer;
+ vector<double> modelBuffer;
+ vector<double> uncertObsBuffer;
+ vector<double> estReadBuffer;
+ vector<double> uncertReadBuffer;
+ vector<double> estWriteBuffer(ncol);
+ vector<double> uncertWriteBuffer(ncol);
+
+ for(int irow=0;irow<imgWriterEst.nrOfRow();++irow){
+ assert(irow<imgReaderEst.nrOfRow());
+ imgReaderEst.readData(estReadBuffer,GDT_Float64,irow,0);
+ imgReaderEst.readData(uncertReadBuffer,GDT_Float64,irow,1);
+ //read model2 in case current estimate is nodata
+ imgReaderEst.image2geo(0,irow,x,y);
+ imgReaderModel2.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel2.nrOfRow());
+ imgReaderModel2.readData(modelBuffer,GDT_Float64,modRow,0);
+ if(update){
+ imgReaderObs.readData(obsBuffer,GDT_Float64,irow,0);
+ if(imgReaderObs.nrOfBand()>1)
+ imgReaderObs.readData(uncertObsBuffer,GDT_Float64,irow,1);
+ }
+ for(int icol=0;icol<imgWriterEst.nrOfCol();++icol){
+ double estValue=estReadBuffer[icol];
+ //time update
+ if(imgReaderEst.isNoData(estValue)){
+ //pk: in case we have not found any valid data yet, better here to take the current model value
+ imgReaderEst.image2geo(icol,irow,x,y);
+ imgReaderModel2.geo2image(x,y,modCol,modRow);
+ assert(modRow>=0&&modRow<imgReaderModel2.nrOfRow());
+ if(imgReaderModel2.isNoData(modelBuffer[modCol])){//if both estimate and model are no-data, set obs to nodata
+ estWriteBuffer[icol]=obsnodata_opt[0];
+ uncertWriteBuffer[icol]=uncertNodata_opt[0];
+ }
+ else{
+ estWriteBuffer[icol]=modelBuffer[modCol];
+ uncertWriteBuffer[icol]=uncertModel_opt[0]*stdDev;
+ }
+ }
+ else{
+ double certNorm=(errMod*errMod+errObs*errObs);
+ double certMod=errObs*errObs/certNorm;
+ double certObs=errMod*errMod/certNorm;
+ double regTime=(c0mod+c1mod*estValue)*certMod;
+ double regSensor=(c0obs+c1obs*estValue)*certObs;
+ estWriteBuffer[icol]=regTime+regSensor;
+ double totalUncertainty=0;
+ if(errMod<eps_opt[0])
+ totalUncertainty=errObs;
+ else if(errObs<eps_opt[0])
+	      totalUncertainty=errMod;
+ else{
+ totalUncertainty=1.0/errMod/errMod+1/errObs/errObs;
+ totalUncertainty=sqrt(1.0/totalUncertainty);
+ }
+ uncertWriteBuffer[icol]=totalUncertainty+uncertReadBuffer[icol];
+ }
+ //observation update
+ if(update&&!imgReaderObs.isNoData(obsBuffer[icol])){
+ double kalmanGain=1;
+ double uncertObs=uncertObs_opt[0];
+ if(uncertObsBuffer.size()>icol)
+ uncertObs=uncertObsBuffer[icol];
+ if((uncertWriteBuffer[icol]+uncertObs)>eps_opt[0])
+ kalmanGain=uncertWriteBuffer[icol]/(uncertWriteBuffer[icol]+uncertObs);
+ assert(kalmanGain<=1);
+ estWriteBuffer[icol]+=kalmanGain*(obsBuffer[icol]-estWriteBuffer[icol]);
+ uncertWriteBuffer[icol]*=(1-kalmanGain);
+ }
+ }
+ imgWriterEst.writeData(estWriteBuffer,GDT_Float64,irow,0);
+ imgWriterEst.writeData(uncertWriteBuffer,GDT_Float64,irow,1);
+ progress=static_cast<float>((irow+1.0)/imgWriterEst.nrOfRow());
+ pfnProgress(progress,pszMessage,pProgressArg);
+ }
+
+ imgWriterEst.close();
+ imgReaderEst.close();
+
+ if(update){
+ imgReaderObs.close();
+ --obsindex;
+ }
+ imgReaderModel1.close();
+ imgReaderModel2.close();
+ }
+ }
+ if(find(direction_opt.begin(),direction_opt.end(),"smooth")!=direction_opt.end()){
+ ///////////////////////////// smooth model /////////////////////////
+ obsindex=0;
+ if(verbose_opt[0])
+ cout << "Running smooth model" << endl;
+ for(int modindex=0;modindex<model_opt.size();++modindex){
+ if(verbose_opt[0]){
+ cout << "processing time " << tmodel_opt[modindex] << endl;
+ if(obsindex<relobsindex.size())
+ cout << "next observation " << tmodel_opt[relobsindex[obsindex]] << endl;
+ else
+ cout << "There is no next observation" << endl;
+ }
+ string output;
+ if(outputfb_opt.size()==model_opt.size())
+ output=outputfb_opt[modindex];
+ else{
+ ostringstream outputstream;
+ outputstream << outputfb_opt[0] << "_" << tmodel_opt[modindex] << ".tif";
+ output=outputstream.str();
+ }
+
+ //two band output band0=estimation, band1=uncertainty
+ imgWriterEst.open(output,ncol,nrow,2,GDT_Float32,imageType,option_opt);
+ imgWriterEst.setProjectionProj4(projection_opt[0]);
+ imgWriterEst.setGeoTransform(geotransform);
+ imgWriterEst.GDALSetNoDataValue(obsnodata_opt[0]);
+
+ //open forward and backward estimates
+ //we assume forward in model and backward in observation...
+
+ string inputfw;
+ string inputbw;
+ if(outputfw_opt.size()==model_opt.size())
+ inputfw=outputfw_opt[modindex];
+ else{
+ ostringstream outputstream;
+ outputstream << outputfw_opt[0] << "_" << tmodel_opt[modindex] << ".tif";
+ inputfw=outputstream.str();
+ }
+ if(outputbw_opt.size()==model_opt.size())
+ inputbw=outputbw_opt[modindex];
+ else{
+ ostringstream outputstream;
+ outputstream << outputbw_opt[0] << "_" << tmodel_opt[modindex] << ".tif";
+ inputbw=outputstream.str();
+ }
+ ImgReaderGdal imgReaderForward(inputfw);
+ ImgReaderGdal imgReaderBackward(inputbw);
+ imgReaderForward.setNoData(obsnodata_opt);
+ imgReaderForward.setOffset(obsoffset_opt[0]);
+ imgReaderForward.setScale(obsscale_opt[0]);
+ imgReaderBackward.setNoData(obsnodata_opt);
+ imgReaderBackward.setOffset(obsoffset_opt[0]);
+ imgReaderBackward.setScale(obsscale_opt[0]);
+
+ vector<double> estForwardBuffer;
+ vector<double> estBackwardBuffer;
+ vector<double> uncertObsBuffer;
+ vector<double> uncertForwardBuffer;
+ vector<double> uncertBackwardBuffer;
+ vector<double> uncertReadBuffer;
+ vector<double> estWriteBuffer(ncol);
+ vector<double> uncertWriteBuffer(ncol);
+
+ bool update=false;
+ if(obsindex<relobsindex.size()){
+ update=(relobsindex[obsindex]==modindex);
+ }
+
+ if(update){
+ if(verbose_opt[0])
+ cout << "***update " << relobsindex[obsindex] << " = " << modindex << " " << observation_opt[obsindex] << " ***" << endl;
+ imgReaderObs.open(observation_opt[obsindex]);
+ imgReaderObs.getGeoTransform(geotransform);
+ imgReaderObs.setNoData(obsnodata_opt);
+ imgReaderObs.setOffset(obsoffset_opt[0]);
+ imgReaderObs.setScale(obsscale_opt[0]);
+ //calculate regression between model and observation
+ }
+
+ pfnProgress(progress,pszMessage,pProgressArg);
+
+ for(int irow=0;irow<imgWriterEst.nrOfRow();++irow){
+ assert(irow<imgReaderForward.nrOfRow());
+ assert(irow<imgReaderBackward.nrOfRow());
+ imgReaderForward.readData(estForwardBuffer,GDT_Float64,irow,0);
+ imgReaderBackward.readData(estBackwardBuffer,GDT_Float64,irow,0);
+ imgReaderForward.readData(uncertForwardBuffer,GDT_Float64,irow,1);
+ imgReaderBackward.readData(uncertBackwardBuffer,GDT_Float64,irow,1);
+
+ if(update){
+ imgReaderObs.readData(estWriteBuffer,GDT_Float64,irow,0);
+ if(imgReaderObs.nrOfBand()>1)
+ imgReaderObs.readData(uncertObsBuffer,GDT_Float64,irow,1);
+ }
+
+ for(int icol=0;icol<imgWriterEst.nrOfCol();++icol){
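+	  // Smoothing step (sketch): the forward estimate A (variance C) and the
+	  // backward estimate B (variance D) are fused as an inverse-variance
+	  // weighted average, x = (A*D + B*C)/(C + D), falling back to whichever
+	  // estimate is valid when the other is no-data; the uncertainty is then
+	  // assembled from 1/C + 1/D minus the observation term below.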
+ double A=estForwardBuffer[icol];
+ double B=estBackwardBuffer[icol];
+ double C=uncertForwardBuffer[icol]*uncertForwardBuffer[icol];
+ double D=uncertBackwardBuffer[icol]*uncertBackwardBuffer[icol];
+ double uncertObs=uncertObs_opt[0];
+
+ // if(update){//check for nodata in observation
+ // if(imgReaderObs.isNoData(estWriteBuffer[icol]))
+ // uncertObs=uncertNodata_opt[0];
+ // else if(uncertObsBuffer.size()>icol)
+ // uncertObs=uncertObsBuffer[icol];
+ // }
+
+ double noemer=(C+D);
+ //todo: consistently check for division by zero...
+ if(imgReaderForward.isNoData(A)&&imgReaderBackward.isNoData(B)){
+ estWriteBuffer[icol]=obsnodata_opt[0];
+ uncertWriteBuffer[icol]=uncertNodata_opt[0];
+ }
+ else if(imgReaderForward.isNoData(A)){
+ estWriteBuffer[icol]=B;
+ uncertWriteBuffer[icol]=uncertBackwardBuffer[icol];
+ }
+	  else if(imgReaderBackward.isNoData(B)){
+ estWriteBuffer[icol]=A;
+ uncertWriteBuffer[icol]=uncertForwardBuffer[icol];
+ }
+ else{
+	    if(noemer<eps_opt[0]){//simple average if both uncertainties are close to zero
+ estWriteBuffer[icol]=0.5*(A+B);
+ uncertWriteBuffer[icol]=uncertObs;
+ }
+ else{
+ estWriteBuffer[icol]=(A*D+B*C)/noemer;
+ double P=0;
+ if(C>eps_opt[0])
+ P+=1.0/C;
+ if(D>eps_opt[0])
+ P+=1.0/D;
+ if(uncertObs*uncertObs>eps_opt[0])
+ P-=1.0/uncertObs/uncertObs;
+ if(P>eps_opt[0])
+ P=sqrt(1.0/P);
+ else
+ P=0;
+ uncertWriteBuffer[icol]=P;
+ }
+ }
+ }
+ imgWriterEst.writeData(estWriteBuffer,GDT_Float64,irow,0);
+ imgWriterEst.writeData(uncertWriteBuffer,GDT_Float64,irow,1);
+ progress=static_cast<float>((irow+1.0)/imgWriterEst.nrOfRow());
+ pfnProgress(progress,pszMessage,pProgressArg);
+ }
+
+ imgWriterEst.close();
+ imgReaderForward.close();
+ imgReaderBackward.close();
+ if(update){
+ imgReaderObs.close();
+ ++obsindex;
+ }
+ }
+ }
+}
+
diff --git a/src/apps/pklas2img.cc b/src/apps/pklas2img.cc
index dad94b0..5cf97f8 100644
--- a/src/apps/pklas2img.cc
+++ b/src/apps/pklas2img.cc
@@ -146,10 +146,11 @@ int main(int argc,char **argv) {
lasReader.open(input_opt[iinput]);
}
catch(string errorString){
- cout << errorString << endl;
+ cerr << errorString << endl;
exit(1);
}
catch(...){
+ cerr << "Error opening input " << input_opt[iinput] << endl;
exit(2);
}
nPoints=lasReader.getPointCount();
@@ -282,7 +283,7 @@ int main(int argc,char **argv) {
attribute_opt.erase(ait);
}
}
- liblas::Point thePoint;
+ liblas::Point thePoint(&(lasReader.getHeader()));
while(lasReader.readNextPoint(thePoint)){
progress=static_cast<float>(ipoint)/totalPoints;
pfnProgress(progress,pszMessage,pProgressArg);
diff --git a/src/apps/pkoptsvm.cc b/src/apps/pkoptsvm.cc
index 8980d0a..ffcc488 100644
--- a/src/apps/pkoptsvm.cc
+++ b/src/apps/pkoptsvm.cc
@@ -101,7 +101,7 @@ double objFunction(const std::vector<double> &x, std::vector<double> &grad, void
ntest+=nctest[iclass];
}
if(ntest)
- assert(!cv_opt[0]);
+ cv_opt[0]=0;
if(!cv_opt[0])
assert(ntest);
// ntraining+=(*tf)[iclass].size();
@@ -230,8 +230,8 @@ int main(int argc, char *argv[])
{
map<short,int> reclassMap;
vector<int> vreclass;
- Optionpk<string> input_opt("i", "input", "input image");
- Optionpk<string> training_opt("t", "training", "training vector file. A single vector file contains all training features (must be set as: B0, B1, B2,...) for all classes (class numbers identified by label option).");
+ Optionpk<string> training_opt("t", "training", "training vector file. A single vector file contains all training features (must be set as: b0, b1, b2,...) for all classes (class numbers identified by label option).");
+ Optionpk<string> input_opt("i", "input", "input test vector file");
Optionpk<string> tlayer_opt("tln", "tln", "training layer name(s)");
Optionpk<string> label_opt("\0", "label", "identifier for class label in training vector file.","label");
// Optionpk<unsigned short> reclass_opt("\0", "rc", "reclass code (e.g. --rc=12 --rc=23 to reclass first two classes to 12 and 23 resp.).", 0);
@@ -248,17 +248,17 @@ int main(int argc, char *argv[])
Optionpk<unsigned int> maxit_opt("maxit","maxit","maximum number of iterations",500);
Optionpk<string> algorithm_opt("a", "algorithm", "GRID, or any optimization algorithm from http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms","GRID");
Optionpk<double> tolerance_opt("tol","tolerance","relative tolerance for stopping criterion",0.0001);
- Optionpk<double> step_opt("step","step","multiplicative step for GRID search (-step cost -step gamma)",2);
+ Optionpk<double> step_opt("step","step","multiplicative step for ccost and gamma in GRID search",2);
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
- doProcess=input_opt.retrieveOption(argc,argv);
- training_opt.retrieveOption(argc,argv);
+ doProcess=training_opt.retrieveOption(argc,argv);
+ input_opt.retrieveOption(argc,argv);
tlayer_opt.retrieveOption(argc,argv);
label_opt.retrieveOption(argc,argv);
// reclass_opt.retrieveOption(argc,argv);
balance_opt.retrieveOption(argc,argv);
- random_opt.retrieveOption(argc,argv);
+ random_opt.retrieveOption(argc,argv);
minSize_opt.retrieveOption(argc,argv);
start_opt.retrieveOption(argc,argv);
end_opt.retrieveOption(argc,argv);
@@ -605,8 +605,8 @@ int main(int argc, char *argv[])
double progress=0;
if(!verbose_opt[0])
pfnProgress(progress,pszMessage,pProgressArg);
- double ncost=log(ccost_opt[1])/log(10.0)-log(ccost_opt[0])/log(10.0);
- double ngamma=log(gamma_opt[1])/log(10.0)-log(gamma_opt[0])/log(10.0);
+ double ncost=log(ccost_opt[1])/log(step_opt[0])-log(ccost_opt[0])/log(step_opt[0]);
+ double ngamma=log(gamma_opt[1])/log(step_opt[1])-log(gamma_opt[0])/log(step_opt[1]);
for(double ccost=ccost_opt[0];ccost<=ccost_opt[1];ccost*=step_opt[0]){
for(double gamma=gamma_opt[0];gamma<=gamma_opt[1];gamma*=step_opt[1]){
x[0]=ccost;
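
The pkoptsvm.cc change above derives the number of GRID steps with logarithms in the base of the step option (rather than base 10), matching the multiplicative loops over ccost and gamma. A small stand-alone sketch of that search pattern, with made-up bounds standing in for ccost_opt, gamma_opt and step_opt:

    // Sketch of the multiplicative GRID search over (ccost, gamma); all values are examples.
    #include <cmath>
    #include <cstdio>

    int main() {
      double costMin = 1, costMax = 1000, costStep = 2;     // e.g. --cc 1 --cc 1000 --step 2
      double gammaMin = 0.1, gammaMax = 10, gammaStep = 2;
      // rough number of multiplicative steps per dimension, computed with logs in the step base
      double ncost  = std::log(costMax) / std::log(costStep)  - std::log(costMin) / std::log(costStep);
      double ngamma = std::log(gammaMax) / std::log(gammaStep) - std::log(gammaMin) / std::log(gammaStep);
      std::printf("grid of roughly %.0f x %.0f points\n", ncost + 1, ngamma + 1);
      for (double cost = costMin; cost <= costMax; cost *= costStep)
        for (double gamma = gammaMin; gamma <= gammaMax; gamma *= gammaStep)
          std::printf("evaluate cross-validated SVM at C=%g gamma=%g\n", cost, gamma);
      return 0;
    }
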
diff --git a/src/apps/pksieve.cc b/src/apps/pksieve.cc
index 13673a3..60e3588 100644
--- a/src/apps/pksieve.cc
+++ b/src/apps/pksieve.cc
@@ -34,7 +34,7 @@ using namespace std;
int main(int argc,char **argv) {
Optionpk<string> input_opt("i", "input", "Input image file");
- Optionpk<string> mask_opt("m", "mask", "Mask band indicating pixels to be interpolated (zero valued) ");
+ Optionpk<string> mask_opt("m", "mask", "Use the first band of the specified file as a validity mask (zero is invalid, non-zero is valid).");
Optionpk<string> output_opt("o", "output", "Output image file");
Optionpk<int> band_opt("b", "band", "the band to be used from input file", 0);
Optionpk<int> connect_opt("c", "connect", "the connectedness: 4 directions or 8 directions", 8);
diff --git a/src/apps/pkstatascii.cc b/src/apps/pkstatascii.cc
index 6dd1d25..fe3b842 100644
--- a/src/apps/pkstatascii.cc
+++ b/src/apps/pkstatascii.cc
@@ -56,11 +56,12 @@ int main(int argc, char *argv[])
Optionpk<bool> histogram2d_opt("hist2d","hist2d","calculate 2-dimensional histogram based on two columns",false);
Optionpk<short> nbin_opt("nbin","nbin","number of bins to calculate histogram");
Optionpk<bool> relative_opt("rel","relative","use percentiles for histogram to calculate histogram",false);
- Optionpk<double> kde_opt("kde","kde","bandwith of kernel density when producing histogram, use 0 for practical estimation based on Silverman's rule of thumb. Leave empty if no kernel density is required");
+ Optionpk<bool> kde_opt("kde","kde","Use Kernel density estimation when producing histogram. The standard deviation is estimated based on Silverman's rule of thumb",false);
Optionpk<bool> correlation_opt("cor","correlation","calculate Pearson produc-moment correlation coefficient between two columns (defined by -c <col1> -c <col2>",false);
Optionpk<bool> rmse_opt("rmse","rmse","calculate root mean square error between two columns (defined by -c <col1> -c <col2>",false);
- Optionpk<bool> reg_opt("reg","regression","calculate linear regression error between two columns (defined by -c <col1> -c <col2>",false);
- Optionpk<short> verbose_opt("v", "verbose", "verbose mode when > 0", 0);
+ Optionpk<bool> reg_opt("reg","regression","calculate linear regression between two columns and get correlation coefficient (defined by -c <col1> -c <col2>",false);
+ Optionpk<bool> regerr_opt("regerr","regerr","calculate linear regression between two columns and get root mean square error (defined by -c <col1> -c <col2>",false);
+ Optionpk<short> verbose_opt("v", "verbose", "verbose mode when positive", 0);
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
@@ -96,6 +97,7 @@ int main(int argc, char *argv[])
correlation_opt.retrieveOption(argc,argv);
rmse_opt.retrieveOption(argc,argv);
reg_opt.retrieveOption(argc,argv);
+ regerr_opt.retrieveOption(argc,argv);
verbose_opt.retrieveOption(argc,argv);
}
catch(string predefinedString){
@@ -204,10 +206,10 @@ int main(int argc, char *argv[])
if(histogram_opt[0]){
//todo: support kernel density function and estimate sigma as in practical estimate of the bandwith in http://en.wikipedia.org/wiki/Kernel_density_estimation
double sigma=0;
- if(kde_opt.size()){
- if(kde_opt[0]>0)
- sigma=kde_opt[0];
- else
+ if(kde_opt[0]){//.size()){
+ // if(kde_opt[0]>0)
+ // sigma=kde_opt[0];
+ // else
sigma=1.06*sqrt(stat.var(dataVector[icol]))*pow(dataVector[icol].size(),-0.2);
}
assert(nbin);
@@ -292,6 +294,16 @@ int main(int argc, char *argv[])
double r2=stat.linear_regression(dataVector[0],dataVector[1],c0,c1);
cout << "linear regression between columns: " << col_opt[0] << " and " << col_opt[1] << ": " << c0 << "+" << c1 << " * x " << " with R^2 (square correlation coefficient): " << r2 << endl;
}
+ if(regerr_opt[0]){
+ assert(dataVector.size()==2);
+ double c0=0;
+ double c1=0;
+ double err=stat.linear_regression_err(dataVector[0],dataVector[1],c0,c1);
+ if(verbose_opt[0])
+ cout << "linear regression between columns: " << col_opt[0] << " and " << col_opt[1] << ": " << c0 << "+" << c1 << " * x " << " with rmse: " << err << endl;
+ else
+ cout << c0 << " " << c1 << " " << err << endl;
+ }
if(histogram_opt[0]){
for(int irow=0;irow<statVector.begin()->size();++irow){
double binValue=0;
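
The new regerr option above reports the intercept c0, slope c1 and the root mean square error of a linear fit between the two selected columns. Assuming the usual definition of RMSE over the residuals (the exact behaviour of stat.linear_regression_err is not shown in this hunk), a stand-alone sketch:

    // Sketch: RMSE of a linear model y ~ c0 + c1*x over two equally long columns.
    #include <cmath>
    #include <cstddef>
    #include <vector>

    double rmse(const std::vector<double>& x, const std::vector<double>& y, double c0, double c1) {
      double sse = 0;
      for (std::size_t i = 0; i < x.size(); ++i) {
        double r = y[i] - (c0 + c1 * x[i]);   // residual of the fit
        sse += r * r;
      }
      return std::sqrt(sse / x.size());
    }
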
@@ -318,10 +330,10 @@ int main(int argc, char *argv[])
assert(dataVector[0].size()==dataVector[1].size());
double sigma=0;
//kernel density estimation as in http://en.wikipedia.org/wiki/Kernel_density_estimation
- if(kde_opt.size()){
- if(kde_opt[0]>0)
- sigma=kde_opt[0];
- else
+ if(kde_opt[0]){
+ // if(kde_opt[0]>0)
+ // sigma=kde_opt[0];
+ // else
sigma=1.06*sqrt(sqrt(stat.var(dataVector[0]))*sqrt(stat.var(dataVector[0])))*pow(dataVector[0].size(),-0.2);
}
assert(nbin);
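
Both histogram branches above now switch kernel density estimation on with a boolean kde option and always derive the bandwidth from Silverman's rule of thumb, sigma = 1.06 * stddev * n^(-1/5). A one-function sketch of that rule (the helper name is hypothetical):

    // Silverman's rule-of-thumb bandwidth, as hard-coded in the hunks above.
    #include <cmath>
    #include <vector>

    double silvermanBandwidth(const std::vector<double>& data, double variance) {
      // variance is assumed to be precomputed (stat.var(...) in pktools)
      return 1.06 * std::sqrt(variance) * std::pow(static_cast<double>(data.size()), -0.2);
    }
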
diff --git a/src/apps/pkstatogr.cc b/src/apps/pkstatogr.cc
index 5e8b43e..88b2a1f 100644
--- a/src/apps/pkstatogr.cc
+++ b/src/apps/pkstatogr.cc
@@ -25,52 +25,56 @@ along with pktools. If not, see <http://www.gnu.org/licenses/>.
#include "imageclasses/ImgReaderOgr.h"
#include "algorithms/StatFactory.h"
+using namespace std;
+
int main(int argc, char *argv[])
{
- Optionpk<std::string> input_opt("i", "input", "Input shape file", "");
- Optionpk<std::string> fieldname_opt("n", "fname", "fields on which to calculate statistics", "");
- Optionpk<bool> minmax_opt("mm","minmax","calculate minimum and maximum value",false);
- Optionpk<bool> min_opt("min","min","calculate minimum value",0);
- Optionpk<bool> max_opt("max","max","calculate maximum value",0);
- Optionpk<double> src_min_opt("src_min","src_min","set minimum value for histogram");
- Optionpk<double> src_max_opt("src_max","src_max","set maximum value for histogram");
- Optionpk<double> nodata_opt("nodata","nodata","set nodata value(s)");
- Optionpk<bool> histogram_opt("hist","hist","calculate histogram",false);
- Optionpk<unsigned int> nbin_opt("nbin", "nbin", "number of bins");
- Optionpk<bool> relative_opt("rel","relative","use percentiles for histogram to calculate histogram",false);
- Optionpk<double> kde_opt("kde","kde","bandwith of kernel density when producing histogram, use 0 for practical estimation based on Silverman's rule of thumb. Leave empty if no kernel density is required");
- Optionpk<bool> mean_opt("mean","mean","calculate mean value",false);
- Optionpk<bool> median_opt("median","median","calculate median value",false);
- Optionpk<bool> stdev_opt("stdev","stdev","calculate standard deviation",false);
- Optionpk<bool> size_opt("s","size","sample size (number of points)",false);
- Optionpk<short> verbose_opt("v", "verbose", "verbose mode if > 0", 0);
+ Optionpk<string> input_opt("i", "input", "Input OGR vector file", "");
+ Optionpk<string> layer_opt("ln", "lname", "Layer name(s) in sample (leave empty to select all)");
+ Optionpk<string> fieldname_opt("n", "fname", "Fields on which to calculate statistics", "");
+ Optionpk<double> nodata_opt("nodata","nodata","Set nodata value(s)");
+ Optionpk<double> src_min_opt("src_min","src_min","Set minimum value for histogram");
+ Optionpk<double> src_max_opt("src_max","src_max","Set maximum value for histogram");
+ Optionpk<bool> size_opt("s","size","Sample size (number of points)",false);
+ Optionpk<bool> minmax_opt("mm","minmax","Calculate minimum and maximum value",false);
+ Optionpk<bool> min_opt("min","min","Calculate minimum value",0);
+ Optionpk<bool> max_opt("max","max","Calculate maximum value",0);
+ Optionpk<bool> mean_opt("mean","mean","Calculate mean value",false);
+ Optionpk<bool> median_opt("median","median","Calculate median value",false);
+ Optionpk<bool> stdev_opt("stdev","stdev","Calculate standard deviation",false);
+ Optionpk<bool> histogram_opt("hist","hist","Calculate histogram",false);
+ Optionpk<unsigned int> nbin_opt("nbin", "nbin", "Number of bins");
+ Optionpk<bool> relative_opt("rel","relative","Report histogram in relative frequencies (percentages) instead of absolute counts",false);
+ Optionpk<bool> kde_opt("kde","kde","Use Kernel density estimation when producing histogram. The standard deviation is estimated based on Silverman's rule of thumb",false);
+ Optionpk<short> verbose_opt("v", "verbose", "Verbose level", 0);
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
doProcess=input_opt.retrieveOption(argc,argv);
+ layer_opt.retrieveOption(argc,argv);
fieldname_opt.retrieveOption(argc,argv);
+ nodata_opt.retrieveOption(argc,argv);
+ src_min_opt.retrieveOption(argc,argv);
+ src_max_opt.retrieveOption(argc,argv);
+ size_opt.retrieveOption(argc,argv);
minmax_opt.retrieveOption(argc,argv);
min_opt.retrieveOption(argc,argv);
max_opt.retrieveOption(argc,argv);
- src_min_opt.retrieveOption(argc,argv);
- src_max_opt.retrieveOption(argc,argv);
- nodata_opt.retrieveOption(argc,argv);
+ mean_opt.retrieveOption(argc,argv);
+ median_opt.retrieveOption(argc,argv);
+ stdev_opt.retrieveOption(argc,argv);
histogram_opt.retrieveOption(argc,argv);
nbin_opt.retrieveOption(argc,argv);
relative_opt.retrieveOption(argc,argv);
kde_opt.retrieveOption(argc,argv);
- mean_opt.retrieveOption(argc,argv);
- median_opt.retrieveOption(argc,argv);
- stdev_opt.retrieveOption(argc,argv);
- size_opt.retrieveOption(argc,argv);
verbose_opt.retrieveOption(argc,argv);
}
- catch(std::string predefinedString){
- std::cout << predefinedString << std::endl;
+ catch(string predefinedString){
+ cout << predefinedString << endl;
exit(0);
}
if(!doProcess){
- std::cout << "short option -h shows basic options only, use long option --help to show all options" << std::endl;
+ cout << "short option -h shows basic options only, use long option --help to show all options" << endl;
exit(0);//help was invoked, stop processing
}
@@ -78,105 +82,123 @@ int main(int argc, char *argv[])
try{
imgReader.open(input_opt[0]);
}
- catch(std::string errorstring){
- std::cerr << errorstring << std::endl;
+ catch(string errorstring){
+ cerr << errorstring << endl;
}
ImgReaderOgr inputReader(input_opt[0]);
- std::vector<double> theData;
+ vector<double> theData;
statfactory::StatFactory stat;
//todo: implement ALL
stat.setNoDataValues(nodata_opt);
- for(int ifield=0;ifield<fieldname_opt.size();++ifield){
+
+ //support multiple layers
+ int nlayerRead=inputReader.getDataSource()->GetLayerCount();
+ if(verbose_opt[0])
+ cout << "number of layers: " << nlayerRead << endl;
+
+ for(int ilayer=0;ilayer<nlayerRead;++ilayer){
+ OGRLayer *readLayer=inputReader.getLayer(ilayer);
+ string currentLayername=readLayer->GetName();
+ if(layer_opt.size())
+ if(find(layer_opt.begin(),layer_opt.end(),currentLayername)==layer_opt.end())
+ continue;
if(verbose_opt[0])
- std::cout << "field: " << ifield << std::endl;
- theData.clear();
- inputReader.readData(theData,OFTReal,fieldname_opt[ifield],0,verbose_opt[0]);
- std::vector<double> binData;
- double minValue=0;
- double maxValue=0;
- stat.minmax(theData,theData.begin(),theData.end(),minValue,maxValue);
- if(src_min_opt.size())
- minValue=src_min_opt[0];
- if(src_max_opt.size())
- maxValue=src_max_opt[0];
- unsigned int nbin=(nbin_opt.size())? nbin_opt[0]:0;
+ cout << "processing layer " << currentLayername << endl;
+ if(layer_opt.size())
+ cout << " --lname " << currentLayername;
+
+ for(int ifield=0;ifield<fieldname_opt.size();++ifield){
+ if(verbose_opt[0])
+ cout << "field: " << ifield << endl;
+ theData.clear();
+ inputReader.readData(theData,OFTReal,fieldname_opt[ifield],ilayer,verbose_opt[0]);
+ vector<double> binData;
+ double minValue=0;
+ double maxValue=0;
+ stat.minmax(theData,theData.begin(),theData.end(),minValue,maxValue);
+ if(src_min_opt.size())
+ minValue=src_min_opt[0];
+ if(src_max_opt.size())
+ maxValue=src_max_opt[0];
+ unsigned int nbin=(nbin_opt.size())? nbin_opt[0]:0;
- if(histogram_opt[0]){
- double sigma=0;
- if(kde_opt.size()){
- if(kde_opt[0]>0)
- sigma=kde_opt[0];
- else
- sigma=1.06*sqrt(stat.var(theData))*pow(theData.size(),-0.2);
+ if(histogram_opt[0]){
+ double sigma=0;
+ if(kde_opt[0]){
+ // if(kde_opt[0]>0)
+ // sigma=kde_opt[0];
+ // else
+ sigma=1.06*sqrt(stat.var(theData))*pow(theData.size(),-0.2);
+ }
+ if(nbin<1)
+ nbin=(maxValue-minValue+1);
+ try{
+ stat.distribution(theData,theData.begin(),theData.end(),binData,nbin,minValue,maxValue,sigma);
+ }
+ catch(string theError){
+ cerr << "Warning: all identical values in data" << endl;
+ exit(1);
+ }
}
- if(nbin<1)
- nbin=(maxValue-minValue+1);
+ // int nbin=(nbin_opt[0]>1)? nbin_opt[0] : 2;
+ cout << " --fname " << fieldname_opt[ifield];
try{
- stat.distribution(theData,theData.begin(),theData.end(),binData,nbin,minValue,maxValue,sigma);
- }
- catch(std::string theError){
- std::cerr << "Warning: all identical values in data" << std::endl;
- exit(1);
- }
- }
- // int nbin=(nbin_opt[0]>1)? nbin_opt[0] : 2;
- std::cout << " --fname " << fieldname_opt[ifield];
- try{
- double theMean=0;
- double theVar=0;
- stat.meanVar(theData,theMean,theVar);
- if(mean_opt[0])
- std::cout << " --mean " << theMean;
- if(stdev_opt[0])
- std::cout << " --stdev " << sqrt(theVar);
- if(minmax_opt[0]||min_opt[0]||max_opt[0]){
- if(minmax_opt[0])
- std::cout << " --min " << minValue << " --max " << maxValue << " ";
- else{
- if(min_opt[0])
- std::cout << " --min " << minValue << " ";
- if(max_opt[0])
- std::cout << " --max " << maxValue << " ";
+ double theMean=0;
+ double theVar=0;
+ stat.meanVar(theData,theMean,theVar);
+ if(mean_opt[0])
+ cout << " --mean " << theMean;
+ if(stdev_opt[0])
+ cout << " --stdev " << sqrt(theVar);
+ if(minmax_opt[0]||min_opt[0]||max_opt[0]){
+ if(minmax_opt[0])
+ cout << " --min " << minValue << " --max " << maxValue << " ";
+ else{
+ if(min_opt[0])
+ cout << " --min " << minValue << " ";
+ if(max_opt[0])
+ cout << " --max " << maxValue << " ";
+ }
+ }
+ if(median_opt[0])
+ cout << " -median " << stat.median(theData);
+ if(size_opt[0])
+ cout << " -size " << theData.size();
+ cout << endl;
+ if(histogram_opt[0]){
+ for(int ibin=0;ibin<nbin;++ibin){
+ double binValue=0;
+ if(nbin==maxValue-minValue+1)
+ binValue=minValue+ibin;
+ else
+ binValue=minValue+static_cast<double>(maxValue-minValue)*(ibin+0.5)/nbin;
+ cout << binValue << " ";
+ if(relative_opt[0])
+ cout << 100.0*static_cast<double>(binData[ibin])/theData.size() << endl;
+ else
+ cout << binData[ibin] << endl;
+ }
}
}
- if(median_opt[0])
- std::cout << " -median " << stat.median(theData);
- if(size_opt[0])
- std::cout << " -size " << theData.size();
- std::cout << std::endl;
- if(histogram_opt[0]){
- for(int ibin=0;ibin<nbin;++ibin){
- double binValue=0;
- if(nbin==maxValue-minValue+1)
- binValue=minValue+ibin;
- else
- binValue=minValue+static_cast<double>(maxValue-minValue)*(ibin+0.5)/nbin;
- std::cout << binValue << " ";
- if(relative_opt[0])
- std::cout << 100.0*static_cast<double>(binData[ibin])/theData.size() << std::endl;
- else
- std::cout << binData[ibin] << std::endl;
- }
+ catch(string theError){
+ if(mean_opt[0])
+ cout << " --mean " << theData.back();
+ if(stdev_opt[0])
+ cout << " --stdev " << "0";
+ if(min_opt[0])
+ cout << " -min " << theData.back();
+ if(max_opt[0])
+ cout << " -max " << theData.back();
+ if(median_opt[0])
+ cout << " -median " << theData.back();
+ if(size_opt[0])
+ cout << " -size " << theData.size();
+ cout << endl;
+ cerr << "Warning: all identical values in data" << endl;
}
}
- catch(std::string theError){
- if(mean_opt[0])
- std::cout << " --mean " << theData.back();
- if(stdev_opt[0])
- std::cout << " --stdev " << "0";
- if(min_opt[0])
- std::cout << " -min " << theData.back();
- if(max_opt[0])
- std::cout << " -max " << theData.back();
- if(median_opt[0])
- std::cout << " -median " << theData.back();
- if(size_opt[0])
- std::cout << " -size " << theData.size();
- std::cout << std::endl;
- std::cerr << "Warning: all identical values in data" << std::endl;
- }
}
imgReader.close();
}
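
In the reworked pkstatogr.cc above, the histogram labels each bin either by its integer value (when there is one bin per integer) or by the bin midpoint. A sketch of that mapping with a hypothetical helper:

    // Bin label as printed by the histogram loop above: integer value or bin midpoint.
    double binCentre(int ibin, unsigned int nbin, double minValue, double maxValue) {
      if (nbin == static_cast<unsigned int>(maxValue - minValue + 1))
        return minValue + ibin;                                       // one bin per integer value
      return minValue + (maxValue - minValue) * (ibin + 0.5) / nbin;  // midpoint of bin ibin
    }

With the relative option the count per bin is then reported as a percentage of the sample size.
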
diff --git a/src/apps/pksvm.cc b/src/apps/pksvm.cc
index 148c0d6..12ce0e6 100644
--- a/src/apps/pksvm.cc
+++ b/src/apps/pksvm.cc
@@ -49,101 +49,101 @@ int main(int argc, char *argv[])
//--------------------------- command line options ------------------------------------
Optionpk<string> input_opt("i", "input", "input image");
- Optionpk<string> training_opt("t", "training", "training vector file. A single vector file contains all training features (must be set as: B0, B1, B2,...) for all classes (class numbers identified by label option). Use multiple training files for bootstrap aggregation (alternative to the bag and bsize options, where a random subset is taken from a single training file)");
- Optionpk<string> tlayer_opt("tln", "tln", "training layer name(s)");
- Optionpk<string> label_opt("label", "label", "identifier for class label in training vector file.","label");
- Optionpk<unsigned int> balance_opt("bal", "balance", "balance the input data to this number of samples for each class", 0);
- Optionpk<bool> random_opt("random", "random", "in case of balance, randomize input data", true);
- Optionpk<int> minSize_opt("min", "min", "if number of training pixels is less then min, do not take this class into account (0: consider all classes)", 0);
- Optionpk<double> start_opt("s", "start", "start band sequence number",0);
- Optionpk<double> end_opt("e", "end", "end band sequence number (set to 0 to include all bands)", 0);
- Optionpk<short> band_opt("b", "band", "band index (starting from 0, either use band option or use start to end)");
- Optionpk<double> offset_opt("\0", "offset", "offset value for each spectral band input features: refl[band]=(DN[band]-offset[band])/scale[band]", 0.0);
- Optionpk<double> scale_opt("\0", "scale", "scale value for each spectral band input features: refl=(DN[band]-offset[band])/scale[band] (use 0 if scale min and max in each band to -1.0 and 1.0)", 0.0);
- Optionpk<double> priors_opt("p", "prior", "prior probabilities for each class (e.g., -p 0.3 -p 0.3 -p 0.2 ). Used for input only (ignored for cross validation)", 0.0);
- Optionpk<string> priorimg_opt("pim", "priorimg", "prior probability image (multi-band img with band for each class");
- Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",0);
- Optionpk<std::string> svm_type_opt("svmt", "svmtype", "type of SVM (C_SVC, nu_SVC,one_class, epsilon_SVR, nu_SVR)","C_SVC");
- Optionpk<std::string> kernel_type_opt("kt", "kerneltype", "type of kernel function (linear,polynomial,radial,sigmoid) ","radial");
- Optionpk<unsigned short> kernel_degree_opt("kd", "kd", "degree in kernel function",3);
- Optionpk<float> gamma_opt("g", "gamma", "gamma in kernel function",1.0);
- Optionpk<float> coef0_opt("c0", "coef0", "coef0 in kernel function",0);
- Optionpk<float> ccost_opt("cc", "ccost", "the parameter C of C_SVC, epsilon_SVR, and nu_SVR",1000);
- Optionpk<float> nu_opt("nu", "nu", "the parameter nu of nu_SVC, one_class SVM, and nu_SVR",0.5);
- Optionpk<float> epsilon_loss_opt("eloss", "eloss", "the epsilon in loss function of epsilon_SVR",0.1);
- Optionpk<int> cache_opt("cache", "cache", "cache memory size in MB",100);
- Optionpk<float> epsilon_tol_opt("etol", "etol", "the tolerance of termination criterion",0.001);
- Optionpk<bool> shrinking_opt("shrink", "shrink", "whether to use the shrinking heuristics",false);
- Optionpk<bool> prob_est_opt("pe", "probest", "whether to train a SVC or SVR model for probability estimates",true,2);
- // Optionpk<bool> weight_opt("wi", "wi", "set the parameter C of class i to weight*C, for C_SVC",true);
- Optionpk<unsigned short> comb_opt("comb", "comb", "how to combine bootstrap aggregation classifiers (0: sum rule, 1: product rule, 2: max rule). Also used to aggregate classes with rc option.",0);
+ Optionpk<string> training_opt("t", "training", "Training vector file. A single vector file contains all training features (must be set as: b0, b1, b2,...) for all classes (class numbers identified by label option). Use multiple training files for bootstrap aggregation (alternative to the bag and bsize options, where a random subset is taken from a single training file)");
+ Optionpk<string> tlayer_opt("tln", "tln", "Training layer name(s)");
+ Optionpk<string> label_opt("label", "label", "Attribute name for class label in training vector file.","label");
+ Optionpk<unsigned int> balance_opt("bal", "balance", "Balance the input data to this number of samples for each class", 0);
+ Optionpk<bool> random_opt("random", "random", "Randomize training data for balancing and bagging", true, 2);
+ Optionpk<int> minSize_opt("min", "min", "If number of training pixels is less than min, do not take this class into account (0: consider all classes)", 0);
+ Optionpk<double> start_opt("s", "start", "Start band sequence number",0);
+ Optionpk<double> end_opt("e", "end", "End band sequence number (set to 0 to include all bands)", 0);
+ Optionpk<short> band_opt("b", "band", "Band index (starting from 0, either use band option or use start to end)");
+ Optionpk<double> offset_opt("\0", "offset", "Offset value for each spectral band input features: refl[band]=(DN[band]-offset[band])/scale[band]", 0.0);
+ Optionpk<double> scale_opt("\0", "scale", "Scale value for each spectral band input features: refl=(DN[band]-offset[band])/scale[band] (use 0 to scale min and max in each band to -1.0 and 1.0)", 0.0);
+ Optionpk<double> priors_opt("p", "prior", "Prior probabilities for each class (e.g., -p 0.3 -p 0.3 -p 0.2 ). Used for input only (ignored for cross validation)", 0.0);
+ Optionpk<string> priorimg_opt("pim", "priorimg", "Prior probability image (multi-band image with a band for each class)","",2);
+ Optionpk<unsigned short> cv_opt("cv", "cv", "N-fold cross validation mode",0);
+ Optionpk<std::string> svm_type_opt("svmt", "svmtype", "Type of SVM (C_SVC, nu_SVC,one_class, epsilon_SVR, nu_SVR)","C_SVC");
+ Optionpk<std::string> kernel_type_opt("kt", "kerneltype", "Type of kernel function (linear,polynomial,radial,sigmoid) ","radial");
+ Optionpk<unsigned short> kernel_degree_opt("kd", "kd", "Degree in kernel function",3);
+ Optionpk<float> gamma_opt("g", "gamma", "Gamma in kernel function",1.0);
+ Optionpk<float> coef0_opt("c0", "coef0", "Coef0 in kernel function",0);
+ Optionpk<float> ccost_opt("cc", "ccost", "The parameter C of C_SVC, epsilon_SVR, and nu_SVR",1000);
+ Optionpk<float> nu_opt("nu", "nu", "The parameter nu of nu_SVC, one_class SVM, and nu_SVR",0.5);
+ Optionpk<float> epsilon_loss_opt("eloss", "eloss", "The epsilon in loss function of epsilon_SVR",0.1);
+ Optionpk<int> cache_opt("cache", "cache", "Cache memory size in MB",100);
+ Optionpk<float> epsilon_tol_opt("etol", "etol", "The tolerance of termination criterion",0.001);
+ Optionpk<bool> shrinking_opt("shrink", "shrink", "Whether to use the shrinking heuristics",false);
+ Optionpk<bool> prob_est_opt("pe", "probest", "Whether to train a SVC or SVR model for probability estimates",true,2);
+ // Optionpk<bool> weight_opt("wi", "wi", "Set the parameter C of class i to weight*C, for C_SVC",true);
+ Optionpk<unsigned short> comb_opt("comb", "comb", "How to combine bootstrap aggregation classifiers (0: sum rule, 1: product rule, 2: max rule). Also used to aggregate classes with rc option.",0);
Optionpk<unsigned short> bag_opt("bag", "bag", "Number of bootstrap aggregations", 1);
Optionpk<int> bagSize_opt("bs", "bsize", "Percentage of features used from available training features for each bootstrap aggregation (one size for all classes, or a different size for each class respectively", 100);
- Optionpk<string> classBag_opt("cb", "classbag", "output for each individual bootstrap aggregation");
- Optionpk<string> mask_opt("m", "mask", "mask image (support for single mask only, see also msknodata option)");
- Optionpk<short> msknodata_opt("msknodata", "msknodata", "mask value(s) not to consider for classification (use negative values if only these values should be taken into account). Values will be taken over in classification image.", 0);
- Optionpk<unsigned short> nodata_opt("nodata", "nodata", "nodata value to put where image is masked as nodata", 0);
- Optionpk<string> output_opt("o", "output", "output classification image");
+ Optionpk<string> classBag_opt("cb", "classbag", "Output for each individual bootstrap aggregation");
+ Optionpk<string> mask_opt("m", "mask", "Use the first band of the specified file as a validity mask. Nodata values can be set with the option msknodata.");
+ Optionpk<short> msknodata_opt("msknodata", "msknodata", "Mask value(s) not to consider for classification (use negative values if only these values should be taken into account). Values will be taken over in classification image.", 0);
+ Optionpk<unsigned short> nodata_opt("nodata", "nodata", "Nodata value to put where image is masked as nodata", 0);
+ Optionpk<string> output_opt("o", "output", "Output classification image");
Optionpk<string> oformat_opt("of", "oformat", "Output image format (see also gdal_translate). Empty string: inherit from input image");
Optionpk<string> option_opt("co", "co", "Creation option for output file. Multiple options can be specified.");
- Optionpk<string> colorTable_opt("ct", "ct", "colour table in ascii format having 5 columns: id R G B ALFA (0: transparent, 255: solid)");
- Optionpk<string> prob_opt("prob", "prob", "probability image.");
- Optionpk<string> entropy_opt("entropy", "entropy", "entropy image (measure for uncertainty of classifier output");
- Optionpk<string> active_opt("active", "active", "ogr output for active training sample.");
- Optionpk<string> ogrformat_opt("f", "f", "Output ogr format for active training sample","ESRI Shapefile");
- Optionpk<unsigned int> nactive_opt("na", "nactive", "number of active training points",1);
- Optionpk<string> classname_opt("c", "class", "list of class names.");
- Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use same order as in class opt).");
- Optionpk<short> verbose_opt("v", "verbose", "set to: 0 (results only), 1 (confusion matrix), 2 (debug)",0);
+ Optionpk<string> colorTable_opt("ct", "ct", "Color table in ASCII format having 5 columns: id R G B ALFA (0: transparent, 255: solid)");
+ Optionpk<string> prob_opt("prob", "prob", "Probability image.");
+ Optionpk<string> entropy_opt("entropy", "entropy", "Entropy image (measure for uncertainty of classifier output)","",2);
+ Optionpk<string> active_opt("active", "active", "Ogr output for active training sample.","",2);
+ Optionpk<string> ogrformat_opt("f", "f", "Output ogr format for active training sample","SQLite");
+ Optionpk<unsigned int> nactive_opt("na", "nactive", "Number of active training points",1);
+ Optionpk<string> classname_opt("c", "class", "List of class names.");
+ Optionpk<short> classvalue_opt("r", "reclass", "List of class values (use same order as in class opt).");
+ Optionpk<short> verbose_opt("v", "verbose", "Verbose level",0);
bool doProcess;//stop process when program was invoked with help option (-h --help)
try{
- doProcess=input_opt.retrieveOption(argc,argv);
- training_opt.retrieveOption(argc,argv);
+ doProcess=training_opt.retrieveOption(argc,argv);
tlayer_opt.retrieveOption(argc,argv);
+ input_opt.retrieveOption(argc,argv);
+ output_opt.retrieveOption(argc,argv);
+ cv_opt.retrieveOption(argc,argv);
+ classname_opt.retrieveOption(argc,argv);
+ classvalue_opt.retrieveOption(argc,argv);
+ oformat_opt.retrieveOption(argc,argv);
+ ogrformat_opt.retrieveOption(argc,argv);
+ option_opt.retrieveOption(argc,argv);
+ colorTable_opt.retrieveOption(argc,argv);
label_opt.retrieveOption(argc,argv);
- balance_opt.retrieveOption(argc,argv);
- random_opt.retrieveOption(argc,argv);
- minSize_opt.retrieveOption(argc,argv);
+ priors_opt.retrieveOption(argc,argv);
+ gamma_opt.retrieveOption(argc,argv);
+ ccost_opt.retrieveOption(argc,argv);
+ mask_opt.retrieveOption(argc,argv);
+ msknodata_opt.retrieveOption(argc,argv);
+ nodata_opt.retrieveOption(argc,argv);
+ band_opt.retrieveOption(argc,argv);
start_opt.retrieveOption(argc,argv);
end_opt.retrieveOption(argc,argv);
- band_opt.retrieveOption(argc,argv);
+ balance_opt.retrieveOption(argc,argv);
+ minSize_opt.retrieveOption(argc,argv);
+ bag_opt.retrieveOption(argc,argv);
+ bagSize_opt.retrieveOption(argc,argv);
+ comb_opt.retrieveOption(argc,argv);
+ classBag_opt.retrieveOption(argc,argv);
+ prob_opt.retrieveOption(argc,argv);
+ priorimg_opt.retrieveOption(argc,argv);
offset_opt.retrieveOption(argc,argv);
scale_opt.retrieveOption(argc,argv);
- priors_opt.retrieveOption(argc,argv);
- priorimg_opt.retrieveOption(argc,argv);
svm_type_opt.retrieveOption(argc,argv);
kernel_type_opt.retrieveOption(argc,argv);
kernel_degree_opt.retrieveOption(argc,argv);
- gamma_opt.retrieveOption(argc,argv);
coef0_opt.retrieveOption(argc,argv);
- ccost_opt.retrieveOption(argc,argv);
nu_opt.retrieveOption(argc,argv);
epsilon_loss_opt.retrieveOption(argc,argv);
cache_opt.retrieveOption(argc,argv);
epsilon_tol_opt.retrieveOption(argc,argv);
shrinking_opt.retrieveOption(argc,argv);
prob_est_opt.retrieveOption(argc,argv);
- cv_opt.retrieveOption(argc,argv);
- comb_opt.retrieveOption(argc,argv);
- bag_opt.retrieveOption(argc,argv);
- bagSize_opt.retrieveOption(argc,argv);
- classBag_opt.retrieveOption(argc,argv);
- mask_opt.retrieveOption(argc,argv);
- msknodata_opt.retrieveOption(argc,argv);
- nodata_opt.retrieveOption(argc,argv);
- output_opt.retrieveOption(argc,argv);
- oformat_opt.retrieveOption(argc,argv);
- colorTable_opt.retrieveOption(argc,argv);
- option_opt.retrieveOption(argc,argv);
- prob_opt.retrieveOption(argc,argv);
entropy_opt.retrieveOption(argc,argv);
active_opt.retrieveOption(argc,argv);
- ogrformat_opt.retrieveOption(argc,argv);
nactive_opt.retrieveOption(argc,argv);
- classname_opt.retrieveOption(argc,argv);
- classvalue_opt.retrieveOption(argc,argv);
verbose_opt.retrieveOption(argc,argv);
+ random_opt.retrieveOption(argc,argv);
}
catch(string predefinedString){
std::cout << predefinedString << std::endl;
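
The offset and scale options above document the per-band normalisation refl[band]=(DN[band]-offset[band])/scale[band], with scale 0 meaning that each band is rescaled to the range -1.0 to 1.0. As an illustration only; the exact auto-scaling used by pksvm is not shown in this hunk, so the min/max mapping below is an assumption:

    // Assumed per-band normalisation; the scale==0 auto-scaling to [-1,1] is an interpretation.
    #include <cstddef>
    #include <vector>

    void normaliseBand(std::vector<double>& dn, double offset, double scale,
                       double bandMin, double bandMax) {
      if (scale == 0) {                        // assumed: map [bandMin,bandMax] onto [-1,1]
        offset = 0.5 * (bandMax + bandMin);
        scale  = 0.5 * (bandMax - bandMin);
      }
      for (std::size_t i = 0; i < dn.size(); ++i)
        dn[i] = (dn[i] - offset) / scale;      // refl = (DN - offset) / scale
    }
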
@@ -154,6 +154,14 @@ int main(int argc, char *argv[])
exit(0);//help was invoked, stop processing
}
+ if(entropy_opt[0]=="")
+ entropy_opt.clear();
+ if(active_opt[0]=="")
+ active_opt.clear();
+ if(priorimg_opt[0]=="")
+ priorimg_opt.clear();
+
+
std::map<std::string, svm::SVM_TYPE> svmMap;
svmMap["C_SVC"]=svm::C_SVC;
@@ -429,7 +437,7 @@ int main(int argc, char *argv[])
exit(1);
}
if(classname_opt.empty()){
- std::cerr << "Warning: no class name and value pair provided for all " << nclass << " classes, using string2type<int> instead!" << std::endl;
+ //std::cerr << "Warning: no class name and value pair provided for all " << nclass << " classes, using string2type<int> instead!" << std::endl;
for(int iclass=0;iclass<nclass;++iclass){
if(verbose_opt[0])
std::cout << iclass << " " << cm.getClass(iclass) << " -> " << string2type<short>(cm.getClass(iclass)) << std::endl;
@@ -653,7 +661,7 @@ int main(int argc, char *argv[])
if(verbose_opt[0]>=1)
std::cout << "opening class image for writing output " << output_opt[0] << std::endl;
if(classBag_opt.size()){
- classImageBag.open(output_opt[0],ncol,nrow,nbag,GDT_Byte,imageType,option_opt);
+ classImageBag.open(classBag_opt[0],ncol,nrow,nbag,GDT_Byte,imageType,option_opt);
classImageBag.GDALSetNoDataValue(nodata_opt[0]);
classImageBag.copyGeoTransform(testImage);
classImageBag.setProjection(testImage.getProjection());
diff --git a/src/imageclasses/ImgReaderGdal.h b/src/imageclasses/ImgReaderGdal.h
index 2d941a6..5d4fceb 100644
--- a/src/imageclasses/ImgReaderGdal.h
+++ b/src/imageclasses/ImgReaderGdal.h
@@ -38,6 +38,7 @@ public:
~ImgReaderGdal(void);
void open(const std::string& filename);//, double magicX=1, double magicY=1);
void close(void);
+ std::string getFileName() const {return m_filename;};
int nrOfCol(void) const { return m_ncol;};
int nrOfRow(void) const { return m_nrow;};
int nrOfBand(void) const { return m_nband;};
@@ -80,6 +81,7 @@ public:
int getNoDataValues(std::vector<double>& noDataValues) const;
bool isNoData(double value) const{if(m_noDataValues.empty()) return false;else return find(m_noDataValues.begin(),m_noDataValues.end(),value)!=m_noDataValues.end();};
int pushNoDataValue(double noDataValue);
+ void setNoData(const std::vector<double> nodata){m_noDataValues=nodata;};
CPLErr GDALSetNoDataValue(double noDataValue, int band=0) {return getRasterBand(band)->SetNoDataValue(noDataValue);};
bool covers(double x, double y) const;
bool covers(double ulx, double uly, double lrx, double lry) const;
@@ -186,6 +188,7 @@ template<typename T> void ImgReaderGdal::readData(std::vector<T>& buffer, const
template<typename T> void ImgReaderGdal::readData(std::vector<T>& buffer, const GDALDataType& dataType , int minCol, int maxCol, double row, int band, RESAMPLE resample) const
{
+ //todo: make upper and lower row depend on isGeo...
std::vector<T> readBuffer_upper;
std::vector<T> readBuffer_lower;
if(buffer.size()!=maxCol-minCol+1)
diff --git a/src/imageclasses/ImgWriterGdal.h b/src/imageclasses/ImgWriterGdal.h
index a90f586..9cfc9a0 100644
--- a/src/imageclasses/ImgWriterGdal.h
+++ b/src/imageclasses/ImgWriterGdal.h
@@ -39,6 +39,7 @@ public:
// void open(const std::string& filename, int ncol, int nrow, int nband, const GDALDataType& dataType, const std::string& imageType="GTiff", const std::string& interleave="BAND", const std::string& compression="LZW", int magicX=1, int magicY=1);
void open(const std::string& filename, int ncol, int nrow, int nband, const GDALDataType& dataType, const std::string& imageType, const std::vector<std::string>& options=std::vector<std::string>());
void close(void);
+ std::string getFileName() const {return m_filename;};
int nrOfCol(void) const { return m_ncol;};
int nrOfRow(void) const { return m_nrow;};
int nrOfBand(void) const { return m_nband;};
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-grass/pktools.git