[med-svn] [libseqlib] 01/02: New upstream version 1.1.1
Andreas Tille
tille at debian.org
Fri Jan 27 14:20:49 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository libseqlib.
commit 947580479620f0b6727e22de3538baf946c13310
Author: Andreas Tille <tille at debian.org>
Date: Fri Jan 27 15:18:28 2017 +0100
New upstream version 1.1.1
---
.gitignore | 35 +
.gitmodules | 11 +
.travis.scripts/builddox.sh | 1 +
.travis.scripts/clang.sh | 12 +
.travis.scripts/coveralls.sh | 35 +
.travis.scripts/publish-doxygen.sh | 32 +
.travis.scripts/travis-before-install.sh | 24 +
.travis.scripts/travis-install.sh | 21 +
.travis.yml | 66 +
Doxyfile | 1274 ++++++
LICENSE | 13 +
Makefile.am | 8 +
Makefile.in | 778 ++++
README.md | 357 ++
SeqLib/BFC.h | 169 +
SeqLib/BWAWrapper.h | 197 +
SeqLib/BamHeader.h | 130 +
SeqLib/BamReader.h | 279 ++
SeqLib/BamRecord.h | 863 ++++
SeqLib/BamWalker.h | 91 +
SeqLib/BamWriter.h | 114 +
SeqLib/FastqReader.h | 59 +
SeqLib/FermiAssembler.h | 147 +
SeqLib/GenomicRegion.h | 174 +
SeqLib/GenomicRegionCollection.cpp | 701 ++++
SeqLib/GenomicRegionCollection.h | 313 ++
SeqLib/IntervalTree.h | 245 ++
SeqLib/ReadFilter.h | 576 +++
SeqLib/RefGenome.h | 57 +
SeqLib/SeqLibCommon.h | 76 +
SeqLib/SeqLibUtils.h | 160 +
SeqLib/SeqPlot.h | 106 +
SeqLib/UnalignedSequence.h | 60 +
SeqLib/aho_corasick.hpp | 596 +++
SeqLib/ssw.h | 188 +
SeqLib/ssw_cpp.h | 219 +
autogen.sh | 7 +
benchmark/Makefile | 17 +
benchmark/benchmark.cpp | 208 +
config.h.in | 58 +
configure | 6322 ++++++++++++++++++++++++++++
configure.ac | 46 +
depcomp | 530 +++
install-sh | 323 ++
issue_tracking.md | 83 +
json/json-forwards.h | 321 ++
json/json.h | 2135 ++++++++++
missing | 376 ++
seq_test/Makefile.am | 23 +
seq_test/Makefile.in | 889 ++++
seq_test/config.h.in | 58 +
seq_test/config.status | 1206 ++++++
seq_test/configure | 6764 ++++++++++++++++++++++++++++++
seq_test/configure.ac | 57 +
seq_test/seq_test.cpp | 1575 +++++++
src/.clang-format | 19 +
src/BFC.cpp | 420 ++
src/BWAWrapper.cpp | 673 +++
src/BamHeader.cpp | 223 +
src/BamReader.cpp | 381 ++
src/BamRecord.cpp | 687 +++
src/BamWriter.cpp | 148 +
src/FastqReader.cpp | 59 +
src/FermiAssembler.cpp | 177 +
src/GenomicRegion.cpp | 275 ++
src/Makefile.am | 7 +
src/Makefile.in | 679 +++
src/ReadFilter.cpp | 873 ++++
src/RefGenome.cpp | 61 +
src/SeqPlot.cpp | 85 +
src/jsoncpp.cpp | 5247 +++++++++++++++++++++++
src/non_api/BamStats.cpp | 111 +
src/non_api/BamStats.h | 86 +
src/non_api/Fractions.cpp | 80 +
src/non_api/Fractions.h | 46 +
src/non_api/Histogram.cpp | 220 +
src/non_api/Histogram.h | 171 +
src/non_api/STCoverage.cpp | 178 +
src/non_api/STCoverage.h | 88 +
src/non_api/snowtools.cpp | 139 +
src/seqtools/Makefile.am | 4 +
src/seqtools/Makefile.in | 592 +++
src/seqtools/seqtools.cpp | 351 ++
src/ssw.c | 884 ++++
src/ssw_cpp.cpp | 477 +++
85 files changed, 42626 insertions(+)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..06df383
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,35 @@
+docs
+[#]*[#]
+.\#*
+*~
+*.csv
+*.o
+*.gif
+*.gcda
+*.gcno
+*.gcov
+.*
+Test
+autom4te.cache
+.Rhistory
+.in
+.deps*
+latex
+config.log
+tmp*
+*.a
+example*
+tabix
+htsfile
+bgzip
+back*
+*.m4
+*.bam
+stamp-h1
+config.status
+Makefile
+*.pico
+benchmark
+config.status
+seqtools
+config.h
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..7efde03
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,11 @@
+[submodule "fermi-lite"]
+ path = fermi-lite
+ url = https://github.com/jwalabroad/fermi-lite
+[submodule "htslib"]
+ path = htslib
+ url = https://github.com/walaj/htslib
+ branch = develop
+[submodule "bwa"]
+ path = bwa
+ url = https://github.com/jwalabroad/bwa
+ branch = Apache2
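
The .gitmodules above pins fermi-lite, htslib, and bwa as git submodules, so the library tree is only complete once they are fetched. A minimal sketch of pulling them in (standard git commands; the clone URL is taken from the Doxyfile's REPO_PATH further down):

    # clone with submodules in one step
    git clone --recursive https://github.com/walaj/SeqLib.git
    # or, for a tree that was cloned without --recursive:
    git submodule update --init --recursive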
diff --git a/.travis.scripts/builddox.sh b/.travis.scripts/builddox.sh
new file mode 100644
index 0000000..a9bf588
--- /dev/null
+++ b/.travis.scripts/builddox.sh
@@ -0,0 +1 @@
+#!/bin/bash
diff --git a/.travis.scripts/clang.sh b/.travis.scripts/clang.sh
new file mode 100755
index 0000000..6d56fe9
--- /dev/null
+++ b/.travis.scripts/clang.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+## borrowed from Gamgee project: https://github.com/broadinstitute/gamgee/blob/master/.travis_scripts/clang.sh
+
+echo "...building with clang"
+
+wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
+sudo apt-add-repository 'deb http://llvm.org/apt/precise/ llvm-toolchain-precise-3.5 main'
+sudo apt-get -qq update
+sudo apt-get -qq --force-yes install clang-3.5 clang-modernize-3.5 # clang-format-3.5
+sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-3.5 1
+sudo rm /usr/local/clang-3.4/bin/clang++
diff --git a/.travis.scripts/coveralls.sh b/.travis.scripts/coveralls.sh
new file mode 100755
index 0000000..bc0a955
--- /dev/null
+++ b/.travis.scripts/coveralls.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+echo "...running unit tests and code coverage"
+
+if [ "$COMPILER" == "g++-4.9" ] && [ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ];
+then
+ ## only install if not on home environment (eg travis ci)
+ if [ -z "$REFHG19" ]; then
+ sudo pip install cpp-coveralls
+ fi
+ cd seq_test
+
+ ## download the latest matched gcov
+ sudo update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-4.9 90
+ sudo ln -sf /usr/bin/gcov-4.9 /usr/bin/gcov
+ GCOV_VERSION=`gcov --version`
+ echo "GCOV version $GCOV_VERSION"
+
+ ## download the test data
+ mkdir test_data
+ cd test_data
+ wget -r -nH -nd -np -R index.html* https://data.broadinstitute.org/snowman/SeqLibTest/
+ cd ..
+
+ export LD_LIBRARY_PATH=${BOOST_ROOT}/lib:${LD_LIBRARY_PATH}
+ echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
+ ./configure --with-boost=${BOOST_ROOT}
+ make CXXFLAGS='-DHAVE_C11=1 -std=c++11' CXX=$COMPILER
+ ./seq_test 1> stdout.log 2> stderr.log
+ tail stderr.log
+
+ EXCL="-e src/non_api -e seq_test/seq_test.cpp -e htslib -e bwa -e fermi-lite -e config.h -e seq_test/config.h -e seq_test/config.h -e src/jsoncpp.cpp -e src/json -e src/SeqLib/ssw.h -e src/SeqLib/ssw_cpp.h -e src/ssw.c -e src/ssw_cpp.cpp -e SeqLib/aho_corasick.hpp -e json/json.h -e SeqLib/ssw.h -e SeqLib/ssw_cpp.h"
+ cpp-coveralls -r ../ -t ${COVERALLS_TOKEN} ${EXCL} ##--dryrun
+ cd ..
+fi
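
coveralls.sh above only runs on the g++-4.9 master build, but the unit-test steps it performs can be reproduced locally. A minimal sketch, assuming Boost is installed under /usr/local (adjust --with-boost to your prefix) and the test data has already been downloaded into seq_test/test_data as in the script:

    cd seq_test
    ./configure --with-boost=/usr/local
    make CXXFLAGS='-DHAVE_C11=1 -std=c++11'
    ./seq_test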
diff --git a/.travis.scripts/publish-doxygen.sh b/.travis.scripts/publish-doxygen.sh
new file mode 100755
index 0000000..f3591d4
--- /dev/null
+++ b/.travis.scripts/publish-doxygen.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+## only build for one compiler
+if [ "$COMPILER" == "g++-4.9" ] && [ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ];
+then
+
+ echo -e "Downloading latest Doxygen...";
+ cd ${HOME};
+ wget ftp://gsapubftp-anonymous@ftp.broadinstitute.org/travis/doxygen_1.8.8-1_amd64.deb
+ sudo dpkg --install doxygen_1.8.8-1_amd64.deb
+ cd ${HOME}/build/walaj/SeqLib;
+ doxygen
+
+ echo -e "Publishing doxygen...\n";
+ git config --global user.email "travis@travis-ci.org";
+ git config --global user.name "travis-ci";
+ git clone --branch=gh-pages https://${GH_TOKEN}@github.com/walaj/SeqLib gh-pages;
+ cd gh-pages;
+ rm -rf doxygen/;
+ mv ../docs/html doxygen/;
+ echo "...listing doc"
+ p=`pwd`
+ echo "...current location ${pwd}"
+ cd doxygen && ls && cd ..
+ git add doxygen/;
+ git commit -am "Latest doxygen documentation on successful travis build ${TRAVIS_BUILD_NUMBER} auto-pushed";
+ git push origin gh-pages
+
+ echo -e "Published doxygen.\n"
+
+fi
+#fi
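
publish-doxygen.sh regenerates the API documentation and pushes it to the gh-pages branch. Generating the same HTML locally is just the middle step; a sketch assuming doxygen is on the PATH and you are in the repository root that holds the Doxyfile:

    doxygen            # per the Doxyfile below: OUTPUT_DIRECTORY = docs, HTML_OUTPUT = html
    ls docs/html       # the generated pages land here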
diff --git a/.travis.scripts/travis-before-install.sh b/.travis.scripts/travis-before-install.sh
new file mode 100755
index 0000000..2f2cd93
--- /dev/null
+++ b/.travis.scripts/travis-before-install.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+#set -x
+set -e
+set -o pipefail
+
+echo "CXX: $CXX TRAVIS_BRANCH $TRAVIS_BRANCH CC $CC TRAVIS_OS_NAME $TRAVIS_OS_NAME"
+
+## only build for one compiler
+if [ "$COMPILER" == "g++-4.9" ] && [ "$TRAVIS_BRANCH" == "master" ];
+then
+
+ if [ "${TRAVIS_OS_NAME}" = "osx" ]; then
+ brew update
+ fi
+
+ if [ -n "${BOOST_VERSION}" ]; then
+ mkdir -p $BOOST_ROOT
+ wget --no-verbose --output-document=- \
+ http://sourceforge.net/projects/boost/files/boost/${BOOST_VERSION}/boost_${BOOST_VERSION//./_}.tar.bz2/download \
+ | tar jxf - --strip-components=1 -C "${BOOST_ROOT}"
+ fi
+
+fi
diff --git a/.travis.scripts/travis-install.sh b/.travis.scripts/travis-install.sh
new file mode 100755
index 0000000..e73e680
--- /dev/null
+++ b/.travis.scripts/travis-install.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+set -e
+set -o pipefail
+
+## print info about compiler
+TC=`$COMPILER --version`
+echo "True compiler is $TC"
+
+## only build for one compiler
+#VALID=`g++ --version | grep 4.9 | wc -l`
+#if [[ "$VALID" -eq "0" ]]; then
+# exit 0;
+#fi
+
+if [ -d "${BOOST_ROOT}" ]; then
+ (cd "${BOOST_ROOT}"
+ ./bootstrap.sh --with-libraries="${BOOST_LIBS}"
+ ./b2 threading=multi --prefix="${BOOST_ROOT}" -d0 install
+ )
+fi
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..f943a12
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,66 @@
+language: cpp
+
+matrix:
+ include:
+ - compiler: gcc
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-4.9
+ env: COMPILER=g++-4.9
+ - compiler: gcc
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ packages:
+ - g++-5
+ env: COMPILER=g++-5
+ - compiler: clang
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.6
+ packages:
+ - clang-3.6
+ env: COMPILER=clang++-3.6
+ - compiler: clang
+ addons:
+ apt:
+ sources:
+ - ubuntu-toolchain-r-test
+ - llvm-toolchain-precise-3.7
+ packages:
+ - clang-3.7
+ env: COMPILER=clang++-3.7
+
+env:
+ global:
+ - BOOST_VERSION_MIN="1.59.0"
+ - BOOST_VERSION_MAX="1.60.0" # List of required boost libraries to build
+ - BOOST_LIBS="test,system,chrono"
+ - secure: c7M47d76dYyjOsYiRWdJ7DaKyV6qo9tUk1l+QlSTX5AgfWGGIsI3eWxHrSid/b1PXTVCOQTBwJKlM13gNPO5kLEAxrT/Bl8rtFUQdBwqA5gZaY1Yyt7h51hRR9GADwUY4lmudLBiPRDp8Pd3WSiP+gOud7hA8U/AP0BPBfbhDkhvUC6GqvHzEp28NIHB0nhOv4r4yl7C2UeNk358ipDmYqs89m17TNTmbReYjqiaswYSkhCjikUT33QyDTLwl502NBRIJZpZZZ3imxorSM4A1uQmuq5Ggx4PyODgt8MpzH/bCHeamNFzYCi/JSh+MsuOv6tD6xsM9p1MzI5Fg7eQk+FtdmfQhgyTYjg24Ugyn0rrnu2i26FjLqkX3jqdgIldncoerJZly92+M0wRv4IrjjQLCrNLT2Wv4WRElKR5W2dRSnKBT/vIrfJQU9HwYPvISLS3yhpm9NcZnP09Mfoe3xU1mrWnSfbs [...]
+ - secure: IR0O5HtMRDbEj0x7wHqXuLADszJcXLBUHeuMqm9K9oqzs0ne/ZAqNBODQVLvxAwcxsLRgiuBCuCTuJdalxusycZuAn5tD43RhAv6OhWIG4w+jh6elr2jmLsMb2VxTXto/VX1wQ4X9lLC4+q9p/vlZzIxwza8ZN//jGIlHNx7h896+euEIBy4F28yqk6Ua3V4Td5mg99uVWjiHeIiwIOmE+ClrYfdFXzLdWKDBmmkZTqPRfw16iVaSBKpkSd9rneQoKSyalFx0OGURADxabO1N5uTCRlK06JTx5220zma29w9BuhniaNmxbjf7gD1pcFZr7da7dZekKaxXy6d0nu/hci7UNsolti13a6bQCLj4AjxjY6GxGjX9dZxtjGvIPxiJKIDfQznwyfiaQYFhLb+Ieyv67lN6quDTuVuRdxxqnQprOz3JAvzTbyHatcKvvDfgWcxUGUCmOiLAWiQan8RuxZHt/+8kCQn [...]
+ - COVERALLS_TOKEN="o70uSbs4NONhUnVEAYj7bo4Go5kQ3e8na"
+
+before_install:
+- sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y
+- sudo apt-get -qq update
+- if [ "${TRAVIS_BRANCH}" = "master" ]; then export BOOST_VERSION="${BOOST_VERSION_MIN}"; else export BOOST_VERSION="${BOOST_VERSION_MAX}"; fi
+- if [ -n "${BOOST_VERSION}" ]; then export BOOST_ROOT="${TRAVIS_BUILD_DIR}/../boost-trunk"; export CMAKE_MODULE_PATH="${BOOST_ROOT}"; fi
+- if [ "${TRAVIS_OS_NAME}" = "osx" -a "${CXX}" = "clang++" ]; then export CXX="$(which c++)"; export CC="$(which cc)"; fi
+- .travis.scripts/travis-before-install.sh
+
+script:
+- .travis.scripts/travis-install.sh
+- ./configure && make CXX=$COMPILER
+
+after_success:
+- .travis.scripts/coveralls.sh
+
+branches:
+ only:
+ - master
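
The .travis.yml script step is simply "./configure && make CXX=$COMPILER", run once per matrix compiler. A local build follows the same pattern; a sketch assuming any C++11-capable compiler (g++ is an arbitrary choice here):

    ./configure
    make CXX=g++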
diff --git a/Doxyfile b/Doxyfile
new file mode 100644
index 0000000..e2cdf53
--- /dev/null
+++ b/Doxyfile
@@ -0,0 +1,1274 @@
+# Doxyfile 1.4.7
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = SeqLib
+
+PROJECT_BRIEF = "C++ htslib/bwa-mem interface and command line tools for interrogating sequence data"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 0.0
+
+REPO_PATH = https://github.com/walaj/SeqLib.git
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = docs
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish,
+# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese,
+# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish,
+# Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# This tag can be used to specify the encoding used in the generated output.
+# The encoding is not always determined by the language that is chosen,
+# but also whether or not the output is meant for Windows or non-Windows users.
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES
+# forces the Windows encoding (this is the default for the Windows binary),
+# whereas setting the tag to NO uses a Unix-style encoding (the default for
+# all platforms other than Windows).
+
+USE_WINDOWS_ENCODING = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF = YES
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for Java.
+# For instance, namespaces will be presented as packages, qualified scopes
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to
+# include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = YES
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from the
+# version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = /xchip/gistic/Jeremiah/GIT/SeqLib
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
+
+FILE_PATTERNS = *.h
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE = /xchip/gistic/Jeremiah/GIT/SeqLib/bwa \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/figs \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/htslib \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/blat \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/src/non_api \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/fermi-lite \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/json \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/SeqLib/ssw.h \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/SeqLib/ssw_cpp.h \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/SeqLib/seq_test \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/SeqLib/.git \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/SeqLib/.travis.scripts \
+ /xchip/gistic/Jeremiah/GIT/SeqLib/SeqLib/README.md
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+EXCLUDE_SYMBOLS = atm_free_delete bam_hdr_delete bgzf_delete s_align sam_write_delete \
+ idx_delete hts_itr_delete htsFile_delete _Bam PlottedRead PlottedReadLine
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = */.git/* \
+ */gzstream.h \
+ */gzstream.C \
+ */IntervalTree.h
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a caller dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that a graph may be further truncated if the graph's
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default),
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..4f796e1
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2016 Jeremiah A. Wala
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..4739c4f
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,8 @@
+AUTOMAKE_OPTIONS = foreign
+SUBDIRS = bwa htslib fermi-lite src
+
+install:
+ mkdir -p bin && cp src/libseqlib.a fermi-lite/libfml.a bwa/libbwa.a htslib/libhts.a bin
+
+seqtools:
+ mkdir -p bin && cd src/seqtools && make && mv seqtools ../../bin
diff --git a/Makefile.in b/Makefile.in
new file mode 100644
index 0000000..630f9b8
--- /dev/null
+++ b/Makefile.in
@@ -0,0 +1,778 @@
+# Makefile.in generated by automake 1.15 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2014 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+subdir = .
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \
+ $(am__configure_deps) $(am__DIST_COMMON)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno config.status.lineno
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ cscope distdir dist dist-all distcheck
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \
+ $(LISP)config.h.in
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+CSCOPE = cscope
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/config.h.in compile \
+ depcomp install-sh missing
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+ if test -d "$(distdir)"; then \
+ find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+ && rm -rf "$(distdir)" \
+ || { sleep 5 && rm -rf "$(distdir)"; }; \
+ else :; fi
+am__post_remove_distdir = $(am__remove_distdir)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+DIST_TARGETS = dist-gzip
+distuninstallcheck_listfiles = find . -type f -print
+am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
+ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$'
+distcleancheck_listfiles = find . -type f -print
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CXXFLAGS = @AM_CXXFLAGS@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build_alias = @build_alias@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host_alias = @host_alias@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+SUBDIRS = bwa htslib fermi-lite src
+all: config.h
+ $(MAKE) $(AM_MAKEFLAGS) all-recursive
+
+.SUFFIXES:
+am--refresh: Makefile
+ @:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
+ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ echo ' $(SHELL) ./config.status'; \
+ $(SHELL) ./config.status;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ $(am__cd) $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+$(am__aclocal_m4_deps):
+
+config.h: stamp-h1
+ @test -f $@ || rm -f stamp-h1
+ @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
+
+stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
+ @rm -f stamp-h1
+ cd $(top_builddir) && $(SHELL) ./config.status config.h
+$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ ($(am__cd) $(top_srcdir) && $(AUTOHEADER))
+ rm -f stamp-h1
+ touch $@
+
+distclean-hdr:
+ -rm -f config.h stamp-h1
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscope: cscope.files
+ test ! -s cscope.files \
+ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
+clean-cscope:
+ -rm -f cscope.files
+cscope.files: clean-cscope cscopelist
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+ -rm -f cscope.out cscope.in.out cscope.po.out cscope.files
+
+distdir: $(DISTFILES)
+ $(am__remove_distdir)
+ test -d "$(distdir)" || mkdir "$(distdir)"
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+ -test -n "$(am__skip_mode_fix)" \
+ || find "$(distdir)" -type d ! -perm -755 \
+ -exec chmod u+rwx,go+rx {} \; -o \
+ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
+ || chmod -R a+r "$(distdir)"
+dist-gzip: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__post_remove_distdir)
+
+dist-bzip2: distdir
+ tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
+ $(am__post_remove_distdir)
+
+dist-lzip: distdir
+ tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
+ $(am__post_remove_distdir)
+
+dist-xz: distdir
+ tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
+ $(am__post_remove_distdir)
+
+dist-tarZ: distdir
+ @echo WARNING: "Support for distribution archives compressed with" \
+ "legacy program 'compress' is deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
+ tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+ $(am__post_remove_distdir)
+
+dist-shar: distdir
+ @echo WARNING: "Support for shar distribution archives is" \
+ "deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
+ shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
+ $(am__post_remove_distdir)
+
+dist-zip: distdir
+ -rm -f $(distdir).zip
+ zip -rq $(distdir).zip $(distdir)
+ $(am__post_remove_distdir)
+
+dist dist-all:
+ $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:'
+ $(am__post_remove_distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ case '$(DIST_ARCHIVES)' in \
+ *.tar.gz*) \
+ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\
+ *.tar.bz2*) \
+ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
+ *.tar.lz*) \
+ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
+ *.tar.xz*) \
+ xz -dc $(distdir).tar.xz | $(am__untar) ;;\
+ *.tar.Z*) \
+ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+ *.shar.gz*) \
+ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\
+ *.zip*) \
+ unzip $(distdir).zip ;;\
+ esac
+ chmod -R a-w $(distdir)
+ chmod u+w $(distdir)
+ mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst
+ chmod a-w $(distdir)
+ test -d $(distdir)/_build || exit 0; \
+ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+ && am__cwd=`pwd` \
+ && $(am__cd) $(distdir)/_build/sub \
+ && ../../configure \
+ $(AM_DISTCHECK_CONFIGURE_FLAGS) \
+ $(DISTCHECK_CONFIGURE_FLAGS) \
+ --srcdir=../.. --prefix="$$dc_install_base" \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+ distuninstallcheck \
+ && chmod -R a-w "$$dc_install_base" \
+ && ({ \
+ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+ } || { rm -rf "$$dc_destdir"; exit 1; }) \
+ && rm -rf "$$dc_destdir" \
+ && $(MAKE) $(AM_MAKEFLAGS) dist \
+ && rm -rf $(DIST_ARCHIVES) \
+ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
+ && cd "$$am__cwd" \
+ || exit 1
+ $(am__post_remove_distdir)
+ @(echo "$(distdir) archives ready for distribution: "; \
+ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
+distuninstallcheck:
+ @test -n '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: trying to run $@ with an empty' \
+ '$$(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ $(am__cd) '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left after uninstall:" ; \
+ if test -n "$(DESTDIR)"; then \
+ echo " (check DESTDIR support)"; \
+ fi ; \
+ $(distuninstallcheck_listfiles) ; \
+ exit 1; } >&2
+distcleancheck: distclean
+ @if test '$(srcdir)' = . ; then \
+ echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+ exit 1 ; \
+ fi
+ @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left in build directory after distclean:" ; \
+ $(distcleancheck_listfiles) ; \
+ exit 1; } >&2
+check-am: all-am
+check: check-recursive
+all-am: Makefile config.h
+installdirs: installdirs-recursive
+installdirs-am:
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-hdr distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf $(top_srcdir)/autom4te.cache
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) all install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \
+ am--refresh check check-am clean clean-cscope clean-generic \
+ cscope cscopelist-am ctags ctags-am dist dist-all dist-bzip2 \
+ dist-gzip dist-lzip dist-shar dist-tarZ dist-xz dist-zip \
+ distcheck distclean distclean-generic distclean-hdr \
+ distclean-tags distcleancheck distdir distuninstallcheck dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-data install-data-am install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs installdirs-am \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \
+ uninstall-am
+
+.PRECIOUS: Makefile
+
+
+install:
+ mkdir -p bin && cp src/libseqlib.a fermi-lite/libfml.a bwa/libbwa.a htslib/libhts.a bin
+
+seqtools:
+ mkdir -p bin && cd src/seqtools && make && mv seqtools ../../bin
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d53feaf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,357 @@
+[![Build Status](https://travis-ci.org/walaj/SeqLib.svg?branch=master)](https://travis-ci.org/walaj/SeqLib)
+[![Coverage Status](https://coveralls.io/repos/github/walaj/SeqLib/badge.svg?branch=master)](https://coveralls.io/github/walaj/SeqLib?branch=master)
+
+C++ interface to HTSlib, BWA-MEM and Fermi
+
+**License:** [Apache2][license]
+
+API Documentation
+-----------------
+[API Documentation][htmldoc]
+
+Table of contents
+=================
+
+ * [Installation](#installation)
+ * [Integrating into build system](#integrating-into-build-system)
+ * [Description](#description)
+ * [Memory management](#memory-management)
+ * [Other C++ APIs](#other-c++-apis)
+ * [Command line usage](#command-line-usage)
+ * [Examples](#examples)
+ * [Attributions](#attributions)
+
+Installation
+------------
+
+```bash
+git clone --recursive https://github.com/walaj/SeqLib.git
+cd SeqLib
+## cd htslib && ./configure --enable-libcurl && cd .. # support for remote (FTP/HTTPS/Google etc) BAM access
+./configure ## or: ./configure LDFLAGS='-lcurl -lcrypto' # for remote support
+make ## for c++11 (req. for AhoCorasick), run as: make CXXFLAGS='-std=c++11'
+make install
+make seqtools ## for the command line version
+```
+
+I have successfully compiled with GCC-4.5+ and Clang on Linux and OSX.
+
+SeqLib is compatible with c++98 and later.
+
+Integrating into build system
+-----------------------------
+
+After building, you will need to add the relevant header directories:
+```bash
+SEQ=<path_to_seqlib_git_repos>
+C_INCLUDE_PATH=$C_INCLUDE_PATH:$SEQ:$SEQ/htslib
+```
+
+And you will need to link against the SeqLib static library and the Fermi, BWA and HTSlib libraries
+```bash
+SEQ=<path_to_seqlib>
+## link the static archives directly
+LDFLAGS="$LDFLAGS $SEQ/bin/libseqlib.a $SEQ/bin/libbwa.a $SEQ/bin/libfml.a $SEQ/bin/libhts.a"
+```
+
+To add support for reading BAMs over HTTPS, FTP, S3, Google Cloud, etc., you must compile and link with libcurl.
+```bash
+## set hts to build with libcurl links and hfile_libcurl.c
+cd SeqLib/htslib
+./configure --enable-libcurl
+## compile seqlib with libcurl support
+cd ../ # back to SeqLib main directory
+./configure LDFLAGS="-lcurl -lcrypto"
+make
+make install
+```
+Remember to then link any projects built with SeqLib with the additional ``-lcurl -lcrypto`` flags.
+
+Description
+-----------
+
+SeqLib is a C++ library for querying BAM/SAM/CRAM files, performing
+BWA-MEM operations in memory, and performing sequence assembly. Core operations
+in SeqLib are performed by:
+* [HTSlib][htslib]
+* [BWA-MEM][BWA] (Apache2 branch)
+* [FermiKit][fermi]
+
+The primary developer for these three projects is Heng Li.
+
+SeqLib also has support for storing and manipulating genomic intervals via ``GenomicRegion`` and ``GenomicRegionCollection``.
+It uses an [interval tree][int] (provided by Erik Garrison @ekg) to provide rapid interval queries.
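+
+As a rough sketch of how these containers support overlap queries (a hypothetical snippet: the `add`, `CreateTreeMap` and `FindOverlaps` calls are assumptions based on the API documentation and should be checked against `GenomicRegionCollection.h`):
+
+```
+#include "SeqLib/GenomicRegionCollection.h"
+using namespace SeqLib;
+
+// collect a few regions (chromosome given by its numeric ID in the BAM header)
+GenomicRegionCollection<GenomicRegion> grc;
+grc.add(GenomicRegion(0, 1000, 2000));
+grc.add(GenomicRegion(0, 5000, 6000));
+
+// build the interval tree once, then query it
+grc.CreateTreeMap();
+GenomicRegion query(0, 1500, 1600);
+GenomicRegionCollection<GenomicRegion> hits = grc.FindOverlaps(query, true); // true = ignore strand
+// hits now holds the stored regions that overlap the query
+```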
+
+SeqLib is built to be extendable. See [VariantBam][var] for examples of how to take advantage of C++
+class extensions to build off of the SeqLib base functionality.
+
+Memory management
+-----------------
+SeqLib handles the memory management of the underlying C code from BWA-MEM and HTSlib with C++ smart
+pointers that free memory automatically. One of the
+main motivations behind SeqLib is that all access to sequencing reads, BWA, etc. should
+completely avoid ``malloc`` and ``free``. In SeqLib, all mallocs/frees are handled automatically in the constructors and
+destructors.
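+
+For example, in the minimal sketch below (hypothetical function and file name, using the reader API shown in the examples further down), no explicit ``free`` or ``delete`` is needed; the underlying HTSlib structures are released when the objects go out of scope:
+
+```
+#include "SeqLib/BamReader.h"
+using namespace SeqLib;
+
+int count_reads() {
+  BamReader r;
+  r.Open("test.bam");        // opens the file and reads the header internally
+
+  BamRecord rec;             // record memory is managed internally
+  int n = 0;
+  while (r.GetNextRecord(rec))
+    ++n;
+  return n;
+}                            // r and rec release their HTSlib resources here
+```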
+
+Other C++ APIs
+------------------------------
+There are overlaps between this project and [BamTools][BT] from Derek Barnett, [Gamgee][gam]
+from the Broad Institute, and [SeqAn][seqan] from Freie Universitat Berlin. These projects
+provide excellent and high-quality APIs. SeqLib provides further performance enhancements and new capabilities for certain classes of
+bioinformatics problems.
+
+Some differences:
+* SeqLib has ~2-4x faster read/write speed than BamTools and SeqAn, and a lower memory footprint.
+* SeqLib has support for CRAM files.
+* SeqLib provides in-memory access to BWA-MEM, BLAT, a chromosome-aware interval tree and range operations, and to read correction and sequence assembly with Fermi. BamTools currently has more support for network access.
+* SeqAn provides a substantial amount of additional capabilities not in SeqLib, including graph operations and a more expansive suite of multi-sequence alignments.
+
+For your particular application, our hope is that SeqLib will provide a comprehensive and powerful environment to develop
+bioinformatics tools. Feature requests and comments are welcome.
+
+Command Line Usage
+------------------
+```bash
+## BFC correction (input mode -m is b (BAM/SAM/CRAM), output mode -w is SAM stream)
+samtools view in.bam -h 1:1,000,000-1,002,000 | seqtools bfc - -G $REF | samtools sort - -m 4g -o corrected.bam
+
+## Without a pipe, write to BAM
+seqtools bfc in.bam -G $REF -b > corrected.bam
+
+## Skip realignment, send to fasta
+seqtools bfc in.bam -f > corrected.fasta
+
+## Input as gzipped or plain fasta (or fastq), send to aligned BAM
+seqtools bfc --infasta in.fasta -G $REF -b > corrected.bam
+
+
+##### ASSEMBLY (same patterns as above)
+samtools view in.bam -h 1:1,000,000-1,002,000 | seqtools fml - -G $REF | samtools sort - -m 4g -o assembled.bam
+
+```
+
+Examples
+--------
+##### Targeted re-alignment of reads to a given region with BWA-MEM
+```
+#include "SeqLib/RefGenome.h"
+#include "SeqLib/BWAWrapper.h"
+using namespace SeqLib;
+RefGenome ref;
+ref.LoadIndex("hg19.fasta");
+
+// get sequence at given locus
+std::string seq = ref.QueryRegion("1", 1000000,1001000);
+
+// Make an in-memory BWA-MEM index of region
+BWAWrapper bwa;
+UnalignedSequenceVector usv = {{"chr_reg1", seq}};
+bwa.ConstructIndex(usv);
+
+// align an example string with BWA-MEM
+std::string querySeq = "CAGCCTCACCCAGGAAAGCAGCTGGGGGTCCACTGGGCTCAGGGAAG";
+BamRecordVector results;
+// hardclip=false, secondary score cutoff=0.9, max secondary alignments=10
+bwa.AlignSequence("my_seq", querySeq, results, false, 0.9, 10);
+
+// print results to stdout
+for (auto& i : results)
+ std::cout << i << std::endl;
+```
+
+##### Read a BAM line by line, realign reads with BWA-MEM, write to new BAM
+```
+#include "SeqLib/BamReader.h"
+#include "SeqLib/BWAWrapper.h"
+using namespace SeqLib;
+
+// open the reader BAM/SAM/CRAM
+BamReader bw;
+bw.Open("test.bam");
+
+// open a new interface to BWA-MEM
+BWAWrapper bwa;
+bwa.LoadIndex("hg19.fasta");
+
+// open the output BAM
+BamWriter writer; // or writer(SeqLib::SAM) or writer(SeqLib::CRAM)
+writer.SetWriteHeader(bwa.HeaderFromIndex());
+writer.Open("out.bam");
+writer.WriteHeader();
+
+BamRecord r;
+bool hardclip = false;
+float secondary_cutoff = 0.90; // secondary alignments must have score >= 0.9*top_score
+int secondary_cap = 10; // max number of secondary alignments to return
+while (bw.GetNextRecord(r)) {
+ BamRecordVector results; // alignment results (can have multiple alignments)
+ bwa.AlignSequence(r.Sequence(), r.Qname(), results, hardclip, secondary_cutoff, secondary_cap);
+
+ for (auto& i : results)
+ writer.WriteRecord(i);
+}
+```
+
+
+##### Perform sequence assembly with Fermi directly from a BAM
+```
+
+#include "SeqLib/FermiAssembler.h"
+using namespace SeqLib;
+
+FermiAssembler f;
+
+// read in data from a BAM
+BamReader br;
+br.Open("test_data/small.bam");
+
+// retrieve sequencing reads (up to 20,000)
+BamRecord r;
+BamRecordVector brv;
+size_t count = 0;
+while(br.GetNextRecord(r) && count++ < 20000)
+ brv.push_back(r);
+
+// add the reads and error correct them
+f.AddReads(brv);
+f.CorrectReads();
+
+// perform the assembly
+f.PerformAssembly();
+
+// retrieve the contigs
+std::vector<std::string> contigs = f.GetContigs();
+
+// write as a fasta to stdout
+for (size_t i = 0; i < contigs.size(); ++i)
+ std::cout << ">contig" << i << std::endl << contigs[i] << std::endl;
+```
+
+##### Plot a collection of gapped alignments
+```
+using namespace SeqLib;
+BamReader r;
+r.Open("test_data/small.bam");
+
+GenomicRegion gr("X:1,002,942-1,003,294", r.Header());
+r.SetRegion(gr);
+
+SeqPlot s;
+s.SetView(gr);
+
+BamRecord rec;
+BamRecordVector brv;
+while(r.GetNextRecord(rec))
+ if (!rec.CountNBases() && rec.MappedFlag())
+ brv.push_back(rec);
+s.SetPadding(20);
+
+std::cout << s.PlotAlignmentRecords(brv);
+```
+
+Trimmed output from above (NA12878):
+```
+CTATCTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTGTCCATCCATCCATCCATCCA
+CTATCTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTAT CATCCATCCATCCATCCATCCACCCATTCATCCATCCACCTATCCATCTATCAATCCATCCATCCATCCA
+ TATCTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTG----TCCATCCATCCATCCATCCACCCA
+ TATCTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTGTCCATCCATCCATCCATC
+ TATCTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTG----TCCATCCATCCATCCATCCACCCA
+ ATCTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTG----TCCATCCATCCATCCATCCACCCAT
+ TCTATCTATCTCTTCTTCTGTCCGCTCATGTGTCTGTCCATCTATCTATC GTCCATCCATCCATCCATCCATCCATCCACCCATTCATCCATCCACCTATCCATCTATCAATCCATCCATCCATCCATCCGTCTATCTTATGCATCACAGC
+ TCTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTG----TCCATCCATCCATCCATCCACCCATT
+ CTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTG----TCCATCCATCCATCCATCCACCCATTC
+ CTATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTG----TCCATCCATCCATCCATCCACCCATTC
+ ATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTG----TCCATCCATCCATCCATCCACCCATTCAT
+ ATCTATCTCTTCTTCTGTCCGTTCATGTGTCTGTCCATCTATCTATCCATCTATCTATCATCTAACTATCTG----TCCATCCATCCATCCATCCACCCATTCAT
+```
+
+##### Read simultaneously from a BAM, CRAM and SAM file and send to stdout
+```
+#include "SeqLib/BamReader.h"
+using namespace SeqLib;
+BamReader r;
+
+// read from multiple streams in coordinate-sorted order
+r.Open("test_data/small.bam");
+r.Open("test_data/small.cram");
+r.Open("test_data/small.sam");
+
+BamWriter w(SeqLib::SAM); // set uncompressed output
+w.Open("-"); // write to stdout
+w.SetHeader(r.Header()); // specify the header
+w.WriteHeader(); // write out the header
+
+BamRecord rec;
+while(r.GetNextRecord(rec))
+ w.WriteRecord(rec);
+w.Close(); // Optional. Will close on destruction
+```
+
+##### Perform error correction on reads, using [BFC][bfc]
+```
+#include "SeqLib/BFC.h"
+using namespace SeqLib;
+
+BFC b;
+
+// brv is some set of reads to train the error corrector
+b.TrainCorrection(brv);
+// brv2 is some set to correct
+b.ErrorCorrect(brv2);
+
+// retrieve the sequences
+UnalignedSequenceVector v;
+b.GetSequences(v);
+
+// alternatively, to train and correct the same set of reads
+b.TrainAndCorrect(brv);
+b.GetSequences(v);
+
+// alternatively, train and correct, and modify the sequence in-place
+b.TrainCorrection(brv);
+b.ErrorCorrectInPlace(brv);
+```
+
+Support
+-------
+This project is being actively developed and maintained by Jeremiah Wala (jwala@broadinstitute.org).
+
+Attributions
+------------
+We would like to thank Heng Li (HTSlib/BWA/fermi), Erik Garrison (interval tree), Christopher Gilbert (Aho-Corasick),
+and Mengyao Zhao (Smith-Waterman alignment) for providing open-source and robust bioinformatics solutions.
+
+Development, support, guidance, testing:
+* Steve Huang - Research Scientist, Broad Institute
+* Steve Schumacher - Computational Biologist, Dana Farber Cancer Institute
+* Cheng-Zhong Zhang - Research Scientist, Broad Institute
+* Marcin Imielinski - Assistant Professor, Cornell University
+* Rameen Beroukhim - Assistant Professor, Harvard Medical School
+
+[htslib]: https://github.com/samtools/htslib.git
+
+[SGA]: https://github.com/jts/sga
+
+[BLAT]: https://genome.ucsc.edu/cgi-bin/hgBlat?command=start
+
+[BWA]: https://github.com/lh3/bwa
+
+[license]: https://github.com/walaj/SeqLib/blob/master/LICENSE
+
+[BamTools]: https://raw.githubusercontent.com/wiki/pezmaster31/bamtools/Tutorial_Toolkit_BamTools-1.0.pdf
+
+[API]: http://pezmaster31.github.io/bamtools/annotated.html
+
+[htmldoc]: http://walaj.github.io/seqlibdocs/doxygen
+
+[var]: https://github.com/walaj/VariantBam
+
+[BT]: https://github.com/pezmaster31/bamtools
+
+[seqan]: https://www.seqan.de
+
+[gam]: https://github.com/broadinstitute/gamgee
+
+[int]: https://github.com/ekg/intervaltree.git
+
+[fermi]: https://github.com/lh3/fermi-lite
+
+[bfc]: https://github.com/lh3/bfc
diff --git a/SeqLib/BFC.h b/SeqLib/BFC.h
new file mode 100644
index 0000000..29b59c1
--- /dev/null
+++ b/SeqLib/BFC.h
@@ -0,0 +1,169 @@
+#ifndef SEQLIB_BFC_H
+#define SEQLIB_BFC_H
+
+extern "C" {
+ #include "fermi-lite/bfc.h"
+ #include "fermi-lite/fml.h"
+}
+
+#include "SeqLib/BamRecord.h"
+#include "SeqLib/UnalignedSequence.h"
+
+namespace SeqLib {
+
+/** Class to perform error-correction using BFC algorithm
+ *
+ * BFC is designed and implemented by Heng Li (https://github.com/lh3/bfc).
+ * From Heng: It is a variant of the classical spectrum alignment algorithm introduced
+ * by Pevzner et al (2001). It uses an exhaustive search to find a k-mer path
+ * through a read that minimizes a heuristic objective function jointly considering
+ * penalties on correction, quality and k-mer support.
+ */
+ class BFC {
+
+ public:
+ /** Construct a new BFC engine */
+ BFC() {
+ bfc_opt_init(&bfc_opt);
+ ch = NULL;
+ kmer = 0;
+ flt_uniq = 0;
+ n_seqs = 0;
+ m_seqs = NULL;
+ kcov = 0;
+ tot_k = 0;
+ sum_k = 0;
+ tot_len = 0;
+ m_seqs_size = 0;
+ }
+
+ ~BFC() {
+ clear();
+ if (ch)
+ bfc_ch_destroy(ch);
+ }
+
+ /** Allocate a block of memory for the reads if the amount to enter is known
+ * @note This is not necessary, as reads will dynamically reallocate
+ */
+ bool AllocateMemory(size_t n);
+
+ /** Perform BFC error correction on the sequences stored in this object */
+ bool ErrorCorrect();
+
+ /** Train the error corrector using the reads stored in this object */
+ bool Train();
+
+ /** Add a sequence for either training or correction */
+ bool AddSequence(const BamRecord& r);
+
+ /** Add a sequence for either training or correction */
+ bool AddSequence(const char* seq, const char* qual, const char* name);
+
+ /** Set the k-mer size */
+ void SetKmer(int k) { kmer = k; }
+
+ /** Train error correction using sequences from aligned reads */
+ void TrainCorrection(const BamRecordVector& brv);
+
+ /** Train error correction from raw character strings */
+ void TrainCorrection(const std::vector<char*>& v);
+
+ /** Train and error correct the same set of reads */
+ void TrainAndCorrect(const BamRecordVector& brv);
+
+ /** Error correct a collection of reads */
+ void ErrorCorrect(const BamRecordVector& brv);
+
+ /** Error correct in place, modify the sequence, and then clear memory from this object */
+ void ErrorCorrectInPlace(BamRecordVector& brv);
+
+ /** Error correct and add a tag with the corrected sequence data, and then clear memory from this object
+ * @param brv Aligned reads to error correct
+ * @param tag Tag to assign error corrected sequence to (eg KC)
+ * @exception Throws an invalid_argument if tag is not length 2
+ */
+ void ErrorCorrectToTag(BamRecordVector& brv, const std::string& tag);
+
+ /** Return the reads (error corrected if ran ErrorCorrect) */
+ void GetSequences(UnalignedSequenceVector& v) const;
+
+ /** Clear the stored reads */
+ void clear();
+
+ /** Filter reads with unique k-mers. Do after error correction */
+ void FilterUnique();
+
+ /** Return the calculated kcov */
+ float GetKCov() const { return kcov; }
+
+ /** Return the k-mer size */
+ int GetKMer() const { return kmer; }
+
+ /** Return the number of sequences controlled by this */
+ int NumSequences() const { return n_seqs; }
+
+ private:
+
+ // the amount of memory allocated
+ size_t m_seqs_size;
+
+ void learn_correct();
+
+ bfc_opt_t bfc_opt;
+
+ // histogram of kmer occurrences
+ uint64_t hist[256];
+
+ // diff histogram of kmers??
+ uint64_t hist_high[64];
+
+ uint64_t tot_len;
+
+ uint64_t sum_k; // total valid kmer count (kmers above min_count) ?
+
+ // total number of kmers?
+ uint64_t tot_k;
+
+ //
+ float kcov;
+
+ // reads to correct in place
+ fseq1_t * m_seqs;
+
+ // number of sequences
+ size_t n_seqs;
+
+ // fermi lite options
+ fml_opt_t fml_opt;
+
+ // vector of names
+ std::vector<char*> m_names;
+
+ // assign names, qualities and seq to m_seqs
+ void allocate_sequences_from_reads(const BamRecordVector& brv);
+
+ // assign names, qualities and seq to m_seqs
+ void allocate_sequences_from_char(const std::vector<char*>& v);
+
+ // do the actual read correction
+ void correct_reads();
+
+ // 0 turns off filter uniq
+ int flt_uniq; // from fml_correct call
+
+ int l_pre;
+
+ // 0 is auto learn
+ int kmer;
+
+ // holds data after learning how to correct
+ bfc_ch_t *ch;
+
+ // holds data for actual error correction
+ ec_step_t es;
+ };
+
+ }
+
+#endif
diff --git a/SeqLib/BWAWrapper.h b/SeqLib/BWAWrapper.h
new file mode 100644
index 0000000..6dbf078
--- /dev/null
+++ b/SeqLib/BWAWrapper.h
@@ -0,0 +1,197 @@
+#ifndef SEQLIB_BWAWRAPPER_H
+#define SEQLIB_BWAWRAPPER_H
+
+#include <string>
+#include <vector>
+#include <iostream>
+#include <algorithm>
+#include <memory>
+
+#include "SeqLib/BamRecord.h"
+#include "SeqLib/BamHeader.h"
+#include "SeqLib/UnalignedSequence.h"
+
+// all of the bwa and kseq stuff is in unaligned sequence
+// best way I had to keep from clashes with klib macros
+
+#define MEM_F_SOFTCLIP 0x200
+
+namespace SeqLib {
+
+/** Calls BWA-MEM on sequence queries and returns aligned reads, all in memory
+ * @note Calls core functions provided by Heng Li in BWA-MEM. https://github.com/lh3/bwa
+ */
+class BWAWrapper {
+
+ public:
+
+ /** Create an empty BWA MEM interface
+ * @note Will initialize a BWA-MEM memopt structure
+ * with the BWA-MEM defaults found in mem_opt_init.
+ * Memory allocation and deallocation is automatically
+ * handled in constructor / destructor.
+ */
+ BWAWrapper() {
+ idx = 0;
+ memopt = mem_opt_init();
+ memopt->flag |= MEM_F_SOFTCLIP;
+ }
+
+ /** Destroy the BWAWrapper (deallocate index and options) */
+ ~BWAWrapper() {
+ if (idx)
+ bwa_idx_destroy(idx);
+ if (memopt)
+ free(memopt);
+ }
+
+ /** Retrieve the sequence name from its numeric ID
+ * @param id Numeric ID of the reference sequence
+ * @exception throws an out_of_bounds if id not found
+ */
+ std::string ChrIDToName(int id) const;
+
+ /** Create a BamHeader from the loaded index files */
+ BamHeader HeaderFromIndex() const;
+
+ /** Perform a BWA-MEM alignment of a single sequence, and store hits in a BamRecordVector
+ * @param seq Sequence to be aligned
+ * @param name Name of the sequence to be aligned
+ * @param vec Alignment hits are appended to vec
+ * @param hardclip Should the output BamRecord objects be hardclipped
+ * @param keep_sec_with_frac_of_primary_score Set a threshold for whether a secondary alignment should be output
+ * @param max_secondary Set a hard-limit on the number of secondary hits that will be reported
+ */
+ void AlignSequence(const std::string& seq, const std::string& name, BamRecordVector& vec, bool hardclip,
+ double keep_sec_with_frac_of_primary_score, int max_secondary) const;
+
+ /** Construct a new bwa index for this object.
+ * @param v vector of references to input (e.g. v = {{"r1", "AT"}};)
+ *
+ * Throw an invalid_argument exception if any of the names or sequences
+ * of the input UnalignedSequenceVector is empty
+ */
+ void ConstructIndex(const UnalignedSequenceVector& v);
+
+ /** Retrieve a bwa index object from disk
+ * @param file Path to an indexed fasta (indexed with bwa index)
+ * @return True if successful
+ * @note Will delete the old index if already stored
+ */
+ bool LoadIndex(const std::string& file);
+
+ /** Dump the stored index to files
+ * @note This does not write the fasta itself
+ * @param index_name Write index files (*.sai, *.pac, *.ann, *.bwt, *.amb)
+ * @return True if able to write index
+ */
+ bool WriteIndex(const std::string& index_name) const;
+
+ /** Return the raw index in bwaidx_t form */
+ bwaidx_t* GetIndex() const { return idx; }
+
+ /** Return the number of reference sequences in current index
+ * @return Number of reference sequences, or 0 if uninitialized
+ */
+ int NumSequences() const;
+
+ /** Print some basic information about the loaded index */
+ friend std::ostream& operator<<(std::ostream& out, const BWAWrapper& b);
+
+ /** Set the gap open penalty
+ * @param gap_open Gap open penalty. Default 6.
+ * @exception Throws invalid_argument if gap_open < 0
+ */
+ void SetGapOpen(int gap_open);
+
+ /** Set the gap extension penalty
+ * @param gap_ext Gap extension penalty. Default 1
+ * @exception Throws invalid_argument if gap_ext < 0
+ */
+ void SetGapExtension(int gap_ext);
+
+ /** Set the mismatch penalty
+ * @param m Mismatch penalty (BWA-MEM b). Default 4
+ * @exception Throws invalid_argument if m < 0
+ */
+ void SetMismatchPenalty(int m);
+
+ /** Set the reseed trigger
+ * @param r See BWA-MEM -r. Default 1.5
+ * @exception Throws invalid_argument if r < 0
+ */
+ void SetReseedTrigger(float r);
+
+ /** Set the SW alignment bandwidth
+ * @param w See BWA-MEM -w. Default 100
+ * @exception Throws invalid_argument if w < 0
+ */
+ void SetBandwidth(int w);
+
+ /** Set the SW alignment Z dropoff
+ * @param z See BWA-MEM -d. Default 100
+ * @exception Throws invalid_argument if z < 0
+ */
+ void SetZDropoff(int z);
+
+ /** Set the 3-prime clipping penalty
+ * @param p See BWA-MEM -L.
+ * @exception Throws invalid_argument if p < 0
+ */
+ void Set3primeClippingPenalty(int p);
+
+ /** Set the 5-prime clipping penalty
+ * @param p See BWA-MEM -L.
+ * @exception Throws invalid_argument if p < 0
+ */
+ void Set5primeClippingPenalty(int p);
+
+ /** Set the match score. Scales -TdBOELU
+ * @note Since this scales penalty options, it should
+ * probably be specified first, and then other options
+ * (eg gap penalty) can be set explicitly afterwards.
+ * @param a See BWA-MEM -A
+ * @exception Throws invalid_argument if a < 0
+ */
+ void SetAScore(int a);
+
+ /** Check if the index is empty */
+ bool IsEmpty() const { return !idx; }
+
+ private:
+
+ // Construct a bam_hdr_t from a header string
+ bam_hdr_t* sam_hdr_read2(const std::string& hdr) const;
+
+ // Store the options in memory
+ mem_opt_t * memopt;
+
+ // hold the full index structure
+ bwaidx_t* idx;
+
+ // Convert a bns to a header string
+ std::string bwa_print_sam_hdr2(const bntseq_t *bns, const char *hdr_line) const;
+
+ // overwrite the bwa bwt_pac2pwt function
+ bwt_t *seqlib_bwt_pac2bwt(const uint8_t *pac, int bwt_seq_lenr);
+
+ // add an anns (chr annotation structure)
+ bntann1_t* seqlib_add_to_anns(const std::string& name, const std::string& seq, bntann1_t * ann, size_t offset);
+
+ // overwrite the bwa-mem add1 function, which takes a sequence and adds to pac
+ uint8_t* seqlib_add1(const kseq_t *seq, bntseq_t *bns, uint8_t *pac, int64_t *m_pac, int *m_seqs, int *m_holes, bntamb1_t **q);
+
+ // make the pac structure (2-bit encoded packed sequence)
+ uint8_t* seqlib_make_pac(const UnalignedSequenceVector& v, bool for_only);
+
+ // write pac part of the index
+ void seqlib_write_pac_to_file(const std::string& file) const;
+
+ // write the bns file of the index
+ std::string print_bns();
+};
+
+}
+
+
+#endif
diff --git a/SeqLib/BamHeader.h b/SeqLib/BamHeader.h
new file mode 100644
index 0000000..12a2281
--- /dev/null
+++ b/SeqLib/BamHeader.h
@@ -0,0 +1,130 @@
+#ifndef SEQLIB_BAM_HEADER_H__
+#define SEQLIB_BAM_HEADER_H__
+
+#include "htslib/htslib/hts.h"
+#include "htslib/htslib/sam.h"
+#include "htslib/htslib/bgzf.h"
+#include "htslib/htslib/kstring.h"
+
+#include "SeqLib/SeqLibUtils.h"
+#include <string>
+#include <vector>
+
+namespace SeqLib {
+
+ /** Store a reference chromosome and its length
+ * @note This parallels the data found in SQ tag of BAM header
+ */
+ struct HeaderSequence {
+
+ /** Make a new header sequence
+ * @param n Name of the chromosome
+ * @param l Length of the chromosome
+ */
+ HeaderSequence(const std::string& n, uint32_t l) : Name(n), Length(l) {}
+
+ std::string Name; ///< Name of the sequence (eg "1")
+ uint32_t Length; ///< Length of the sequence (eg LN:191469)
+ };
+
+ typedef std::vector<HeaderSequence> HeaderSequenceVector;
+
+ /** Store a header to a BAM file
+ *
+ * Stores a BAM header, which also acts as a dictionary of
+ * reference sequences, with names and lengths.
+ */
+ class BamHeader {
+
+ public:
+
+ /** Initializes a new empty BamHeader with no data
+ *
+ * @note No memory is allocated here
+ */
+ BamHeader() {};
+
+ /** Construct a new header from ref sequences and lengths
+ *
+ */
+ BamHeader(const HeaderSequenceVector& hsv);
+
+ /** Initialize a BamHeader from a string containing
+ * a BAM header in human-readable form (e.g. PG ... )
+ * @param hdr Text of a BAM header, with newlines separating lines
+ */
+ BamHeader(const std::string& hdr);
+
+ /** Create a new BamHeader from a raw HTSlib header.
+ *
+ * @note This will make a copy of the input header
+ */
+ BamHeader(const bam_hdr_t * hdr);
+
+    /** Return the number of sequences stored in this dictionary
+     * Returns 0 if the header is uninitialized.
+ */
+ int NumSequences() const;
+
+ /** Return the length of the sequence */
+ int GetSequenceLength(int id) const;
+
+ /** Return the length of the sequence */
+ int GetSequenceLength(const std::string& id) const;
+
+    /** Return true if the header has been initialized */
+ bool IsOpen() const { return h.get() != NULL; }
+
+ /** Return the full text of the header */
+ std::string AsString() const;
+
+ /** Convert a numeric sequence ID to a name
+ *
+     * @exception Throws an out_of_range if ID is >= the number of
+     * targets in the dictionary, or if the header is uninitialized.
+     * @exception Throws an invalid_argument if ID is < 0.
+ */
+ std::string IDtoName(int id) const;
+
+    /** Check if the header is empty (has not been initialized with any data)
+     */
+ bool isEmpty() const { return h.get() == NULL; }
+
+ /** Return the raw bam_hdr_t */
+ const bam_hdr_t* get() const { return h.get(); }
+
+ /** Return the raw bam_hdr_t */
+ bam_hdr_t* get_() const { return h.get(); }
+
+ /** Get the numeric ID associated with a sequence name.
+ * @param name Name of the sequence
+ * @return ID of named sequence, or -1 if not in dictionary
+ */
+ int Name2ID(const std::string& name) const;
+
+ /** Return the reference sequences as vector of HeaderSequence objects */
+ HeaderSequenceVector GetHeaderSequenceVector() const;
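+
+    /* Usage sketch (illustrative only; the sequence names and lengths are made up):
+     * build a small header and convert between names and numeric IDs.
+     *
+     *   SeqLib::HeaderSequenceVector hsv;
+     *   hsv.push_back(SeqLib::HeaderSequence("1", 249250621));
+     *   hsv.push_back(SeqLib::HeaderSequence("2", 243199373));
+     *   SeqLib::BamHeader hdr(hsv);
+     *   int id = hdr.Name2ID("2");         // 1
+     *   std::string nm = hdr.IDtoName(0);  // "1"
+     */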
+
+ private:
+
+ // adapted from sam.c - bam_nam2id
+ int bam_name2id_2(const bam_hdr_t *h, const char *ref) const;
+
+ SeqPointer<bam_hdr_t> h;
+
+ // make the name 2 id map (to be used by Name2ID)
+ // replaces part of bam_name2id that makes the hash table
+ void ConstructName2IDTable();
+
+ // hash table for name to id
+ SeqPointer<SeqHashMap<std::string, int> > n2i;
+
+ // adapted from sam_hdr_read
+ bam_hdr_t* sam_hdr_read2(const std::string& hdr) const;
+
+ };
+
+}
+
+
+#endif
diff --git a/SeqLib/BamReader.h b/SeqLib/BamReader.h
new file mode 100644
index 0000000..d0f3aa0
--- /dev/null
+++ b/SeqLib/BamReader.h
@@ -0,0 +1,279 @@
+#ifndef SEQLIB_BAM_POLYREADER_H
+#define SEQLIB_BAM_POLYREADER_H
+
+#include <cassert>
+#include "SeqLib/ReadFilter.h"
+#include "SeqLib/BamWalker.h"
+
+// forward declare this from hts.c
+extern "C" {
+int hts_useek(htsFile *file, long uoffset, int where);
+}
+
+class BamReader;
+
+namespace SeqLib {
+
+ typedef SeqPointer<hts_idx_t> SharedIndex; ///< Shared pointer to the HTSlib index struct
+
+ typedef SeqPointer<htsFile> SharedHTSFile; ///< Shared pointer to the HTSlib file pointer
+
+ // store file accessors for single BAM
+ class _Bam {
+
+ friend class BamReader;
+
+ public:
+
+ _Bam(const std::string& m) : m_region_idx(0), m_in(m), empty(true), mark_for_closure(false) {}
+
+ _Bam() : m_region_idx(0), empty(true), mark_for_closure(false) {}
+
+ ~_Bam() {}
+
+ std::string GetFileName() const { return m_in; }
+
+ // point index to this region of bam
+ bool SetRegion(const GenomicRegion& gp);
+
+ // which region are we on
+ size_t m_region_idx;
+
+ private:
+
+ // do the read loading
+ bool load_read(BamRecord& r);
+
+ void reset() {
+ empty = true;
+ mark_for_closure = false;
+ m_region_idx = 0;
+ }
+
+ // close this bam
+ bool close() {
+ if (!fp)
+ return false;
+ fp.reset();
+ idx.reset();
+ hts_itr.reset();
+
+ //fp = nullptr; // calls destructor actually
+ //idx = nullptr;
+ //hts_itr = nullptr;
+
+ empty = true;
+ mark_for_closure = false;
+ m_region_idx = 0;
+
+ return true;
+ }
+
+ // set a pre-loaded index (save on loading each time)
+ //void set_index(SharedIndex& i) { idx = i; }
+
+ // set a pre-loaded htsfile (save on loading each time)
+ //void set_file(SharedHTSFile& i) { fp = i; }
+
+ // set a pre-loaded index and make a deep copy
+ //void deep_set_index();
+
+ GRC* m_region; // local copy of region
+
+ SharedHTSFile fp; // BAM file pointer
+ SharedIndex idx; // bam index
+ SeqPointer<hts_itr_t> hts_itr; // iterator to index location
+ std::string m_in; // file name
+ BamHeader m_hdr; // the BAM header
+
+ // the next read "slotted" for this BAM
+ BamRecord next_read;
+
+ // the next read "slot" is empty
+ bool empty;
+
+ // if set to true, then won't even attempt to lookup read
+ bool mark_for_closure;
+
+ // open the file pointer
+ bool open_BAM_for_reading();
+
+ // hold the reference for CRAM reading
+ std::string m_cram_reference;
+
+ };
+
+ typedef SeqHashMap<std::string, _Bam> _BamMap;
+
+/** Stream in reads from multiple BAM/SAM/CRAM or stdin */
+class BamReader {
+
+ public:
+
+ /** Construct an empty BamReader */
+ BamReader();
+
+ /** Destroy a BamReader and close all connections to the BAMs
+ *
+ * Calling the destructor will take care of all of the C-style dealloc
+ * calls required within HTSlib to close a BAM or SAM file.
+ */
+ ~BamReader() { }
+
+  /** Explicitly set a reference genome to be used to decode CRAM files.
+   * If no reference is specified, one will automatically be loaded from
+   * the file pointed to by the SQ tags in the CRAM header.
+   * @note This function is useful if the reference path pointed
+   * to by the UR field of SQ is not on your system, and you would
+   * like to explicitly provide one.
+   * @param ref Path to an indexed reference genome
+ */
+ void SetCramReference(const std::string& ref);
+
+ /** Set a part of the BAM to walk.
+ *
+ * This will set the BAM pointer to the given region.
+ * @note This clears all other regions and resets the index
+ * pointer to this location
+ * @param gp Location to point the BAM to
+ * @return true if the region is found in the index
+ */
+ bool SetRegion(const GenomicRegion& gp);
+
+ /** Set up multiple regions. Overwrites current regions.
+ *
+ * This will set the BAM pointer to the first element of the
+ * input list.
+ * @note This clears all other regions and resets the index
+ * pointer to the first element of grc
+   * @param grc Set of locations to point the BAM to
+ * @return true if the regions are found in the index
+ */
+ bool SetMultipleRegions(const GRC& grc);
+
+ /** Return if the reader has opened the first file */
+ bool IsOpen() const { if (m_bams.size()) return m_bams.begin()->second.fp.get() != NULL; return false; }
+
+ /*
+ Set pre-loaded raw htslib index
+ Provide the reader with an index structure that is already loaded.
+ This is useful if there are multiple newly created BamReader objects
+ that use the same index (e.g. make a BAM index in a loop)
+ @note This does not make a copy, so ops on this index are shared with
+ every other object that controls it.
+ @param i Pointer to an HTSlib index
+ @param f Name of the file to set index for
+ @return True if the file f is controlled by this object
+ bool SetPreloadedIndex(const std::string& f, SharedIndex& i);
+
+ Return a shared pointer to the raw htsFile object
+ @exception Throws runtime_error if the requested file has not been opened already with Open
+ @param f File to retrieve the htsFile from.
+
+ SharedHTSFile GetHTSFile (const std::string& f) const;
+
+ Return a shared pointer to the raw htsFile object from the first BAM
+ @exception Throws runtime_error if no files have been opened already with Open
+ @param f File to retrieve the htsFile from.
+ SharedHTSFile GetHTSFile () const;
+
+ Set a pre-loaded raw index, to the first BAM
+ @note see SetPreloadedIndex(const std::string& f, SharedIndex& i)
+ bool SetPreloadedIndex(SharedIndex& i);
+ */
+
+ /** Return if the reader has opened the file
+ * @param f Name of file to check
+ */
+ bool IsOpen(const std::string& f) const {
+ SeqHashMap<std::string, _Bam>::const_iterator ff = m_bams.find(f);
+ if (ff == m_bams.end())
+ return false;
+ return ff->second.fp.get() != NULL;
+ }
+
+ /** Close all of the BAMs */
+ bool Close();
+
+ /** Close a particular BAM/CRAM/SAM
+ * @param f Particular file to close
+   * @return True if BAM is found and is closable (eg not already closed)
+ */
+ bool Close(const std::string& f);
+
+  /** Reset the given BAM/SAM/CRAM to the beginning, but keep the loaded indices and file-pointers
+ * @param f Name of file to reset
+ * @return Returns false if this BAM is not found in object
+ * @note Unlike Reset(), this version will NOT reset the regions, since other BAMs may still be
+ * using them.
+ */
+ bool Reset(const std::string& f);
+
+ /** Return a vector of all of the BAM/SAM/CRAMs in this reader */
+ std::vector<std::string> ListFiles() const {
+ std::vector<std::string> out;
+ for (_BamMap::const_iterator i = m_bams.begin(); i != m_bams.end(); ++i)
+ out.push_back(i->first);
+ return out;
+ }
+
+ /** Create a string representation of
+ * all of the regions to walk
+ */
+ std::string PrintRegions() const;
+
+ /** Print out some basic info about this reader */
+ friend std::ostream& operator<<(std::ostream& out, const BamReader& b);
+
+ /** Open a BAM/SAM/CRAM/STDIN file for streaming in
+ * @param bam Path to a SAM/CRAM/BAM file, or "-" for stdin
+ * @return True if open was successful
+ */
+ bool Open(const std::string& bam);
+
+ /** Open a set of BAM/SAM/CRAM/STDIN files for streaming in
+   * @param bams Vector of paths to SAM/CRAM/BAM files, or "-" for stdin
+ * @return True if open was successful
+ */
+ bool Open(const std::vector<std::string>& bams);
+
+ /** Retrieve the next read from the available input streams.
+   * @note Will choose the read with the lowest left-alignment position
+ * from the available streams.
+ * @param r Read to fill with data
+ * @return true if the next read is available
+ */
+ bool GetNextRecord(BamRecord &r);
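+
+  /* Usage sketch (illustrative only; the file name and region are hypothetical):
+   *
+   *   SeqLib::BamReader br;
+   *   if (br.Open("tumor.bam")) {
+   *     br.SetRegion(SeqLib::GenomicRegion(0, 1000000, 2000000)); // chr1:1M-2M
+   *     SeqLib::BamRecord rec;
+   *     while (br.GetNextRecord(rec))
+   *       std::cerr << rec.Qname() << "\t" << rec.Position() << std::endl;
+   *   }
+   */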
+
+  /** Reset all the regions, but keep the loaded indices and file-pointers */
+ void Reset();
+
+  /** Return a copy of the header from the first file
+   * @note The object returned is a copy of the BamHeader, but
+   * this does not copy the underlying header contents. Header contents
+   * are stored in a shared_ptr, so the returned BamHeader
+   * holds a copy of the shared_ptr that points to the originally allocated
+   * raw header data.
+   */
+ BamHeader Header() const;
+
+ /** Return a concatenation of all the headers */
+ std::string HeaderConcat() const;
+
+ protected:
+
+ GRC m_region; ///< Regions to access
+
+ _BamMap m_bams; ///< store the htslib file pointers etc to BAM files
+
+ private:
+ // hold the reference for CRAM reading
+ std::string m_cram_reference;
+
+};
+
+
+}
+#endif
+
+
diff --git a/SeqLib/BamRecord.h b/SeqLib/BamRecord.h
new file mode 100644
index 0000000..8e0bccc
--- /dev/null
+++ b/SeqLib/BamRecord.h
@@ -0,0 +1,863 @@
+#ifndef SEQLIB_BAM_RECORD_H
+#define SEQLIB_BAM_RECORD_H
+
+#include <stdint.h>
+//#include <cstdint> //+11
+#include <vector>
+#include <iostream>
+#include <sstream>
+#include <cassert>
+#include <algorithm>
+
+extern "C" {
+#include "htslib/htslib/hts.h"
+#include "htslib/htslib/sam.h"
+#include "htslib/htslib/bgzf.h"
+#include "htslib/htslib/kstring.h"
+#include "htslib/htslib/faidx.h"
+
+}
+
+#include "SeqLib/SeqLibUtils.h"
+#include "SeqLib/GenomicRegion.h"
+#include "SeqLib/UnalignedSequence.h"
+
+static const char BASES[16] = {' ', 'A', 'C', ' ',
+ 'G', ' ', ' ', ' ',
+ 'T', ' ', ' ', ' ',
+ ' ', ' ', ' ', 'N'};
+
+static std::string cigar_delimiters = "MIDNSHPX";
+
+static const uint8_t CIGTAB[255] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+#define FRORIENTATION 0
+#define FFORIENTATION 1
+#define RFORIENTATION 2
+#define RRORIENTATION 3
+#define UDORIENTATION 4
+
+namespace SeqLib {
+
+/** Basic container for a single cigar operation
+ *
+ * Stores a single cigar element in a compact 32bit form (same as HTSlib).
+ */
+class CigarField {
+
+ friend class Cigar;
+
+ public:
+
+ /** Construct the cigar op by type (MIDNSHP=X) and length
+ * @param t Cigar op (MIDNSHP=X)
+ * @param l Cigar length
+ * @exception Throws an invalid_argument if l <= 0 or invalid cigar op
+ */
+ CigarField(char t, uint32_t l);
+
+ /** Construct the cigar op from the raw sam.h uint32_t (first 4 bits op, last 28 len) */
+ CigarField(uint32_t f) : data(f) {}
+
+ /** Return the raw sam.h uint8_t cigar data */
+ inline uint32_t raw() const { return data; }
+
+ /** Print the cigar field (eg 35M) */
+ friend std::ostream& operator<<(std::ostream& out, const CigarField& c);
+
+ /** Return the cigar op type (one of MIDNSHPX) as a char */
+ inline char Type() const { return bam_cigar_opchr(data); }
+
+ /** Return the raw sam.h uint8_t cigar type (bam_cigar_op(data)) */
+ inline uint8_t RawType() const { return bam_cigar_op(data); }
+
+ /** Return the length of the cigar op (eg 35M returns 35) */
+ inline uint32_t Length() const { return bam_cigar_oplen(data); }
+
+ /** Returns true if cigar op matches bases on the reference (MDN=X) */
+ inline bool ConsumesReference() const { return bam_cigar_type(bam_cigar_op(data))&2; }
+
+  /** Returns true if the cigar op matches bases on the query (MIS=X) */
+ inline bool ConsumesQuery() const { return bam_cigar_type(bam_cigar_op(data))&1; }
+
+ /** Return whether two CigarField objects have same op and len */
+ inline bool operator==(const CigarField& c) const { return c.Type() == Type() && c.Length() == Length(); }
+
+ /** Return whether two CigarField objects have different op and/or len */
+ inline bool operator!=(const CigarField& c) const { return !(c == *this); }
+
+ private:
+
+ // first 4 bits hold op, last 28 hold len
+ uint32_t data;
+
+};
+
+/** CIGAR for a single gapped alignment
+ *
+ * Constructed as a vector of CigarField objects.
+ */
+ class Cigar {
+
+ public:
+
+ typedef std::vector<CigarField>::iterator iterator; ///< Iterator for move between CigarField ops
+ typedef std::vector<CigarField>::const_iterator const_iterator; ///< Iterator (const) for move between CigarField ops
+ iterator begin() { return m_data.begin(); } ///< Iterator (aka std::vector<CigarField>.begin()
+ iterator end() { return m_data.end(); } ///< Iterator (aka std::vector<CigarField>.end()
+ const_iterator begin() const { return m_data.begin(); } ///< Iterator (aka std::vector<CigarField>.begin()
+ const_iterator end() const { return m_data.end(); } ///< Iterator (aka std::vector<CigarField>.end()
+
+ /** Const reference to last cigar op */
+ inline const CigarField& back() const { return m_data.back(); }
+
+ /** Reference to last cigar op */
+ inline CigarField& back() { return m_data.back(); }
+
+ /** Const reference to first cigar op */
+ inline const CigarField& front() const { return m_data.front(); }
+
+ /** Reference to first cigar op */
+ inline CigarField& front() { return m_data.front(); }
+
+ /** Returns the number of cigar ops */
+ inline size_t size() const { return m_data.size(); }
+
+ /** Returns the i'th cigar op */
+ inline CigarField& operator[](size_t i) { return m_data[i]; }
+
+ /** Returns the i'th cigar op (const) */
+ const CigarField& operator[](size_t i) const { return m_data[i]; }
+
+ /** Return the sum of all of the lengths for all kinds */
+ inline int TotalLength() const {
+ int t = 0;
+ for (Cigar::const_iterator c = m_data.begin(); c != m_data.end(); ++c)
+ //for (auto& c : m_data)
+ t += c->Length();
+ return t;
+ }
+
+ /** Return the number of query-consumed bases */
+ inline int NumQueryConsumed() const {
+ int out = 0;
+ for (Cigar::const_iterator c = m_data.begin(); c != m_data.end(); ++c)
+ if (c->ConsumesQuery())
+ out += c->Length();
+ return out;
+ }
+
+ /** Return the number of reference-consumed bases */
+ inline int NumReferenceConsumed() const {
+ int out = 0;
+ // for (auto& c : m_data)
+ for (Cigar::const_iterator c = m_data.begin(); c != m_data.end(); ++c)
+ if (c->ConsumesReference())
+ out += c->Length();
+ return out;
+ }
+
+ /** Add a new cigar op */
+ inline void add(const CigarField& c) {
+ m_data.push_back(c);
+ }
+
+ /** Return whether two Cigar objects are equivalent */
+ bool operator==(const Cigar& c) const;
+
+ /** Return whether two Cigar objects are not equivalent */
+ bool operator!=(const Cigar& c) const { return !(c == *this); }
+
+ /** Print cigar string (eg 35M25S) */
+ friend std::ostream& operator<<(std::ostream& out, const Cigar& c);
+
+
+ private:
+
+ std::vector<CigarField> m_data; // should make this simpler
+
+ };
+
+ //typedef std::vector<CigarField> Cigar;
+ typedef SeqHashMap<std::string, size_t> CigarMap;
+
+ Cigar cigarFromString(const std::string& cig);
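+
+  /* Usage sketch (illustrative only): build a Cigar from its string form and
+   * query it with the accessors above.
+   *
+   *   SeqLib::Cigar c = SeqLib::cigarFromString("10S90M");
+   *   int q = c.NumQueryConsumed();      // 100 (S and M consume the query)
+   *   int r = c.NumReferenceConsumed();  //  90 (only M consumes the reference)
+   *   c.add(SeqLib::CigarField('I', 5)); // append a 5 bp insertion op
+   */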
+
+/** Class to store and interact with a SAM alignment record
+ *
+ * HTSLibrary reads are stored in the bam1_t struct. Memory allocation
+ * is taken care of by bam1_t init, and deallocation by destroy_bam1. This
+ * class is a C++ interface that automatically takes care of memory management
+ * for these C allocs/deallocs. The only member of BamRecord is a bam1_t object.
+ * Alloc/dealloc is taken care of by the constructor and destructor.
+ */
+class BamRecord {
+
+ friend class BLATWraper;
+ friend class BWAWrapper;
+
+ public:
+
+ /** Construct a BamRecord manually from a name, sequence, cigar and location
+ * @param name Name of the read
+   * @param seq Sequence of the read (composed of ACTG or N).
+ * @param gr Location of the alignment
+ * @param cig Cigar alignment
+ * @exception Throws an invalid_argument exception if length of seq is not commensurate
+ * with number of query-bases consumed in cigar.
+ * @exception Throws an invalid_argument exception if width of gr is not commensurate
+ * with number of reference-bases consumed in cigar.
+ */
+ BamRecord(const std::string& name, const std::string& seq, const GenomicRegion * gr, const Cigar& cig);
+
+ /** Construct an empty BamRecord by calling bam_init1()
+ */
+ void init();
+
+ /** Check if a read is empty (not initialized)
+ * @return true if read was not initialized with any values
+ */
+ bool isEmpty() const { return !b; }
+
+ /** Explicitly pass a bam1_t to the BamRecord.
+ *
+ * The BamRecord now controls the memory, and will delete at destruction
+ * @param a An allocated bam1_t
+ */
+ void assign(bam1_t* a);
+
+ /** Make a BamRecord with no memory allocated and a null header */
+ BamRecord() {}
+
+ /** BamRecord is aligned on reverse strand */
+ inline bool ReverseFlag() const { return b ? ((b->core.flag&BAM_FREVERSE) != 0) : false; }
+
+ /** BamRecord has mate aligned on reverse strand */
+ inline bool MateReverseFlag() const { return b ? ((b->core.flag&BAM_FMREVERSE) != 0) : false; }
+
+  /** BamRecord is an interchromosomal alignment */
+ inline bool Interchromosomal() const { return b ? b->core.tid != b->core.mtid && PairMappedFlag() : false; }
+
+ /** BamRecord is a duplicate */
+ inline bool DuplicateFlag() const { return b ? ((b->core.flag&BAM_FDUP) != 0) : false; }
+
+ /** BamRecord is a secondary alignment */
+ inline bool SecondaryFlag() const { return b ? ((b->core.flag&BAM_FSECONDARY) != 0) : false; }
+
+ /** BamRecord is paired */
+ inline bool PairedFlag() const { return b ? ((b->core.flag&BAM_FPAIRED) != 0) : false; }
+
+ /** Get the relative pair orientations
+ *
+   * 0 - FR (FRORIENTATION) (lower pos read is Fwd strand, higher is reverse)
+ * 1 - FF (FFORIENTATION)
+ * 2 - RF (RFORIENTATION)
+ * 3 - RR (RRORIENTATION)
+ * 4 - Undefined (UDORIENTATION) (unpaired or one/both is unmapped)
+ */
+ inline int PairOrientation() const {
+ if (!PairMappedFlag())
+ return UDORIENTATION;
+ else if ( (!ReverseFlag() && Position() <= MatePosition() && MateReverseFlag() ) || // read 1
+ (ReverseFlag() && Position() >= MatePosition() && !MateReverseFlag() ) ) // read 2
+ return FRORIENTATION;
+ else if (!ReverseFlag() && !MateReverseFlag())
+ return FFORIENTATION;
+ else if (ReverseFlag() && MateReverseFlag())
+ return RRORIENTATION;
+ else if ( ( ReverseFlag() && Position() < MatePosition() && !MateReverseFlag()) ||
+ (!ReverseFlag() && Position() > MatePosition() && MateReverseFlag()))
+ return RFORIENTATION;
+ assert(false);
+ }
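+
+  /* Usage sketch (illustrative only; brv is a BamRecordVector filled elsewhere):
+   * count FR-oriented pairs using the codes above.
+   *
+   *   size_t n_fr = 0;
+   *   for (size_t i = 0; i < brv.size(); ++i)
+   *     if (brv[i].PairOrientation() == FRORIENTATION)
+   *       ++n_fr;
+   */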
+
+ /** BamRecord is failed QC */
+ inline bool QCFailFlag() const { return b ? ((b->core.flag&BAM_FQCFAIL) != 0) : false; }
+
+ /** BamRecord is mapped */
+ inline bool MappedFlag() const { return b ? ((b->core.flag&BAM_FUNMAP) == 0) : false; }
+
+ /** BamRecord mate is mapped */
+ inline bool MateMappedFlag() const { return b ? ((b->core.flag&BAM_FMUNMAP) == 0) : false; }
+
+ /** BamRecord is mapped and mate is mapped and in pair */
+ inline bool PairMappedFlag() const { return b ? (!(b->core.flag&BAM_FMUNMAP) && !(b->core.flag&BAM_FUNMAP) && (b->core.flag&BAM_FPAIRED) ) : false; }
+
+ /** BamRecord is mapped in proper pair */
+ inline bool ProperPair() const { return b ? (b->core.flag&BAM_FPROPER_PAIR) : false;}
+
+ /** BamRecord has proper orientation (FR) */
+ inline bool ProperOrientation() const {
+ if (!b)
+ return false;
+
+ // mate on diff chrom gets false
+ if (b->core.tid != b->core.mtid)
+ return false;
+
+ // if FR return true
+ if (b->core.pos < b->core.mpos) {
+ return (b->core.flag&BAM_FREVERSE) == 0 && (b->core.flag&BAM_FMREVERSE) != 0 ? true : false;
+ } else {
+ return (b->core.flag&BAM_FREVERSE) == 0 && (b->core.flag&BAM_FMREVERSE) != 0 ? false : true;
+ }
+
+ }
+
+ /** Count the total number of N bases in this sequence */
+ int32_t CountNBases() const;
+
+ /** Trim the sequence down by removing bases from ends with low quality scores. Stores the
+ * trimmed sequence in the GV tag, but does not affect any other part of read.
+ * @param qualTrim Minimal quality score, zero-based (eg # == 2)
+ * @param startpoint Returns the new starting point for the sequence
+ * @param endpoint Return the new ending point for the sequence
+ */
+ void QualityTrimmedSequence(int32_t qualTrim, int32_t& startpoint, int32_t& endpoint) const;
+
+  /** Retrieve the quality trimmed sequence from the QT tag if made. Otherwise return the normal seq */
+ std::string QualitySequence() const;
+
+ /** Get the alignment position */
+ inline int32_t Position() const { return b ? b->core.pos : -1; }
+
+ /** Get the alignment position of mate */
+ inline int32_t MatePosition() const { return b ? b->core.mpos: -1; }
+
+ /** Count the number of secondary alignments by looking at XA tag.
+ * @note A secondary alignment is an alternative mapping. This may not
+ * work for non-BWA aligners that may not place the XA tag.
+ */
+ int32_t CountBWASecondaryAlignments() const;
+
+ /** Count the number of chimeric alignments by looking at XP and SA tags
+   * @note A chimeric alignment is a split (supplementary) mapping. This may not
+   * work for non-BWA aligners that may not place the XP/SA tags. BWA-MEM
+   * used the XP tag prior to v0.7.5, and SA afterwards.
+ */
+ int32_t CountBWAChimericAlignments() const;
+
+ /** Get the end of the alignment */
+ inline int32_t PositionEnd() const { return b ? bam_endpos(b.get()) : -1; }
+
+ /** Get the chromosome ID of the read */
+ inline int32_t ChrID() const { return b ? b->core.tid : -1; }
+
+  /** Get the chromosome ID of the mate read */
+ inline int32_t MateChrID() const { return b ? b->core.mtid : -1; }
+
+ /** Get the mapping quality */
+ inline int32_t MapQuality() const { return b ? b->core.qual : -1; }
+
+ /** Set the mapping quality */
+ inline void SetMapQuality(int32_t m) { if (b) b->core.qual = m; }
+
+ /** Set the chr id */
+ inline void SetChrID(int32_t i) { b->core.tid = i; }
+
+ /** Set the chr id of mate */
+ inline void SetChrIDMate(int32_t i) { b->core.mtid = i; }
+
+ /** Set the position of the mate read */
+ inline void SetPositionMate(int32_t i) { b->core.mpos = i; }
+
+ /** Set the pair mapped flag on */
+ inline void SetPairMappedFlag() { b->core.flag |= BAM_FPAIRED; }
+
+ /** Set the mate reverse flag on */
+ inline void SetMateReverseFlag() { b->core.flag |= BAM_FMREVERSE; }
+
+ /** Get the number of cigar fields */
+ inline int32_t CigarSize() const { return b ? b->core.n_cigar : -1; }
+
+ /** Check if this read is first in pair */
+ inline bool FirstFlag() const { return (b->core.flag&BAM_FREAD1); }
+
+ /** Get the qname of this read as a string */
+ inline std::string Qname() const { return std::string(bam_get_qname(b)); }
+
+ /** Get the qname of this read as a char array */
+ inline char* QnameChar() const { return bam_get_qname(b); }
+
+ /** Get the full alignment flag for this read */
+ inline uint32_t AlignmentFlag() const { return b->core.flag; }
+
+ /** Get the insert size for this read */
+ inline int32_t InsertSize() const { return b->core.isize; }
+
+  /** Get the read group, first from the RG tag, then from the qname
+   * @return "NA" if no read group is found
+   */
+ inline std::string ParseReadGroup() const {
+
+ // try to get from RG tag first
+ std::string RG = GetZTag("RG");
+ if (!RG.empty())
+ return RG;
+
+ // try to get the read group tag from qname second
+ std::string qn = Qname();
+ size_t posr = qn.find(":", 0);
+ return (posr != std::string::npos) ? qn.substr(0, posr) : "NA";
+ }
+
+  /** Get the absolute insert size, always taking the read length into account */
+ inline int32_t FullInsertSize() const {
+
+ if (b->core.tid != b->core.mtid || !PairMappedFlag())
+ return 0;
+
+ return std::abs(b->core.pos - b->core.mpos) + Length();
+
+ }
+
+ /** Get the number of query bases of this read (aka length) */
+ inline int32_t Length() const { return b->core.l_qseq; }
+
+ /** Append a tag with new value, delimited by 'x' */
+ void SmartAddTag(const std::string& tag, const std::string& val);
+
+ /** Set the query name */
+ void SetQname(const std::string& n);
+
+ /** Set the quality scores
+ * @param n String of quality scores or empty string
+ * @param offset Offset parameter for encoding (eg 33)
+   * @exception Throws an invalid_argument if n is non-empty
+   * and of a different length than the sequence
+ */
+ void SetQualities(const std::string& n, int offset);
+
+  /** Set the sequence
+ * @param seq Sequence in upper-case (ACTGN) letters.
+ */
+ void SetSequence(const std::string& seq);
+
+ /** Set the cigar field explicitly
+ * @param c Cigar operation to set
+ * @note Will not check if the cigar ops are consistent with
+ * the length of the sequence.
+ */
+ void SetCigar(const Cigar& c);
+
+ /** Print a SAM-lite record for this alignment */
+ friend std::ostream& operator<<(std::ostream& out, const BamRecord &r);
+
+ /** Return read as a GenomicRegion */
+ GenomicRegion AsGenomicRegion() const;
+
+ /** Return mate read as a GenomicRegion */
+ GenomicRegion AsGenomicRegionMate() const;
+
+ /** Return the number of "aligned bases" in the same style as BamTools
+ *
+ * BamTools reports AlignedBases, which for example returns the literal strings (for diff CIGARs):
+ * 3S5M - CTG
+ * 5M - CTAGC
+ * 3M1D3M - ATG-TGA
+ * 3M1I3M - ATGCTGA
+ *
+ * @return The number of M, D, X, = and I bases
+ */
+ inline int NumAlignedBases() const {
+ int out = 0;
+ uint32_t* c = bam_get_cigar(b);
+ for (size_t i = 0; i < b->core.n_cigar; i++)
+ if (bam_cigar_opchr(c[i]) == 'M' ||
+ bam_cigar_opchr(c[i]) == 'I' ||
+ bam_cigar_opchr(c[i]) == '=' ||
+ bam_cigar_opchr(c[i]) == 'X' ||
+ bam_cigar_opchr(c[i]) == 'D')
+ out += bam_cigar_oplen(c[i]);
+ return out;
+ }
+
+
+ /** Return the max single insertion size on this cigar */
+ inline uint32_t MaxInsertionBases() const {
+ uint32_t* c = bam_get_cigar(b);
+ uint32_t imax = 0;
+ for (size_t i = 0; i < b->core.n_cigar; i++)
+ if (bam_cigar_opchr(c[i]) == 'I')
+ imax = std::max(bam_cigar_oplen(c[i]), imax);
+ return imax;
+ }
+
+ /** Return the max single deletion size on this cigar */
+ inline uint32_t MaxDeletionBases() const {
+ uint32_t* c = bam_get_cigar(b);
+ uint32_t dmax = 0;
+ for (size_t i = 0; i < b->core.n_cigar; i++)
+ if (bam_cigar_opchr(c[i]) == 'D')
+ dmax = std::max(bam_cigar_oplen(c[i]), dmax);
+ return dmax;
+ }
+
+ /** Get the number of matched bases in this alignment */
+ inline uint32_t NumMatchBases() const {
+ uint32_t* c = bam_get_cigar(b);
+ uint32_t dmax = 0;
+ for (size_t i = 0; i < b->core.n_cigar; i++)
+ if (bam_cigar_opchr(c[i]) == 'M')
+ dmax += bam_cigar_oplen(c[i]);
+ return dmax;
+ }
+
+
+  /** Retrieve the CIGAR as a more manageable Cigar structure */
+ Cigar GetCigar() const {
+ uint32_t* c = bam_get_cigar(b);
+ Cigar cig;
+ for (int k = 0; k < b->core.n_cigar; ++k) {
+ cig.add(CigarField(c[k]));
+ }
+ return cig;
+ }
+
+  /** Retrieve the CIGAR in reverse order as a more manageable Cigar structure */
+ Cigar GetReverseCigar() const {
+ uint32_t* c = bam_get_cigar(b);
+ Cigar cig;
+ for (int k = b->core.n_cigar - 1; k >= 0; --k)
+ cig.add(CigarField(c[k]));
+ return cig;
+ }
+
+ /** Remove the sequence, quality and alignment tags.
+   * Make a more compact alignment structure, without the string data
+ */
+ void ClearSeqQualAndTags();
+
+ /** Retrieve the sequence of this read as a string (ACTGN) */
+ std::string Sequence() const;
+
+ /** Return the mean quality score
+ */
+ double MeanPhred() const;
+
+  /** Perform a Smith-Waterman alignment between two strings
+ * @param name Name of the query sequence to align
+ * @param seq Sequence (ACTGN) of the query string
+ * @param ref Sequence (ACTGN) of the reference string
+ * @param gr Location of the reference string. The alignment record after Smith-Waterman alignment
+ * will be relative to this location.
+ */
+ BamRecord(const std::string& name, const std::string& seq, const std::string& ref, const GenomicRegion * gr);
+
+ /** Get the quality scores of this read as a string
+ * @param offset Encoding offset for phred quality scores. Default 33
+   * @return Quality scores after applying the offset. If the first char is empty, returns an empty string
+ */
+ inline std::string Qualities(int offset = 33) const {
+ uint8_t * p = bam_get_qual(b);
+ if (!p)
+ return std::string();
+ if (!p[0])
+ return std::string();
+ std::string out(b->core.l_qseq, ' ');
+ for (int32_t i = 0; i < b->core.l_qseq; ++i)
+ out[i] = (char)(p[i] + offset);
+ return out;
+ }
+
+  /** Get the start of the alignment on the read, ignoring soft/hard clips.
+   * This version counts from the reverse end of the read.
+ */
+ inline int32_t AlignmentPositionReverse() const {
+ uint32_t* c = bam_get_cigar(b);
+ int32_t p = 0;
+ for (int32_t i = b->core.n_cigar - 1; i >= 0; --i) {
+ if ( (bam_cigar_opchr(c[i]) == 'S') || (bam_cigar_opchr(c[i]) == 'H'))
+ p += bam_cigar_oplen(c[i]);
+ else // not a clip, so stop counting
+ break;
+ }
+ return p;
+ }
+
+  /** Get the end of the alignment on the read, ignoring soft/hard clips.
+   * This version counts from the reverse end of the read.
+ */
+ inline int32_t AlignmentEndPositionReverse() const {
+ uint32_t* c = bam_get_cigar(b);
+ int32_t p = 0;
+    for (int32_t i = 0; i < b->core.n_cigar; ++i) { // loop from the front
+ if ( (bam_cigar_opchr(c[i]) == 'S') || (bam_cigar_opchr(c[i]) == 'H'))
+ p += bam_cigar_oplen(c[i]);
+ else // not a clip, so stop counting
+ break;
+ }
+ return (b->core.l_qseq - p);
+ }
+
+
+ /** Get the start of the alignment on the read, by removing soft-clips
+ */
+ inline int32_t AlignmentPosition() const {
+ uint32_t* c = bam_get_cigar(b);
+ int32_t p = 0;
+ for (int32_t i = 0; i < b->core.n_cigar; ++i) {
+ if ( (bam_cigar_opchr(c[i]) == 'S') || (bam_cigar_opchr(c[i]) == 'H'))
+ p += bam_cigar_oplen(c[i]);
+ else // not a clip, so stop counting
+ break;
+ }
+ return p;
+ }
+
+ /** Get the end of the alignment on the read, by removing soft-clips
+ */
+ inline int32_t AlignmentEndPosition() const {
+ uint32_t* c = bam_get_cigar(b);
+ int32_t p = 0;
+ for (int32_t i = b->core.n_cigar - 1; i >= 0; --i) { // loop from the end
+ if ( (bam_cigar_opchr(c[i]) == 'S') || (bam_cigar_opchr(c[i]) == 'H'))
+ p += bam_cigar_oplen(c[i]);
+ else // not a clip, so stop counting
+ break;
+ }
+ return (b->core.l_qseq - p);
+ }
+
+ /** Get the number of soft clipped bases */
+ inline int32_t NumSoftClip() const {
+ int32_t p = 0;
+ uint32_t* c = bam_get_cigar(b);
+ for (int32_t i = 0; i < b->core.n_cigar; ++i)
+ if (bam_cigar_opchr(c[i]) == 'S')
+ p += bam_cigar_oplen(c[i]);
+ return p;
+ }
+
+ /** Get the number of hard clipped bases */
+ inline int32_t NumHardClip() const {
+ int32_t p = 0;
+ uint32_t* c = bam_get_cigar(b);
+ for (int32_t i = 0; i < b->core.n_cigar; ++i)
+ if (bam_cigar_opchr(c[i]) == 'H')
+ p += bam_cigar_oplen(c[i]);
+ return p;
+ }
+
+
+ /** Get the number of clipped bases (hard clipped and soft clipped) */
+ inline int32_t NumClip() const {
+ int32_t p = 0;
+ uint32_t* c = bam_get_cigar(b);
+ for (int32_t i = 0; i < b->core.n_cigar; ++i)
+ if ( (bam_cigar_opchr(c[i]) == 'S') || (bam_cigar_opchr(c[i]) == 'H') )
+ p += bam_cigar_oplen(c[i]);
+ return p;
+ }
+
+ /** Get a string (Z) tag
+ * @param tag Name of the tag. eg "XP"
+ * @return The value stored in the tag. Returns empty string if it does not exist.
+ */
+ std::string GetZTag(const std::string& tag) const;
+
+ /** Get a vector of type int from a Z tag delimited by "^"
+ * Smart-tags allow one to store vectors of strings, ints or doubles in the alignment tags, and
+   * do not require an additional data structure on top of bam1_t.
+   * @param tag Name of the tag eg "AL"
+   * @return A vector of ints, retrieved from the delimited Z tag
+ * @exception Throws an invalid_argument if cannot convert delimited field val to int
+ */
+ std::vector<int> GetSmartIntTag(const std::string& tag) const;
+
+ /** Get a vector of type double from a Z tag delimited by "x"
+   * Smart-tags allow one to store vectors of strings, ints or doubles in the alignment tags, and
+   * do not require an additional data structure on top of bam1_t.
+   * @param tag Name of the tag eg "AL"
+   * @return A vector of doubles, retrieved from the delimited Z tag
+ * @exception Throws an invalid_argument if cannot convert delimited field val to double
+ */
+ std::vector<double> GetSmartDoubleTag(const std::string& tag) const;
+
+ /** Get a vector of strings from a Z tag delimited by "^"
+ * Smart-tags allow one to store vectors of strings, ints or doubles in the alignment tags, and
+   * do not require an additional data structure on top of bam1_t.
+   * @param tag Name of the tag eg "CN"
+   * @return A vector of strings, retrieved from the delimited Z tag
+ */
+ std::vector<std::string> GetSmartStringTag(const std::string& tag) const;
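+
+  /* Usage sketch (illustrative only; "AL" is an arbitrary tag name and rec is a
+   * BamRecord). Per the documentation above, repeated SmartAddTag calls append
+   * delimited values that the GetSmart*Tag accessors split back out:
+   *
+   *   rec.SmartAddTag("AL", "10");
+   *   rec.SmartAddTag("AL", "20");
+   *   std::vector<int> vals = rec.GetSmartIntTag("AL"); // expected {10, 20}
+   */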
+
+ /** Get an int (i) tag
+ * @param tag Name of the tag. eg "XP"
+ * @return The value stored in the tag. Returns 0 if it does not exist.
+ */
+ inline int32_t GetIntTag(const std::string& tag) const {
+ uint8_t* p = bam_aux_get(b.get(),tag.c_str());
+ if (!p)
+ return 0;
+ return bam_aux2i(p);
+ }
+
+ /** Add a string (Z) tag
+ * @param tag Name of the tag. eg "XP"
+ * @param val Value for the tag
+ */
+ void AddZTag(std::string tag, std::string val);
+
+ /** Add an int (i) tag
+ * @param tag Name of the tag. eg "XP"
+ * @param val Value for the tag
+ */
+ inline void AddIntTag(const std::string& tag, int32_t val) {
+ bam_aux_append(b.get(), tag.data(), 'i', 4, (uint8_t*)&val);
+ }
+
+ /** Set the chr id number
+ * @param id Chromosome id. Typically is 0 for chr1, etc
+ */
+ inline void SetID(int32_t id) {
+ b->core.tid = id;
+ }
+
+ /** Set the alignment start position
+ * @param pos Alignment start position
+ */
+ inline void SetPosition(int32_t pos) {
+ b->core.pos = pos;
+ }
+
+ /** Convert CIGAR to a string
+ */
+ inline std::string CigarString() const {
+ std::stringstream cig;
+ uint32_t* c = bam_get_cigar(b);
+ for (int k = 0; k < b->core.n_cigar; ++k)
+ cig << bam_cigar_oplen(c[k]) << "MIDNSHP=XB"[c[k]&BAM_CIGAR_MASK];
+ return cig.str();
+ }
+
+ /** Return a human readable chromosome name assuming chr is indexed
+   * from 0 (eg id 0 returns "1")
+   * @note This is a quick convenience function, and is not robust to non-numbered
+ * chromosomes (eg chrX becomes 23). For accurate string representation of
+ * any chromosomes, use the full ChrName with BamHeader input.
+ */
+ inline std::string ChrName() const {
+ std::stringstream ss;
+ ss << (b->core.tid + 1);
+
+ return ss.str();
+ //return std::to_string(b->core.tid + 1); //c++11
+ }
+
+ /** Retrieve the human readable chromosome name.
+ * @param h Dictionary for chr name lookup. If it is empty, assumes this is chr1 based reference.
+ * @exception Throws an out_of_range exception if chr id is not in dictionary
+ * @return Empty string if chr id < 0, otherwise chromosome name from dictionary.
+ */
+ inline std::string ChrName(const SeqLib::BamHeader& h) const {
+ if (b->core.tid < 0)
+ return std::string();
+
+    if (!h.isEmpty())
+ return h.IDtoName(b->core.tid);
+
+ // c++98
+ std::stringstream ss;
+ ss << b->core.tid;
+
+ // no header, assume zero based
+ return ss.str(); //std::to_string(b->core.tid + 1);
+
+ }
+
+ /** Return a short description (chr:pos) of this read */
+ inline std::string Brief() const {
+ //if (!h)
+ // c++11
+ // return(std::to_string(b->core.tid + 1) + ":" + AddCommas<int32_t>(b->core.pos) + "(" + ((b->core.flag&BAM_FREVERSE) != 0 ? "+" : "-") + ")");
+ // c++98
+ std::stringstream ss;
+ ss << (b->core.tid + 1) << ":" << AddCommas(b->core.pos) << "(" << ((b->core.flag&BAM_FREVERSE) != 0 ? "+" : "-") << ")";
+ return ss.str();
+ //else
+ // return(std::string(h->target_name[b->core.tid]) + ":" + AddCommas<int32_t>(b->core.pos) + "(" + ((b->core.flag&BAM_FREVERSE) != 0 ? "+" : "-") + ")");
+ }
+
+ /** Return a short description (chr:pos) of this read's mate */
+ inline std::string BriefMate() const {
+ //if (!h)
+ // c++11
+ // return(std::to_string(b->core.mtid + 1) + ":" + AddCommas<int32_t>(b->core.mpos) + "(" + ((b->core.flag&BAM_FMREVERSE) != 0 ? "+" : "-") + ")");
+ std::stringstream ss;
+ ss << (b->core.mtid + 1) << ":" << AddCommas(b->core.mpos) << "(" << ((b->core.flag&BAM_FMREVERSE) != 0 ? "+" : "-") << ")";
+ return ss.str();
+ //else
+ // return(std::string(h->target_name[b->core.mtid]) + ":" + AddCommas<int32_t>(b->core.mpos) + "(" + ((b->core.flag&BAM_FMREVERSE) != 0 ? "+" : "-") + ")");
+ }
+
+ /** Strip a particular alignment tag
+ * @param tag Tag to remove
+ */
+ inline void RemoveTag(const char* tag) {
+ uint8_t* p = bam_aux_get(b.get(), tag);
+ if (p)
+ bam_aux_del(b.get(), p);
+ }
+
+ /** Strip all of the alignment tags */
+ inline void RemoveAllTags() {
+ size_t keep = (b->core.n_cigar<<2) + b->core.l_qname + ((b->core.l_qseq + 1)>>1) + b->core.l_qseq;
+ b->data = (uint8_t*)realloc(b->data, keep); // free the end, which has aux data
+ b->l_data = keep;
+ b->m_data = b->l_data;
+ }
+
+ /** Return the raw pointer */
+ inline bam1_t* raw() const { return b.get(); }
+
+ /** Return the number of bases on the query that are covered by a match (M) on both reads
+ * This is for tracking overlapping coverage on the reads, regardless of their alignment locations.
+ * For instance, two reads with 101M will have overlapping coverage of 101, regardless of alignment location.
+ * A read with 50S50M and 50M50S will have 0 overlapping coverage.
+ */
+ int OverlappingCoverage(const BamRecord& r) const;
+
+ private:
+
+ SeqPointer<bam1_t> b; // bam1_t shared pointer
+
+};
+
+ typedef std::vector<BamRecord> BamRecordVector; ///< Store a vector of alignment records
+
+ typedef std::vector<BamRecordVector> BamRecordClusterVector; ///< Store a vector of alignment vectors
+
+ /** @brief Sort methods for alignment records
+ */
+ namespace BamRecordSort {
+
+ /** @brief Sort by read position
+ */
+ struct ByReadPosition
+ {
+ bool operator()( const BamRecord& lx, const BamRecord& rx ) const {
+ return (lx.ChrID() < rx.ChrID()) || (lx.ChrID() == rx.ChrID() && lx.Position() < rx.Position());
+ }
+ };
+
+ /** @brief Sort by mate position
+ */
+ struct ByMatePosition
+ {
+ bool operator()( const BamRecord& lx, const BamRecord& rx ) const {
+ return (lx.MateChrID() < rx.MateChrID()) || (lx.MateChrID() == rx.MateChrID() && lx.MatePosition() < rx.MatePosition());
+ }
+ };
+
+}
+
+}
+#endif
diff --git a/SeqLib/BamWalker.h b/SeqLib/BamWalker.h
new file mode 100644
index 0000000..d8ae7ee
--- /dev/null
+++ b/SeqLib/BamWalker.h
@@ -0,0 +1,91 @@
+#ifndef SEQLIB_BAM_WALKER_H__
+#define SEQLIB_BAM_WALKER_H__
+
+#include <cassert>
+
+#include <stdint.h>
+#include "SeqLib/BamRecord.h"
+
+// not sure what's going on here...
+#ifndef INT32_MAX
+#define INT32_MAX 0x7fffffffL
+#endif
+
+extern "C" {
+#include "htslib/cram/cram.h"
+#include "htslib/cram/cram_io.h"
+}
+
+struct idx_delete {
+ void operator()(hts_idx_t* x) { if (x) hts_idx_destroy(x); }
+};
+
+struct hts_itr_delete {
+ void operator()(hts_itr_t* x) { if (x) hts_itr_destroy(x); }
+};
+
+struct bgzf_delete {
+ void operator()(BGZF* x) { if(x) bgzf_close(x); }
+};
+
+struct bam_hdr_delete {
+ void operator()(bam_hdr_t* x) { if (x) bam_hdr_destroy(x); }
+};
+
+struct htsFile_delete { // should also close cram index
+ void operator()(htsFile* x) { if (x) sam_close(x); }
+};
+
+// Phred score transformations
+inline int char2phred(char b) {
+ uint8_t v = b;
+ assert(v >= 33);
+ return v - 33;
+}
+
+// from samtools
+inline char *samfaipath(const char *fn_ref)
+{
+ char *fn_list = 0;
+ if (fn_ref == 0) return 0;
+ fn_list = (char*)calloc(strlen(fn_ref) + 5, 1);
+ strcat(strcpy(fn_list, fn_ref), ".fai");
+ if (access(fn_list, R_OK) == -1) { // fn_list is unreadable
+ std::cerr << "ERROR: Cannot read the index file for CRAM read/write" << std::endl;
+ }
+ return fn_list;
+}
+
+namespace SeqLib {
+
+ /** Small class to store a counter to measure BamReader progress.
+ * Currently only stores number of reads seen / kept.
+ */
+struct ReadCount {
+
+ uint32_t keep; ///< Store total number of reads kept
+ uint32_t total; ///< Store total number of reads seen
+
+ ReadCount() : keep(0), total(0) {}
+
+ /** Return the percent of total reads kept
+ */
+ int percent () const {
+ int perc = SeqLib::percentCalc<uint64_t>(keep, total);
+ return perc;
+ }
+
+ /** Return the total reads visited as a comma-formatted string */
+ std::string totalString() const {
+ return SeqLib::AddCommas<uint64_t>(total);
+ }
+
+ /** Return the kept reads as a comma-formatted string */
+ std::string keepString() const {
+ return SeqLib::AddCommas<uint64_t>(keep);
+ }
+
+};
+
+}
+#endif
diff --git a/SeqLib/BamWriter.h b/SeqLib/BamWriter.h
new file mode 100644
index 0000000..fb257f8
--- /dev/null
+++ b/SeqLib/BamWriter.h
@@ -0,0 +1,114 @@
+#ifndef SEQLIB_BAM_WRITER_H
+#define SEQLIB_BAM_WRITER_H
+
+#include <cassert>
+#include "SeqLib/BamRecord.h"
+
+namespace SeqLib {
+
+ const int BAM = 4;
+ const int SAM = 3;
+ const int CRAM = 6;
+
+/** Stream alignment records out to a BAM/SAM/CRAM file or stdout
+ */
+class BamWriter {
+
+ public:
+
+ /** Construct an empty BamWriter to write BAM */
+ BamWriter() : output_format("wb") {}
+
+ /** Construct an empty BamWriter and specify output format
+ * @param o One of SeqLib::BAM, SeqLib::CRAM, SeqLib::SAM
+ * @exception Throws an invalid_argument if not one of accepted values
+ */
+ BamWriter(int o);
+
+ /** Destroy a BamWriter and close all connections to the BAM
+ *
+ * Calling the destructor will take care of all of the C-style dealloc
+ * calls required within HTSlib to close a BAM or SAM file.
+ */
+ ~BamWriter() {}
+
+ /** Write the BAM header
+ * @return False if cannot write header
+ */
+ bool WriteHeader() const;
+
+ /** Provide a header to this writer
+ * @param h Header for this writer. Copies contents
+ */
+ void SetHeader(const SeqLib::BamHeader& h);
+
+  /** Close a file explicitly. This is required before indexing with BuildIndex.
+ * @note If not called, BAM will close properly on object destruction
+ * @return False if BAM already closed or was never opened
+ */
+ bool Close();
+
+ /** Create the index file for the output bam in BAI format.
+ *
+ * This will make a call to HTSlib bam_index_build for the output file.
+ * @return Returns false if sam_index_build exits with < 0 status
+ */
+ bool BuildIndex() const;
+
+ /** Print out some basic info about this writer */
+ friend std::ostream& operator<<(std::ostream& out, const BamWriter& b);
+
+ /** Open a BAM file for streaming out.
+ * @param f Path to the output BAM/SAM/CRAM or "-" for stdout
+   * @return False if cannot open for writing
+ */
+ bool Open(const std::string& f);
+
+ /** Return if the writer has opened the file */
+ bool IsOpen() const { return fop.get() != NULL; }
+
+ /** Write an alignment to the output BAM file
+ * @param r The BamRecord to save
+ * @return False if cannot write alignment
+ * @exception Throws a runtime_error if cannot write alignment
+ */
+ bool WriteRecord(const BamRecord &r);
+
+  /** Explicitly set a reference genome to be used to decode CRAM files.
+   * If no reference is specified, one will automatically be loaded from
+   * the file pointed to by the SQ tags in the CRAM header.
+   * @note This function is useful if the reference path pointed
+   * to by the UR field of SQ is not on your system, and you would
+   * like to explicitly provide one.
+   * @param ref Path to an indexed reference genome
+ * @return Returns true if reference loaded.
+ */
+ bool SetCramReference(const std::string& ref);
+
+ /** Return the BAM header */
+ BamHeader Header() const { return hdr; };
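+
+  /* Usage sketch (illustrative only; "out.bam" is a hypothetical path, and hdr
+   * and rec are a BamHeader and BamRecord obtained elsewhere, e.g. from a
+   * BamReader):
+   *
+   *   SeqLib::BamWriter w(SeqLib::BAM);
+   *   w.SetHeader(hdr);
+   *   if (w.Open("out.bam") && w.WriteHeader()) {
+   *     w.WriteRecord(rec);
+   *     w.Close();      // close explicitly before indexing
+   *     w.BuildIndex();
+   *   }
+   */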
+
+ private:
+
+ // path to output file
+ std::string m_out;
+
+  // open m_out for writing
+ void open_BAM_for_writing();
+
+ // output format
+ std::string output_format;
+
+ // hts
+ SeqPointer<htsFile> fop;
+
+ // header
+ SeqLib::BamHeader hdr;
+
+};
+
+
+}
+#endif
+
+
diff --git a/SeqLib/FastqReader.h b/SeqLib/FastqReader.h
new file mode 100644
index 0000000..26ed8a8
--- /dev/null
+++ b/SeqLib/FastqReader.h
@@ -0,0 +1,59 @@
+#ifndef SEQLIB_FASTQ_READER_H
+#define SEQLIB_FASTQ_READER_H
+
+#include <string>
+
+#include <iostream>
+#include <fstream>
+#include <sys/stat.h>
+#include <unistd.h>
+
+// all kseq stuff is in UnalignedSequence
+#include "SeqLib/UnalignedSequence.h"
+
+namespace SeqLib{
+
+ /** Simple reader for FASTA/FASTQ files */
+class FastqReader {
+
+ public:
+
+ /** Construct an empty FASTQ/FASTA reader */
+ FastqReader() {}
+
+ /** Construct a reader and open a FASTQ/FASTA reader
+ * @param file Path to a FASTQ or FASTA file
+ */
+ FastqReader(const std::string& file);
+
+ /** Open a FASTQ/FASTA file for reading
+ * @param file Path to a FASTQ or FASTA file
+ * @return Returns true if opening was successful
+ */
+ bool Open(const std::string& file);
+
+ /** Retrieve the next sequence from the FASTA/FASTQ
+ * @param s Sequence to be filled in with Name, Seq, Qual and Strand
+ */
+ bool GetNextSequence(UnalignedSequence& s);
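+
+  /* Usage sketch (illustrative only; "reads.fq" is a hypothetical path):
+   *
+   *   SeqLib::FastqReader fq;
+   *   if (fq.Open("reads.fq")) {
+   *     SeqLib::UnalignedSequence s;
+   *     while (fq.GetNextSequence(s))
+   *       std::cout << s.Name << "\t" << s.Seq.length() << std::endl;
+   *   }
+   */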
+
+ ~FastqReader() {
+ if (seq)
+ kseq_destroy(seq);
+ if (fp)
+ gzclose(fp);
+ }
+
+
+ private:
+
+ std::string m_file;
+
+ gzFile fp; // file handler for kseq
+ kseq_t * seq; // current read
+
+};
+
+}
+
+#endif
diff --git a/SeqLib/FermiAssembler.h b/SeqLib/FermiAssembler.h
new file mode 100644
index 0000000..f01fea4
--- /dev/null
+++ b/SeqLib/FermiAssembler.h
@@ -0,0 +1,147 @@
+#ifndef SEQLIB_FERMI_H
+#define SEQLIB_FERMI_H
+
+#include <string>
+#include <cstdlib>
+#include <iostream>
+
+#include "SeqLib/BamRecord.h"
+
+extern "C"
+{
+#include "fermi-lite/htab.h"
+#include "fermi-lite/fml.h"
+#include "fermi-lite/bfc.h"
+}
+
+namespace SeqLib {
+
+ /** Sequence assembly using FermiKit from Heng Li
+ */
+ class FermiAssembler {
+
+ public:
+
+ /** Create an empty FermiAssembler with default parameters */
+ FermiAssembler ();
+
+ /** Destroy by clearing all stored reads from memory */
+ ~FermiAssembler();
+
+ /** Provide a set of reads to be assembled
+ * @param brv Reads with or without quality scores
+ * @note This will copy the reads and quality scores
+ * into this object. Deallocation is automatic with object
+ * destruction, or with ClearReads.
+ */
+ void AddReads(const BamRecordVector& brv);
+
+ /** Clear all of the sequences and deallocate memory.
+ * This is not required, as it will be done on object destruction
+ */
+ void ClearReads();
+
+ /** Clear all of the contigs and deallocate memory.
+ * This is not required, as it will be done on object destruction
+ */
+ void ClearContigs();
+
+    /** Perform Bloom filter error correction of the reads
+ * in place. */
+ void CorrectReads();
+
+    /** Perform Bloom filter error correction of the reads
+ * in place. Also remove unique reads.
+ */
+ void CorrectAndFilterReads();
+
+ /** Return the sequences in this object, which may have
+ * been error-corrected
+ */
+ UnalignedSequenceVector GetSequences() const;
+
+ /** Perform the string graph assembly.
+     * This will produce the string graph,
+     * and traverse the graph to emit contigs
+ */
+ void PerformAssembly();
+
+ /** Return the assembled contigs
+ * @return Assembled contigs in upper case strings (ACTGN)
+ */
+ std::vector<std::string> GetContigs() const;
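+
+    /* Usage sketch (illustrative only; brv is a BamRecordVector filled elsewhere):
+     *
+     *   SeqLib::FermiAssembler f;
+     *   f.AddReads(brv);
+     *   f.CorrectReads();    // optional Bloom-filter error correction
+     *   f.PerformAssembly();
+     *   std::vector<std::string> contigs = f.GetContigs();
+     */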
+
+ /** Perform assembly, without error correction */
+ void DirectAssemble(float kcov);
+
+ /** Set the minimum overlap between reads during string graph construction */
+ void SetMinOverlap(uint32_t m) { opt.min_asm_ovlp = m; }
+
+ /** Aggressively trim graph to discard heterozygotes.
+ * Suggested by lh3 for bacterial assembly
+ * @note See: https://github.com/lh3/fermi-lite/blob/master/example.c
+ */
+ void SetAggressiveTrim() { opt.mag_opt.flag |= MAG_F_AGGRESSIVE; }
+
+ /** From lh3: Drop an overlap if its length is below max_overlap * ratio
+ * @param ratio Overlaps below ratio * max_overlap will be removed
+ */
+ void SetDropOverlapRatio(double ratio) { opt.mag_opt.min_dratio1 = ratio; }
+
+ /** From lh3: Min k-mer & read count thresholds for ec and graph cleaning
+ */
+ void SetKmerMinThreshold(int min) { opt.min_cnt = min; }
+
+ /** From lh3: Max k-mer & read count thresholds for ec and graph cleaning
+ */
+ void SetKmerMaxThreshold(int max) { opt.max_cnt = max; }
+
+ // From lh3: retain a bubble if one side is longer than the other side by >INT-bp
+ //void SetBubbleDifference(int bdiff) { opt.mag_opt.max_bdiff; }
+
+ /** Return the minimum overlap parameter for this assembler */
+ uint32_t GetMinOverlap() const { return opt.min_asm_ovlp; }
+
+ /** Add a set of unaligned sequences to stage for assembly */
+ void AddReads(const UnalignedSequenceVector& v);
+
+ /** Add a single sequence to be assembled */
+ void AddRead(const UnalignedSequence& r);
+
+    /** Add a single sequence from an aligned read to be assembled */
+ void AddRead(const BamRecord& r);
+
+ /** Return the number of sequences that are controlled by this assembler */
+ size_t NumSequences() const { return n_seqs; }
+
+ private:
+
+ // reads to assemble
+ fseq1_t *m_seqs;
+
+ // size of m_seqs
+ size_t m;
+
+ std::vector<std::string> m_names;
+
+ // number of base-pairs
+ uint64_t size;
+
+ // number of reads
+ size_t n_seqs;
+
+ // number of contigs
+ int n_utg;
+
+ // options
+ fml_opt_t opt;
+
+ // the unitigs
+ fml_utg_t *m_utgs;
+
+ };
+
+
+}
+
+#endif
diff --git a/SeqLib/GenomicRegion.h b/SeqLib/GenomicRegion.h
new file mode 100644
index 0000000..2f400b9
--- /dev/null
+++ b/SeqLib/GenomicRegion.h
@@ -0,0 +1,174 @@
+#ifndef SEQLIB_GENOMIC_REGION_H__
+#define SEQLIB_GENOMIC_REGION_H__
+
+#include <vector>
+#include <iostream>
+#include <stdint.h>
+#include <utility>
+#include <list>
+#include <cstring>
+
+#include "SeqLib/SeqLibCommon.h"
+#include "SeqLib/SeqLibUtils.h"
+#include "SeqLib/BamHeader.h"
+
+namespace SeqLib {
+
+ /** @brief Container for an interval on the genome
+ */
+class GenomicRegion {
+
+ template<typename T> friend class GenomicRegionCollection;
+
+ public:
+
+ /** Construct an "empty" GenomicRegion at (chr -1), pos 0, width = 1
+ */
+ GenomicRegion() : chr(-1), pos1(0), pos2(0), strand('*') {};
+
+ /** Construct a GenomicRegion from another
+ * @param gr A GenomicRegion to copy
+ */
+ GenomicRegion(const GenomicRegion& gr) : chr(gr.chr), pos1(gr.pos1), pos2(gr.pos2), strand(gr.strand) {}
+
+ /** Construct a GenomicRegion at a specific start and end location
+ * @param t_chr Chromosome id (chr1 = 0, etc)
+ * @param t_pos1 Start position
+ * @param t_pos2 End position. Must be >= start position.
+ * @param t_strand +, -, or * (default is *)
+ * @exception throws an invalid_argument exception if pos2 < pos1
+ * @exception throws an invalid_argument exception if char not one of +, - , *
+ */
+ GenomicRegion(int32_t t_chr, int32_t t_pos1, int32_t t_pos2, char t_strand = '*');
+
+ /** Construct a GenomicRegion from a set of strings
+ * @param tchr Chromosome name
+ * @param tpos1 Position 1
+ * @param tpos2 Position 2
+ * @param hdr Header to be used as sequence dictionary to convert chromosome name to id
+ * @exception Throws an invalid_argument if cannot convert string to int
+   * @exception Throws an out_of_range if the number is greater than int32_t max
+ * @note If an empty BamHeader is provided, will try to guess chromosome id.
+ * eg "1" -> 0, "X" -> 22, "chr2" -> 1.
+ */
+ GenomicRegion(const std::string& tchr, const std::string& tpos1, const std::string& tpos2, const BamHeader& hdr);
+
+ /** Construct a GenomicRegion from a samtools style region string.
+ *
+   * This calls the samtools-like parser, which accepts input in the form "chr7:10,000-11,100".
+ * Note that this requires that a BamHeader be provided as well
+ * to convert the text representation of the chr to the id number.
+ * @param reg Samtools-style string (e.g. "1:1,000,000-2,000,000") or single chr
+ * @param hdr Pointer to BAM header that will be used to convert chr string to ref id
+ * @exception throws an invalid_argument exception if cannot parse correctly
+ */
+ GenomicRegion(const std::string& reg, const BamHeader& hdr);
+
+ /** Return a string representation of just the first base-pair
+ * e.g. 1:10,000
+ */
+ std::string PointString() const;
+
+ // Randomize the position of this GenomicRegion on the genome
+ //
+ // Creates a GenomicRegion with pos1 = pos2. Simulates a random value
+ // with val <= genome_size_XY and then converts to GenomicRegion
+ // @note Seed is set before-hand at any time with srand
+ //
+ //void Random();
+
+ /** Check if the GenomicRegion is empty (aka chr -1 and pos1=pos2=0) */
+ bool IsEmpty() const;
+
+ /** Find the absolute distance between start of two GenomicRegion objects
+ *
+ * If chr1 != chr2, then -1 is returned
+ * @param gr GenomicRegion object to compare with
+ */
+ int32_t DistanceBetweenStarts(const GenomicRegion &gr) const;
+
+ /** Find the absolute distance between ends of two GenomicRegion objects
+ *
+ * If chr1 != chr2, then -1 is returned
+ * @param gr GenomicRegion object to compare with
+ */
+ int32_t DistanceBetweenEnds(const GenomicRegion &gr) const;
+
+ /** Returns identical string as would be obtained from << */
+ std::string ToString() const;
+
+  /** Returns true if a.chr < b.chr, or a.pos1 < b.pos1 if on the same chrom, or a.pos2 < b.pos2 if same chrom and same pos1 */
+ bool operator < (const GenomicRegion& b) const;
+
+  /** Returns true if a.chr > b.chr, or a.pos1 > b.pos1 if on the same chrom, or a.pos2 > b.pos2 if same chrom and same pos1 */
+ bool operator > (const GenomicRegion& b) const;
+
+  /** Returns true if chr, pos1 and pos2 are all equal. No strand consideration */
+ bool operator==(const GenomicRegion& b) const;
+
+ /** Returns opposite of == */
+ bool operator!=(const GenomicRegion& b) const;
+
+ /** Returns true if < or == . No strand consideration */
+ bool operator<=(const GenomicRegion &b) const;
+
+ /** Returns true if > or == . No strand consideration */
+ bool operator>=(const GenomicRegion &b) const;
+
+ /** Check if the GenomicRegion has a complete or partial overlap
+ * If the argument contains the calling object, returns 3
+ * If the argument is contained in the calling object, returns 2
+ * If the argument overlaps partially the calling object, returns 1
+ * If the argument and calling object do not overlap, returns 0
+ * @param gr GenomicRegion to compare against
+ */
+ int GetOverlap(const GenomicRegion& gr) const;
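+
+  /* Worked example (illustrative only) of the return codes above:
+   *
+   *   SeqLib::GenomicRegion a(0, 100, 200);
+   *   SeqLib::GenomicRegion b(0, 150, 160);
+   *   a.GetOverlap(b);                                  // 2: b is contained in a
+   *   b.GetOverlap(a);                                  // 3: a contains b
+   *   a.GetOverlap(SeqLib::GenomicRegion(0, 190, 300)); // 1: partial overlap
+   *   a.GetOverlap(SeqLib::GenomicRegion(1, 100, 200)); // 0: no overlap (different chr)
+   */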
+
+ /** Print with chr ID bumped up by one to make eg ID 0
+ * print as "1"
+ */
+ friend std::ostream& operator<<(std::ostream& out, const GenomicRegion& gr);
+
+ /** Extract the chromosome name as a string
+ * @param h BamHeader to serve as sequence dictionary
+ * @exception throws an out_of_range exception if ref id >= h->n_targets
+ */
+ std::string ChrName(const BamHeader& h) const;
+
+ /** Pad the object to make it larger or smaller
+ * @param pad Amount to pad by.
+ * @exception Throws an out_of_range if pad < -width/2
+ */
+ void Pad(int32_t pad);
+
+ /** Return the width (inclusive)
+ * @note Width is inclusive, so that if pos1=1 and pos2=2, width is 2
+ */
+ int Width() const;
+
+ int32_t chr; ///< Chromosome ID
+
+ int32_t pos1; ///< Start position
+
+ int32_t pos2; ///< End Position
+
+ char strand; ///< Strand. Should be one of *, -, +
+
+ private:
+
+ // Convert a chromosome number to a string using default ordering (1-Y)
+ // Output names are 1-based (1, ...), while the input ref ID is zero-based.
+ // e.g. chrToString(10) returns "11"
+ // @param ref Reference ID to convert
+ // @exception throws an invalid_argument exception if ref < 0
+ std::string chrToString(int32_t ref) const;
+
+
+};
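+
+// Example (illustrative sketch): basic GenomicRegion usage, assuming a BamHeader `hdr`
+// has already been loaded elsewhere (e.g. from a BamReader).
+//
+//   SeqLib::GenomicRegion gr1("chr7:10,000-11,100", hdr);       // samtools-style parse
+//   SeqLib::GenomicRegion gr2("chr7", "10500", "12000", hdr);   // chr/pos1/pos2 as strings
+//   int o = gr1.GetOverlap(gr2);     // 1 = partial overlap
+//   gr1.Pad(500);                    // widen by 500 bp on each side
+//   std::cout << gr1.ToString() << " width=" << gr1.Width() << std::endl;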
+
+typedef std::vector<GenomicRegion> GenomicRegionVector;
+
+}
+
+
+#endif
diff --git a/SeqLib/GenomicRegionCollection.cpp b/SeqLib/GenomicRegionCollection.cpp
new file mode 100644
index 0000000..7b17da1
--- /dev/null
+++ b/SeqLib/GenomicRegionCollection.cpp
@@ -0,0 +1,701 @@
+#include "SeqLib/GenomicRegionCollection.h"
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+#include <cassert>
+#include <set>
+#include <stdexcept>
+#include <algorithm>
+#include <zlib.h>
+#include <cstring> // strlen, strcmp
+#include <cstdio>  // fileno, stdin
+
+#define GZBUFFER 4096
+
+//#define DEBUG_OVERLAPS 1
+
+namespace SeqLib {
+
+ template<class T>
+ GenomicRegionCollection<T>::GenomicRegionCollection(int width, int ovlp, const HeaderSequenceVector& h) {
+
+ idx = 0;
+ allocate_grc();
+
+ // undefined otherwise
+ if (width <= ovlp)
+ throw std::invalid_argument("Width should be > ovlp");
+
+ size_t chr = 0;
+ for (HeaderSequenceVector::const_iterator i = h.begin(); i != h.end(); ++i) {
+
+ T gr;
+ gr.chr = chr;
+ gr.pos1 = 0;
+ gr.pos2 = i->Length;
+ ++chr;
+
+ if (width >= gr.Width()) {
+ m_grv->push_back(gr);
+ continue;
+ }
+
+ int32_t start = gr.pos1;
+ int32_t end = gr.pos1 + width;
+
+ // region is smaller than width
+ if ( end > gr.pos2 ) {
+ std::cerr << "GenomicRegionCollection constructor: GenomicRegion is smaller than bin width" << std::endl;
+ return;
+ }
+
+ // loop through the sizes until done
+ while (end <= gr.pos2) {
+ T tg;
+ tg.chr = gr.chr;
+ tg.pos1 = start;
+ tg.pos2 = end;
+ m_grv->push_back(tg);
+ end += width - ovlp; // make the new one
+ start += width - ovlp;
+ }
+ assert(m_grv->size() > 0);
+
+
+ }
+ }
+
+ template<class T>
+ void GenomicRegionCollection<T>::CoordinateSort() {
+
+ if (m_grv) {
+ std::sort(m_grv->begin(), m_grv->end());
+ m_sorted = true;
+ }
+ }
+
+ template<class T>
+ void GenomicRegionCollection<T>::Shuffle() {
+ std::random_shuffle ( m_grv->begin(), m_grv->end() );
+ }
+
+ template<class T>
+ void GenomicRegionCollection<T>::SortAndStretchRight(int max) {
+
+ if (!m_grv->size())
+ return;
+
+ CoordinateSort();
+
+ if (max > 0 && max < m_grv->back().pos2)
+ throw std::out_of_range("GenomicRegionCollection::SortAndStrech Can't stretch to max, as we are already past max.");
+
+ for (size_t i = 0; i < m_grv->size() - 1; ++i)
+ m_grv->at(i).pos2 = m_grv->at(i+1).pos1 - 1;
+
+ if (max > 0)
+ m_grv->back().pos2 = max;
+
+ }
+
+ template<class T>
+ void GenomicRegionCollection<T>::SortAndStretchLeft(int min) {
+
+ if (!m_grv->size())
+ return;
+
+ CoordinateSort();
+
+ if (min >= 0 && min < m_grv->begin()->pos1)
+ throw std::out_of_range("GenomicRegionCollection::SortAndStrechLeft - Can't stretch to min, as we are already below min");
+
+ if (min >= 0)
+ m_grv->at(0).pos1 = min;
+
+ for (size_t i = 1; i < m_grv->size(); ++i)
+ m_grv->at(i).pos1 = m_grv->at(i-1).pos2 + 1;
+
+ }
+
+template<class T>
+bool GenomicRegionCollection<T>::ReadBED(const std::string & file, const BamHeader& hdr) {
+
+ m_sorted = false;
+ idx = 0;
+
+ gzFile fp = NULL;
+ fp = strcmp(file.c_str(), "-")? gzopen(file.c_str(), "r") : gzdopen(fileno(stdin), "r");
+
+ if (file.empty() || !fp) {
+ std::cerr << "BED file not readable: " << file << std::endl;
+ return false;
+ }
+
+ // http://www.lemoda.net/c/gzfile-read/
+ while (1) {
+
+ int err;
+ char buffer[GZBUFFER];
+ gzgets(fp, buffer, GZBUFFER);
+ int bytes_read = strlen(buffer);
+
+ // get one line
+ if (bytes_read < GZBUFFER - 1) {
+ if (gzeof (fp)) break;
+ else {
+ const char * error_string;
+ error_string = gzerror (fp, &err);
+ if (err) {
+ fprintf (stderr, "Error: %s.\n", error_string);
+ exit (EXIT_FAILURE);
+ }
+ }
+ }
+
+ // prepare to loop through each field of BED line
+ //size_t counter = 0;
+ std::string chr, pos1, pos2;
+ std::string line(buffer);
+ std::istringstream iss_line(line);
+ std::string val;
+ if (line.find("#") != std::string::npos)
+ continue;
+
+ // read first three BED columns
+ iss_line >> chr >> pos1 >> pos2;
+
+ // construct the GenomicRegion
+ T gr(chr, pos1, pos2, hdr);
+
+ if (gr.chr >= 0)
+ m_grv->push_back(gr);
+ }
+
+ return true;
+}
+
+template<class T>
+bool GenomicRegionCollection<T>::ReadVCF(const std::string & file, const BamHeader& hdr) {
+
+ m_sorted = false;
+ idx = 0;
+
+ gzFile fp = NULL;
+ fp = strcmp(file.c_str(), "-")? gzopen(file.c_str(), "r") : gzdopen(fileno(stdin), "r");
+
+ if (file.empty() || !fp) {
+ std::cerr << "VCF file not readable: " << file << std::endl;
+ return false;
+ }
+
+ // http://www.lemoda.net/c/gzfile-read/
+ while (1) {
+
+ int err;
+ char buffer[GZBUFFER];
+ gzgets(fp, buffer, GZBUFFER);
+ int bytes_read = strlen(buffer);
+
+ // get one line
+ if (bytes_read < GZBUFFER - 1) {
+ if (gzeof (fp)) break;
+ else {
+ const char * error_string;
+ error_string = gzerror (fp, &err);
+ if (err) {
+ fprintf (stderr, "Error: %s.\n", error_string);
+ exit (EXIT_FAILURE);
+ }
+ }
+ }
+
+ // prepare to loop through each field of the VCF line
+ std::string chr, pos;
+ std::string line(buffer);
+ std::istringstream iss_line(line);
+ std::string val;
+ if (line.empty() || line.at(0) == '#')
+ continue;
+
+ // read the first two columns
+ iss_line >> chr >> pos;
+
+ // construct the GenomicRegion
+ T gr(chr, pos, pos, hdr);
+ if (gr.chr >= 0)
+ m_grv->push_back(gr);
+ }
+
+ return true;
+}
+
+template<class T>
+GenomicRegionCollection<T>::GenomicRegionCollection(const std::string &file, const BamHeader& hdr) {
+
+ allocate_grc();
+
+ idx = 0;
+
+ // check if it's samtools-style file
+ if (file.find(":") != std::string::npos) {
+ m_sorted = true; // only one, so sorted
+ m_grv->push_back(T(file, hdr));
+ return;
+ }
+
+ // BED file
+ if (file.find(".bed") != std::string::npos)
+ ReadBED(file, hdr);
+ // VCF file
+ else if (file.find(".vcf") != std::string::npos)
+ ReadVCF(file, hdr);
+ else // default is BED file
+ ReadBED(file, hdr);
+
+}
+
+// reduce a set of GenomicRegions into the minimum overlapping set (same as GenomicRanges "reduce")
+template <class T>
+void GenomicRegionCollection<T>::MergeOverlappingIntervals() {
+
+ // make the list
+ std::list<T> intervals(m_grv->begin(), m_grv->end());
+
+ intervals.sort();
+ typename std::list<T>::iterator inext(intervals.begin());
+ ++inext;
+ for (typename std::list<T>::iterator i(intervals.begin()), iend(intervals.end()); inext != iend;) {
+ if((i->pos2 >= inext->pos1) && (i->chr == inext->chr)) // change >= to > to not overlap touching intervals (eg [4,5][5,6])
+ {
+ if(i->pos2 >= inext->pos2) intervals.erase(inext++);
+ else if(i->pos2 < inext->pos2)
+ { i->pos2 = inext->pos2; intervals.erase(inext++); }
+ }
+ else { ++i; ++inext; }
+ }
+
+ // move it over to a grv
+ m_grv->clear(); // clear the old data
+
+ // c++11
+ //std::vector<T> v{ std::make_move_iterator(std::begin(intervals)),
+ // std::make_move_iterator(std::end(intervals)) };
+ //m_grv->insert(m_grv->end(), v.begin(), v.end());
+
+ // non c++11
+ //std::vector<T> v;
+ // v.push_back(std::make_move_iterator(std::begin(intervals)));
+ //v.push_back(std::make_move_iterator(std::end(intervals)));
+ //std::vector<T> v{ std::make_move_iterator(std::begin(intervals)),
+ // std::make_move_iterator(std::end(intervals)) };
+ //m_grv->insert(m_grv->end(), v.begin(), v.end());
+ //m_grv->reserve(intervals.size());
+ //m_grv->append(intervals.begin(), intervals.end());
+ m_grv->insert(m_grv->end(), intervals.begin(), intervals.end());
+
+ // clear the old interval tree
+ m_tree->clear();
+}
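+
+// Worked example (illustrative): given stored regions {1:[10,50], 1:[40,80], 1:[80,90], 2:[5,10]},
+// MergeOverlappingIntervals() leaves {1:[10,90], 2:[5,10]}: [40,80] extends [10,50], and the
+// touching [80,90] is absorbed because the comparison above uses >= rather than >.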
+
+template <class T>
+GenomicRegionVector GenomicRegionCollection<T>::AsGenomicRegionVector() const {
+ GenomicRegionVector gg;
+ for (typename std::vector<T>::const_iterator i = m_grv->begin(); i != m_grv->end(); ++i)
+ gg.push_back(GenomicRegion(i->chr, i->pos1, i->pos2, i->strand));
+ return gg;
+}
+
+template <class T>
+void GenomicRegionCollection<T>::CreateTreeMap() {
+
+ if (!m_grv->size())
+ return;
+
+ // sort the genomic intervals
+ if (!m_sorted)
+ CoordinateSort();
+
+ // loop through and make the intervals for each chromosome
+ GenomicIntervalMap map;
+ for (size_t i = 0; i < m_grv->size(); ++i) {
+ map[m_grv->at(i).chr].push_back(GenomicInterval(m_grv->at(i).pos1, m_grv->at(i).pos2, i));
+ }
+
+ // for each chr, make the tree from the intervals
+ //for (auto it : map) {
+ for (GenomicIntervalMap::iterator it = map.begin(); it != map.end(); ++it) {
+ GenomicIntervalTreeMap::iterator ff = m_tree->find(it->first);
+ if (ff != m_tree->end())
+ ff->second = GenomicIntervalTree(it->second);
+ else
+ m_tree->insert(std::pair<int, GenomicIntervalTree>(it->first, GenomicIntervalTree(it->second)));
+ //old //m_tree[it.first] = GenomicIntervalTree(it.second);
+ }
+
+}
+
+template<class T>
+int GenomicRegionCollection<T>::TotalWidth() const {
+ int wid = 0;
+ for (typename std::vector<T>::const_iterator i = m_grv->begin(); i != m_grv->end(); ++i)
+ // for (auto& i : *m_grv)
+ wid += i->Width();
+ return wid;
+}
+
+// divide a region into pieces of width and overlaps
+template<class T>
+GenomicRegionCollection<T>::GenomicRegionCollection(int width, int ovlp, const T &gr) {
+
+ idx = 0;
+ allocate_grc();
+
+ // undefined otherwise
+ if (width <= ovlp)
+ throw std::invalid_argument("Width should be > ovlp");
+ if (width >= gr.Width()) {
+ m_grv->push_back(gr);
+ return;
+ }
+
+ int32_t start = gr.pos1;
+ int32_t end = gr.pos1 + width;
+
+ // region is smaller than width
+ if ( end > gr.pos2 ) {
+ std::cerr << "GenomicRegionCollection constructor: GenomicRegion is smaller than bin width" << std::endl;
+ return;
+ }
+
+ // loop through the sizes until done
+ while (end <= gr.pos2) {
+ m_grv->push_back(T(gr.chr, start, end));
+ end += width - ovlp; // make the new one
+ start += width - ovlp;
+ }
+ assert(m_grv->size() > 0);
+
+ // finish the last one if we need to
+ if (m_grv->back().pos2 != gr.pos2) {
+ start = m_grv->back().pos2 - ovlp; //width;
+ end = gr.pos2;
+ m_grv->push_back(T(gr.chr, start, end));
+ }
+
+ m_sorted = true;
+
+}
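+
+// Worked example (illustrative): GenomicRegionCollection<GenomicRegion>(25, 5, GenomicRegion(0, 0, 100))
+// produces the bins [0,25], [20,45], [40,65], [60,85], plus the final catch-up bin [80,100]
+// added above so that the last bin ends exactly at gr.pos2.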
+
+template<class T>
+size_t GenomicRegionCollection<T>::CountOverlaps(const T &gr) const {
+
+ if (m_tree->size() == 0 && m_grv->size() != 0)
+ {
+ std::cerr << "!!!!!! WARNING: Trying to find overlaps on empty tree. Need to run this->createTreeMap() somewhere " << std::endl;
+ return 0;
+ }
+
+ GenomicIntervalVector giv;
+
+ GenomicIntervalTreeMap::const_iterator ff = m_tree->find(gr.chr);
+ if (ff == m_tree->end())
+ return 0;
+ ff->second.findOverlapping(gr.pos1, gr.pos2, giv);
+ return (giv.size());
+}
+
+ template<class T>
+ template<class K>
+ bool GenomicRegionCollection<T>::OverlapSameInterval(const K &gr1, const K &gr2) const {
+
+ // events on diff chr do not overlap same bin
+ if (gr1.chr != gr2.chr)
+ return false;
+
+ if (m_tree->size() == 0 && m_grv->size() != 0) {
+ std::cerr << "!!!!!! WARNING: Trying to find overlaps on empty tree. Need to run this->createTreeMap() somewhere " << std::endl;
+ return false;
+ }
+
+ GenomicIntervalTreeMap::const_iterator ff1 = m_tree->find(gr1.chr);
+ GenomicIntervalTreeMap::const_iterator ff2 = m_tree->find(gr2.chr);
+ if (ff1 == m_tree->end() || ff2 == m_tree->end())
+ return false;
+
+ // do the interval tree query
+ GenomicIntervalVector giv1, giv2;
+ ff1->second.findOverlapping(gr1.pos1, gr1.pos2, giv1);
+ ff2->second.findOverlapping(gr2.pos1, gr2.pos2, giv2);
+
+ if (!giv1.size() || !giv2.size())
+ return false;
+
+ // each one only overlapped one element
+ if (giv1.size() == 1 && giv2.size() == 1)
+ return (giv1[0].value == giv2[0].value);
+
+ // make a set of the possible starts
+ SeqHashSet<int> vals;
+ // for (auto& i : giv1)
+ for (GenomicIntervalVector::iterator i = giv1.begin(); i != giv1.end(); ++i)
+ vals.insert(i->value);
+
+ // loop the other side and see if they mix
+ for (GenomicIntervalVector::iterator i = giv2.begin(); i != giv2.end(); ++i)
+ if (vals.count(i->value))
+ return true;
+
+ return false;
+
+ }
+
+template<class T>
+std::string GenomicRegionCollection<T>::AsBEDString(const BamHeader& h) const {
+
+ if (m_grv->size() == 0)
+ return std::string();
+
+ std::stringstream ss;
+ //for (auto& i : *m_grv)
+ for (typename std::vector<T>::const_iterator i = m_grv->begin(); i != m_grv->end(); ++i)
+ ss << i->ChrName(h) << "\t" << i->pos1 << "\t" << i->pos2 << "\t" << i->strand << std::endl;
+
+ return ss.str();
+
+}
+
+template<class T>
+void GenomicRegionCollection<T>::Concat(const GenomicRegionCollection<T>& g)
+{
+ if (!g.size())
+ return;
+ m_sorted = false;
+ m_grv->insert(m_grv->end(), g.m_grv->begin(), g.m_grv->end());
+}
+
+template<class T>
+GenomicRegionCollection<T>::GenomicRegionCollection() {
+ idx = 0;
+ allocate_grc();
+}
+
+template<class T>
+GenomicRegionCollection<T>::~GenomicRegionCollection() {
+}
+
+
+template<class T>
+void GenomicRegionCollection<T>::allocate_grc() {
+ m_sorted = false;
+ m_grv = SeqPointer<std::vector<T> >(new std::vector<T>()) ;
+ m_tree = SeqPointer<GenomicIntervalTreeMap>(new GenomicIntervalTreeMap()) ;
+}
+
+template<class T>
+GenomicRegionCollection<T>::GenomicRegionCollection(const BamRecordVector& brv) {
+ idx = 0;
+
+ allocate_grc();
+
+ //for (auto& i : brv)
+ for (BamRecordVector::const_iterator i = brv.begin(); i != brv.end(); ++i)
+ m_grv->push_back(GenomicRegion(i->ChrID(), i->Position(), i->PositionEnd()));
+
+}
+
+template<class T>
+const T& GenomicRegionCollection<T>::at(size_t i) const
+{
+ if (i >= m_grv->size())
+ throw std::out_of_range("GenomicRegionCollection::at - index out of range");
+ return m_grv->at(i);
+}
+
+
+// this is query
+template<class T>
+template<class K>
+std::vector<int> GenomicRegionCollection<T>::FindOverlappedIntervals(const K& gr, bool ignore_strand) const {
+
+ if (m_tree->size() == 0 && m_grv->size() != 0)
+ throw std::logic_error("Need to run CreateTreeMap to make the interval tree before doing range queries");
+
+ // which chr (if any) are common between query and subject
+ GenomicIntervalTreeMap::const_iterator ff = m_tree->find(gr.chr);
+
+ std::vector<int> output;
+
+ // must at least share a chromosome
+ if (ff == m_tree->end())
+ return output;
+
+ // get the subject hits
+ GenomicIntervalVector giv;
+ ff->second.findOverlapping(gr.pos1, gr.pos2, giv);
+
+ for (GenomicIntervalVector::const_iterator i = giv.begin(); i != giv.end(); ++i)
+ if (ignore_strand || m_grv->at(i->value).strand == gr.strand)
+ output.push_back(i->value);
+
+ return output;
+
+}
+
+template<class T>
+template<class K>
+size_t GenomicRegionCollection<T>::FindOverlapWidth(const K& gr, bool ignore_strand) const {
+
+ SeqLib::GRC out = FindOverlaps<K>(gr, ignore_strand);
+ if (!out.size())
+ return 0;
+
+ // make sure merged down
+ out.MergeOverlappingIntervals();
+
+ size_t val = 0;
+ for (size_t i = 0; i < out.size(); ++i)
+ val += out[i].Width();
+
+ return val;
+}
+
+// this is query
+template<class T>
+template<class K>
+GenomicRegionCollection<GenomicRegion> GenomicRegionCollection<T>::FindOverlaps(const K& gr, bool ignore_strand) const
+{
+
+ GenomicRegionCollection<GenomicRegion> output;
+
+ if (m_tree->size() == 0 && m_grv->size() != 0)
+ throw std::logic_error("Need to run CreateTreeMap to make the interval tree before doing range queries");
+
+ // which chr (if any) are common between query and subject
+ GenomicIntervalTreeMap::const_iterator ff = m_tree->find(gr.chr);
+
+ // must at least share a chromosome
+ if (ff == m_tree->end())
+ return output;
+
+ // get the subject hits
+ GenomicIntervalVector giv;
+ ff->second.findOverlapping(gr.pos1, gr.pos2, giv);
+
+#ifdef DEBUG_OVERLAPS
+ std::cerr << "ff->second.intervals.size() " << ff->second.intervals.size() << std::endl;
+ for (auto& k : ff->second.intervals)
+ std::cerr << " intervals " << k.start << " to " << k.stop << " value " << k.value << std::endl;
+ std::cerr << "GIV NUMBER OF HITS " << giv.size() << " for query " << gr << std::endl;
+#endif
+
+ // loop through the hits and define the GenomicRegion
+ for (GenomicIntervalVector::const_iterator j = giv.begin(); j != giv.end(); ++j) {
+ //for (auto& j : giv) { // giv points to positions on subject
+ if (ignore_strand || (m_grv->at(j->value).strand == gr.strand) ) {
+#ifdef DEBUG_OVERLAPS
+ std::cerr << "find overlaps hit " << j->start << " " << j->stop << " -- " << j->value << std::endl;
+#endif
+ output.add(GenomicRegion(gr.chr, std::max(static_cast<int32_t>(j->start), gr.pos1), std::min(static_cast<int32_t>(j->stop), gr.pos2)));
+ }
+ }
+
+ return output;
+
+}
+
+ // this is query
+ template<class T>
+ template<class K>
+GenomicRegionCollection<GenomicRegion> GenomicRegionCollection<T>::FindOverlaps(const GenomicRegionCollection<K>& subject, std::vector<int32_t>& query_id, std::vector<int32_t>& subject_id, bool ignore_strand) const
+{
+
+ GenomicRegionCollection<GenomicRegion> output;
+ if (subject.NumTree() == 0 && subject.size() != 0) {
+ std::cerr << "!!!!!! findOverlaps: WARNING: Trying to find overlaps on empty tree. Need to run this->createTreeMap() somewhere " << std::endl;
+ return output;
+ }
+
+ // we loop through query, so want it to be smaller
+ if (subject.size() < m_grv->size() && m_grv->size() - subject.size() > 20)
+ std::cerr << "findOverlaps warning: Suggest switching query and subject for efficiency." << std::endl;
+
+#ifdef DEBUG_OVERLAPS
+ std::cerr << "OVERLAP SUBJECT: " << std::endl;
+ for (auto& i : subject)
+ std::cerr << i << std::endl;
+#endif
+
+ // loop through the query GRanges (this) and overlap with subject
+ for (size_t i = 0; i < m_grv->size(); ++i)
+ {
+ // which chr (if any) are common between query and subject
+ GenomicIntervalTreeMap::const_iterator ff = subject.GetTree()->find(m_grv->at(i).chr);
+
+ GenomicIntervalVector giv;
+
+#ifdef DEBUG_OVERLAPS
+ std::cerr << "TRYING OVERLAP ON QUERY " << m_grv->at(i) << std::endl;
+#endif
+ // must at least share a chromosome
+ if (ff != subject.GetTree()->end()) // compare against the subject's tree, where ff came from
+ {
+ // get the subject hits
+ ff->second.findOverlapping(m_grv->at(i).pos1, m_grv->at(i).pos2, giv);
+
+#ifdef DEBUG_OVERLAPS
+ std::cerr << "ff->second.intervals.size() " << ff->second.intervals.size() << std::endl;
+ for (auto& k : ff->second.intervals)
+ std::cerr << " intervals " << k.start << " to " << k.stop << " value " << k.value << std::endl;
+ std::cerr << "GIV NUMBER OF HITS " << giv.size() << " for query " << m_grv->at(i) << std::endl;
+#endif
+ // loop through the hits and define the GenomicRegion
+ for (GenomicIntervalVector::const_iterator j = giv.begin(); j != giv.end(); ++j) {
+ //for (auto& j : giv) { // giv points to positions on subject
+ if (ignore_strand || (subject.at(j->value).strand == m_grv->at(i).strand) ) {
+ query_id.push_back(i);
+ subject_id.push_back(j->value);
+#ifdef DEBUG_OVERLAPS
+ std::cerr << "find overlaps hit " << j->start << " " << j->stop << " -- " << j->value << std::endl;
+#endif
+ output.add(GenomicRegion(m_grv->at(i).chr, std::max(static_cast<int32_t>(j->start), m_grv->at(i).pos1), std::min(static_cast<int32_t>(j->stop), m_grv->at(i).pos2)));
+ }
+ }
+ }
+ }
+
+ return output;
+
+}
+
+
+template<class T>
+GenomicRegionCollection<T>::GenomicRegionCollection(const T& gr)
+{
+ m_sorted = true;
+ idx = 0;
+ allocate_grc();
+ m_grv->push_back(gr);
+}
+
+template<class T>
+template<class K>
+GRC GenomicRegionCollection<T>::Intersection(const GenomicRegionCollection<K>& subject, bool ignore_strand) const
+{
+ std::vector<int32_t> sub, que;
+ GRC out;
+ if (subject.size() > this->size()) // do most efficient ordering
+ out = this->FindOverlaps<K>(subject, que, sub, ignore_strand);
+ else
+ out = subject.FindOverlaps(*this, que, sub, ignore_strand);
+ return out;
+}
+
+template<class T>
+void GenomicRegionCollection<T>::Pad(int v)
+{
+ //for (auto& i : *m_grv)
+ for (typename std::vector<T>::iterator i = m_grv->begin(); i != m_grv->end(); ++i)
+ i->Pad(v);
+}
+
+}
+
diff --git a/SeqLib/GenomicRegionCollection.h b/SeqLib/GenomicRegionCollection.h
new file mode 100644
index 0000000..a8fbe07
--- /dev/null
+++ b/SeqLib/GenomicRegionCollection.h
@@ -0,0 +1,313 @@
+#ifndef SWAP_GENOMIC_REGION_COLLECTION_H
+#define SWAP_GENOMIC_REGION_COLLECTION_H
+
+#include <vector>
+#include <string>
+#include <cstdlib>
+#include <list>
+
+#include "SeqLib/IntervalTree.h"
+#include "SeqLib/GenomicRegionCollection.h"
+#include "SeqLib/BamRecord.h"
+
+namespace SeqLib {
+
+ /** Simple structure to store overlap results
+ */
+ typedef std::pair<size_t, size_t> OverlapResult;
+
+/** Class to store vector of intervals on the genome */
+typedef TInterval<int32_t> GenomicInterval;
+typedef SeqHashMap<int, std::vector<GenomicInterval> > GenomicIntervalMap;
+typedef TIntervalTree<int32_t> GenomicIntervalTree;
+typedef SeqHashMap<int, GenomicIntervalTree> GenomicIntervalTreeMap;
+typedef std::vector<GenomicInterval> GenomicIntervalVector;
+
+ /** @brief Template class to store / query a collection of genomic intervals
+ *
+ * Can hold a collection of GenomicRegion objects, or any object whose
+ * class is a child of GenomicRegion. Contains an implementation of an
+ * interval tree (as provided by Erik Garrison) for fast interval queries.
+ */
+template<typename T=GenomicRegion>
+class GenomicRegionCollection {
+
+ public:
+
+ /** Construct an empty GenomicRegionCollection
+ */
+ GenomicRegionCollection();
+
+ ~GenomicRegionCollection();
+
+ /** Construct from a plain vector of GenomicRegion objects
+ */
+ GenomicRegionCollection(std::vector<T>& vec);
+
+ /** Construct from a single GenomicRegion
+ */
+ GenomicRegionCollection(const T& gr);
+
+ /** Construct from a vector of reads
+ *
+ * @note See BamRecord::AsGenomicRegion
+ */
+ GenomicRegionCollection(const BamRecordVector& brv);
+
+ /** Construct a GenomicRegionCollection with overlapping intervals
+ *
+ * @param width Desired bin width
+ * @param ovlp Amount that the bins should overlap
+ * @param gr GenomicRegion to divide into smaller overlapping bins
+ */
+ GenomicRegionCollection(int width, int ovlp, const T &gr);
+
+ /** Construct a tiled set of intervals across a genome
+ *
+ * @param width Width of each interval tile
+ * @param ovlp Amount of overlap between neighboring tiles
+ * @param h Set of chromosomes and their lengths to build the tile on
+ */
+ GenomicRegionCollection(int width, int ovlp, const HeaderSequenceVector& h);
+
+ // Read in a MuTect call-stats file and adds to GenomicRegionCollection object.
+ //
+ // Reads a MuTect call-stats file and imports only
+ // lines with KEEP marked.
+ // @param file Path to call-stats file
+ // @param pad Amount to pad intervals by
+ // @return True if file was succesfully read
+ //
+ //bool ReadMuTect(const std::string &file, const SeqLib::BamHeader& hdr);
+
+ /** Read in a BED file and add its intervals to this GenomicRegionCollection
+ * @param file Path to BED file
+ * @param hdr Dictionary for converting chromosome strings in the BED file to chr indices
+ * @return True if the file was successfully read
+ */
+ bool ReadBED(const std::string &file, const SeqLib::BamHeader& hdr);
+
+ /** Read in a VCF file and add its sites to this GenomicRegionCollection
+ * @param file Path to VCF file. All elements will have width = 1 (just the site start point)
+ * @param hdr Dictionary for converting chromosome strings in the VCF file to chr indices
+ * @return True if the file was successfully read
+ */
+ bool ReadVCF(const std::string &file, const SeqLib::BamHeader& hdr);
+
+ /** Shuffle the order of the intervals */
+ void Shuffle();
+
+ /** Read in a text file (can be gzipped) and construct a GenomicRegionCollection
+ *
+ * This function will automatically detect which file type is being input:
+ * -- ends in .vcf -> ReadVCF
+ * -- ends in .bed -> ReadBED
+ * -- contains ':' -> assumes a single samtools-style region (eg 1:100-100)
+ * The values are appended to the existing vector of GenomicRegion objects
+ * @param file Text file to read and store intervals
+ * @param hdr BamHeader to serve as dictionary for chromosomes
+ */
+ GenomicRegionCollection(const std::string &file, const BamHeader& hdr);
+
+ /** Create the set of interval trees (one per chromosome)
+ *
+ * A GenomicIntervalTreeMap is an unordered_map of GenomicIntervalTrees for
+ * each chromosome. A GenomicIntervalTree is an interval tree on the ranges
+ * defined by the genomic intervals, with the cargo set to the index of the corresponding GenomicRegion.
+ */
+ void CreateTreeMap();
+
+ /** Reduce the GenomicRegion objects to a minimal set by merging overlapping intervals
+ * @note This will merge intervals that touch. eg [4,6] and [6,8]
+ * @note This clears the interval tree; call CreateTreeMap() again before doing further range queries
+ */
+ void MergeOverlappingIntervals();
+
+ /** Return the number of GenomicRegions stored
+ */
+ size_t size() const { return m_grv->size(); }
+
+ /** Add a new GenomicRegion (or child of) to end
+ */
+ void add(const T& g) { m_grv->push_back(g); /*createTreeMap();*/ }
+
+ /** Is this object empty?
+ */
+ bool IsEmpty() const { return !m_grv->size(); }
+
+ /** Clear out all of the GenomicRegion objects
+ */
+ void clear() { m_grv->clear();
+ m_tree->clear();
+ idx = 0;
+ }
+
+ /** Get the number of trees (eg number of chromosomes, each with its own tree) */
+ int NumTree() const { return m_tree->size(); }
+
+ /** Get the IDs of all intervals that overlap with a query range
+ *
+ * The IDs are created during CreateTreeMap, and are the positions of the
+ * individual intervals from the tree, in genomic order. e.g. the first
+ * interval on chromosome 1 gets 0, the next one gets 1, etc.
+ * The returned IDs can then be used as lookups with [], as long as the
+ * collection is not altered in between.
+ * @param gr Query range to check overlaps against
+ * @param ignore_strand Should strandedness be ignored when doing overlaps
+ * @return A vector of IDs of intervals in this collection that overlap with gr
+ */
+ template<class K>
+ std::vector<int> FindOverlappedIntervals(const K& gr, bool ignore_strand) const;
+
+ /** Get a const pointer to the genomic interval tree map */
+ const GenomicIntervalTreeMap* GetTree() const { return m_tree.get(); }
+
+ /** Retrieve a GenomicRegion at given index.
+ *
+ * Note that this does not move the idx iterator, which is
+ * used to loop through all the regions. Throws an exception
+ * if the index is out of bounds.
+ * @return GenomicRegion pointed to by index i
+ */
+ const T& at(size_t i) const;
+
+ /** Find overlaps between this vector and input GenomicRegion.
+ *
+ * Requires that the GenomicIntervalTreeMap have been created first
+ * @param gr Region to test
+ * @return Number of overlapping elements in this GenomicRegionCollection
+ */
+ size_t CountOverlaps(const T &gr) const;
+
+ /** Test if two intervals overlap the same element in the collection
+ */
+ template<class K>
+ bool OverlapSameInterval(const K &gr1, const K &gr2) const;
+
+ /** Count the number of intervals in the collection contained in this range */
+ size_t CountContained(const T &gr);
+
+ /** Return the overlaps between the collection and the query collection
+ * @param subject Subject collection of intervals
+ * @param query_id Indices of the queries that have an overlap. Will be same size as output and subject_id and in same order
+ * @param subject_id Indices of the subject that have an overlap. Will be same size as output and query_id and in same order
+ * @param ignore_strand If true, won't exclude overlap if on different strand
+ * @return A collection of overlapping intervals from this collection, trimmed to be contained
+ * inside the query collection
+ * @exception Throws a logic_error if this tree is non-empty, but the interval tree has not been made with
+ * CreateTreeMap
+ */
+ template<class K>
+ GenomicRegionCollection<GenomicRegion> FindOverlaps(const GenomicRegionCollection<K> &subject, std::vector<int32_t>& query_id, std::vector<int32_t>& subject_id, bool ignore_strand) const;
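+
+ // Example (illustrative sketch): overlap two collections and recover which query/subject
+ // elements matched. Assumes `query` and `subject` are GRC objects and that
+ // subject.CreateTreeMap() has already been called.
+ //
+ //   std::vector<int32_t> qid, sid;
+ //   SeqLib::GRC olaps = query.FindOverlaps(subject, qid, sid, true);
+ //   for (size_t k = 0; k < olaps.size(); ++k)
+ //     std::cout << query[qid[k]] << " overlaps " << subject[sid[k]] << " as " << olaps[k] << std::endl;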
+
+ /** Return the overlaps between the collection and the query interval
+ * @param gr Query region
+ * @param ignore_strand If true, won't exclude overlap if on different strand
+ * @return A collection of overlapping intervals from this collection, trimmed to be contained
+ * inside gr
+ */
+ template<class K>
+ GenomicRegionCollection<GenomicRegion> FindOverlaps(const K& gr, bool ignore_strand) const;
+
+ /** Return the number of bases in query that overlap this collection
+ * @param gr Query GenomicRegion (or child of)
+ * @param ignore_strand If true, won't exclude overlap if on different strand
+ * @return Number of bases in query that overlap with region in collection
+ */
+ template<class K>
+ size_t FindOverlapWidth(const K& gr, bool ignore_strand) const;
+
+ /** Return the total amount spanned by this collection */
+ int TotalWidth() const;
+
+ /** Increase the left and right ends of each contained GenomicRegion by
+ * the pad value.
+ * @param v Amount to pad each end by. The result is an increase in width of 2*v.
+ * @note See GenomicRegion::Pad
+ */
+ void Pad(int v);
+
+ /** Set the i'th GenomicRegion */
+ T& operator[](size_t i) { return m_grv->at(i); }
+
+ /** Retrieve the i'th GenomicRegion */
+ const T& operator[](size_t i) const { return m_grv->at(i); }
+
+ /** Add two GenomicRegionCollection objects together */
+ void Concat(const GenomicRegionCollection<T>& g);
+
+ /** Output the GenomicRegionCollection to a BED format
+ *
+ * @param h Header to convert id to chromosome name
+ * @return BED-formatted string representation
+ */
+ std::string AsBEDString(const BamHeader& h) const;
+
+ /** Coordinate sort the interval collection */
+ void CoordinateSort();
+
+ /** Expand all the elements so they are sorted and become adjacent
+ * by stretching them to the right up to max
+ * @param max Element furthest to the right will be stretched to max. If set to 0, will not stretch furthest right element.
+ * @exception Throws an out_of_range if furthest right position is > max
+ */
+ void SortAndStretchRight(int max);
+
+ /** Expand all the elements so they are sorted and become adjacent
+ * by stretching them to the left down to min.
+ * @param min Element furthest to the left will be stretched to min. If set to < 0, will not stretch the furthest left element.
+ * @exception Throws an out_of_range if furthest left is < min
+ */
+ void SortAndStretchLeft(int min);
+
+ /** Rewind the element pointer to the first GenomicRegion */
+ void Rewind() { idx = 0; }
+
+ /** Return elements as an STL vector of GenomicRegion objects */
+ GenomicRegionVector AsGenomicRegionVector() const;
+
+ /** Iterator to first element of the region collection */
+ typename std::vector<T>::iterator begin() { return m_grv->begin(); }
+
+ /** Iterator to end of the region collection */
+ typename std::vector<T>::iterator end() { return m_grv->end(); }
+
+ /** Const iterator to first element of the region collection */
+ typename std::vector<T>::const_iterator begin() const { return m_grv->begin(); }
+
+ /** Const iterator to end of the region collection */
+ typename std::vector<T>::const_iterator end() const { return m_grv->end(); }
+
+ /** Shortcut to FindOverlaps that just returns the intersecting regions
+ * without keeping track of the query / subject ids
+ * @param subject Collection of regions to intersect this object with
+ * @param ignore_strand Ignore strand considerations when performing intersection
+ * @return Intersecting regions between subject and query
+ */
+ template <class K>
+ GenomicRegionCollection<GenomicRegion> Intersection(const GenomicRegionCollection<K>& subject, bool ignore_strand) const;
+
+ protected:
+
+ bool m_sorted;
+
+ // always construct this object any time m_grv is modified
+ SeqPointer<GenomicIntervalTreeMap> m_tree;
+
+ // hold the genomic regions
+ SeqPointer<std::vector<T> > m_grv;
+
+ // index for current GenomicRegion
+ size_t idx;
+
+ // open the memory
+ void allocate_grc();
+
+};
+
+typedef GenomicRegionCollection<GenomicRegion> GRC;
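+
+// Example (illustrative sketch): typical end-to-end overlap workflow, assuming a BamHeader
+// `hdr` and a BED file "regions.bed" (hypothetical path) are available.
+//
+//   SeqLib::GRC grc;
+//   grc.ReadBED("regions.bed", hdr);   // load the intervals
+//   grc.CreateTreeMap();               // build per-chromosome interval trees
+//   SeqLib::GenomicRegion q("1:1,000,000-2,000,000", hdr);
+//   size_t n = grc.CountOverlaps(q);               // how many stored intervals hit q
+//   SeqLib::GRC hits = grc.FindOverlaps(q, true);  // the overlapping pieces, ignoring strand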
+
+}
+
+#include "SeqLib/GenomicRegionCollection.cpp"
+
+#endif
diff --git a/SeqLib/IntervalTree.h b/SeqLib/IntervalTree.h
new file mode 100644
index 0000000..7c5a598
--- /dev/null
+++ b/SeqLib/IntervalTree.h
@@ -0,0 +1,245 @@
+/*Copyright (c) 2011 Erik Garrison
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/* Modified by Jeremiah Wala to switch unique_ptr to a traditional
+   pointer (requiring delete on destruction) */
+
+#ifndef SEQLIB_INTERVAL_TREE_H__
+#define SEQLIB_INTERVAL_TREE_H__
+
+#include <vector>
+#include <algorithm>
+#include <iostream>
+
+namespace SeqLib {
+
+template <class T, typename K = std::size_t>
+class TInterval {
+public:
+ K start;
+ K stop;
+ T value;
+ TInterval(K s, K e, const T& v)
+ : start(s)
+ , stop(e)
+ , value(v)
+ { }
+};
+
+template <class T, typename K>
+K intervalStart(const TInterval<T,K>& i) {
+ return i.start;
+}
+
+template <class T, typename K>
+K intervalStop(const TInterval<T,K>& i) {
+ return i.stop;
+}
+
+template <class T, typename K>
+ std::ostream& operator<<(std::ostream& out, TInterval<T,K>& i) {
+ out << "Interval(" << i.start << ", " << i.stop << "): " << i.value;
+ return out;
+}
+
+template <class T, typename K = std::size_t>
+class IntervalStartSorter {
+public:
+ bool operator() (const TInterval<T,K>& a, const TInterval<T,K>& b) {
+ return a.start < b.start;
+ }
+};
+
+template <class T, typename K = std::size_t>
+class TIntervalTree {
+
+public:
+ typedef TInterval<T,K> interval;
+ typedef std::vector<interval> intervalVector;
+ typedef TIntervalTree<T,K> intervalTree;
+
+ intervalVector intervals;
+ intervalTree * left;
+ intervalTree * right;
+ K center;
+
+ // jwala added destructor
+ ~TIntervalTree<T,K>() {
+ if (left)
+ delete left;
+ if (right)
+ delete right;
+ }
+
+ TIntervalTree<T,K>(void)
+ : left(NULL)
+ , right(NULL)
+ , center(0)
+ { }
+
+private:
+ intervalTree* copyTree(const intervalTree& orig){
+ return (new intervalTree(orig));
+}
+public:
+
+ TIntervalTree<T,K>(const intervalTree& other)
+ : intervals(other.intervals),
+ left(other.left ? copyTree(*other.left) : NULL),
+ right(other.right ? copyTree(*other.right) : NULL),
+ center(other.center)
+ {
+ }
+
+public:
+
+ TIntervalTree<T,K>& operator=(const intervalTree& other) {
+ center = other.center;
+ intervals = other.intervals;
+ left = other.left ? copyTree(*other.left) : NULL;
+ right = other.right ? copyTree(*other.right) : NULL;
+ return *this;
+ }
+
+ // Note: changes the order of ivals
+ TIntervalTree<T,K>(
+ intervalVector& ivals,
+ std::size_t depth = 16,
+ std::size_t minbucket = 64,
+ K leftextent = 0,
+ K rightextent = 0,
+ std::size_t maxbucket = 512
+ )
+ : left(NULL)
+ , right(NULL)
+ {
+
+ --depth;
+ IntervalStartSorter<T,K> intervalStartSorter;
+ if (depth == 0 || (ivals.size() < minbucket && ivals.size() < maxbucket)) {
+ std::sort(ivals.begin(), ivals.end(), intervalStartSorter);
+ intervals = ivals;
+ } else {
+ if (leftextent == 0 && rightextent == 0) {
+ // sort intervals by start
+ std::sort(ivals.begin(), ivals.end(), intervalStartSorter);
+ }
+
+ K leftp = 0;
+ K rightp = 0;
+ K centerp = 0;
+
+ if (leftextent || rightextent) {
+ leftp = leftextent;
+ rightp = rightextent;
+ } else {
+ leftp = ivals.front().start;
+ std::vector<K> stops;
+ stops.resize(ivals.size());
+ transform(ivals.begin(), ivals.end(), stops.begin(), intervalStop<T,K>);
+ rightp = *max_element(stops.begin(), stops.end());
+ }
+
+ //centerp = ( leftp + rightp ) / 2;
+ centerp = ivals.at(ivals.size() / 2).start;
+ center = centerp;
+
+ intervalVector lefts;
+ intervalVector rights;
+
+ for (typename intervalVector::const_iterator i = ivals.begin(); i != ivals.end(); ++i) {
+ const interval& interval = *i;
+ if (interval.stop < center) {
+ lefts.push_back(interval);
+ } else if (interval.start > center) {
+ rights.push_back(interval);
+ } else {
+ intervals.push_back(interval);
+ }
+ }
+
+ if (!lefts.empty()) {
+ left = new intervalTree(lefts, depth, minbucket, leftp, centerp);
+ }
+ if (!rights.empty()) {
+ right = new intervalTree(rights, depth, minbucket, centerp, rightp);
+ }
+ }
+ }
+
+ intervalVector findOverlapping(K start, K stop) const {
+ intervalVector ov;
+ this->findOverlapping(start, stop, ov);
+ return ov;
+ }
+
+ void findOverlapping(K start, K stop, intervalVector& overlapping) const {
+ if (!intervals.empty() && ! (stop < intervals.front().start)) {
+ for (typename intervalVector::const_iterator i = intervals.begin(); i != intervals.end(); ++i) {
+ const interval& interval = *i;
+ if (interval.stop >= start && interval.start <= stop) {
+ overlapping.push_back(interval);
+ }
+ }
+ }
+
+ if (left && start <= center) {
+ left->findOverlapping(start, stop, overlapping);
+ }
+
+ if (right && stop >= center) {
+ right->findOverlapping(start, stop, overlapping);
+ }
+
+ }
+
+ intervalVector findContained(K start, K stop) const {
+ intervalVector contained;
+ this->findContained(start, stop, contained);
+ return contained;
+ }
+
+ void findContained(K start, K stop, intervalVector& contained) const {
+ if (!intervals.empty() && ! (stop < intervals.front().start)) {
+ for (typename intervalVector::const_iterator i = intervals.begin(); i != intervals.end(); ++i) {
+ const interval& interval = *i;
+ if (interval.start >= start && interval.stop <= stop) {
+ contained.push_back(interval);
+ }
+ }
+ }
+
+ if (left && start <= center) {
+ left->findContained(start, stop, contained);
+ }
+
+ if (right && stop >= center) {
+ right->findContained(start, stop, contained);
+ }
+
+ }
+
+//~TIntervalTree(void) = default;
+
+};
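+
+// Example (illustrative sketch): building a standalone interval tree and querying it.
+// The cargo type here is int; SeqLib's GenomicIntervalTree uses the same template with an
+// int32_t cargo that holds the interval's index within the collection.
+//
+//   typedef SeqLib::TInterval<int> I;
+//   std::vector<I> ivals;
+//   ivals.push_back(I(100, 200, 0));
+//   ivals.push_back(I(150, 300, 1));
+//   SeqLib::TIntervalTree<int> tree(ivals);                // note: reorders ivals
+//   std::vector<I> hits = tree.findOverlapping(180, 190);  // returns both intervals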
+
+}
+#endif
diff --git a/SeqLib/ReadFilter.h b/SeqLib/ReadFilter.h
new file mode 100644
index 0000000..2076b2d
--- /dev/null
+++ b/SeqLib/ReadFilter.h
@@ -0,0 +1,576 @@
+#ifndef SEQLIB_READ_FILTER_H
+#define SEQLIB_READ_FILTER_H
+
+#define AHO_CORASICK 1
+
+#include <string>
+#include <vector>
+#include <climits>
+
+#include "json/json.h"
+
+#include "SeqLib/GenomicRegionCollection.h"
+#include "SeqLib/BamRecord.h"
+
+#ifdef HAVE_C11
+#include "SeqLib/aho_corasick.hpp"
+#endif
+
+
+#define MINIRULES_MATE_LINKED 1
+#define MINIRULES_MATE_LINKED_EXCLUDE 2
+#define MINIRULES_REGION 3
+#define MINIRULES_REGION_EXCLUDE 4
+
+namespace SeqLib {
+
+ typedef SeqHashSet<std::string> StringSet;
+
+ namespace Filter {
+
+#ifdef HAVE_C11
+ /** Tool for using the Aho-Corasick method for substring queries
+ * against large dictionaries
+ * @note Trie construction / searching implemented by https://github.com/blockchaindev/aho_corasick
+ */
+ struct AhoCorasick {
+
+ /** Allocate a new empty trie */
+ AhoCorasick() {
+ aho_trie = SeqPointer<aho_corasick::trie>(new aho_corasick::trie());
+ inv = false;
+ count = 0;
+ }
+
+ /** Deallocate the trie */
+ ~AhoCorasick() { }
+
+ /** Add a motif to the trie
+ * @note Trie construction is lazy. Won't build trie until
+ * first query. Therefore the first query is slow, and the rest are
+ * O(n), where n is the length of the query string.
+ */
+ void AddMotif(const std::string& m) {
+ aho_trie->insert(m);
+ }
+
+ /** Add a set of motifs to the trie from a file
+ * @param f File storing the motifs (new line separated)
+ * @exception Throws a runtime_error if file cannot be opened
+ */
+ void TrieFromFile(const std::string& f);
+
+ /** Query if a string is in the trie
+ * @param t Text to query
+ * @return Returns number of substrings in tree that are in t
+ */
+ int QueryText(const std::string& t) const;
+
+ SeqPointer<aho_corasick::trie> aho_trie; ///< The trie for the Aho-Corasick search
+
+ std::string file; ///< Name of the file holding the motifs
+
+ bool inv; ///< Is this an inverted dictionary (ie exclude hits)
+
+ int count; ///< Number of motifs in dictionary
+
+ };
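+
+ // Example (illustrative sketch, only available when HAVE_C11 is defined):
+ //   AhoCorasick ac;
+ //   ac.AddMotif("TTAGGG");
+ //   ac.AddMotif("CCCTAA");
+ //   int hits = ac.QueryText("ACCCTAACCCTAA");  // number of dictionary motifs found in the text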
+#endif
+
+/** Stores a rule for a single alignment flag.
+ *
+ * Rules for alignment flags can be one of three states:
+ * - NA - All flag values are valid
+ * - Off - Flag is valid if OFF
+ * - On - Flag is valid if ON
+ */
+class Flag {
+
+ public:
+
+ /** Construct a new Flag with NA rule
+ */
+ Flag() : on(false), off(false), na(true) {}
+
+ /** Set the flag to NA (pass alignment regardless of flag value) */
+ void setNA() { on = false; off = false; na = true; }
+
+ /** Set the flag to ON (require flag ON to pass) */
+ void setOn() { on = true; off = false; na = false; }
+
+ /** Set the flag to OFF (require flag OFF to pass) */
+ void setOff() { on = false; off = true; na = false; }
+
+ /** Return if the Flag filter is NA */
+ bool isNA() const { return na; }
+
+ /** Return if the Flag filter is ON */
+ bool isOn() const { return on; }
+
+ /** Return if the Flag filter is OFF */
+ bool isOff() const { return off; }
+
+ /** Parse the Flag rule from a JSON entry */
+ bool parseJson(const Json::Value& value, const std::string& name);
+
+ private:
+
+ bool on;
+ bool off;
+ bool na;
+
+};
+
+/** Filter numeric values on whether they fall in/out of a range of values (eg mapping quality).
+ *
+ * Can optionally invert the range to make rule the complement of the range
+ * (eg insert-size NOT in [300,600])
+ */
+class Range {
+
+public:
+ /** Construct a default range with everything accepted
+ */
+ Range() : m_min(0), m_max(0), m_inverted(false), m_every(true) {}
+
+ /** Construct a Range from min to max, inclusive
+ * @param min Minimum for range
+ * @param max Maximum for range
+ * @param inverted Declare if this should be an inverted range (do NOT accept vals in range)
+ */
+ Range(int min, int max, bool inverted) : m_min(min), m_max(max), m_inverted(inverted), m_every(false) {}
+
+ /** Given a query value, determine if the value passes this Range
+ * @param val Query value (e.g. mapping quality)
+ * @return true if the value passes this Range rule
+ */
+ bool isValid(int val) {
+ if (m_every)
+ return true;
+ if (!m_inverted)
+ return (val >= m_min && val <= m_max);
+ else
+ return (val < m_min || val > m_max);
+ }
+
+ /** Parse a JSON value
+ * @param value JSON value holding the range to parse
+ * @param name Name of the JSON field being parsed
+ */
+ void parseJson(const Json::Value& value, const std::string& name);
+
+ /** Print the contents of this Range */
+ friend std::ostream& operator<<(std::ostream &out, const Range &r);
+
+ /** Return if this range accepts all values */
+ bool isEvery() const { return m_every; }
+
+ /** Return the lower bound of the range */
+ int lowerBound() const { return m_min; }
+
+ /** Return the upper bound of the range */
+ int upperBound() const { return m_max; }
+
+ /** Return true if the range is inverted (e.g. do NOT accept i in [min,max]) */
+ bool isInverted() const { return m_inverted; }
+
+private:
+
+ int m_min;
+ int m_max;
+ bool m_inverted;
+ bool m_every;
+
+};
+
+/** Stores a set of Flag objects for filtering alignment flags
+ *
+ * An alignment can be queried against a FlagRule to check if it
+ * satisfies the requirements for its alignment flag.
+ */
+class FlagRule {
+
+ public:
+
+ FlagRule() {
+ dup = Flag();
+ supp = Flag();
+ qcfail = Flag();
+ hardclip = Flag();
+ fwd_strand = Flag();
+ rev_strand = Flag();
+ mate_fwd_strand = Flag();
+ mate_rev_strand = Flag();
+ mapped = Flag();
+ mate_mapped = Flag();
+ ff = Flag();
+ fr = Flag();
+ rf = Flag();
+ rr = Flag();
+ ic = Flag();
+ paired = Flag();
+ m_all_on_flag = 0;
+ m_all_off_flag = 0;
+ m_any_on_flag = 0;
+ m_any_off_flag = 0;
+ every = false;
+ }
+
+ Flag dup; ///< Filter for duplicated flag
+ Flag supp; ///< Flag for supplementary flag
+ Flag qcfail; ///< Flag for qcfail flag
+ Flag hardclip; ///< Flag for presence of hardclip in cigar string
+ Flag fwd_strand; ///< Flag for forward strand alignment
+ Flag rev_strand; ///< Flag for reverse strand alignment
+ Flag mate_fwd_strand; ///< Flag for forward strand alignment for mate
+ Flag mate_rev_strand; ///< Flag for reverse strand alignment for mate
+ Flag mapped; ///< Flag for mapped alignment
+ Flag mate_mapped; ///< Flag for mate-mapped alignment
+ Flag ff; ///< Flag for both reads on forward strand
+ Flag fr; ///< Flag for lower (by position) read on forward strand, higher on reverse
+ Flag rf; ///< Flag for lower (by position) read on reverse strand, higher on forward
+ Flag rr; ///< Flag for both reads on reverse strand
+ Flag ic; ///< Flag for read and mate aligned to different chromosomes
+ Flag paired; ///< Flag for read is part of pair
+
+ /** Parse a FlagRule from a JSON object
+ */
+ void parseJson(const Json::Value& value);
+
+ /** Set rule to pass all alignment flags that have any bit in f on
+ * @param f Alignment flags to be on for alignment record to pass
+ */
+ void setAnyOnFlag(uint32_t f) { m_any_on_flag = f; every = (every && f == 0); }
+ // NOTE: every = (every && f == 0) means to set every to true only if
+ // input flag is zero and every was already true
+
+ /** Set rule to pass all alignment flags that have any bit in f off
+ * @param f Alignment flags to be off for alignment record to pass
+ */
+ void setAnyOffFlag(uint32_t f) { m_any_off_flag = f; every = (every && f == 0); }
+
+ /** Set rule to reject all alignment flags that have any bit in f off
+ * @param f Alignment flags to be on for alignment record to pass
+ */
+ void setAllOnFlag(uint32_t f) { m_all_on_flag = f; every = (every && f == 0); }
+
+ /** Set rule to reject all alignment flags that have any bit in f on
+ * @param f Alignment flags to be off for alignment record to pass
+ */
+ void setAllOffFlag(uint32_t f) { m_all_off_flag = f; every = (every && f == 0); }
+
+ /** Return whether a read passes the alignment flag rules in this object
+ * @param r Alignment record to query
+ * @return true if record passes rules
+ */
+ bool isValid(const BamRecord &r);
+
+ /** Print the flag rule */
+ friend std::ostream& operator<<(std::ostream &out, const FlagRule &fr);
+
+ /** Return whether this object will pass all records provided to it */
+ bool isEvery() const { return every; }
+
+private:
+
+ bool every; // does this pass all flags?
+
+ uint32_t m_all_on_flag; // if read has all of these, keep
+ uint32_t m_all_off_flag; // if read has all of these, fail
+
+ uint32_t m_any_on_flag; // if read has any of these, keep
+ uint32_t m_any_off_flag;// if read has any of these, fail
+
+ int parse_json_int(const Json::Value& v);
+
+};
+
+/** Stores a full rule (Flag + Range + motif etc)
+ *
+ * An alignment can be queried with an AbstractRule object
+ * to check if it passes that rule.
+ */
+class AbstractRule {
+
+ friend class ReadFilter;
+ friend class ReadFilterCollection;
+
+ public:
+
+ /** Create empty rule with default to accept all */
+ AbstractRule() : m_count(0), subsam_frac(1), subsam_seed(999) { }
+
+ /** Destroy the filter */
+ ~AbstractRule() {}
+
+ /** Add a list of motifs that will be searched as sub-strings
+ * of the read sequence
+ * @param f Path to a new-line separated file of motifs
+ * @param inverted If true, the reads that have a matching motif will fail isValid
+ */
+ void addMotifRule(const std::string& f, bool inverted);
+
+ /** Query a read against this rule. If the
+ * read passes this rule, return true.
+ * @param r An aligned sequencing read to query against filter
+ */
+ bool isValid(const BamRecord &r);
+
+ /** Supply the rule parameters with a JSON
+ * @param value JSON object created by parsing a string
+ */
+ void parseJson(const Json::Value& value);
+
+ /** Print some basic information about this filter
+ */
+ friend std::ostream& operator<<(std::ostream &out, const AbstractRule &fr);
+
+ /** Return if this rule accepts all reads
+ */
+ bool isEvery() const;
+
+ /** Set the rate to subsample (default 1 = no subsampling)
+ * @param s A rate between 0 and 1
+ */
+ void SetSubsampleRate(double s) { subsam_frac = s; };
+
+ /** Supply a name for this rule
+ * @param s ID to be associated with this rule
+ */
+ void SetRuleID(const std::string& s) { id = s; };
+
+ /** Specify a read-group for this filter.
+ * Reads that do not belong to this read group
+ * will not pass isValid
+ * @param rg Read group to be matched against RG:Z:readgroup
+ */
+ void SetReadGroup(const std::string& rg) { read_group = rg; }
+
+ FlagRule fr; ///< FlagRule specifying the alignment flag filter
+
+ Range isize; ///< Range object for insert-size filter
+ Range mapq; ///< Range object for mapping quality filter
+ Range len; ///< Range object for length filter
+ Range phred; ///< Range object for base-quality filter
+ Range clip; ///< Range object for number of clipped bases filter
+ Range nm; ///< Range object for NM (num mismatch) filter
+ Range nbases; ///< Range object for number of "N" bases filer
+ Range ins; ///< Range object for max CIGAR insertion size filter
+ Range del; ///< Range object for max CIGAR deletion size filter
+ Range xp; ///< Range object for number of secondary alignments
+
+ private:
+
+ void parseSeqLine(const Json::Value& value);
+
+ // read group
+ std::string read_group;
+
+ // how many reads pass this rule?
+ size_t m_count;
+
+ // the aho-corasick trie
+#ifdef HAVE_C11
+ AhoCorasick aho;
+#endif
+
+ // id for this rule
+ std::string id;
+
+ // fraction reads to subsample
+ double subsam_frac;
+
+ // data
+ uint32_t subsam_seed; // random seed for subsampling
+
+ void parseSubLine(const Json::Value& value);
+
+};
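+
+// Example (illustrative sketch): a rule that keeps mapped, non-duplicate reads with
+// MAPQ in [30,60] and an insert size NOT in [0,600]. `rec` stands for a SeqLib::BamRecord
+// obtained elsewhere.
+//
+//   AbstractRule ar;
+//   ar.fr.dup.setOff();               // duplicate flag must be off
+//   ar.fr.mapped.setOn();             // read must be mapped
+//   ar.mapq  = Range(30, 60, false);  // accept MAPQ in [30,60]
+//   ar.isize = Range(0, 600, true);   // inverted range: reject insert sizes in [0,600]
+//   bool pass = ar.isValid(rec);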
+
+class ReadFilterCollection;
+
+/**
+ * A set of AbstractRules on a region united by logical rules.
+ *
+ * ReadFilter stores an arbitrarily complex collection of AbstractRules
+ * (e.g. (Mapped && Clipped) || (Unmapped)).
+ */
+class ReadFilter {
+
+ friend class ReadFilterCollection;
+
+ public:
+
+ /** Construct an empty filter that passes all reads */
+ ReadFilter() : excluder(false), m_applies_to_mate(false), m_count(0) {}
+
+ /** Destroy the filter */
+ ~ReadFilter();
+
+ // ReadFilter(const ReadFilter& rf);
+
+ // Make a ReadFilter with an all exclude or include rule
+ // @param Samtools style string, BED file or VCF
+ // @param reg_type The type of rule this will be
+ // @param h BAM header that defines available chromosomes
+ ///
+ //ReadFilter(const CommandLineRegion& c, const BamHeader& hdr);
+
+ /** Return whether a read passes this filter
+ * @param r A read to query
+ * @note If this is an excluder rule, then this
+ * returns false if the read passes the filter
+ */
+ bool isValid(const BamRecord &r);
+
+ /** Add a rule to this filter. A read must pass all
+ * of the rules contained in this filter to pass
+ * @param ar A rule (eg MAPQ > 30) that the read must satisfy to pass this filter.
+ */
+ void AddRule(const AbstractRule& ar);
+
+ /** Provide the region covered by this read filter
+ * @param g Region that this filter applies to
+ */
+ void setRegions(const GRC& g);
+
+ /** Add additional regions to the filtered region
+ * @param g Additional regions to be included in filter
+ */
+ void addRegions(const GRC& g);
+
+ /** Check if a read is overlapping the region defined by this filter
+ * @param r Read to query whether it overlaps (even partially) the region.
+ * @note If this is a mate-linked region, then the read will overlap
+ * if its mate overlaps as well.
+ */
+ bool isReadOverlappingRegion(const BamRecord &r) const;
+
+ /** Print basic information about this filter */
+ friend std::ostream& operator<<(std::ostream& out, const ReadFilter &mr);
+
+ /** Return the number of rules in this filter */
+ size_t size() const {
+ return m_abstract_rules.size();
+ }
+
+ /** Set as an excluder region
+ * An excluder region is such that if a read satisfies
+ * this rule, then it will fail isValid, rather than pass
+ */
+ void SetExcluder(bool e) { excluder = e; }
+
+ /** Set as a mate linked region */
+ void SetMateLinked(bool e) { m_applies_to_mate = e; }
+
+ private:
+
+ GRC m_grv; // the interval tree with the regions this rule applies to. Empty is whole-genome
+
+ std::string id; // set a unique id for this filter
+
+ bool excluder; // this filter is such that if read passes, it gets excluded
+
+ std::string m_region_file;
+
+ std::vector<AbstractRule> m_abstract_rules; // hold all of the rules
+
+ // rule applies to mate too
+ bool m_applies_to_mate;
+
+ // how many reads pass this MiniRule
+ size_t m_count;
+
+};
+
+/** A full set of rules across any number of regions
+ *
+ * Stores the entire set of ReadFilter, each defined on a unique interval.
+ * A single ReadFilterCollection object is sufficient to store any combination of rules,
+ * and is the highest in the rule hierarchy. (ReadFilterCollection stores ReadFilter
+ * stores AbstractRules stores FlagRule/Ranges).
+ */
+class ReadFilterCollection {
+
+ public:
+
+ /** Construct an empty ReadFilterCollection
+ * that will pass all reads.
+ */
+ ReadFilterCollection() : m_count(0), m_count_seen(0) {}
+
+ /** Create a new filter collection directly from a JSON
+ * @param script A JSON file or directly as JSON formatted string
+ * @param h BamHeader to convert chr sequence to id
+ * @exception invalid_argument if cannot parse JSON
+ */
+ ReadFilterCollection(const std::string& script, const SeqLib::BamHeader& h);
+
+ /** Add a new rule to the collection.
+ * If a read passes this rule, it will be included,
+ * even if it fails the other filters. Or, if this filter
+ * has the excluder tag, then if a read passes this filter
+ * then it will be excluded, regardless of the other filters
+ */
+ void AddReadFilter(const ReadFilter& rf);
+
+ /** Provide a global rule set (applies to each filter)
+ * @param rule A filter specified in JSON format
+ */
+ void addGlobalRule(const std::string& rule);
+
+ /** Query a read to see if it passes any one of the
+ * filters contained in this collection */
+ bool isValid(const BamRecord &r);
+
+ /** Print some basic information about this object */
+ friend std::ostream& operator<<(std::ostream& out, const ReadFilterCollection &mr);
+
+ /** Return a GenomicRegionCollection of all
+ * of the regions specified by the filters.
+ * @note This returns the raw regions. It may be useful
+ * to run MergeOverlappingIntervals on the output to see
+ * the minimal covered regions.
+ */
+ GRC getAllRegions() const;
+
+ /** Return the number of filters in this collection */
+ size_t size() const { return m_regions.size(); }
+
+ /** Return the total number of rules in this collection.
+ * Filters are composed of collections of rules, and this
+ * returns the total number of rules (e.g. MAPQ > 30) across
+ * all of the filters
+ */
+ size_t numRules() const {
+ size_t num = 0;
+ for (std::vector<ReadFilter>::const_iterator it = m_regions.begin(); it != m_regions.end(); ++it)
+ num += it->size();
+ return num;
+ }
+
+ // Return the a tab-delimited tally of which filters were satisfied.
+ // Includes the header:
+ // total_seen_count total_passed_count region region_passed_count rule rule_passed_count
+ //
+ //std::string EmitCounts() const;
+
+ private:
+
+ // the global rule that all other rules are inherited from
+ AbstractRule rule_all;
+
+ size_t m_count; // passed
+ size_t m_count_seen; // tested
+
+ // store all of the individual filters
+ std::vector<ReadFilter> m_regions;
+
+ bool ParseFilterObject(const std::string& filterName, const Json::Value& filterObject);
+
+};
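+
+// Example (illustrative sketch): apply a JSON-defined filter collection while streaming a BAM.
+// `json_script`, `reader` (a BamReader) and `writer` (a BamWriter) are assumed to be set up
+// elsewhere; the method names follow the SeqLib README.
+//
+//   SeqLib::Filter::ReadFilterCollection rfc(json_script, reader.Header());
+//   SeqLib::BamRecord rec;
+//   while (reader.GetNextRecord(rec))
+//     if (rfc.isValid(rec))
+//       writer.WriteRecord(rec);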
+
+}
+
+}
+#endif
diff --git a/SeqLib/RefGenome.h b/SeqLib/RefGenome.h
new file mode 100644
index 0000000..d76065a
--- /dev/null
+++ b/SeqLib/RefGenome.h
@@ -0,0 +1,57 @@
+#ifndef SEQLIB_REF_GENOME_H
+#define SEQLIB_REF_GENOME_H
+
+#include <string>
+#include <cstdlib>
+#include <iostream>
+
+#include "htslib/htslib/faidx.h"
+
+namespace SeqLib {
+
+ /** Stores an indexed reference genome
+ *
+ * RefGenome is currently used as an interface to obtain
+ * sequences from the reference given an interval.
+ */
+ class RefGenome {
+
+ public:
+
+ /** Create an empty RefGenome object */
+ RefGenome() { index = NULL; }
+
+ /** Destroy the malloc'ed faidx_t index inside object */
+ ~RefGenome() { if (index) fai_destroy(index); }
+
+ /** Query a region to get the sequence
+ * @param chr_name name of the chr to query
+ * @param p1 position 1. Zero-based
+ * @param p2 position 2. Zero-based
+ *
+ * @exception Throws an invalid_argument if p1 > p2, p1 < 0, p2 < 0, chr not found, or seq not found
+ * @note This is currently NOT thread safe
+ */
+ std::string QueryRegion(const std::string& chr_name, int32_t p1, int32_t p2) const;
+
+ /** Load an indexed reference sequence
+ * @param file Path to an indexed reference genome. See samtools faidx to create
+   * @return True if successfully loaded
+ */
+ bool LoadIndex(const std::string& file);
+
+    /** Check whether a reference has been loaded; returns true if none is loaded */
+ bool IsEmpty() const {
+ return (index == NULL);
+ }
+
+ private:
+
+ faidx_t * index;
+
+ };
+
+
+}
+
+#endif
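
A short usage sketch for the RefGenome interface above (the FASTA path is a placeholder; QueryRegion throws invalid_argument on bad input, which is not handled here):

    #include <iostream>
    #include "SeqLib/RefGenome.h"

    int main() {
      SeqLib::RefGenome ref;
      if (!ref.LoadIndex("hg19.fasta"))        // expects a samtools faidx .fai alongside
        return 1;
      // zero-based coordinates, per the QueryRegion documentation
      std::string seq = ref.QueryRegion("1", 1000000, 1000099);
      std::cout << seq << std::endl;
      return 0;
    }
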
diff --git a/SeqLib/SeqLibCommon.h b/SeqLib/SeqLibCommon.h
new file mode 100644
index 0000000..51cfeb0
--- /dev/null
+++ b/SeqLib/SeqLibCommon.h
@@ -0,0 +1,76 @@
+#ifndef SEQLIB_COMMON_H
+#define SEQLIB_COMMON_H
+
+/*! \mainpage SeqLib 1.0
+ *
+ * \section intro_sec Introduction
+ *
+ * SeqLib is a C++ package for querying BAM/SAM/CRAM files with HTSlib, performing
+ * BWA-MEM operations in memory, and performing sequence assembly with FermiKit.
+ * See https://github.com/walaj/SeqLib for
+ * full description.
+ */
+
+#include <string>
+#include <vector>
+
+/** HTSlib/BWA-MEM/BLAT/Fermi operations */
+namespace SeqLib {
+
+ static const char RCOMPLEMENT_TABLE[128] = {' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',
+ ' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',
+ ' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',
+ ' ',' ',' ',' ',' ','T',' ','G',' ',' ',' ','C',' ',' ',' ',' ',' ',' ','N',' ',
+ ' ',' ',' ',' ','A',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','t',' ','g',
+ ' ',' ',' ','c',' ',' ',' ',' ',' ',' ','n',' ',' ',' ',' ',' ','a',' ',' ',' ',
+ ' ',' ',' ',' ',' ',' ',' ',' '};
+
+ /*
+static const std::vector<std::string> CHR_NAME {"1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "10", "11", "12", "13", "14", "15", "16", "17",
+ "18", "19", "20", "21", "22", "X", "Y", "M"};
+ static const std::vector<std::string> CHR_NAME_NUM {"1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "10", "11", "12", "13", "14", "15", "16", "17",
+ "18", "19", "20", "21", "22", "23", "24"};
+
+ static const std::vector<int> CHR_LEN_VEC = {249250621, 243199373, 198022430, 191154276, //1-4
+ 180915260, 171115067, //5-6
+ 159138663, 146364022, 141213431, 135534747, 135006516, 133851895, //7-12
+ 115169878, 107349540, 102531392, 90354753, 81195210, 78077248, //13-18
+ 59128983, 63025520, 48129895, 51304566, 155270560, 59373566, //19-24
+ 16571}; //25
+
+ static const std::vector<double> CHR_CUMSUM_WEIGHT_X = {0.08209014, 0.16218732, 0.22740558, 0.29036182, 0.34994586, 0.40630223, 0.45871420,
+ 0.50691887, 0.55342720, 0.59806527, 0.64252937, 0.68661320, 0.72454415, 0.75989948,
+ 0.79366797, 0.82342611, 0.85016757, 0.87588214, 0.89535614, 0.91611346, 0.93196494,
+ 0.94886198, 1.00000000};
+
+ static const std::vector<double> CHR_WEIGHT_X = {0.08209014, 0.08009718, 0.06521825, 0.06295624, //1-4
+ 0.05958404, 0.05635637, //5-6
+ 0.05241197, 0.04820467, 0.04650833, 0.04463807, 0.04446410, 0.04408383, //7-12
+ 0.03793095, 0.03535534, 0.03376849, 0.02975814, 0.02674146, 0.02571457,
+ 0.01947400, 0.02075732, 0.01585148, 0.01689705, 0.05113802};
+
+ static const int CHR_LEN [25] = {249250621, 243199373, 198022430, 191154276, //1-4
+ 180915260, 171115067, //5-6
+ 159138663, 146364022, 141213431, 135534747, 135006516, 133851895, //7-12
+ 115169878, 107349540, 102531392, 90354753, 81195210, 78077248, //13-18
+ 59128983, 63025520, 48129895, 51304566, 155270560, 59373566, //19-24
+ 16571}; //25
+
+ static const uint32_t CHR_CLEN [25] = {0, 249250621, 492449994, 690472424, 881626700, 1062541960, 1233657027,
+ 1392795690,1539159712,1680373143,1815907890,1950914406,2084766301,
+ 2199936179, 2307285719, 2409817111, 2500171864, 2581367074, 2659444322,
+ 2718573305, 2781598825, 2829728720, 2881033286, 3036303846, 3095677412};
+
+ static const uint32_t genome_size_XY = 3095677411;
+
+ static std::string const REFHG19 = "/seq/references/Homo_sapiens_assembly19/v1/Homo_sapiens_assembly19.fasta";
+
+ static const int NONCENT_CHR [44] = {1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,
+ 11,11,12,12,13,14,15,16,16,17,17,18,18,19,19,20,20,21,21,22,23,23,24,24};
+
+ */
+}
+
+#endif
diff --git a/SeqLib/SeqLibUtils.h b/SeqLib/SeqLibUtils.h
new file mode 100644
index 0000000..4e83d62
--- /dev/null
+++ b/SeqLib/SeqLibUtils.h
@@ -0,0 +1,160 @@
+#ifndef SNOWUTILS_H
+#define SNOWUTILS_H
+
+#include <string>
+#include <time.h>
+#include <ctime>
+#include <vector>
+#include <unistd.h>
+#include <sstream>
+#include <cmath>
+#include <algorithm>
+#include <stdio.h>
+
+#include "SeqLib/SeqLibCommon.h"
+
+
+#if __cplusplus > 199711L
+ #include <memory>
+ #include <unordered_set>
+ #include <unordered_map>
+ #define SeqHashMap std::unordered_map
+ #define SeqHashSet std::unordered_set
+ #define SeqPointer std::shared_ptr
+ #define HAVE_C11 1
+#else
+
+#ifdef __APPLE__
+ #include <memory>
+ #include <unordered_set>
+ #include <unordered_map>
+ #define SeqHashMap std::unordered_map
+ #define SeqHashSet std::unordered_set
+ #define SeqPointer std::shared_ptr
+#else
+ #include <tr1/memory>
+ #include <tr1/unordered_set>
+ #include <tr1/unordered_map>
+ #define SeqHashMap std::tr1::unordered_map
+ #define SeqHashSet std::tr1::unordered_set
+ #define SeqPointer std::tr1::shared_ptr
+#endif
+#endif
+
+namespace SeqLib {
+
+ template<typename T>
+ inline std::string tostring(T d) {
+ std::stringstream ss;
+ ss << d;
+ return ss.str();
+ }
+
+ /** Check if a file is readable and exists
+ * @param name Name of a file to test
+   * @return True if the file is readable and exists
+ */
+ inline bool read_access_test (const std::string& name) {
+ return (access (name.c_str(), R_OK) == 0);
+ }
+
+ /** Format an integer to include commas
+ * @param data Number to format
+ * @return String with formatted number containing commas
+ */
+ template <typename T> inline
+ std::string AddCommas(T data) {
+ std::stringstream ss;
+ ss << data;
+ std::string s = ss.str();
+ if (s.length() > 3)
+ for (int i = s.length()-3; i > 0; i -= 3)
+ s.insert(i,",");
+ return s;
+ }
+
+ /** Display the runtime (CPU and Wall)
+ *
+ * @param start Running timer
+ * @return Time formatted as "CPU: XmYs Wall: XmYs"
+ * @note Does not work on OSX or Windows (returns "not configured")
+ */
+ inline std::string displayRuntime(
+#ifndef __APPLE__
+ const timespec start
+#endif
+ ) {
+
+#ifndef __APPLE__
+ struct timespec finish;
+ clock_gettime(CLOCK_MONOTONIC, &finish);
+ double elapsed = (finish.tv_sec - start.tv_sec);
+ int t = clock()/CLOCKS_PER_SEC;
+ int min = (int)std::floor(elapsed / 60.0);
+ int sec = (int)(elapsed-min*60);
+ char buffer[100];
+ sprintf (buffer, "CPU: %4dm%02ds Wall: %4dm%02ds",
+ (int)floor( ((double)t) /60.0), t % 60, min, sec);
+ buffer[99] = '\0';
+ return std::string(buffer);
+#else
+ return "--- time not configured for apple\n";
+#endif
+ }
+
+  /** Reverse complement, in place, a sequence containing upper/lower case ACTGN
+   * @param a Sequence to be reverse complemented
+ */
+ inline void rcomplement(std::string &a) {
+
+ std::reverse(&a[0], &a[a.size()]);
+ std::string::iterator it = a.begin();
+ for (; it != a.end(); it++)
+ *it = RCOMPLEMENT_TABLE[(unsigned char)*it];
+ }
+
+
+ /** Calculate the percentage and return as integer
+ * @param numer Numerator
+ * @param denom Denominator
+ * @return Integer with the percentage floored
+ */
+ template <typename T> inline int percentCalc(T numer, T denom) {
+ if (denom <= 0)
+ return 0;
+ int perc = numer * 100 / denom;
+ //int perc = static_cast<int>(floor((float)numer / (float)denom * 100.0));
+ return perc;
+ }
+
+ /** Remove substrings from a string
+ * @param toscrub Input string to clean
+ * @param toremove Substring to remove from input
+ * @return Scrubbed string
+ */
+ inline std::string scrubString(const std::string& toscrub, const std::string& toremove)
+ {
+ if (toscrub.empty() || toremove.empty())
+ return toscrub;
+
+ std::string::size_type i = toscrub.find(toremove);
+ if (i == std::string::npos)
+ return toscrub;
+
+ std::string ts = toscrub;
+ while (i != std::string::npos) {
+ ts.erase(i, toremove.length());
+ i = ts.find(toremove);
+ }
+ return ts;
+ }
+
+  // Generate a weighted random integer
+ // @param cs Weighting for each integer (values must sum to one)
+ // @return Random integer bounded on [0,cs.size())
+ //
+ //int weightedRandom(const std::vector<double>& cs);
+
+}
+
+#endif
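
For illustration, a small sketch exercising a few of the utilities above (AddCommas, rcomplement and scrubString); the values are arbitrary:

    #include <iostream>
    #include "SeqLib/SeqLibUtils.h"

    int main() {
      std::cout << SeqLib::AddCommas(1234567) << std::endl;          // prints 1,234,567

      std::string s = "ACGTNacgt";
      SeqLib::rcomplement(s);                                        // in-place reverse complement
      std::cout << s << std::endl;                                   // prints acgtNACGT

      std::cout << SeqLib::scrubString("chr17", "chr") << std::endl; // prints 17
      return 0;
    }
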
diff --git a/SeqLib/SeqPlot.h b/SeqLib/SeqPlot.h
new file mode 100644
index 0000000..bdf2328
--- /dev/null
+++ b/SeqLib/SeqPlot.h
@@ -0,0 +1,106 @@
+#ifndef SEQLIB_CONTIG_PLOT_H
+#define SEQLIB_CONTIG_PLOT_H
+
+#include "SeqLib/BamRecord.h"
+
+namespace SeqLib {
+
+ /** Object for creating ASCII alignment plots
+ */
+ class SeqPlot {
+
+ public:
+
+ /** Create an empty plotter object */
+ SeqPlot() : m_pad(5) {}
+
+    /** Plot aligned reads by stacking them in an IGV-like view */
+ std::string PlotAlignmentRecords(const BamRecordVector& brv) const;
+
+ /** Set the view window
+ * @param g Window to view reads in. Reads that
+ * start or end outside of this window are not plotted
+ */
+ void SetView(const GenomicRegion& g) { m_view = g; }
+
+ /** Set the padding between reads (default is 5) */
+ void SetPadding(int p) { m_pad = p; }
+
+ private:
+
+ // reads that align to the contig
+ BamRecordVector m_reads;
+
+ // view window
+ GenomicRegion m_view;
+
+ // padding when placing reads
+ int m_pad;
+
+ };
+
+ /** A single plotted read */
+struct PlottedRead {
+
+ int pos;
+ std::string seq;
+ std::string info;
+
+ PlottedRead(int p, const std::string& s, const std::string& i) : pos(p), seq(s), info(i) {}
+
+ bool operator<(const PlottedRead& pr) const {
+ return (pos < pr.pos);
+ }
+
+};
+
+typedef std::vector<PlottedRead> PlottedReadVector;
+
+/** A plotted line */
+struct PlottedReadLine {
+
+PlottedReadLine() : available(0), contig_len(0), pad(5) {}
+
+ std::vector<PlottedRead*> read_vec;
+ int available;
+ int contig_len;
+
+ int pad;
+
+ void addRead(PlottedRead *r) {
+ read_vec.push_back(r);
+ available = r->pos + r->seq.length() + pad;
+ }
+
+ bool readFits(const PlottedRead &r) {
+ return (r.pos >= available);
+ }
+
+ friend std::ostream& operator<<(std::ostream& out, const PlottedReadLine &r) {
+ int last_loc = 0;
+ for (std::vector<PlottedRead*>::const_iterator i = r.read_vec.begin(); i != r.read_vec.end(); ++i) {
+ // for (auto& i : r.read_vec) {
+ assert((*i)->pos - last_loc >= 0);
+ out << std::string((*i)->pos - last_loc, ' ') << (*i)->seq;
+ last_loc = (*i)->pos + (*i)->seq.length();
+ }
+ int name_buff = r.contig_len - last_loc;
+ assert(name_buff < 1e6);
+ out << std::string(std::max(name_buff, 5), ' ');
+ for (std::vector<PlottedRead*>::const_iterator i = r.read_vec.begin(); i != r.read_vec.end(); ++i) {
+ //for (auto& i : r.read_vec) { // add the data
+ out << (*i)->info << ",";
+ }
+ return out;
+ }
+
+};
+
+typedef std::vector<PlottedReadLine> PlottedReadLineVector;
+
+
+}
+
+
+
+#endif
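
A sketch of how SeqPlot is presumably meant to be combined with the BamReader API seen elsewhere in this commit (the BAM path and coordinates are invented):

    #include <iostream>
    #include "SeqLib/BamReader.h"
    #include "SeqLib/SeqPlot.h"

    int main() {
      SeqLib::BamReader br;
      br.Open("test.bam");                              // hypothetical indexed BAM

      SeqLib::GenomicRegion gr(0, 1000000, 1001000);    // chr index 0, 1 kb window
      br.SetRegion(gr);

      SeqLib::BamRecordVector brv;
      SeqLib::BamRecord rec;
      while (br.GetNextRecord(rec))
        brv.push_back(rec);

      SeqLib::SeqPlot plot;
      plot.SetView(gr);                                 // reads outside this window are skipped
      plot.SetPadding(5);
      std::cout << plot.PlotAlignmentRecords(brv);
      return 0;
    }
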
diff --git a/SeqLib/UnalignedSequence.h b/SeqLib/UnalignedSequence.h
new file mode 100644
index 0000000..c124059
--- /dev/null
+++ b/SeqLib/UnalignedSequence.h
@@ -0,0 +1,60 @@
+#ifndef SEQLIB_UNALIGNED_SEQ_H__
+#define SEQLIB_UNALIGNED_SEQ_H__
+
+extern "C" {
+ #include "bwa/bwa.h"
+ #include "bwa/bwt.h"
+ #include "bwa/bntseq.h"
+ #include "bwa/kseq.h"
+ #include <stdlib.h>
+ #include "bwa/utils.h"
+ #include "bwa/bwamem.h"
+ int is_bwt(ubyte_t *T, int n);
+ KSEQ_DECLARE(gzFile)
+}
+
+
+#include <cstring>
+#include <vector>
+
+namespace SeqLib {
+
+ /** Structure to hold unaligned sequence (name and bases)
+ */
+ struct UnalignedSequence {
+
+ /** Construct an empty sequence */
+ UnalignedSequence() {}
+
+    /** Construct an unaligned sequence with name and sequence
+ * @param n Name of the sequence
+ * @param s Sequence, stored as ACTG or N characters
+ */
+ UnalignedSequence(const std::string& n, const std::string& s) : Name(n), Seq(s), Qual(std::string()), Strand('*') {}
+
+    /** Construct an unaligned sequence with name, sequence and quality score
+ * @param n Name of the sequence
+ * @param s Sequence, stored as ACTG or N characters
+ * @param q Quality string
+ */
+ UnalignedSequence(const std::string& n, const std::string& s, const std::string& q) : Name(n), Seq(s), Qual(q), Strand('*') {}
+
+    /** Construct an unaligned sequence with name, sequence, quality score and strand
+ * @param n Name of the sequence
+ * @param s Sequence, stored as ACTG or N characters
+ * @param q Quality string
+ * @param t Strand of the sequence, one of '*', '+', '-'
+ */
+ UnalignedSequence(const std::string& n, const std::string& s, const std::string& q, char t) : Name(n), Seq(s), Qual(q), Strand(t) {}
+
+ std::string Name; ///< Name of the contig
+ std::string Seq; ///< Sequence of the contig (upper-case ACTGN)
+ std::string Qual; ///< Quality scores
+ char Strand; ///< Strand of the sequence. Default is '*'
+ };
+
+ typedef std::vector<UnalignedSequence> UnalignedSequenceVector; ///< A collection of unaligned sequences
+
+}
+
+#endif
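
A trivial construction example for the struct above (names and bases are invented):

    #include <iostream>
    #include "SeqLib/UnalignedSequence.h"

    int main() {
      SeqLib::UnalignedSequence us("read1", "ACGTACGT", "IIIIIIII", '+');
      SeqLib::UnalignedSequenceVector usv;
      usv.push_back(us);
      std::cout << usv[0].Name << " " << usv[0].Seq << " " << usv[0].Strand << std::endl;
      return 0;
    }
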
diff --git a/SeqLib/aho_corasick.hpp b/SeqLib/aho_corasick.hpp
new file mode 100644
index 0000000..2e201ff
--- /dev/null
+++ b/SeqLib/aho_corasick.hpp
@@ -0,0 +1,596 @@
+/*
+* Copyright (C) 2015 Christopher Gilbert.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to deal
+* in the Software without restriction, including without limitation the rights
+* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+* copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in all
+* copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*/
+
+#ifndef AHO_CORASICK_HPP
+#define AHO_CORASICK_HPP
+
+#include <algorithm>
+#include <cctype>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <queue>
+#include <vector>
+
+namespace aho_corasick {
+
+ // class interval
+ class interval {
+ size_t d_start;
+ size_t d_end;
+
+ public:
+ interval(size_t start, size_t end)
+ : d_start(start)
+ , d_end(end) {}
+
+ size_t get_start() const { return d_start; }
+ size_t get_end() const { return d_end; }
+ size_t size() const { return d_end - d_start + 1; }
+
+ bool overlaps_with(const interval& other) const {
+ return d_start <= other.d_end && d_end >= other.d_start;
+ }
+
+ bool overlaps_with(size_t point) const {
+ return d_start <= point && point <= d_end;
+ }
+
+ bool operator <(const interval& other) const {
+ return get_start() < other.get_start();
+ }
+
+ bool operator !=(const interval& other) const {
+ return get_start() != other.get_start() || get_end() != other.get_end();
+ }
+
+ bool operator ==(const interval& other) const {
+ return get_start() == other.get_start() && get_end() == other.get_end();
+ }
+ };
+
+ // class interval_tree
+ template<typename T>
+ class interval_tree {
+ public:
+ using interval_collection = std::vector<T>;
+
+ private:
+ // class node
+ class node {
+ enum direction {
+ LEFT, RIGHT
+ };
+ using node_ptr = std::unique_ptr<node>;
+
+ size_t d_point;
+ node_ptr d_left;
+ node_ptr d_right;
+ interval_collection d_intervals;
+
+ public:
+ node(const interval_collection& intervals)
+ : d_point(0)
+ , d_left(nullptr)
+ , d_right(nullptr)
+ , d_intervals()
+ {
+ d_point = determine_median(intervals);
+ interval_collection to_left, to_right;
+ for (const auto& i : intervals) {
+ if (i.get_end() < d_point) {
+ to_left.push_back(i);
+ } else if (i.get_start() > d_point) {
+ to_right.push_back(i);
+ } else {
+ d_intervals.push_back(i);
+ }
+ }
+ if (to_left.size() > 0) {
+ d_left.reset(new node(to_left));
+ }
+ if (to_right.size() > 0) {
+ d_right.reset(new node(to_right));
+ }
+ }
+
+ size_t determine_median(const interval_collection& intervals) const {
+ int start = -1;
+ int end = -1;
+ for (const auto& i : intervals) {
+ int cur_start = i.get_start();
+ int cur_end = i.get_end();
+ if (start == -1 || cur_start < start) {
+ start = cur_start;
+ }
+ if (end == -1 || cur_end > end) {
+ end = cur_end;
+ }
+ }
+ return (start + end) / 2;
+ }
+
+ interval_collection find_overlaps(const T& i) {
+ interval_collection overlaps;
+ if (d_point < i.get_start()) {
+ add_to_overlaps(i, overlaps, find_overlapping_ranges(d_right, i));
+ add_to_overlaps(i, overlaps, check_right_overlaps(i));
+ } else if (d_point > i.get_end()) {
+ add_to_overlaps(i, overlaps, find_overlapping_ranges(d_left, i));
+ add_to_overlaps(i, overlaps, check_left_overlaps(i));
+ } else {
+ add_to_overlaps(i, overlaps, d_intervals);
+ add_to_overlaps(i, overlaps, find_overlapping_ranges(d_left, i));
+ add_to_overlaps(i, overlaps, find_overlapping_ranges(d_right, i));
+ }
+ return interval_collection(overlaps);
+ }
+
+ protected:
+ void add_to_overlaps(const T& i, interval_collection& overlaps, interval_collection new_overlaps) const {
+ for (const auto& cur : new_overlaps) {
+ if (cur != i) {
+ overlaps.push_back(cur);
+ }
+ }
+ }
+
+ interval_collection check_left_overlaps(const T& i) const {
+ return interval_collection(check_overlaps(i, LEFT));
+ }
+
+ interval_collection check_right_overlaps(const T& i) const {
+ return interval_collection(check_overlaps(i, RIGHT));
+ }
+
+ interval_collection check_overlaps(const T& i, direction d) const {
+ interval_collection overlaps;
+ for (const auto& cur : d_intervals) {
+ switch (d) {
+ case LEFT:
+ if (cur.get_start() <= i.get_end()) {
+ overlaps.push_back(cur);
+ }
+ break;
+ case RIGHT:
+ if (cur.get_end() >= i.get_start()) {
+ overlaps.push_back(cur);
+ }
+ break;
+ }
+ }
+ return interval_collection(overlaps);
+ }
+
+ interval_collection find_overlapping_ranges(node_ptr& node, const T& i) const {
+ if (node) {
+ return interval_collection(node->find_overlaps(i));
+ }
+ return interval_collection();
+ }
+ };
+ node d_root;
+
+ public:
+ interval_tree(const interval_collection& intervals)
+ : d_root(intervals) {}
+
+ interval_collection remove_overlaps(const interval_collection& intervals) {
+ interval_collection result(intervals.begin(), intervals.end());
+ std::sort(result.begin(), result.end(), [](const T& a, const T& b) -> bool {
+ if (b.size() - a.size() == 0) {
+ return a.get_start() > b.get_start();
+ }
+ return a.size() > b.size();
+ });
+ std::set<T> remove_tmp;
+ for (const auto& i : result) {
+ if (remove_tmp.find(i) != remove_tmp.end()) {
+ continue;
+ }
+ auto overlaps = find_overlaps(i);
+ for (const auto& overlap : overlaps) {
+ remove_tmp.insert(overlap);
+ }
+ }
+ for (const auto& i : remove_tmp) {
+ result.erase(
+ std::find(result.begin(), result.end(), i)
+ );
+ }
+ std::sort(result.begin(), result.end(), [](const T& a, const T& b) -> bool {
+ return a.get_start() < b.get_start();
+ });
+ return interval_collection(result);
+ }
+
+ interval_collection find_overlaps(const T& i) {
+ return interval_collection(d_root.find_overlaps(i));
+ }
+ };
+
+ // class ahoemit
+ template<typename CharType>
+ class ahoemit: public interval {
+ public:
+ typedef std::basic_string<CharType> string_type;
+ typedef std::basic_string<CharType>& string_ref_type;
+
+ private:
+ string_type d_keyword;
+
+ public:
+ ahoemit()
+ : interval(-1, -1)
+ , d_keyword() {}
+
+ ahoemit(size_t start, size_t end, string_type keyword)
+ : interval(start, end)
+ , d_keyword(keyword) {}
+
+ string_type get_keyword() const { return string_type(d_keyword); }
+ bool is_empty() const { return (get_start() == -1 && get_end() == -1); }
+ };
+
+ // class token
+ template<typename CharType>
+ class token {
+ public:
+ enum token_type{
+ TYPE_FRAGMENT,
+ TYPE_MATCH,
+ };
+
+ using string_type = std::basic_string<CharType>;
+ using string_ref_type = std::basic_string<CharType>&;
+ using ahoemit_type = ahoemit<CharType>;
+
+ private:
+ token_type d_type;
+ string_type d_fragment;
+ ahoemit_type d_ahoemit;
+
+ public:
+ token(string_ref_type fragment)
+ : d_type(TYPE_FRAGMENT)
+ , d_fragment(fragment)
+ , d_ahoemit() {}
+
+ token(string_ref_type fragment, const ahoemit_type& e)
+ : d_type(TYPE_MATCH)
+ , d_fragment(fragment)
+ , d_ahoemit(e) {}
+
+ bool is_match() const { return (d_type == TYPE_MATCH); }
+ string_type get_fragment() const { return string_type(d_fragment); }
+ ahoemit_type get_ahoemit() const { return d_ahoemit; }
+ };
+
+ // class state
+ template<typename CharType>
+ class state {
+ public:
+ typedef state<CharType>* ptr;
+ typedef std::unique_ptr<state<CharType>> unique_ptr;
+ typedef std::basic_string<CharType> string_type;
+ typedef std::basic_string<CharType>& string_ref_type;
+ typedef std::set<string_type> string_collection;
+ typedef std::vector<ptr> state_collection;
+ typedef std::vector<CharType> transition_collection;
+
+ private:
+ size_t d_depth;
+ ptr d_root;
+ std::map<CharType, unique_ptr> d_success;
+ ptr d_failure;
+ string_collection d_ahoemits;
+
+ public:
+ state(): state(0) {}
+
+ state(size_t depth)
+ : d_depth(depth)
+ , d_root(depth == 0 ? this : nullptr)
+ , d_success()
+ , d_failure(nullptr)
+ , d_ahoemits() {}
+
+ ptr next_state(CharType character) const {
+ return next_state(character, false);
+ }
+
+ ptr next_state_ignore_root_state(CharType character) const {
+ return next_state(character, true);
+ }
+
+ ptr add_state(CharType character) {
+ auto next = next_state_ignore_root_state(character);
+ if (next == nullptr) {
+ next = new state<CharType>(d_depth + 1);
+ d_success[character].reset(next);
+ }
+ return next;
+ }
+
+ size_t get_depth() const { return d_depth; }
+
+ void add_ahoemit(string_ref_type keyword) {
+ d_ahoemits.insert(keyword);
+ }
+
+ void add_ahoemit(const string_collection& ahoemits) {
+ for (const auto& e : ahoemits) {
+ string_type str(e);
+ add_ahoemit(str);
+ }
+ }
+
+ string_collection get_ahoemits() const { return d_ahoemits; }
+
+ ptr failure() const { return d_failure; }
+
+ void set_failure(ptr fail_state) { d_failure = fail_state; }
+
+ state_collection get_states() const {
+ state_collection result;
+ for (auto it = d_success.cbegin(); it != d_success.cend(); ++it) {
+ result.push_back(it->second.get());
+ }
+ return state_collection(result);
+ }
+
+ transition_collection get_transitions() const {
+ transition_collection result;
+ for (auto it = d_success.cbegin(); it != d_success.cend(); ++it) {
+ result.push_back(it->first);
+ }
+ return transition_collection(result);
+ }
+
+ private:
+ ptr next_state(CharType character, bool ignore_root_state) const {
+ ptr result = nullptr;
+ auto found = d_success.find(character);
+ if (found != d_success.end()) {
+ result = found->second.get();
+ } else if (!ignore_root_state && d_root != nullptr) {
+ result = d_root;
+ }
+ return result;
+ }
+ };
+
+ template<typename CharType>
+ class basic_trie {
+ public:
+ using string_type = std::basic_string < CharType > ;
+ using string_ref_type = std::basic_string<CharType>&;
+
+ typedef state<CharType> state_type;
+ typedef state<CharType>* state_ptr_type;
+ typedef token<CharType> token_type;
+ typedef ahoemit<CharType> ahoemit_type;
+ typedef std::vector<token_type> token_collection;
+ typedef std::vector<ahoemit_type> ahoemit_collection;
+
+ class config {
+ bool d_allow_overlaps;
+ bool d_only_whole_words;
+ bool d_case_insensitive;
+
+ public:
+ config()
+ : d_allow_overlaps(true)
+ , d_only_whole_words(false)
+ , d_case_insensitive(false) {}
+
+ bool is_allow_overlaps() const { return d_allow_overlaps; }
+ void set_allow_overlaps(bool val) { d_allow_overlaps = val; }
+
+ bool is_only_whole_words() const { return d_only_whole_words; }
+ void set_only_whole_words(bool val) { d_only_whole_words = val; }
+
+ bool is_case_insensitive() const { return d_case_insensitive; }
+ void set_case_insensitive(bool val) { d_case_insensitive = val; }
+ };
+
+ private:
+ std::unique_ptr<state_type> d_root;
+ config d_config;
+ bool d_constructed_failure_states;
+
+ public:
+ basic_trie(): basic_trie(config()) {}
+
+ basic_trie(const config& c)
+ : d_root(new state_type())
+ , d_config(c)
+ , d_constructed_failure_states(false) {}
+
+ basic_trie& case_insensitive() {
+ d_config.set_case_insensitive(true);
+ return (*this);
+ }
+
+ basic_trie& remove_overlaps() {
+ d_config.set_allow_overlaps(false);
+ return (*this);
+ }
+
+ basic_trie& only_whole_words() {
+ d_config.set_only_whole_words(true);
+ return (*this);
+ }
+
+ void insert(string_type keyword) {
+ if (keyword.empty())
+ return;
+ state_ptr_type cur_state = d_root.get();
+ for (const auto& ch : keyword) {
+ cur_state = cur_state->add_state(ch);
+ }
+ cur_state->add_ahoemit(keyword);
+ }
+
+ template<class InputIterator>
+ void insert(InputIterator first, InputIterator last) {
+			for (InputIterator it = first; it != last; ++it) {
+ insert(*it);
+ }
+ }
+
+ token_collection tokenise(string_type text) {
+ token_collection tokens;
+ auto collected_ahoemits = parse_text(text);
+ size_t last_pos = -1;
+ for (const auto& e : collected_ahoemits) {
+ if (e.get_start() - last_pos > 1) {
+ tokens.push_back(create_fragment(e, text, last_pos));
+ }
+ tokens.push_back(create_match(e, text));
+ last_pos = e.get_end();
+ }
+ if (text.size() - last_pos > 1) {
+ tokens.push_back(create_fragment(typename token_type::ahoemit_type(), text, last_pos));
+ }
+ return token_collection(tokens);
+ }
+
+ ahoemit_collection parse_text(string_type text) {
+ check_construct_failure_states();
+ size_t pos = 0;
+ state_ptr_type cur_state = d_root.get();
+ ahoemit_collection collected_ahoemits;
+ for (auto c : text) {
+ if (d_config.is_case_insensitive()) {
+ c = std::tolower(c);
+ }
+ cur_state = get_state(cur_state, c);
+ store_ahoemits(pos, cur_state, collected_ahoemits);
+ pos++;
+ }
+ if (d_config.is_only_whole_words()) {
+ remove_partial_matches(text, collected_ahoemits);
+ }
+ if (!d_config.is_allow_overlaps()) {
+ interval_tree<ahoemit_type> tree(typename interval_tree<ahoemit_type>::interval_collection(collected_ahoemits.begin(), collected_ahoemits.end()));
+ auto tmp = tree.remove_overlaps(collected_ahoemits);
+ collected_ahoemits.swap(tmp);
+ }
+ return ahoemit_collection(collected_ahoemits);
+ }
+
+ private:
+ token_type create_fragment(const typename token_type::ahoemit_type& e, string_ref_type text, size_t last_pos) const {
+ auto start = last_pos + 1;
+ auto end = (e.is_empty()) ? text.size() : e.get_start();
+ auto len = end - start;
+ typename token_type::string_type str(text.substr(start, len));
+ return token_type(str);
+ }
+
+ token_type create_match(const typename token_type::ahoemit_type& e, string_ref_type text) const {
+ auto start = e.get_start();
+ auto end = e.get_end() + 1;
+ auto len = end - start;
+ typename token_type::string_type str(text.substr(start, len));
+ return token_type(str, e);
+ }
+
+ void remove_partial_matches(string_ref_type search_text, ahoemit_collection& collected_ahoemits) const {
+ size_t size = search_text.size();
+ ahoemit_collection remove_ahoemits;
+ for (const auto& e : collected_ahoemits) {
+ if ((e.get_start() == 0 || !std::isalpha(search_text.at(e.get_start() - 1))) &&
+ (e.get_end() + 1 == size || !std::isalpha(search_text.at(e.get_end() + 1)))
+ ) {
+ continue;
+ }
+ remove_ahoemits.push_back(e);
+ }
+ for (auto& e : remove_ahoemits) {
+ collected_ahoemits.erase(
+ std::find(collected_ahoemits.begin(), collected_ahoemits.end(), e)
+ );
+ }
+ }
+
+ state_ptr_type get_state(state_ptr_type cur_state, CharType c) const {
+ state_ptr_type result = cur_state->next_state(c);
+ while (result == nullptr) {
+ cur_state = cur_state->failure();
+ result = cur_state->next_state(c);
+ }
+ return result;
+ }
+
+ void check_construct_failure_states() {
+ if (!d_constructed_failure_states) {
+ construct_failure_states();
+ }
+ }
+
+ void construct_failure_states() {
+ std::queue<state_ptr_type> q;
+ for (auto& depth_one_state : d_root->get_states()) {
+ depth_one_state->set_failure(d_root.get());
+ q.push(depth_one_state);
+ }
+ d_constructed_failure_states = true;
+
+ while (!q.empty()) {
+ auto cur_state = q.front();
+ for (const auto& transition : cur_state->get_transitions()) {
+ state_ptr_type target_state = cur_state->next_state(transition);
+ q.push(target_state);
+
+ state_ptr_type trace_failure_state = cur_state->failure();
+ while (trace_failure_state->next_state(transition) == nullptr) {
+ trace_failure_state = trace_failure_state->failure();
+ }
+ state_ptr_type new_failure_state = trace_failure_state->next_state(transition);
+ target_state->set_failure(new_failure_state);
+ target_state->add_ahoemit(new_failure_state->get_ahoemits());
+ }
+ q.pop();
+ }
+ }
+
+ void store_ahoemits(size_t pos, state_ptr_type cur_state, ahoemit_collection& collected_ahoemits) const {
+ auto ahoemits = cur_state->get_ahoemits();
+ if (!ahoemits.empty()) {
+ for (const auto& str : ahoemits) {
+ auto ahoemit_str = typename ahoemit_type::string_type(str);
+ collected_ahoemits.push_back(ahoemit_type(pos - ahoemit_str.size() + 1, pos, ahoemit_str));
+ }
+ }
+ }
+ };
+
+ typedef basic_trie<char> trie;
+ typedef basic_trie<wchar_t> wtrie;
+
+
+} // namespace aho_corasick
+
+#endif // AHO_CORASICK_HPP
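
The trie above can be driven roughly as follows (keywords and text are invented; parse_text returns the matched intervals together with their keywords):

    #include <iostream>
    #include "SeqLib/aho_corasick.hpp"

    int main() {
      aho_corasick::trie t;
      t.insert("ACGT");
      t.insert("GGTT");

      auto matches = t.parse_text("AACGTGGTTACGT");
      for (const auto& m : matches)
        std::cout << m.get_keyword() << " at [" << m.get_start()
                  << "," << m.get_end() << "]" << std::endl;
      return 0;
    }
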
diff --git a/SeqLib/ssw.h b/SeqLib/ssw.h
new file mode 100644
index 0000000..685ecf3
--- /dev/null
+++ b/SeqLib/ssw.h
@@ -0,0 +1,188 @@
+/*
+ * ssw.h
+ *
+ * Created by Mengyao Zhao on 6/22/10.
+ * Copyright 2010 Boston College. All rights reserved.
+ * Version 0.1.4
+ * Last revision by Mengyao Zhao on 02/11/16.
+ *
+ */
+
+#ifndef SSW_H
+#define SSW_H
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <emmintrin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+#define MAPSTR "MIDNSHP=X"
+#ifndef BAM_CIGAR_SHIFT
+#define BAM_CIGAR_SHIFT 4
+#endif
+
+
+/*! @typedef structure of the query profile */
+struct _profile;
+typedef struct _profile s_profile;
+
+/*! @typedef structure of the alignment result
+ @field score1 the best alignment score
+ @field score2 sub-optimal alignment score
+ @field ref_begin1 0-based best alignment beginning position on reference; ref_begin1 = -1 when the best alignment beginning
+ position is not available
+ @field ref_end1 0-based best alignment ending position on reference
+ @field read_begin1 0-based best alignment beginning position on read; read_begin1 = -1 when the best alignment beginning
+ position is not available
+ @field read_end1 0-based best alignment ending position on read
+ @field read_end2 0-based sub-optimal alignment ending position on read
+ @field cigar best alignment cigar; stored the same as that in BAM format, high 28 bits: length, low 4 bits: M/I/D (0/1/2);
+ cigar = 0 when the best alignment path is not available
+ @field cigarLen length of the cigar string; cigarLen = 0 when the best alignment path is not available
+*/
+typedef struct {
+ uint16_t score1;
+ uint16_t score2;
+ int32_t ref_begin1;
+ int32_t ref_end1;
+ int32_t read_begin1;
+ int32_t read_end1;
+ int32_t ref_end2;
+ uint32_t* cigar;
+ int32_t cigarLen;
+} s_align;
+
+/*! @function Create the query profile using the query sequence.
+ @param read pointer to the query sequence; the query sequence needs to be numbers
+ @param readLen length of the query sequence
+ @param mat pointer to the substitution matrix; mat needs to be corresponding to the read sequence
+ @param n the square root of the number of elements in mat (mat has n*n elements)
+	@param	score_size	estimated Smith-Waterman score; if your estimated best alignment score is certainly < 255, set 0; if
+						your estimated best alignment score may be >= 255, set 1; if you don't know, set 2
+ @return pointer to the query profile structure
+ @note example for parameter read and mat:
+ If the query sequence is: ACGTATC, the sequence that read points to can be: 1234142
+ Then if the penalty for match is 2 and for mismatch is -2, the substitution matrix of parameter mat will be:
+ //A C G T
+ 2 -2 -2 -2 //A
+ -2 2 -2 -2 //C
+ -2 -2 2 -2 //G
+ -2 -2 -2 2 //T
+ mat is the pointer to the array {2, -2, -2, -2, -2, 2, -2, -2, -2, -2, 2, -2, -2, -2, -2, 2}
+*/
+s_profile* ssw_init (const int8_t* read, const int32_t readLen, const int8_t* mat, const int32_t n, const int8_t score_size);
+
+/*! @function Release the memory allocated by function ssw_init.
+ @param p pointer to the query profile structure
+*/
+void init_destroy (s_profile* p);
+
+// @function ssw alignment.
+/*! @function Do Striped Smith-Waterman alignment.
+ @param prof pointer to the query profile structure
+ @param ref pointer to the target sequence; the target sequence needs to be numbers and corresponding to the mat parameter of
+ function ssw_init
+ @param refLen length of the target sequence
+ @param weight_gapO the absolute value of gap open penalty
+ @param weight_gapE the absolute value of gap extension penalty
+	@param	flag	bitwise FLAG; (from high to low) bit 5: when set to 1, function ssw_align will return the best alignment
+					beginning position; bit 6: when set to 1, if (ref_end1 - ref_begin1 < filterd && read_end1 - read_begin1
+					< filterd), (no matter how bit 5 is set) the function will return the best alignment beginning position and
+					cigar; bit 7: when set to 1, if the best alignment score >= filters, (no matter how bit 5 is set) the function
+					will return the best alignment beginning position and cigar; bit 8: when set to 1, (no matter how bits 5, 6 or 7
+					are set) the function will always return the best alignment beginning position and cigar. When flag == 0, only
+					the optimal and sub-optimal scores and the optimal alignment ending position will be returned.
+	@param	filters	score filter: when bit 7 of flag is set to 1 and bit 8 is set to 0, filters will be used (please check the
+					description of the flag parameter for detailed usage)
+	@param	filterd	distance filter: when bit 6 of flag is set to 1 and bit 8 is set to 0, filterd will be used (please check
+					the description of the flag parameter for detailed usage)
+	@param	maskLen	the distance between the optimal and suboptimal alignment ending positions must be >= maskLen. We suggest using
+					readLen/2 if you have no special concerns. Note: maskLen has to be >= 15, otherwise this function will NOT
+					return the suboptimal alignment information. Detailed description of maskLen: after locating the optimal
+					alignment ending position, the suboptimal alignment score can be heuristically found by checking the second
+					largest score in the array that contains the maximal score of each column of the SW matrix. In order to avoid
+					picking scores that belong to alignments sharing the partial best alignment, the SSW C library masks the
+					reference loci near the best alignment ending position (mask length = maskLen) and locates the second largest
+					score among the unmasked elements.
+	@return	pointer to the alignment result structure
+	@note	No matter how the parameter flag is set, this function will at least return the optimal and sub-optimal alignment scores,
+			and the optimal alignment ending positions on the target and query sequences. If both bits 6 and 7 of the flag are set
+			while bit 8 is not, the function will return the cigar only when both criteria are fulfilled. All returned positions are
+			0-based coordinates.
+*/
+s_align* ssw_align (const s_profile* prof,
+ const int8_t* ref,
+ int32_t refLen,
+ const uint8_t weight_gapO,
+ const uint8_t weight_gapE,
+ const uint8_t flag,
+ const uint16_t filters,
+ const int32_t filterd,
+ const int32_t maskLen);
+
+/*! @function Release the memory allocated by function ssw_align.
+ @param a pointer to the alignment result structure
+*/
+void align_destroy (s_align* a);
+
+/*! @function Produce CIGAR 32-bit unsigned integer from CIGAR operation and CIGAR length
+ @param length length of CIGAR
+ @param op_letter CIGAR operation character ('M', 'I', etc)
+ @return 32-bit unsigned integer, representing encoded CIGAR operation and length
+*/
+static inline uint32_t to_cigar_int (uint32_t length, char op_letter)
+{
+ switch (op_letter) {
+	case 'M': /* alignment match (can be a sequence match or mismatch) */
+ default:
+ return length << BAM_CIGAR_SHIFT;
+ case 'S': /* soft clipping (clipped sequences present in SEQ) */
+ return (length << BAM_CIGAR_SHIFT) | (4u);
+ case 'D': /* deletion from the reference */
+ return (length << BAM_CIGAR_SHIFT) | (2u);
+ case 'I': /* insertion to the reference */
+ return (length << BAM_CIGAR_SHIFT) | (1u);
+ case 'H': /* hard clipping (clipped sequences NOT present in SEQ) */
+ return (length << BAM_CIGAR_SHIFT) | (5u);
+ case 'N': /* skipped region from the reference */
+ return (length << BAM_CIGAR_SHIFT) | (3u);
+ case 'P': /* padding (silent deletion from padded reference) */
+ return (length << BAM_CIGAR_SHIFT) | (6u);
+ case '=': /* sequence match */
+ return (length << BAM_CIGAR_SHIFT) | (7u);
+ case 'X': /* sequence mismatch */
+ return (length << BAM_CIGAR_SHIFT) | (8u);
+ }
+ return (uint32_t)-1; // This never happens
+}
+
+
+/*! @function Extract CIGAR operation character from CIGAR 32-bit unsigned integer
+ @param cigar_int 32-bit unsigned integer, representing encoded CIGAR operation and length
+ @return CIGAR operation character ('M', 'I', etc)
+*/
+//char cigar_int_to_op (uint32_t cigar_int);
+static inline char cigar_int_to_op(uint32_t cigar_int)
+{
+ return (cigar_int & 0xfU) > 8 ? 'M': MAPSTR[cigar_int & 0xfU];
+}
+
+
+/*! @function Extract length of a CIGAR operation from CIGAR 32-bit unsigned integer
+ @param cigar_int 32-bit unsigned integer, representing encoded CIGAR operation and length
+ @return length of CIGAR operation
+*/
+//uint32_t cigar_int_to_len (uint32_t cigar_int);
+static inline uint32_t cigar_int_to_len (uint32_t cigar_int)
+{
+ return cigar_int >> BAM_CIGAR_SHIFT;
+}
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+#endif // SSW_H
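
To make the flow of the C API above concrete, here is a condensed sketch. The base-to-integer encoding (A=0, C=1, G=2, T=3, N=4) and the 5x5 match/mismatch matrix are conventions assumed for the example, not requirements of the header; gap penalties of 3/1 mirror the defaults noted in ssw_cpp.h below.

    #include <iostream>
    #include "SeqLib/ssw.h"

    int main() {
      // 5x5 substitution matrix over A,C,G,T,N: match +2, mismatch -2, N scores 0
      int8_t mat[25];
      for (int i = 0; i < 5; ++i)
        for (int j = 0; j < 5; ++j)
          mat[i*5 + j] = (i == 4 || j == 4) ? 0 : (i == j ? 2 : -2);

      // query ACGTATC and target TACGTATCG, pre-translated with A=0,C=1,G=2,T=3
      int8_t read_num[] = {0,1,2,3,0,3,1};
      int8_t ref_num[]  = {3,0,1,2,3,0,3,1,2};

      s_profile* prof = ssw_init(read_num, 7, mat, 5, 2);
      // flag = 1 sets the lowest bit (bit 8 in the docs above): always report begin position and cigar
      s_align* aln = ssw_align(prof, ref_num, 9, 3, 1, 1, 0, 0, 15);
      if (aln) {
        std::cout << "score " << aln->score1
                  << " ref " << aln->ref_begin1 << "-" << aln->ref_end1 << " cigar ";
        for (int k = 0; k < aln->cigarLen; ++k)
          std::cout << cigar_int_to_len(aln->cigar[k]) << cigar_int_to_op(aln->cigar[k]);
        std::cout << std::endl;
        align_destroy(aln);
      }
      init_destroy(prof);
      return 0;
    }
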
diff --git a/SeqLib/ssw_cpp.h b/SeqLib/ssw_cpp.h
new file mode 100644
index 0000000..cdcf717
--- /dev/null
+++ b/SeqLib/ssw_cpp.h
@@ -0,0 +1,219 @@
+#ifndef COMPLETE_STRIPED_SMITH_WATERMAN_CPP_H_
+#define COMPLETE_STRIPED_SMITH_WATERMAN_CPP_H_
+
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+namespace StripedSmithWaterman {
+
+struct Alignment {
+ uint16_t sw_score; // The best alignment score
+ uint16_t sw_score_next_best; // The next best alignment score
+ int32_t ref_begin; // Reference begin position of the best alignment
+ int32_t ref_end; // Reference end position of the best alignment
+ int32_t query_begin; // Query begin position of the best alignment
+ int32_t query_end; // Query end position of the best alignment
+ int32_t ref_end_next_best; // Reference end position of the next best alignment
+ int32_t mismatches; // Number of mismatches of the alignment
+ std::string cigar_string; // Cigar string of the best alignment
+ std::vector<uint32_t> cigar; // Cigar stored in the BAM format
+ // high 28 bits: length
+ // low 4 bits: M/I/D/S/X (0/1/2/4/8);
+ void Clear() {
+ sw_score = 0;
+ sw_score_next_best = 0;
+ ref_begin = 0;
+ ref_end = 0;
+ query_begin = 0;
+ query_end = 0;
+ ref_end_next_best = 0;
+ mismatches = 0;
+ cigar_string.clear();
+ cigar.clear();
+ };
+};
+
+struct Filter {
+  // NOTE: No matter how the filter is set, these five fields of Alignment are always given:
+  //       sw_score; sw_score_next_best; ref_end; query_end; ref_end_next_best.
+  // NOTE: If you only need the scores of alignments, please set 'report_begin_position'
+  //       and 'report_cigar' to false.
+
+ bool report_begin_position; // Give ref_begin and query_begin.
+ // If it is not set, ref_begin and query_begin are -1.
+ bool report_cigar; // Give cigar_string and cigar.
+ // report_begin_position is automatically TRUE.
+
+ // When *report_cigar* is true and alignment passes these two filters,
+ // cigar_string and cigar will be given.
+ uint16_t score_filter; // score >= score_filter
+ uint16_t distance_filter; // ((ref_end - ref_begin) < distance_filter) &&
+ // ((query_end - read_begin) < distance_filter)
+
+ Filter()
+ : report_begin_position(true)
+ , report_cigar(true)
+ , score_filter(0)
+ , distance_filter(32767)
+ {};
+
+ Filter(const bool& pos, const bool& cigar, const uint16_t& score, const uint16_t& dis)
+ : report_begin_position(pos)
+ , report_cigar(cigar)
+ , score_filter(score)
+ , distance_filter(dis)
+ {};
+};
+
+class Aligner {
+ public:
+ // =========
+  // @function Construct an Aligner with default values.
+  //           The function will build the {A,C,G,T,N} aligner.
+  //           If you need an aligner for other characters, please
+  //           use the other constructor and pass the corresponding matrix in.
+ // =========
+ Aligner(void);
+
+ // =========
+ // @function Construct an Aligner by assigning scores.
+  //           The function will build the {A,C,G,T,N} aligner.
+  //           If you need an aligner for other characters, please
+  //           use the other constructor and pass the corresponding matrix in.
+ // =========
+ Aligner(const uint8_t& match_score,
+ const uint8_t& mismatch_penalty,
+ const uint8_t& gap_opening_penalty,
+ const uint8_t& gap_extending_penalty);
+
+ // =========
+  // @function Construct an Aligner with the specified matrices.
+ // =========
+ Aligner(const int8_t* score_matrix,
+ const int& score_matrix_size,
+ const int8_t* translation_matrix,
+ const int& translation_matrix_size);
+
+ ~Aligner(void);
+
+ // =========
+  // @function Build the reference sequence so that the
+  //           Align(const char* query, const Filter& filter, Alignment* alignment) overload can be used;
+  //           otherwise the reference must be given each time an alignment is requested.
+  //           [NOTICE] If a reference sequence already exists, it will be deleted
+  //                    and replaced.
+  // @param    seq    The reference bases;
+  //                  [NOTICE] it does not need to be null terminated.
+  // @param    length The number of bases to be built.
+  // @return   The length of the built reference.
+ // =========
+ int SetReferenceSequence(const char* seq, const int& length);
+
+ void CleanReferenceSequence(void);
+
+ // =========
+ // @function Set penalties for opening and extending gaps
+ // [NOTICE] The defaults are 3 and 1 respectively.
+ // =========
+ void SetGapPenalty(const uint8_t& opening, const uint8_t& extending) {
+ gap_opening_penalty_ = opening;
+ gap_extending_penalty_ = extending;
+ };
+
+ // =========
+  // @function Align the query against the reference that is set by
+ // SetReferenceSequence.
+ // @param query The query sequence.
+ // @param filter The filter for the alignment.
+ // @param alignment The container contains the result.
+ // @return True: succeed; false: fail.
+ // =========
+ bool Align(const char* query, const Filter& filter, Alignment* alignment) const;
+
+ // =========
+  // @function Align the query against the reference.
+ // [NOTICE] The reference won't replace the reference
+ // set by SetReferenceSequence.
+ // @param query The query sequence.
+ // @param ref The reference sequence.
+  //                     [NOTICE] It does not need to be null terminated.
+ // @param ref_len The length of the reference sequence.
+ // @param filter The filter for the alignment.
+ // @param alignment The container contains the result.
+ // @return True: succeed; false: fail.
+ // =========
+ bool Align(const char* query, const char* ref, const int& ref_len,
+ const Filter& filter, Alignment* alignment) const;
+
+  // @function Clear all containers; the aligner is then disabled.
+  //           To rebuild the aligner, please use the ReBuild functions.
+ void Clear(void);
+
+ // =========
+  // @function Rebuild the aligner with default values.
+ // [NOTICE] If the aligner is not cleaned, rebuilding will fail.
+ // @return True: succeed; false: fail.
+ // =========
+ bool ReBuild(void);
+
+ // =========
+  // @function Rebuild the aligner with the specified matrices.
+ // [NOTICE] If the aligner is not cleaned, rebuilding will fail.
+ // @return True: succeed; false: fail.
+ // =========
+ bool ReBuild(
+ const uint8_t& match_score,
+ const uint8_t& mismatch_penalty,
+ const uint8_t& gap_opening_penalty,
+ const uint8_t& gap_extending_penalty);
+
+ // =========
+  // @function Rebuild the aligner with the specified matrices.
+ // [NOTICE] If the aligner is not cleaned, rebuilding will fail.
+ // @return True: succeed; false: fail.
+ // =========
+ bool ReBuild(
+ const int8_t* score_matrix,
+ const int& score_matrix_size,
+ const int8_t* translation_matrix,
+ const int& translation_matrix_size);
+
+ private:
+ int8_t* score_matrix_;
+ int score_matrix_size_;
+ int8_t* translation_matrix_;
+
+ uint8_t match_score_; // default: 2
+ uint8_t mismatch_penalty_; // default: 2
+ uint8_t gap_opening_penalty_; // default: 3
+ uint8_t gap_extending_penalty_; // default: 1
+
+ int8_t* translated_reference_;
+ int32_t reference_length_;
+
+ int TranslateBase(const char* bases, const int& length, int8_t* translated) const;
+ void SetAllDefault(void);
+ void BuildDefaultMatrix(void);
+ void ClearMatrices(void);
+
+ Aligner& operator= (const Aligner&);
+ Aligner (const Aligner&);
+}; // class Aligner
+
+
+// ================
+// inline functions
+// ================
+inline void Aligner::CleanReferenceSequence(void) {
+ if (reference_length_ == 0) return;
+
+ // delete the current buffer
+ if (reference_length_ > 1) delete [] translated_reference_;
+ else delete translated_reference_;
+
+ reference_length_ = 0;
+}
+} // namespace StripedSmithWaterman
+
+#endif // COMPLETE_STRIPED_SMITH_WATERMAN_CPP_H_
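
And the C++ wrapper in use, for comparison with the C interface (the sequences are arbitrary; the default constructor gives the {A,C,G,T,N} scoring of match 2, mismatch 2, gap open 3, gap extend 1 listed in the private members above):

    #include <iostream>
    #include "SeqLib/ssw_cpp.h"

    int main() {
      StripedSmithWaterman::Aligner aligner;    // default {A,C,G,T,N} scoring
      StripedSmithWaterman::Filter filter;      // report positions and cigar by default
      StripedSmithWaterman::Alignment aln;

      const std::string ref   = "CAGCCTTTCTGACCCGGAAATCAAAATAGGCACAACAAA";
      const std::string query = "CTGAGCCGGTAAATC";

      if (aligner.Align(query.c_str(), ref.c_str(), (int)ref.size(), filter, &aln))
        std::cout << "score "      << aln.sw_score
                  << " cigar "     << aln.cigar_string
                  << " ref_begin " << aln.ref_begin << std::endl;
      return 0;
    }
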
diff --git a/autogen.sh b/autogen.sh
new file mode 100755
index 0000000..54969f7
--- /dev/null
+++ b/autogen.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+set -ex
+aclocal
+autoconf
+autoheader
+automake -a --add-missing
diff --git a/benchmark/Makefile b/benchmark/Makefile
new file mode 100644
index 0000000..2bb0edd
--- /dev/null
+++ b/benchmark/Makefile
@@ -0,0 +1,17 @@
+##INCLUDES=-I/xchip/gistic/Jeremiah/software/seqan-library-2.0.2/include -I /xchip/gistic/Jeremiah/GIT/SeqLib/src -I/xchip/gistic/Jeremiah/GIT/SeqLib/htslib -I/xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1 -I/xchip/gistic/Jeremiah/software/bamtools-2.4.0/include
+INCLUDES=-I/xchip/gistic/Jeremiah/software/seqan-library-2.2.0/include -I /xchip/gistic/Jeremiah/GIT/SeqLib/src -I/xchip/gistic/Jeremiah/GIT/SeqLib/htslib -I/xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1 -I/xchip/gistic/Jeremiah/software/bamtools-2.4.0/include -I/xchip/gistic/Jeremiah/software/bzip2-1.0.6
+LIBS=/xchip/gistic/Jeremiah/software/bamtools-2.4.0/lib/libbamtools.a /xchip/gistic/Jeremiah/GIT/SeqLib/src/libseqlib.a /xchip/gistic/Jeremiah/GIT/SeqLib/htslib/libhts.a /xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1/stage/lib/libboost_timer.a /xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1/stage/lib/libboost_chrono.a /xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1/stage/lib/libboost_system.a /xchip/gistic/Jeremiah/software/bzip2-1.0.6/libbz2.a
+CFLAGS=-W -Wall -pedantic -std=c++14 -DSEQAN_HAS_ZLIB=1 -DSEQAN_HAS_BZIP2=1
+
+binaries=benchmark
+
+all: benchmark.o
+ g++ benchmark.o -g -o benchmark $(LIBS) -lrt -lpthread -lz -lm
+
+benchmark.o: benchmark.cpp
+ g++ -g -c benchmark.cpp $(INCLUDES) $(CFLAGS)
+
+.PHONY: clean
+
+clean:
+ rm -f $(binaries) *.o
diff --git a/benchmark/benchmark.cpp b/benchmark/benchmark.cpp
new file mode 100644
index 0000000..4e456ae
--- /dev/null
+++ b/benchmark/benchmark.cpp
@@ -0,0 +1,208 @@
+#define USE_BOOST
+
+#define JUMPING_TEST 1
+//#define READ_TEST 1
+
+#include "SeqLib/SeqLibUtils.h"
+
+#ifdef USE_BOOST
+#include <boost/timer/timer.hpp>
+#endif
+
+#include <cmath>
+
+//#define RUN_SEQAN 1
+//#define RUN_BAMTOOLS 1
+#define RUN_SEQLIB 1
+
+#ifdef RUN_SEQAN
+#include <seqan/bam_io.h>
+#include <seqan/sequence.h>
+using namespace seqan;
+#endif
+
+#ifdef RUN_SEQLIB
+#include "SeqLib/BamReader.h"
+#include "SeqLib/BamWriter.h"
+#endif
+
+#define BAMTOOLS_GET_CORE 1
+
+#ifdef RUN_BAMTOOLS
+#include "api/BamReader.h"
+#endif
+
+int main()
+{
+
+ const size_t limit = 5000000;
+ const size_t print_limit = 1000000;
+ const size_t jump_limit = 1000;
+ size_t count = 0;
+
+ //std::string bam = "/xchip/gistic/Jeremiah/GIT/SeqLib/seq_test/test_data/small.bam";
+ std::string bam = "/broad/broadsv/NA12878/20120117_ceu_trio_b37_decoy/CEUTrio.HiSeq.WGS.b37_decoy.NA12878.clean.dedup.recal.20120117.bam";
+ std::string bami = "/broad/broadsv/NA12878/20120117_ceu_trio_b37_decoy/CEUTrio.HiSeq.WGS.b37_decoy.NA12878.clean.dedup.recal.20120117.bam.bai";
+ std::string obam = "/xchip/gistic/Jeremiah/GIT/SeqLib/seq_test/tmp_out.bam";
+
+#ifdef USE_BOOST
+ boost::timer::auto_cpu_timer t;
+#endif
+
+#ifdef RUN_BAMTOOLS
+ std::cerr << " **** RUNNING BAMTOOLS **** " << std::endl;
+ BamTools::BamReader btr;
+ btr.Open(bam);
+ btr.OpenIndex(bami);
+
+ BamTools::BamAlignment ba;
+ std::vector<BamTools::BamAlignment> bav;
+
+#ifdef READ_TEST
+#ifndef BAMTOOLS_GET_CORE
+ std::cerr << " **** FULL REC **** " << std::endl;
+ while(btr.GetNextAlignment(ba) && count++ < limit) {
+#else
+ std::cerr << " **** CORE REC **** " << std::endl;
+ while(btr.GetNextAlignmentCore(ba) && count++ < limit) {
+#endif
+ if (count % print_limit == 0)
+ std::cerr << "...at read " << SeqLib::AddCommas(count) << std::endl;
+ bav.push_back(ba);
+ }
+#endif
+
+#ifdef JUMPING_TEST
+ // perform jumping
+ for (int i = 0; i < jump_limit; ++i) {
+ int chr = rand() % 22;
+ int pos = rand() % 1000000 + 1000000;
+ if (btr.SetRegion(BamTools::BamRegion(chr,chr, pos, pos + 10000))) {
+ btr.GetNextAlignment(ba);
+ bav.push_back(ba);
+ } else {
+ std::cerr << " jump to " << chr << "-" << pos << " not successful " << std::endl;
+ }
+ }
+#endif
+
+#endif
+
+#ifdef RUN_SEQLIB
+ std::cerr << " **** RUNNING SEQLIB **** " << std::endl;
+ SeqLib::BamReader r;
+ r.Open(bam);
+ //SeqLib::BamWriter w(SeqLib::BAM);
+ //w.SetHeader(r.Header());
+ //w.Open(obam);
+
+
+ SeqLib::BamRecord rec;
+ SeqLib::BamRecordVector bav;
+#ifdef READ_TEST
+ std::vector<std::string> sq;
+ while(r.GetNextRecord(rec) && count++ < limit) {
+ if (count % print_limit == 0)
+ std::cerr << "...at read " << SeqLib::AddCommas(count) << std::endl;
+ bav.push_back(rec);
+ //sq.push_back(rec.Sequence());
+ }
+#endif
+
+#ifdef JUMPING_TEST
+ // perform jumping test
+ for (int i = 0; i < jump_limit; ++i) {
+ int chr = rand() % 22;
+ int pos = rand() % 1000000 + 1000000;
+ r.SetRegion(SeqLib::GenomicRegion(chr,pos, pos + 10000));
+ r.GetNextRecord(rec);
+ bav.push_back(rec);
+ }
+#endif
+
+#endif
+
+#ifdef RUN_SEQAN
+
+ std::cerr << " **** RUNNING SEQAN **** " << std::endl;
+
+ seqan::BamFileIn bamFileIn;
+ seqan::BamHeader header;
+
+ std::vector<seqan::BamAlignmentRecord> bav;
+ seqan::BamAlignmentRecord record;
+
+ if (!open(bamFileIn, bam.c_str(), seqan::OPEN_RDONLY))
+ {
+ std::cerr << "ERROR: could not open input file " << bam << ".\n";
+ return 1;
+ }
+
+ // Open output SAM file.
+ //seqan::BamFileOut samFileOut(context(bamFileIn), obam.c_str());
+
+ // Copy header.
+ try
+ {
+ readHeader(header, bamFileIn);
+ //writeHeader(samFileOut, header);
+ }
+ catch (seqan::IOError const & e)
+ {
+ std::cerr << "ERROR: could not copy header. " << e.what() << "\n";
+ }
+
+#ifdef JUMPING_TEST
+ // read the index
+ BamIndex<Bai> baiIndex;
+ if (!open(baiIndex, bami.c_str()))
+ {
+ std::cerr << "ERROR: Could not read BAI index file " << bami << "\n";
+ return 1;
+ }
+
+ bool hasAlignments = false;
+ for (int i = 0; i < jump_limit; ++i) {
+ int chr = rand() % 22;
+ int pos = rand() % 1000000 + 1000000;
+ if (!jumpToRegion(bamFileIn, hasAlignments, chr, pos, pos+10000, baiIndex)) {
+ std::cerr << "ERROR: Could not jump to " << pos << ":" << (pos+10000) << "\n";
+ return 1;
+ }
+ if (hasAlignments) {
+ readRecord(record, bamFileIn);
+ bav.push_back(record);
+ } else {
+ std::cerr << "no alignments here " << std::endl;
+ }
+
+
+ }
+
+#endif
+
+#ifdef READ_TEST
+ // Copy all records.
+ while (!atEnd(bamFileIn) && count++ < limit)
+ {
+ try
+ {
+ if (count % print_limit == 0)
+ std::cerr << "...at read " << SeqLib::AddCommas(count) << std::endl;
+ readRecord(record, bamFileIn);
+ bav.push_back(record);
+ //writeRecord(samFileOut, record);
+ }
+ catch (seqan::IOError const & e)
+ {
+ std::cerr << "ERROR: could not copy record. " << e.what() << "\n";
+ }
+ }
+#endif
+
+#endif
+
+ std::cerr << " Copied " << bav.size() << " records " << std::endl;
+
+ return 0;
+}
diff --git a/config.h.in b/config.h.in
new file mode 100644
index 0000000..62ffda6
--- /dev/null
+++ b/config.h.in
@@ -0,0 +1,58 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* clock_gettime found */
+#undef HAVE_CLOCK_GETTIME
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the home page for this package. */
+#undef PACKAGE_URL
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Version number of package */
+#undef VERSION
diff --git a/configure b/configure
new file mode 100755
index 0000000..ec499b1
--- /dev/null
+++ b/configure
@@ -0,0 +1,6322 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.69 for seqkit 1.0.
+#
+# Report bugs to <jwala at broadinstitute.org>.
+#
+#
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
+if test "x$CONFIG_SHELL" = x; then
+ as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
+else
+ case \`(set -o) 2>/dev/null\` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+"
+ as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+
+else
+ exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
+ as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+ as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+ eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+ test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+test \$(( 1 + 1 )) = 2 || exit 1"
+ if (eval "$as_required") 2>/dev/null; then :
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ as_found=:
+ case $as_dir in #(
+ /*)
+ for as_base in sh bash ksh sh5; do
+ # Try only shells that exist, to save several forks.
+ as_shell=$as_dir/$as_base
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ CONFIG_SHELL=$as_shell as_have_required=yes
+ if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ break 2
+fi
+fi
+ done;;
+ esac
+ as_found=false
+done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+ CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
+IFS=$as_save_IFS
+
+
+ if test "x$CONFIG_SHELL" != x; then :
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+fi
+
+ if test x$as_have_required = xno; then :
+ $as_echo "$0: This script requires a shell more modern than all"
+ $as_echo "$0: the shells that I found on your system."
+ if test x${ZSH_VERSION+set} = xset ; then
+ $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+ $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+ else
+ $as_echo "$0: Please tell bug-autoconf at gnu.org and
+$0: jwala at broadinstitute.org about your system, including
+$0: any error possibly output before this message. Then
+$0: install a modern shell, or manually run the script
+$0: under such a shell if you do have one."
+ fi
+ exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+ as_lineno_1=$LINENO as_lineno_1a=$LINENO
+ as_lineno_2=$LINENO as_lineno_2a=$LINENO
+ eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+ test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+ # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME='seqkit'
+PACKAGE_TARNAME='seqkit'
+PACKAGE_VERSION='1.0'
+PACKAGE_STRING='seqkit 1.0'
+PACKAGE_BUGREPORT='jwala at broadinstitute.org'
+PACKAGE_URL=''
+
+ac_unique_file="src/BamReader.cpp"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+LIBOBJS
+AM_CXXFLAGS
+EGREP
+GREP
+CXXCPP
+RANLIB
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+ac_ct_CC
+CFLAGS
+CC
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+am__nodep
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__quote
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CXX
+CPPFLAGS
+LDFLAGS
+CXXFLAGS
+CXX
+MAINT
+MAINTAINER_MODE_FALSE
+MAINTAINER_MODE_TRUE
+AM_BACKSLASH
+AM_DEFAULT_VERBOSITY
+AM_DEFAULT_V
+AM_V
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+runstatedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_silent_rules
+enable_maintainer_mode
+enable_dependency_tracking
+enable_development
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CXX
+CXXFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CCC
+CC
+CFLAGS
+CXXCPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+runstatedir='${localstatedir}/run'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -runstatedir | --runstatedir | --runstatedi | --runstated \
+ | --runstate | --runstat | --runsta | --runst | --runs \
+ | --run | --ru | --r)
+ ac_prev=runstatedir ;;
+ -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+ | --run=* | --ru=* | --r=*)
+ runstatedir=$ac_optarg ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ case $ac_envvar in #(
+ '' | [0-9]* | *[!_$as_cr_alnum]* )
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+ esac
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ as_fn_error $? "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir runstatedir
+do
+ eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ as_fn_error $? "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures seqkit 1.0 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking ...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/seqkit]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+
+Program names:
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM run sed PROGRAM on installed program names
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+ case $ac_init_help in
+ short | recursive ) echo "Configuration of seqkit 1.0:";;
+ esac
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-silent-rules less verbose build output (undo: "make V=1")
+ --disable-silent-rules verbose build output (undo: "make V=0")
+ --enable-maintainer-mode
+ enable make rules and dependencies not useful (and
+ sometimes confusing) to the casual installer
+ --enable-dependency-tracking
+ do not reject slow dependency extractors
+ --disable-dependency-tracking
+ speeds up one-time build
+ --enable-development Turn on development options, like failing
+ compilation on warnings
+
+Some influential environment variables:
+ CXX C++ compiler command
+ CXXFLAGS C++ compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CC C compiler command
+ CFLAGS C compiler flags
+ CXXCPP C++ preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <jwala at broadinstitute.org>.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+ # Check for guested configure.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+seqkit configure 1.0
+generated by GNU Autoconf 2.69
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
+
+# ac_fn_cxx_try_cpp LINENO
+# ------------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_cpp ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } > conftest.i && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_cpp
+
+# ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES
+# ---------------------------------------------------------
+# Tests whether HEADER exists, giving a warning if it cannot be compiled using
+# the include files in INCLUDES and setting the cache variable VAR
+# accordingly.
+ac_fn_cxx_check_header_mongrel ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if eval \${$3+:} false; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
+$as_echo_n "checking $2 usability... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_header_compiler=yes
+else
+ ac_header_compiler=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
+$as_echo_n "checking $2 presence... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <$2>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ ac_header_preproc=yes
+else
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in #((
+ yes:no: )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+( $as_echo "## --------------------------------------- ##
+## Report this to jwala at broadinstitute.org ##
+## --------------------------------------- ##"
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=\$ac_header_compiler"
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_cxx_check_header_mongrel
+
+# ac_fn_cxx_try_run LINENO
+# ------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
+# that executables *can* be run.
+ac_fn_cxx_try_run ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=$ac_status
+fi
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_run
+
+# ac_fn_cxx_check_header_compile LINENO HEADER VAR INCLUDES
+# ---------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_cxx_check_header_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_cxx_check_header_compile
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext conftest$ac_exeext
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ test -x conftest$ac_exeext
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+ # interfere with the next link command; also delete a directory that is
+ # left behind by Apple's compiler. We do this before executing the actions.
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_link
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by seqkit $as_me 1.0, which was
+generated by GNU Autoconf 2.69. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ $as_echo "PATH: $as_dir"
+ done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+ 2)
+ as_fn_append ac_configure_args1 " '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ as_fn_append ac_configure_args " '$ac_arg'"
+ ;;
+ esac
+ done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ $as_echo "## ---------------- ##
+## Cache variables. ##
+## ---------------- ##"
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ $as_echo "## ----------------- ##
+## Output variables. ##
+## ----------------- ##"
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ $as_echo "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ $as_echo "## ----------- ##
+## confdefs.h. ##
+## ----------- ##"
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ $as_echo "$as_me: caught signal $ac_signal"
+ $as_echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
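
The same trap idiom, reduced to a standalone sketch: scratch files are removed both on normal exit and on the signals registered above (the file name is illustrative):

  scratch=conftest.$$
  : > "$scratch"
  # On EXIT (0) and on HUP/INT/PIPE/TERM, remove the scratch file.
  trap 'rm -f "$scratch"' 0
  for sig in 1 2 13 15; do
    trap 'rm -f "$scratch"; exit 1' $sig
  done
  echo "working with $scratch"
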
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+$as_echo "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+ # We do not want a PATH search for config.site.
+ case $CONFIG_SITE in #((
+ -*) ac_site_file1=./$CONFIG_SITE;;
+ */*) ac_site_file1=$CONFIG_SITE;;
+ *) ac_site_file1=./$CONFIG_SITE;;
+ esac
+elif test "x$prefix" != xNONE; then
+ ac_site_file1=$prefix/share/config.site
+ ac_site_file2=$prefix/etc/config.site
+else
+ ac_site_file1=$ac_default_prefix/share/config.site
+ ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+ test "x$ac_site_file" = xNONE && continue
+ if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file" \
+ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+done
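
For reference, a site file is just a shell fragment that pre-seeds configure variables before the cache is read; a hypothetical example of how the loop above gets exercised:

  # ~/config.site (hypothetical contents):
  #   CC=clang
  #   CFLAGS='-O2 -g'
  CONFIG_SITE=$HOME/config.site ./configure
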
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special files
+ # actually), so we avoid doing that. DJGPP emulates it as a regular file.
+ if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ ac_cache_corrupted=:
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+ eval $ac_var=\$ac_old_val
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5
+$as_echo "$as_me: former value: \`$ac_old_val'" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5
+$as_echo "$as_me: current value: \`$ac_new_val'" >&2;}
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+fi
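
The whitespace-tolerant comparison above relies on an unquoted `echo` collapsing runs of blanks; the trick in isolation:

  old='-O2   -g'
  new='-O2 -g'
  old_w=`echo x $old`
  new_w=`echo x $new`
  test "$old_w" = "$new_w" && echo "values differ only in whitespace"
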
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+am__api_version='1.15'
+
+ac_aux_dir=
+for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has completely different semantics
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+$as_echo_n "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if ${ac_cv_path_install+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in #((
+ ./ | .// | /[cC]/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+ done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
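The probe above, reduced to its essence: a BSD-compatible `install` must accept several source files plus a directory target in a single call (scratch file names chosen for illustration):

  echo one > conftest.one
  echo two > conftest.two
  mkdir conftest.dir
  if install -c conftest.one conftest.two "`pwd`/conftest.dir" &&
     test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two
  then
    echo "install can handle multiple files"
  fi
  rm -rf conftest.one conftest.two conftest.dir
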
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
+$as_echo_n "checking whether build environment is sane... " >&6; }
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[\\\"\#\$\&\'\`$am_lf]*)
+ as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;;
+esac
+case $srcdir in
+ *[\\\"\#\$\&\'\`$am_lf\ \ ]*)
+ as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;;
+esac
+
+# Do 'set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$*" != "X $srcdir/configure conftest.file" \
+ && test "$*" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ as_fn_error $? "ls -t appears to fail. Make sure there is not a broken
+ alias in your environment" "$LINENO" 5
+ fi
+ if test "$2" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
+ test "$2" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ as_fn_error $? "newly created file is older than distributed files!
+Check your system clock" "$LINENO" 5
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+
+rm -f conftest.file
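
The heart of that sanity check, in isolation: a file written now must sort ahead of the distributed configure script in `ls -t` output (running from the top of an unpacked source tree is assumed):

  echo stamp > conftest.file
  set X `ls -t ./configure conftest.file 2>/dev/null`
  if test "$2" = conftest.file; then
    echo "system clock and file timestamps look sane"
  fi
  rm -f conftest.file
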
+
+test "$program_prefix" != NONE &&
+ program_transform_name="s&^&$program_prefix&;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s&\$&$program_suffix&;$program_transform_name"
+# Double any \ or $.
+# By default was `s,x,x', remove it if useless.
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
+
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
+
+if test x"${MISSING+set}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+ *)
+ MISSING="\${SHELL} $am_aux_dir/missing" ;;
+ esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
+else
+ am_missing_run=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
+$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
+fi
+
+if test x"${install_sh+set}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+
+# Installed binaries are usually stripped using 'strip' when the user
+# runs "make install-strip". However, 'strip' might not be the right
+# tool to use in cross-compilation environments, so Automake honors
+# the 'STRIP' environment variable to override this program.
+if test "$cross_compiling" != no; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_STRIP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_STRIP" = x; then
+ STRIP=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ STRIP=$ac_ct_STRIP
+ fi
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5
+$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+ if ${ac_cv_path_mkdir+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in mkdir gmkdir; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue
+ case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
+ 'mkdir (GNU coreutils) '* | \
+ 'mkdir (coreutils) '* | \
+ 'mkdir (fileutils) '4.1*)
+ ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
+ break 3;;
+ esac
+ done
+ done
+ done
+IFS=$as_save_IFS
+
+fi
+
+ test -d ./--version && rmdir ./--version
+ if test "${ac_cv_path_mkdir+set}" = set; then
+ MKDIR_P="$ac_cv_path_mkdir -p"
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for MKDIR_P within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ MKDIR_P="$ac_install_sh -d"
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
+$as_echo "$MKDIR_P" >&6; }
+
+for ac_prog in gawk mawk nawk awk
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AWK+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AWK"; then
+ ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_AWK="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+$as_echo "$AWK" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$AWK" && break
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+ @echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+ *@@@%%%=?*=@@@%%%*)
+ eval ac_cv_prog_make_${ac_make}_set=yes;;
+ *)
+ eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ SET_MAKE=
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
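
SET_MAKE is later substituted into the generated Makefiles so that recursive invocations keep working even when the running make does not define $(MAKE) itself; roughly (fragment shown only as an illustration, not taken from this package's Makefile.am):

  # In a generated Makefile.in this appears as:
  #   @SET_MAKE@
  #   all-recursive:
  #           cd subdir && $(MAKE) all
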
+
+rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+
+# Check whether --enable-silent-rules was given.
+if test "${enable_silent_rules+set}" = set; then :
+ enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=1;;
+esac
+am_make=${MAKE-make}
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+$as_echo_n "checking whether $am_make supports nested variables... " >&6; }
+if ${am_cv_make_support_nested_variables+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if $as_echo 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+ @$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+$as_echo "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
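
A usage note on the verbosity machinery configured above (both switches are standard Automake behaviour, shown only as an example):

  ./configure --enable-silent-rules   # terse "  CXX  foo.o" style build lines
  make V=1                            # per-invocation override: print full command lines
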
+
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ am__isrc=' -I$(srcdir)'
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+
+
+# Define the identity of the package.
+ PACKAGE='seqkit'
+ VERSION='1.0'
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE "$PACKAGE"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define VERSION "$VERSION"
+_ACEOF
+
+# Some tools Automake needs.
+
+ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
+
+
+AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
+
+
+AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
+
+
+AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
+
+
+MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+mkdir_p='$(MKDIR_P)'
+
+# We need awk for the "check" target (and possibly the TAP driver). The
+# system "awk" is bad on some platforms.
+# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AMTAR='$${TAR-tar}'
+
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar pax cpio none'
+
+am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+
+
+
+
+
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake at gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
+ fi
+fi
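
That probe can be run on its own; all three spellings must succeed with no file operands for the generated Makefile recipes to be safe:

  if rm -f && rm -fr && rm -rf; then
    echo "rm tolerates -f with no operands"
  else
    echo "rm is too old for the generated Makefiles" >&2
  fi
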
+
+
+ac_config_headers="$ac_config_headers config.h"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5
+$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; }
+ # Check whether --enable-maintainer-mode was given.
+if test "${enable_maintainer_mode+set}" = set; then :
+ enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval
+else
+ USE_MAINTAINER_MODE=no
+fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5
+$as_echo "$USE_MAINTAINER_MODE" >&6; }
+ if test $USE_MAINTAINER_MODE = yes; then
+ MAINTAINER_MODE_TRUE=
+ MAINTAINER_MODE_FALSE='#'
+else
+ MAINTAINER_MODE_TRUE='#'
+ MAINTAINER_MODE_FALSE=
+fi
+
+ MAINT=$MAINTAINER_MODE_TRUE
+
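Usage sketch for the switch just defined (a standard Automake option; disabled by default here):

  ./configure --enable-maintainer-mode   # re-enable rules that regenerate configure and Makefile.in
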
+
+##m4_include([m4/m4_ax_openmp.m4])
+
+# Checks for programs.
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+ if test -n "$CCC"; then
+ CXX=$CCC
+ else
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CXX"; then
+ ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CXX" && break
+done
+
+ if test "x$ac_ct_CXX" = x; then
+ CXX="g++"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_ct_CXX
+ fi
+fi
+
+ fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers and get an intuition of the
+# executable suffix (exeext).
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C++ compiler works" >&5
+$as_echo_n "checking whether the C++ compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+if test -z "$ac_file"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C++ compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler default output file name" >&5
+$as_echo_n "checking for C++ compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+ { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if { ac_try='./conftest$ac_cv_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C++ compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if ${ac_cv_cxx_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GXX=yes
+else
+ GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if ${ac_cv_prog_cxx_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+ ac_cxx_werror_flag=yes
+ ac_cv_prog_cxx_g=no
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+else
+ CXXFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+ CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+ if test "$GXX" = yes; then
+ CXXFLAGS="-g -O2"
+ else
+ CXXFLAGS="-g"
+ fi
+else
+ if test "$GXX" = yes; then
+ CXXFLAGS="-O2"
+ else
+ CXXFLAGS=
+ fi
+fi
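
Because the block above only picks a default when CXXFLAGS was unset, flags supplied at configure time are preserved; for example:

  ./configure CXXFLAGS="-O3 -g0"   # keeps the user's flags instead of the default -g -O2
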
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+DEPDIR="${am__leading_dot}deps"
+
+ac_config_commands="$ac_config_commands depfiles"
+
+
+am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+ @echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5
+$as_echo_n "checking for style of include used by $am_make... " >&6; }
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from 'make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+ am__include=include
+ am__quote=
+ _am_result=GNU
+ ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+ echo '.include "confinc"' > confmf
+ case `$am_make -s -f confmf 2> /dev/null` in #(
+ *the\ am__doit\ target*)
+ am__include=.include
+ am__quote="\""
+ _am_result=BSD
+ ;;
+ esac
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5
+$as_echo "$_am_result" >&6; }
+rm -f confinc confmf
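
The detection above in miniature: write a one-target makefile, include it from a second file, and see whether the running make understands the GNU `include` spelling (BSD make needs `.include "confinc"` instead):

  printf 'am__doit:\n\t@echo this is the am__doit target\n' > confinc
  echo 'include confinc' > confmf
  ${MAKE-make} -s -f confmf 2>/dev/null
  rm -f confinc confmf
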
+
+# Check whether --enable-dependency-tracking was given.
+if test "${enable_dependency_tracking+set}" = set; then :
+ enableval=$enable_dependency_tracking;
+fi
+
+if test "x$enable_dependency_tracking" != xno; then
+ am_depcomp="$ac_aux_dir/depcomp"
+ AMDEPBACKSLASH='\'
+ am__nodep='_no'
+fi
+ if test "x$enable_dependency_tracking" != xno; then
+ AMDEP_TRUE=
+ AMDEP_FALSE='#'
+else
+ AMDEP_TRUE='#'
+ AMDEP_FALSE=
+fi
+
+
+
+depcc="$CXX" am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CXX_dependencies_compiler_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CXX_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CXX_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CXX_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; }
+CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
+ am__fastdepCXX_TRUE=
+ am__fastdepCXX_FALSE='#'
+else
+ am__fastdepCXX_TRUE='#'
+ am__fastdepCXX_FALSE=
+fi
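
Related usage note: the conditional above is what --disable-dependency-tracking switches off, which can shave time off one-shot builds because no .Po dependency files are generated (a standard Automake option):

  ./configure --disable-dependency-tracking
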
+
+ ## test for cpp compiler
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+else
+ CFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
+$as_echo_n "checking whether $CC understands -c and -o together... " >&6; }
+if ${am_cv_prog_cc_c_o+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5
+ ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
+$as_echo "$am_cv_prog_cc_c_o" >&6; }
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+depcc="$CC" am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CC_dependencies_compiler_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CC_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CC_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CC_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
+CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+ am__fastdepCC_TRUE=
+ am__fastdepCC_FALSE='#'
+else
+ am__fastdepCC_TRUE='#'
+ am__fastdepCC_FALSE=
+fi
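# NOTE (illustrative annotation, not part of the upstream file or this diff):
# when the probe above settles on the "gcc3" depmode, am__fastdepCC is enabled
# and the generated Makefiles let the compiler emit dependency files as a side
# effect of compilation instead of routing every command through the depcomp
# wrapper, roughly:

  gcc -MT foo.o -MD -MP -MF .deps/foo.Tpo -c -o foo.o foo.c
  mv -f .deps/foo.Tpo .deps/foo.Po

# (foo.c and the .deps paths are hypothetical; the real rules are the ones
# automake writes into Makefile.in.)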
+
+ ## test for C compiler
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_RANLIB+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$RANLIB"; then
+ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
+$as_echo "$RANLIB" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+ ac_ct_RANLIB=$RANLIB
+ # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_RANLIB"; then
+ ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_RANLIB="ranlib"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
+$as_echo "$ac_ct_RANLIB" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_RANLIB" = x; then
+ RANLIB=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ RANLIB=$ac_ct_RANLIB
+ fi
+else
+ RANLIB="$ac_cv_prog_RANLIB"
+fi
+ ## required if libraries are built in package
+
+# Check for headers
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
+$as_echo_n "checking how to run the C++ preprocessor... " >&6; }
+if test -z "$CXXCPP"; then
+ if ${ac_cv_prog_CXXCPP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CXXCPP needs to be expanded
+ for CXXCPP in "$CXX -E" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+ break
+fi
+
+ done
+ ac_cv_prog_CXXCPP=$CXXCPP
+
+fi
+ CXXCPP=$ac_cv_prog_CXXCPP
+else
+ ac_cv_prog_CXXCPP=$CXXCPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
+$as_echo "$CXXCPP" >&6; }
+ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if ${ac_cv_path_GREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_GREP" || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_GREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_GREP"; then
+ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if ${ac_cv_path_EGREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ if test -z "$EGREP"; then
+ ac_path_EGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_EGREP" || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_EGREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_EGREP"; then
+ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_header_stdc=yes
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then :
+ :
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_run "$LINENO"; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_cxx_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
+"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
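# NOTE (illustrative annotation, not part of the upstream file or this diff):
# for each header the loop above detects, a HAVE_<HEADER> macro is appended to
# confdefs.h and later lands in config.h, e.g. on a typical Linux host:
#
#   #define HAVE_SYS_TYPES_H 1
#   #define HAVE_STDINT_H 1
#   #define HAVE_UNISTD_H 1
#
# (The exact set depends on what the probes find at build time.)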
+
+
+ac_fn_cxx_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default"
+if test "x$ac_cv_header_zlib_h" = xyes; then :
+
+fi
+
+
+
+# Check for libraries
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gzopen" >&5
+$as_echo_n "checking for library containing gzopen... " >&6; }
+if ${ac_cv_search_gzopen+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char gzopen ();
+int
+main ()
+{
+return gzopen ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' z; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ if ac_fn_cxx_try_link "$LINENO"; then :
+ ac_cv_search_gzopen=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext
+ if ${ac_cv_search_gzopen+:} false; then :
+ break
+fi
+done
+if ${ac_cv_search_gzopen+:} false; then :
+
+else
+ ac_cv_search_gzopen=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gzopen" >&5
+$as_echo "$ac_cv_search_gzopen" >&6; }
+ac_res=$ac_cv_search_gzopen
+if test "$ac_res" != no; then :
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+else
+ as_fn_error $? "libz not found, please install zlib (http://www.zlib.net/)" "$LINENO" 5
+fi
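# NOTE (illustrative annotation, not part of the upstream file or this diff):
# the block above is Autoconf's AC_SEARCH_LIBS idiom for gzopen: it links a
# dummy program first with no extra library, then with -lz, and appends the
# first variant that links to LIBS. Reproduced by hand it looks roughly like:

  printf 'char gzopen ();\nint main () { return gzopen (); }\n' > conftest.c
  cc conftest.c -o conftest 2>/dev/null \
    || cc conftest.c -lz -o conftest 2>/dev/null \
    || echo "libz not found, please install zlib (http://www.zlib.net/)"

# (conftest.c and the bare "cc" call are stand-ins for the cached configure
# test; only the -lz requirement and the error message come from the script
# itself.)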
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
+$as_echo_n "checking for library containing clock_gettime... " >&6; }
+if ${ac_cv_search_clock_gettime+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char clock_gettime ();
+int
+main ()
+{
+return clock_gettime ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' rt; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ if ac_fn_cxx_try_link "$LINENO"; then :
+ ac_cv_search_clock_gettime=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext
+ if ${ac_cv_search_clock_gettime+:} false; then :
+ break
+fi
+done
+if ${ac_cv_search_clock_gettime+:} false; then :
+
+else
+ ac_cv_search_clock_gettime=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
+$as_echo "$ac_cv_search_clock_gettime" >&6; }
+ac_res=$ac_cv_search_clock_gettime
+if test "$ac_res" != no; then :
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+$as_echo "#define HAVE_CLOCK_GETTIME 1" >>confdefs.h
+
+fi
+
+
+# check for c++11
+#AX_CXX_COMPILE_STDCXX_11(,[optional])
+
+# Only fail on warnings when the --enable-development flag is passed into configure
+# Check whether --enable-development was given.
+if test "${enable_development+set}" = set; then :
+ enableval=$enable_development;
+fi
+
+if test "$enable_development"; then
+ fail_on_warning="-Werror"
+fi
+
+# Set compiler flags.
+AM_CXXFLAGS="-g $fail_on_warning -Wno-unknown-pragmas"
+
+CXXFLAGS="$CXXFLAGS"
+
+CFLAGS="$CFLAGS"
+
+LDFLAGS="$LDFLAGS"
+
+
+# We always need to specify to link in external libraries
+LIBS="$LIBS -pthread"
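# NOTE (illustrative annotation, not part of the upstream file or this diff):
# the flag handling above means warnings only become fatal when the builder
# opts in:

  ./configure --enable-development   # AM_CXXFLAGS becomes "-g -Werror -Wno-unknown-pragmas"
  ./configure                        # default: "-g  -Wno-unknown-pragmas" (no -Werror)

# -pthread is appended to LIBS unconditionally in both cases.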
+
+
+ac_config_files="$ac_config_files Makefile src/Makefile src/seqtools/Makefile"
+
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes: double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \.
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ if test "x$cache_file" != "x/dev/null"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+ if test ! -f "$cache_file" || test -h "$cache_file"; then
+ cat confcache >"$cache_file"
+ else
+ case $cache_file in #(
+ */* | ?:*)
+ mv -f confcache "$cache_file"$$ &&
+ mv -f "$cache_file"$$ "$cache_file" ;; #(
+ *)
+ mv -f confcache "$cache_file" ;;
+ esac
+ fi
+ fi
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
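# NOTE (illustrative annotation, not part of the upstream file or this diff):
# the confcache written above is only preserved when a real cache file is in
# use; the usual ways to exercise this machinery are:

  ./configure -C              # same as --config-cache: read and update ./config.cache
  ./config.status --recheck   # rerun configure with its original arguments

# Both options are stock Autoconf behaviour referred to in the comments above.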
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+U=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
+$as_echo_n "checking that generated files are newer than configure... " >&6; }
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5
+$as_echo "done" >&6; }
+ if test -n "$EXEEXT"; then
+ am__EXEEXT_TRUE=
+ am__EXEEXT_FALSE='#'
+else
+ am__EXEEXT_TRUE='#'
+ am__EXEEXT_FALSE=
+fi
+
+if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then
+ as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
+ as_fn_error $? "conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
+ as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
+ as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
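# NOTE (illustrative annotation, not part of the upstream file or this diff):
# as_fn_append and as_fn_arith are portability shims: they use the shell's
# native var+=value and $(( )) forms when the feature tests above succeed and
# fall back to eval/expr otherwise. Typical uses elsewhere in this script:

  as_fn_append CONFIG_FILES " 'src/Makefile'"     # grow a list without quadratic copying
  as_fn_arith $ac_count + 1 && ac_count=$as_val   # increment a counter without forking expr

# (The first mirrors the --file= handling further down; the second mirrors the
# grep/egrep probes earlier in configure. 'src/Makefile' is just an example value.)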
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by seqkit $as_me 1.0, which was
+generated by GNU Autoconf 2.69. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration. Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ --config print configuration, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to <jwala at broadinstitute.org>."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
+ac_cs_version="\\
+seqkit config.status 1.0
+configured by $0, generated by GNU Autoconf 2.69,
+ with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
+AWK='$AWK'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=?*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ $as_echo "$ac_cs_version"; exit ;;
+ --config | --confi | --conf | --con | --co | --c )
+ $as_echo "$ac_cs_config"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
+ esac
+ as_fn_append CONFIG_FILES " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ as_fn_error $? "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+ --help | --hel | -h )
+ $as_echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+ *) as_fn_append ac_config_targets " $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
+ export CONFIG_SHELL
+ exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;;
+ "src/seqtools/Makefile") CONFIG_FILES="$CONFIG_FILES src/seqtools/Makefile" ;;
+
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp= ac_tmp=
+ trap 'exit_status=$?
+ : "${ac_tmp:=$tmp}"
+ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
+' 0
+ trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+ eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ . ./conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+ ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_tt"; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any. Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[ ]*#[ ]*define[ ][ ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+ line = \$ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$ac_tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+ esac
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ as_fn_append ac_file_inputs " '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$ac_tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir="$ac_dir"; as_fn_mkdir_p
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+ ac_MKDIR_P=$MKDIR_P
+ case $MKDIR_P in
+ [\\/$]* | ?:[\\/]* ) ;;
+ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
+ "$ac_tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&2;}
+
+ rm -f "$ac_tmp/stdin"
+ case $ac_file in
+ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+ esac \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+ } >"$ac_tmp/config.h" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$ac_tmp/config.h" "$ac_file" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ fi
+ else
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+ || as_fn_error $? "could not create -" "$LINENO" 5
+ fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$_am_arg" : 'X\(//\)[^/]' \| \
+ X"$_am_arg" : 'X\(//\)$' \| \
+ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$_am_arg" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+ :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
+$as_echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+ esac
+
+
+ case $ac_file$ac_mode in
+ "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
+ # Older Autoconf quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named 'Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+ # limit of 2048, but all sed's we know understand at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`$as_dirname -- "$mf" ||
+$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$mf" : 'X\(//\)[^/]' \| \
+ X"$mf" : 'X\(//\)$' \| \
+ X"$mf" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$mf" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running 'make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+ test -z "$am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`$as_dirname -- "$file" ||
+$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$file" : 'X\(//\)[^/]' \| \
+ X"$file" : 'X\(//\)$' \| \
+ X"$file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir=$dirpart/$fdir; as_fn_mkdir_p
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+ ;;
+
+ esac
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..2b8e763
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,46 @@
+# Process this file with autoconf to produce a configure script.
+AC_PREREQ(2.59) ## specify minimum version of autoconf
+AC_INIT(seqkit, 1.0, jwala at broadinstitute.org)
+AM_INIT_AUTOMAKE(foreign)
+AC_CONFIG_SRCDIR([src/BamReader.cpp])
+AC_CONFIG_HEADER([config.h])
+AM_MAINTAINER_MODE([disable])
+##m4_include([m4/m4_ax_openmp.m4])
+
+# Checks for programs.
+AC_PROG_CXX ## test for cpp compiler
+AC_PROG_CC ## test for C compiler
+AC_PROG_RANLIB ## required if libraries are built in package
+
+# Check for headers
+AC_LANG([C++])
+AC_CHECK_HEADER([zlib.h])
+
+# Check for libraries
+AC_SEARCH_LIBS([gzopen],[z],,[AC_MSG_ERROR([libz not found, please install zlib (http://www.zlib.net/)])])
+AC_SEARCH_LIBS([clock_gettime], [rt], [AC_DEFINE([HAVE_CLOCK_GETTIME], [1], [clock_gettime found])], )
+
+# check for c++11
+#AX_CXX_COMPILE_STDCXX_11(,[optional])
+
+# Only fail on warnings when the --enable-development flag is passed into configure
+AC_ARG_ENABLE(development, AS_HELP_STRING([--enable-development],
+ [Turn on development options, like failing compilation on warnings]))
+if test "$enable_development"; then
+ fail_on_warning="-Werror"
+fi
+
+# Set compiler flags.
+AC_SUBST(AM_CXXFLAGS, "-g $fail_on_warning -Wno-unknown-pragmas")
+AC_SUBST(CXXFLAGS, "$CXXFLAGS")
+AC_SUBST(CFLAGS, "$CFLAGS")
+AC_SUBST(LDFLAGS, "$LDFLAGS")
+
+# We always need to specify to link in external libraries
+AC_SUBST(LIBS, "$LIBS -pthread")
+
+AC_CONFIG_FILES([Makefile
+ src/Makefile
+ src/seqtools/Makefile])
+
+AC_OUTPUT
diff --git a/depcomp b/depcomp
new file mode 100755
index 0000000..04701da
--- /dev/null
+++ b/depcomp
@@ -0,0 +1,530 @@
+#! /bin/sh
+# depcomp - compile a program generating dependencies as side-effects
+
+scriptversion=2005-07-09.11
+
+# Copyright (C) 1999, 2000, 2003, 2004, 2005 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Alexandre Oliva <oliva at dcc.unicamp.br>.
+
+case $1 in
+ '')
+ echo "$0: No command. Try \`$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: depcomp [--help] [--version] PROGRAM [ARGS]
+
+Run PROGRAMS ARGS to compile a file, generating dependencies
+as side-effects.
+
+Environment variables:
+ depmode Dependency tracking mode.
+ source Source file read by `PROGRAMS ARGS'.
+ object Object file output by `PROGRAMS ARGS'.
+ DEPDIR directory where to store dependencies.
+ depfile Dependency file to output.
+ tmpdepfile Temporary file to use when outputting dependencies.
+ libtool Whether libtool is used (yes/no).
+
+Report bugs to <bug-automake at gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "depcomp $scriptversion"
+ exit $?
+ ;;
+esac
+
+if test -z "$depmode" || test -z "$source" || test -z "$object"; then
+ echo "depcomp: Variables source, object and depmode must be set" 1>&2
+ exit 1
+fi
+
+# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
+depfile=${depfile-`echo "$object" |
+ sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
+tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
+
+rm -f "$tmpdepfile"
+
+# Some modes work just like other modes, but use different flags. We
+# parameterize here, but still list the modes in the big case below,
+# to make depend.m4 easier to write. Note that we *cannot* use a case
+# here, because this file can only contain one case statement.
+if test "$depmode" = hp; then
+ # HP compiler uses -M and no extra arg.
+ gccflag=-M
+ depmode=gcc
+fi
+
+if test "$depmode" = dashXmstdout; then
+ # This is just like dashmstdout with a different argument.
+ dashmflag=-xM
+ depmode=dashmstdout
+fi
+
+case "$depmode" in
+gcc3)
+## gcc 3 implements dependency tracking that does exactly what
+## we want. Yay! Note: for some reason libtool 1.4 doesn't like
+## it if -MD -MP comes after the -MF stuff. Hmm.
+ "$@" -MT "$object" -MD -MP -MF "$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ mv "$tmpdepfile" "$depfile"
+ ;;
+
+gcc)
+## There are various ways to get dependency output from gcc. Here's
+## why we pick this rather obscure method:
+## - Don't want to use -MD because we'd like the dependencies to end
+## up in a subdir. Having to rename by hand is ugly.
+## (We might end up doing this anyway to support other compilers.)
+## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
+## -MM, not -M (despite what the docs say).
+## - Using -M directly means running the compiler twice (even worse
+## than renaming).
+ if test -z "$gccflag"; then
+ gccflag=-MD,
+ fi
+ "$@" -Wp,"$gccflag$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
+## The second -e expression handles DOS-style file names with drive letters.
+ sed -e 's/^[^:]*: / /' \
+ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
+## This next piece of magic avoids the `deleted header file' problem.
+## The problem is that when a header file which appears in a .P file
+## is deleted, the dependency causes make to die (because there is
+## typically no way to rebuild the header). We avoid this by adding
+## dummy dependencies for each header file. Too bad gcc doesn't do
+## this for us directly.
+ tr ' ' '
+' < "$tmpdepfile" |
+## Some versions of gcc put a space before the `:'. On the theory
+## that the space means something, we add a space to the output as
+## well.
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+hp)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+sgi)
+ if test "$libtool" = yes; then
+ "$@" "-Wp,-MDupdate,$tmpdepfile"
+ else
+ "$@" -MDupdate "$tmpdepfile"
+ fi
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+
+ if test -f "$tmpdepfile"; then # yes, the sourcefile depends on other files
+ echo "$object : \\" > "$depfile"
+
+ # Clip off the initial element (the dependent). Don't try to be
+ # clever and replace this with sed code, as IRIX sed won't handle
+ # lines with more than a fixed number of characters (4096 in
+ # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines;
+ # the IRIX cc adds comments like `#:fec' to the end of the
+ # dependency line.
+ tr ' ' '
+' < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
+ tr '
+' ' ' >> $depfile
+ echo >> $depfile
+
+ # The second pass generates a dummy entry for each header file.
+ tr ' ' '
+' < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
+ >> $depfile
+ else
+ # The sourcefile does not contain any dependencies, so just
+ # store a dummy comment line, to avoid errors with the Makefile
+ # "include basename.Plo" scheme.
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+aix)
+ # The C for AIX Compiler uses -M and outputs the dependencies
+ # in a .u file. In older versions, this file always lives in the
+ # current directory. Also, the AIX compiler puts `$object:' at the
+ # start of each line; $object doesn't have directory information.
+ # Version 6 uses the directory in both cases.
+ stripped=`echo "$object" | sed 's/\(.*\)\..*$/\1/'`
+ tmpdepfile="$stripped.u"
+ if test "$libtool" = yes; then
+ "$@" -Wc,-M
+ else
+ "$@" -M
+ fi
+ stat=$?
+
+ if test -f "$tmpdepfile"; then :
+ else
+ stripped=`echo "$stripped" | sed 's,^.*/,,'`
+ tmpdepfile="$stripped.u"
+ fi
+
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+
+ if test -f "$tmpdepfile"; then
+ outname="$stripped.o"
+ # Each line is of the form `foo.o: dependent.h'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed -e "s,^$outname:,$object :," < "$tmpdepfile" > "$depfile"
+ sed -e "s,^$outname: \(.*\)$,\1:," < "$tmpdepfile" >> "$depfile"
+ else
+ # The sourcefile does not contain any dependencies, so just
+ # store a dummy comment line, to avoid errors with the Makefile
+ # "include basename.Plo" scheme.
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+icc)
+ # Intel's C compiler understands `-MD -MF file'. However on
+ # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c
+ # ICC 7.0 will fill foo.d with something like
+ # foo.o: sub/foo.c
+ # foo.o: sub/foo.h
+ # which is wrong. We want:
+ # sub/foo.o: sub/foo.c
+ # sub/foo.o: sub/foo.h
+ # sub/foo.c:
+ # sub/foo.h:
+ # ICC 7.1 will output
+ # foo.o: sub/foo.c sub/foo.h
+ # and will wrap long lines using \ :
+ # foo.o: sub/foo.c ... \
+ # sub/foo.h ... \
+ # ...
+
+ "$@" -MD -MF "$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ # Each line is of the form `foo.o: dependent.h',
+ # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process this invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" |
+ sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+tru64)
+ # The Tru64 compiler uses -MD to generate dependencies as a side
+ # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'.
+ # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
+ # dependencies in `foo.d' instead, so we check for that too.
+ # Subdirectories are respected.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+
+ if test "$libtool" = yes; then
+ # With Tru64 cc, shared objects can also be used to make a
+ # static library. This mechanism is used in libtool 1.4 series to
+ # handle both shared and static libraries in a single compilation.
+ # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d.
+ #
+ # With libtool 1.5 this exception was removed, and libtool now
+ # generates 2 separate objects for the 2 libraries. These two
+ # compilations output dependencies in $dir.libs/$base.o.d and
+ # in $dir$base.o.d. We have to check for both files, because
+ # one of the two compilations can be disabled. We should prefer
+ # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
+ # automatically cleaned when .libs/ is deleted, while ignoring
+ # the former would cause a distcleancheck panic.
+ tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4
+ tmpdepfile2=$dir$base.o.d # libtool 1.5
+ tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5
+ tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504
+ "$@" -Wc,-MD
+ else
+ tmpdepfile1=$dir$base.o.d
+ tmpdepfile2=$dir$base.d
+ tmpdepfile3=$dir$base.d
+ tmpdepfile4=$dir$base.d
+ "$@" -MD
+ fi
+
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+ # That's a tab and a space in the [].
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
+ else
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+#nosideeffect)
+ # This comment above is used by automake to tell side-effect
+ # dependency tracking mechanisms from slower ones.
+
+dashmstdout)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout, regardless of -o.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test $1 != '--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove `-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ test -z "$dashmflag" && dashmflag=-M
+ # Require at least two characters before searching for `:'
+ # in the target name. This is to cope with DOS-style filenames:
+ # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise.
+ "$@" $dashmflag |
+ sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile"
+ rm -f "$depfile"
+ cat < "$tmpdepfile" > "$depfile"
+ tr ' ' '
+' < "$tmpdepfile" | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+dashXmstdout)
+ # This case only exists to satisfy depend.m4. It is never actually
+ # run, as this mode is specially recognized in the preamble.
+ exit 1
+ ;;
+
+makedepend)
+ "$@" || exit $?
+ # Remove any Libtool call
+ if test "$libtool" = yes; then
+ while test $1 != '--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+ # X makedepend
+ shift
+ cleared=no
+ for arg in "$@"; do
+ case $cleared in
+ no)
+ set ""; shift
+ cleared=yes ;;
+ esac
+ case "$arg" in
+ -D*|-I*)
+ set fnord "$@" "$arg"; shift ;;
+ # Strip any option that makedepend may not understand. Remove
+ # the object too, otherwise makedepend will parse it as a source file.
+ -*|$object)
+ ;;
+ *)
+ set fnord "$@" "$arg"; shift ;;
+ esac
+ done
+ obj_suffix="`echo $object | sed 's/^.*\././'`"
+ touch "$tmpdepfile"
+ ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
+ rm -f "$depfile"
+ cat < "$tmpdepfile" > "$depfile"
+ sed '1,2d' "$tmpdepfile" | tr ' ' '
+' | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile" "$tmpdepfile".bak
+ ;;
+
+cpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test $1 != '--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove `-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ "$@" -E |
+ sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' |
+ sed '$ s: \\$::' > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ cat < "$tmpdepfile" >> "$depfile"
+ sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvisualcpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout, regardless of -o,
+ # because we must use -o when running libtool.
+ "$@" || exit $?
+ IFS=" "
+ for arg
+ do
+ case "$arg" in
+ "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
+ set fnord "$@"
+ shift
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift
+ shift
+ ;;
+ esac
+ done
+ "$@" -E |
+ sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::echo "`cygpath -u \\"\1\\"`":p' | sort | uniq > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
+ echo " " >> "$depfile"
+ . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s::\1\::p' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+none)
+ exec "$@"
+ ;;
+
+*)
+ echo "Unknown depmode $depmode" 1>&2
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-end: "$"
+# End:
diff --git a/install-sh b/install-sh
new file mode 100755
index 0000000..4d4a951
--- /dev/null
+++ b/install-sh
@@ -0,0 +1,323 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2005-05-14.22
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with many OS's install programs.
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+chmodcmd="$chmodprog 0755"
+chowncmd=
+chgrpcmd=
+stripcmd=
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=
+dst=
+dir_arg=
+dstarg=
+no_target_directory=
+
+usage="Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+ or: $0 [OPTION]... SRCFILES... DIRECTORY
+ or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+ or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+-c (ignored)
+-d create directories instead of installing files.
+-g GROUP $chgrpprog installed files to GROUP.
+-m MODE $chmodprog installed files to MODE.
+-o USER $chownprog installed files to USER.
+-s $stripprog installed files.
+-t DIRECTORY install into DIRECTORY.
+-T report an error if DSTFILE is a directory.
+--help display this help and exit.
+--version display version info and exit.
+
+Environment variables override the default commands:
+ CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG
+"
+
+while test -n "$1"; do
+ case $1 in
+ -c) shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ --help) echo "$usage"; exit $?;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd=$stripprog
+ shift
+ continue;;
+
+ -t) dstarg=$2
+ shift
+ shift
+ continue;;
+
+ -T) no_target_directory=true
+ shift
+ continue;;
+
+ --version) echo "$0 $scriptversion"; exit $?;;
+
+ *) # When -d is used, all remaining arguments are directories to create.
+ # When -t is used, the destination is already specified.
+ test -n "$dir_arg$dstarg" && break
+ # Otherwise, the last argument is the destination. Remove it from $@.
+ for arg
+ do
+ if test -n "$dstarg"; then
+ # $@ is not empty: it contains at least $arg.
+ set fnord "$@" "$dstarg"
+ shift # fnord
+ fi
+ shift # arg
+ dstarg=$arg
+ done
+ break;;
+ esac
+done
+
+if test -z "$1"; then
+ if test -z "$dir_arg"; then
+ echo "$0: no input file specified." >&2
+ exit 1
+ fi
+ # It's OK to call `install-sh -d' without argument.
+ # This can happen when creating conditional directories.
+ exit 0
+fi
+
+for src
+do
+ # Protect names starting with `-'.
+ case $src in
+ -*) src=./$src ;;
+ esac
+
+ if test -n "$dir_arg"; then
+ dst=$src
+ src=
+
+ if test -d "$dst"; then
+ mkdircmd=:
+ chmodcmd=
+ else
+ mkdircmd=$mkdirprog
+ fi
+ else
+ # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+ # might cause directories to be created, which would be especially bad
+ # if $src (and thus $dsttmp) contains '*'.
+ if test ! -f "$src" && test ! -d "$src"; then
+ echo "$0: $src does not exist." >&2
+ exit 1
+ fi
+
+ if test -z "$dstarg"; then
+ echo "$0: no destination specified." >&2
+ exit 1
+ fi
+
+ dst=$dstarg
+ # Protect names starting with `-'.
+ case $dst in
+ -*) dst=./$dst ;;
+ esac
+
+ # If destination is a directory, append the input filename; won't work
+ # if double slashes aren't ignored.
+ if test -d "$dst"; then
+ if test -n "$no_target_directory"; then
+ echo "$0: $dstarg: Is a directory" >&2
+ exit 1
+ fi
+ dst=$dst/`basename "$src"`
+ fi
+ fi
+
+ # This sed command emulates the dirname command.
+ dstdir=`echo "$dst" | sed -e 's,/*$,,;s,[^/]*$,,;s,/*$,,;s,^$,.,'`
+
+ # Make sure that the destination directory exists.
+
+ # Skip lots of stat calls in the usual case.
+ if test ! -d "$dstdir"; then
+ defaultIFS='
+ '
+ IFS="${IFS-$defaultIFS}"
+
+ oIFS=$IFS
+ # Some sh's can't handle IFS=/ for some reason.
+ IFS='%'
+ set x `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'`
+ shift
+ IFS=$oIFS
+
+ pathcomp=
+
+ while test $# -ne 0 ; do
+ pathcomp=$pathcomp$1
+ shift
+ if test ! -d "$pathcomp"; then
+ $mkdirprog "$pathcomp"
+ # mkdir can fail with a `File exists' error in case several
+ # install-sh are creating the directory concurrently. This
+ # is OK.
+ test -d "$pathcomp" || exit
+ fi
+ pathcomp=$pathcomp/
+ done
+ fi
+
+ if test -n "$dir_arg"; then
+ $doit $mkdircmd "$dst" \
+ && { test -z "$chowncmd" || $doit $chowncmd "$dst"; } \
+ && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } \
+ && { test -z "$stripcmd" || $doit $stripcmd "$dst"; } \
+ && { test -z "$chmodcmd" || $doit $chmodcmd "$dst"; }
+
+ else
+ dstfile=`basename "$dst"`
+
+ # Make a couple of temp file names in the proper directory.
+ dsttmp=$dstdir/_inst.$$_
+ rmtmp=$dstdir/_rm.$$_
+
+ # Trap to clean up those temp files at exit.
+ trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+ trap '(exit $?); exit' 1 2 13 15
+
+ # Copy the file name to the temp name.
+ $doit $cpprog "$src" "$dsttmp" &&
+
+ # and set any options; do chmod last to preserve setuid bits.
+ #
+ # If any of these fail, we abort the whole thing. If we want to
+ # ignore errors from any of these, just make sure not to ignore
+ # errors from the above "$doit $cpprog $src $dsttmp" command.
+ #
+ { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \
+ && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \
+ && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \
+ && { test -z "$chmodcmd" || $doit $chmodcmd "$dsttmp"; } &&
+
+ # Now rename the file to the real destination.
+ { $doit $mvcmd -f "$dsttmp" "$dstdir/$dstfile" 2>/dev/null \
+ || {
+ # The rename failed, perhaps because mv can't rename something else
+ # to itself, or perhaps because mv is so ancient that it does not
+ # support -f.
+
+ # Now remove or move aside any old file at destination location.
+ # We try this two ways since rm can't unlink itself on some
+ # systems and the destination file might be busy for other
+ # reasons. In this case, the final cleanup might fail but the new
+ # file should still install successfully.
+ {
+ if test -f "$dstdir/$dstfile"; then
+ $doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null \
+ || $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \
+ || {
+ echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2
+ (exit 1); exit 1
+ }
+ else
+ :
+ fi
+ } &&
+
+ # Now rename the file to the real destination.
+ $doit $mvcmd "$dsttmp" "$dstdir/$dstfile"
+ }
+ }
+ fi || { (exit 1); exit 1; }
+done
+
+# The final little trick to "correctly" pass the exit status to the exit trap.
+{
+ (exit 0); exit 0
+}
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-end: "$"
+# End:
diff --git a/issue_tracking.md b/issue_tracking.md
new file mode 100644
index 0000000..99279e5
--- /dev/null
+++ b/issue_tracking.md
@@ -0,0 +1,83 @@
+* In `RefGenome.cpp`, the ctor should use an initializer list rather than
+assignment in the function body, following best practice. In fact, considering the issues raised with these raw pointers, it may be better to use `std::shared_ptr<faidx_t>` and call `get()` whenever the raw pointer is needed (a `shared_ptr` sketch appears at the end of this list).
+ The proposed temporary fix is as follows (comments are removed because the code is self-explanatory and
+ excessive comments can become a maintenance burden):
+
+ RefGenome::RefGenome(const std::string& file ): index(nullptr) {
+
+ if (!read_access_test(file)) {
+ index = nullptr;
+ throw std::invalid_argument("RefGenome: file not found - " + file);
+ }
+
+ index = fai_load(file.c_str());
+ }
+
+* In `RefGenome::queryRegion`, the automatic variable `int len` is left uninitialized, even though it is passed to `faidx_fetch_seq` from `htslib`.
+ Proposal:
+
+ int len{0};
+
+* In `RefGenome::queryRegion`, the check on the returned `char *` throws while the data member `index` is still non-null (because the check that precedes this one has already passed).
+This leads to a resource leak.
+ Proposal:
+
+ if (!f){
+ index = nullptr; // added line for exception safety
+ throw std::invalid_argument("RefGenome::queryRegion - Could not find valid sequence");
+ }
+
+* In `SnowUtils.h`, `SnowTools::AddCommas` has an issue: if the template parameter is a floating-point type, the formatting is wrong. The proposal is to rename the function to `SnowTools::AddCommasToInt`, to discourage applying it to floating-point types (an integral-only sketch appears at the end of this list).
+
+* In `GenomicRegion.h`, the `friend` declaration at the beginning is unnecessary because there is currently no private data.
+
+* In `GenomicRegion.h`, why doesn't the ctor `GenomicRegion(int32_t, int32_t, int32_t, char)` check that the chromosome, start, and end are positive?
+ In addition, why aren't the members `chr`, `pos1`, and `pos2` unsigned and private?
+
+* In `GenomicRegion.h`, in the ctor
+ `GenomicRegion(const std::string&, const std::string&, const std::string&, bam_hdr_t*)`, why is 1 subtracted from the string-converted chromosome number?
+ Would an `else` block be better than the early return statement?
+
+* In `GenomicRegion.h`, in the function `GenomicRegion::random`, the bound of 25 in the `for` loop would be better replaced with a macro, as the tool itself is not necessarily bound to human studies.
+ In addition, why assert that `k > 0`? The assertion fails immediately when `k` starts from 0, as it does in the `for` loop.
+
+* In `GenomicRegion.h`, why is the function `GenomicRegion::chrToString` marked `static`? Could it be extracted as a utility function and placed in `SnowUtils.h`? Again, the implementation assumes human data.
+
+* In `GenomicRegion.h`, why does the function `GenomicRegion::isEmpty` insist on `chr == -1`? Is it possible that `chr != -1` but `pos1 == pos2`, so that the region is actually empty?
+ Is an empty `GenomicRegion` always invalid (given that the validity check implemented in `GenomicRegion::valid()`, which incidentally would be better named `is_valid`, checks that the chromosome is positive)?
+
+* In `GenomicRegion.h`, function `GenomicRegion::cetromereOverlap` is declared but not defined.
+
+* In `GenomicRegion.h` and `GenomicRegion.cpp`, the function `GenomicRegion::getOverlap` has a typo: it should take references instead of copies.
+
+* In `GenomicRegion.cpp`, the check in the function `GenomicRegion::pad` is possibly wrong. If the argument provided is indeed negative, then multiplying by 2 and comparing with the width are both unnecessary. Second, it would be better to change the function parameter from `int32_t` to `uint32_t`, because negative padding is counter-intuitive. Third, the check does not cover the case where padding to the left/right goes out of range (that check is actually commented out). A clamping sketch appears at the end of this list.
+
+* In `Fractions.cpp`, the `getline` call in the `while` loop shouldn't use "\n" as the delimiter, since line endings differ on other operating systems (Windows for sure).
+ There is also no member function `bool valid() const` declared for `FracRegion`, yet it is used here.
+ Last, it would be better to have a `stringSplit` function defined in `SnowUtils.h` for breaking lines of delimited files (e.g. CSV, tab-separated) into fields; see the sketch at the end of this list.
+
+* In `gChain.h`, why isn't the default ctor deleted? It leaves data members uninitialized.
+
+* In `gChain.cpp`, the function definitions are not wrapped in `namespace SnowTools`.
+
+* In `BamStats.h`, no ctors are defined even though there are data members.
+ Proposal:
+
+ BamStats() : m_group_map(){}
+
+* In `BamStats.cpp`, the function `BamReadGroup::addRead` should take a `const` reference instead of a non-const reference, to signal that the read is not modified.
+ The same applies to `BamStats::addRead`.
+
+* In `run_snowman2.cpp` in the `SnowmanSV` repo, line 25 `#define MATE_LOOKUP_MIN 3` and line 146 `static int32_t mate_lookup_min = 3;` potentially conflict.
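+
+The sketches referenced in the items above follow. They are illustrations written against the stated assumptions, not upstream SeqLib code.
+
+For the `RefGenome` items: a minimal sketch holding the `htslib` `faidx_t*` in a `std::shared_ptr` with a custom deleter, so `fai_destroy` runs exactly once no matter which path destroys the object. The class name `RefGenomeSketch` is invented for illustration; `fai_load` and `fai_destroy` are the real `htslib` calls (`fai_load` already appears in the existing ctor).
+
+    #include <htslib/faidx.h>
+    #include <memory>
+    #include <stdexcept>
+    #include <string>
+
+    class RefGenomeSketch {
+    public:
+      // The custom deleter guards against a null pointer and frees the index
+      // exactly once, however the object is eventually destroyed.
+      explicit RefGenomeSketch(const std::string& file)
+          : index(fai_load(file.c_str()),
+                  [](faidx_t* p) { if (p) fai_destroy(p); }) {
+        if (!index)
+          throw std::invalid_argument("RefGenome: could not load index for " + file);
+      }
+    private:
+      std::shared_ptr<faidx_t> index; // call index.get() where a raw faidx_t* is needed
+    };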
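+
+For the `SnowTools::AddCommas` item: a sketch that restricts the helper to integral types at compile time, so a floating-point argument becomes a compile error rather than a silently mis-formatted string. The body is a generic illustration, not the SnowTools implementation.
+
+    #include <string>
+    #include <type_traits>
+
+    template <typename T>
+    std::string AddCommasToInt(T x) {
+      static_assert(std::is_integral<T>::value,
+                    "AddCommasToInt only accepts integral types");
+      std::string s = std::to_string(x);
+      // insert a comma every three digits, walking backwards; stop before a leading '-'
+      const int stop = (!s.empty() && s[0] == '-') ? 1 : 0;
+      for (int pos = static_cast<int>(s.size()) - 3; pos > stop; pos -= 3)
+        s.insert(pos, ",");
+      return s;
+    }
+
+    // AddCommasToInt(1234567) -> "1,234,567"
+    // AddCommasToInt(3.14)    -> compile-time error instead of a wrong result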
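+
+For the `GenomicRegion::pad` item: a sketch of the suggested unsigned padding with explicit clamping. The free-function form, the 0-based lower bound, and the `chr_len` argument are assumptions made for the illustration; the real member would operate on `pos1`/`pos2` directly.
+
+    #include <algorithm>
+    #include <cstdint>
+
+    // Widen [pos1, pos2] by 'pad' on each side, clamped to [0, chr_len].
+    void pad_region(int32_t& pos1, int32_t& pos2, uint32_t pad, int32_t chr_len) {
+      const int64_t lo = static_cast<int64_t>(pos1) - static_cast<int64_t>(pad);
+      const int64_t hi = static_cast<int64_t>(pos2) + static_cast<int64_t>(pad);
+      pos1 = static_cast<int32_t>(std::max<int64_t>(0, lo));
+      pos2 = static_cast<int32_t>(std::min<int64_t>(chr_len, hi));
+    }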
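+
+For the `Fractions.cpp` item: a possible `stringSplit` helper for `SnowUtils.h`. The name and signature are a proposal, not an existing SeqLib API.
+
+    #include <sstream>
+    #include <string>
+    #include <vector>
+
+    inline std::vector<std::string> stringSplit(const std::string& line, char delim) {
+      std::vector<std::string> fields;
+      std::istringstream iss(line);
+      std::string tok;
+      while (std::getline(iss, tok, delim)) // interior empty fields come back as ""
+        fields.push_back(tok);
+      return fields;
+    }
+
+    // stringSplit("chr1\t100\t200\t0.5", '\t') -> {"chr1", "100", "200", "0.5"}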
diff --git a/json/json-forwards.h b/json/json-forwards.h
new file mode 100644
index 0000000..a4807ae
--- /dev/null
+++ b/json/json-forwards.h
@@ -0,0 +1,321 @@
+/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/).
+/// It is intended to be used with #include "json/json-forwards.h"
+/// This header provides forward declaration for all JsonCpp types.
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: LICENSE
+// //////////////////////////////////////////////////////////////////////
+
+/*
+The JsonCpp library's source code, including accompanying documentation,
+tests and demonstration applications, are licensed under the following
+conditions...
+
+The author (Baptiste Lepilleur) explicitly disclaims copyright in all
+jurisdictions which recognize such a disclaimer. In such jurisdictions,
+this software is released into the Public Domain.
+
+In jurisdictions which do not recognize Public Domain property (e.g. Germany as of
+2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is
+released under the terms of the MIT License (see below).
+
+In jurisdictions which recognize Public Domain property, the user of this
+software may choose to accept it either as 1) Public Domain, 2) under the
+conditions of the MIT License (see below), or 3) under the terms of dual
+Public Domain/MIT License conditions described here, as they choose.
+
+The MIT License is about as close to Public Domain as a license can get, and is
+described in clear, concise terms at:
+
+ http://en.wikipedia.org/wiki/MIT_License
+
+The full text of the MIT License follows:
+
+========================================================================
+Copyright (c) 2007-2010 Baptiste Lepilleur
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use, copy,
+modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+========================================================================
+(END LICENSE TEXT)
+
+The MIT license is compatible with both the GPL and commercial
+software, affording one all of the rights of Public Domain with the
+minor nuisance of being required to keep the above copyright notice
+and license text in the source code. Note also that by accepting the
+Public Domain "license" you can re-license your copy using whatever
+license you like.
+
+*/
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: LICENSE
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED
+# define JSON_FORWARD_AMALGATED_H_INCLUDED
+/// If defined, indicates that the source file is amalgated
+/// to prevent private header inclusion.
+#define JSON_IS_AMALGAMATION
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/config.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef JSON_CONFIG_H_INCLUDED
+#define JSON_CONFIG_H_INCLUDED
+#include <stddef.h>
+#include <string> //typedef String
+
+/// If defined, indicates that json library is embedded in CppTL library.
+//# define JSON_IN_CPPTL 1
+
+/// If defined, indicates that json may leverage CppTL library
+//# define JSON_USE_CPPTL 1
+/// If defined, indicates that cpptl vector based map should be used instead of
+/// std::map
+/// as Value container.
+//# define JSON_USE_CPPTL_SMALLMAP 1
+
+// If non-zero, the library uses exceptions to report bad input instead of C
+// assertion macros. The default is to use exceptions.
+#ifndef JSON_USE_EXCEPTION
+#define JSON_USE_EXCEPTION 1
+#endif
+
+/// If defined, indicates that the source file is amalgated
+/// to prevent private header inclusion.
+/// Remarks: it is automatically defined in the generated amalgated header.
+// #define JSON_IS_AMALGAMATION
+
+#ifdef JSON_IN_CPPTL
+#include <cpptl/config.h>
+#ifndef JSON_USE_CPPTL
+#define JSON_USE_CPPTL 1
+#endif
+#endif
+
+#ifdef JSON_IN_CPPTL
+#define JSON_API CPPTL_API
+#elif defined(JSON_DLL_BUILD)
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#define JSON_API __declspec(dllexport)
+#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING
+#endif // if defined(_MSC_VER)
+#elif defined(JSON_DLL)
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#define JSON_API __declspec(dllimport)
+#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING
+#endif // if defined(_MSC_VER)
+#endif // ifdef JSON_IN_CPPTL
+#if !defined(JSON_API)
+#define JSON_API
+#endif
+
+// If JSON_NO_INT64 is defined, then Json only supports the C++ "int" type for
+// integer storage, and 64-bit integer support is disabled.
+// #define JSON_NO_INT64 1
+
+#if defined(_MSC_VER) // MSVC
+# if _MSC_VER <= 1200 // MSVC 6
+ // Microsoft Visual Studio 6 only support conversion from __int64 to double
+ // (no conversion from unsigned __int64).
+# define JSON_USE_INT64_DOUBLE_CONVERSION 1
+ // Disable warning 4786 for VS6 caused by STL (identifier was truncated to '255'
+ // characters in the debug information)
+ // All projects I've ever seen with VS6 were using this globally (not bothering
+ // with pragma push/pop).
+# pragma warning(disable : 4786)
+# endif // MSVC 6
+
+# if _MSC_VER >= 1500 // MSVC 2008
+ /// Indicates that the following function is deprecated.
+# define JSONCPP_DEPRECATED(message) __declspec(deprecated(message))
+# endif
+
+#endif // defined(_MSC_VER)
+
+// In C++11 the override keyword allows you to explicitly state that a function
+// is intended to override the base-class version. This makes the code more
+// manageable and fixes a set of common hard-to-find bugs.
+#if __cplusplus >= 201103L
+# define JSONCPP_OVERRIDE override
+#else
+# define JSONCPP_OVERRIDE
+#endif
+
+#ifndef JSON_HAS_RVALUE_REFERENCES
+
+#if defined(_MSC_VER) && _MSC_VER >= 1600 // MSVC >= 2010
+#define JSON_HAS_RVALUE_REFERENCES 1
+#endif // MSVC >= 2010
+
+#ifdef __clang__
+#if __has_feature(cxx_rvalue_references)
+#define JSON_HAS_RVALUE_REFERENCES 1
+#endif // has_feature
+
+#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc)
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L)
+#define JSON_HAS_RVALUE_REFERENCES 1
+#endif // GXX_EXPERIMENTAL
+
+#endif // __clang__ || __GNUC__
+
+#endif // not defined JSON_HAS_RVALUE_REFERENCES
+
+#ifndef JSON_HAS_RVALUE_REFERENCES
+#define JSON_HAS_RVALUE_REFERENCES 0
+#endif
+
+#ifdef __clang__
+#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc)
+# if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+# define JSONCPP_DEPRECATED(message) __attribute__ ((deprecated(message)))
+# elif (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
+# define JSONCPP_DEPRECATED(message) __attribute__((__deprecated__))
+# endif // GNUC version
+#endif // __clang__ || __GNUC__
+
+#if !defined(JSONCPP_DEPRECATED)
+#define JSONCPP_DEPRECATED(message)
+#endif // if !defined(JSONCPP_DEPRECATED)
+
+#if __GNUC__ >= 6
+# define JSON_USE_INT64_DOUBLE_CONVERSION 1
+#endif
+
+#if !defined(JSON_IS_AMALGAMATION)
+
+# include "version.h"
+
+# if JSONCPP_USING_SECURE_MEMORY
+# include "allocator.h" //typedef Allocator
+# endif
+
+#endif // if !defined(JSON_IS_AMALGAMATION)
+
+namespace Json {
+typedef int Int;
+typedef unsigned int UInt;
+#if defined(JSON_NO_INT64)
+typedef int LargestInt;
+typedef unsigned int LargestUInt;
+#undef JSON_HAS_INT64
+#else // if defined(JSON_NO_INT64)
+// For Microsoft Visual use specific types as long long is not supported
+#if defined(_MSC_VER) // Microsoft Visual Studio
+typedef __int64 Int64;
+typedef unsigned __int64 UInt64;
+#else // if defined(_MSC_VER) // Other platforms, use long long
+typedef long long int Int64;
+typedef unsigned long long int UInt64;
+#endif // if defined(_MSC_VER)
+typedef Int64 LargestInt;
+typedef UInt64 LargestUInt;
+#define JSON_HAS_INT64
+#endif // if defined(JSON_NO_INT64)
+#if JSONCPP_USING_SECURE_MEMORY
+#define JSONCPP_STRING std::basic_string<char, std::char_traits<char>, Json::SecureAllocator<char> >
+#define JSONCPP_OSTRINGSTREAM std::basic_ostringstream<char, std::char_traits<char>, Json::SecureAllocator<char> >
+#define JSONCPP_OSTREAM std::basic_ostream<char, std::char_traits<char>>
+#define JSONCPP_ISTRINGSTREAM std::basic_istringstream<char, std::char_traits<char>, Json::SecureAllocator<char> >
+#define JSONCPP_ISTREAM std::istream
+#else
+#define JSONCPP_STRING std::string
+#define JSONCPP_OSTRINGSTREAM std::ostringstream
+#define JSONCPP_OSTREAM std::ostream
+#define JSONCPP_ISTRINGSTREAM std::istringstream
+#define JSONCPP_ISTREAM std::istream
+#endif // if JSONCPP_USING_SECURE_MEMORY
+} // end namespace Json
+
+#endif // JSON_CONFIG_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/config.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/forwards.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef JSON_FORWARDS_H_INCLUDED
+#define JSON_FORWARDS_H_INCLUDED
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include "config.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+
+namespace Json {
+
+// writer.h
+class FastWriter;
+class StyledWriter;
+
+// reader.h
+class Reader;
+
+// features.h
+class Features;
+
+// value.h
+typedef unsigned int ArrayIndex;
+class StaticString;
+class Path;
+class PathArgument;
+class Value;
+class ValueIteratorBase;
+class ValueIterator;
+class ValueConstIterator;
+
+} // namespace Json
+
+#endif // JSON_FORWARDS_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/forwards.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED
diff --git a/json/json.h b/json/json.h
new file mode 100644
index 0000000..fa4b690
--- /dev/null
+++ b/json/json.h
@@ -0,0 +1,2135 @@
+/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/).
+/// It is intended to be used with #include "json/json.h"
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: LICENSE
+// //////////////////////////////////////////////////////////////////////
+
+/*
+The JsonCpp library's source code, including accompanying documentation,
+tests and demonstration applications, are licensed under the following
+conditions...
+
+The author (Baptiste Lepilleur) explicitly disclaims copyright in all
+jurisdictions which recognize such a disclaimer. In such jurisdictions,
+this software is released into the Public Domain.
+
+In jurisdictions which do not recognize Public Domain property (e.g. Germany as of
+2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is
+released under the terms of the MIT License (see below).
+
+In jurisdictions which recognize Public Domain property, the user of this
+software may choose to accept it either as 1) Public Domain, 2) under the
+conditions of the MIT License (see below), or 3) under the terms of dual
+Public Domain/MIT License conditions described here, as they choose.
+
+The MIT License is about as close to Public Domain as a license can get, and is
+described in clear, concise terms at:
+
+ http://en.wikipedia.org/wiki/MIT_License
+
+The full text of the MIT License follows:
+
+========================================================================
+Copyright (c) 2007-2010 Baptiste Lepilleur
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use, copy,
+modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+========================================================================
+(END LICENSE TEXT)
+
+The MIT license is compatible with both the GPL and commercial
+software, affording one all of the rights of Public Domain with the
+minor nuisance of being required to keep the above copyright notice
+and license text in the source code. Note also that by accepting the
+Public Domain "license" you can re-license your copy using whatever
+license you like.
+
+*/
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: LICENSE
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+#ifndef JSON_AMALGATED_H_INCLUDED
+# define JSON_AMALGATED_H_INCLUDED
+/// If defined, indicates that the source file is amalgamated
+/// to prevent private header inclusion.
+#define JSON_IS_AMALGAMATION
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/version.h
+// //////////////////////////////////////////////////////////////////////
+
+// DO NOT EDIT. This file (and "version") is generated by CMake.
+// Run CMake configure step to update it.
+#ifndef JSON_VERSION_H_INCLUDED
+# define JSON_VERSION_H_INCLUDED
+
+# define JSONCPP_VERSION_STRING "1.7.2"
+# define JSONCPP_VERSION_MAJOR 1
+# define JSONCPP_VERSION_MINOR 7
+# define JSONCPP_VERSION_PATCH 2
+# define JSONCPP_VERSION_QUALIFIER
+# define JSONCPP_VERSION_HEXA ((JSONCPP_VERSION_MAJOR << 24) | (JSONCPP_VERSION_MINOR << 16) | (JSONCPP_VERSION_PATCH << 8))
+
+#ifdef JSONCPP_USING_SECURE_MEMORY
+#undef JSONCPP_USING_SECURE_MEMORY
+#endif
+#define JSONCPP_USING_SECURE_MEMORY 0
+// If non-zero, the library zeroes any memory that it has allocated before
+// it frees its memory.
+
+#endif // JSON_VERSION_H_INCLUDED
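+
+/* With the values above, JSONCPP_VERSION_HEXA evaluates to 0x01070200 (major in
+ * the top byte, then minor, then patch, with the low byte unused). A minimal
+ * sketch of a compile-time version check built on that encoding:
+ * \code
+ * #if JSONCPP_VERSION_HEXA >= 0x01070200
+ * // rely on behaviour present in jsoncpp 1.7.2 or newer
+ * #endif
+ * \endcode
+ */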
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/version.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/config.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef JSON_CONFIG_H_INCLUDED
+#define JSON_CONFIG_H_INCLUDED
+#include <stddef.h>
+#include <string> //typedef String
+
+/// If defined, indicates that the json library is embedded in the CppTL library.
+//# define JSON_IN_CPPTL 1
+
+/// If defined, indicates that json may leverage the CppTL library.
+//# define JSON_USE_CPPTL 1
+/// If defined, indicates that the CppTL vector-based map should be used instead
+/// of std::map as the Value container.
+//# define JSON_USE_CPPTL_SMALLMAP 1
+
+// If non-zero, the library uses exceptions to report bad input instead of C
+// assertion macros. The default is to use exceptions.
+#ifndef JSON_USE_EXCEPTION
+#define JSON_USE_EXCEPTION 1
+#endif
+
+/// If defined, indicates that the source file is amalgamated
+/// to prevent private header inclusion.
+/// Remarks: it is automatically defined in the generated amalgamated header.
+// #define JSON_IS_AMALGAMATION
+
+#ifdef JSON_IN_CPPTL
+#include <cpptl/config.h>
+#ifndef JSON_USE_CPPTL
+#define JSON_USE_CPPTL 1
+#endif
+#endif
+
+#ifdef JSON_IN_CPPTL
+#define JSON_API CPPTL_API
+#elif defined(JSON_DLL_BUILD)
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#define JSON_API __declspec(dllexport)
+#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING
+#endif // if defined(_MSC_VER)
+#elif defined(JSON_DLL)
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#define JSON_API __declspec(dllimport)
+#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING
+#endif // if defined(_MSC_VER)
+#endif // ifdef JSON_IN_CPPTL
+#if !defined(JSON_API)
+#define JSON_API
+#endif
+
+// If JSON_NO_INT64 is defined, then Json only supports the C++ "int" type for
+// integer storage, and 64-bit integer support is disabled.
+// #define JSON_NO_INT64 1
+
+#if defined(_MSC_VER) // MSVC
+# if _MSC_VER <= 1200 // MSVC 6
+ // Microsoft Visual Studio 6 only supports conversion from __int64 to double
+ // (no conversion from unsigned __int64).
+# define JSON_USE_INT64_DOUBLE_CONVERSION 1
+ // Disable warning 4786 for VS6 caused by STL (identifier was truncated to '255'
+ // characters in the debug information)
+ // All projects I've ever seen with VS6 were using this globally (not bothering
+ // with pragma push/pop).
+# pragma warning(disable : 4786)
+# endif // MSVC 6
+
+# if _MSC_VER >= 1500 // MSVC 2008
+ /// Indicates that the following function is deprecated.
+# define JSONCPP_DEPRECATED(message) __declspec(deprecated(message))
+# endif
+
+#endif // defined(_MSC_VER)
+
+// In C++11 the override keyword allows you to explicitly state that a function
+// is intended to override the base-class version. This makes the code more
+// manageable and fixes a set of common hard-to-find bugs.
+#if __cplusplus >= 201103L
+# define JSONCPP_OVERRIDE override
+#else
+# define JSONCPP_OVERRIDE
+#endif
+
+#ifndef JSON_HAS_RVALUE_REFERENCES
+
+#if defined(_MSC_VER) && _MSC_VER >= 1600 // MSVC >= 2010
+#define JSON_HAS_RVALUE_REFERENCES 1
+#endif // MSVC >= 2010
+
+#ifdef __clang__
+#if __has_feature(cxx_rvalue_references)
+#define JSON_HAS_RVALUE_REFERENCES 1
+#endif // has_feature
+
+#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc)
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L)
+#define JSON_HAS_RVALUE_REFERENCES 1
+#endif // GXX_EXPERIMENTAL
+
+#endif // __clang__ || __GNUC__
+
+#endif // not defined JSON_HAS_RVALUE_REFERENCES
+
+#ifndef JSON_HAS_RVALUE_REFERENCES
+#define JSON_HAS_RVALUE_REFERENCES 0
+#endif
+
+#ifdef __clang__
+#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc)
+# if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+# define JSONCPP_DEPRECATED(message) __attribute__ ((deprecated(message)))
+# elif (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
+# define JSONCPP_DEPRECATED(message) __attribute__((__deprecated__))
+# endif // GNUC version
+#endif // __clang__ || __GNUC__
+
+#if !defined(JSONCPP_DEPRECATED)
+#define JSONCPP_DEPRECATED(message)
+#endif // if !defined(JSONCPP_DEPRECATED)
+
+#if __GNUC__ >= 6
+# define JSON_USE_INT64_DOUBLE_CONVERSION 1
+#endif
+
+#if !defined(JSON_IS_AMALGAMATION)
+
+# include "version.h"
+
+# if JSONCPP_USING_SECURE_MEMORY
+# include "allocator.h" //typedef Allocator
+# endif
+
+#endif // if !defined(JSON_IS_AMALGAMATION)
+
+namespace Json {
+typedef int Int;
+typedef unsigned int UInt;
+#if defined(JSON_NO_INT64)
+typedef int LargestInt;
+typedef unsigned int LargestUInt;
+#undef JSON_HAS_INT64
+#else // if defined(JSON_NO_INT64)
+// For Microsoft Visual Studio, use compiler-specific types, as long long is not supported
+#if defined(_MSC_VER) // Microsoft Visual Studio
+typedef __int64 Int64;
+typedef unsigned __int64 UInt64;
+#else // if defined(_MSC_VER) // Other platforms, use long long
+typedef long long int Int64;
+typedef unsigned long long int UInt64;
+#endif // if defined(_MSC_VER)
+typedef Int64 LargestInt;
+typedef UInt64 LargestUInt;
+#define JSON_HAS_INT64
+#endif // if defined(JSON_NO_INT64)
+#if JSONCPP_USING_SECURE_MEMORY
+#define JSONCPP_STRING std::basic_string<char, std::char_traits<char>, Json::SecureAllocator<char> >
+#define JSONCPP_OSTRINGSTREAM std::basic_ostringstream<char, std::char_traits<char>, Json::SecureAllocator<char> >
+#define JSONCPP_OSTREAM std::basic_ostream<char, std::char_traits<char>>
+#define JSONCPP_ISTRINGSTREAM std::basic_istringstream<char, std::char_traits<char>, Json::SecureAllocator<char> >
+#define JSONCPP_ISTREAM std::istream
+#else
+#define JSONCPP_STRING std::string
+#define JSONCPP_OSTRINGSTREAM std::ostringstream
+#define JSONCPP_OSTREAM std::ostream
+#define JSONCPP_ISTRINGSTREAM std::istringstream
+#define JSONCPP_ISTREAM std::istream
+#endif // if JSONCPP_USING_SECURE_MEMORY
+} // end namespace Json
+
+#endif // JSON_CONFIG_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/config.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/forwards.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef JSON_FORWARDS_H_INCLUDED
+#define JSON_FORWARDS_H_INCLUDED
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include "config.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+
+namespace Json {
+
+// writer.h
+class FastWriter;
+class StyledWriter;
+
+// reader.h
+class Reader;
+
+// features.h
+class Features;
+
+// value.h
+typedef unsigned int ArrayIndex;
+class StaticString;
+class Path;
+class PathArgument;
+class Value;
+class ValueIteratorBase;
+class ValueIterator;
+class ValueConstIterator;
+
+} // namespace Json
+
+#endif // JSON_FORWARDS_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/forwards.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/features.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef CPPTL_JSON_FEATURES_H_INCLUDED
+#define CPPTL_JSON_FEATURES_H_INCLUDED
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include "forwards.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+
+namespace Json {
+
+/** \brief Configuration passed to reader and writer.
+ * This configuration object can be used to force the Reader or Writer
+ * to behave in a standard conforming way.
+ */
+class JSON_API Features {
+public:
+ /** \brief A configuration that allows all features and assumes all strings
+ * are UTF-8.
+ * - C & C++ comments are allowed
+ * - Root object can be any JSON value
+ * - Assumes Value strings are encoded in UTF-8
+ */
+ static Features all();
+
+ /** \brief A configuration that is strictly compatible with the JSON
+ * specification.
+ * - Comments are forbidden.
+ * - Root object must be either an array or an object value.
+ * - Assumes Value strings are encoded in UTF-8
+ */
+ static Features strictMode();
+
+ /** \brief Initialize the configuration like JsonConfig::allFeatures;
+ */
+ Features();
+
+ /// \c true if comments are allowed. Default: \c true.
+ bool allowComments_;
+
+ /// \c true if root must be either an array or an object value. Default: \c
+ /// false.
+ bool strictRoot_;
+
+ /// \c true if dropped null placeholders are allowed. Default: \c false.
+ bool allowDroppedNullPlaceholders_;
+
+ /// \c true if numeric object keys are allowed. Default: \c false.
+ bool allowNumericKeys_;
+};
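+
+/* A minimal sketch of plugging a Features configuration into the (deprecated
+ * but still available) Reader declared later in this header; everything except
+ * the jsoncpp API names is illustrative:
+ * \code
+ * Json::Reader strictReader(Json::Features::strictMode());
+ * Json::Value root;
+ * // Comments and non-array/non-object roots are rejected under strictMode().
+ * bool ok = strictReader.parse("{ \"key\": 1 }", root);
+ * \endcode
+ */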
+
+} // namespace Json
+
+#endif // CPPTL_JSON_FEATURES_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/features.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/value.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef CPPTL_JSON_H_INCLUDED
+#define CPPTL_JSON_H_INCLUDED
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include "forwards.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+#include <string>
+#include <vector>
+#include <exception>
+
+#ifndef JSON_USE_CPPTL_SMALLMAP
+#include <map>
+#else
+#include <cpptl/smallmap.h>
+#endif
+#ifdef JSON_USE_CPPTL
+#include <cpptl/forwards.h>
+#endif
+
+//Conditional NORETURN attribute on the throw functions would:
+// a) suppress false positives from static code analysis
+// b) possibly improve optimization opportunities.
+#if !defined(JSONCPP_NORETURN)
+# if defined(_MSC_VER)
+# define JSONCPP_NORETURN __declspec(noreturn)
+# elif defined(__GNUC__)
+# define JSONCPP_NORETURN __attribute__ ((__noreturn__))
+# else
+# define JSONCPP_NORETURN
+# endif
+#endif
+
+// Disable warning C4251: <data member>: <type> needs to have dll-interface to
+// be used by...
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+/** \brief JSON (JavaScript Object Notation).
+ */
+namespace Json {
+
+/** Base class for all exceptions we throw.
+ *
+ * We use nothing but these internally. Of course, STL can throw others.
+ */
+class JSON_API Exception : public std::exception {
+public:
+ Exception(JSONCPP_STRING const& msg);
+ ~Exception() throw() JSONCPP_OVERRIDE;
+ char const* what() const throw() JSONCPP_OVERRIDE;
+protected:
+ JSONCPP_STRING msg_;
+};
+
+/** Exceptions which the user cannot easily avoid.
+ *
+ * E.g. out-of-memory (when we use malloc), stack-overflow, malicious input
+ *
+ * \remark derived from Json::Exception
+ */
+class JSON_API RuntimeError : public Exception {
+public:
+ RuntimeError(JSONCPP_STRING const& msg);
+};
+
+/** Exceptions thrown by JSON_ASSERT/JSON_FAIL macros.
+ *
+ * These are precondition-violations (user bugs) and internal errors (our bugs).
+ *
+ * \remark derived from Json::Exception
+ */
+class JSON_API LogicError : public Exception {
+public:
+ LogicError(JSONCPP_STRING const& msg);
+};
+
+/// used internally
+JSONCPP_NORETURN void throwRuntimeError(JSONCPP_STRING const& msg);
+/// used internally
+JSONCPP_NORETURN void throwLogicError(JSONCPP_STRING const& msg);
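+
+/* A minimal sketch of catching these exceptions, assuming JSON_USE_EXCEPTION is
+ * left at its default of 1 and <iostream> is available; a failed conversion
+ * such as the one below is reported through a subclass of Json::Exception:
+ * \code
+ * try {
+ *   Json::Value v("not a number");
+ *   int n = v.asInt(); // the value is not convertible to Int
+ *   (void)n;
+ * } catch (const Json::Exception& e) {
+ *   std::cerr << e.what() << std::endl;
+ * }
+ * \endcode
+ */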
+
+/** \brief Type of the value held by a Value object.
+ */
+enum ValueType {
+ nullValue = 0, ///< 'null' value
+ intValue, ///< signed integer value
+ uintValue, ///< unsigned integer value
+ realValue, ///< double value
+ stringValue, ///< UTF-8 string value
+ booleanValue, ///< bool value
+ arrayValue, ///< array value (ordered list)
+ objectValue ///< object value (collection of name/value pairs).
+};
+
+enum CommentPlacement {
+ commentBefore = 0, ///< a comment placed on the line before a value
+ commentAfterOnSameLine, ///< a comment just after a value on the same line
+ commentAfter, ///< a comment on the line after a value (only makes sense for
+ /// root value)
+ numberOfCommentPlacement
+};
+
+//# ifdef JSON_USE_CPPTL
+// typedef CppTL::AnyEnumerator<const char *> EnumMemberNames;
+// typedef CppTL::AnyEnumerator<const Value &> EnumValues;
+//# endif
+
+/** \brief Lightweight wrapper to tag static string.
+ *
+ * The Value constructor and objectValue member assignment take advantage of the
+ * StaticString and avoid the cost of string duplication when storing the
+ * string or the member name.
+ *
+ * Example of usage:
+ * \code
+ * Json::Value aValue( StaticString("some text") );
+ * Json::Value object;
+ * static const StaticString code("code");
+ * object[code] = 1234;
+ * \endcode
+ */
+class JSON_API StaticString {
+public:
+ explicit StaticString(const char* czstring) : c_str_(czstring) {}
+
+ operator const char*() const { return c_str_; }
+
+ const char* c_str() const { return c_str_; }
+
+private:
+ const char* c_str_;
+};
+
+/** \brief Represents a <a HREF="http://www.json.org">JSON</a> value.
+ *
+ * This class is a discriminated union wrapper that can represent a:
+ * - signed integer [range: Value::minInt - Value::maxInt]
+ * - unsigned integer (range: 0 - Value::maxUInt)
+ * - double
+ * - UTF-8 string
+ * - boolean
+ * - 'null'
+ * - an ordered list of Value
+ * - collection of name/value pairs (javascript object)
+ *
+ * The type of the held value is represented by a #ValueType and
+ * can be obtained using type().
+ *
+ * Values of an #objectValue or #arrayValue can be accessed using operator[]()
+ * methods.
+ * Non-const methods will automatically create a #nullValue element
+ * if it does not exist.
+ * The sequence of an #arrayValue will be automatically resized and initialized
+ * with #nullValue. resize() can be used to enlarge or truncate an #arrayValue.
+ *
+ * The get() methods can be used to obtain default value in the case the
+ * required element does not exist.
+ *
+ * It is possible to iterate over the member names of an #objectValue using
+ * the getMemberNames() method.
+ *
+ * \note #Value string-lengths fit in size_t, but keys must be < 2^30.
+ * (The reason is an implementation detail.) A #CharReader will raise an
+ * exception if a bound is exceeded to avoid security holes in your app,
+ * but the Value API does *not* check bounds. That is the responsibility
+ * of the caller.
+ */
+class JSON_API Value {
+ friend class ValueIteratorBase;
+public:
+ typedef std::vector<JSONCPP_STRING> Members;
+ typedef ValueIterator iterator;
+ typedef ValueConstIterator const_iterator;
+ typedef Json::UInt UInt;
+ typedef Json::Int Int;
+#if defined(JSON_HAS_INT64)
+ typedef Json::UInt64 UInt64;
+ typedef Json::Int64 Int64;
+#endif // defined(JSON_HAS_INT64)
+ typedef Json::LargestInt LargestInt;
+ typedef Json::LargestUInt LargestUInt;
+ typedef Json::ArrayIndex ArrayIndex;
+
+ static const Value& null; ///< We regret this reference to a global instance; prefer the simpler Value().
+ static const Value& nullRef; ///< just a kludge for binary-compatibility; same as null
+ /// Minimum signed integer value that can be stored in a Json::Value.
+ static const LargestInt minLargestInt;
+ /// Maximum signed integer value that can be stored in a Json::Value.
+ static const LargestInt maxLargestInt;
+ /// Maximum unsigned integer value that can be stored in a Json::Value.
+ static const LargestUInt maxLargestUInt;
+
+ /// Minimum signed int value that can be stored in a Json::Value.
+ static const Int minInt;
+ /// Maximum signed int value that can be stored in a Json::Value.
+ static const Int maxInt;
+ /// Maximum unsigned int value that can be stored in a Json::Value.
+ static const UInt maxUInt;
+
+#if defined(JSON_HAS_INT64)
+ /// Minimum signed 64 bits int value that can be stored in a Json::Value.
+ static const Int64 minInt64;
+ /// Maximum signed 64 bits int value that can be stored in a Json::Value.
+ static const Int64 maxInt64;
+ /// Maximum unsigned 64 bits int value that can be stored in a Json::Value.
+ static const UInt64 maxUInt64;
+#endif // defined(JSON_HAS_INT64)
+
+private:
+#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
+ class CZString {
+ public:
+ enum DuplicationPolicy {
+ noDuplication = 0,
+ duplicate,
+ duplicateOnCopy
+ };
+ CZString(ArrayIndex index);
+ CZString(char const* str, unsigned length, DuplicationPolicy allocate);
+ CZString(CZString const& other);
+#if JSON_HAS_RVALUE_REFERENCES
+ CZString(CZString&& other);
+#endif
+ ~CZString();
+ CZString& operator=(CZString other);
+ bool operator<(CZString const& other) const;
+ bool operator==(CZString const& other) const;
+ ArrayIndex index() const;
+ //const char* c_str() const; ///< \deprecated
+ char const* data() const;
+ unsigned length() const;
+ bool isStaticString() const;
+
+ private:
+ void swap(CZString& other);
+
+ struct StringStorage {
+ unsigned policy_: 2;
+ unsigned length_: 30; // 1GB max
+ };
+
+ char const* cstr_; // actually, a prefixed string, unless policy is noDup
+ union {
+ ArrayIndex index_;
+ StringStorage storage_;
+ };
+ };
+
+public:
+#ifndef JSON_USE_CPPTL_SMALLMAP
+ typedef std::map<CZString, Value> ObjectValues;
+#else
+ typedef CppTL::SmallMap<CZString, Value> ObjectValues;
+#endif // ifndef JSON_USE_CPPTL_SMALLMAP
+#endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
+
+public:
+ /** \brief Create a default Value of the given type.
+
+ This is a very useful constructor.
+ To create an empty array, pass arrayValue.
+ To create an empty object, pass objectValue.
+ Another Value can then be set to this one by assignment.
+This is useful since clear() and resize() will not alter types.
+
+ Examples:
+\code
+Json::Value null_value; // null
+Json::Value arr_value(Json::arrayValue); // []
+Json::Value obj_value(Json::objectValue); // {}
+\endcode
+ */
+ Value(ValueType type = nullValue);
+ Value(Int value);
+ Value(UInt value);
+#if defined(JSON_HAS_INT64)
+ Value(Int64 value);
+ Value(UInt64 value);
+#endif // if defined(JSON_HAS_INT64)
+ Value(double value);
+ Value(const char* value); ///< Copy until first 0. (NULL causes a seg-fault.)
+ Value(const char* begin, const char* end); ///< Copy all, including zeroes.
+ /** \brief Constructs a value from a static string.
+
+ * Like the other string constructors, but does not duplicate the string for
+ * internal storage. The given string must remain alive after the call to this
+ * constructor.
+ * \note This works only for null-terminated strings. (We cannot change the
+ * size of this class, so we have nowhere to store the length,
+ * which might be computed later for various operations.)
+ *
+ * Example of usage:
+ * \code
+ * static StaticString foo("some text");
+ * Json::Value aValue(foo);
+ * \endcode
+ */
+ Value(const StaticString& value);
+ Value(const JSONCPP_STRING& value); ///< Copy data() until size(). Embedded zeroes too.
+#ifdef JSON_USE_CPPTL
+ Value(const CppTL::ConstString& value);
+#endif
+ Value(bool value);
+ /// Deep copy.
+ Value(const Value& other);
+#if JSON_HAS_RVALUE_REFERENCES
+ /// Move constructor
+ Value(Value&& other);
+#endif
+ ~Value();
+
+ /// Deep copy, then swap(other).
+ /// \note Overwrites existing comments. To preserve comments, use #swapPayload().
+ Value& operator=(Value other);
+ /// Swap everything.
+ void swap(Value& other);
+ /// Swap values but leave comments and source offsets in place.
+ void swapPayload(Value& other);
+
+ ValueType type() const;
+
+ /// Compare payload only, not comments etc.
+ bool operator<(const Value& other) const;
+ bool operator<=(const Value& other) const;
+ bool operator>=(const Value& other) const;
+ bool operator>(const Value& other) const;
+ bool operator==(const Value& other) const;
+ bool operator!=(const Value& other) const;
+ int compare(const Value& other) const;
+
+ const char* asCString() const; ///< Embedded zeroes could cause you trouble!
+#if JSONCPP_USING_SECURE_MEMORY
+ unsigned getCStringLength() const; //Returns the length of the CString
+#endif
+ JSONCPP_STRING asString() const; ///< Embedded zeroes are possible.
+ /** Get raw char* of string-value.
+ * \return false if !string. (Seg-fault if str or end are NULL.)
+ */
+ bool getString(
+ char const** begin, char const** end) const;
+#ifdef JSON_USE_CPPTL
+ CppTL::ConstString asConstString() const;
+#endif
+ Int asInt() const;
+ UInt asUInt() const;
+#if defined(JSON_HAS_INT64)
+ Int64 asInt64() const;
+ UInt64 asUInt64() const;
+#endif // if defined(JSON_HAS_INT64)
+ LargestInt asLargestInt() const;
+ LargestUInt asLargestUInt() const;
+ float asFloat() const;
+ double asDouble() const;
+ bool asBool() const;
+
+ bool isNull() const;
+ bool isBool() const;
+ bool isInt() const;
+ bool isInt64() const;
+ bool isUInt() const;
+ bool isUInt64() const;
+ bool isIntegral() const;
+ bool isDouble() const;
+ bool isNumeric() const;
+ bool isString() const;
+ bool isArray() const;
+ bool isObject() const;
+
+ bool isConvertibleTo(ValueType other) const;
+
+ /// Number of values in array or object
+ ArrayIndex size() const;
+
+ /// \brief Return true if empty array, empty object, or null;
+ /// otherwise, false.
+ bool empty() const;
+
+ /// Return isNull()
+ bool operator!() const;
+
+ /// Remove all object members and array elements.
+ /// \pre type() is arrayValue, objectValue, or nullValue
+ /// \post type() is unchanged
+ void clear();
+
+ /// Resize the array to size elements.
+ /// New elements are initialized to null.
+ /// May only be called on nullValue or arrayValue.
+ /// \pre type() is arrayValue or nullValue
+ /// \post type() is arrayValue
+ void resize(ArrayIndex size);
+
+ /// Access an array element (zero-based index).
+ /// If the array contains fewer than index+1 elements, null values are
+ /// inserted in the array so that its size is index+1.
+ /// (You may need to say 'value[0u]' to get your compiler to distinguish
+ /// this from the operator[] which takes a string.)
+ Value& operator[](ArrayIndex index);
+
+ /// Access an array element (zero-based index).
+ /// If the array contains fewer than index+1 elements, null values are
+ /// inserted in the array so that its size is index+1.
+ /// (You may need to say 'value[0u]' to get your compiler to distinguish
+ /// this from the operator[] which takes a string.)
+ Value& operator[](int index);
+
+ /// Access an array element (zero-based index).
+ /// (You may need to say 'value[0u]' to get your compiler to distinguish
+ /// this from the operator[] which takes a string.)
+ const Value& operator[](ArrayIndex index) const;
+
+ /// Access an array element (zero-based index).
+ /// (You may need to say 'value[0u]' to get your compiler to distinguish
+ /// this from the operator[] which takes a string.)
+ const Value& operator[](int index) const;
+
+ /// If the array contains at least index+1 elements, returns the element
+ /// value, otherwise returns defaultValue.
+ Value get(ArrayIndex index, const Value& defaultValue) const;
+ /// Return true if index < size().
+ bool isValidIndex(ArrayIndex index) const;
+ /// \brief Append value to array at the end.
+ ///
+ /// Equivalent to jsonvalue[jsonvalue.size()] = value;
+ Value& append(const Value& value);
+
+ /// Access an object value by name, create a null member if it does not exist.
+ /// \note Because of our implementation, keys are limited to 2^30 -1 chars.
+ /// Exceeding that will cause an exception.
+ Value& operator[](const char* key);
+ /// Access an object value by name, returns null if there is no member with
+ /// that name.
+ const Value& operator[](const char* key) const;
+ /// Access an object value by name, create a null member if it does not exist.
+ /// \param key may contain embedded nulls.
+ Value& operator[](const JSONCPP_STRING& key);
+ /// Access an object value by name, returns null if there is no member with
+ /// that name.
+ /// \param key may contain embedded nulls.
+ const Value& operator[](const JSONCPP_STRING& key) const;
+ /** \brief Access an object value by name, create a null member if it does not
+ exist.
+
+ * If the object has no entry for that name, then the member name used to store
+ * the new entry is not duplicated.
+ * Example of use:
+ * \code
+ * Json::Value object;
+ * static const StaticString code("code");
+ * object[code] = 1234;
+ * \endcode
+ */
+ Value& operator[](const StaticString& key);
+#ifdef JSON_USE_CPPTL
+ /// Access an object value by name, create a null member if it does not exist.
+ Value& operator[](const CppTL::ConstString& key);
+ /// Access an object value by name, returns null if there is no member with
+ /// that name.
+ const Value& operator[](const CppTL::ConstString& key) const;
+#endif
+ /// Return the member named key if it exists, defaultValue otherwise.
+ /// \note deep copy
+ Value get(const char* key, const Value& defaultValue) const;
+ /// Return the member named key if it exists, defaultValue otherwise.
+ /// \note deep copy
+ /// \note key may contain embedded nulls.
+ Value get(const char* begin, const char* end, const Value& defaultValue) const;
+ /// Return the member named key if it exists, defaultValue otherwise.
+ /// \note deep copy
+ /// \param key may contain embedded nulls.
+ Value get(const JSONCPP_STRING& key, const Value& defaultValue) const;
+#ifdef JSON_USE_CPPTL
+ /// Return the member named key if it exists, defaultValue otherwise.
+ /// \note deep copy
+ Value get(const CppTL::ConstString& key, const Value& defaultValue) const;
+#endif
+ /// Most general and efficient version of isMember()const, get()const,
+ /// and operator[]const
+ /// \note As stated elsewhere, behavior is undefined if (end-begin) >= 2^30
+ Value const* find(char const* begin, char const* end) const;
+ /// Most general and efficient version of object-mutators.
+ /// \note As stated elsewhere, behavior is undefined if (end-begin) >= 2^30
+ /// \return non-zero, but JSON_ASSERT if this is neither object nor nullValue.
+ Value const* demand(char const* begin, char const* end);
+ /// \brief Remove and return the named member.
+ ///
+ /// Do nothing if it did not exist.
+ /// \return the removed Value, or null.
+ /// \pre type() is objectValue or nullValue
+ /// \post type() is unchanged
+ /// \deprecated
+ Value removeMember(const char* key);
+ /// Same as removeMember(const char*)
+ /// \param key may contain embedded nulls.
+ /// \deprecated
+ Value removeMember(const JSONCPP_STRING& key);
+ /// Same as removeMember(const char* begin, const char* end, Value* removed),
+ /// but 'key' is null-terminated.
+ bool removeMember(const char* key, Value* removed);
+ /** \brief Remove the named map member.
+
+ Update 'removed' iff removed.
+ \param key may contain embedded nulls.
+ \return true iff removed (no exceptions)
+ */
+ bool removeMember(JSONCPP_STRING const& key, Value* removed);
+ /// Same as removeMember(JSONCPP_STRING const& key, Value* removed)
+ bool removeMember(const char* begin, const char* end, Value* removed);
+ /** \brief Remove the indexed array element.
+
+ This is an expensive O(n) operation.
+ Update 'removed' iff removed.
+ \return true iff removed (no exceptions)
+ */
+ bool removeIndex(ArrayIndex i, Value* removed);
+
+ /// Return true if the object has a member named key.
+ /// \note 'key' must be null-terminated.
+ bool isMember(const char* key) const;
+ /// Return true if the object has a member named key.
+ /// \param key may contain embedded nulls.
+ bool isMember(const JSONCPP_STRING& key) const;
+ /// Same as isMember(JSONCPP_STRING const& key)const
+ bool isMember(const char* begin, const char* end) const;
+#ifdef JSON_USE_CPPTL
+ /// Return true if the object has a member named key.
+ bool isMember(const CppTL::ConstString& key) const;
+#endif
+
+ /// \brief Return a list of the member names.
+ ///
+ /// If null, return an empty list.
+ /// \pre type() is objectValue or nullValue
+ /// \post if type() was nullValue, it remains nullValue
+ Members getMemberNames() const;
+
+ //# ifdef JSON_USE_CPPTL
+ // EnumMemberNames enumMemberNames() const;
+ // EnumValues enumValues() const;
+ //# endif
+
+ /// \deprecated Always pass len.
+ JSONCPP_DEPRECATED("Use setComment(JSONCPP_STRING const&) instead.")
+ void setComment(const char* comment, CommentPlacement placement);
+ /// Comments must be //... or /* ... */
+ void setComment(const char* comment, size_t len, CommentPlacement placement);
+ /// Comments must be //... or /* ... */
+ void setComment(const JSONCPP_STRING& comment, CommentPlacement placement);
+ bool hasComment(CommentPlacement placement) const;
+ /// Include delimiters and embedded newlines.
+ JSONCPP_STRING getComment(CommentPlacement placement) const;
+
+ JSONCPP_STRING toStyledString() const;
+
+ const_iterator begin() const;
+ const_iterator end() const;
+
+ iterator begin();
+ iterator end();
+
+ // Accessors for the [start, limit) range of bytes within the JSON text from
+ // which this value was parsed, if any.
+ void setOffsetStart(ptrdiff_t start);
+ void setOffsetLimit(ptrdiff_t limit);
+ ptrdiff_t getOffsetStart() const;
+ ptrdiff_t getOffsetLimit() const;
+
+private:
+ void initBasic(ValueType type, bool allocated = false);
+
+ Value& resolveReference(const char* key);
+ Value& resolveReference(const char* key, const char* end);
+
+ struct CommentInfo {
+ CommentInfo();
+ ~CommentInfo();
+
+ void setComment(const char* text, size_t len);
+
+ char* comment_;
+ };
+
+ // struct MemberNamesTransform
+ //{
+ // typedef const char *result_type;
+ // const char *operator()( const CZString &name ) const
+ // {
+ // return name.c_str();
+ // }
+ //};
+
+ union ValueHolder {
+ LargestInt int_;
+ LargestUInt uint_;
+ double real_;
+ bool bool_;
+ char* string_; // actually ptr to unsigned, followed by str, unless !allocated_
+ ObjectValues* map_;
+ } value_;
+ ValueType type_ : 8;
+ unsigned int allocated_ : 1; // Notes: if declared as bool, bitfield is useless.
+ // If not allocated_, string_ must be null-terminated.
+ CommentInfo* comments_;
+
+ // [start, limit) byte offsets in the source JSON text from which this Value
+ // was extracted.
+ ptrdiff_t start_;
+ ptrdiff_t limit_;
+};
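+
+/* A minimal sketch of building and querying a Value with the members declared
+ * above; 'root' and the literal contents are illustrative only:
+ * \code
+ * Json::Value root(Json::objectValue);
+ * root["name"] = "example";          // operator[] creates the member on first use
+ * root["counts"].append(42);         // a nullValue member becomes an arrayValue
+ * root["counts"].append(17);
+ * const int second = root["counts"].get(1u, 0).asInt();      // 17, or 0 if absent
+ * const Json::Value::Members keys = root.getMemberNames();   // "counts", "name"
+ * const JSONCPP_STRING pretty = root.toStyledString();
+ * \endcode
+ */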
+
+/** \brief Experimental and untested: represents an element of the "path" to
+ * access a node.
+ */
+class JSON_API PathArgument {
+public:
+ friend class Path;
+
+ PathArgument();
+ PathArgument(ArrayIndex index);
+ PathArgument(const char* key);
+ PathArgument(const JSONCPP_STRING& key);
+
+private:
+ enum Kind {
+ kindNone = 0,
+ kindIndex,
+ kindKey
+ };
+ JSONCPP_STRING key_;
+ ArrayIndex index_;
+ Kind kind_;
+};
+
+/** \brief Experimental and untested: represents a "path" to access a node.
+ *
+ * Syntax:
+ * - "." => root node
+ * - ".[n]" => elements at index 'n' of root node (an array value)
+ * - ".name" => member named 'name' of root node (an object value)
+ * - ".name1.name2.name3"
+ * - ".[0][1][2].name1[3]"
+ * - ".%" => member name is provided as parameter
+ * - ".[%]" => index is provied as parameter
+ */
+class JSON_API Path {
+public:
+ Path(const JSONCPP_STRING& path,
+ const PathArgument& a1 = PathArgument(),
+ const PathArgument& a2 = PathArgument(),
+ const PathArgument& a3 = PathArgument(),
+ const PathArgument& a4 = PathArgument(),
+ const PathArgument& a5 = PathArgument());
+
+ const Value& resolve(const Value& root) const;
+ Value resolve(const Value& root, const Value& defaultValue) const;
+ /// Creates the "path" to access the specified node and returns a reference on
+ /// the node.
+ Value& make(Value& root) const;
+
+private:
+ typedef std::vector<const PathArgument*> InArgs;
+ typedef std::vector<PathArgument> Args;
+
+ void makePath(const JSONCPP_STRING& path, const InArgs& in);
+ void addPathInArg(const JSONCPP_STRING& path,
+ const InArgs& in,
+ InArgs::const_iterator& itInArg,
+ PathArgument::Kind kind);
+ void invalidPath(const JSONCPP_STRING& path, int location);
+
+ Args args_;
+};
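+
+/* A minimal sketch of the path syntax described above, hedged because the class
+ * documents itself as experimental and untested; 'root' is an illustrative
+ * Json::Value:
+ * \code
+ * Json::Path path(".settings.filters[0].name");
+ * Json::Value name = path.resolve(root, Json::Value("unknown"));
+ * \endcode
+ */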
+
+/** \brief base class for Value iterators.
+ *
+ */
+class JSON_API ValueIteratorBase {
+public:
+ typedef std::bidirectional_iterator_tag iterator_category;
+ typedef unsigned int size_t;
+ typedef int difference_type;
+ typedef ValueIteratorBase SelfType;
+
+ bool operator==(const SelfType& other) const { return isEqual(other); }
+
+ bool operator!=(const SelfType& other) const { return !isEqual(other); }
+
+ difference_type operator-(const SelfType& other) const {
+ return other.computeDistance(*this);
+ }
+
+ /// Return either the index or the member name of the referenced value as a
+ /// Value.
+ Value key() const;
+
+ /// Return the index of the referenced Value, or -1 if it is not an arrayValue.
+ UInt index() const;
+
+ /// Return the member name of the referenced Value, or "" if it is not an
+ /// objectValue.
+ /// \note Avoid `c_str()` on result, as embedded zeroes are possible.
+ JSONCPP_STRING name() const;
+
+ /// Return the member name of the referenced Value. "" if it is not an
+ /// objectValue.
+ /// \deprecated This cannot be used for UTF-8 strings, since there can be embedded nulls.
+ JSONCPP_DEPRECATED("Use `key = name();` instead.")
+ char const* memberName() const;
+ /// Return the member name of the referenced Value, or NULL if it is not an
+ /// objectValue.
+ /// \note Better version than memberName(). Allows embedded nulls.
+ char const* memberName(char const** end) const;
+
+protected:
+ Value& deref() const;
+
+ void increment();
+
+ void decrement();
+
+ difference_type computeDistance(const SelfType& other) const;
+
+ bool isEqual(const SelfType& other) const;
+
+ void copy(const SelfType& other);
+
+private:
+ Value::ObjectValues::iterator current_;
+ // Indicates that iterator is for a null value.
+ bool isNull_;
+
+public:
+ // For some reason, BORLAND needs these at the end, rather
+ // than earlier. No idea why.
+ ValueIteratorBase();
+ explicit ValueIteratorBase(const Value::ObjectValues::iterator& current);
+};
+
+/** \brief const iterator for object and array value.
+ *
+ */
+class JSON_API ValueConstIterator : public ValueIteratorBase {
+ friend class Value;
+
+public:
+ typedef const Value value_type;
+ //typedef unsigned int size_t;
+ //typedef int difference_type;
+ typedef const Value& reference;
+ typedef const Value* pointer;
+ typedef ValueConstIterator SelfType;
+
+ ValueConstIterator();
+ ValueConstIterator(ValueIterator const& other);
+
+private:
+/*! \internal Used by Value to create an iterator.
+ */
+ explicit ValueConstIterator(const Value::ObjectValues::iterator& current);
+public:
+ SelfType& operator=(const ValueIteratorBase& other);
+
+ SelfType operator++(int) {
+ SelfType temp(*this);
+ ++*this;
+ return temp;
+ }
+
+ SelfType operator--(int) {
+ SelfType temp(*this);
+ --*this;
+ return temp;
+ }
+
+ SelfType& operator--() {
+ decrement();
+ return *this;
+ }
+
+ SelfType& operator++() {
+ increment();
+ return *this;
+ }
+
+ reference operator*() const { return deref(); }
+
+ pointer operator->() const { return &deref(); }
+};
+
+/** \brief Iterator for object and array value.
+ */
+class JSON_API ValueIterator : public ValueIteratorBase {
+ friend class Value;
+
+public:
+ typedef Value value_type;
+ typedef unsigned int size_t;
+ typedef int difference_type;
+ typedef Value& reference;
+ typedef Value* pointer;
+ typedef ValueIterator SelfType;
+
+ ValueIterator();
+ explicit ValueIterator(const ValueConstIterator& other);
+ ValueIterator(const ValueIterator& other);
+
+private:
+/*! \internal Used by Value to create an iterator.
+ */
+ explicit ValueIterator(const Value::ObjectValues::iterator& current);
+public:
+ SelfType& operator=(const SelfType& other);
+
+ SelfType operator++(int) {
+ SelfType temp(*this);
+ ++*this;
+ return temp;
+ }
+
+ SelfType operator--(int) {
+ SelfType temp(*this);
+ --*this;
+ return temp;
+ }
+
+ SelfType& operator--() {
+ decrement();
+ return *this;
+ }
+
+ SelfType& operator++() {
+ increment();
+ return *this;
+ }
+
+ reference operator*() const { return deref(); }
+
+ pointer operator->() const { return &deref(); }
+};
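+
+/* A minimal sketch of iterating an objectValue with the iterators declared
+ * above; 'obj' is an illustrative Json::Value holding an object:
+ * \code
+ * for (Json::Value::const_iterator it = obj.begin(); it != obj.end(); ++it) {
+ *   const JSONCPP_STRING key = it.name();   // "" when iterating an arrayValue
+ *   const Json::Value& element = *it;       // reference, no copy
+ *   (void)key; (void)element;
+ * }
+ * \endcode
+ */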
+
+} // namespace Json
+
+
+namespace std {
+/// Specialize std::swap() for Json::Value.
+template<>
+inline void swap(Json::Value& a, Json::Value& b) { a.swap(b); }
+}
+
+
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(pop)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+#endif // CPPTL_JSON_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/value.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/reader.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef CPPTL_JSON_READER_H_INCLUDED
+#define CPPTL_JSON_READER_H_INCLUDED
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include "features.h"
+#include "value.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+#include <deque>
+#include <iosfwd>
+#include <stack>
+#include <string>
+#include <istream>
+
+// Disable warning C4251: <data member>: <type> needs to have dll-interface to
+// be used by...
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+namespace Json {
+
+/** \brief Unserialize a <a HREF="http://www.json.org">JSON</a> document into a
+ *Value.
+ *
+ * \deprecated Use CharReader and CharReaderBuilder.
+ */
+class JSON_API Reader {
+public:
+ typedef char Char;
+ typedef const Char* Location;
+
+ /** \brief An error tagged with where in the JSON text it was encountered.
+ *
+ * The offsets give the [start, limit) range of bytes within the text. Note
+ * that this is bytes, not codepoints.
+ *
+ */
+ struct StructuredError {
+ ptrdiff_t offset_start;
+ ptrdiff_t offset_limit;
+ JSONCPP_STRING message;
+ };
+
+ /** \brief Constructs a Reader allowing all features
+ * for parsing.
+ */
+ Reader();
+
+ /** \brief Constructs a Reader allowing the specified feature set
+ * for parsing.
+ */
+ Reader(const Features& features);
+
+ /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
+ * document.
+ * \param document UTF-8 encoded string containing the document to read.
+ * \param root [out] Contains the root value of the document if it was
+ * successfully parsed.
+ * \param collectComments \c true to collect comments and allow writing them
+ * back during
+ * serialization, \c false to discard comments.
+ * This parameter is ignored if
+ * Features::allowComments_
+ * is \c false.
+ * \return \c true if the document was successfully parsed, \c false if an
+ * error occurred.
+ */
+ bool
+ parse(const std::string& document, Value& root, bool collectComments = true);
+
+ /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
+ document.
+ * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the
+ document to read.
+ * \param endDoc Pointer on the end of the UTF-8 encoded string of the
+ document to read.
+ * Must be >= beginDoc.
+ * \param root [out] Contains the root value of the document if it was
+ * successfully parsed.
+ * \param collectComments \c true to collect comments and allow writing them
+ back during
+ * serialization, \c false to discard comments.
+ * This parameter is ignored if
+ Features::allowComments_
+ * is \c false.
+ * \return \c true if the document was successfully parsed, \c false if an
+ error occurred.
+ */
+ bool parse(const char* beginDoc,
+ const char* endDoc,
+ Value& root,
+ bool collectComments = true);
+
+ /// \brief Parse from input stream.
+ /// \see Json::operator>>(std::istream&, Json::Value&).
+ bool parse(JSONCPP_ISTREAM& is, Value& root, bool collectComments = true);
+
+ /** \brief Returns a user-friendly string that lists errors in the parsed
+ * document.
+ * \return Formatted error message with the list of errors and their location
+ * in the parsed document. An empty string is returned if no error
+ * occurred during parsing.
+ * \deprecated Use getFormattedErrorMessages() instead (typo fix).
+ */
+ JSONCPP_DEPRECATED("Use getFormattedErrorMessages() instead.")
+ JSONCPP_STRING getFormatedErrorMessages() const;
+
+ /** \brief Returns a user-friendly string that lists errors in the parsed
+ * document.
+ * \return Formatted error message with the list of errors and their location
+ * in the parsed document. An empty string is returned if no error
+ * occurred during parsing.
+ */
+ JSONCPP_STRING getFormattedErrorMessages() const;
+
+ /** \brief Returns a vector of structured errors encountered while parsing.
+ * \return A (possibly empty) vector of StructuredError objects. Currently
+ * only one error can be returned, but the caller should tolerate
+ * multiple errors. This can occur if the parser recovers from a
+ * non-fatal parse error and then encounters additional errors.
+ */
+ std::vector<StructuredError> getStructuredErrors() const;
+
+ /** \brief Add a semantic error message.
+ * \param value JSON Value location associated with the error
+ * \param message The error message.
+ * \return \c true if the error was successfully added, \c false if the
+ * Value offset exceeds the document size.
+ */
+ bool pushError(const Value& value, const JSONCPP_STRING& message);
+
+ /** \brief Add a semantic error message with extra context.
+ * \param value JSON Value location associated with the error
+ * \param message The error message.
+ * \param extra Additional JSON Value location to contextualize the error
+ * \return \c true if the error was successfully added, \c false if either
+ * Value offset exceeds the document size.
+ */
+ bool pushError(const Value& value, const JSONCPP_STRING& message, const Value& extra);
+
+ /** \brief Return whether there are any errors.
+ * \return \c true if there are no errors to report \c false if
+ * errors have occurred.
+ */
+ bool good() const;
+
+private:
+ enum TokenType {
+ tokenEndOfStream = 0,
+ tokenObjectBegin,
+ tokenObjectEnd,
+ tokenArrayBegin,
+ tokenArrayEnd,
+ tokenString,
+ tokenNumber,
+ tokenTrue,
+ tokenFalse,
+ tokenNull,
+ tokenArraySeparator,
+ tokenMemberSeparator,
+ tokenComment,
+ tokenError
+ };
+
+ class Token {
+ public:
+ TokenType type_;
+ Location start_;
+ Location end_;
+ };
+
+ class ErrorInfo {
+ public:
+ Token token_;
+ JSONCPP_STRING message_;
+ Location extra_;
+ };
+
+ typedef std::deque<ErrorInfo> Errors;
+
+ bool readToken(Token& token);
+ void skipSpaces();
+ bool match(Location pattern, int patternLength);
+ bool readComment();
+ bool readCStyleComment();
+ bool readCppStyleComment();
+ bool readString();
+ void readNumber();
+ bool readValue();
+ bool readObject(Token& token);
+ bool readArray(Token& token);
+ bool decodeNumber(Token& token);
+ bool decodeNumber(Token& token, Value& decoded);
+ bool decodeString(Token& token);
+ bool decodeString(Token& token, JSONCPP_STRING& decoded);
+ bool decodeDouble(Token& token);
+ bool decodeDouble(Token& token, Value& decoded);
+ bool decodeUnicodeCodePoint(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode);
+ bool decodeUnicodeEscapeSequence(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode);
+ bool addError(const JSONCPP_STRING& message, Token& token, Location extra = 0);
+ bool recoverFromError(TokenType skipUntilToken);
+ bool addErrorAndRecover(const JSONCPP_STRING& message,
+ Token& token,
+ TokenType skipUntilToken);
+ void skipUntilSpace();
+ Value& currentValue();
+ Char getNextChar();
+ void
+ getLocationLineAndColumn(Location location, int& line, int& column) const;
+ JSONCPP_STRING getLocationLineAndColumn(Location location) const;
+ void addComment(Location begin, Location end, CommentPlacement placement);
+ void skipCommentTokens(Token& token);
+
+ typedef std::stack<Value*> Nodes;
+ Nodes nodes_;
+ Errors errors_;
+ JSONCPP_STRING document_;
+ Location begin_;
+ Location end_;
+ Location current_;
+ Location lastValueEnd_;
+ Value* lastValue_;
+ JSONCPP_STRING commentsBefore_;
+ Features features_;
+ bool collectComments_;
+}; // Reader
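+
+/* A minimal sketch of the deprecated-but-supported Reader path described above,
+ * assuming <iostream>; 'documentText' is an illustrative std::string holding a
+ * JSON document:
+ * \code
+ * Json::Reader reader;
+ * Json::Value root;
+ * if (!reader.parse(documentText, root))
+ *   std::cerr << reader.getFormattedErrorMessages();
+ * \endcode
+ */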
+
+/** Interface for reading JSON from a char array.
+ */
+class JSON_API CharReader {
+public:
+ virtual ~CharReader() {}
+ /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
+ document.
+ * The document must be a UTF-8 encoded string containing the document to read.
+ *
+ * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the
+ document to read.
+ * \param endDoc Pointer on the end of the UTF-8 encoded string of the
+ document to read.
+ * Must be >= beginDoc.
+ * \param root [out] Contains the root value of the document if it was
+ * successfully parsed.
+ * \param errs [out] Formatted error messages (if not NULL)
+ * a user-friendly string that lists errors in the parsed
+ * document.
+ * \return \c true if the document was successfully parsed, \c false if an
+ error occurred.
+ */
+ virtual bool parse(
+ char const* beginDoc, char const* endDoc,
+ Value* root, JSONCPP_STRING* errs) = 0;
+
+ class JSON_API Factory {
+ public:
+ virtual ~Factory() {}
+ /** \brief Allocate a CharReader via operator new().
+ * \throw std::exception if something goes wrong (e.g. invalid settings)
+ */
+ virtual CharReader* newCharReader() const = 0;
+ }; // Factory
+}; // CharReader
+
+/** \brief Build a CharReader implementation.
+
+Usage:
+\code
+ using namespace Json;
+ CharReaderBuilder builder;
+ builder["collectComments"] = false;
+ Value value;
+ JSONCPP_STRING errs;
+ bool ok = parseFromStream(builder, std::cin, &value, &errs);
+\endcode
+*/
+class JSON_API CharReaderBuilder : public CharReader::Factory {
+public:
+ // Note: We use a Json::Value so that we can add data-members to this class
+ // without a major version bump.
+ /** Configuration of this builder.
+ Available settings (case-sensitive):
+ - `"collectComments": false or true`
+ - true to collect comments and allow writing them
+ back during serialization, false to discard comments.
+ This parameter is ignored if allowComments is false.
+ - `"allowComments": false or true`
+ - true if comments are allowed.
+ - `"strictRoot": false or true`
+ - true if root must be either an array or an object value
+ - `"allowDroppedNullPlaceholders": false or true`
+ - true if dropped null placeholders are allowed. (See StreamWriterBuilder.)
+ - `"allowNumericKeys": false or true`
+ - true if numeric object keys are allowed.
+ - `"allowSingleQuotes": false or true`
+ - true if single quotes ('') are allowed for strings (both keys and values)
+ - `"stackLimit": integer`
+ - Exceeding stackLimit (recursive depth of `readValue()`) will
+ cause an exception.
+ - This is a security issue (seg-faults caused by deeply nested JSON),
+ so the default is low.
+ - `"failIfExtra": false or true`
+ - If true, `parse()` returns false when extra non-whitespace trails
+ the JSON value in the input string.
+ - `"rejectDupKeys": false or true`
+ - If true, `parse()` returns false when a key is duplicated within an object.
+ - `"allowSpecialFloats": false or true`
+ - If true, special float values (NaNs and infinities) are allowed
+ and their values are restored losslessly.
+
+ You can examine 'settings_' yourself
+ to see the defaults. You can also write and read them just like any
+ JSON Value.
+ \sa setDefaults()
+ */
+ Json::Value settings_;
+
+ CharReaderBuilder();
+ ~CharReaderBuilder() JSONCPP_OVERRIDE;
+
+ CharReader* newCharReader() const JSONCPP_OVERRIDE;
+
+ /** \return true if 'settings' are legal and consistent;
+ * otherwise, indicate bad settings via 'invalid'.
+ */
+ bool validate(Json::Value* invalid) const;
+
+ /** A simple way to update a specific setting.
+ */
+ Value& operator[](JSONCPP_STRING key);
+
+ /** Called by ctor, but you can use this to reset settings_.
+ * \pre 'settings' != NULL (but Json::null is fine)
+ * \remark Defaults:
+ * \snippet src/lib_json/json_reader.cpp CharReaderBuilderDefaults
+ */
+ static void setDefaults(Json::Value* settings);
+ /** Same as old Features::strictMode().
+ * \pre 'settings' != NULL (but Json::null is fine)
+ * \remark Defaults:
+ * \snippet src/lib_json/json_reader.cpp CharReaderBuilderStrictMode
+ */
+ static void strictMode(Json::Value* settings);
+};
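+
+/* A minimal sketch of the builder/CharReader path when the input is already in
+ * memory rather than a stream; 'text' is an illustrative std::string, and
+ * std::unique_ptr assumes the C++11 support used elsewhere in this header:
+ * \code
+ * Json::CharReaderBuilder builder;
+ * builder["failIfExtra"] = true;   // reject trailing non-whitespace
+ * const std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
+ * Json::Value root;
+ * JSONCPP_STRING errs;
+ * const bool ok = reader->parse(text.data(), text.data() + text.size(), &root, &errs);
+ * \endcode
+ */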
+
+/** Consume entire stream and use its begin/end.
+ * Someday we might have a real StreamReader, but for now this
+ * is convenient.
+ */
+bool JSON_API parseFromStream(
+ CharReader::Factory const&,
+ JSONCPP_ISTREAM&,
+ Value* root, std::string* errs);
+
+/** \brief Read from 'sin' into 'root'.
+
+ Always keep comments from the input JSON.
+
+ This can be used to read a file into a particular sub-object.
+ For example:
+ \code
+ Json::Value root;
+ cin >> root["dir"]["file"];
+ cout << root;
+ \endcode
+ Result:
+ \verbatim
+ {
+ "dir": {
+ "file": {
+ // The input stream JSON would be nested here.
+ }
+ }
+ }
+ \endverbatim
+ \throw std::exception on parse error.
+ \see Json::operator<<()
+*/
+JSON_API JSONCPP_ISTREAM& operator>>(JSONCPP_ISTREAM&, Value&);
+
+} // namespace Json
+
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(pop)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+#endif // CPPTL_JSON_READER_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/reader.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/writer.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef JSON_WRITER_H_INCLUDED
+#define JSON_WRITER_H_INCLUDED
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include "value.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+#include <vector>
+#include <string>
+#include <ostream>
+
+// Disable warning C4251: <data member>: <type> needs to have dll-interface to
+// be used by...
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+namespace Json {
+
+class Value;
+
+/**
+
+Usage:
+\code
+ using namespace Json;
+ void writeToStdout(StreamWriter::Factory const& factory, Value const& value) {
+ std::unique_ptr<StreamWriter> const writer(
+ factory.newStreamWriter());
+ writer->write(value, &std::cout);
+ std::cout << std::endl; // add lf and flush
+ }
+\endcode
+*/
+class JSON_API StreamWriter {
+protected:
+ JSONCPP_OSTREAM* sout_; // not owned; will not delete
+public:
+ StreamWriter();
+ virtual ~StreamWriter();
+ /** Write Value into document as configured in sub-class.
+ Do not take ownership of sout, but maintain a reference during function.
+ \pre sout != NULL
+ \return zero on success (For now, we always return zero, so check the stream instead.)
+ \throw std::exception possibly, depending on configuration
+ */
+ virtual int write(Value const& root, JSONCPP_OSTREAM* sout) = 0;
+
+ /** \brief A simple abstract factory.
+ */
+ class JSON_API Factory {
+ public:
+ virtual ~Factory();
+ /** \brief Allocate a StreamWriter via operator new().
+ * \throw std::exception if something goes wrong (e.g. invalid settings)
+ */
+ virtual StreamWriter* newStreamWriter() const = 0;
+ }; // Factory
+}; // StreamWriter
+
+/** \brief Write into stringstream, then return string, for convenience.
+ * A StreamWriter will be created from the factory, used, and then deleted.
+ */
+JSONCPP_STRING JSON_API writeString(StreamWriter::Factory const& factory, Value const& root);
+
+
+/** \brief Build a StreamWriter implementation.
+
+Usage:
+\code
+ using namespace Json;
+ Value value = ...;
+ StreamWriterBuilder builder;
+ builder["commentStyle"] = "None";
+ builder["indentation"] = " "; // or whatever you like
+ std::unique_ptr<Json::StreamWriter> writer(
+ builder.newStreamWriter());
+ writer->write(value, &std::cout);
+ std::cout << std::endl; // add lf and flush
+\endcode
+*/
+class JSON_API StreamWriterBuilder : public StreamWriter::Factory {
+public:
+ // Note: We use a Json::Value so that we can add data-members to this class
+ // without a major version bump.
+ /** Configuration of this builder.
+ Available settings (case-sensitive):
+ - "commentStyle": "None" or "All"
+ - "indentation": "<anything>"
+ - "enableYAMLCompatibility": false or true
+ - If true, slightly changes the whitespace around colons.
+ - "dropNullPlaceholders": false or true
+ - Drop the "null" string from the writer's output for nullValues.
+ Strictly speaking, this is not valid JSON. But when the output is being
+ fed to a browser's Javascript, it makes for smaller output and the
+ browser can handle the output just fine.
+ - "useSpecialFloats": false or true
+ - If true, outputs non-finite floating point values in the following way:
+ NaN values as "NaN", positive infinity as "Infinity", and negative infinity
+ as "-Infinity".
+
+ You can examine `settings_` yourself
+ to see the defaults. You can also write and read them just like any
+ JSON Value.
+ \sa setDefaults()
+ */
+ Json::Value settings_;
+
+ StreamWriterBuilder();
+ ~StreamWriterBuilder() JSONCPP_OVERRIDE;
+
+ /**
+ * \throw std::exception if something goes wrong (e.g. invalid settings)
+ */
+ StreamWriter* newStreamWriter() const JSONCPP_OVERRIDE;
+
+ /** \return true if 'settings' are legal and consistent;
+ * otherwise, indicate bad settings via 'invalid'.
+ */
+ bool validate(Json::Value* invalid) const;
+ /** A simple way to update a specific setting.
+ */
+ Value& operator[](JSONCPP_STRING key);
+
+ /** Called by ctor, but you can use this to reset settings_.
+ * \pre 'settings' != NULL (but Json::null is fine)
+ * \remark Defaults:
+ * \snippet src/lib_json/json_writer.cpp StreamWriterBuilderDefaults
+ */
+ static void setDefaults(Json::Value* settings);
+};
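
[Editor's note: a hedged illustration of the settings listed above; this short sketch configures a StreamWriterBuilder, checks it with validate(), and serializes a value via writeString(). The chosen values are examples only.]

#include <json/json.h>
#include <iostream>
#include <string>

int main() {
  Json::Value value;
  value["pi"] = 3.14;
  value["note"] = Json::Value();           // a null value

  Json::StreamWriterBuilder builder;
  builder["commentStyle"] = "None";        // settings documented above
  builder["indentation"] = "  ";
  builder["dropNullPlaceholders"] = true;  // do not write the "null" token for null values

  Json::Value invalid;
  if (!builder.validate(&invalid)) {       // bad keys are reported via 'invalid'
    std::cerr << "bad settings: " << invalid << std::endl;
    return 1;
  }
  // writeString() creates a StreamWriter from the factory, uses it, then deletes it.
  std::cout << Json::writeString(builder, value) << std::endl;
  return 0;
}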
+
+/** \brief Abstract class for writers.
+ * \deprecated Use StreamWriter. (And really, this is an implementation detail.)
+ */
+class JSON_API Writer {
+public:
+ virtual ~Writer();
+
+ virtual JSONCPP_STRING write(const Value& root) = 0;
+};
+
+/** \brief Outputs a Value in <a HREF="http://www.json.org">JSON</a> format
+ * without formatting (not human friendly).
+ *
+ * The JSON document is written on a single line. It is not intended for 'human'
+ * consumption, but may be useful to support features such as RPC where
+ * bandwidth is limited.
+ * \sa Reader, Value
+ * \deprecated Use StreamWriterBuilder.
+ */
+class JSON_API FastWriter : public Writer {
+
+public:
+ FastWriter();
+ ~FastWriter() JSONCPP_OVERRIDE {}
+
+ void enableYAMLCompatibility();
+
+ /** \brief Drop the "null" string from the writer's output for nullValues.
+ * Strictly speaking, this is not valid JSON. But when the output is being
+ * fed to a browser's Javascript, it makes for smaller output and the
+ * browser can handle the output just fine.
+ */
+ void dropNullPlaceholders();
+
+ void omitEndingLineFeed();
+
+public: // overridden from Writer
+ JSONCPP_STRING write(const Value& root) JSONCPP_OVERRIDE;
+
+private:
+ void writeValue(const Value& value);
+
+ JSONCPP_STRING document_;
+ bool yamlCompatiblityEnabled_;
+ bool dropNullPlaceholders_;
+ bool omitEndingLineFeed_;
+};
+
+/** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a
+ *human friendly way.
+ *
+ * The rules for line breaks and indentation are as follows:
+ * - Object value:
+ *   - if empty, then print {} without indent or line break
+ *   - if not empty, then print '{', line break and indent, print one value
+ *     per line, and then unindent, line break, and print '}'.
+ * - Array value:
+ *   - if empty, then print [] without indent or line break
+ *   - if the array contains no object value or non-empty array, and all the
+ *     values fit on one line, then print the array on a single line.
+ *   - otherwise, if the values do not fit on one line, or the array contains
+ *     an object or a non-empty array, then print one value per line.
+ *
+ * If the Value has comments, then they are output according to their
+ * #CommentPlacement.
+ *
+ * \sa Reader, Value, Value::setComment()
+ * \deprecated Use StreamWriterBuilder.
+ */
+class JSON_API StyledWriter : public Writer {
+public:
+ StyledWriter();
+ ~StyledWriter() JSONCPP_OVERRIDE {}
+
+public: // overridden from Writer
+ /** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format.
+ * \param root Value to serialize.
+ * \return String containing the JSON document that represents the root value.
+ */
+ JSONCPP_STRING write(const Value& root) JSONCPP_OVERRIDE;
+
+private:
+ void writeValue(const Value& value);
+ void writeArrayValue(const Value& value);
+ bool isMultineArray(const Value& value);
+ void pushValue(const JSONCPP_STRING& value);
+ void writeIndent();
+ void writeWithIndent(const JSONCPP_STRING& value);
+ void indent();
+ void unindent();
+ void writeCommentBeforeValue(const Value& root);
+ void writeCommentAfterValueOnSameLine(const Value& root);
+ bool hasCommentForValue(const Value& value);
+ static JSONCPP_STRING normalizeEOL(const JSONCPP_STRING& text);
+
+ typedef std::vector<JSONCPP_STRING> ChildValues;
+
+ ChildValues childValues_;
+ JSONCPP_STRING document_;
+ JSONCPP_STRING indentString_;
+ unsigned int rightMargin_;
+ unsigned int indentSize_;
+ bool addChildValues_;
+};
+
+/** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a
+ human friendly way,
+ to a stream rather than to a string.
+ *
+ * The rules for line breaks and indentation are as follows:
+ * - Object value:
+ *   - if empty, then print {} without indent or line break
+ *   - if not empty, then print '{', line break and indent, print one value
+ *     per line, and then unindent, line break, and print '}'.
+ * - Array value:
+ *   - if empty, then print [] without indent or line break
+ *   - if the array contains no object value or non-empty array, and all the
+ *     values fit on one line, then print the array on a single line.
+ *   - otherwise, if the values do not fit on one line, or the array contains
+ *     an object or a non-empty array, then print one value per line.
+ *
+ * If the Value has comments, then they are output according to their
+ * #CommentPlacement.
+ *
+ * \param indentation Each level will be indented by this additional amount.
+ * \sa Reader, Value, Value::setComment()
+ * \deprecated Use StreamWriterBuilder.
+ */
+class JSON_API StyledStreamWriter {
+public:
+ StyledStreamWriter(JSONCPP_STRING indentation = "\t");
+ ~StyledStreamWriter() {}
+
+public:
+ /** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format.
+ * \param out Stream to write to. (Can be ostringstream, e.g.)
+ * \param root Value to serialize.
+ * \note There is no point in deriving from Writer, since write() should not
+ * return a value.
+ */
+ void write(JSONCPP_OSTREAM& out, const Value& root);
+
+private:
+ void writeValue(const Value& value);
+ void writeArrayValue(const Value& value);
+ bool isMultineArray(const Value& value);
+ void pushValue(const JSONCPP_STRING& value);
+ void writeIndent();
+ void writeWithIndent(const JSONCPP_STRING& value);
+ void indent();
+ void unindent();
+ void writeCommentBeforeValue(const Value& root);
+ void writeCommentAfterValueOnSameLine(const Value& root);
+ bool hasCommentForValue(const Value& value);
+ static JSONCPP_STRING normalizeEOL(const JSONCPP_STRING& text);
+
+ typedef std::vector<JSONCPP_STRING> ChildValues;
+
+ ChildValues childValues_;
+ JSONCPP_OSTREAM* document_;
+ JSONCPP_STRING indentString_;
+ unsigned int rightMargin_;
+ JSONCPP_STRING indentation_;
+ bool addChildValues_ : 1;
+ bool indented_ : 1;
+};
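
[Editor's note: to make the line-break and indent rules above concrete, a small sketch using the (deprecated) StyledStreamWriter; the sample value is hypothetical, and StreamWriterBuilder remains the preferred interface.]

#include <json/json.h>
#include <iostream>

int main() {
  Json::Value root;
  root["empty_object"] = Json::Value(Json::objectValue);  // printed as {}
  root["short_array"].append(1);                          // small scalar array: one line
  root["short_array"].append(2);
  root["nested"]["key"] = "value";                        // objects: one value per line

  Json::StyledStreamWriter writer("  ");  // indent each level by two spaces
  writer.write(std::cout, root);
  return 0;
}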
+
+#if defined(JSON_HAS_INT64)
+JSONCPP_STRING JSON_API valueToString(Int value);
+JSONCPP_STRING JSON_API valueToString(UInt value);
+#endif // if defined(JSON_HAS_INT64)
+JSONCPP_STRING JSON_API valueToString(LargestInt value);
+JSONCPP_STRING JSON_API valueToString(LargestUInt value);
+JSONCPP_STRING JSON_API valueToString(double value);
+JSONCPP_STRING JSON_API valueToString(bool value);
+JSONCPP_STRING JSON_API valueToQuotedString(const char* value);
+
+/// \brief Output using the StyledStreamWriter.
+/// \see Json::operator>>()
+JSON_API JSONCPP_OSTREAM& operator<<(JSONCPP_OSTREAM&, const Value& root);
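
[Editor's note: the free helpers declared above can be used on their own; a brief sketch with arbitrary values follows.]

#include <json/json.h>
#include <iostream>

int main() {
  std::cout << Json::valueToString(42) << std::endl;            // integer to decimal text
  std::cout << Json::valueToString(2.5) << std::endl;           // double to decimal text
  std::cout << Json::valueToString(true) << std::endl;          // "true"
  std::cout << Json::valueToQuotedString("a\"b") << std::endl;  // quoted and escaped

  Json::Value v;
  v["ok"] = true;
  std::cout << v;   // operator<< formats via the StyledStreamWriter, as documented above
  return 0;
}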
+
+} // namespace Json
+
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(pop)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+#endif // JSON_WRITER_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/writer.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: include/json/assertions.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef CPPTL_JSON_ASSERTIONS_H_INCLUDED
+#define CPPTL_JSON_ASSERTIONS_H_INCLUDED
+
+#include <stdlib.h>
+#include <sstream>
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include "config.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+
+/** It should not be possible for a maliciously designed file to
+ * cause an abort() or seg-fault, so these macros are used only
+ * for pre-condition violations and internal logic errors.
+ */
+#if JSON_USE_EXCEPTION
+
+// @todo <= add detail about condition in exception
+# define JSON_ASSERT(condition) \
+ {if (!(condition)) {Json::throwLogicError( "assert json failed" );}}
+
+# define JSON_FAIL_MESSAGE(message) \
+ { \
+ JSONCPP_OSTRINGSTREAM oss; oss << message; \
+ Json::throwLogicError(oss.str()); \
+ abort(); \
+ }
+
+#else // JSON_USE_EXCEPTION
+
+# define JSON_ASSERT(condition) assert(condition)
+
+// The call to assert() will show the failure message in debug builds. In
+// release builds we abort, for a core-dump or debugger.
+# define JSON_FAIL_MESSAGE(message) \
+ { \
+ JSONCPP_OSTRINGSTREAM oss; oss << message; \
+ assert(false && oss.str().c_str()); \
+ abort(); \
+ }
+
+
+#endif
+
+#define JSON_ASSERT_MESSAGE(condition, message) \
+ if (!(condition)) { \
+ JSON_FAIL_MESSAGE(message); \
+ }
+
+#endif // CPPTL_JSON_ASSERTIONS_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: include/json/assertions.h
+// //////////////////////////////////////////////////////////////////////
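
[Editor's note: a hedged sketch of how the assertion macros above behave when JSON_USE_EXCEPTION is enabled, which is the usual configuration. The helper below is purely hypothetical, since the macros are intended for the library's own pre-condition checks, and the logic error raised by Json::throwLogicError() surfaces as a std::exception subclass.]

#include <json/json.h>
#include <iostream>

// Hypothetical helper with a pre-condition, in the style the library uses internally.
static int firstElement(const Json::Value& arr) {
  JSON_ASSERT_MESSAGE(arr.isArray() && !arr.empty(),
                      "firstElement requires a non-empty array, got size " << arr.size());
  return arr[0].asInt();
}

int main() {
  try {
    Json::Value notAnArray("oops");
    firstElement(notAnArray);           // violates the pre-condition
  } catch (const std::exception& e) {   // logic errors surface as exceptions here
    std::cout << "caught: " << e.what() << std::endl;
  }
  return 0;
}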
+
+
+
+
+
+#endif //ifndef JSON_AMALGATED_H_INCLUDED
diff --git a/missing b/missing
new file mode 100755
index 0000000..28055d2
--- /dev/null
+++ b/missing
@@ -0,0 +1,376 @@
+#! /bin/sh
+# Common stub for a few missing GNU programs while installing.
+
+scriptversion=2009-04-28.21; # UTC
+
+# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006,
+# 2008, 2009 Free Software Foundation, Inc.
+# Originally by François Pinard <pinard at iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+if test $# -eq 0; then
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+fi
+
+run=:
+sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p'
+sed_minuso='s/.* -o \([^ ]*\).*/\1/p'
+
+# In the cases where this matters, `missing' is being run in the
+# srcdir already.
+if test -f configure.ac; then
+ configure_ac=configure.ac
+else
+ configure_ac=configure.in
+fi
+
+msg="missing on your system"
+
+case $1 in
+--run)
+ # Try to run requested program, and just exit if it succeeds.
+ run=
+ shift
+ "$@" && exit 0
+ # Exit code 63 means version mismatch. This often happens
+ # when the user tries to use an ancient version of a tool on
+ # a file that requires a minimum version. In this case we
+ # should proceed as if the program had been absent, or
+ # if --run hadn't been passed.
+ if test $? = 63; then
+ run=:
+ msg="probably too old"
+ fi
+ ;;
+
+ -h|--h|--he|--hel|--help)
+ echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
+error status if there is no known handling for PROGRAM.
+
+Options:
+ -h, --help display this help and exit
+ -v, --version output version information and exit
+ --run try to run the given command, and emulate it if it fails
+
+Supported PROGRAM values:
+ aclocal touch file \`aclocal.m4'
+ autoconf touch file \`configure'
+ autoheader touch file \`config.h.in'
+ autom4te touch the output file, or create a stub one
+ automake touch all \`Makefile.in' files
+ bison create \`y.tab.[ch]', if possible, from existing .[ch]
+ flex create \`lex.yy.c', if possible, from existing .c
+ help2man touch the output file
+ lex create \`lex.yy.c', if possible, from existing .c
+ makeinfo touch the output file
+ tar try tar, gnutar, gtar, then tar without non-portable flags
+ yacc create \`y.tab.[ch]', if possible, from existing .[ch]
+
+Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and
+\`g' are ignored when checking the name.
+
+Send bug reports to <bug-automake at gnu.org>."
+ exit $?
+ ;;
+
+ -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+ echo "missing $scriptversion (GNU Automake)"
+ exit $?
+ ;;
+
+ -*)
+ echo 1>&2 "$0: Unknown \`$1' option"
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+ ;;
+
+esac
+
+# normalize program name to check for.
+program=`echo "$1" | sed '
+ s/^gnu-//; t
+ s/^gnu//; t
+ s/^g//; t'`
+
+# Now exit if we have it, but it failed. Also exit now if we
+# don't have it and --version was passed (most likely to detect
+# the program). This is about non-GNU programs, so use $1 not
+# $program.
+case $1 in
+ lex*|yacc*)
+ # Not GNU programs, they don't have --version.
+ ;;
+
+ tar*)
+ if test -n "$run"; then
+ echo 1>&2 "ERROR: \`tar' requires --run"
+ exit 1
+ elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
+ exit 1
+ fi
+ ;;
+
+ *)
+ if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
+ # We have it, but it failed.
+ exit 1
+ elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
+ # Could not run --version or --help. This is probably someone
+ # running `$TOOL --version' or `$TOOL --help' to check whether
+ # $TOOL exists and not knowing $TOOL uses missing.
+ exit 1
+ fi
+ ;;
+esac
+
+# If it does not exist, or fails to run (possibly an outdated version),
+# try to emulate it.
+case $program in
+ aclocal*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`acinclude.m4' or \`${configure_ac}'. You might want
+ to install the \`Automake' and \`Perl' packages. Grab them from
+ any GNU archive site."
+ touch aclocal.m4
+ ;;
+
+ autoconf*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`${configure_ac}'. You might want to install the
+ \`Autoconf' and \`GNU m4' packages. Grab them from any GNU
+ archive site."
+ touch configure
+ ;;
+
+ autoheader*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`acconfig.h' or \`${configure_ac}'. You might want
+ to install the \`Autoconf' and \`GNU m4' packages. Grab them
+ from any GNU archive site."
+ files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}`
+ test -z "$files" && files="config.h"
+ touch_files=
+ for f in $files; do
+ case $f in
+ *:*) touch_files="$touch_files "`echo "$f" |
+ sed -e 's/^[^:]*://' -e 's/:.*//'`;;
+ *) touch_files="$touch_files $f.in";;
+ esac
+ done
+ touch $touch_files
+ ;;
+
+ automake*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'.
+ You might want to install the \`Automake' and \`Perl' packages.
+ Grab them from any GNU archive site."
+ find . -type f -name Makefile.am -print |
+ sed 's/\.am$/.in/' |
+ while read f; do touch "$f"; done
+ ;;
+
+ autom4te*)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, but is $msg.
+ You might have modified some files without having the
+ proper tools for further handling them.
+ You can get \`$1' as part of \`Autoconf' from any GNU
+ archive site."
+
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
+ touch $file
+ else
+ test -z "$file" || exec >$file
+ echo "#! /bin/sh"
+ echo "# Created by GNU Automake missing as a replacement of"
+ echo "# $ $@"
+ echo "exit 0"
+ chmod +x $file
+ exit 1
+ fi
+ ;;
+
+ bison*|yacc*)
+ echo 1>&2 "\
+WARNING: \`$1' $msg. You should only need it if
+ you modified a \`.y' file. You may need the \`Bison' package
+ in order for those modifications to take effect. You can get
+ \`Bison' from any GNU archive site."
+ rm -f y.tab.c y.tab.h
+ if test $# -ne 1; then
+ eval LASTARG="\${$#}"
+ case $LASTARG in
+ *.y)
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" y.tab.c
+ fi
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" y.tab.h
+ fi
+ ;;
+ esac
+ fi
+ if test ! -f y.tab.h; then
+ echo >y.tab.h
+ fi
+ if test ! -f y.tab.c; then
+ echo 'main() { return 0; }' >y.tab.c
+ fi
+ ;;
+
+ lex*|flex*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.l' file. You may need the \`Flex' package
+ in order for those modifications to take effect. You can get
+ \`Flex' from any GNU archive site."
+ rm -f lex.yy.c
+ if test $# -ne 1; then
+ eval LASTARG="\${$#}"
+ case $LASTARG in
+ *.l)
+ SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" lex.yy.c
+ fi
+ ;;
+ esac
+ fi
+ if test ! -f lex.yy.c; then
+ echo 'main() { return 0; }' >lex.yy.c
+ fi
+ ;;
+
+ help2man*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a dependency of a manual page. You may need the
+ \`Help2man' package in order for those modifications to take
+ effect. You can get \`Help2man' from any GNU archive site."
+
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
+ touch $file
+ else
+ test -z "$file" || exec >$file
+ echo ".ab help2man is required to generate this page"
+ exit $?
+ fi
+ ;;
+
+ makeinfo*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.texi' or \`.texinfo' file, or any other file
+ indirectly affecting the aspect of the manual. The spurious
+ call might also be the consequence of using a buggy \`make' (AIX,
+ DU, IRIX). You might want to install the \`Texinfo' package or
+ the \`GNU make' package. Grab either from any GNU archive site."
+ # The file to touch is that specified with -o ...
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -z "$file"; then
+ # ... or it is the one specified with @setfilename ...
+ infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
+ file=`sed -n '
+ /^@setfilename/{
+ s/.* \([^ ]*\) *$/\1/
+ p
+ q
+ }' $infile`
+ # ... or it is derived from the source name (dir/f.texi becomes f.info)
+ test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info
+ fi
+ # If the file does not exist, the user really needs makeinfo;
+ # let's fail without touching anything.
+ test -f $file || exit 1
+ touch $file
+ ;;
+
+ tar*)
+ shift
+
+ # We have already tried tar in the generic part.
+ # Look for gnutar/gtar before invocation to avoid ugly error
+ # messages.
+ if (gnutar --version > /dev/null 2>&1); then
+ gnutar "$@" && exit 0
+ fi
+ if (gtar --version > /dev/null 2>&1); then
+ gtar "$@" && exit 0
+ fi
+ firstarg="$1"
+ if shift; then
+ case $firstarg in
+ *o*)
+ firstarg=`echo "$firstarg" | sed s/o//`
+ tar "$firstarg" "$@" && exit 0
+ ;;
+ esac
+ case $firstarg in
+ *h*)
+ firstarg=`echo "$firstarg" | sed s/h//`
+ tar "$firstarg" "$@" && exit 0
+ ;;
+ esac
+ fi
+
+ echo 1>&2 "\
+WARNING: I can't seem to be able to run \`tar' with the given arguments.
+ You may want to install GNU tar or Free paxutils, or check the
+ command line arguments."
+ exit 1
+ ;;
+
+ *)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, and is $msg.
+ You might have modified some files without having the
+ proper tools for further handling them. Check the \`README' file,
+ it often tells you about the needed prerequisites for installing
+ this package. You may also peek at any GNU archive site, in case
+ some other package would contain this missing \`$1' program."
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/seq_test/Makefile.am b/seq_test/Makefile.am
new file mode 100644
index 0000000..e064050
--- /dev/null
+++ b/seq_test/Makefile.am
@@ -0,0 +1,23 @@
+bin_PROGRAMS = seq_test
+
+seq_test_CPPFLAGS = \
+ -I../src \
+ -I../htslib \
+ -I.. \
+ -I../fermi-lite --coverage
+
+seq_test_LDADD = \
+ ../fermi-lite/libfml.a \
+ ../bwa/libbwa.a \
+ ../htslib/libhts.a \
+ -lboost_unit_test_framework -lboost_system -lboost_timer -lboost_chrono
+
+seq_test_LDFLAGS = --coverage
+
+seq_test_SOURCES = seq_test.cpp \
+ ../src/BFC.cpp ../src/GenomicRegion.cpp \
+ ../src/BamWriter.cpp ../src/BamReader.cpp \
+ ../src/ReadFilter.cpp ../src/BamRecord.cpp \
+ ../src/BWAWrapper.cpp \
+ ../src/RefGenome.cpp ../src/SeqPlot.cpp ../src/BamHeader.cpp \
+ ../src/FermiAssembler.cpp ../src/ssw_cpp.cpp ../src/ssw.c ../src/jsoncpp.cpp
diff --git a/seq_test/Makefile.in b/seq_test/Makefile.in
new file mode 100644
index 0000000..4ee27d0
--- /dev/null
+++ b/seq_test/Makefile.in
@@ -0,0 +1,889 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+ at SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+bin_PROGRAMS = seq_test$(EXEEXT)
+subdir = .
+DIST_COMMON = $(am__configure_deps) $(srcdir)/../depcomp \
+ $(srcdir)/../install-sh $(srcdir)/../missing \
+ $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+ $(srcdir)/config.h.in $(top_srcdir)/configure
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno config.status.lineno
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)"
+PROGRAMS = $(bin_PROGRAMS)
+am_seq_test_OBJECTS = seq_test-seq_test.$(OBJEXT) \
+ seq_test-BFC.$(OBJEXT) seq_test-GenomicRegion.$(OBJEXT) \
+ seq_test-BamWriter.$(OBJEXT) seq_test-BamReader.$(OBJEXT) \
+ seq_test-ReadFilter.$(OBJEXT) seq_test-BamRecord.$(OBJEXT) \
+ seq_test-BWAWrapper.$(OBJEXT) seq_test-RefGenome.$(OBJEXT) \
+ seq_test-SeqPlot.$(OBJEXT) seq_test-BamHeader.$(OBJEXT) \
+ seq_test-FermiAssembler.$(OBJEXT) seq_test-ssw_cpp.$(OBJEXT) \
+ seq_test-ssw.$(OBJEXT) seq_test-jsoncpp.$(OBJEXT)
+seq_test_OBJECTS = $(am_seq_test_OBJECTS)
+seq_test_DEPENDENCIES = ../fermi-lite/libfml.a ../bwa/libbwa.a \
+ ../htslib/libhts.a
+seq_test_LINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) \
+ $(seq_test_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I. at am__isrc@
+depcomp = $(SHELL) $(top_srcdir)/../depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \
+ -o $@
+SOURCES = $(seq_test_SOURCES)
+DIST_SOURCES = $(seq_test_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+ { test ! -d "$(distdir)" \
+ || { find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+ && rm -fr "$(distdir)"; }; }
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+distuninstallcheck_listfiles = find . -type f -print
+distcleancheck_listfiles = find . -type f -print
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CXXFLAGS = @AM_CXXFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+boost_lib = @boost_lib@
+build_alias = @build_alias@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host_alias = @host_alias@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+seq_test_CPPFLAGS = \
+ -I../src \
+ -I../htslib \
+ -I.. \
+ -I../fermi-lite --coverage
+
+seq_test_LDADD = \
+ ../fermi-lite/libfml.a \
+ ../bwa/libbwa.a \
+ ../htslib/libhts.a \
+ -lboost_unit_test_framework -lboost_system -lboost_timer -lboost_chrono
+
+seq_test_LDFLAGS = --coverage
+seq_test_SOURCES = seq_test.cpp \
+ ../src/BFC.cpp ../src/GenomicRegion.cpp \
+ ../src/BamWriter.cpp ../src/BamReader.cpp \
+ ../src/ReadFilter.cpp ../src/BamRecord.cpp \
+ ../src/BWAWrapper.cpp \
+ ../src/RefGenome.cpp ../src/SeqPlot.cpp ../src/BamHeader.cpp \
+ ../src/FermiAssembler.cpp ../src/ssw_cpp.cpp ../src/ssw.c ../src/jsoncpp.cpp
+
+all: config.h
+ $(MAKE) $(AM_MAKEFLAGS) all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .cpp .o .obj
+am--refresh:
+ @:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
+ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ echo ' $(SHELL) ./config.status'; \
+ $(SHELL) ./config.status;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ $(am__cd) $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+$(am__aclocal_m4_deps):
+
+config.h: stamp-h1
+ @if test ! -f $@; then \
+ rm -f stamp-h1; \
+ $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \
+ else :; fi
+
+stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
+ @rm -f stamp-h1
+ cd $(top_builddir) && $(SHELL) ./config.status config.h
+$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ ($(am__cd) $(top_srcdir) && $(AUTOHEADER))
+ rm -f stamp-h1
+ touch $@
+
+distclean-hdr:
+ -rm -f config.h stamp-h1
+install-binPROGRAMS: $(bin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed 's/$(EXEEXT)$$//' | \
+ while read p p1; do if test -f $$p; \
+ then echo "$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+ sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) files[d] = files[d] " " $$1; \
+ else { print "f", $$3 "/" $$4, $$1; } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+ $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-binPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+ -e 's/$$/$(EXEEXT)/' `; \
+ test -n "$$list" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+clean-binPROGRAMS:
+ -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS)
+seq_test$(EXEEXT): $(seq_test_OBJECTS) $(seq_test_DEPENDENCIES)
+ @rm -f seq_test$(EXEEXT)
+ $(seq_test_LINK) $(seq_test_OBJECTS) $(seq_test_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-BFC.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-BWAWrapper.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-BamHeader.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-BamReader.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-BamRecord.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-BamWriter.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-FermiAssembler.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-GenomicRegion.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-ReadFilter.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-RefGenome.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-SeqPlot.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-jsoncpp.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-seq_test.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-ssw.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/seq_test-ssw_cpp.Po at am__quote@
+
+.c.o:
+ at am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+ at am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+ at am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+ at am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+seq_test-ssw.o: ../src/ssw.c
+ at am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT seq_test-ssw.o -MD -MP -MF $(DEPDIR)/seq_test-ssw.Tpo -c -o seq_test-ssw.o `test -f '../src/ssw.c' || echo '$(srcdir)/'`../src/ssw.c
+ at am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/seq_test-ssw.Tpo $(DEPDIR)/seq_test-ssw.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='../src/ssw.c' object='seq_test-ssw.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o seq_test-ssw.o `test -f '../src/ssw.c' || echo '$(srcdir)/'`../src/ssw.c
+
+seq_test-ssw.obj: ../src/ssw.c
+ at am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT seq_test-ssw.obj -MD -MP -MF $(DEPDIR)/seq_test-ssw.Tpo -c -o seq_test-ssw.obj `if test -f '../src/ssw.c'; then $(CYGPATH_W) '../src/ssw.c'; else $(CYGPATH_W) '$(srcdir)/../src/ssw.c'; fi`
+ at am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/seq_test-ssw.Tpo $(DEPDIR)/seq_test-ssw.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='../src/ssw.c' object='seq_test-ssw.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o seq_test-ssw.obj `if test -f '../src/ssw.c'; then $(CYGPATH_W) '../src/ssw.c'; else $(CYGPATH_W) '$(srcdir)/../src/ssw.c'; fi`
+
+.cpp.o:
+ at am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $<
+
+.cpp.obj:
+ at am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+seq_test-seq_test.o: seq_test.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-seq_test.o -MD -MP -MF $(DEPDIR)/seq_test-seq_test.Tpo -c -o seq_test-seq_test.o `test -f 'seq_test.cpp' || echo '$(srcdir)/'`seq_test.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-seq_test.Tpo $(DEPDIR)/seq_test-seq_test.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='seq_test.cpp' object='seq_test-seq_test.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-seq_test.o `test -f 'seq_test.cpp' || echo '$(srcdir)/'`seq_test.cpp
+
+seq_test-seq_test.obj: seq_test.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-seq_test.obj -MD -MP -MF $(DEPDIR)/seq_test-seq_test.Tpo -c -o seq_test-seq_test.obj `if test -f 'seq_test.cpp'; then $(CYGPATH_W) 'seq_test.cpp'; else $(CYGPATH_W) '$(srcdir)/seq_test.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-seq_test.Tpo $(DEPDIR)/seq_test-seq_test.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='seq_test.cpp' object='seq_test-seq_test.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-seq_test.obj `if test -f 'seq_test.cpp'; then $(CYGPATH_W) 'seq_test.cpp'; else $(CYGPATH_W) '$(srcdir)/seq_test.cpp'; fi`
+
+seq_test-BFC.o: ../src/BFC.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BFC.o -MD -MP -MF $(DEPDIR)/seq_test-BFC.Tpo -c -o seq_test-BFC.o `test -f '../src/BFC.cpp' || echo '$(srcdir)/'`../src/BFC.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BFC.Tpo $(DEPDIR)/seq_test-BFC.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BFC.cpp' object='seq_test-BFC.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BFC.o `test -f '../src/BFC.cpp' || echo '$(srcdir)/'`../src/BFC.cpp
+
+seq_test-BFC.obj: ../src/BFC.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BFC.obj -MD -MP -MF $(DEPDIR)/seq_test-BFC.Tpo -c -o seq_test-BFC.obj `if test -f '../src/BFC.cpp'; then $(CYGPATH_W) '../src/BFC.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BFC.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BFC.Tpo $(DEPDIR)/seq_test-BFC.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BFC.cpp' object='seq_test-BFC.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BFC.obj `if test -f '../src/BFC.cpp'; then $(CYGPATH_W) '../src/BFC.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BFC.cpp'; fi`
+
+seq_test-GenomicRegion.o: ../src/GenomicRegion.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-GenomicRegion.o -MD -MP -MF $(DEPDIR)/seq_test-GenomicRegion.Tpo -c -o seq_test-GenomicRegion.o `test -f '../src/GenomicRegion.cpp' || echo '$(srcdir)/'`../src/GenomicRegion.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-GenomicRegion.Tpo $(DEPDIR)/seq_test-GenomicRegion.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/GenomicRegion.cpp' object='seq_test-GenomicRegion.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-GenomicRegion.o `test -f '../src/GenomicRegion.cpp' || echo '$(srcdir)/'`../src/GenomicRegion.cpp
+
+seq_test-GenomicRegion.obj: ../src/GenomicRegion.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-GenomicRegion.obj -MD -MP -MF $(DEPDIR)/seq_test-GenomicRegion.Tpo -c -o seq_test-GenomicRegion.obj `if test -f '../src/GenomicRegion.cpp'; then $(CYGPATH_W) '../src/GenomicRegion.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/GenomicRegion.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-GenomicRegion.Tpo $(DEPDIR)/seq_test-GenomicRegion.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/GenomicRegion.cpp' object='seq_test-GenomicRegion.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-GenomicRegion.obj `if test -f '../src/GenomicRegion.cpp'; then $(CYGPATH_W) '../src/GenomicRegion.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/GenomicRegion.cpp'; fi`
+
+seq_test-BamWriter.o: ../src/BamWriter.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BamWriter.o -MD -MP -MF $(DEPDIR)/seq_test-BamWriter.Tpo -c -o seq_test-BamWriter.o `test -f '../src/BamWriter.cpp' || echo '$(srcdir)/'`../src/BamWriter.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BamWriter.Tpo $(DEPDIR)/seq_test-BamWriter.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BamWriter.cpp' object='seq_test-BamWriter.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BamWriter.o `test -f '../src/BamWriter.cpp' || echo '$(srcdir)/'`../src/BamWriter.cpp
+
+seq_test-BamWriter.obj: ../src/BamWriter.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BamWriter.obj -MD -MP -MF $(DEPDIR)/seq_test-BamWriter.Tpo -c -o seq_test-BamWriter.obj `if test -f '../src/BamWriter.cpp'; then $(CYGPATH_W) '../src/BamWriter.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BamWriter.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BamWriter.Tpo $(DEPDIR)/seq_test-BamWriter.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BamWriter.cpp' object='seq_test-BamWriter.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BamWriter.obj `if test -f '../src/BamWriter.cpp'; then $(CYGPATH_W) '../src/BamWriter.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BamWriter.cpp'; fi`
+
+seq_test-BamReader.o: ../src/BamReader.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BamReader.o -MD -MP -MF $(DEPDIR)/seq_test-BamReader.Tpo -c -o seq_test-BamReader.o `test -f '../src/BamReader.cpp' || echo '$(srcdir)/'`../src/BamReader.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BamReader.Tpo $(DEPDIR)/seq_test-BamReader.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BamReader.cpp' object='seq_test-BamReader.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BamReader.o `test -f '../src/BamReader.cpp' || echo '$(srcdir)/'`../src/BamReader.cpp
+
+seq_test-BamReader.obj: ../src/BamReader.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BamReader.obj -MD -MP -MF $(DEPDIR)/seq_test-BamReader.Tpo -c -o seq_test-BamReader.obj `if test -f '../src/BamReader.cpp'; then $(CYGPATH_W) '../src/BamReader.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BamReader.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BamReader.Tpo $(DEPDIR)/seq_test-BamReader.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BamReader.cpp' object='seq_test-BamReader.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BamReader.obj `if test -f '../src/BamReader.cpp'; then $(CYGPATH_W) '../src/BamReader.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BamReader.cpp'; fi`
+
+seq_test-ReadFilter.o: ../src/ReadFilter.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-ReadFilter.o -MD -MP -MF $(DEPDIR)/seq_test-ReadFilter.Tpo -c -o seq_test-ReadFilter.o `test -f '../src/ReadFilter.cpp' || echo '$(srcdir)/'`../src/ReadFilter.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-ReadFilter.Tpo $(DEPDIR)/seq_test-ReadFilter.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/ReadFilter.cpp' object='seq_test-ReadFilter.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-ReadFilter.o `test -f '../src/ReadFilter.cpp' || echo '$(srcdir)/'`../src/ReadFilter.cpp
+
+seq_test-ReadFilter.obj: ../src/ReadFilter.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-ReadFilter.obj -MD -MP -MF $(DEPDIR)/seq_test-ReadFilter.Tpo -c -o seq_test-ReadFilter.obj `if test -f '../src/ReadFilter.cpp'; then $(CYGPATH_W) '../src/ReadFilter.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/ReadFilter.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-ReadFilter.Tpo $(DEPDIR)/seq_test-ReadFilter.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/ReadFilter.cpp' object='seq_test-ReadFilter.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-ReadFilter.obj `if test -f '../src/ReadFilter.cpp'; then $(CYGPATH_W) '../src/ReadFilter.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/ReadFilter.cpp'; fi`
+
+seq_test-BamRecord.o: ../src/BamRecord.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BamRecord.o -MD -MP -MF $(DEPDIR)/seq_test-BamRecord.Tpo -c -o seq_test-BamRecord.o `test -f '../src/BamRecord.cpp' || echo '$(srcdir)/'`../src/BamRecord.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BamRecord.Tpo $(DEPDIR)/seq_test-BamRecord.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BamRecord.cpp' object='seq_test-BamRecord.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BamRecord.o `test -f '../src/BamRecord.cpp' || echo '$(srcdir)/'`../src/BamRecord.cpp
+
+seq_test-BamRecord.obj: ../src/BamRecord.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BamRecord.obj -MD -MP -MF $(DEPDIR)/seq_test-BamRecord.Tpo -c -o seq_test-BamRecord.obj `if test -f '../src/BamRecord.cpp'; then $(CYGPATH_W) '../src/BamRecord.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BamRecord.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BamRecord.Tpo $(DEPDIR)/seq_test-BamRecord.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BamRecord.cpp' object='seq_test-BamRecord.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BamRecord.obj `if test -f '../src/BamRecord.cpp'; then $(CYGPATH_W) '../src/BamRecord.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BamRecord.cpp'; fi`
+
+seq_test-BWAWrapper.o: ../src/BWAWrapper.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BWAWrapper.o -MD -MP -MF $(DEPDIR)/seq_test-BWAWrapper.Tpo -c -o seq_test-BWAWrapper.o `test -f '../src/BWAWrapper.cpp' || echo '$(srcdir)/'`../src/BWAWrapper.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BWAWrapper.Tpo $(DEPDIR)/seq_test-BWAWrapper.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BWAWrapper.cpp' object='seq_test-BWAWrapper.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BWAWrapper.o `test -f '../src/BWAWrapper.cpp' || echo '$(srcdir)/'`../src/BWAWrapper.cpp
+
+seq_test-BWAWrapper.obj: ../src/BWAWrapper.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BWAWrapper.obj -MD -MP -MF $(DEPDIR)/seq_test-BWAWrapper.Tpo -c -o seq_test-BWAWrapper.obj `if test -f '../src/BWAWrapper.cpp'; then $(CYGPATH_W) '../src/BWAWrapper.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BWAWrapper.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BWAWrapper.Tpo $(DEPDIR)/seq_test-BWAWrapper.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BWAWrapper.cpp' object='seq_test-BWAWrapper.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BWAWrapper.obj `if test -f '../src/BWAWrapper.cpp'; then $(CYGPATH_W) '../src/BWAWrapper.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BWAWrapper.cpp'; fi`
+
+seq_test-RefGenome.o: ../src/RefGenome.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-RefGenome.o -MD -MP -MF $(DEPDIR)/seq_test-RefGenome.Tpo -c -o seq_test-RefGenome.o `test -f '../src/RefGenome.cpp' || echo '$(srcdir)/'`../src/RefGenome.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-RefGenome.Tpo $(DEPDIR)/seq_test-RefGenome.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/RefGenome.cpp' object='seq_test-RefGenome.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-RefGenome.o `test -f '../src/RefGenome.cpp' || echo '$(srcdir)/'`../src/RefGenome.cpp
+
+seq_test-RefGenome.obj: ../src/RefGenome.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-RefGenome.obj -MD -MP -MF $(DEPDIR)/seq_test-RefGenome.Tpo -c -o seq_test-RefGenome.obj `if test -f '../src/RefGenome.cpp'; then $(CYGPATH_W) '../src/RefGenome.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/RefGenome.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-RefGenome.Tpo $(DEPDIR)/seq_test-RefGenome.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/RefGenome.cpp' object='seq_test-RefGenome.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-RefGenome.obj `if test -f '../src/RefGenome.cpp'; then $(CYGPATH_W) '../src/RefGenome.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/RefGenome.cpp'; fi`
+
+seq_test-SeqPlot.o: ../src/SeqPlot.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-SeqPlot.o -MD -MP -MF $(DEPDIR)/seq_test-SeqPlot.Tpo -c -o seq_test-SeqPlot.o `test -f '../src/SeqPlot.cpp' || echo '$(srcdir)/'`../src/SeqPlot.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-SeqPlot.Tpo $(DEPDIR)/seq_test-SeqPlot.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/SeqPlot.cpp' object='seq_test-SeqPlot.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-SeqPlot.o `test -f '../src/SeqPlot.cpp' || echo '$(srcdir)/'`../src/SeqPlot.cpp
+
+seq_test-SeqPlot.obj: ../src/SeqPlot.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-SeqPlot.obj -MD -MP -MF $(DEPDIR)/seq_test-SeqPlot.Tpo -c -o seq_test-SeqPlot.obj `if test -f '../src/SeqPlot.cpp'; then $(CYGPATH_W) '../src/SeqPlot.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/SeqPlot.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-SeqPlot.Tpo $(DEPDIR)/seq_test-SeqPlot.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/SeqPlot.cpp' object='seq_test-SeqPlot.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-SeqPlot.obj `if test -f '../src/SeqPlot.cpp'; then $(CYGPATH_W) '../src/SeqPlot.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/SeqPlot.cpp'; fi`
+
+seq_test-BamHeader.o: ../src/BamHeader.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BamHeader.o -MD -MP -MF $(DEPDIR)/seq_test-BamHeader.Tpo -c -o seq_test-BamHeader.o `test -f '../src/BamHeader.cpp' || echo '$(srcdir)/'`../src/BamHeader.cpp
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BamHeader.Tpo $(DEPDIR)/seq_test-BamHeader.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BamHeader.cpp' object='seq_test-BamHeader.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BamHeader.o `test -f '../src/BamHeader.cpp' || echo '$(srcdir)/'`../src/BamHeader.cpp
+
+seq_test-BamHeader.obj: ../src/BamHeader.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-BamHeader.obj -MD -MP -MF $(DEPDIR)/seq_test-BamHeader.Tpo -c -o seq_test-BamHeader.obj `if test -f '../src/BamHeader.cpp'; then $(CYGPATH_W) '../src/BamHeader.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BamHeader.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-BamHeader.Tpo $(DEPDIR)/seq_test-BamHeader.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/BamHeader.cpp' object='seq_test-BamHeader.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-BamHeader.obj `if test -f '../src/BamHeader.cpp'; then $(CYGPATH_W) '../src/BamHeader.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/BamHeader.cpp'; fi`
+
+seq_test-FermiAssembler.o: ../src/FermiAssembler.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-FermiAssembler.o -MD -MP -MF $(DEPDIR)/seq_test-FermiAssembler.Tpo -c -o seq_test-FermiAssembler.o `test -f '../src/FermiAssembler.cpp' || echo '$(srcdir)/'`../src/FermiAssembler.cpp
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-FermiAssembler.Tpo $(DEPDIR)/seq_test-FermiAssembler.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/FermiAssembler.cpp' object='seq_test-FermiAssembler.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-FermiAssembler.o `test -f '../src/FermiAssembler.cpp' || echo '$(srcdir)/'`../src/FermiAssembler.cpp
+
+seq_test-FermiAssembler.obj: ../src/FermiAssembler.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-FermiAssembler.obj -MD -MP -MF $(DEPDIR)/seq_test-FermiAssembler.Tpo -c -o seq_test-FermiAssembler.obj `if test -f '../src/FermiAssembler.cpp'; then $(CYGPATH_W) '../src/FermiAssembler.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/FermiAssembler.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-FermiAssembler.Tpo $(DEPDIR)/seq_test-FermiAssembler.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/FermiAssembler.cpp' object='seq_test-FermiAssembler.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-FermiAssembler.obj `if test -f '../src/FermiAssembler.cpp'; then $(CYGPATH_W) '../src/FermiAssembler.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/FermiAssembler.cpp'; fi`
+
+seq_test-ssw_cpp.o: ../src/ssw_cpp.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-ssw_cpp.o -MD -MP -MF $(DEPDIR)/seq_test-ssw_cpp.Tpo -c -o seq_test-ssw_cpp.o `test -f '../src/ssw_cpp.cpp' || echo '$(srcdir)/'`../src/ssw_cpp.cpp
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-ssw_cpp.Tpo $(DEPDIR)/seq_test-ssw_cpp.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/ssw_cpp.cpp' object='seq_test-ssw_cpp.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-ssw_cpp.o `test -f '../src/ssw_cpp.cpp' || echo '$(srcdir)/'`../src/ssw_cpp.cpp
+
+seq_test-ssw_cpp.obj: ../src/ssw_cpp.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-ssw_cpp.obj -MD -MP -MF $(DEPDIR)/seq_test-ssw_cpp.Tpo -c -o seq_test-ssw_cpp.obj `if test -f '../src/ssw_cpp.cpp'; then $(CYGPATH_W) '../src/ssw_cpp.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/ssw_cpp.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-ssw_cpp.Tpo $(DEPDIR)/seq_test-ssw_cpp.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/ssw_cpp.cpp' object='seq_test-ssw_cpp.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-ssw_cpp.obj `if test -f '../src/ssw_cpp.cpp'; then $(CYGPATH_W) '../src/ssw_cpp.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/ssw_cpp.cpp'; fi`
+
+seq_test-jsoncpp.o: ../src/jsoncpp.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-jsoncpp.o -MD -MP -MF $(DEPDIR)/seq_test-jsoncpp.Tpo -c -o seq_test-jsoncpp.o `test -f '../src/jsoncpp.cpp' || echo '$(srcdir)/'`../src/jsoncpp.cpp
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-jsoncpp.Tpo $(DEPDIR)/seq_test-jsoncpp.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/jsoncpp.cpp' object='seq_test-jsoncpp.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-jsoncpp.o `test -f '../src/jsoncpp.cpp' || echo '$(srcdir)/'`../src/jsoncpp.cpp
+
+seq_test-jsoncpp.obj: ../src/jsoncpp.cpp
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seq_test-jsoncpp.obj -MD -MP -MF $(DEPDIR)/seq_test-jsoncpp.Tpo -c -o seq_test-jsoncpp.obj `if test -f '../src/jsoncpp.cpp'; then $(CYGPATH_W) '../src/jsoncpp.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/jsoncpp.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/seq_test-jsoncpp.Tpo $(DEPDIR)/seq_test-jsoncpp.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='../src/jsoncpp.cpp' object='seq_test-jsoncpp.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seq_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seq_test-jsoncpp.obj `if test -f '../src/jsoncpp.cpp'; then $(CYGPATH_W) '../src/jsoncpp.cpp'; else $(CYGPATH_W) '$(srcdir)/../src/jsoncpp.cpp'; fi`
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ $(am__remove_distdir)
+ test -d "$(distdir)" || mkdir "$(distdir)"
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ -test -n "$(am__skip_mode_fix)" \
+ || find "$(distdir)" -type d ! -perm -755 \
+ -exec chmod u+rwx,go+rx {} \; -o \
+ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
+ || chmod -R a+r "$(distdir)"
+dist-gzip: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__remove_distdir)
+
+dist-bzip2: distdir
+ tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2
+ $(am__remove_distdir)
+
+dist-lzma: distdir
+ tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma
+ $(am__remove_distdir)
+
+dist-xz: distdir
+ tardir=$(distdir) && $(am__tar) | xz -c >$(distdir).tar.xz
+ $(am__remove_distdir)
+
+dist-tarZ: distdir
+ tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+ $(am__remove_distdir)
+
+dist-shar: distdir
+ shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
+ $(am__remove_distdir)
+
+dist-zip: distdir
+ -rm -f $(distdir).zip
+ zip -rq $(distdir).zip $(distdir)
+ $(am__remove_distdir)
+
+dist dist-all: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__remove_distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ case '$(DIST_ARCHIVES)' in \
+ *.tar.gz*) \
+ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\
+ *.tar.bz2*) \
+ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
+ *.tar.lzma*) \
+ lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\
+ *.tar.xz*) \
+ xz -dc $(distdir).tar.xz | $(am__untar) ;;\
+ *.tar.Z*) \
+ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+ *.shar.gz*) \
+ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\
+ *.zip*) \
+ unzip $(distdir).zip ;;\
+ esac
+ chmod -R a-w $(distdir); chmod u+w $(distdir)
+ mkdir $(distdir)/_build
+ mkdir $(distdir)/_inst
+ chmod a-w $(distdir)
+ test -d $(distdir)/_build || exit 0; \
+ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+ && am__cwd=`pwd` \
+ && $(am__cd) $(distdir)/_build \
+ && ../configure --srcdir=.. --prefix="$$dc_install_base" \
+ $(DISTCHECK_CONFIGURE_FLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+ distuninstallcheck \
+ && chmod -R a-w "$$dc_install_base" \
+ && ({ \
+ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+ } || { rm -rf "$$dc_destdir"; exit 1; }) \
+ && rm -rf "$$dc_destdir" \
+ && $(MAKE) $(AM_MAKEFLAGS) dist \
+ && rm -rf $(DIST_ARCHIVES) \
+ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
+ && cd "$$am__cwd" \
+ || exit 1
+ $(am__remove_distdir)
+ @(echo "$(distdir) archives ready for distribution: "; \
+ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
+distuninstallcheck:
+ @$(am__cd) '$(distuninstallcheck_dir)' \
+ && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \
+ || { echo "ERROR: files left after uninstall:" ; \
+ if test -n "$(DESTDIR)"; then \
+ echo " (check DESTDIR support)"; \
+ fi ; \
+ $(distuninstallcheck_listfiles) ; \
+ exit 1; } >&2
+distcleancheck: distclean
+ @if test '$(srcdir)' = . ; then \
+ echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+ exit 1 ; \
+ fi
+ @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left in build directory after distclean:" ; \
+ $(distcleancheck_listfiles) ; \
+ exit 1; } >&2
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS) config.h
+installdirs:
+ for dir in "$(DESTDIR)$(bindir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-hdr distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binPROGRAMS
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf $(top_srcdir)/autom4te.cache
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS
+
+.MAKE: all install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am am--refresh check check-am clean \
+ clean-binPROGRAMS clean-generic ctags dist dist-all dist-bzip2 \
+ dist-gzip dist-lzma dist-shar dist-tarZ dist-xz dist-zip \
+ distcheck distclean distclean-compile distclean-generic \
+ distclean-hdr distclean-tags distcleancheck distdir \
+ distuninstallcheck dvi dvi-am html html-am info info-am \
+ install install-am install-binPROGRAMS install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \
+ uninstall-am uninstall-binPROGRAMS
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/seq_test/config.h.in b/seq_test/config.h.in
new file mode 100644
index 0000000..1854ef1
--- /dev/null
+++ b/seq_test/config.h.in
@@ -0,0 +1,58 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Define to 1 if you have the <boost/test/unit_test.hpp> header file. */
+#undef HAVE_BOOST_TEST_UNIT_TEST_HPP
+
+/* clock_getttime found */
+#undef HAVE_CLOCK_GETTIME
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Version number of package */
+#undef VERSION
diff --git a/seq_test/config.status b/seq_test/config.status
new file mode 100755
index 0000000..b72c18f
--- /dev/null
+++ b/seq_test/config.status
@@ -0,0 +1,1206 @@
+#! /bin/sh
+# Generated by configure.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=${CONFIG_SHELL-/bin/sh}
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+
+# Save the log message, to keep $[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by seqlibtest $as_me 1.0, which was
+generated by GNU Autoconf 2.63. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+# Files that config.status was made for.
+config_files=" Makefile"
+config_headers=" config.h"
+config_commands=" depfiles"
+
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTION]... [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to <bug-autoconf@gnu.org>."
+
+ac_cs_version="\
+seqlibtest config.status 1.0
+configured by ./configure, generated by GNU Autoconf 2.63,
+ with options \"'--with-boost=/xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1'\"
+
+Copyright (C) 2008 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='/xchip/gistic/Jeremiah/GIT/SeqLib/seq_test'
+srcdir='.'
+INSTALL='/usr/bin/install -c'
+MKDIR_P='/bin/mkdir -p'
+AWK='gawk'
+test -n "$AWK" || AWK=awk
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ $as_echo "$ac_cs_version"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ CONFIG_FILES="$CONFIG_FILES '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ CONFIG_HEADERS="$CONFIG_HEADERS '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ { $as_echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ $as_echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { $as_echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+if $ac_cs_recheck; then
+ set X '/bin/sh' './configure' '--with-boost=/xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1' $ac_configure_extra_args --no-create --no-recursion
+ shift
+ $as_echo "running CONFIG_SHELL=/bin/sh $*" >&6
+ CONFIG_SHELL='/bin/sh'
+ export CONFIG_SHELL
+ exec "$@"
+fi
+
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ $as_echo "$ac_log"
+} >&5
+
+#
+# INIT-COMMANDS
+#
+AMDEP_TRUE="" ac_aux_dir="./.."
+
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+
+ *) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+$as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp=
+ trap 'exit_status=$?
+ { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} ||
+{
+ $as_echo "$as_me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr='
'
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$tmp/subs1.awk" &&
+cat >>"$tmp/subs1.awk" <<\_ACAWK &&
+S["am__EXEEXT_FALSE"]=""
+S["am__EXEEXT_TRUE"]="#"
+S["LTLIBOBJS"]=""
+S["LIBOBJS"]=""
+S["AM_CXXFLAGS"]="-g -Wno-unknown-pragmas -DHAVE_C11=1 -std=c++11"
+S["boost_lib"]="/xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1/stage/lib"
+S["EGREP"]="/bin/grep -E"
+S["GREP"]="/bin/grep"
+S["CXXCPP"]="g++ -E"
+S["RANLIB"]="ranlib"
+S["am__fastdepCC_FALSE"]="#"
+S["am__fastdepCC_TRUE"]=""
+S["CCDEPMODE"]="depmode=gcc3"
+S["ac_ct_CC"]="gcc"
+S["CFLAGS"]="-g -O2"
+S["CC"]="gcc"
+S["am__fastdepCXX_FALSE"]="#"
+S["am__fastdepCXX_TRUE"]=""
+S["CXXDEPMODE"]="depmode=gcc3"
+S["AMDEPBACKSLASH"]="\\"
+S["AMDEP_FALSE"]="#"
+S["AMDEP_TRUE"]=""
+S["am__quote"]=""
+S["am__include"]="include"
+S["DEPDIR"]=".deps"
+S["OBJEXT"]="o"
+S["EXEEXT"]=""
+S["ac_ct_CXX"]="g++"
+S["CPPFLAGS"]=" -I/xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1"
+S["LDFLAGS"]=""
+S["CXXFLAGS"]="-g -O2"
+S["CXX"]="g++"
+S["MAINT"]="#"
+S["MAINTAINER_MODE_FALSE"]=""
+S["MAINTAINER_MODE_TRUE"]="#"
+S["am__untar"]="${AMTAR} xf -"
+S["am__tar"]="${AMTAR} chof - \"$$tardir\""
+S["AMTAR"]="${SHELL} /xchip/gistic/Jeremiah/GIT/SeqLib/missing --run tar"
+S["am__leading_dot"]="."
+S["SET_MAKE"]=""
+S["AWK"]="gawk"
+S["mkdir_p"]="/bin/mkdir -p"
+S["MKDIR_P"]="/bin/mkdir -p"
+S["INSTALL_STRIP_PROGRAM"]="$(install_sh) -c -s"
+S["STRIP"]=""
+S["install_sh"]="${SHELL} /xchip/gistic/Jeremiah/GIT/SeqLib/install-sh"
+S["MAKEINFO"]="${SHELL} /xchip/gistic/Jeremiah/GIT/SeqLib/missing --run makeinfo"
+S["AUTOHEADER"]="${SHELL} /xchip/gistic/Jeremiah/GIT/SeqLib/missing --run autoheader"
+S["AUTOMAKE"]="${SHELL} /xchip/gistic/Jeremiah/GIT/SeqLib/missing --run automake-1.11"
+S["AUTOCONF"]="${SHELL} /xchip/gistic/Jeremiah/GIT/SeqLib/missing --run autoconf"
+S["ACLOCAL"]="${SHELL} /xchip/gistic/Jeremiah/GIT/SeqLib/missing --run aclocal-1.11"
+S["VERSION"]="1.0"
+S["PACKAGE"]="seqlibtest"
+S["CYGPATH_W"]="echo"
+S["am__isrc"]=""
+S["INSTALL_DATA"]="${INSTALL} -m 644"
+S["INSTALL_SCRIPT"]="${INSTALL}"
+S["INSTALL_PROGRAM"]="${INSTALL}"
+S["target_alias"]=""
+S["host_alias"]=""
+S["build_alias"]=""
+S["LIBS"]="-L/xchip/gistic/Jeremiah/software/boost_1.61.0_gcc5.1/stage/lib -lrt -lz "
+S["ECHO_T"]=""
+S["ECHO_N"]="-n"
+S["ECHO_C"]=""
+S["DEFS"]="-DHAVE_CONFIG_H"
+S["mandir"]="${datarootdir}/man"
+S["localedir"]="${datarootdir}/locale"
+S["libdir"]="${exec_prefix}/lib"
+S["psdir"]="${docdir}"
+S["pdfdir"]="${docdir}"
+S["dvidir"]="${docdir}"
+S["htmldir"]="${docdir}"
+S["infodir"]="${datarootdir}/info"
+S["docdir"]="${datarootdir}/doc/${PACKAGE_TARNAME}"
+S["oldincludedir"]="/usr/include"
+S["includedir"]="${prefix}/include"
+S["localstatedir"]="${prefix}/var"
+S["sharedstatedir"]="${prefix}/com"
+S["sysconfdir"]="${prefix}/etc"
+S["datadir"]="${datarootdir}"
+S["datarootdir"]="${prefix}/share"
+S["libexecdir"]="${exec_prefix}/libexec"
+S["sbindir"]="${exec_prefix}/sbin"
+S["bindir"]="${exec_prefix}/bin"
+S["program_transform_name"]="s,x,x,"
+S["prefix"]="/usr/local"
+S["exec_prefix"]="${prefix}"
+S["PACKAGE_BUGREPORT"]="jwala at broadinstitute.org"
+S["PACKAGE_STRING"]="seqlibtest 1.0"
+S["PACKAGE_VERSION"]="1.0"
+S["PACKAGE_TARNAME"]="seqlibtest"
+S["PACKAGE_NAME"]="seqlibtest"
+S["PATH_SEPARATOR"]=":"
+S["SHELL"]="/bin/sh"
+_ACAWK
+cat >>"$tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
+ || { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5
+$as_echo "$as_me: error: could not setup config files machinery" >&2;}
+ { (exit 1); exit 1; }; }
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+D["PACKAGE_NAME"]=" \"seqlibtest\""
+D["PACKAGE_TARNAME"]=" \"seqlibtest\""
+D["PACKAGE_VERSION"]=" \"1.0\""
+D["PACKAGE_STRING"]=" \"seqlibtest 1.0\""
+D["PACKAGE_BUGREPORT"]=" \"jwala at broadinstitute.org\""
+D["PACKAGE"]=" \"seqlibtest\""
+D["VERSION"]=" \"1.0\""
+D["STDC_HEADERS"]=" 1"
+D["HAVE_SYS_TYPES_H"]=" 1"
+D["HAVE_SYS_STAT_H"]=" 1"
+D["HAVE_STDLIB_H"]=" 1"
+D["HAVE_STRING_H"]=" 1"
+D["HAVE_MEMORY_H"]=" 1"
+D["HAVE_STRINGS_H"]=" 1"
+D["HAVE_INTTYPES_H"]=" 1"
+D["HAVE_STDINT_H"]=" 1"
+D["HAVE_UNISTD_H"]=" 1"
+D["HAVE_CLOCK_GETTIME"]=" 1"
+D["HAVE_BOOST_TEST_UNIT_TEST_HPP"]=" 1"
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ][_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789]*([\t (]|$)/ {
+ line = $ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+ { { $as_echo "$as_me:$LINENO: error: could not setup config headers machinery" >&5
+$as_echo "$as_me: error: could not setup config headers machinery" >&2;}
+ { (exit 1); exit 1; }; }
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: invalid tag $ac_tag" >&5
+$as_echo "$as_me: error: invalid tag $ac_tag" >&2;}
+ { (exit 1); exit 1; }; };;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ { { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
+$as_echo "$as_me: error: cannot find input file: $ac_f" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ ac_file_inputs="$ac_file_inputs '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { $as_echo "$as_me:$LINENO: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$tmp/stdin" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; } ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir="$ac_dir"
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+$as_echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+ ac_MKDIR_P=$MKDIR_P
+ case $MKDIR_P in
+ [\\/$]* | ?:[\\/]* ) ;;
+ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+ esac
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p
+'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+ ac_datarootdir_hack='
+ s&@datadir@&${datarootdir}&g
+ s&@docdir@&${datarootdir}/doc/${PACKAGE_TARNAME}&g
+ s&@infodir@&${datarootdir}/info&g
+ s&@localedir@&${datarootdir}/locale&g
+ s&@mandir@&${datarootdir}/man&g
+ s&\${datarootdir}&${prefix}/share&g' ;;
+esac
+ac_sed_extra="/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/
+s/:*\${srcdir}:*/:/
+s/:*@srcdir@:*/:/
+s/^\([^=]*=[ ]*\):*/\1/
+s/:*$//
+s/^[^=]*=[ ]*$//
+}
+
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&2;}
+
+ rm -f "$tmp/stdin"
+ case $ac_file in
+ -) cat "$tmp/out" && rm -f "$tmp/out";;
+ *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
+ esac \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
+ } >"$tmp/config.h" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
+ { $as_echo "$as_me:$LINENO: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$tmp/config.h" "$ac_file" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ else
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create -" >&5
+$as_echo "$as_me: error: could not create -" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$_am_arg" : 'X\(//\)[^/]' \| \
+ X"$_am_arg" : 'X\(//\)$' \| \
+ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$_am_arg" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+ :C) { $as_echo "$as_me:$LINENO: executing $ac_file commands" >&5
+$as_echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+ esac
+
+
+ case $ac_file$ac_mode in
+ "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
+ # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named `Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+ # limit of 2048, but all sed's we know have understand at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`$as_dirname -- "$mf" ||
+$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$mf" : 'X\(//\)[^/]' \| \
+ X"$mf" : 'X\(//\)$' \| \
+ X"$mf" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$mf" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running `make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+ test -z "am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # When using ansi2knr, U may be empty or an underscore; expand it
+ U=`sed -n 's/^U = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`$as_dirname -- "$file" ||
+$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$file" : 'X\(//\)[^/]' \| \
+ X"$file" : 'X\(//\)$' \| \
+ X"$file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir=$dirpart/$fdir
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+$as_echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+ ;;
+
+ esac
+done # for ac_tag
+
+
+{ (exit 0); exit 0; }
diff --git a/seq_test/configure b/seq_test/configure
new file mode 100755
index 0000000..2dd1aa5
--- /dev/null
+++ b/seq_test/configure
@@ -0,0 +1,6764 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.63 for seqlibtest 1.0.
+#
+# Report bugs to <jwala@broadinstitute.org>.
+#
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+# 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+if test "x$CONFIG_SHELL" = x; then
+ if (eval ":") 2>/dev/null; then
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+
+ if test $as_have_required = yes && (eval ":
+(as_func_return () {
+ (exit \$1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test \$exitcode = 0) || { (exit 1); exit 1; }
+
+(
+ as_lineno_1=\$LINENO
+ as_lineno_2=\$LINENO
+ test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" &&
+ test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; }
+") 2> /dev/null; then
+ :
+else
+ as_candidate_shells=
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ case $as_dir in
+ /*)
+ for as_base in sh bash ksh sh5; do
+ as_candidate_shells="$as_candidate_shells $as_dir/$as_base"
+ done;;
+ esac
+done
+IFS=$as_save_IFS
+
+
+ for as_shell in $as_candidate_shells $SHELL; do
+ # Try only shells that exist, to save several forks.
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { ("$as_shell") 2> /dev/null <<\_ASEOF
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+:
+_ASEOF
+}; then
+ CONFIG_SHELL=$as_shell
+ as_have_required=yes
+ if { "$as_shell" 2> /dev/null <<\_ASEOF
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+:
+(as_func_return () {
+ (exit $1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = "$1" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test $exitcode = 0) || { (exit 1); exit 1; }
+
+(
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; }
+
+_ASEOF
+}; then
+ break
+fi
+
+fi
+
+ done
+
+ if test "x$CONFIG_SHELL" != x; then
+ for as_var in BASH_ENV ENV
+ do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ done
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
+fi
+
+
+ if test $as_have_required = no; then
+ echo This script requires a shell more modern than all the
+ echo shells that I found on your system. Please install a
+ echo modern shell, or manually run the script under such a
+ echo shell if you do have one.
+ { (exit 1); exit 1; }
+fi
+
+
+fi
+
+fi
+
+
+
+(eval "as_func_return () {
+ (exit \$1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test \$exitcode = 0") || {
+ echo No shell found that supports shell functions.
+ echo Please tell bug-autoconf at gnu.org about your system,
+ echo including any error possibly output before this message.
+ echo This can help us improve future autoconf versions.
+ echo Configuration will now proceed without shell functions.
+}
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+
+exec 7<&0 </dev/null 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+# Identity of this package.
+PACKAGE_NAME='seqlibtest'
+PACKAGE_TARNAME='seqlibtest'
+PACKAGE_VERSION='1.0'
+PACKAGE_STRING='seqlibtest 1.0'
+PACKAGE_BUGREPORT='jwala at broadinstitute.org'
+
+ac_unique_file="seq_test.cpp"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+LIBOBJS
+AM_CXXFLAGS
+boost_lib
+EGREP
+GREP
+CXXCPP
+RANLIB
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+ac_ct_CC
+CFLAGS
+CC
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__quote
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CXX
+CPPFLAGS
+LDFLAGS
+CXXFLAGS
+CXX
+MAINT
+MAINTAINER_MODE_FALSE
+MAINTAINER_MODE_TRUE
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_maintainer_mode
+enable_dependency_tracking
+with_boost
+enable_development
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CXX
+CXXFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CCC
+CC
+CFLAGS
+CXXCPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2
+ { (exit 1); exit 1; }; }
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2
+ { (exit 1); exit 1; }; }
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2
+ { (exit 1); exit 1; }; }
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2
+ { (exit 1); exit 1; }; }
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) { $as_echo "$as_me: error: unrecognized option: $ac_option
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; }
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid variable name: $ac_envvar" >&2
+ { (exit 1); exit 1; }; }
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ { $as_echo "$as_me: error: missing argument to $ac_option" >&2
+ { (exit 1); exit 1; }; }
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) { $as_echo "$as_me: error: unrecognized options: $ac_unrecognized_opts" >&2
+ { (exit 1); exit 1; }; } ;;
+ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir
+do
+ eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ { $as_echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; }
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used." >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ { $as_echo "$as_me: error: working directory cannot be determined" >&2
+ { (exit 1); exit 1; }; }
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ { $as_echo "$as_me: error: pwd does not report name of working directory" >&2
+ { (exit 1); exit 1; }; }
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ { $as_echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
+ { (exit 1); exit 1; }; }
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || { $as_echo "$as_me: error: $ac_msg" >&2
+ { (exit 1); exit 1; }; }
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures seqlibtest 1.0 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/seqlibtest]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+
+Program names:
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM run sed PROGRAM on installed program names
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+ case $ac_init_help in
+ short | recursive ) echo "Configuration of seqlibtest 1.0:";;
+ esac
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-maintainer-mode enable make rules and dependencies not useful
+ (and sometimes confusing) to the casual installer
+ --disable-dependency-tracking speeds up one-time build
+ --enable-dependency-tracking do not reject slow dependency extractors
+ --enable-development Turn on development options, like failing
+ compilation on warnings
+
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-boost=PATH specify directory containing the boost library
+
+Some influential environment variables:
+ CXX C++ compiler command
+ CXXFLAGS C++ compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CC C compiler command
+ CFLAGS C compiler flags
+ CXXCPP C++ preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <jwala at broadinstitute.org>.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+ # Check for guested configure.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+seqlibtest configure 1.0
+generated by GNU Autoconf 2.63
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by seqlibtest $as_me 1.0, which was
+generated by GNU Autoconf 2.63. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ $as_echo "PATH: $as_dir"
+done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+ 2)
+ ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ ac_configure_args="$ac_configure_args '$ac_arg'"
+ ;;
+ esac
+ done
+done
+$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
+$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ cat <<\_ASBOX
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+_ASBOX
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ cat <<\_ASBOX
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ cat <<\_ASBOX
+## ------------------- ##
+## File substitutions. ##
+## ------------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ cat <<\_ASBOX
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+_ASBOX
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ $as_echo "$as_me: caught signal $ac_signal"
+ $as_echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+ ac_site_file1=$CONFIG_SITE
+elif test "x$prefix" != xNONE; then
+ ac_site_file1=$prefix/share/config.site
+ ac_site_file2=$prefix/etc/config.site
+else
+ ac_site_file1=$ac_default_prefix/share/config.site
+ ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+ test "x$ac_site_file" = xNONE && continue
+ if test -r "$ac_site_file"; then
+ { $as_echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file"
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { $as_echo "$as_me:$LINENO: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { $as_echo "$as_me:$LINENO: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { $as_echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { $as_echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+ { $as_echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ ac_cache_corrupted=:
+ else
+ { $as_echo "$as_me:$LINENO: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+ eval $ac_var=\$ac_old_val
+ fi
+ { $as_echo "$as_me:$LINENO: former value: \`$ac_old_val'" >&5
+$as_echo "$as_me: former value: \`$ac_old_val'" >&2;}
+ { $as_echo "$as_me:$LINENO: current value: \`$ac_new_val'" >&5
+$as_echo "$as_me: current value: \`$ac_new_val'" >&2;}
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ { $as_echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ { { $as_echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
+$as_echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+am__api_version='1.11'
+
+ac_aux_dir=
+for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ { { $as_echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&5
+$as_echo "$as_me: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
+$as_echo_n "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if test "${ac_cv_path_install+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in
+  ./ | .// | /[cC]/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+{ $as_echo "$as_me:$LINENO: checking whether build environment is sane" >&5
+$as_echo_n "checking whether build environment is sane... " >&6; }
+# Just in case
+sleep 1
+echo timestamp > conftest.file
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[\\\"\#\$\&\'\`$am_lf]*)
+ { { $as_echo "$as_me:$LINENO: error: unsafe absolute working directory name" >&5
+$as_echo "$as_me: error: unsafe absolute working directory name" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+case $srcdir in
+ *[\\\"\#\$\&\'\`$am_lf\ \ ]*)
+ { { $as_echo "$as_me:$LINENO: error: unsafe srcdir value: \`$srcdir'" >&5
+$as_echo "$as_me: error: unsafe srcdir value: \`$srcdir'" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ rm -f conftest.file
+ if test "$*" != "X $srcdir/configure conftest.file" \
+ && test "$*" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ { { $as_echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken
+alias in your environment" >&5
+$as_echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken
+alias in your environment" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+ test "$2" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ { { $as_echo "$as_me:$LINENO: error: newly created file is older than distributed files!
+Check your system clock" >&5
+$as_echo "$as_me: error: newly created file is older than distributed files!
+Check your system clock" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+{ $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+test "$program_prefix" != NONE &&
+ program_transform_name="s&^&$program_prefix&;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s&\$&$program_suffix&;$program_transform_name"
+# Double any \ or $.
+# By default was `s,x,x', remove it if useless.
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
+
+# expand $ac_aux_dir to an absolute path
+am_aux_dir=`cd $ac_aux_dir && pwd`
+
+if test x"${MISSING+set}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+ *)
+ MISSING="\${SHELL} $am_aux_dir/missing" ;;
+ esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --run true"; then
+ am_missing_run="$MISSING --run "
+else
+ am_missing_run=
+ { $as_echo "$as_me:$LINENO: WARNING: \`missing' script is too old or missing" >&5
+$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;}
+fi
+
+if test x"${install_sh}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+
+# Installed binaries are usually stripped using `strip' when the user
+# runs `make install-strip'. However `strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the `STRIP' environment variable to overrule this program.
+if test "$cross_compiling" != no; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_STRIP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ { $as_echo "$as_me:$LINENO: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_STRIP" = x; then
+ STRIP=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ STRIP=$ac_ct_STRIP
+ fi
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+
+{ $as_echo "$as_me:$LINENO: checking for a thread-safe mkdir -p" >&5
+$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+ if test "${ac_cv_path_mkdir+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in mkdir gmkdir; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue
+ case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
+ 'mkdir (GNU coreutils) '* | \
+ 'mkdir (coreutils) '* | \
+ 'mkdir (fileutils) '4.1*)
+ ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
+ break 3;;
+ esac
+ done
+ done
+done
+IFS=$as_save_IFS
+
+fi
+
+ if test "${ac_cv_path_mkdir+set}" = set; then
+ MKDIR_P="$ac_cv_path_mkdir -p"
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for MKDIR_P within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ test -d ./--version && rmdir ./--version
+ MKDIR_P="$ac_install_sh -d"
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $MKDIR_P" >&5
+$as_echo "$MKDIR_P" >&6; }
+
+mkdir_p="$MKDIR_P"
+case $mkdir_p in
+ [\\/$]* | ?:[\\/]*) ;;
+ */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
+esac
+
+for ac_prog in gawk mawk nawk awk
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_AWK+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AWK"; then
+ ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_AWK="$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+ { $as_echo "$as_me:$LINENO: result: $AWK" >&5
+$as_echo "$AWK" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$AWK" && break
+done
+
+{ $as_echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+ @echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+ *@@@%%%=?*=@@@%%%*)
+ eval ac_cv_prog_make_${ac_make}_set=yes;;
+ *)
+ eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+ SET_MAKE=
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
+
+rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ am__isrc=' -I$(srcdir)'
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ { { $as_echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5
+$as_echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+
+
+# Define the identity of the package.
+ PACKAGE='seqlibtest'
+ VERSION='1.0'
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE "$PACKAGE"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define VERSION "$VERSION"
+_ACEOF
+
+# Some tools Automake needs.
+
+ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
+
+
+AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
+
+
+AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
+
+
+AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
+
+
+MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+
+# We need awk for the "check" target. The system "awk" is bad on
+# some platforms.
+# Always define AMTAR for backward compatibility.
+
+AMTAR=${AMTAR-"${am_missing_run}tar"}
+
+am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'
+
+
+
+
+
+
+ac_config_headers="$ac_config_headers config.h"
+
+
+{ $as_echo "$as_me:$LINENO: checking whether to enable maintainer-specific portions of Makefiles" >&5
+$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; }
+ # Check whether --enable-maintainer-mode was given.
+if test "${enable_maintainer_mode+set}" = set; then
+ enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval
+else
+ USE_MAINTAINER_MODE=no
+fi
+
+ { $as_echo "$as_me:$LINENO: result: $USE_MAINTAINER_MODE" >&5
+$as_echo "$USE_MAINTAINER_MODE" >&6; }
+ if test $USE_MAINTAINER_MODE = yes; then
+ MAINTAINER_MODE_TRUE=
+ MAINTAINER_MODE_FALSE='#'
+else
+ MAINTAINER_MODE_TRUE='#'
+ MAINTAINER_MODE_FALSE=
+fi
+
+ MAINT=$MAINTAINER_MODE_TRUE
+
+
+##m4_include([m4/m4_ax_openmp.m4])
+
+# Checks for programs.
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+ if test -n "$CCC"; then
+ CXX=$CCC
+ else
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CXX+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CXX"; then
+ ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+ { $as_echo "$as_me:$LINENO: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CXX" && break
+done
+
+ if test "x$ac_ct_CXX" = x; then
+ CXX="g++"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_ct_CXX
+ fi
+fi
+
+ fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:$LINENO: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+{ (ac_try="$ac_compiler --version >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler --version >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -v >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler -v >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -V >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler -V >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers, and to get a first
+# guess at exeext.
+{ $as_echo "$as_me:$LINENO: checking for C++ compiler default output file name" >&5
+$as_echo_n "checking for C++ compiler default output file name... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { (ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+
+{ $as_echo "$as_me:$LINENO: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+if test -z "$ac_file"; then
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: C++ compiler cannot create executables
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: C++ compiler cannot create executables
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }; }
+fi
+
+ac_exeext=$ac_cv_exeext
+
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:$LINENO: checking whether the C++ compiler works" >&5
+$as_echo_n "checking whether the C++ compiler works... " >&6; }
+# FIXME: These cross compiler hacks should be removed for Autoconf 3.0
+# If not cross compiling, check that we can run a simple program.
+if test "$cross_compiling" != yes; then
+ if { ac_try='./$ac_file'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: cannot run C++ compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot run C++ compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+ fi
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:$LINENO: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+{ $as_echo "$as_me:$LINENO: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+{ $as_echo "$as_me:$LINENO: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+fi
+
+rm -f conftest$ac_cv_exeext
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+{ $as_echo "$as_me:$LINENO: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if test "${ac_cv_objext+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+fi
+
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if test "${ac_cv_cxx_compiler_gnu+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_compiler_gnu=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_compiler_gnu=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GXX=yes
+else
+ GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ $as_echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if test "${ac_cv_prog_cxx_g+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+ ac_cxx_werror_flag=yes
+ ac_cv_prog_cxx_g=no
+ CXXFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cxx_g=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ CXXFLAGS=""
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+ CXXFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cxx_g=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+ CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+ if test "$GXX" = yes; then
+ CXXFLAGS="-g -O2"
+ else
+ CXXFLAGS="-g"
+ fi
+else
+ if test "$GXX" = yes; then
+ CXXFLAGS="-O2"
+ else
+ CXXFLAGS=
+ fi
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+DEPDIR="${am__leading_dot}deps"
+
+ac_config_commands="$ac_config_commands depfiles"
+
+
+am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+ @echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+{ $as_echo "$as_me:$LINENO: checking for style of include used by $am_make" >&5
+$as_echo_n "checking for style of include used by $am_make... " >&6; }
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from `make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+ am__include=include
+ am__quote=
+ _am_result=GNU
+ ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+ echo '.include "confinc"' > confmf
+ case `$am_make -s -f confmf 2> /dev/null` in #(
+ *the\ am__doit\ target*)
+ am__include=.include
+ am__quote="\""
+ _am_result=BSD
+ ;;
+ esac
+fi
+
+
+{ $as_echo "$as_me:$LINENO: result: $_am_result" >&5
+$as_echo "$_am_result" >&6; }
+rm -f confinc confmf
+
+# Check whether --enable-dependency-tracking was given.
+if test "${enable_dependency_tracking+set}" = set; then
+ enableval=$enable_dependency_tracking;
+fi
+
+if test "x$enable_dependency_tracking" != xno; then
+ am_depcomp="$ac_aux_dir/depcomp"
+ AMDEPBACKSLASH='\'
+fi
+ if test "x$enable_dependency_tracking" != xno; then
+ AMDEP_TRUE=
+ AMDEP_FALSE='#'
+else
+ AMDEP_TRUE='#'
+ AMDEP_FALSE=
+fi
+
+
+
+depcc="$CXX" am_compiler_list=
+
+{ $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if test "${am_cv_CXX_dependencies_compiler_type+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CXX_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CXX_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CXX_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $am_cv_CXX_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; }
+CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
+ am__fastdepCXX_TRUE=
+ am__fastdepCXX_FALSE='#'
+else
+ am__fastdepCXX_TRUE='#'
+ am__fastdepCXX_FALSE=
+fi
+
+ ## test for cpp compiler
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:$LINENO: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:$LINENO: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:$LINENO: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:$LINENO: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:$LINENO: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+{ (ac_try="$ac_compiler --version >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler --version >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -v >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler -v >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -V >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler -V >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+{ $as_echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if test "${ac_cv_c_compiler_gnu+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_compiler_gnu=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_compiler_gnu=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if test "${ac_cv_prog_cc_g+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_g=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ CFLAGS=""
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_g=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if test "${ac_cv_prog_cc_c89+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_c89=$ac_arg
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { $as_echo "$as_me:$LINENO: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:$LINENO: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CC" am_compiler_list=
+
+{ $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CC_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CC_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CC_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $am_cv_CC_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
+CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+ am__fastdepCC_TRUE=
+ am__fastdepCC_FALSE='#'
+else
+ am__fastdepCC_TRUE='#'
+ am__fastdepCC_FALSE=
+fi
+
+ ## test for C compiler
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_RANLIB+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$RANLIB"; then
+ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+ { $as_echo "$as_me:$LINENO: result: $RANLIB" >&5
+$as_echo "$RANLIB" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+ ac_ct_RANLIB=$RANLIB
+ # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_RANLIB"; then
+ ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_RANLIB="ranlib"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5
+$as_echo "$ac_ct_RANLIB" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_RANLIB" = x; then
+ RANLIB=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ RANLIB=$ac_ct_RANLIB
+ fi
+else
+ RANLIB="$ac_cv_prog_RANLIB"
+fi
+ ## required if libraries are built in package
+
+# Check for headers
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+{ $as_echo "$as_me:$LINENO: checking how to run the C++ preprocessor" >&5
+$as_echo_n "checking how to run the C++ preprocessor... " >&6; }
+if test -z "$CXXCPP"; then
+ if test "${ac_cv_prog_CXXCPP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CXXCPP needs to be expanded
+ for CXXCPP in "$CXX -E" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ # Broken: success on invalid input.
+continue
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ break
+fi
+
+ done
+ ac_cv_prog_CXXCPP=$CXXCPP
+
+fi
+ CXXCPP=$ac_cv_prog_CXXCPP
+else
+ ac_cv_prog_CXXCPP=$CXXCPP
+fi
+{ $as_echo "$as_me:$LINENO: result: $CXXCPP" >&5
+$as_echo "$CXXCPP" >&6; }
+ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ # Broken: success on invalid input.
+continue
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ :
+else
+ { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: C++ preprocessor \"$CXXCPP\" fails sanity check
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: C++ preprocessor \"$CXXCPP\" fails sanity check
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+fi
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+
+{ $as_echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if test "${ac_cv_path_GREP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_GREP_found && break 3
+ done
+ done
+done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_GREP"; then
+ { { $as_echo "$as_me:$LINENO: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+$as_echo "$as_me: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:$LINENO: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if test "${ac_cv_path_EGREP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ if test -z "$EGREP"; then
+ ac_path_EGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_EGREP_found && break 3
+ done
+ done
+done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_EGREP"; then
+ { { $as_echo "$as_me:$LINENO: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+$as_echo "$as_me: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:$LINENO: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if test "${ac_cv_header_stdc+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_header_stdc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_header_stdc=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_header_stdc=no
+fi
+rm -rf conftest.dSYM
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+
+
+fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define STDC_HEADERS 1
+_ACEOF
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+
+
+
+
+
+
+
+
+
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ eval "$as_ac_Header=yes"
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_Header=no"
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+as_val=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+if test "${ac_cv_header_zlib_h+set}" = set; then
+ { $as_echo "$as_me:$LINENO: checking for zlib.h" >&5
+$as_echo_n "checking for zlib.h... " >&6; }
+if test "${ac_cv_header_zlib_h+set}" = set; then
+ $as_echo_n "(cached) " >&6
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_zlib_h" >&5
+$as_echo "$ac_cv_header_zlib_h" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:$LINENO: checking zlib.h usability" >&5
+$as_echo_n "checking zlib.h usability... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <zlib.h>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_header_compiler=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:$LINENO: checking zlib.h presence" >&5
+$as_echo_n "checking zlib.h presence... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <zlib.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ ac_header_preproc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in
+ yes:no: )
+ { $as_echo "$as_me:$LINENO: WARNING: zlib.h: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: zlib.h: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: zlib.h: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: zlib.h: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:$LINENO: WARNING: zlib.h: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: zlib.h: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: zlib.h: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: zlib.h: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: zlib.h: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: zlib.h: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: zlib.h: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: zlib.h: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: zlib.h: proceeding with the preprocessor's result" >&5
+$as_echo "$as_me: WARNING: zlib.h: proceeding with the preprocessor's result" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: zlib.h: in the future, the compiler will take precedence" >&5
+$as_echo "$as_me: WARNING: zlib.h: in the future, the compiler will take precedence" >&2;}
+ ( cat <<\_ASBOX
+## --------------------------------------- ##
+## Report this to jwala at broadinstitute.org ##
+## --------------------------------------- ##
+_ASBOX
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: checking for zlib.h" >&5
+$as_echo_n "checking for zlib.h... " >&6; }
+if test "${ac_cv_header_zlib_h+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_header_zlib_h=$ac_header_preproc
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_zlib_h" >&5
+$as_echo "$ac_cv_header_zlib_h" >&6; }
+
+fi
+
+
+
+# Check for libraries
+{ $as_echo "$as_me:$LINENO: checking for library containing gzopen" >&5
+$as_echo_n "checking for library containing gzopen... " >&6; }
+if test "${ac_cv_search_gzopen+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char gzopen ();
+int
+main ()
+{
+return gzopen ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' z; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_search_gzopen=$ac_res
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext
+ if test "${ac_cv_search_gzopen+set}" = set; then
+ break
+fi
+done
+if test "${ac_cv_search_gzopen+set}" = set; then
+ :
+else
+ ac_cv_search_gzopen=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_search_gzopen" >&5
+$as_echo "$ac_cv_search_gzopen" >&6; }
+ac_res=$ac_cv_search_gzopen
+if test "$ac_res" != no; then
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+else
+ { { $as_echo "$as_me:$LINENO: error: libz not found, please install zlib (http://www.zlib.net/)" >&5
+$as_echo "$as_me: error: libz not found, please install zlib (http://www.zlib.net/)" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+{ $as_echo "$as_me:$LINENO: checking for library containing clock_gettime" >&5
+$as_echo_n "checking for library containing clock_gettime... " >&6; }
+if test "${ac_cv_search_clock_gettime+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char clock_gettime ();
+int
+main ()
+{
+return clock_gettime ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' rt; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_search_clock_gettime=$ac_res
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext
+ if test "${ac_cv_search_clock_gettime+set}" = set; then
+ break
+fi
+done
+if test "${ac_cv_search_clock_gettime+set}" = set; then
+ :
+else
+ ac_cv_search_clock_gettime=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_search_clock_gettime" >&5
+$as_echo "$ac_cv_search_clock_gettime" >&6; }
+ac_res=$ac_cv_search_clock_gettime
+if test "$ac_res" != no; then
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CLOCK_GETTIME 1
+_ACEOF
+
+fi
+
+
+
+# Check whether --with-boost was given.
+if test "${with_boost+set}" = set; then
+ withval=$with_boost;
+fi
+
+if test "$with_boost" -a -d "$with_boost"; then
+ boost_include="-I$with_boost"
+ as_ac_File=`$as_echo "ac_cv_file_"$with_boost/stage/lib"" | $as_tr_sh`
+{ $as_echo "$as_me:$LINENO: checking for \"$with_boost/stage/lib\"" >&5
+$as_echo_n "checking for \"$with_boost/stage/lib\"... " >&6; }
+if { as_var=$as_ac_File; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ test "$cross_compiling" = yes &&
+ { { $as_echo "$as_me:$LINENO: error: cannot check for file existence when cross compiling" >&5
+$as_echo "$as_me: error: cannot check for file existence when cross compiling" >&2;}
+ { (exit 1); exit 1; }; }
+if test -r ""$with_boost/stage/lib""; then
+ eval "$as_ac_File=yes"
+else
+ eval "$as_ac_File=no"
+fi
+fi
+ac_res=`eval 'as_val=${'$as_ac_File'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+as_val=`eval 'as_val=${'$as_ac_File'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ STAGE_PATH=1
+else
+ STAGE_PATH=0
+fi
+
+ if test ${STAGE_PATH} = 1; then
+ boost_lib="$with_boost/stage/lib"
+ else
+ boost_lib="$with_boost/lib"
+ fi
+
+fi
+
+# Only fail on warnings when the --enable-development flag is passed into configure
+# Check whether --enable-development was given.
+if test "${enable_development+set}" = set; then
+ enableval=$enable_development;
+fi
+
+if test "$enable_development"; then
+ fail_on_warning="-Werror"
+fi
+
+# Set compiler flags.
+AM_CXXFLAGS="-g $fail_on_warning -Wno-unknown-pragmas -DHAVE_C11=1 -std=c++11"
+
+CXXFLAGS="$CXXFLAGS"
+
+CFLAGS="$CFLAGS"
+
+CPPFLAGS="$CPPFLAGS $boost_include"
+
+LDFLAGS="$LDFLAGS"
+
+
+LIBS="-L$boost_lib $LIBS"
+
+
+# Make sure the boost headers can be found
+
+for ac_header in boost/test/unit_test.hpp
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5
+$as_echo_n "checking $ac_header usability... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_header_compiler=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5
+$as_echo_n "checking $ac_header presence... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ ac_header_preproc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in
+ yes:no: )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
+ ( cat <<\_ASBOX
+## --------------------------------------- ##
+## Report this to jwala at broadinstitute.org ##
+## --------------------------------------- ##
+_ASBOX
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ eval "$as_ac_Header=\$ac_header_preproc"
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+
+fi
+as_val=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ { { $as_echo "$as_me:$LINENO: error: The Boost library must be installed for unit testing. Specify its path with the --with-boost=PATH option" >&5
+$as_echo "$as_me: error: The Boost library must be installed for unit testing. Specify its path with the --with-boost=PATH option" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+done
+
+
+ac_config_files="$ac_config_files Makefile"
+
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ test "x$cache_file" != "x/dev/null" &&
+ { $as_echo "$as_me:$LINENO: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+ cat confcache >$cache_file
+ else
+ { $as_echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+ if test -n "$EXEEXT"; then
+ am__EXEEXT_TRUE=
+ am__EXEEXT_FALSE='#'
+else
+ am__EXEEXT_TRUE='#'
+ am__EXEEXT_FALSE=
+fi
+
+if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"MAINTAINER_MODE\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"MAINTAINER_MODE\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+: ${CONFIG_STATUS=./config.status}
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+cat >$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=\${CONFIG_SHELL-$SHELL}
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+
+# Save the log message, to keep $[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by seqlibtest $as_me 1.0, which was
+generated by GNU Autoconf 2.63. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTION]... [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to <bug-autoconf at gnu.org>."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_version="\\
+seqlibtest config.status 1.0
+configured by $0, generated by GNU Autoconf 2.63,
+ with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
+
+Copyright (C) 2008 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
+AWK='$AWK'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ $as_echo "$ac_cs_version"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ CONFIG_FILES="$CONFIG_FILES '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ CONFIG_HEADERS="$CONFIG_HEADERS '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ { $as_echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ $as_echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { $as_echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+ set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
+ export CONFIG_SHELL
+ exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+
+ *) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+$as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp=
+ trap 'exit_status=$?
+ { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} ||
+{
+ $as_echo "$as_me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr='
'
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
+$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ . ./conf$$subs.sh ||
+ { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
+$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
+ break
+ elif $ac_last_try; then
+ { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
+$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\).*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\).*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
+ || { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5
+$as_echo "$as_me: error: could not setup config files machinery" >&2;}
+ { (exit 1); exit 1; }; }
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove $(srcdir),
+# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/
+s/:*\${srcdir}:*/:/
+s/:*@srcdir@:*/:/
+s/^\([^=]*=[ ]*\):*/\1/
+s/:*$//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+ ac_t=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_t"; then
+ break
+ elif $ac_last_try; then
+ { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_HEADERS" >&5
+$as_echo "$as_me: error: could not make $CONFIG_HEADERS" >&2;}
+ { (exit 1); exit 1; }; }
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any. Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[ ]*#[ ]*define[ ][ ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+ line = \$ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ { { $as_echo "$as_me:$LINENO: error: could not setup config headers machinery" >&5
+$as_echo "$as_me: error: could not setup config headers machinery" >&2;}
+ { (exit 1); exit 1; }; }
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: invalid tag $ac_tag" >&5
+$as_echo "$as_me: error: invalid tag $ac_tag" >&2;}
+ { (exit 1); exit 1; }; };;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ { { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
+$as_echo "$as_me: error: cannot find input file: $ac_f" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ ac_file_inputs="$ac_file_inputs '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { $as_echo "$as_me:$LINENO: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$tmp/stdin" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; } ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir="$ac_dir"
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+$as_echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+ ac_MKDIR_P=$MKDIR_P
+ case $MKDIR_P in
+ [\\/$]* | ?:[\\/]* ) ;;
+ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p
+'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&2;}
+
+ rm -f "$tmp/stdin"
+ case $ac_file in
+ -) cat "$tmp/out" && rm -f "$tmp/out";;
+ *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
+ esac \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
+ } >"$tmp/config.h" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
+ { $as_echo "$as_me:$LINENO: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$tmp/config.h" "$ac_file" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ else
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create -" >&5
+$as_echo "$as_me: error: could not create -" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$_am_arg" : 'X\(//\)[^/]' \| \
+ X"$_am_arg" : 'X\(//\)$' \| \
+ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$_am_arg" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+ :C) { $as_echo "$as_me:$LINENO: executing $ac_file commands" >&5
+$as_echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+ esac
+
+
+ case $ac_file$ac_mode in
+ "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
+ # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named `Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+# limit of 2048, but all seds we know handle at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`$as_dirname -- "$mf" ||
+$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$mf" : 'X\(//\)[^/]' \| \
+ X"$mf" : 'X\(//\)$' \| \
+ X"$mf" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$mf" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running `make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+ test -z "am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # When using ansi2knr, U may be empty or an underscore; expand it
+ U=`sed -n 's/^U = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`$as_dirname -- "$file" ||
+$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$file" : 'X\(//\)[^/]' \| \
+ X"$file" : 'X\(//\)$' \| \
+ X"$file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir=$dirpart/$fdir
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+$as_echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+ ;;
+
+ esac
+done # for ac_tag
+
+
+{ (exit 0); exit 0; }
+_ACEOF
+chmod +x $CONFIG_STATUS
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+ { { $as_echo "$as_me:$LINENO: error: write failure creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: error: write failure creating $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || { (exit 1); exit 1; }
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:$LINENO: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
diff --git a/seq_test/configure.ac b/seq_test/configure.ac
new file mode 100644
index 0000000..9075cff
--- /dev/null
+++ b/seq_test/configure.ac
@@ -0,0 +1,57 @@
+# Process this file with autoconf to produce a configure script.
+AC_PREREQ(2.59) ## specify the required version of autoconf
+AC_INIT(seqlibtest, 1.0, jwala at broadinstitute.org)
+AM_INIT_AUTOMAKE(foreign)
+AC_CONFIG_SRCDIR([seq_test.cpp])
+AC_CONFIG_HEADER([config.h])
+AM_MAINTAINER_MODE([disable])
+##m4_include([m4/m4_ax_openmp.m4])
+
+# Checks for programs.
+AC_PROG_CXX ## check for a C++ compiler
+AC_PROG_CC ## check for a C compiler
+AC_PROG_RANLIB ## required if libraries are built within the package
+
+# Check for headers
+AC_LANG([C++])
+AC_CHECK_HEADER([zlib.h])
+
+# Check for libraries
+AC_SEARCH_LIBS([gzopen],[z],,[AC_MSG_ERROR([libz not found, please install zlib (http://www.zlib.net/)])])
+AC_SEARCH_LIBS([clock_gettime], [rt], [AC_DEFINE([HAVE_CLOCK_GETTIME], [1], [clock_gettime found])], )
+
+AC_ARG_WITH(boost, AS_HELP_STRING([--with-boost=PATH],
+ [specify the directory containing the Boost library]))
+if test "$with_boost" -a -d "$with_boost"; then
+ boost_include="-I$with_boost"
+ AC_CHECK_FILE("$with_boost/stage/lib", STAGE_PATH=1, STAGE_PATH=0)
+ if test ${STAGE_PATH} = 1; then
+ boost_lib="$with_boost/stage/lib"
+ else
+ boost_lib="$with_boost/lib"
+ fi
+ AC_SUBST(boost_lib)
+fi
+
+# Only fail on warnings when the --enable-development flag is passed into configure
+AC_ARG_ENABLE(development, AS_HELP_STRING([--enable-development],
+ [Turn on development options, like failing compilation on warnings]))
+if test "$enable_development"; then
+ fail_on_warning="-Werror"
+fi
+
+# Set compiler flags.
+AC_SUBST(AM_CXXFLAGS, "-g $fail_on_warning -Wno-unknown-pragmas -DHAVE_C11=1 -std=c++11")
+AC_SUBST(CXXFLAGS, "$CXXFLAGS")
+AC_SUBST(CFLAGS, "$CFLAGS")
+AC_SUBST(CPPFLAGS, "$CPPFLAGS $boost_include")
+AC_SUBST(LDFLAGS, "$LDFLAGS")
+
+AC_SUBST(LIBS, "-L$boost_lib $LIBS")
+
+# Make sure the boost headers can be found
+AC_CHECK_HEADERS([boost/test/unit_test.hpp],,[AC_MSG_ERROR([The Boost library must be installed for unit testing. Specify its path with the --with-boost=PATH option])])
+
+AC_CONFIG_FILES([Makefile])
+
+AC_OUTPUT
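+
+# Illustrative invocations for the options defined above (paths are placeholders):
+#   ./configure --with-boost=/path/to/boost
+#   ./configure --with-boost=/path/to/boost --enable-development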
diff --git a/seq_test/seq_test.cpp b/seq_test/seq_test.cpp
new file mode 100644
index 0000000..0be6ade
--- /dev/null
+++ b/seq_test/seq_test.cpp
@@ -0,0 +1,1575 @@
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE MyTest
+#include <boost/test/unit_test.hpp>
+
+#include <climits>
+#include <boost/test/unit_test.hpp>
+
+#include "SeqLib/BWAWrapper.h"
+#include "SeqLib/BamReader.h"
+#include "SeqLib/BamWriter.h"
+#include "SeqLib/ReadFilter.h"
+#include "SeqLib/FermiAssembler.h"
+#include "SeqLib/SeqPlot.h"
+#include "SeqLib/RefGenome.h"
+
+#define GZBED "test_data/test.bed.gz"
+#define GZVCF "test_data/test.vcf.gz"
+#define SBAM "test_data/small.bam"
+#define OBAM "test_data/small_out.bam"
+#define OCRAM "test_data/small_out.cram"
+#define HGREF "/seq/references/Homo_sapiens_assembly19/v1/Homo_sapiens_assembly19.fasta"
+#define TREF "test_data/test_ref.fa"
+#define OREF "tmp_output.fa"
+#define BEDFILE "test_data/test.bed"
+#define VCFFILE "test_data/test.vcf"
+#define JSON1 "test_data/example4.json"
+
+using namespace SeqLib::Filter;
+using namespace SeqLib;
+
+#include <fstream>
+#include "SeqLib/BFC.h"
+
+BOOST_AUTO_TEST_CASE( read_gzbed ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+
+ SeqLib::GRC g(GZBED, br.Header());
+ BOOST_CHECK_EQUAL(g.size(), 3);
+
+ BOOST_CHECK_EQUAL(g[2].chr, 22);
+
+ SeqLib::GRC v(GZVCF, br.Header());
+ BOOST_CHECK_EQUAL(v.size(), 31);
+
+ BOOST_CHECK_EQUAL(v[29].chr, 22);
+}
+
+BOOST_AUTO_TEST_CASE ( bfc ) {
+
+ BFC b;
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+
+ SeqLib::BamRecord rec;
+ BamRecordVector brv, brv2;
+ size_t count = 0;
+ while(br.GetNextRecord(rec) && count++ < 10000)
+ brv.push_back(rec);
+
+ for (int i = 5000; i < brv.size(); ++i)
+ brv2.push_back(brv[i]);
+
+ count = 0;
+ while(br.GetNextRecord(rec) && count++ < 10000)
+ brv2.push_back(rec);
+
+ std::ofstream orig("orig.fa");
+ std::ofstream corr("corr.fa");
+
+ for (auto& i : brv)
+ orig << ">" << i.Qname() << std::endl << i.Sequence() << std::endl;
+
+ // add the seqs
+ for (auto& r : brv)
+ b.AddSequence(r.Sequence().c_str(), r.Qualities().c_str(), r.Qname().c_str());
+
+ b.Train();
+ b.clear();
+
+ b.ErrorCorrectToTag(brv2, "KC");
+
+ UnalignedSequenceVector v;
+ b.GetSequences(v);
+
+ // write to corrected
+ for (auto& i : v) {
+ corr << ">" << i.Name << std::endl << i.Seq << std::endl;
+ }
+ orig.close();
+ corr.close();
+
+ //
+ v.clear();
+ b.FilterUnique();
+ b.GetSequences(v);
+
+ // do everything at once
+ b.TrainAndCorrect(brv2);
+
+ // do everything in place
+ b.TrainCorrection(brv2);
+ b.ErrorCorrectInPlace(brv2);
+}
+
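+// Condensed view of the two BFC correction patterns exercised above; a sketch
+// built only from the calls used in this test:
+//   BFC b;
+//   for (auto& r : brv)
+//     b.AddSequence(r.Sequence().c_str(), r.Qualities().c_str(), r.Qname().c_str());
+//   b.Train();                        // build the correction model
+//   b.clear();                        // clear the stored training reads
+//   b.ErrorCorrectToTag(brv2, "KC");  // correct reads, tagging results with "KC"
+//   // or, operating on a whole vector at once / in place:
+//   b.TrainAndCorrect(brv2);
+//   b.TrainCorrection(brv2); b.ErrorCorrectInPlace(brv2);
+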
+BOOST_AUTO_TEST_CASE( correct_and_assemble ) {
+
+ BFC b;
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+
+ SeqLib::BamRecord rec;
+ BamRecordVector brv, brv2;
+ size_t count = 0;
+ while(br.GetNextRecord(rec) && count++ < 10000)
+ brv.push_back(rec);
+
+ b.TrainAndCorrect(brv);
+
+ float kcov = b.GetKCov();
+ int kmer = b.GetKMer();
+
+ UnalignedSequenceVector v;
+ b.GetSequences(v);
+
+ std::ofstream corr("corr.fa");
+ for (auto& i : v)
+ corr << ">" << i.Name << std::endl << i.Seq << std::endl;
+ corr.close();
+
+ v.clear();
+ b.FilterUnique();
+ b.GetSequences(v);
+
+ std::ofstream filt("filt.fa");
+ for (auto& i : v) {
+ filt << ">" << i.Name << std::endl << i.Seq << std::endl;
+ }
+ filt.close();
+
+ FermiAssembler f;
+ f.AddReads(v);
+ f.DirectAssemble(kcov);
+
+ // retrieve the contigs
+ std::vector<std::string> contigs = f.GetContigs();
+
+ std::ofstream cont("contigs.fa");
+ size_t cc = 0;
+ for (auto& i : f.GetContigs()) {
+ ++cc;
+ cont << ">" << cc << std::endl << i << std::endl;
+ }
+ cont.close();
+
+}
+
+BOOST_AUTO_TEST_CASE( header_check ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamHeader h = br.Header();
+
+ // a BAM header check
+ BOOST_CHECK_EQUAL(h.GetSequenceLength(0), 249250621);
+ BOOST_CHECK_EQUAL(h.GetSequenceLength(3), 191154276);
+ BOOST_CHECK_EQUAL(h.GetSequenceLength("1"), 249250621);
+ BOOST_CHECK_EQUAL(h.GetSequenceLength("4"), 191154276);
+ BOOST_CHECK_EQUAL(h.GetSequenceLength("d4"), -1);
+ BOOST_CHECK_EQUAL(h.GetSequenceLength(10000), -1);
+
+ BOOST_CHECK_EQUAL(h.GetHeaderSequenceVector().size(), h.NumSequences());
+ BOOST_CHECK_EQUAL(h.GetHeaderSequenceVector().begin()->Length, 249250621);
+
+ SeqLib::BamRecord rec;
+ size_t count = 0;
+
+ while(br.GetNextRecord(rec) && count++ < 10000) {
+
+ }
+
+}
+
+BOOST_AUTO_TEST_CASE( merge ) {
+
+ SeqLib::GRC grc;
+ // add two more that we know of
+ grc.add(SeqLib::GenomicRegion(23, 10,100));
+ grc.add(SeqLib::GenomicRegion(23, 20,110));
+
+ grc.add(SeqLib::GenomicRegion(2, 10,100));
+ grc.add(SeqLib::GenomicRegion(2, 20,110));
+ grc.add(SeqLib::GenomicRegion(2, 200,310));
+
+ grc.MergeOverlappingIntervals();
+ BOOST_CHECK_EQUAL(grc.size(), 3);
+ BOOST_CHECK_EQUAL(grc[0].chr, 2);
+ BOOST_CHECK_EQUAL(grc[1].chr, 2);
+ BOOST_CHECK_EQUAL(grc[2].chr, 23);
+ BOOST_CHECK_EQUAL(grc[2].pos2, 110);
+ BOOST_CHECK_EQUAL(grc[2].pos1, 10);
+}
+
+BOOST_AUTO_TEST_CASE ( interval_queries ) {
+
+ SeqLib::GRC grc;
+
+ // create a large GRC
+ for (int i = 0; i < 10; ++i) {
+ int chr = rand() % 23;
+ int pos = rand() % 10000;
+ grc.add(SeqLib::GenomicRegion(chr, pos, pos + 100));
+ }
+ grc.MergeOverlappingIntervals();
+
+ // add two more that we know of
+ grc.add(SeqLib::GenomicRegion(23, 10,100));
+ grc.add(SeqLib::GenomicRegion(23, 20,110));
+
+ // create the interval tree
+ grc.CreateTreeMap();
+
+ SeqLib::GRC results = grc.FindOverlaps(SeqLib::GenomicRegion(23, 10, 100), true);
+
+ for (auto& i : results)
+ std::cerr << " GRC overlaps results " << i << std::endl;
+
+ BOOST_CHECK_EQUAL(results.size(), 2);
+ BOOST_CHECK_EQUAL(results[1].pos2, 100);
+
+ grc.MergeOverlappingIntervals();
+ grc.CreateTreeMap();
+
+ for(auto& r : grc)
+ std::cerr << r << std::endl;
+
+ std::vector<int32_t> q, s;
+ results = grc.FindOverlaps(grc, q, s, true);
+
+ std::cerr << " results.size " << results.size() << " Input size " << grc.size() << std::endl;
+ BOOST_CHECK_EQUAL(results.size(), grc.size());
+ BOOST_CHECK_EQUAL(results.TotalWidth(), grc.TotalWidth());
+
+}
+
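+// The GRC overlap-query workflow used above, condensed into a sketch:
+//   SeqLib::GRC grc;
+//   grc.add(SeqLib::GenomicRegion(chr, pos1, pos2));  // collect intervals
+//   grc.MergeOverlappingIntervals();                  // optionally collapse overlaps
+//   grc.CreateTreeMap();                              // build the interval tree before querying
+//   SeqLib::GRC hits = grc.FindOverlaps(SeqLib::GenomicRegion(23, 10, 100), true);
+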
+BOOST_AUTO_TEST_CASE( json_parse_from_file ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+
+ std::string rules = "{\"global\" : {\"!anyflag\" : 1536}, \"\" : { \"rules\" : [{\"ic\" : true}, {\"clip\" : 5}, {\"ins\" : true}, {\"del\" : true}, {\"mapped\": true , \"mate_mapped\" : false}, {\"mate_mapped\" : true, \"mapped\" : false}]}}";
+ ReadFilterCollection rfc(rules, br.Header());
+
+ std::cerr << rfc << std::endl;
+
+ SeqLib::BamRecord rec;
+ size_t count = 0;
+
+ while(br.GetNextRecord(rec) && count++ < 10000) {
+ if (!rfc.isValid(rec))
+ continue;
+ // test global flag rule
+ if ( (rec.QCFailFlag() || rec.DuplicateFlag())) {
+ std::cerr << rec << std::endl;
+ assert(false);
+ }
+ }
+
+  // construct a second collection directly from the same rules string
+ ReadFilterCollection rfc2(rules, br.Header());
+
+ while(br.GetNextRecord(rec) && count++ < 10000) {
+    if (!rfc2.isValid(rec))
+ continue;
+ // test global flag rule
+ if ( (rec.QCFailFlag() || rec.DuplicateFlag())) {
+ std::cerr << rec << std::endl;
+ assert(false);
+ }
+ }
+
+ // check that a bad key throws error
+ //rules = "{\"global\" : {\"!anyflagf\" : 1536}, \"\" : { \"rules\" : [{\"ic\" : true}, {\"clip\" : 5}, {\"ins\" : true}, {\"del\" : true}, {\"mapped\": true , \"mate_mapped\" : false}, {\"mate_mapped\" : true, \"mapped\" : false}]}}";
+ //BOOST_CHECK_THROW(ReadFilterCollection rfc2(rules, br.Header()), std::invalid_argument);
+ // bad value, expected int
+ //rules = "{\"global\" : {\"!anyflag\" : \"BAD\"}, \"\" : { \"rules\" : [{\"ic\" : true}, {\"clip\" : 5}, {\"ins\" : true}, {\"del\" : true}, {\"mapped\": true , \"mate_mapped\" : false}, {\"mate_mapped\" : true, \"mapped\" : false}]}}";
+ //BOOST_CHECK_THROW(ReadFilterCollection rfc3(rules, br.Header()), std::invalid_argument);
+ // bad JSON itself
+ rules = "{\"global\" : \"!anyflag\" : 1536}, \"\" : { \"rules\" : [{\"ic\" : true}, {\"clip\" : 5}, {\"ins\" : true}, {\"del\" : true}, {\"mapped\": true , \"mate_mapped\" : false}, {\"mate_mapped\" : true, \"mapped\" : false}]}}";
+ BOOST_CHECK_THROW(ReadFilterCollection rfc4(rules, br.Header()), std::invalid_argument);
+ // bad value, expected range
+ rules = "{\"global\" : {\"!anyflag\" : 1536}, \"\" : { \"rules\" : [{\"isize\" : \"BAD\"}]}}";
+ BOOST_CHECK_THROW(ReadFilterCollection rfc4(rules, br.Header()), std::invalid_argument);
+
+
+}
+
+BOOST_AUTO_TEST_CASE( sw_alignment ) {
+
+ const std::string ref = "ACTGCGAGCGACTAGCTCGTAGCTAGCTAGCTAGCTAGTGACTGCGGGCGATCATCGATCTTTTATTATCGCGATCGCTACGAC";
+ const std::string seq = "ACTGCGAGCGACTAGCTCGTAGCTAGCTAGCTAGCTAGTGACTGCGGGCGATCATCGATCTTTTATTATCGCGATCGCTACGAC";
+ //const std::string seq = "CTCGTAGCTAGCTGCTAGCTAGTGACTGCGGGCGATCATCGATCTTTTATTATCGCG";
+ const SeqLib::GenomicRegion gr(0,0,0);
+ SeqLib::BamRecord b("test_name", seq, ref, &gr);
+
+ std::cerr << " SMITH WATERMAN " << std::endl;
+ std::cerr << b << std::endl;
+}
+
+BOOST_AUTO_TEST_CASE( read_filter_1 ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamHeader h = br.Header();
+
+ SeqLib::GRC g;
+ g.add(SeqLib::GenomicRegion(h.Name2ID("X"), 1100000, 1800000));
+ g.CreateTreeMap();
+
+ // make a new rule set
+ ReadFilterCollection rfc;
+
+ // make a new filter region
+ ReadFilter rf;
+
+ // add an isize rule on whole-genome
+ AbstractRule ar;
+ ar.isize = Range(200, 600, false); // 200 to 600, not inverted
+  ar.mapq = Range(10, 50, false); // mapq 10 to 50, not inverted
+  ar.nm = Range(1, 1, false); // NM exactly 1, not inverted
+ rf.AddRule(ar);
+
+ rf.setRegions(g);
+
+ // add to the filter collection
+ rfc.AddReadFilter(rf);
+
+ SeqLib::GRC gback = rfc.getAllRegions();
+ BOOST_CHECK_EQUAL(gback.size(), g.size());
+ for (size_t i = 0; i < gback.size(); ++i)
+ assert(g[i] == gback[i]);
+
+
+ // display
+ std::cerr << br.PrintRegions() << std::endl;
+
+ // read / filter the reads
+ SeqLib::BamRecord rec;
+ size_t count = 0;
+
+ while(br.GetNextRecord(rec) && count++ < 10000) {
+
+ if (!rfc.isValid(rec))
+ continue;
+
+ // test isize rule
+    if (!(rec.FullInsertSize() >= 200 && rec.FullInsertSize() <= 600)) {
+ std::cerr << rec.FullInsertSize() << std::endl;
+ assert(false);
+ }
+ // test mapq rule
+    if (!(rec.MapQuality() >= 10 && rec.MapQuality() <= 50)) {
+ std::cerr << rec.MapQuality() << std::endl;
+ assert(false);
+ }
+ // test nm rule
+ if (!(rec.GetIntTag("NM") != 1)) {
+ std::cerr << rec.GetIntTag("NM") << std::endl;
+ assert(false);
+ }
+
+ }
+}
+
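+// Shape of the filter assembled above, condensed into a sketch
+// (Range arguments are min, max, inverted):
+//   AbstractRule ar;
+//   ar.isize = Range(200, 600, false);
+//   ReadFilter rf;   rf.AddRule(ar);   rf.setRegions(g);
+//   ReadFilterCollection rfc;   rfc.AddReadFilter(rf);
+//   while (br.GetNextRecord(rec))
+//     if (rfc.isValid(rec)) { /* record passes the region and rule set */ }
+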
+BOOST_AUTO_TEST_CASE ( fermi_add_reads ) {
+
+ SeqLib::FermiAssembler f;
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while (br.GetNextRecord(r) && count++ < 1000)
+ f.AddRead(r);
+ while (br.GetNextRecord(r) && count++ < 2000)
+ f.AddRead(SeqLib::UnalignedSequence(r.Qname(), r.Sequence(), r.Qualities()));
+
+ f.CorrectReads();
+ f.PerformAssembly();
+ std::vector<std::string> out = f.GetContigs();
+
+
+
+}
+
+BOOST_AUTO_TEST_CASE ( seq_utils ) {
+
+ // add commas
+ BOOST_CHECK_EQUAL(SeqLib::AddCommas(1),"1");
+ BOOST_CHECK_EQUAL(SeqLib::AddCommas(1000000),"1,000,000");
+
+ // percent calc
+ BOOST_CHECK_EQUAL(SeqLib::percentCalc(10,100), 10);
+ BOOST_CHECK_EQUAL(SeqLib::percentCalc(7,8), 87);
+ BOOST_CHECK_EQUAL(SeqLib::percentCalc(9,10), 90);
+ BOOST_CHECK_EQUAL(SeqLib::percentCalc(2,3), 66);
+
+ // scrub string
+ BOOST_CHECK_EQUAL(SeqLib::scrubString("chr1", "chr"), "1");
+ BOOST_CHECK_EQUAL(SeqLib::scrubString("chr1", ""), "chr1");
+ BOOST_CHECK_EQUAL(SeqLib::scrubString("chr1", "dd"), "chr1");
+ BOOST_CHECK_EQUAL(SeqLib::scrubString("chr1", "1"), "chr");
+
+}
+
+BOOST_AUTO_TEST_CASE( bam_record ) {
+
+ // get a record
+ SeqLib::BamReader br, br2;
+
+ // try multiple open
+ std::vector<std::string> bs = {"test_data/small.bam", "test_data/small.bam"};
+ BOOST_CHECK(!br2.Open(bs)); // should be false, no dups
+ bs = {"test_data/small.bam", "test_data/small.cram"};
+ BOOST_CHECK(br.Open(bs)); // should be true
+ SeqLib::BamRecord r;
+
+ SeqLib::BamRecordVector brv;
+
+ size_t count = 0;
+ br.GetNextRecord(r);
+
+ BOOST_CHECK_EQUAL(r.AsGenomicRegion().chr, 22);
+ BOOST_CHECK_EQUAL(r.AsGenomicRegion().pos1,999901);
+ BOOST_CHECK_EQUAL(r.AsGenomicRegion().pos2,1000002);
+ BOOST_CHECK_EQUAL(r.AsGenomicRegion().strand,'+');
+
+ BOOST_CHECK_EQUAL(r.AsGenomicRegionMate().chr, 22);
+ BOOST_CHECK_EQUAL(r.AsGenomicRegionMate().pos1,999993);
+ BOOST_CHECK_EQUAL(r.AsGenomicRegionMate().pos2,1000094);
+ BOOST_CHECK_EQUAL(r.AsGenomicRegionMate().strand,'-');
+
+ BOOST_CHECK_EQUAL(std::floor(r.MeanPhred()), 34);
+
+ BOOST_CHECK_EQUAL(r.CountNBases(), 0);
+
+ r.SetQname("testq");
+ BOOST_CHECK_EQUAL(r.Qname(), "testq");
+
+ const std::string s = "ACTGCTAGCTAGCTACTCTGCTACTATATTAGCGCGCATTCGC";
+ r.SetSequence(s);
+ BOOST_CHECK_EQUAL(r.Sequence(), s);
+
+ r.SmartAddTag("ST", "1");
+ r.SmartAddTag("ST", "3");
+ r.SmartAddTag("ST", "5");
+ r.SmartAddTag("S2", "5");
+
+ BOOST_CHECK_EQUAL(r.GetSmartIntTag("ST").size(), 3);
+ BOOST_CHECK_EQUAL(r.GetSmartIntTag("ST").at(2), 5);
+ BOOST_CHECK_EQUAL(r.GetSmartDoubleTag("ST").size(), 3);
+ BOOST_CHECK_EQUAL(r.GetSmartDoubleTag("ST").at(2), 5.0);
+
+ BOOST_CHECK_EQUAL(r.GetSmartStringTag("ST").size(), 3);
+ BOOST_CHECK_EQUAL(r.GetSmartStringTag("ST")[1], "3");
+
+ BOOST_CHECK_EQUAL(r.GetSmartDoubleTag("S2").at(0), 5.0);
+ BOOST_CHECK_EQUAL(r.GetSmartIntTag("S2").at(0), 5);
+}
+
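+// Tag behavior checked above, in brief: repeated SmartAddTag("ST", ...) calls
+// accumulate values under a single tag, and GetSmartIntTag / GetSmartDoubleTag /
+// GetSmartStringTag return all of them as a vector in insertion order.
+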
+BOOST_AUTO_TEST_CASE( fermi_assemble ) {
+
+ SeqLib::FermiAssembler f;
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamRecord r;
+
+ SeqLib::BamRecordVector brv;
+
+ size_t count = 0;
+ while(br.GetNextRecord(r) && count++ < 1000) {
+ brv.push_back(r);
+ }
+
+ f.AddReads(brv);
+
+ f.CorrectReads();
+
+ SeqLib::UnalignedSequenceVector reads = f.GetSequences();
+ BOOST_CHECK_EQUAL(reads.size(), brv.size());
+
+ for (int i = 0; i < reads.size(); ++i) {
+ if (brv[i].Sequence() != reads[i].Seq) {
+ std::cerr << "************" << std::endl;
+ std::cerr << brv[i].Sequence() << std::endl;
+ std::cerr << reads[i].Seq << std::endl;
+ }
+ }
+
+  // perform the assembly
+ std::cerr << "...performing assembly" << std::endl;
+ f.PerformAssembly();
+
+ // retrieve the contigs
+ std::vector<std::string> contigs = f.GetContigs();
+
+}
+
+
+BOOST_AUTO_TEST_CASE( bam_header_stdout ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamHeader h = br.Header();
+
+ std::cout << h.AsString() << std::endl;
+}
+
+BOOST_AUTO_TEST_CASE( bam_header_name2id ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamHeader h = br.Header();
+
+ BOOST_CHECK_EQUAL(h.Name2ID("2"), 1);
+ BOOST_CHECK_EQUAL(h.Name2ID("23"), -1);
+
+}
+
+BOOST_AUTO_TEST_CASE( bam_header_id2name ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamHeader h = br.Header();
+
+ BOOST_CHECK_EQUAL(h.IDtoName(2), "3");
+ BOOST_CHECK_THROW(h.IDtoName(100), std::out_of_range);
+ BOOST_CHECK_THROW(h.IDtoName(-1), std::invalid_argument);
+ BOOST_CHECK_THROW(SeqLib::BamHeader().IDtoName(1), std::out_of_range);
+}
+
+BOOST_AUTO_TEST_CASE( genomic_ranges_string_constructor) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamHeader h = br.Header();
+
+ const std::string in = "2:1,000,000-2,000,000";
+ SeqLib::GenomicRegion gr(in, h);
+ BOOST_CHECK_EQUAL(gr.chr, 1);
+ BOOST_CHECK_EQUAL(gr.pos1, 1000000);
+ BOOST_CHECK_EQUAL(gr.pos2, 2000000);
+
+ BOOST_CHECK_THROW(SeqLib::GenomicRegion(in, SeqLib::BamHeader()), std::invalid_argument);
+
+ BOOST_CHECK_EQUAL(gr.ChrName(h), "2");
+ BOOST_CHECK_EQUAL(gr.ChrName(SeqLib::BamHeader()), "2");
+ gr.chr = 1000;
+ BOOST_CHECK_THROW(gr.ChrName(h), std::invalid_argument);
+
+}
+
+BOOST_AUTO_TEST_CASE( genomic_region_less_than ) {
+
+ SeqLib::GenomicRegion gr1(0, 1, 2);
+ SeqLib::GenomicRegion gr2(1, 1, 2);
+ SeqLib::GenomicRegion gr3(1, 2, 2);
+ SeqLib::GenomicRegion gr4(1, 6, 6);
+
+ BOOST_CHECK(gr1 < gr2);
+ BOOST_CHECK(gr2 > gr1);
+ BOOST_CHECK(!(gr1 > gr2));
+
+ BOOST_CHECK(gr2 < gr3);
+ BOOST_CHECK(gr3 > gr2);
+ BOOST_CHECK(!(gr2 > gr3));
+
+ BOOST_CHECK(gr3 < gr4);
+ BOOST_CHECK(!(gr4 == gr3));
+ BOOST_CHECK(!(gr3 > gr4));
+ BOOST_CHECK(gr4 > gr3);
+
+ BOOST_CHECK(!(gr1 < gr1));
+ BOOST_CHECK(!(gr1 > gr1));
+
+ BOOST_CHECK(!(gr1 != gr1));
+ BOOST_CHECK(gr2 != gr1);
+ BOOST_CHECK(gr3 != gr1);
+ BOOST_CHECK(gr4 != gr3);
+
+ BOOST_CHECK(gr1 >= gr1);
+ BOOST_CHECK(gr2 >= gr2);
+ BOOST_CHECK(gr3 >= gr3);
+ BOOST_CHECK(gr4 >= gr4);
+
+ BOOST_CHECK(gr1 <= gr1);
+ BOOST_CHECK(gr2 <= gr2);
+ BOOST_CHECK(gr3 <= gr3);
+ BOOST_CHECK(gr4 <= gr4);
+
+ BOOST_CHECK(gr1 <= gr2);
+ BOOST_CHECK(gr2 >= gr1);
+
+ BOOST_CHECK(gr2 <= gr3);
+ BOOST_CHECK(gr3 >= gr2);
+
+}
+
+BOOST_AUTO_TEST_CASE( genomic_region_distance ) {
+
+ SeqLib::GenomicRegion gr1(0, 10, 100);
+ SeqLib::GenomicRegion gr2(0, 10, 200);
+ SeqLib::GenomicRegion gr3(1, 10, 100);
+ SeqLib::GenomicRegion gr4(0, 100, 100);
+
+ BOOST_CHECK_EQUAL(gr1.DistanceBetweenEnds(gr3), -1);
+ BOOST_CHECK_EQUAL(gr1.DistanceBetweenEnds(gr1), 0);
+ BOOST_CHECK_EQUAL(gr1.DistanceBetweenEnds(gr2), 100);
+ BOOST_CHECK_EQUAL(gr1.DistanceBetweenEnds(gr4), 0);
+
+ BOOST_CHECK_EQUAL(gr1.DistanceBetweenStarts(gr3), -1);
+ BOOST_CHECK_EQUAL(gr1.DistanceBetweenStarts(gr1), 0);
+ BOOST_CHECK_EQUAL(gr1.DistanceBetweenStarts(gr2), 0);
+ BOOST_CHECK_EQUAL(gr1.DistanceBetweenStarts(gr4), 90);
+
+}
+
+BOOST_AUTO_TEST_CASE( small_trie_from_file) {
+
+ AbstractRule ar;
+ const bool inverted = false;
+ ar.addMotifRule("test_data/motif.txt", inverted);
+
+ ReadFilterCollection rfc;
+ ReadFilter rf;
+ rf.AddRule(ar);
+ rfc.AddReadFilter(rf);
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+
+ SeqLib::BamRecord rec;
+ bool rule;
+ size_t count = 0;
+ while (br.GetNextRecord(rec) && count++ < 1000){
+ }
+
+}
+
+BOOST_AUTO_TEST_CASE( large_trie ) {
+
+ const std::string dictionary = "ACTG";
+
+ const int string_size = 20;
+ const int string_count = 10000;
+
+ SeqLib::Filter::AhoCorasick aho;
+
+ std::vector<std::string> k;
+
+ std::cerr << "...generating key" << std::endl;
+ for (int i = 0; i < string_count; ++i) {
+ char* c = (char*) malloc(string_size + 1);
+ for (int j = 0; j < string_size; ++j)
+ c[j] = dictionary.at(rand() % 4);
+ c[string_size] = '\0';
+ k.push_back(std::string(c));
+ free(c);
+ }
+ std::cerr << "...done with key" << std::endl;
+
+ std::cerr << "...generating trie" << std::endl;
+ for (auto& i : k)
+ aho.AddMotif(i);
+ std::cerr << "...done generating trie" << std::endl;
+
+ std::cerr << "...querying trie" << std::endl;
+ auto result = aho.aho_trie->parse_text(k[0]);
+ std::cerr << "...querying trie fast" << std::endl;
+ for (int i = 0; i < string_count; ++i) {
+ //if (i % 20000 == 0)
+ // std::cerr << "... " << i << std::endl;
+ auto result = aho.aho_trie->parse_text(k[i]);
+ }
+
+}
+
+BOOST_AUTO_TEST_CASE( genomic_region_constructors ) {
+
+ // GenomicRegion Constructors
+ SeqLib::GenomicRegion gr(0, 0, 10, '+');
+ BOOST_CHECK_EQUAL(gr.Width(), 11);
+
+ SeqLib::GenomicRegion gr_empty;
+ BOOST_TEST(gr_empty.IsEmpty());
+
+ SeqLib::GenomicRegion gr2("chrX", "0", "10", SeqLib::BamHeader());
+ BOOST_CHECK_EQUAL(gr2.Width(), 11);
+ BOOST_CHECK_EQUAL(gr2.chr, 22);
+
+ SeqLib::GenomicRegion gr3("X", "0", "10", SeqLib::BamHeader());
+ BOOST_TEST(gr2 == gr3);
+
+ BOOST_CHECK_THROW(SeqLib::GenomicRegion gr3("X", "a", "10", SeqLib::BamHeader()), std::invalid_argument);
+ BOOST_CHECK_THROW(SeqLib::GenomicRegion gr3("X", "1000000000000000000000000000000000000000000000000000000000000000000000000000000", "10", SeqLib::BamHeader()), std::out_of_range);
+
+ BOOST_CHECK_EQUAL(gr.DistanceBetweenStarts(gr2), -1);
+ BOOST_CHECK_EQUAL(gr2.DistanceBetweenStarts(gr), -1);
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ BOOST_CHECK_EQUAL(SeqLib::GenomicRegion("X","1","100", br.Header()).chr, 22);
+
+ // check negative inputs
+ SeqLib::GenomicRegion grn(-1,-11,-10);
+ BOOST_CHECK_EQUAL(grn.chr, -1);
+ BOOST_CHECK_EQUAL(grn.pos1, -11);
+ BOOST_CHECK_EQUAL(grn.pos2, -10);
+
+ // check strand constructions
+ SeqLib::GenomicRegion gra(0,0,0);
+ SeqLib::GenomicRegion grb(0,10000,10001, '+');
+ SeqLib::GenomicRegion grc(0,0,3, '-');
+ BOOST_CHECK_EQUAL(gra.strand, '*');
+ BOOST_CHECK_EQUAL(grb.strand, '+');
+ BOOST_CHECK_EQUAL(grc.strand, '-');
+
+ // check point string
+ BOOST_CHECK_EQUAL(grb.PointString(), "1:10,000(+)");
+
+ // check pretty string
+ std::stringstream ss;
+ ss << grb;
+ BOOST_CHECK_EQUAL(ss.str(), "1:10,000-10,001(+)");
+
+}
+
+BOOST_AUTO_TEST_CASE( genomic_region_bad_inputs ) {
+
+ BOOST_CHECK_THROW(SeqLib::GenomicRegion(0, 10, 9), std::invalid_argument);
+
+ BOOST_CHECK_THROW(SeqLib::GenomicRegion(0,0,0,'P'), std::invalid_argument);
+
+}
+
+// BOOST_AUTO_TEST_CASE( genomic_region_random ) {
+
+// SeqLib::GenomicRegion gr;
+// std::srand(42);
+// gr.random();
+// BOOST_CHECK_EQUAL(gr.pointString(), "9:69,477,830(*)");
+
+// }
+
+BOOST_AUTO_TEST_CASE( genomic_region_range_operations ) {
+
+ SeqLib::GenomicRegion gr(0,1,10);
+ SeqLib::GenomicRegion gr2(0,1,11);
+ gr.Pad(3);
+ gr2.Pad(-3);
+ BOOST_CHECK_EQUAL(gr.pos1,-2);
+ BOOST_CHECK_EQUAL(gr.pos2,13);
+ BOOST_CHECK_EQUAL(gr2.pos1,4);
+ BOOST_CHECK_EQUAL(gr2.pos2,8);
+
+ BOOST_CHECK_THROW(gr.Pad(-10), std::out_of_range);
+
+}
+
+BOOST_AUTO_TEST_CASE( genomic_check_overlaps ) {
+
+ SeqLib::GenomicRegion gr1(0, 0, 10, '+');
+ SeqLib::GenomicRegion gr2(1, 0, 10, '+');
+
+ SeqLib::GenomicRegion gr3(0, 10, 20, '+');
+ SeqLib::GenomicRegion gr4(1, 4, 10, '+');
+
+ SeqLib::GenomicRegion gr5(1, 11, 12, '+');
+
+ // partial overlaps should be one
+ BOOST_CHECK_EQUAL(gr1.GetOverlap(gr3), 1);
+
+ // argument contained gets 2
+ BOOST_CHECK_EQUAL(gr2.GetOverlap(gr4), 2);
+
+ // object contained gets 3
+ BOOST_CHECK_EQUAL(gr4.GetOverlap(gr2), 3);
+
+ // same chr, no overlap
+ BOOST_CHECK_EQUAL(gr4.GetOverlap(gr5), 0);
+ BOOST_CHECK_EQUAL(gr5.GetOverlap(gr4), 0);
+
+}
+
+BOOST_AUTO_TEST_CASE( bwa_wrapper ) {
+
+ SeqLib::BWAWrapper bwa;
+
+ // Set some options
+ bwa.SetGapOpen(32);
+ bwa.SetGapExtension(1);
+ bwa.SetMismatchPenalty(18);
+ bwa.SetAScore(2);
+ bwa.SetZDropoff(100);
+ bwa.Set3primeClippingPenalty(5);
+ bwa.Set5primeClippingPenalty(5);
+ bwa.SetBandwidth(1000);
+ bwa.SetReseedTrigger(1.5);
+
+ BOOST_CHECK_THROW(bwa.SetGapOpen(-1), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.SetGapExtension(-1), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.SetMismatchPenalty(-18), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.SetAScore(-2), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.SetZDropoff(-100), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.Set3primeClippingPenalty(-5), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.Set5primeClippingPenalty(-5), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.SetBandwidth(-1000), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.SetReseedTrigger(-1.5), std::invalid_argument);
+
+ // no index loaded yet
+ BOOST_CHECK_THROW(bwa.ChrIDToName(1), std::runtime_error);
+
+ // load a test index
+ BOOST_TEST(SeqLib::read_access_test(TREF));
+ bwa.LoadIndex(TREF);
+
+ BOOST_CHECK_EQUAL(bwa.NumSequences(), 2);
+
+ BOOST_CHECK_EQUAL(bwa.ChrIDToName(0), "ref1");
+ BOOST_CHECK_EQUAL(bwa.ChrIDToName(1), "ref2");
+ BOOST_CHECK_THROW(bwa.ChrIDToName(2), std::out_of_range);
+
+ BOOST_CHECK(!bwa.LoadIndex("test_data/small.bam"));
+
+ SeqLib::BamHeader hh = bwa.HeaderFromIndex();
+ BOOST_CHECK_EQUAL(hh.NumSequences(), 2);
+
+ // error check the index construction
+  SeqLib::UnalignedSequenceVector usv_bad1, usv_bad2, usv;
+ usv_bad1.push_back(SeqLib::UnalignedSequence("ref1","ACATGGCGAGCACTTCTAGCATCAGCTAGCTACGATCGATCGATCGATCGTAGC", std::string()));
+ usv_bad1.push_back(SeqLib::UnalignedSequence("ref4", std::string(), std::string()));
+ usv_bad1.push_back(SeqLib::UnalignedSequence("ref5","CGATCGTAGCTAGCTGATGCTAGAAGTGCTCGCCATGT", std::string()));
+ usv_bad2.push_back(SeqLib::UnalignedSequence(std::string(), "ACATGGCGAGCACTTCTAGCATCAGCTAGCTACGATCGATCGATCGATCGTAGC", std::string()));
+ usv_bad2.push_back(SeqLib::UnalignedSequence("ref4","ACCATCGCAGCAGCTATCTATTATATCGGCAGCATCTAGC", std::string()));
+ usv_bad2.push_back(SeqLib::UnalignedSequence("ref5","CGATCGTAGCTAGCTGATGCTAGAAGTGCTCGCCATGT", std::string()));
+ BOOST_CHECK_THROW(bwa.ConstructIndex(usv_bad1), std::invalid_argument);
+ BOOST_CHECK_THROW(bwa.ConstructIndex(usv_bad2), std::invalid_argument);
+
+ // construct a normal index
+ usv.push_back(SeqLib::UnalignedSequence("ref3","ACATGGCGAGCACTTCTAGCATCAGCTAGCTACGATCGATCGATCGATCGTAGC", std::string()));
+ usv.push_back(SeqLib::UnalignedSequence("ref4","CTACTTTATCATCTACACACTGCCTGACTGCGGCGACGAGCGAGCAGCTACTATCGACT", std::string()));
+ usv.push_back(SeqLib::UnalignedSequence("ref5","CGATCGTAGCTAGCTGATGCTAGAAGTGCTCGCCATGT", std::string()));
+ usv.push_back(SeqLib::UnalignedSequence("ref6","TATCTACTGCGCGCGATCATCTAGCGCAGGACGAGCATC" + std::string(100,'N') + "CGATCGTTATTATCGAGCGACGATCTACTACGT", std::string()));
+
+ bwa.ConstructIndex(usv);
+
+ BOOST_CHECK_EQUAL(bwa.NumSequences(), 4);
+ bwa.ChrIDToName(1);
+
+ BOOST_CHECK_THROW(bwa.ChrIDToName(-1), std::out_of_range);
+ BOOST_CHECK_THROW(bwa.ChrIDToName(10000), std::out_of_range);
+
+ std::cout << bwa.ChrIDToName(2) << std::endl;
+
+ BOOST_CHECK_EQUAL(bwa.ChrIDToName(0), "ref3");
+ BOOST_CHECK_EQUAL(bwa.ChrIDToName(1), "ref4");
+ BOOST_CHECK_EQUAL(bwa.ChrIDToName(2), "ref5");
+ BOOST_CHECK_EQUAL(bwa.ChrIDToName(3), "ref6");
+ BOOST_CHECK_THROW(bwa.ChrIDToName(4), std::out_of_range);
+
+ // write the index
+ BOOST_CHECK(bwa.WriteIndex(OREF));
+
+ // write the fasta
+ std::ofstream os;
+ os.open(OREF);
+ os << ">" << usv[0].Name << std::endl << usv[0].Seq <<
+ std::endl << ">" << usv[1].Name << std::endl << usv[1].Seq <<
+ std::endl << ">" << usv[2].Name << std::endl << usv[2].Seq <<
+ std::endl << ">" << usv[3].Name << std::endl << usv[3].Seq <<
+ std::endl;
+ os.close();
+
+ // read it back
+ BOOST_CHECK(bwa.LoadIndex(OREF));
+
+ // check that its good
+ BOOST_CHECK_EQUAL(bwa.ChrIDToName(0), "ref3");
+ BOOST_CHECK_EQUAL(bwa.ChrIDToName(1), "ref4");
+
+ // try aligning a sequence
+ std::cerr << "...aligning sequences" << std::endl;
+ SeqLib::BamRecordVector brv, brv2;
+ bool hardclip = false;
+ bwa.AlignSequence("ACATGGCGAGCACTTCTAGCATCAGCTAGCTACGATCG", "name", brv, 0.9, hardclip, 1);
+ // reverse complement
+ bwa.AlignSequence("CGATCGTAGCTAGCTGATGCTAGAAGTGCTCGC", "name", brv2, 0.9, hardclip, 2);
+
+ BOOST_CHECK_EQUAL(brv[0].Qname(), "name");
+ BOOST_CHECK_EQUAL(brv[0].ChrID(), 2);
+ BOOST_CHECK_EQUAL(brv[0].Sequence(), "CGATCGTAGCTAGCTGATGCTAGAAGTGCTCGCCATGT");
+ std::cerr << " brv[0].GetCigar() " << brv[0].GetCigar() << std::endl;
+ BOOST_CHECK_EQUAL(brv[0].GetCigar()[0].Type(), 'M');
+ BOOST_CHECK_EQUAL(brv[0].GetCigar()[0].Length(), 38);
+
+ // check from iterator
+ SeqLib::Cigar ccc = brv[0].GetCigar();
+
+ //SeqLib::Cigar::const_iterator f = brv[0].GetCigar().begin();
+ BOOST_CHECK_EQUAL(ccc.begin()->Length(), 38);
+
+ // check that it got both alignments
+ BOOST_CHECK_EQUAL(brv2.size(), 2);
+
+ // print info
+ std::cerr << bwa << std::endl;
+}
+
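+// In-memory alignment flow exercised above, condensed into a sketch:
+//   SeqLib::BWAWrapper bwa;
+//   bwa.ConstructIndex(usv);   // every UnalignedSequence needs a non-empty Name and Seq
+//   SeqLib::BamRecordVector hits;
+//   bwa.AlignSequence("ACATGGCGAGCACTTCTAGCATCAGCTAGCTACGATCG", "name", hits,
+//                     0.9, /*hardclip=*/false, 1);   // argument values as used above
+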
+BOOST_AUTO_TEST_CASE( bam_reader ) {
+
+ // print empty read
+ std::cerr << SeqLib::BamRecord() << std::endl;
+
+ SeqLib::BamReader bw;
+ bw.Open(SBAM);
+
+ // open index
+ bw.SetRegion(SeqLib::GenomicRegion(22, 1000000, 1001000));
+
+ // make a set of locations
+ SeqLib::GRC grc;
+ for (size_t i = 0; i < 24; ++i)
+ grc.add(SeqLib::GenomicRegion(i, 1, 100));
+
+ // set regions
+ bw.SetMultipleRegions(grc);
+
+ // write index of new bam
+ // should print a warning since no write bam is specified
+ //bw.BuildIndex();
+
+ // open an output BAM
+ //bw.OpenWriteBam(OBAM);
+
+ // set tags to strip
+ //bw.setStripTags("OQ,BI");
+
+ // loop through and grab some reads
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while (bw.GetNextRecord(r)) {
+ //if (++count % 10 == 0)
+ // bw.WriteRecord(r);
+ }
+
+ // display info about BAM
+ std::cerr << bw << std::endl;
+
+ // write index of new bam
+ //bw.BuildIndex();
+
+ // reset the walker
+ bw.Reset();
+
+ // set a smaller region
+ bw.SetRegion(SeqLib::GenomicRegion(0, 1, 100));
+ std::cerr << bw << std::endl;
+
+ // write as a cram
+ //bw.OpenWriteBam(OCRAM);
+
+ //
+ //bw.setCram(OCRAM, HGREF);
+
+ // print cram writer
+ //std::cerr << bw << std::endl;
+ // write the CRAM
+ //while (bw.GetNextRecord(r, rule)) {
+ // if (++count % 10 == 0) {
+ // std::cerr << count << std::endl;
+ // bw.WriteRecord(r);
+ // }
+ //}
+
+}
+
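+// Region-restricted reading, condensed from the walk above into a sketch:
+//   SeqLib::BamReader bw;   bw.Open(SBAM);
+//   bw.SetRegion(SeqLib::GenomicRegion(22, 1000000, 1001000));  // single region (uses the index)
+//   bw.SetMultipleRegions(grc);                                 // or a whole set of regions
+//   while (bw.GetNextRecord(r)) { /* ... */ }
+//   bw.Reset();                                                 // rewind before walking again
+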
+BOOST_AUTO_TEST_CASE( set_qualities ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+
+
+ SeqLib::BamRecord r;
+ while (br.GetNextRecord(r)) {
+ r.SetQualities("", 0);
+ BOOST_CHECK_EQUAL(r.Qualities(), std::string());
+ r.SetQualities(std::string(r.Length(), '#'), 33);
+ BOOST_CHECK_EQUAL(r.Qualities(), std::string(r.Length(), '#'));
+ BOOST_CHECK_THROW(r.SetQualities(std::string(8, '#'), 0), std::invalid_argument);
+ break;
+ }
+}
+
+BOOST_AUTO_TEST_CASE( header_constructor ) {
+
+ SeqLib::HeaderSequenceVector hsv;
+ hsv.push_back(SeqLib::HeaderSequence("1", 1000));
+ hsv.push_back(SeqLib::HeaderSequence("chr2", 1200));
+ SeqLib::BamHeader hdr(hsv);
+
+}
+
+BOOST_AUTO_TEST_CASE( overlapping_coverage ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamRecordVector brv;
+ size_t count = 0;
+ SeqLib::BamRecord r;
+ while(br.GetNextRecord(r) && ++count < 4) {
+ std::cout << " r " << r << std::endl;
+ brv.push_back(r);
+ }
+ BOOST_CHECK_EQUAL(brv[0].OverlappingCoverage(brv[2]), 101);
+
+}
+
+BOOST_AUTO_TEST_CASE( gr_chr_region_set) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+
+
+ SeqLib::GenomicRegion gr("1", br.Header());
+ BOOST_CHECK_EQUAL(gr.chr, 0);
+ BOOST_CHECK_EQUAL(gr.pos2, 249250621);
+ BOOST_CHECK_EQUAL(gr.pos1, 1);
+
+ BOOST_CHECK_THROW(SeqLib::GenomicRegion gr2("-1", br.Header()), std::invalid_argument);
+
+}
+
+BOOST_AUTO_TEST_CASE( sequtils ) {
+
+ std::string seq = "actgACGTnTCN";
+
+ SeqLib::rcomplement(seq);
+
+ BOOST_CHECK_EQUAL(seq, "NGAnACGTcagt");
+
+}
+
+BOOST_AUTO_TEST_CASE( bam_write ) {
+
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamHeader h = br.Header();
+
+ SeqLib::BamRecord rec;
+
+ // empty constructor
+ SeqLib::BamWriter w;
+
+ BOOST_CHECK(!w.WriteHeader());
+ BOOST_CHECK(!w.Close());
+ BOOST_CHECK(!w.BuildIndex());
+ BOOST_CHECK(!w.WriteRecord(rec));
+
+ w.Open("tmp_out.bam");
+
+ // check that set CRAM fails
+ BOOST_CHECK(!w.SetCramReference("dummy"));
+ BOOST_CHECK(!w.WriteHeader());
+
+ w.SetHeader(h);
+
+ w.WriteHeader();
+
+ size_t count = 0;
+
+ while(br.GetNextRecord(rec) && count++ < 10000)
+ w.WriteRecord(rec);
+
+ BOOST_CHECK(!w.BuildIndex());
+ w.Close();
+
+ w.BuildIndex();
+
+ // check that write header now fails
+ BOOST_CHECK(!w.WriteHeader());
+
+ // check that set CRAM fails
+ BOOST_CHECK(!w.SetCramReference("badref"));
+
+ // print some info
+ std::cerr << w << std::endl;
+
+}
+
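+// BamWriter lifecycle exercised above, condensed into a sketch; the BOOST_CHECKs
+// above show that calls made out of order simply return false:
+//   SeqLib::BamWriter w;
+//   w.Open("tmp_out.bam");
+//   w.SetHeader(h);
+//   w.WriteHeader();
+//   w.WriteRecord(rec);
+//   w.Close();
+//   w.BuildIndex();   // note the pre-Close() BuildIndex() above returned false
+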
+BOOST_AUTO_TEST_CASE( bam_record_more ) {
+
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamHeader h = br.Header();
+
+ SeqLib::BamRecord rec;
+ size_t count = 0;
+
+ while(br.GetNextRecord(rec) && count++ < 100) {
+ rec.ClearSeqQualAndTags();
+ assert(rec.Sequence().empty());
+ assert(rec.Qualities().empty());
+ assert(rec.GetIntTag("NM") == 0);
+ assert(rec.GetZTag("XA").empty() == (rec.CountBWASecondaryAlignments()==0));
+ rec.CountBWAChimericAlignments();
+ }
+
+ br.Reset();
+
+ SeqLib::Filter::ReadFilterCollection rf;
+
+}
+
+BOOST_AUTO_TEST_CASE( bam_record_manipulation ) {
+
+ SeqLib::Cigar cig;
+
+ // manually construct a cigar
+ cig.add(SeqLib::CigarField('M', 10));
+ cig.add(SeqLib::CigarField('I', 1));
+ cig.add(SeqLib::CigarField('M', 10));
+ cig.add(SeqLib::CigarField('D', 1));
+ cig.add(SeqLib::CigarField('M', 10));
+ cig.add(SeqLib::CigarField('S', 10));
+
+  // check that conversion to the cigar data structure (uint32_t) worked
+ SeqLib::CigarField cm('M', 1);
+ SeqLib::CigarField ci('I', 1);
+ SeqLib::CigarField cd('D', 1);
+ SeqLib::CigarField cn('N', 1);
+ SeqLib::CigarField cs('S', 1);
+ SeqLib::CigarField ch('H', 1);
+ SeqLib::CigarField cp('P', 1);
+ SeqLib::CigarField ce('=', 1);
+ SeqLib::CigarField cx('X', 1);
+ SeqLib::CigarField cb('B', 1);
+
+ BOOST_CHECK_EQUAL(cm.Type(), 'M');
+ BOOST_CHECK_EQUAL(ci.Type(), 'I');
+ BOOST_CHECK_EQUAL(cd.Type(), 'D');
+ BOOST_CHECK_EQUAL(cn.Type(), 'N');
+ BOOST_CHECK_EQUAL(cs.Type(), 'S');
+ BOOST_CHECK_EQUAL(ch.Type(), 'H');
+ BOOST_CHECK_EQUAL(cp.Type(), 'P');
+ BOOST_CHECK_EQUAL(ce.Type(), '=');
+ BOOST_CHECK_EQUAL(cx.Type(), 'X');
+
+ // check invalid constructions
+ BOOST_CHECK_THROW(SeqLib::CigarField('L', 1), std::invalid_argument);
+
+ // make a sequence
+ const std::string seq = std::string(10, 'A') + std::string(1, 'T') + std::string(10, 'C') + std::string(10, 'G') + std::string(10, 'A');
+
+ // check
+ BOOST_CHECK_EQUAL(cig.NumQueryConsumed(), 41);
+ BOOST_CHECK_EQUAL(cig.NumReferenceConsumed(), 31);
+
+ std::stringstream ss;
+ ss << cig;
+
+ // cigar from string
+ SeqLib::Cigar cig2 = SeqLib::cigarFromString(ss.str());
+
+ // check that the string from / to are consistent
+ assert(cig == cig2);
+ assert(!(cig != cig2));
+ for (int i = 0; i < cig.size(); ++i)
+ assert(cig[i] == cig2[i]);
+ for (int i = 0; i < cig.size(); ++i)
+ assert(!(cig[i] != cig2[i]));
+
+ // manually make a read
+ SeqLib::GenomicRegion gr_wrong(0, 100, 131);
+ SeqLib::GenomicRegion gr(0, 100, 130);
+
+ BOOST_CHECK_THROW(SeqLib::BamRecord("dumname", seq, &gr_wrong, cig), std::invalid_argument);
+ BOOST_CHECK_THROW(SeqLib::BamRecord("dumname", seq + "A", &gr, cig), std::invalid_argument);
+
+ SeqLib::BamRecord br("dumname", seq, &gr, cig);
+
+ BOOST_CHECK_EQUAL(br.Sequence(), seq);
+ BOOST_CHECK_EQUAL(br.GetCigar(), cig);
+ BOOST_CHECK_EQUAL(br.Qname(), "dumname");
+ BOOST_CHECK_EQUAL(br.Position(), 100);
+ BOOST_CHECK_EQUAL(br.Length(), 41);
+ BOOST_CHECK_EQUAL(br.ChrID(), 0);
+
+}
+
+BOOST_AUTO_TEST_CASE( change_bam_record ) {
+
+ // get a record
+ SeqLib::BamReader br;
+ br.Open("test_data/small.bam");
+ SeqLib::BamRecord r;
+
+ SeqLib::BamRecordVector brv;
+
+ size_t count = 0;
+ br.GetNextRecord(r);
+
+ SeqLib::Cigar c = r.GetCigar();
+ std::cerr << c << std::endl;
+
+ // try replace with cigar of same size
+ SeqLib::Cigar c2;
+ c2.add(SeqLib::CigarField('S', 101));
+ r.SetCigar(c2);
+ std::cerr << r << std::endl;
+
+ // try replace with new cigar
+ SeqLib::Cigar c3;
+ c3.add(SeqLib::CigarField('S', 10));
+ c3.add(SeqLib::CigarField('M', 91));
+ r.SetCigar(c3);
+ std::cerr << r << std::endl;
+
+ const std::string new_seq = "ACTGGACTACAC";
+
+ r.SetSequence(new_seq);
+ std::cerr << r << std::endl;
+
+ r.SetQname("dummy_qname");
+ std::cerr << r << std::endl;
+
+}
+
+BOOST_AUTO_TEST_CASE( stdinput ) {
+
+#ifdef RUN_STDIN
+ // read a BAM from stdin
+ SeqLib::BamReader b;
+ b.Open("-");
+
+ // write it back out
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ std::cerr << " STDIN " << r << std::endl;
+ }
+#endif
+}
+
+BOOST_AUTO_TEST_CASE( cramin ) {
+
+  // read a CRAM file
+ SeqLib::BamReader b;
+ b.Open("test_data/small.cram");
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ std::cerr << "CRAM " << r << std::endl;
+ }
+}
+
+BOOST_AUTO_TEST_CASE( cramin_new_ref ) {
+
+  // read a CRAM file
+ SeqLib::BamReader b;
+ b.Open("test_data/small.cram");
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 10) {
+ std::cerr << "CRAM " << r << std::endl;
+ }
+
+ b.Reset();
+
+ // should fail
+
+
+}
+
+
+BOOST_AUTO_TEST_CASE( bamin ) {
+
+  // read a BAM file
+ SeqLib::BamReader b;
+ b.Open("test_data/small.bam");
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ std::cerr << "BAM " << r << std::endl;
+ }
+}
+
+BOOST_AUTO_TEST_CASE( samin ) {
+
+  // read a SAM file
+ SeqLib::BamReader b;
+ b.Open("test_data/small.sam");
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ std::cerr << "SAM " << r << std::endl;
+ }
+}
+
+BOOST_AUTO_TEST_CASE( bamout ) {
+
+  // read a SAM file
+ SeqLib::BamReader b;
+ b.Open("test_data/small.sam");
+
+ SeqLib::BamWriter w(SeqLib::BAM);
+ //SeqLib::BamWriter w;
+ w.Open("tmp_out.bam");
+ w.SetHeader(b.Header());
+ w.WriteHeader();
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ w.WriteRecord(r);
+ }
+ w.Close();
+
+}
+
+BOOST_AUTO_TEST_CASE( samout ) {
+
+  // read a SAM file
+ SeqLib::BamReader b;
+ b.Open("test_data/small.sam");
+
+ SeqLib::BamWriter w(SeqLib::SAM);
+ w.Open("tmp_out.sam");
+ w.SetHeader(b.Header());
+ w.WriteHeader();
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ w.WriteRecord(r);
+ }
+ w.Close();
+ b.Close();
+}
+
+
+BOOST_AUTO_TEST_CASE( cramout ) {
+
+ SeqLib::BamReader b;
+ b.Open("test_data/small.cram");
+
+ SeqLib::BamWriter w(SeqLib::CRAM);
+ w.Open("tmp_out.cram");
+ w.SetHeader(b.Header());
+ w.WriteHeader();
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ w.WriteRecord(r);
+ }
+ w.Close();
+
+}
+
+BOOST_AUTO_TEST_CASE( samout_to_stdout ) {
+
+#ifdef RUN_SAM_STDOUT
+  // read a SAM file
+ SeqLib::BamReader b;
+ b.Open("test_data/small.sam");
+
+ SeqLib::BamWriter w(SeqLib::SAM);
+ w.Open("-");
+ w.SetHeader(b.Header());
+ w.WriteHeader();
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ w.WriteRecord(r);
+ }
+ w.Close();
+#endif
+}
+
+BOOST_AUTO_TEST_CASE( bamout_to_stdout ) {
+
+ //
+  // don't actually run this every time;
+  // it writes too much to stdout
+ //
+
+#ifdef RUN_BAM_STDOUT
+  // read a SAM file
+ SeqLib::BamReader b;
+ b.Open("test_data/small.sam");
+
+ SeqLib::BamWriter w(SeqLib::BAM);
+ w.Open("-");
+ w.SetHeader(b.Header());
+ w.WriteHeader();
+
+ SeqLib::BamRecord r;
+ size_t count = 0;
+ while(b.GetNextRecord(r) && count++ < 1) {
+ w.WriteRecord(r);
+ }
+ w.Close();
+#endif
+
+}
+
+BOOST_AUTO_TEST_CASE( bam_poly ) {
+
+ SeqLib::BamReader r;
+
+ BOOST_CHECK(r.Open("test_data/small.bam"));
+ BOOST_CHECK(r.Open("test_data/small.cram"));
+
+ BOOST_CHECK(r.SetRegion(SeqLib::GenomicRegion(r.Header().Name2ID("X"),1001000, 1001100)));
+ BOOST_CHECK(!r.SetRegion(SeqLib::GenomicRegion(1000, 1001000, 1001100))); // should fail
+
+ SeqLib::BamWriter w(SeqLib::BAM);
+ w.Open("tmp_out_poly.bam");
+ w.SetHeader(r.Header());
+ w.WriteHeader();
+
+ SeqLib::BamRecord rec;
+ while(r.GetNextRecord(rec)) {
+ w.WriteRecord(rec);
+ }
+
+ BOOST_CHECK(r.Reset("test_data/small.bam"));
+ BOOST_CHECK(!r.Reset("dum"));
+
+ BOOST_CHECK(r.Close("test_data/small.bam"));
+ BOOST_CHECK(r.Close("test_data/small.cram"));
+
+ // problematic here FIXME
+ //SeqLib::BamReader r2;
+ //BOOST_CHECK(r2.Open("tmp_out_poly.bam"));
+ // should return false, no index
+ //BOOST_CHECK(!r2.SetRegion(SeqLib::GenomicRegion(r.Header().Name2ID("X"),1001000, 1001100)));
+
+}
+
+
+BOOST_AUTO_TEST_CASE( plot_test ) {
+
+ SeqLib::BamReader r;
+ r.Open("test_data/small.bam");
+
+ // should return false on empty region
+ BOOST_CHECK(!r.SetMultipleRegions(SeqLib::GRC()));
+
+ SeqLib::GenomicRegion gr("X:1,002,942-1,003,294", r.Header());
+ r.SetRegion(gr);
+
+ SeqLib::SeqPlot s;
+
+ s.SetView(gr);
+
+ SeqLib::BamRecord rec;
+ SeqLib::BamRecordVector brv;
+ while(r.GetNextRecord(rec))
+ if (!rec.CountNBases() && rec.MappedFlag())
+ brv.push_back(rec);
+
+ s.SetPadding(20);
+
+ std::cout << s.PlotAlignmentRecords(brv);
+
+}
+
+
+// CURRENTLY DOES NOT WORK
+// need to find how to do reset
+// BOOST_AUTO_TEST_CASE ( reset_works ) {
+
+// SeqLib::BamReader r;
+// r.Open("test_data/small.bam");
+// //r.Open("test_data/small.cram");
+
+// SeqLib::BamRecord rec1, rec2;
+// r.GetNextRecord(rec1);
+// r.Reset();
+// std::cerr << " AFTER RESET " << std::endl;
+// std::cerr << r.GetNextRecord(rec2) << std::endl;
+
+// BOOST_CHECK_EQUAL(rec1.Qname(), rec2.Qname());
+// }
+
+BOOST_AUTO_TEST_CASE (json_parse) {
+
+ SeqLib::BamReader r;
+ r.Open("test_data/small.bam");
+ ReadFilterCollection rfc(JSON1, r.Header());
+
+ ReadFilter rf;
+ SeqLib::GRC g(VCFFILE, r.Header());
+ rf.addRegions(g);
+ AbstractRule ar;
+ ar.isize = Range(10,100, false);
+ rf.SetMateLinked(true);
+ rf.AddRule(ar);
+ rfc.AddReadFilter(rf);
+
+ std::cout << rfc << std::endl;
+
+ SeqLib::BamRecord rec;
+ size_t count = 0;
+ int start, end;
+ while(r.GetNextRecord(rec) && ++count < 10) {
+ rec.QualityTrimmedSequence(4, start, end); // phred trim first
+ rfc.isValid(rec);
+ }
+
+ // empty
+ ReadFilterCollection rfc2("", r.Header());
+
+}
+
+
+BOOST_AUTO_TEST_CASE ( ref_genome ) {
+
+ //SeqLib::RefGenome r("test_data/test_ref.fa");
+ SeqLib::RefGenome r;
+ r.LoadIndex("test_data/test_ref.fa");
+
+ BOOST_CHECK(!r.IsEmpty());
+
+ std::string out = r.QueryRegion("ref1", 0, 5);
+ BOOST_CHECK_EQUAL(out, "ATCTAT");
+
+ BOOST_CHECK_THROW(r.QueryRegion("ref1", 5,4), std::invalid_argument);
+ BOOST_CHECK_THROW(r.QueryRegion("ref1", -1,4), std::invalid_argument);
+
+ SeqLib::RefGenome r2;
+ BOOST_CHECK_THROW(r2.QueryRegion("ref1",1,2), std::invalid_argument);
+
+ // reload
+ r2.LoadIndex("test_data/test_ref.fa");
+}
+
+BOOST_AUTO_TEST_CASE ( set_cigar ) {
+
+ SeqLib::BamReader rr;
+ rr.Open(SBAM);
+ SeqLib::BamRecord rec;
+ size_t count = 0;
+ while (rr.GetNextRecord(rec) && ++count < 10) {
+ SeqLib::Cigar c;
+ c.add(SeqLib::CigarField('M', 70));
+ c.add(SeqLib::CigarField('I', 80));
+ c.add(SeqLib::CigarField('M',1));
+ rec.SetCigar(c);
+ std::cerr << rec << std::endl;
+ }
+
+
+}
diff --git a/src/.clang-format b/src/.clang-format
new file mode 100644
index 0000000..6a78d25
--- /dev/null
+++ b/src/.clang-format
@@ -0,0 +1,19 @@
+{
+ BasedOnStyle: Mozilla,
+ AlignAfterOpenBracket: true,
+ AlignConsecutiveAssignments: true,
+ AllowShortBlocksOnASingleLine: true,
+ AllowShortCaseLabelsOnASingleLine: true,
+ AllowShortFunctionsOnASingleLine: true,
+ AllowShortIfStatementsOnASingleLine: true,
+ AllowShortLoopsOnASingleLine: true,
+ # AlwaysBreakAfterReturnType: true,
+ BinPackParameters: false,
+ BinPackArguments: false,
+ BreakConstructorInitializersBeforeComma: true,
+ ConstructorInitializerAllOnOneLineOrOnePerLine: true,
+ ColumnLimit: 80,
+ NamespaceIndentation: All,
+ KeepEmptyLinesAtTheStartOfBlocks: false,
+ PointerAlignment: Right,
+}
diff --git a/src/BFC.cpp b/src/BFC.cpp
new file mode 100644
index 0000000..ca0d087
--- /dev/null
+++ b/src/BFC.cpp
@@ -0,0 +1,420 @@
+/*
+A significant portion of this code is derived from Heng Li's BFC
+repository: https://github.com/lh3/bfc
+
+BFC is copyrighted by Heng Li with the following license:
+
+The MIT License
+
+Copyright (c) 2015 Broad Institute
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+*/
+
+#include "SeqLib/BFC.h"
+
+#include <stdexcept>
+#include <algorithm>
+
+namespace SeqLib {
+
+ bool BFC::AllocateMemory(size_t n) {
+
+    if (n == 0)
+ return false;
+
+ m_seqs_size = n;
+ m_seqs = (fseq1_t*)realloc(m_seqs, n * sizeof(fseq1_t));
+
+ if (!m_seqs)
+ return false;
+
+ return true;
+ }
+
+ bool BFC::AddSequence(const BamRecord& r) {
+
+ //char* s = strdup(r.Sequence().c_str());
+ const char* q = bam_get_qname(r.raw());
+ //uint8_t* l = bam_get_qual(r.raw());
+ //char* qual = (char*)malloc(r.Length() + 1);
+ //if (l)
+ // for (size_t i = 0; i < r.Length(); ++i)
+ // qual[i] = l[i] + 33;
+ //qual[r.Length()] = '\0';
+
+ bool ret = AddSequence(r.Sequence().c_str(), r.Qualities().c_str(), q);
+
+ //if (s)
+ // free(s);
+ //if (qual)
+ // free(qual);
+
+ return ret;
+
+ }
+
+ bool BFC::AddSequence(const char* seq, const char* qual, const char* name) {
+
+    // do the initial allocation
+ if (n_seqs == 0 && !m_seqs) {
+ m_seqs_size = 32;
+ m_seqs = (fseq1_t*)malloc(m_seqs_size * sizeof(fseq1_t));
+ }
+ // realloc if not enough space
+ else if (n_seqs >= m_seqs_size) {
+ m_seqs_size = 2 * m_seqs_size;
+ m_seqs = (fseq1_t*)realloc(m_seqs, m_seqs_size * sizeof(fseq1_t));
+ }
+
+ if (!m_seqs)
+ return false;
+
+    // make sure seq and qual are valid and consistent (if qual provided)
+    if (seq && qual && strlen(qual))
+      if (strlen(seq) != strlen(qual))
+        return false;
+    if (!seq || !strlen(seq))
+      return false;
+
+ fseq1_t *s;
+
+ s = &m_seqs[n_seqs];
+
+ s->seq = strdup(seq);
+ s->qual = 0;
+ if (strlen(qual)) {
+ s->qual = strdup(qual);
+ }
+
+ s->l_seq = strlen(seq);
+ n_seqs++;
+
+ m_names.push_back(strdup(name));
+
+ assert(m_names.size() == n_seqs);
+
+ return true;
+ }
+
+
+ bool BFC::ErrorCorrect() {
+ correct_reads();
+ return true;
+ }
+
+ bool BFC::Train() {
+ learn_correct();
+ return true;
+ }
+
+ void BFC::TrainAndCorrect(const BamRecordVector& brv) {
+
+ // if already allocated, clear the old ones
+ clear();
+
+ // send reads to string
+ allocate_sequences_from_reads(brv);
+
+ // learn how to correct
+ learn_correct();
+
+ // do the correction
+ correct_reads();
+
+ }
+
+ void BFC::TrainCorrection(const std::vector<char*>& v) {
+
+ // if already allocated, clear the old ones
+ clear();
+
+ // set m_seqs and n_seqs
+ allocate_sequences_from_char(v);
+
+ // learn correct, set ch
+ learn_correct();
+
+
+ }
+
+ void BFC::TrainCorrection(const BamRecordVector& brv) {
+
+ // if already allocated, clear the old ones
+ clear();
+
+ // set m_seqs and n_seqs
+ allocate_sequences_from_reads(brv);
+
+ // learn correct, set ch
+ learn_correct();
+ }
+
+ void BFC::ErrorCorrectToTag(BamRecordVector& brv, const std::string& tag) {
+
+ if (tag.length() != 2)
+ throw std::invalid_argument("Tag length should be 2");
+
+ flt_uniq = 0;
+
+ // if already allocated, clear the old ones
+ clear();
+
+ // send reads to string
+ allocate_sequences_from_reads(brv);
+
+ // do the correction
+ correct_reads();
+
+ assert(n_seqs == brv.size());
+ for (size_t i = 0; i < n_seqs; ++i) {
+ std::string str = std::string(m_seqs[i].seq);
+ std::transform(str.begin(), str.end(),str.begin(), ::toupper);
+      brv[i].AddZTag(tag, str);
+ }
+
+ clear();
+
+ }
+
+ void BFC::ErrorCorrect(const BamRecordVector& brv) {
+
+ flt_uniq = 0;
+
+ // if already allocated, clear the old ones
+ clear();
+
+ // send reads to string
+ allocate_sequences_from_reads(brv);
+
+ // do the correction
+ correct_reads();
+ }
+
+ void BFC::ErrorCorrectInPlace(BamRecordVector& brv) {
+
+ flt_uniq = 0;
+ clear();
+
+ allocate_sequences_from_reads(brv);
+
+ correct_reads();
+
+ assert(n_seqs == brv.size());
+ for (size_t i = 0; i < n_seqs; ++i) {
+ std::string str = std::string(m_seqs[i].seq);
+ std::transform(str.begin(), str.end(),str.begin(), ::toupper);
+ brv[i].SetSequence(str);
+ }
+
+ clear();
+ }
+
+ void BFC::GetSequences(UnalignedSequenceVector& v) const {
+
+ for (size_t i = 0; i < n_seqs; ++i)
+      if (m_seqs[i].seq) { // will be null if FilterUnique dropped this read
+ std::string str = std::string(m_seqs[i].seq);
+ std::transform(str.begin(), str.end(),str.begin(), ::toupper);
+ std::string name = m_names[i] ? std::string(m_names[i]) : std::string();
+ std::string qual = m_seqs[i].qual ? std::string(m_seqs[i].qual) : std::string();
+ v.push_back(UnalignedSequence(name, str, qual));
+ }
+
+ }
+
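+  // Minimal usage sketch (illustrative only; 'brv' stands in for a populated
+  // BamRecordVector):
+  //   SeqLib::BFC bfc;
+  //   bfc.TrainAndCorrect(brv);            // count k-mers, pick k, correct reads
+  //   SeqLib::UnalignedSequenceVector usv;
+  //   bfc.GetSequences(usv);               // retrieve the corrected sequences
+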
+ void BFC::allocate_sequences_from_char(const std::vector<char*>& v) {
+
+ m_seqs_size = v.size();
+ m_seqs = (fseq1_t*)malloc(v.size() * sizeof(fseq1_t));
+
+ uint64_t size = 0;
+ for (std::vector<char*>::const_iterator r = v.begin(); r != v.end(); ++r) {
+ // for (auto& r : v) {
+ fseq1_t *s;
+
+ s = &m_seqs[n_seqs];
+
+ s->seq = strdup(*r);
+ s->qual = NULL;
+
+ s->l_seq = strlen(*r);
+ size += m_seqs[n_seqs++].l_seq;
+ }
+ return;
+
+ }
+
+ void BFC::allocate_sequences_from_reads(const BamRecordVector& brv) {
+
+ // alloc the memory
+ m_seqs_size = brv.size();
+ m_seqs = (fseq1_t*)malloc(brv.size() * sizeof(fseq1_t));
+
+ uint64_t size = 0;
+ for (BamRecordVector::const_iterator r = brv.begin(); r != brv.end(); ++r) {
+ // for (auto& r : brv) {
+ m_names.push_back(strdup(r->Qname().c_str()));
+
+ fseq1_t *s;
+
+ s = &m_seqs[n_seqs];
+
+ std::string qs = r->QualitySequence();
+ s->seq = strdup(qs.c_str());
+ s->qual = strdup(r->Qualities().c_str());
+
+ s->l_seq = qs.length();
+ size += m_seqs[n_seqs++].l_seq;
+ }
+ return;
+ }
+
+ void free_char(char*& c) {
+ if (c) {
+ free (c);
+ c = NULL;
+ }
+ }
+
+ void BFC::clear() {
+
+ assert(m_names.size() == n_seqs);
+ for (size_t i = 0; i < n_seqs; ++i) {
+ free_char(m_names[i]);
+ free_char(m_seqs[i].seq);
+ free_char(m_seqs[i].qual);
+ }
+
+ if (m_seqs)
+ free(m_seqs);
+ m_seqs = 0;
+ n_seqs = 0;
+
+ m_names.clear();
+ m_seqs_size = 0;
+
+ }
+
+ void BFC::learn_correct() {
+
+ // options
+ fml_opt_init(&fml_opt);
+
+    // if kmer is not set, infer it from the data
+ if (kmer <= 0) {
+ fml_opt_adjust(&fml_opt, n_seqs, m_seqs);
+ kmer = fml_opt.ec_k;
+ }
+
+ // initialize BFC options
+ for (size_t i = 0; i < n_seqs; ++i)
+ tot_len += m_seqs[i].l_seq; // compute total length
+ bfc_opt.l_pre = tot_len - 8 < 20? tot_len - 8 : 20;
+
+ // setup the counting of kmers
+ memset(&es, 0, sizeof(ec_step_t));
+ //kmer is learned before this
+
+ bfc_opt.k = kmer;
+
+ //es.opt = &bfc_opt, es.n_seqs = n_seqs, es.seqs = m_seqs, es.flt_uniq = flt_uniq;
+
+ // hold count info. also called bfc_ch_s. Composed of
+ // int k
+ // int l_pre
+ // cnthash_t **h
+ // h is of size 1<<l_pre (2^l_pre). It is array of hash tables
+ // h[i] is initialized with kh_init(cnt) which makes a cnthash_t
+ // bfc_ch_t *ch; // set in BFC.h
+
+ // do the counting
+ ch = fml_count(n_seqs, m_seqs, bfc_opt.k, bfc_opt.q, bfc_opt.l_pre, bfc_opt.n_threads);
+
+#ifdef DEBUG_BFC
+ // size of random hash value
+ khint_t k;
+ int* ksize = (int*)calloc(1<<ch->l_pre, sizeof(int));
+ for (int i = 0; i < (1<<ch->l_pre); ++i) {
+ for (k = kh_begin(ch->h[i]); k != kh_end(ch->h[i]); ++k)
+ ++ksize[i];
+ fprintf(stderr, "K: %d S: %d\n", i, ksize[i]);
+ }
+#endif
+ }
+
+ void BFC::correct_reads() {
+
+ assert(kmer > 0);
+
+ es.ch = ch;
+ es.opt = &bfc_opt;
+ es.n_seqs = n_seqs;
+ es.seqs = m_seqs;
+ es.flt_uniq = flt_uniq;
+
+ // make the histogram?
+ // es.ch is unchanged (const)
+ int mode = bfc_ch_hist(es.ch, hist, hist_high);
+
+ for (int i = fml_opt.min_cnt; i < 256; ++i)
+ sum_k += hist[i], tot_k += i * hist[i];
+
+#ifdef DEBUG_BFC
+ std::cerr << " sum_k " << sum_k << " tot_k " << tot_k << std::endl;
+ fprintf(stderr, "MODE: %d\n", mode);
+ for (int i = fml_opt.min_cnt; i < 256; ++i) {
+ fprintf(stderr, "hist[%d]: %d\n",i,hist[i]);
+ }
+ for (int i = fml_opt.min_cnt; i < 64; ++i) {
+ fprintf(stderr, "hist_high[%d]: %d\n",i,hist_high[i]);
+ }
+#endif
+
+ kcov = (float)tot_k / sum_k;
+ bfc_opt.min_cov = (int)(BFC_EC_MIN_COV_COEF * kcov + .499);
+ bfc_opt.min_cov = bfc_opt.min_cov < fml_opt.max_cnt? bfc_opt.min_cov : fml_opt.max_cnt;
+ bfc_opt.min_cov = bfc_opt.min_cov > fml_opt.min_cnt? bfc_opt.min_cov : fml_opt.min_cnt;
+
+#ifdef DEBUG_BFC
+ fprintf(stderr, "kcov: %f mincov: %d mode %d \n", kcov, bfc_opt.min_cov, mode);
+#endif
+
+ // do the actual error correction
+ kmer_correct(&es, mode, ch);
+
+ return;
+
+
+ }
+
+ void BFC::FilterUnique() {
+ flt_uniq = 1;
+ correct_reads();
+
+ size_t count = 0;
+ for (size_t i = 0; i < n_seqs; ++i)
+ if (m_seqs[i].seq)
+ ++count;
+ }
+
+}
diff --git a/src/BWAWrapper.cpp b/src/BWAWrapper.cpp
new file mode 100644
index 0000000..2670474
--- /dev/null
+++ b/src/BWAWrapper.cpp
@@ -0,0 +1,673 @@
+/*
+A significant portion of this code is derived from Heng Li's BWA
+repository: https://github.com/lh3/bwa
+
+BWA is copyrighted by Heng Li with the Apache2 License
+
+*/
+
+#include "SeqLib/BWAWrapper.h"
+
+#include <stdexcept>
+#include <sstream>
+#include <iostream>
+
+extern "C" {
+ #include <string.h>
+}
+
+//#define DEBUG_BWATOOLS 1
+
+#define _set_pac(pac, l, c) ((pac)[(l)>>2] |= (c)<<((~(l)&3)<<1))
+#define _get_pac(pac, l) ((pac)[(l)>>2]>>((~(l)&3)<<1)&3)
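+
+// pac is a 2-bit packed sequence: four bases are stored per byte, with base l
+// occupying the two bits at offset ((~l & 3) << 1). _set_pac writes base code
+// c (0-3) at position l; _get_pac reads it back.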
+
+namespace SeqLib {
+
+ int BWAWrapper::NumSequences() const {
+
+ if (!idx)
+ return 0;
+
+ return idx->bns->n_seqs;
+
+ }
+
+ std::string BWAWrapper::ChrIDToName(int id) const {
+
+ if (!idx)
+      throw std::runtime_error("Index has not been loaded / constructed");
+ if (id < 0 || id >= idx->bns->n_seqs)
+ throw std::out_of_range("BWAWrapper::ChrIDToName - id out of bounds of refs in index for id of " + tostring(id) + " on IDX of size " + tostring(idx->bns->n_seqs));
+
+ return std::string(idx->bns->anns[id].name);
+ }
+
+ BamHeader BWAWrapper::HeaderFromIndex() const
+ {
+
+ std::string my_hdr = bwa_print_sam_hdr2(idx->bns, "");
+
+ BamHeader hdr(my_hdr);
+ //bam_hdr_t * hdr = bam_hdr_init();
+ //bam_hdr_t * hdr = sam_hdr_read2(my_hdr);
+ //hdr->n_targets = idx->bns->n_seqs;
+ //hdr->target_name = (char**)malloc(hdr->n_targets * sizeof(char*));
+ //for (int i = 0; i < idx->bns->n_seqs; ++i) {
+ // hdr->target_name[i] = (char*)malloc( (strlen(idx->bns->anns[i].name) + 1) * sizeof(char));
+ // strcpy(hdr->target_name[i], idx->bns->anns[i].name);
+ //}
+ return hdr;
+ }
+
+ std::string BWAWrapper::bwa_print_sam_hdr2(const bntseq_t *bns, const char *hdr_line) const
+ {
+ std::string out;
+ int i, n_SQ = 0;
+ //extern char *bwa_pg;
+ if (hdr_line) {
+ const char *p = hdr_line;
+ while ((p = strstr(p, "@SQ\t")) != 0) {
+ if (p == hdr_line || *(p-1) == '\n') ++n_SQ;
+ p += 4;
+ }
+ }
+
+ //JEREMIAH
+    // get the max reference-name length
+ size_t max_s = 0;
+ for (i = 0; i < bns->n_seqs; ++i)
+ max_s = std::max(strlen(bns->anns[i].name), max_s);
+
+ if (n_SQ == 0) {
+ char buffer[max_s + 30];
+ for (i = 0; i < bns->n_seqs; ++i) {
+ //err_printf("@SQ\tSN:%s\tLN:%d\n", bns->anns[i].name, bns->anns[i].len);
+ sprintf(buffer, "@SQ\tSN:%s\tLN:%d\n", bns->anns[i].name, bns->anns[i].len);
+ out.append(buffer);
+ }
+ } else if (n_SQ != bns->n_seqs && bwa_verbose >= 2)
+ fprintf(stderr, "[W::%s] %d @SQ lines provided with -H; %d sequences in the index. Continue anyway.\n", __func__, n_SQ, bns->n_seqs);
+
+ if (hdr_line) { char buffer[200]; sprintf(buffer, "%s\n", hdr_line); out.append(buffer); } //err_printf("%s\n", hdr_line);
+ //if (bwa_pg) { char buffer[100]; sprintf(buffer, "%s\n", bwa_pg); out.append(buffer); } // err_printf("%s\n", bwa_pg);
+
+ return out;
+ }
+
+
+
+ void BWAWrapper::ConstructIndex(const UnalignedSequenceVector& v) {
+
+ if (!v.size())
+ return;
+
+ // check the integrity of the input data
+ for (UnalignedSequenceVector::const_iterator i = v.begin(); i != v.end(); ++i)
+ if (i->Name.empty() || i->Seq.empty())
+ throw std::invalid_argument("BWAWrapper::constructIndex - Reference sequences must have non-empty name and seq");
+
+ if (idx) {
+ std::cerr << "...clearing old index" << std::endl;
+ bwa_idx_destroy(idx);
+ idx = 0;
+ }
+
+ // allocate memory for idx
+    idx = (bwaidx_t*)calloc(1, sizeof(bwaidx_t));
+
+ // construct the forward-only pac
+ uint8_t* fwd_pac = seqlib_make_pac(v, true); //true->for_only
+
+ // construct the forward-reverse pac ("packed" 2 bit sequence)
+    uint8_t* pac = seqlib_make_pac(v, false); // don't write, because it is only used to make the BWT
+
+ size_t tlen = 0;
+ for (UnalignedSequenceVector::const_iterator i = v.begin(); i != v.end(); ++i)
+ tlen += i->Seq.length();
+
+#ifdef DEBUG_BWATOOLS
+ std::cerr << "ref seq length: " << tlen << std::endl;
+#endif
+
+ // make the bwt
+ bwt_t *bwt;
+ bwt = seqlib_bwt_pac2bwt(pac, tlen*2); // *2 for fwd and rev
+ bwt_bwtupdate_core(bwt);
+ free(pac); // done with fwd-rev pac
+
+ // construct sa from bwt and occ. adds it to bwt struct
+ bwt_cal_sa(bwt, 32);
+ bwt_gen_cnt_table(bwt);
+
+ // make the bns
+ bntseq_t * bns = (bntseq_t*) calloc(1, sizeof(bntseq_t));
+ bns->l_pac = tlen;
+ bns->n_seqs = v.size();
+ bns->seed = 11;
+ bns->n_holes = 0;
+
+ // make the anns
+ bns->anns = (bntann1_t*)calloc(v.size(), sizeof(bntann1_t));
+ size_t offset = 0;
+ for (size_t k = 0; k < v.size(); ++k) {
+ seqlib_add_to_anns(v[k].Name, v[k].Seq, &bns->anns[k], offset);
+ offset += v[k].Seq.length();
+ }
+
+ //ambs is "holes", like N bases
+ bns->ambs = 0; //(bntamb1_t*)calloc(1, sizeof(bntamb1_t));
+
+ // make the in-memory idx struct
+ idx->bwt = bwt;
+ idx->bns = bns;
+ idx->pac = fwd_pac;
+
+ return;
+
+ }
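+
+  // Usage sketch (illustrative; 'usv' stands in for an UnalignedSequenceVector
+  // with non-empty Name and Seq fields):
+  //   SeqLib::BWAWrapper bwa;
+  //   bwa.ConstructIndex(usv);    // build the in-memory BWA index
+  //   SeqLib::BamRecordVector hits;
+  //   bwa.AlignSequence("ACACGTACGT", "query1", hits, false, 0.9, 10);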
+
+ void BWAWrapper::SetGapOpen(int gap_open) {
+ if (gap_open < 0)
+ throw std::invalid_argument("BWAWrapper::SetGapOpen - gap_open must be >= zero");
+ memopt->o_del = memopt->o_ins = gap_open;
+ }
+
+ void BWAWrapper::SetGapExtension(int gap_ext) {
+ if (gap_ext < 0)
+ throw std::invalid_argument("BWAWrapper::SetGapExtension - gap extension must be >= zero");
+ memopt->e_del = memopt->e_ins = gap_ext;
+ }
+
+ void BWAWrapper::SetMismatchPenalty(int m) {
+ if (m < 0)
+ throw std::invalid_argument("BWAWrapper::SetMismatchPenalty - mismatch must be >= zero");
+ memopt->b = m;
+
+ bwa_fill_scmat(memopt->a, memopt->b, memopt->mat);
+ }
+
+ void BWAWrapper::SetZDropoff(int z) {
+ if (z < 0)
+ throw std::invalid_argument("BWAWrapper::SetZDropoff - dropoff must be >= zero");
+ memopt->zdrop = z;
+ }
+
+ void BWAWrapper::SetAScore(int a) {
+ if (a < 0)
+      throw std::invalid_argument("BWAWrapper::SetAScore - match score must be >= zero");
+ memopt->b *= a;
+ memopt->T *= a;
+ memopt->o_del *= a;
+ memopt->o_ins *= a;
+ memopt->e_del *= a;
+ memopt->e_ins *= a;
+ memopt->zdrop *= a;
+ memopt->pen_clip5 *= a;
+ memopt->pen_clip3 *= a;
+ memopt->pen_unpaired *= a;
+ memopt->a = a;
+ }
+
+
+ void BWAWrapper::Set3primeClippingPenalty(int p) {
+ if (p < 0)
+ throw std::invalid_argument("BWAWrapper::Set3primeClippingPenalty - penalty must be >= zero");
+ memopt->pen_clip3 = p;
+ }
+
+ void BWAWrapper::Set5primeClippingPenalty(int p) {
+ if (p < 0)
+ throw std::invalid_argument("BWAWrapper::Set5primeClippingPenalty - penalty must be >= zero");
+ memopt->pen_clip5 = p;
+ }
+
+ void BWAWrapper::SetBandwidth(int w) {
+ if (w < 0)
+ throw std::invalid_argument("BWAWrapper::SetBandwidth - bandwidth must be >= zero");
+ memopt->w = w;
+ }
+
+ void BWAWrapper::SetReseedTrigger(float r) {
+ if (r < 0)
+ throw std::invalid_argument("BWAWrapper::SetReseedTrigger - reseed trigger must be >= zero");
+ memopt->split_factor = r;
+ }
+
+ void BWAWrapper::AlignSequence(const std::string& seq, const std::string& name, BamRecordVector& vec, bool hardclip,
+ double keep_sec_with_frac_of_primary_score, int max_secondary) const {
+
+ // we haven't made an index, just return
+ if (!idx)
+ return;
+
+ mem_alnreg_v ar;
+ ar = mem_align1(memopt, idx->bwt, idx->bns, idx->pac, seq.length(), seq.data()); // get all the hits (was c_str())
+
+#ifdef DEBUG_BWATOOLS
+ std::cout << "num hits: " << ar.n << std::endl;
+#endif
+
+ double primary_score = 0;
+
+ int secondary_count = 0;
+
+ //size_t num_secondary = 0;
+ // loop through the hits
+ for (size_t i = 0; i < ar.n; ++i) {
+
+ if (ar.a[i].secondary >= 0 && (keep_sec_with_frac_of_primary_score < 0 || keep_sec_with_frac_of_primary_score > 1))
+ continue; // skip secondary alignments
+
+ // get forward-strand position and CIGAR
+ mem_aln_t a;
+
+ a = mem_reg2aln(memopt, idx->bns, idx->pac, seq.length(), seq.c_str(), &ar.a[i]);
+
+ // if score not sufficient or past cap, continue
+ bool sec_and_low_score = ar.a[i].secondary >= 0 && (primary_score * keep_sec_with_frac_of_primary_score) > a.score;
+ bool sec_and_cap_hit = ar.a[i].secondary >= 0 && (int)i > max_secondary;
+ if (sec_and_low_score || sec_and_cap_hit) {
+ free(a.cigar);
+ continue;
+ } else if (ar.a[i].secondary < 0) {
+ primary_score = a.score;
+ //num_secondary = 0;
+ }
+
+#ifdef DEBUG_BWATOOLS
+ std::cerr << "allocing bamread" << std::endl;
+#endif
+
+ // instantiate the read
+ BamRecord b;
+ b.init();
+
+ b.b->core.tid = a.rid;
+ b.b->core.pos = a.pos;
+ b.b->core.qual = a.mapq;
+ b.b->core.flag = a.flag;
+ b.b->core.n_cigar = a.n_cigar;
+
+      // set dummy mate
+ b.b->core.mtid = -1;
+ b.b->core.mpos = -1;
+ b.b->core.isize = 0;
+
+ // if alignment is reverse, set it
+ if (a.is_rev)
+ b.b->core.flag |= BAM_FREVERSE;
+
+ std::string new_seq = seq;
+ // if hardclip, figure out what to clip
+ if (hardclip) {
+ size_t tstart = 0;
+ size_t len = 0;
+ for (int i = 0; i < a.n_cigar; ++i) {
+ if (i == 0 && bam_cigar_op(a.cigar[i]) == BAM_CREF_SKIP) // first N (e.g. 20N50M)
+ tstart = bam_cigar_oplen(a.cigar[i]);
+ else if (bam_cigar_type(bam_cigar_op(a.cigar[i]))&1) // consumes query, but not N
+ len += bam_cigar_oplen(a.cigar[i]);
+ }
+ assert(len > 0);
+ assert(tstart + len <= seq.length());
+ new_seq = seq.substr(tstart, len);
+ }
+
+ // allocate all the data
+ b.b->core.l_qname = name.length() + 1;
+ b.b->core.l_qseq = new_seq.length(); //(seq.length()>>1) + seq.length() % 2; // 4-bit encoding
+ b.b->l_data = b.b->core.l_qname + (a.n_cigar<<2) + ((b.b->core.l_qseq+1)>>1) + (b.b->core.l_qseq);
+ b.b.get()->data = (uint8_t*)malloc(b.b.get()->l_data);
+
+#ifdef DEBUG_BWATOOLS
+ std::cerr << "memcpy" << std::endl;
+#endif
+
+ // allocate the qname
+ memcpy(b.b->data, name.c_str(), name.length() + 1);
+
+ // allocate the cigar. Reverse if aligned to neg strand, since mem_aln_t stores
+      // cigars relative to the reference string orientation, not the forward alignment
+ memcpy(b.b->data + b.b->core.l_qname, (uint8_t*)a.cigar, a.n_cigar<<2);
+
+ // convert N to S or H
+ int new_val = hardclip ? BAM_CHARD_CLIP : BAM_CSOFT_CLIP;
+ uint32_t * cigr = bam_get_cigar(b.b);
+ for (int k = 0; k < b.b->core.n_cigar; ++k) {
+ if ( (cigr[k] & BAM_CIGAR_MASK) == BAM_CREF_SKIP) {
+ cigr[k] &= ~BAM_CIGAR_MASK;
+ cigr[k] |= new_val;
+ }
+ }
+
+ // allocate the sequence
+ uint8_t* m_bases = b.b->data + b.b->core.l_qname + (b.b->core.n_cigar<<2);
+
+ // TODO move this out of bigger loop
+ int slen = new_seq.length();
+ int j = 0;
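+      // bases are packed two per byte as 4-bit codes (A=1, C=2, G=4, T=8,
+      // anything else=15); when the hit is on the reverse strand, the
+      // sequence is reverse-complemented on the fly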
+ if (a.is_rev) {
+ for (int i = slen-1; i >= 0; --i) {
+
+ // bad idea but works for now
+ // this is REV COMP things
+ uint8_t base = 15;
+ if (new_seq.at(i) == 'T')
+ base = 1;
+ else if (new_seq.at(i) == 'G')
+ base = 2;
+ else if (new_seq.at(i) == 'C')
+ base = 4;
+ else if (new_seq.at(i) == 'A')
+ base = 8;
+
+ m_bases[j >> 1] &= ~(0xF << ((~j & 1) << 2)); ///< zero out previous 4-bit base encoding
+ m_bases[j >> 1] |= base << ((~j & 1) << 2); ///< insert new 4-bit base encoding
+ ++j;
+ }
+ } else {
+ for (int i = 0; i < slen; ++i) {
+ // bad idea but works for now
+ uint8_t base = 15;
+ if (new_seq.at(i) == 'A')
+ base = 1;
+ else if (new_seq.at(i) == 'C')
+ base = 2;
+ else if (new_seq.at(i) == 'G')
+ base = 4;
+ else if (new_seq.at(i) == 'T')
+ base = 8;
+
+ m_bases[i >> 1] &= ~(0xF << ((~i & 1) << 2)); ///< zero out previous 4-bit base encoding
+ m_bases[i >> 1] |= base << ((~i & 1) << 2); ///< insert new 4-bit base encoding
+
+ }
+ }
+
+#ifdef DEBUG_BWATOOLS
+ std::cerr << "memcpy3" << std::endl;
+#endif
+
+ // allocate the quality to NULL
+ uint8_t* s = bam_get_qual(b.b);
+ s[0] = 0xff;
+
+ b.AddIntTag("NA", ar.n); // number of matches
+ b.AddIntTag("NM", a.NM);
+
+ if (a.XA)
+ b.AddZTag("XA", std::string(a.XA));
+
+ // add num sub opt
+ b.AddIntTag("SB", ar.a[i].sub_n);
+ b.AddIntTag("AS", a.score);
+
+ // count num secondaries
+ if (b.SecondaryFlag())
+ ++secondary_count;
+
+ vec.push_back(b);
+
+#ifdef DEBUG_BWATOOLS
+ // print alignment
+ printf("\t%c\t%s\t%ld\t%d\t", "+-"[a.is_rev], idx->bns->anns[a.rid].name, (long)a.pos, a.mapq);
+ for (int k = 0; k < a.n_cigar; ++k) // print CIGAR
+ printf("%d%c", a.cigar[k]>>4, "MIDSH"[a.cigar[k]&0xf]);
+ printf("\t%d\n", a.NM); // print edit distance
+ std::cerr << "final done" << std::endl;
+#endif
+
+ free(a.cigar); // don't forget to deallocate CIGAR
+ }
+ free (ar.a); // dealloc the hit list
+
+ // add the secondary counts
+ for (BamRecordVector::iterator i = vec.begin(); i != vec.end(); ++i)
+ i->AddIntTag("SQ", secondary_count);
+
+}
+
+ // modified from bwa (heng li)
+uint8_t* BWAWrapper::seqlib_add1(const kseq_t *seq, bntseq_t *bns, uint8_t *pac, int64_t *m_pac, int *m_seqs, int *m_holes, bntamb1_t **q)
+{
+ bntann1_t *p;
+ int lasts;
+ if (bns->n_seqs == *m_seqs) {
+ *m_seqs <<= 1;
+ bns->anns = (bntann1_t*)realloc(bns->anns, *m_seqs * sizeof(bntann1_t));
+ }
+ p = bns->anns + bns->n_seqs;
+ p->name = strdup((char*)seq->name.s);
+ p->anno = seq->comment.l > 0? strdup((char*)seq->comment.s) : strdup("(null)");
+ p->gi = 0; p->len = seq->seq.l;
+ p->offset = (bns->n_seqs == 0)? 0 : (p-1)->offset + (p-1)->len;
+ p->n_ambs = 0;
+ for (size_t i = lasts = 0; i < seq->seq.l; ++i) {
+ int c = nst_nt4_table[(int)seq->seq.s[i]];
+ if (c >= 4) { // N
+ if (lasts == seq->seq.s[i]) { // contiguous N
+ ++(*q)->len;
+ } else {
+ if (bns->n_holes == *m_holes) {
+ (*m_holes) <<= 1;
+ bns->ambs = (bntamb1_t*)realloc(bns->ambs, (*m_holes) * sizeof(bntamb1_t));
+ }
+ *q = bns->ambs + bns->n_holes;
+ (*q)->len = 1;
+ (*q)->offset = p->offset + i;
+ (*q)->amb = seq->seq.s[i];
+ ++p->n_ambs;
+ ++bns->n_holes;
+ }
+ }
+ lasts = seq->seq.s[i];
+ { // fill buffer
+ if (c >= 4) c = lrand48()&3;
+ if (bns->l_pac == *m_pac) { // double the pac size
+ *m_pac <<= 1;
+ pac = (uint8_t*)realloc(pac, *m_pac/4);
+ memset(pac + bns->l_pac/4, 0, (*m_pac - bns->l_pac)/4);
+ }
+ _set_pac(pac, bns->l_pac, c);
+ ++bns->l_pac;
+ }
+ }
+ ++bns->n_seqs;
+
+ return pac;
+}
+
+ // modified from bwa (heng li)
+uint8_t* BWAWrapper::seqlib_make_pac(const UnalignedSequenceVector& v, bool for_only)
+{
+
+ bntseq_t * bns = (bntseq_t*)calloc(1, sizeof(bntseq_t));
+ uint8_t *pac = 0;
+ int32_t m_seqs, m_holes;
+ int64_t m_pac, l;
+ bntamb1_t *q;
+
+ bns->seed = 11; // fixed seed for random generator
+ m_seqs = m_holes = 8; m_pac = 0x10000;
+ bns->anns = (bntann1_t*)calloc(m_seqs, sizeof(bntann1_t));
+ bns->ambs = (bntamb1_t*)calloc(m_holes, sizeof(bntamb1_t));
+ pac = (uint8_t*) calloc(m_pac/4, 1);
+ q = bns->ambs;
+
+ // move through the unaligned sequences
+ for (size_t k = 0; k < v.size(); ++k) {
+
+ // make the ref name kstring
+ kstring_t * name = (kstring_t*)malloc(1 * sizeof(kstring_t));
+ name->l = v[k].Name.length() + 1;
+ name->m = v[k].Name.length() + 3;
+ name->s = (char*)calloc(name->m, sizeof(char));
+ memcpy(name->s, v[k].Name.c_str(), v[k].Name.length()+1);
+
+ // make the sequence kstring
+ kstring_t * t = (kstring_t*)malloc(sizeof(kstring_t));
+ t->l = v[k].Seq.length();
+ t->m = v[k].Seq.length() + 2;
+ //t->s = (char*)calloc(v[k].Seq.length(), sizeof(char));
+ t->s = (char*)malloc(t->m);
+ memcpy(t->s, v[k].Seq.c_str(), v[k].Seq.length());
+
+ // put into a kstring
+ kseq_t *ks = (kseq_t*)calloc(1, sizeof(kseq_t));
+ ks->seq = *t;
+ ks->name = *name;
+
+ // make the forward only pac
+ pac = seqlib_add1(ks, bns, pac, &m_pac, &m_seqs, &m_holes, &q);
+
+ // clear it out
+ free(name->s);
+ free(name);
+ free(t->s);
+ free(t);
+ //free(ks->name.s);
+ //free(ks->seq.s);
+ //free(ks->f->buf);
+ //free(
+ free(ks);
+ // NOTE free kstring_t?
+ //kseq_destroy(s);
+ }
+
+ if (!for_only)
+ {
+ // add the reverse complemented sequence
+ m_pac = (bns->l_pac * 2 + 3) / 4 * 4;
+ pac = (uint8_t*)realloc(pac, m_pac/4);
+ memset(pac + (bns->l_pac+3)/4, 0, (m_pac - (bns->l_pac+3)/4*4) / 4);
+ for (l = bns->l_pac - 1; l >= 0; --l, ++bns->l_pac)
+ _set_pac(pac, bns->l_pac, 3-_get_pac(pac, l));
+ }
+
+ bns_destroy(bns);
+
+ return pac;
+}
+
+ // modified from bwa (heng li)
+bwt_t *BWAWrapper::seqlib_bwt_pac2bwt(const uint8_t *pac, int bwt_seq_lenr)
+{
+
+ bwt_t *bwt;
+ ubyte_t *buf;
+ int i;
+ //FILE *fp;
+
+ // initialization
+ bwt = (bwt_t*)calloc(1, sizeof(bwt_t));
+ bwt->seq_len = bwt_seq_lenr; //bwa_seq_len(fn_pac); //dummy
+ bwt->bwt_size = (bwt->seq_len + 15) >> 4;
+ //fp = xopen(fn_pac, "rb");
+
+ // prepare sequence
+ //pac_size = (bwt->seq_len>>2) + ((bwt->seq_len&3) == 0? 0 : 1);
+ //buf2 = (ubyte_t*)calloc(pac_size, 1);
+ //err_fread_noeof(buf2, 1, pac_size, fp);
+ //err_fclose(fp);
+ memset(bwt->L2, 0, 5 * 4);
+ buf = (ubyte_t*)calloc(bwt->seq_len + 1, 1);
+ for (i = 0; i < (int)bwt->seq_len; ++i) {
+ buf[i] = pac[i>>2] >> ((3 - (i&3)) << 1) & 3;
+ ++bwt->L2[1+buf[i]];
+ }
+ for (i = 2; i <= 4; ++i)
+ bwt->L2[i] += bwt->L2[i-1];
+ //free(buf2);
+
+ // Burrows-Wheeler Transform
+ bwt->primary = is_bwt(buf, bwt->seq_len);
+ bwt->bwt = (u_int32_t*)calloc(bwt->bwt_size, 4);
+ for (i = 0; i < (int)bwt->seq_len; ++i)
+ bwt->bwt[i>>4] |= buf[i] << ((15 - (i&15)) << 1);
+ free(buf);
+ return bwt;
+}
+
+ // modified from bwa (heng li)
+ bntann1_t* BWAWrapper::seqlib_add_to_anns(const std::string& name, const std::string& seq, bntann1_t* ann, size_t offset)
+ {
+
+ ann->offset = offset;
+ ann->name = (char*)malloc(name.length()+1); // +1 for \0
+ strncpy(ann->name, name.c_str(), name.length()+1);
+ ann->anno = (char*)malloc(7);
+ strcpy(ann->anno, "(null)\0");
+ ann->len = seq.length();
+ ann->n_ambs = 0; // number of "holes"
+ ann->gi = 0; // gi?
+ ann->is_alt = 0;
+
+ return ann;
+ }
+
+
+ bool BWAWrapper::LoadIndex(const std::string& file)
+ {
+
+ // read in the bwa index
+ bwaidx_t* idx_new = bwa_idx_load(file.c_str(), BWA_IDX_ALL);
+
+ if (!idx_new)
+ return false;
+
+ if (idx) {
+ std::cerr << "...clearing old index" << std::endl;
+ bwa_idx_destroy(idx);
+ }
+
+ idx = idx_new;
+ return true;
+ }
+
+
+ bool BWAWrapper::WriteIndex(const std::string& index_name) const
+ {
+
+ if (!idx)
+ return false;
+
+ std::string bwt_name = index_name + ".bwt";
+ std::string sa_name = index_name + ".sa";
+ bwt_dump_bwt(bwt_name.c_str(), idx->bwt);
+ bwt_dump_sa(sa_name.c_str(), idx->bwt);
+ bns_dump(idx->bns, index_name.c_str());
+ seqlib_write_pac_to_file(index_name);
+
+ return true;
+ }
+
+ // modified from bwa (heng li)
+ void BWAWrapper::seqlib_write_pac_to_file(const std::string& file) const
+ {
+ // finalize .pac file
+ FILE *fp;
+ std::string nm = file + ".pac";
+ fp = xopen(nm.c_str(), "wb");
+ ubyte_t ct;
+ err_fwrite(idx->pac, 1, (idx->bns->l_pac>>2) + ((idx->bns->l_pac&3) == 0? 0 : 1), fp);
+
+  // the following code makes the pac file size always (l_pac/4+1+1)
+ if (idx->bns->l_pac % 4 == 0) {
+ ct = 0;
+ err_fwrite(&ct, 1, 1, fp);
+ }
+ ct = idx->bns->l_pac % 4;
+ err_fwrite(&ct, 1, 1, fp);
+
+ // close .pac file
+ err_fflush(fp);
+ err_fclose(fp);
+ }
+
+ std::ostream& operator<<(std::ostream& out, const BWAWrapper& b) {
+ out << "BNS: l_pac: " << b.idx->bns->l_pac << " n_seqs: " << b.idx->bns->n_seqs <<
+ " seed: " << b.idx->bns->seed << " n_holes " << b.idx->bns->n_holes;
+ return out;
+ }
+}
+
diff --git a/src/BamHeader.cpp b/src/BamHeader.cpp
new file mode 100644
index 0000000..04f66eb
--- /dev/null
+++ b/src/BamHeader.cpp
@@ -0,0 +1,223 @@
+#include "SeqLib/BamHeader.h"
+#include "SeqLib/BamWalker.h"
+
+#include <sstream>
+#include <stdexcept>
+#include <iostream>
+
+#include "htslib/khash.h"
+
+namespace SeqLib {
+
+ BamHeader::BamHeader(const HeaderSequenceVector& hsv) {
+
+ bam_hdr_t * hdr = bam_hdr_init();
+ hdr->n_targets = hsv.size();
+ hdr->target_len = (uint32_t*)malloc(hdr->n_targets * sizeof(uint32_t));
+ hdr->target_name = (char**)malloc(hdr->n_targets * sizeof(char*));
+
+ // fill the names and make the text
+ std::stringstream text;
+ text << "@HD\tVN:1.4" << std::endl;
+ for (size_t i = 0; i < hsv.size(); ++i) {
+ hdr->target_len[i] = hsv[i].Length;
+ hdr->target_name[i] = strdup(hsv[i].Name.c_str());
+ text << "@SQ\tSN:" << hsv[i].Name << "\tLN:" << hsv[i].Length << std::endl;
+ }
+ hdr->text = strdup(text.str().c_str());
+
+ // give to object
+ h = SeqPointer<bam_hdr_t>(hdr, bam_hdr_delete());
+ ConstructName2IDTable();
+ }
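+
+  // Usage sketch (sequence name and length are illustrative only):
+  //   SeqLib::HeaderSequenceVector hsv;
+  //   hsv.push_back(SeqLib::HeaderSequence("chr_test", 1000));
+  //   SeqLib::BamHeader hdr(hsv);   // header with a single reference sequence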
+
+ int BamHeader::GetSequenceLength(int id) const {
+ if (h && id < NumSequences())
+ return h->target_len[id];
+ return -1;
+
+ }
+
+ int BamHeader::GetSequenceLength(const std::string& id) const {
+
+ int nid = Name2ID(id);
+ if (nid == -1)
+ return -1;
+
+ if (h && nid < NumSequences())
+ return h->target_len[nid];
+
+ return -1;
+ }
+
+BamHeader::BamHeader(const std::string& hdr) {
+
+ h = SeqPointer<bam_hdr_t>(sam_hdr_read2(hdr), bam_hdr_delete());
+
+ ConstructName2IDTable();
+
+}
+
+ std::string BamHeader::AsString() const {
+
+ std::stringstream ss;
+
+ ss << h->text;
+ return ss.str();
+
+ }
+
+ BamHeader::BamHeader(const bam_hdr_t * hdr) {
+
+ h = SeqPointer<bam_hdr_t>(bam_hdr_dup(hdr), bam_hdr_delete());
+
+ ConstructName2IDTable();
+
+ }
+
+ void BamHeader::ConstructName2IDTable() {
+
+ // create the lookup table if not already made
+ if (!n2i) {
+ n2i = SeqPointer<SeqHashMap<std::string, int> >(new SeqHashMap<std::string, int>());
+ for (int i = 0; i < h->n_targets; ++i)
+ n2i->insert(std::pair<std::string, int>(std::string(h->target_name[i]), i));
+ }
+
+ }
+
+ int BamHeader::Name2ID(const std::string& name) const {
+
+ SeqHashMap<std::string, int>::const_iterator ff = n2i->find(name);
+ if (ff != n2i->end())
+ return ff->second;
+ else
+ return -1;
+
+ }
+
+bam_hdr_t* BamHeader::sam_hdr_read2(const std::string& hdr) const {
+
+ kstring_t str;
+ bam_hdr_t *hhh;
+ str.l = str.m = 0; str.s = 0;
+
+ std::istringstream iss(hdr);
+ std::string line;
+ while (std::getline(iss, line, '\n')) {
+ //while (hts_getline(fp, KS_SEP_LINE, &fp->line) >= 0) {
+ if (line.length() == 0 || line.at(0) != '@') break;
+
+ //if (line.length() > 3 && line.substr(0,3) == "@SQ") has_SQ = 1;
+ //if (fp->line.l > 3 && strncmp(fp->line.s,"@SQ",3) == 0) has_SQ = 1;
+ //kputsn(fp->line.s, fp->line.l, &str);
+ kputsn(line.c_str(), line.length(), &str);
+ kputc('\n', &str);
+ }
+
+ /*
+ if (! has_SQ && fp->fn_aux) {
+ char line[2048];
+ FILE *f = fopen(fp->fn_aux, "r");
+ if (f == NULL) return NULL;
+ while (fgets(line, sizeof line, f)) {
+ const char *name = strtok(line, "\t");
+ const char *length = strtok(NULL, "\t");
+ ksprintf(&str, "@SQ\tSN:%s\tLN:%s\n", name, length);
+ }
+ fclose(f);
+ }
+ */
+
+ if (str.l == 0)
+ kputsn("", 0, &str);
+ hhh = sam_hdr_parse(str.l, str.s);
+ hhh->l_text = str.l; hhh->text = str.s; // hhh->text needs to be freed
+ return hhh;
+}
+
+ /** Return the reference sequences as vector of HeaderSequence objects */
+ HeaderSequenceVector BamHeader::GetHeaderSequenceVector() const {
+
+ std::vector<HeaderSequence> out;
+ for (int i = 0; i < h->n_targets; ++i)
+ out.push_back(HeaderSequence(std::string(h->target_name[i]), h->target_len[i]));
+ return out;
+ }
+
+
+int BamHeader::NumSequences() const {
+
+ if (!h)
+ return 0;
+ return h->n_targets;
+
+}
+
+std::string BamHeader::IDtoName(int id) const {
+
+ if (id < 0)
+ throw std::invalid_argument("BamHeader::IDtoName - ID must be >= 0");
+
+ if (!h)
+ throw std::out_of_range("BamHeader::IDtoName - Header is uninitialized");
+
+ if (id >= h->n_targets)
+ throw std::out_of_range("BamHeader::IDtoName - Requested ID is higher than number of sequences");
+
+ return std::string(h->target_name[id]);
+
+}
+
+ // copied from htslib - sam.c
+ /*
+ std::string BamHeader::sam_hdr_write2(htsFile *fp, const bam_hdr_t *h)
+ {
+ switch (fp->format.format) {
+ case binary_format:
+ fp->format.category = sequence_data;
+ fp->format.format = bam;
+ // fall-through
+ case bam:
+ if (bam_hdr_write(fp->fp.bgzf, h) < 0) return -1;
+ break;
+
+ case cram: {
+ cram_fd *fd = fp->fp.cram;
+ SAM_hdr *hdr = bam_header_to_cram((bam_hdr_t *)h);
+ if (! hdr) return -1;
+ if (cram_set_header(fd, hdr) < 0) return -1;
+ if (fp->fn_aux)
+ cram_load_reference(fd, fp->fn_aux);
+ if (cram_write_SAM_hdr(fd, fd->header) < 0) return -1;
+ }
+ break;
+
+ case text_format:
+ fp->format.category = sequence_data;
+ fp->format.format = sam;
+ // fall-through
+ case sam: {
+ char *p;
+ hputs(h->text, fp->fp.hfile);
+ p = strstr(h->text, "@SQ\t"); // FIXME: we need a loop to make sure "@SQ\t" does not match something unwanted!!!
+ if (p == 0) {
+ int i;
+ for (i = 0; i < h->n_targets; ++i) {
+ fp->line.l = 0;
+ kputsn("@SQ\tSN:", 7, &fp->line); kputs(h->target_name[i], &fp->line);
+ kputsn("\tLN:", 4, &fp->line); kputw(h->target_len[i], &fp->line); kputc('\n', &fp->line);
+ if ( hwrite(fp->fp.hfile, fp->line.s, fp->line.l) != fp->line.l ) return -1;
+ }
+ }
+ if ( hflush(fp->fp.hfile) != 0 ) return -1;
+ }
+ break;
+
+ default:
+ abort();
+ }
+ return 0;
+ }
+ */
+}
diff --git a/src/BamReader.cpp b/src/BamReader.cpp
new file mode 100644
index 0000000..7c82748
--- /dev/null
+++ b/src/BamReader.cpp
@@ -0,0 +1,381 @@
+#include "SeqLib/BamReader.h"
+
+
+//#define DEBUG_WALKER 1
+
+namespace SeqLib {
+
+// set the bam region
+bool _Bam::SetRegion(const GenomicRegion& gp) {
+
+ // mark it "open" again, may be new reads here
+ mark_for_closure = false;
+
+ //HTS set region
+ if ( (fp->format.format == 4 || fp->format.format == 6) && !idx) // BAM (4) or CRAM (6)
+ idx = SharedIndex(sam_index_load(fp.get(), m_in.c_str()), idx_delete());
+
+ if (!idx) {
+ if (m_in != "-")
+ std::cerr << "Failed to load index for " << m_in << ". Rebuild samtools index" << std::endl;
+ else
+ std::cerr << "Random access with SetRegion not available for STDIN reading (no index file)" << std::endl;
+ return false;
+ }
+
+ if (gp.chr >= m_hdr.NumSequences()) {
+ std::cerr << "Failed to set region on " << gp << ". Chr ID is bigger than n_targets=" << m_hdr.NumSequences() << std::endl;
+ return false;
+ }
+
+ // should work for BAM or CRAM
+ hts_itr = SeqPointer<hts_itr_t>(sam_itr_queryi(idx.get(), gp.chr, gp.pos1, gp.pos2), hts_itr_delete());
+
+ if (!hts_itr) {
+ std::cerr << "Error: Failed to set region: " << gp << std::endl;
+ return false;
+ }
+
+ return true;
+}
+
+void BamReader::Reset() {
+ for (_BamMap::iterator b = m_bams.begin(); b != m_bams.end(); ++b)
+ b->second.reset();
+ m_region = GRC();
+}
+
+ bool BamReader::Reset(const std::string& f) {
+
+    // can't reset what we don't have
+ if (!m_bams.count(f))
+ return false;
+ m_bams[f].reset();
+ return true;
+}
+
+ bool BamReader::Close() {
+
+ bool success = true;
+ for (_BamMap::iterator b = m_bams.begin(); b != m_bams.end(); ++b)
+ success = success && b->second.close();
+ return success;
+ }
+
+ bool BamReader::Close(const std::string& f) {
+
+    // can't close what we don't have
+ if (!m_bams.count(f))
+ return false;
+
+ return m_bams[f].close();
+ }
+
+ /* SharedHTSFile BamReader::GetHTSFile () const {
+ if (!m_bams.size())
+ throw std::runtime_error("No BAMs have been opened yet");
+ return m_bams.begin()->second.fp;
+ }
+
+ SharedHTSFile BamReader::GetHTSFile(const std::string& f) const {
+ _BamMap::const_iterator ff = m_bams.find(f);
+ if (ff == m_bams.end())
+ throw std::runtime_error("File " + f + " has not been opened yet");
+ return ff->second.fp;
+ }
+
+
+ bool BamReader::SetPreloadedIndex(SharedIndex& i) {
+ if (!m_bams.size())
+ return false;
+ m_bams.begin()->second.set_index(i);
+ return true;
+ }
+
+ bool BamReader::SetPreloadedIndex(const std::string& f, SharedIndex& i) {
+ if (!m_bams.count(f))
+ return false;
+ m_bams[f].set_index(i);
+ return true;
+ }
+
+ */
+
+ bool BamReader::SetRegion(const GenomicRegion& g) {
+ m_region.clear();
+ m_region.add(g);
+
+ bool success = true;
+ if (m_region.size()) {
+ for (_BamMap::iterator b = m_bams.begin(); b != m_bams.end(); ++b) {
+ b->second.m_region = &m_region;
+	b->second.m_region_idx = 0; // set to the beginning
+ success = success && b->second.SetRegion(m_region[0]);
+ }
+ return success;
+ }
+
+ return false;
+
+}
+
+ bool BamReader::SetMultipleRegions(const GRC& grc)
+{
+ if (grc.size() == 0) {
+ std::cerr << "Warning: Trying to set an empty bam region" << std::endl;
+ return false;
+ }
+
+ m_region = grc;
+
+ // go through and start all the BAMs at the first region
+ bool success = true;
+ if (m_region.size()) {
+ for (_BamMap::iterator b = m_bams.begin(); b != m_bams.end(); ++b) {
+ b->second.m_region = &m_region;
+	b->second.m_region_idx = 0; // set to the beginning
+ success = success && b->second.SetRegion(m_region[0]);
+ }
+ return success;
+ }
+
+ return false;
+}
+
+ bool BamReader::Open(const std::string& bam) {
+
+    // don't open the same BAM twice
+ if (m_bams.count(bam))
+ return false;
+
+ _Bam new_bam(bam);
+ new_bam.m_region = &m_region;
+ bool success = new_bam.open_BAM_for_reading();
+ m_bams.insert(std::pair<std::string, _Bam>(bam, new_bam));
+ return success;
+ }
+
+ bool BamReader::Open(const std::vector<std::string>& bams) {
+
+ bool pass = true;
+ for (std::vector<std::string>::const_iterator i = bams.begin(); i != bams.end(); ++i)
+ pass = pass && Open(*i);
+ return pass;
+ }
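+
+  // Usage sketch (the file name is a placeholder):
+  //   SeqLib::BamReader br;
+  //   br.Open("reads.bam");
+  //   SeqLib::BamRecord rec;
+  //   while (br.GetNextRecord(rec)) { /* process rec */ }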
+
+BamReader::BamReader() {}
+
+ std::string BamReader::HeaderConcat() const {
+ std::stringstream ss;
+ for (_BamMap::const_iterator i = m_bams.begin(); i != m_bams.end(); ++i)
+ ss << i->second.m_hdr.AsString();
+ return ss.str();
+
+ }
+
+ BamHeader BamReader::Header() const {
+ if (m_bams.size())
+ return m_bams.begin()->second.m_hdr;
+ return BamHeader();
+ }
+
+ bool _Bam::open_BAM_for_reading() {
+
+ // HTS open the reader
+ fp = SharedHTSFile(hts_open(m_in.c_str(), "r"), htsFile_delete());
+
+ // open cram reference
+ if (!m_cram_reference.empty()) {
+ char * m_cram_reference_cstr = strdup(m_cram_reference.c_str());
+ int ret = cram_load_reference(fp->fp.cram, m_cram_reference_cstr);
+ free(m_cram_reference_cstr);
+ if (ret < 0)
+ throw std::invalid_argument("Could not read reference genome " + m_cram_reference + " for CRAM opt");
+ }
+
+ // check if opening failed
+ if (!fp)
+ return false;
+
+ // read the header and create a BamHeader
+ bam_hdr_t * hdr = sam_hdr_read(fp.get());
+ m_hdr = BamHeader(hdr); // calls BamHeader(bam_hdr_t), makes a copy
+
+ // deallocate the memory we just made
+ if (hdr)
+ bam_hdr_destroy(hdr);
+
+ // if BAM header opening failed, return false
+ if (!m_hdr.get())
+ return false;
+
+ // everything worked
+ return true;
+
+ }
+
+ void BamReader::SetCramReference(const std::string& ref) {
+ m_cram_reference = ref;
+ for (_BamMap::iterator b = m_bams.begin(); b != m_bams.end(); ++b)
+ b->second.m_cram_reference = ref;
+ }
+
+bool BamReader::GetNextRecord(BamRecord& r) {
+
+ // shortcut if we have only a single bam
+ if (m_bams.size() == 1) {
+    if (m_bams.begin()->second.fp.get() == NULL || m_bams.begin()->second.mark_for_closure) // can't read if not opened
+ return false;
+ if (m_bams.begin()->second.load_read(r)) { // try to read
+ return true;
+ }
+ // didn't find anything, clear it
+ m_bams.begin()->second.mark_for_closure = true;
+ return false;
+ }
+
+ // loop the files and load the next read
+ // for the one that was emptied last
+ for (_BamMap::iterator bam = m_bams.begin(); bam != m_bams.end(); ++bam) {
+
+ _Bam *tb = &(bam->second);
+
+ // if marked, then don't even try on this BAM
+ if (tb->mark_for_closure)
+ continue;
+
+ // skip un-opened BAMs
+ if (tb->fp.get() == NULL)
+ continue;
+
+    // if this BAM already holds an unread record, don't load another yet
+ if (!tb->empty)
+ continue;
+
+ // load the next read
+ if (!tb->load_read(r)) { // if cant load, mark for closing
+ tb->empty = true;
+ tb->mark_for_closure = true; // no more reads in this BAM
+ continue;
+ }
+
+ }
+
+ // for multiple bams, choose the one to return
+ // sort based on chr and left-most alignment pos. Same as samtools
+ int min_chr = INT_MAX;
+ int min_pos = INT_MAX;
+ _BamMap::iterator hit;
+ bool found = false; // did we find a valid read
+
+ for (_BamMap::iterator bam = m_bams.begin();
+ bam != m_bams.end(); ++bam) {
+
+    // don't check if already marked for removal or if it doesn't need a new read
+ if (bam->second.empty || bam->second.mark_for_closure)
+ continue;
+
+ found = true;
+ if (bam->second.next_read.ChrID() < min_chr || // check if read in this BAM is lowest
+ (bam->second.next_read.Position() < min_pos && bam->second.next_read.ChrID() == min_chr)) {
+ min_pos = bam->second.next_read.Position();
+ min_chr = bam->second.next_read.ChrID();
+ hit = bam; // read is lowest, so mark this BAM as having the hit
+ }
+ }
+
+ // mark the one we just found as empty
+ if (found) {
+ r = hit->second.next_read; // read is lowest, so assign
+ hit->second.empty = true; // mark as empty, so we fill this slot again
+ }
+
+ return found;
+}
+
+std::string BamReader::PrintRegions() const {
+
+ std::stringstream ss;
+ //for (GRC::const_iterator r = m_region.begin(); r != m_region.end(); ++r)
+ // ss << *r << std::endl;
+ return(ss.str());
+
+}
+
+ bool _Bam::load_read(BamRecord& r) {
+
+    // allocate the memory
+ bam1_t* b = bam_init1();
+ int32_t valid;
+
+ if (hts_itr.get() == NULL) {
+ valid = sam_read1(fp.get(), m_hdr.get_(), b);
+ if (valid < 0) {
+
+#ifdef DEBUG_WALKER
+ std::cerr << "ended reading on null hts_itr" << std::endl;
+#endif
+ //goto endloop;
+ bam_destroy1(b);
+ return false;
+ }
+ } else {
+
+ //changed to sam from hts_itr_next
+ // move to next region of bam
+ valid = sam_itr_next(fp.get(), hts_itr.get(), b);
+ }
+
+ if (valid < 0) { // read not found
+ do {
+
+#ifdef DEBUG_WALKER
+ std::cerr << "Failed read, trying next region. Moving counter to " << m_region_idx << " of " << m_region.size() << " FP: " << fp_htsfile << " hts_itr " << std::endl;
+#endif
+ // try next region, return if no others to try
+ ++m_region_idx; // increment to next region
+ if (m_region_idx >= m_region->size()) {
+ bam_destroy1(b);
+ return false;
+ }
+ //goto endloop;
+
+ // next region exists, try it
+ SetRegion(m_region->at(m_region_idx));
+ valid = sam_itr_next(fp.get(), hts_itr.get(), b);
+ } while (valid <= 0); // keep trying regions until works
+ }
+
+ // if we got here, then we found a read in this BAM
+ empty = false;
+ next_read.assign(b); // assign the shared_ptr for the bam1_t
+ r = next_read;
+
+ return true;
+}
+
+std::ostream& operator<<(std::ostream& out, const BamReader& b)
+{
+ for(_BamMap::const_iterator bam = b.m_bams.begin(); bam != b.m_bams.end(); ++bam)
+ out << ":" << bam->second.GetFileName() << std::endl;
+
+ if (b.m_region.size() && b.m_region.size() < 20) {
+ out << " ------- BamReader Regions ----------" << std::endl;;
+ //for (GRC::const_iterator r = b.m_region.begin(); r != b.m_region.end(); ++r)
+ // out << *i << std::endl;
+ }
+ else if (b.m_region.size() >= 20) {
+ int wid = 0;
+ //for (GRC::const_iterator r = b.m_region.begin(); r != b.m_region.end(); ++r)
+ // wid += r->Width();
+ out << " ------- BamReader Regions ----------" << std::endl;;
+ out << " -- " << b.m_region.size() << " regions covering " << AddCommas(wid) << " bp of sequence" << std::endl;
+ }
+ else
+ out << " - BamReader - Walking whole genome -" << std::endl;
+
+ out << " ------------------------------------";
+ return out;
+}
+
+}
diff --git a/src/BamRecord.cpp b/src/BamRecord.cpp
new file mode 100644
index 0000000..f1a6a64
--- /dev/null
+++ b/src/BamRecord.cpp
@@ -0,0 +1,687 @@
+#include "SeqLib/BamRecord.h"
+
+#include <cassert>
+#include <bitset>
+#include <cctype>
+#include <stdexcept>
+
+#include "SeqLib/ssw_cpp.h"
+
+#define TAG_DELIMITER "^"
+#define CTAG_DELIMITER '^'
+
+namespace SeqLib {
+
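+  // maps ASCII CIGAR operation characters ('M','I','D','N','S','H','P','=','X','B')
+  // to their htslib BAM_C* codes, indexed by character value; every other
+  // character maps to -1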
+ const int CigarCharToInt[128] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, //0-9
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, //10-19
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, //20
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, //30
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, //40
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, //50
+ -1,BAM_CEQUAL,-1,-1,-1,-1,BAM_CBACK,-1,BAM_CDEL,-1, //60-69
+ -1,-1,BAM_CHARD_CLIP,BAM_CINS,-1,-1,-1,BAM_CMATCH,BAM_CREF_SKIP,-1,
+ BAM_CPAD,-1,-1,BAM_CSOFT_CLIP,-1,-1,-1,-1,BAM_CDIFF,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1};
+
+
+
+ struct free_delete {
+ void operator()(void* x) { bam_destroy1((bam1_t*)x); }
+ };
+
+ void BamRecord::init() {
+ bam1_t* f = bam_init1();
+ b = SeqPointer<bam1_t>(f, free_delete());
+ }
+
+ void BamRecord::assign(bam1_t* a) {
+ b = SeqPointer<bam1_t>(a, free_delete());
+ }
+
+ GenomicRegion BamRecord::AsGenomicRegion() const {
+ char s = '*';
+ if (MappedFlag())
+ s = ReverseFlag() ? '-' : '+';
+ return GenomicRegion(b->core.tid, b->core.pos, PositionEnd(), s);
+ }
+
+ GenomicRegion BamRecord::AsGenomicRegionMate() const {
+ char s = '*';
+ if (MateMappedFlag())
+ s = MateReverseFlag() ? '-' : '+';
+ return GenomicRegion(b->core.mtid, b->core.mpos, b->core.mpos + Length(), s);
+ }
+
+ std::string BamRecord::Sequence() const {
+ uint8_t * p = bam_get_seq(b);
+ std::string out(b->core.l_qseq, 'N');
+ for (int32_t i = 0; i < b->core.l_qseq; ++i)
+ out[i] = BASES[bam_seqi(p,i)];
+ return out;
+
+ }
+
+ void BamRecord::SetCigar(const Cigar& c) {
+
+ // case where they are equal, just swap them out
+ if (c.size() == b->core.n_cigar) {
+ b->core.n_cigar = c.size();
+ uint32_t * cigr = bam_get_cigar(b);
+ for (size_t i = 0; i < b->core.n_cigar; ++i)
+ cigr[i] = c[i].raw();
+ return;
+ }
+
+ // make the new cigar structure
+ uint32_t* new_cig = (uint32_t*)malloc(4 * c.size());
+ for (size_t i = 0; i < c.size(); ++i)
+ new_cig[i] = c[i].raw();
+
+ int new_size = b->l_data - (b->core.n_cigar<<2) + (c.size()<<2);
+ int old_seqaux_spot = (b->core.n_cigar<<2) + b->core.l_qname;
+ int old_seqaux_len = bam_get_l_aux(b) + ((b->core.l_qseq + 1)>>1) + b->core.l_qseq;
+
+ // set the new cigar size
+ b->core.n_cigar = c.size();
+
+ // copy out all the old data
+ uint8_t* oldd = (uint8_t*)malloc(b->l_data);
+ memcpy(oldd, b->data, b->l_data);
+
+ // clear out the old data and alloc the new amount
+ free(b->data);
+ b->data = (uint8_t*)calloc(new_size, sizeof(uint8_t));
+
+ // add back the qname
+ memcpy(b->data, oldd, b->core.l_qname);
+
+ // add in the new cigar
+ memcpy(b->data + b->core.l_qname, new_cig, c.size()<<2);
+
+ // add back the rest of the data
+ memcpy(b->data + b->core.l_qname + (b->core.n_cigar<<2), oldd + old_seqaux_spot, old_seqaux_len);
+
+ // update the sizes
+ // >>1 shift is because only 4 bits needed per ATCGN base
+ b->l_data = new_size;
+ b->core.n_cigar = c.size();
+
+ free(oldd);
+ free(new_cig);
+ }
+
+ BamRecord::BamRecord(const std::string& name, const std::string& seq, const std::string& ref, const GenomicRegion * gr) {
+
+ StripedSmithWaterman::Aligner aligner;
+ // Declares a default filter
+ StripedSmithWaterman::Filter filter;
+ // Declares an alignment that stores the result
+ StripedSmithWaterman::Alignment alignment;
+ // Aligns the seq to the ref
+ aligner.Align(seq.c_str(), ref.c_str(), ref.size(), filter, &alignment);
+
+ init();
+ b->core.tid = gr->chr;
+ b->core.pos = gr->pos1 + alignment.ref_begin + 1; // add to make it 1-indexed, not 0-indexed
+ b->core.qual = 60; //alignment.sw_score;
+ b->core.flag = 0;
+ b->core.n_cigar = alignment.cigar.size();
+
+    // set dummy mate
+ b->core.mtid = -1;
+ b->core.mpos = -1;
+ b->core.isize = 0;
+
+ // allocate all the data
+ b->core.l_qname = name.length() + 1;
+ b->core.l_qseq = seq.length(); //(seq.length()>>1) + seq.length() % 2; // 4-bit encoding
+ b->l_data = b->core.l_qname + (b->core.n_cigar<<2) + ((b->core.l_qseq+1)>>1) + (b->core.l_qseq);
+ b.get()->data = (uint8_t*)malloc(b.get()->l_data);
+
+ // allocate the qname
+ memcpy(b->data, name.c_str(), name.length() + 1);
+
+ // allocate the cigar. 32 bits per elem (4 type, 28 length)
+ uint32_t * cigr = bam_get_cigar(b);
+ for (size_t i = 0; i < alignment.cigar.size(); ++i) {
+ cigr[i] = alignment.cigar[i]; //Length << BAM_CIGAR_SHIFT | BAM_CMATCH;
+ }
+
+ // allocate the sequence
+ uint8_t* m_bases = b->data + b->core.l_qname + (b->core.n_cigar<<2);
+
+ // TODO move this out of bigger loop
+ int slen = seq.length();
+ for (int i = 0; i < slen; ++i) {
+ // bad idea but works for now
+ uint8_t base = 15;
+ if (seq.at(i) == 'A')
+ base = 1;
+ else if (seq.at(i) == 'C')
+ base = 2;
+ else if (seq.at(i) == 'G')
+ base = 4;
+ else if (seq.at(i) == 'T')
+ base = 8;
+
+ m_bases[i >> 1] &= ~(0xF << ((~i & 1) << 2)); ///< zero out previous 4-bit base encoding
+ m_bases[i >> 1] |= base << ((~i & 1) << 2); ///< insert new 4-bit base encoding
+
+ }
+
+ // add in the actual alignment score
+ AddIntTag("AS", alignment.sw_score);
+
+ }
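+  // Usage sketch (illustrative only; the query_seq/ref_window_seq variables are hypothetical):
+  // build a record by Smith-Waterman-aligning a query against a reference window, then read
+  // back the alignment score stored above in the AS tag:
+  //
+  //   GenomicRegion window(0, 1000, 1200, '+');   // region the reference string came from
+  //   BamRecord rec("read1", query_seq, ref_window_seq, &window);
+  //   int score = rec.GetIntTag("AS");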
+
+ void BamRecord::SmartAddTag(const std::string& tag, const std::string& val)
+ {
+ // get the old tag
+ assert(tag.length());
+ assert(val.length());
+ std::string tmp = GetZTag(tag);
+ if (!tmp.length())
+ {
+ AddZTag(tag, val);
+ return;
+ }
+
+    // check that we don't have the tag delimiter in the string
+    if (val.find(TAG_DELIMITER) != std::string::npos)
+      std::cerr << "BamRecord::SmartAddTag -- Tag delimiter " << TAG_DELIMITER << " is in the value to be added. Compile with a different tag delimiter or change the value" << std::endl;
+
+ // append the tag
+ tmp += TAG_DELIMITER + val;
+
+ // remove the old tag
+ RemoveTag(tag.c_str());
+
+ // add the new one
+ assert(tmp.length());
+ AddZTag(tag, tmp);
+ }
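+  // For example, calling SmartAddTag("CN", "first") and then SmartAddTag("CN", "second")
+  // leaves the Z tag "CN" holding "first^second"; GetSmartStringTag("CN") splits it back
+  // into {"first", "second"}. ("CN" here is just an illustrative tag name.)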
+
+ void BamRecord::ClearSeqQualAndTags() {
+
+ int new_size = b->core.l_qname + ((b)->core.n_cigar<<2);// + 1; ///* 0xff seq */ + 1 /* 0xff qual */;
+ b->data = (uint8_t*)realloc(b->data, new_size);
+ b->l_data = new_size;
+ b->core.l_qseq = 0;
+ }
+
+ void BamRecord::SetSequence(const std::string& seq) {
+
+ int new_size = b->l_data - ((b->core.l_qseq+1)>>1) - b->core.l_qseq + ((seq.length()+1)>>1) + seq.length();
+ int old_aux_spot = (b->core.n_cigar<<2) + b->core.l_qname + ((b->core.l_qseq + 1)>>1) + b->core.l_qseq;
+ int old_aux_len = bam_get_l_aux(b); //(b->core.n_cigar<<2) + b->core.l_qname + ((b->core.l_qseq + 1)>>1) + b->core.l_qseq;
+
+ // copy out all the old data
+ uint8_t* oldd = (uint8_t*)malloc(b->l_data);
+ memcpy(oldd, b->data, b->l_data);
+
+ // clear out the old data and alloc the new amount
+ free(b->data);
+ b->data = (uint8_t*)calloc(new_size, sizeof(uint8_t));
+
+ // add back the qname and cigar
+ memcpy(b->data, oldd, b->core.l_qname + (b->core.n_cigar<<2));
+
+ // update the sizes
+ // >>1 shift is because only 4 bits needed per ATCGN base
+ b->l_data = new_size; //b->l_data - ((b->core.l_qseq + 1)>>1) - b->core.l_qseq + ((seq.length()+1)>>1) + seq.length();
+ b->core.l_qseq = seq.length();
+
+ // allocate the sequence
+ uint8_t* m_bases = b->data + b->core.l_qname + (b->core.n_cigar<<2);
+ int slen = seq.length();
+
+ for (int i = 0; i < slen; ++i) {
+
+ // bad idea but works for now
+ uint8_t base = 15;
+ if (seq.at(i) == 'A')
+ base = 1;
+ else if (seq.at(i) == 'C')
+ base = 2;
+ else if (seq.at(i) == 'G')
+ base = 4;
+ else if (seq.at(i) == 'T')
+ base = 8;
+
+ m_bases[i >> 1] &= ~(0xF << ((~i & 1) << 2)); ///< zero out previous 4-bit base encoding
+ m_bases[i >> 1] |= base << ((~i & 1) << 2); ///< insert new 4-bit base encoding
+ }
+
+ // add in a NULL qual
+ uint8_t* s = bam_get_qual(b);
+ s[0] = 0xff;
+
+ // add the aux data
+ uint8_t* t = bam_get_aux(b);
+ memcpy(t, oldd + old_aux_spot, old_aux_len);
+
+ // reset the max size
+ b->m_data = b->l_data;
+
+ free(oldd); //just added
+
+ }
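+  // Note on the htslib bam1_t::data layout manipulated above: the fields are packed
+  // back-to-back as qname (l_qname bytes, NUL-terminated), cigar (4 bytes per op),
+  // sequence (4 bits per base, (l_qseq+1)/2 bytes), qualities (l_qseq bytes), then aux tags.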
+
+ void BamRecord::SetQname(const std::string& n)
+ {
+ // copy out the non-qname data
+ size_t nonq_len = b->l_data - b->core.l_qname;
+ uint8_t* nonq = (uint8_t*)malloc(nonq_len);
+ memcpy(nonq, b->data + b->core.l_qname, nonq_len);
+
+ // clear the old data and alloc the new amount
+ free(b->data);
+ b->data = (uint8_t*)calloc(nonq_len + n.length() + 1, 1);
+
+ // add in the new qname
+ memcpy(b->data, (uint8_t*)n.c_str(), n.length() + 1); // +1 for \0
+
+ // update the sizes
+ b->l_data = b->l_data - b->core.l_qname + n.length() + 1;
+ b->core.l_qname = n.length() + 1;
+
+ // copy over the old data
+ memcpy(b->data + b->core.l_qname, nonq, nonq_len);
+ free(nonq);
+
+ // reset the max size
+ b->m_data = b->l_data;
+ }
+
+ void BamRecord::SetQualities(const std::string& n, int offset) {
+
+ if (!n.empty() && n.length() != b->core.l_qseq)
+ throw std::invalid_argument("New quality score should be same as seq length");
+
+    // length of qual is always same as seq. If empty qual, just set the first byte of qual to 0
+ if (n.empty()) {
+ uint8_t* r = bam_get_qual(b);
+ r[0] = 0;
+ return;
+ }
+
+ char * q = strdup(n.data());
+ for (size_t i = 0; i < n.length(); ++i)
+ q[i] -= offset;
+ memcpy(bam_get_qual(b), q, n.length()); // dont copy /0 terminator
+ free(q);
+
+ }
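+  // For example, SetQualities("IIII", 33) stores Phred 40 for each of the four bases,
+  // since ASCII 'I' (73) minus the Phred+33 offset is 40.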
+
+ double BamRecord::MeanPhred() const {
+
+ if (b->core.l_qseq <= 0)
+ return -1;
+
+ double s = 0;
+ uint8_t* p = bam_get_qual(b);
+ for (int32_t i = 0; i < b->core.l_qseq; ++i)
+ s += p[i];
+ return s / b->core.l_qseq;
+ }
+
+ std::string BamRecord::QualitySequence() const {
+ std::string seq = GetZTag("GV");
+ if (!seq.length())
+ seq = Sequence();
+ return seq;
+ }
+
+ std::ostream& operator<<(std::ostream& out, const BamRecord &r)
+ {
+ if (!r.b) {
+ out << "empty read";
+ return out;
+ }
+
+ out << bam_get_qname(r.b) << "\t" << r.b->core.flag
+ << "\t" << (r.b->core.tid+1) << "\t" << r.b->core.pos
+ << "\t" << r.b->core.qual << "\t" << r.CigarString()
+ << "\t" << (r.b->core.mtid+1) << "\t" << r.b->core.mpos << "\t"
+ << r.FullInsertSize() //r.b->core.isize
+ << "\t" << r.Sequence() << "\t*" <<
+ "\tAS:" << r.GetIntTag("AS") <<
+      "\tDD:" << r.GetIntTag("DD");
+ return out;
+
+
+ }
+
+ int32_t BamRecord::CountBWASecondaryAlignments() const
+ {
+ int xp_count = 0;
+
+ // xa tag
+ std::string xar_s = GetZTag("XA");
+ if (xar_s.length()) {
+ xp_count += std::count(xar_s.begin(), xar_s.end(), ';');
+ }
+
+ return xp_count;
+
+ }
+
+ int32_t BamRecord::CountBWAChimericAlignments() const
+ {
+ int xp_count = 0;
+
+ // sa tag (post bwa mem v0.7.5)
+ std::string xar_s = GetZTag("SA");
+ if (xar_s.length())
+ xp_count += std::count(xar_s.begin(), xar_s.end(), ';');
+
+ // xp tag (pre bwa mem v0.7.5)
+ std::string xpr_s = GetZTag("XP");
+ if (xpr_s.length())
+ xp_count += std::count(xpr_s.begin(), xpr_s.end(), ';');
+
+ return xp_count;
+
+ }
+
+ int32_t BamRecord::CountNBases() const {
+ uint8_t* p = bam_get_seq(b);
+ int32_t n = 0;
+ for (int ww = 0; ww < b->core.l_qseq; ww++)
+ if (bam_seqi(p,ww) == 15)
+ ++n;
+ return n;
+ }
+
+ void BamRecord::QualityTrimmedSequence(int32_t qualTrim, int32_t& startpoint, int32_t& endpoint) const {
+
+ endpoint = -1; //seq.length();
+ startpoint = 0;
+ int i = 0;
+
+ uint8_t * qual = bam_get_qual(b.get());
+
+ // if there is no quality score, return whole thing
+ if (qual[0] == 0xff) {
+ startpoint = 0;
+ return;
+
+ //return Sequence();
+ }
+
+ // get the start point (loop forward)
+ while(i < b->core.l_qseq) {
+ int ps = qual[i];
+ if (ps >= qualTrim) {
+ startpoint = i;
+ break;
+ }
+ ++i;
+ }
+
+ // get the end point (loop backwards)
+ i = b->core.l_qseq - 1; //seq.length() - 1;
+ while(i >= 0) {
+
+ int ps = qual[i];
+
+ if (ps >= qualTrim) { //ps >= qualTrim) {
+ endpoint = i + 1; // endpoint is one past edge
+ break;
+ }
+ --i;
+ }
+ }
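+  // Usage sketch: with qualTrim = 20, startpoint/endpoint bracket the first and one-past-last
+  // bases whose quality is >= 20, so Sequence().substr(startpoint, endpoint - startpoint)
+  // would yield the quality-trimmed read (endpoint stays -1 if no base passes the cutoff).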
+
+ void BamRecord::AddZTag(std::string tag, std::string val) {
+ if (tag.empty() || val.empty())
+ return;
+ bam_aux_append(b.get(), tag.data(), 'Z', val.length()+1, (uint8_t*)val.c_str());
+ }
+
+ std::string BamRecord::GetZTag(const std::string& tag) const {
+ uint8_t* p = bam_aux_get(b.get(),tag.c_str());
+ if (!p)
+ return std::string();
+ char* pp = bam_aux2Z(p);
+ if (!pp)
+ return std::string();
+ return std::string(pp);
+ }
+
+
+  // get a string tag whose value may hold multiple entries separated by the tag delimiter ("^")
+ std::vector<std::string> BamRecord::GetSmartStringTag(const std::string& tag) const {
+
+ std::vector<std::string> out;
+ std::string tmp = GetZTag(tag);
+
+ if (tmp.empty())
+ return std::vector<std::string>();
+
+ if (tmp.find(TAG_DELIMITER) != std::string::npos) {
+ std::istringstream iss(tmp);
+ std::string line;
+ while (std::getline(iss, line, CTAG_DELIMITER)) {
+ out.push_back(line);
+ }
+ } else {
+ out.push_back(tmp);
+ }
+
+ assert(out.size());
+ return out;
+
+ }
+
+
+ std::vector<int> BamRecord::GetSmartIntTag(const std::string& tag) const {
+
+ std::vector<int> out;
+ std::string tmp;
+
+ tmp = GetZTag(tag);
+ //r_get_Z_tag(a, tag.c_str(), tmp);
+ //assert(tmp.length());
+ if (tmp.empty())
+ return std::vector<int>();
+
+ if (tmp.find(TAG_DELIMITER) != std::string::npos) {
+ std::istringstream iss(tmp);
+ std::string line;
+ while (std::getline(iss, line, CTAG_DELIMITER))
+ out.push_back(atoi(line.c_str()));
+ } else {
+ out.push_back(atoi(tmp.c_str()));
+ }
+
+ assert(out.size());
+ return out;
+
+ }
+
+ std::vector<double> BamRecord::GetSmartDoubleTag(const std::string& tag) const {
+
+ std::vector<double> out;
+ std::string tmp;
+
+ tmp = GetZTag(tag);
+ if (tmp.empty())
+ return std::vector<double>();
+
+ if (tmp.find(TAG_DELIMITER) != std::string::npos) {
+ std::istringstream iss(tmp);
+ std::string line;
+ while (std::getline(iss, line, CTAG_DELIMITER))
+ out.push_back(std::atof(line.c_str()));
+ } else { // single entry
+ out.push_back(std::atof(tmp.c_str()));
+ }
+
+ assert(out.size());
+ return out;
+
+ }
+
+ BamRecord::BamRecord(const std::string& name, const std::string& seq, const GenomicRegion * gr, const Cigar& cig) {
+
+    // make sure the cigar is consistent with the sequence
+    if (cig.NumQueryConsumed() != seq.length())
+      throw std::invalid_argument("Sequence length does not match the number of query bases consumed by the CIGAR");
+
+    // make sure the alignment fits the region
+    if (cig.NumReferenceConsumed() != gr->Width())
+      throw std::invalid_argument("Region width does not match the number of reference bases consumed by the CIGAR");
+
+ init();
+ b->core.tid = gr->chr;
+ b->core.pos = gr->pos1; //gr->pos1 + 1;
+ b->core.qual = 60;
+ b->core.flag = 0;
+ b->core.n_cigar = cig.size();
+
+    // set dummy mate
+ b->core.mtid = -1;
+ b->core.mpos = -1;
+ b->core.isize = 0;
+
+ // if alignment is reverse, set it
+ if (gr->strand == '-') // just choose this convention to reverse
+ b->core.flag |= BAM_FREVERSE;
+
+ // allocate all the data
+ b->core.l_qname = name.length() + 1;
+ b->core.l_qseq = seq.length(); //(seq.length()>>1) + seq.length() % 2; // 4-bit encoding
+ b->l_data = b->core.l_qname + (b->core.n_cigar<<2) + ((b->core.l_qseq+1)>>1) + (b->core.l_qseq);
+ b.get()->data = (uint8_t*)malloc(b.get()->l_data);
+
+ // allocate the qname
+ memcpy(b->data, name.c_str(), name.length() + 1);
+
+ // allocate the cigar. 32 bits per elem (4 type, 28 length)
+ uint32_t * cigr = bam_get_cigar(b);
+ for (size_t i = 0; i < cig.size(); ++i)
+ cigr[i] = cig[i].raw(); //Length << BAM_CIGAR_SHIFT | BAM_CMATCH;
+
+ // allocate the sequence
+ uint8_t* m_bases = b->data + b->core.l_qname + (b->core.n_cigar<<2);
+
+ // TODO move this out of bigger loop
+ int slen = seq.length();
+ for (int i = 0; i < slen; ++i) {
+ // bad idea but works for now
+ uint8_t base = 15;
+ if (seq.at(i) == 'A')
+ base = 1;
+ else if (seq.at(i) == 'C')
+ base = 2;
+ else if (seq.at(i) == 'G')
+ base = 4;
+ else if (seq.at(i) == 'T')
+ base = 8;
+
+ m_bases[i >> 1] &= ~(0xF << ((~i & 1) << 2)); ///< zero out previous 4-bit base encoding
+ m_bases[i >> 1] |= base << ((~i & 1) << 2); ///< insert new 4-bit base encoding
+
+ }
+ }
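+  // Usage sketch (illustrative values): place a 10 bp read at 1:1000-1009 with a fully
+  // matching alignment; the Cigar must consume exactly seq.length() query bases and
+  // gr->Width() reference bases, or the checks above throw:
+  //
+  //   Cigar cig; cig.add(CigarField('M', 10));
+  //   GenomicRegion gr(0, 1000, 1009, '+');
+  //   BamRecord rec("read1", "ACGTACGTAC", &gr, cig);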
+
+
+ CigarField::CigarField(char t, uint32_t len) {
+ int op = CigarCharToInt[(int)t];
+ if (op < 0)
+ throw std::invalid_argument("Cigar type must be one of MIDSHPN=X");
+ data = len << BAM_CIGAR_SHIFT;
+ data = data | static_cast<uint32_t>(op);
+ }
+
+ std::ostream& operator<<(std::ostream& out, const CigarField& c) {
+ out << bam_cigar_oplen(c.data) << bam_cigar_opchr(c.data);
+ return out;
+ }
+
+
+ std::ostream& operator<<(std::ostream& out, const Cigar& c) {
+ for (Cigar::const_iterator i = c.begin(); i != c.end(); ++i)
+ out << *i;
+ return out;
+ }
+
+
+ Cigar cigarFromString(const std::string& cig) {
+
+ Cigar tc;
+
+    // get the ops (MIDSHPNX)
+ std::vector<char> ops;
+ for (size_t i = 0; i < cig.length(); ++i)
+ if (!isdigit(cig.at(i))) {
+ ops.push_back(cig.at(i));
+ }
+
+ std::size_t prev = 0, pos;
+ std::vector<std::string> lens;
+ while ((pos = cig.find_first_of("MIDSHPNX", prev)) != std::string::npos) {
+ if (pos > prev)
+ lens.push_back(cig.substr(prev, pos-prev));
+ prev = pos+1;
+ }
+ if (prev < cig.length())
+ lens.push_back(cig.substr(prev, std::string::npos));
+
+ assert(ops.size() == lens.size());
+ for (size_t i = 0; i < lens.size(); ++i) {
+ tc.add(CigarField(ops[i], std::atoi(lens[i].c_str())));
+ }
+
+ return tc;
+
+ }
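+  // For example, cigarFromString("10M2I3S") yields a Cigar with fields 10M, 2I and 3S;
+  // the digit runs are split on the op characters and each length/op pair becomes a CigarField.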
+
+ bool Cigar::operator==(const Cigar& c) const {
+ if (m_data.size() != c.size())
+ return false;
+ if (!m_data.size()) // both empty
+ return true;
+ for (size_t i = 0; i < m_data.size(); ++i)
+ if (m_data[i].Type() != c[i].Type() || m_data[i].Length() != c[i].Length())
+ return false;
+ return true;
+ }
+
+
+ int BamRecord::OverlappingCoverage(const BamRecord& r) const {
+
+ uint32_t* c = bam_get_cigar(b);
+ uint32_t* c2 = bam_get_cigar(r.b);
+ uint8_t * cov1 = (uint8_t*)calloc(b->core.l_qseq, sizeof(uint8_t));
+ size_t pos = 0;
+ for (int k = 0; k < b->core.n_cigar; ++k) {
+      if (bam_cigar_opchr(c[k]) == 'M') // is match, so mark the covered query positions
+ for (size_t j = 0; j < bam_cigar_oplen(c[k]); ++j)
+ cov1[pos + j] = 1;
+ if (bam_cigar_type(bam_cigar_op(c[k]))&1) // consumes query, so move position
+ pos = pos + bam_cigar_oplen(c[k]);
+ }
+
+ pos = 0;
+ size_t ocov = 0; // overlapping coverage
+ for (int k = 0; k < r.b->core.n_cigar; ++k) {
+      if (bam_cigar_opchr(c2[k]) == 'M') // is match, so check coverage
+	for (size_t j = 0; j < bam_cigar_oplen(c2[k]); ++j)
+	  if (cov1[pos+j]) // this query position is covered in both records
+ ++ocov;
+ if (bam_cigar_type(bam_cigar_op(c2[k]))&1) // consumes query, so move position
+ pos = pos + bam_cigar_oplen(c2[k]);
+ }
+
+ free(cov1);
+
+ return ocov;
+ }
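+  // For example, two records whose CIGARs are both 100M over the same read give 100,
+  // since every query position is an 'M' base in both; positions count only where both
+  // records mark the same read offset as 'M'.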
+
+
+}
diff --git a/src/BamWriter.cpp b/src/BamWriter.cpp
new file mode 100644
index 0000000..c9bb029
--- /dev/null
+++ b/src/BamWriter.cpp
@@ -0,0 +1,148 @@
+#include "SeqLib/BamWalker.h"
+#include "SeqLib/BamWriter.h"
+
+#include <stdexcept>
+
+//#define DEBUG_WALKER 1
+
+namespace SeqLib {
+
+ void BamWriter::SetHeader(const SeqLib::BamHeader& h) {
+ hdr = h;
+ }
+
+ bool BamWriter::WriteHeader() const {
+
+ if (hdr.isEmpty()) {
+ std::cerr << "BamWriter::WriteHeader - No header supplied. Provide with SetWriteHeader" << std::endl;
+ return false;
+ }
+
+ if (!fop) {
+ std::cerr << "BamWriter::WriteHeader - Output not open for writing. Open with Open()" << std::endl;
+ return false;
+ }
+
+ if (sam_hdr_write(fop.get(), hdr.get()) < 0) {
+ std::cerr << "Cannot write header. sam_hdr_write exited with < 0" << std::endl;
+ return false;
+ }
+
+ return true;
+
+ }
+
+ bool BamWriter::Close() {
+
+ if (!fop)
+ return false;
+
+ fop.reset(); //tr1 compatible
+ //fop = NULL; // this clears shared_ptr, calls sam_close (c++11)
+
+ return true;
+ }
+
+bool BamWriter::BuildIndex() const {
+
+ // throw an error if BAM is not already closed
+ if (fop) {
+ std::cerr << "Trying to index open BAM. Close first with Close()" << std::endl;
+ return false;
+ }
+
+ if (m_out.empty()) {
+ std::cerr << "Trying to make index, but no BAM specified" << std::endl;
+ return false;
+ }
+
+ // call to htslib to build bai index
+ if (sam_index_build(m_out.c_str(), 0) < 0) { // 0 is "min_shift", which is 0 for bai index
+ std::cerr << "Failed to create index";
+ return false;
+ }
+
+ return true;
+
+}
+
+ bool BamWriter::Open(const std::string& f) {
+
+ // don't reopen
+ if (fop)
+ return false;
+
+ m_out = f;
+
+ // hts open the writer
+ fop = SeqPointer<htsFile>(hts_open(m_out.c_str(), output_format.c_str()), htsFile_delete());
+
+ if (!fop) {
+ return false;
+ //throw std::runtime_error("BamWriter::Open - Cannot open output file: " + f);
+ }
+
+ return true;
+ }
+
+ BamWriter::BamWriter(int o) {
+
+ switch(o) {
+ case BAM : output_format = "wb"; break;
+ case CRAM : output_format = "wc"; break;
+ case SAM : output_format = "w"; break;
+ default : throw std::invalid_argument("Invalid writer type");
+ }
+
+ }
+
+
+bool BamWriter::WriteRecord(const BamRecord &r)
+{
+ if (!fop) {
+ return false;
+ } else {
+ if (sam_write1(fop.get(), hdr.get(), r.raw()) < 0)
+ return false;
+ }
+
+ return true;
+}
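+// Usage sketch of the typical write path (the file name and the hdr/rec variables are
+// illustrative; BAM is the writer-type constant used in the constructor above):
+//
+//   BamWriter w(BAM);
+//   w.Open("out.bam");
+//   w.SetHeader(hdr);        // a BamHeader obtained elsewhere, e.g. from a BamReader
+//   w.WriteHeader();
+//   w.WriteRecord(rec);      // repeat per record
+//   w.Close();
+//   w.BuildIndex();          // only after Close()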
+
+std::ostream& operator<<(std::ostream& out, const BamWriter& b)
+{
+ if (b.fop)
+ out << "Write format: " << b.fop->format.format;
+ out << " Write file " << b.m_out;
+ return out;
+}
+
+ //does not return false if file not found
+bool BamWriter::SetCramReference(const std::string& ref) {
+
+ if (!fop)
+ return false;
+
+ // need to open reference for CRAM writing
+ char* fn_list = samfaipath(ref.c_str()); // eg ref = my.fa returns my.fa.fai
+ if (fn_list) {
+
+  // in theory hts_set_fai_filename should give back < 0
+  // if fn_list is not there, but it doesn't
+ if (!read_access_test(std::string(fn_list)))
+ return false;
+
+ int status = hts_set_fai_filename(fop.get(), fn_list);
+ if (status < 0) {
+ fprintf(stderr, "Failed to use reference \"%s\".\n", fn_list);
+ return false;
+ }
+ } else {
+ std::cerr << "Failed to get the reference for CRAM compression" << std::endl;
+ return false;
+ }
+
+ return true;
+}
+
+}
diff --git a/src/FastqReader.cpp b/src/FastqReader.cpp
new file mode 100644
index 0000000..1c9b84e
--- /dev/null
+++ b/src/FastqReader.cpp
@@ -0,0 +1,59 @@
+#include "SeqLib/FastqReader.h"
+
+#include <cctype>
+#include <algorithm>
+
+namespace SeqLib {
+
+ bool FastqReader::Open(const std::string& f) {
+
+ m_file = f;
+
+ // check if file exists
+ struct stat buffer;
+ if (stat(m_file.c_str(), &buffer) != 0) {
+      std::cerr << "FastqReader: Failed to read non-existent file " << m_file << std::endl;
+ return false;
+ }
+
+ fp = NULL;
+ fp = (m_file != "-") ? gzopen(m_file.c_str(), "r") : gzdopen(fileno(stdin), "r");
+
+ if (!fp) {
+ std::cerr << "FastqReader: Failed to read " << m_file << std::endl;
+ return false;
+ }
+
+ seq = kseq_init(fp); // set to first seq
+
+ return true;
+
+ }
+
+ FastqReader::FastqReader(const std::string& file) : m_file(file) {
+ Open(m_file);
+ }
+
+bool FastqReader::GetNextSequence(UnalignedSequence& s) {
+
+ // kseq_read parses fastq and fasta
+
+ if (!fp || !seq)
+ return false;
+
+ // no more reads
+ if (kseq_read(seq) < 0)
+ return false;
+
+ if (seq->name.s)
+ s.Name = std::string(seq->name.s, seq->name.l);
+ if (seq->seq.s)
+ s.Seq = std::string(seq->seq.s, seq->seq.l);
+ if (seq->qual.s)
+ s.Qual = std::string(seq->qual.s, seq->qual.l);
+
+ return true;
+
+}
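+// Usage sketch (the file name is illustrative): iterate a FASTQ or FASTA file record by record.
+//
+//   FastqReader fq("reads.fq");
+//   UnalignedSequence s;
+//   while (fq.GetNextSequence(s)) {
+//     // s.Name, s.Seq and (for FASTQ) s.Qual are populated here
+//   }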
+
+}
diff --git a/src/FermiAssembler.cpp b/src/FermiAssembler.cpp
new file mode 100644
index 0000000..7fdf843
--- /dev/null
+++ b/src/FermiAssembler.cpp
@@ -0,0 +1,177 @@
+#include "SeqLib/FermiAssembler.h"
+#define MAG_MIN_NSR_COEF .1
+
+namespace SeqLib {
+
+ FermiAssembler::FermiAssembler() : m_seqs(0), m(0), size(0), n_seqs(0), n_utg(0), m_utgs(0) {
+ fml_opt_init(&opt);
+ }
+
+ FermiAssembler::~FermiAssembler() {
+ ClearReads();
+ ClearContigs();
+ }
+
+ // code copied and slightly modified from
+ // fermi-lite/misc.c by Heng Li
+ void FermiAssembler::DirectAssemble(float kcov) {
+
+ rld_t *e = fml_seq2fmi(&opt, n_seqs, m_seqs);
+ mag_t *g = fml_fmi2mag(&opt, e);
+
+ opt.mag_opt.min_ensr = opt.mag_opt.min_ensr > kcov * MAG_MIN_NSR_COEF? opt.mag_opt.min_ensr : (int)(kcov * MAG_MIN_NSR_COEF + .499);
+ //opt.mag_opt.min_ensr = opt.mag_opt.min_ensr < opt0->max_cnt? opt.mag_opt.min_ensr : opt0->max_cnt;
+ //opt.mag_opt.min_ensr = opt.mag_opt.min_ensr > opt0->min_cnt? opt.mag_opt.min_ensr : opt0->min_cnt;
+ opt.mag_opt.min_insr = opt.mag_opt.min_ensr - 1;
+ fml_mag_clean(&opt, g);
+ m_utgs = fml_mag2utg(g, &n_utg);
+ }
+
+ void FermiAssembler::AddRead(const BamRecord& r) {
+ AddRead(UnalignedSequence(r.Qname(), r.Sequence(), r.Qualities())); // probably faster way
+ }
+
+ void FermiAssembler::AddRead(const UnalignedSequence& r) {
+
+ if (r.Seq.empty())
+ return;
+ if (r.Name.empty())
+ return;
+
+    // dynamically grow the storage when capacity runs out
+    if (m <= n_seqs) {
+      m = m <= 0 ? 32 : (m*2); // double the capacity (or start at 32)
+      m_seqs = (fseq1_t*)realloc(m_seqs, m * sizeof(fseq1_t));
+    }
+
+ // add the name
+ m_names.push_back(r.Name);
+
+ // construct the seq
+ fseq1_t *s;
+ s = &m_seqs[n_seqs];
+ s->seq = strdup(r.Seq.c_str());
+ s->qual = r.Qual.empty() ? NULL : strdup(r.Qual.c_str());
+
+ s->l_seq = r.Seq.length();
+ size += m_seqs[n_seqs++].l_seq;
+
+ }
+
+ void FermiAssembler::AddReads(const UnalignedSequenceVector& v) {
+
+ // alloc the memory
+ m = n_seqs + v.size();
+ m_seqs = (fseq1_t*)realloc(m_seqs, m * sizeof(fseq1_t));
+
+ for (UnalignedSequenceVector::const_iterator r = v.begin(); r != v.end(); ++r) {
+ m_names.push_back(r->Name);
+ fseq1_t *s;
+
+ s = &m_seqs[n_seqs];
+
+ s->seq = strdup(r->Seq.c_str());
+ s->qual = strdup(r->Qual.c_str());
+
+ s->l_seq = r->Seq.length();
+ size += m_seqs[n_seqs++].l_seq;
+ }
+
+
+ }
+ void FermiAssembler::AddReads(const BamRecordVector& brv) {
+
+    // alloc the memory; track the new capacity and total length in the members
+    // (as in the UnalignedSequenceVector overload) rather than in shadowing locals
+    m = n_seqs + brv.size();
+    m_seqs = (fseq1_t*)realloc(m_seqs, m * sizeof(fseq1_t));
+
+ for (BamRecordVector::const_iterator r = brv.begin(); r != brv.end(); ++r) {
+ m_names.push_back(r->Qname());
+ fseq1_t *s;
+
+ s = &m_seqs[n_seqs];
+
+ s->seq = strdup(r->Sequence().c_str());
+ s->qual = strdup(r->Qualities().c_str());
+
+ s->l_seq = r->Sequence().length();
+ size += m_seqs[n_seqs++].l_seq;
+ }
+
+ }
+
+ void FermiAssembler::ClearContigs() {
+ fml_utg_destroy(n_utg, m_utgs);
+ m_utgs = 0;
+ n_utg = 0;
+ }
+
+ void FermiAssembler::ClearReads() {
+ if (!m_seqs)
+ return; //already cleared
+
+ for (size_t i = 0; i < n_seqs; ++i) {
+ fseq1_t * s = &m_seqs[i];
+ if (s->qual)
+ free(s->qual);
+ s->qual = NULL;
+ if (s->seq)
+ free(s->seq);
+ s->seq = NULL;
+ }
+ free(m_seqs);
+ m_seqs = NULL;
+
+ }
+
+ void FermiAssembler::CorrectReads() {
+ fml_correct(&opt, n_seqs, m_seqs);
+ }
+
+ void FermiAssembler::CorrectAndFilterReads() {
+ fml_fltuniq(&opt, n_seqs, m_seqs);
+ }
+
+ void FermiAssembler::PerformAssembly() {
+ m_utgs = fml_assemble(&opt, n_seqs, m_seqs, &n_utg); // assemble!
+ }
+
+ std::vector<std::string> FermiAssembler::GetContigs() const {
+ std::vector<std::string> c;
+ for (size_t i = 0; i < n_utg; ++i)
+ c.push_back(std::string(m_utgs[i].seq));
+ return c;
+ }
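+  // Usage sketch of the typical assembly path (brv is an illustrative BamRecordVector):
+  //
+  //   FermiAssembler f;
+  //   f.AddReads(brv);              // BamRecordVector or UnalignedSequenceVector
+  //   f.CorrectReads();             // optional error correction
+  //   f.PerformAssembly();
+  //   std::vector<std::string> contigs = f.GetContigs();
+  //   f.ClearReads();               // reads are no longer needed once assembled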
+
+ /*void FermiAssembler::count() {
+
+ // initialize BFC options
+ uint64_t tot_len = 0;
+ for (int i = 0; i < n_seqs; ++i)
+ tot_len += m_seqs[i].l_seq; // compute total length
+ int l_pre = tot_len - 8 < 20? tot_len - 8 : 20;
+
+ //bfc_ch_t *ch = fml_count(n_seqs, m_seqs, opt.ec_k, 20, l_pre, opt.n_threads);
+ //std::cerr << " ch->k " << ch->k << " ch->l_pre " << ch->l_pre << std::endl;
+
+ // directly from fml count
+ cnt_step_t cs;
+ cs.n_seqs = n_seqs, cs.seqs = m_seqs, cs.k = opt.ec_k, cs.q = 20;
+ cs.ch = bfc_ch_init(cs.k, l_pre);
+ }*/
+
+ UnalignedSequenceVector FermiAssembler::GetSequences() const {
+
+ UnalignedSequenceVector r;
+ for (size_t i = 0; i < n_seqs; ++i) {
+ fseq1_t * s = &m_seqs[i];
+ UnalignedSequence read;
+ if (s->seq)
+ read.Seq = (std::string(s->seq));
+ read.Name = m_names[i];
+ r.push_back(read);
+ }
+ return r;
+ }
+
+}
diff --git a/src/GenomicRegion.cpp b/src/GenomicRegion.cpp
new file mode 100644
index 0000000..2d21a4d
--- /dev/null
+++ b/src/GenomicRegion.cpp
@@ -0,0 +1,275 @@
+#include "SeqLib/GenomicRegion.h"
+
+#include <cassert>
+#include <stdexcept>
+#include <climits>
+
+// 4 billion
+#define END_MAX 4000000000
+
+namespace SeqLib {
+
+// return the width of the genomic region
+int GenomicRegion::Width() const {
+ return pos2 - pos1 + 1;
+}
+
+// returns 0 for no overlap, 1 for a partial overlap, 2 if the argument is contained in this region, 3 if this region is contained in the argument
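+// For example, with this region at 1:100-200: gr = 1:150-250 returns 1 (partial overlap),
+// gr = 1:120-180 returns 2 (argument contained), gr = 1:50-300 returns 3 (this region
+// contained), and gr = 2:100-200 returns 0 (different chromosome).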
+int GenomicRegion::GetOverlap(const GenomicRegion& gr) const {
+
+ if (gr.chr != chr)
+ return 0;
+
+ // argument pos1 is in
+ bool gr1_in = gr.pos1 >= pos1 && gr.pos1 <= pos2;
+ // argument pos2 is in
+ bool gr2_in = gr.pos2 >= pos1 && gr.pos2 <= pos2;
+ // object pos1 is in
+ bool pos1_in = pos1 >= gr.pos1 && pos1 <= gr.pos2;
+ // object pos2 is in
+ bool pos2_in = pos2 >= gr.pos1 && pos2 <= gr.pos2;
+
+ // object is in the argument
+ if (pos1_in && pos2_in)
+ return 3;
+
+  // argument is in the object
+ if ( gr1_in && gr2_in)
+ return 2;
+
+ // partial overlap
+ if (gr1_in || gr2_in || pos1_in || pos2_in)
+ return 1;
+
+ return 0;
+
+}
+
+
+ std::string GenomicRegion::ChrName(const BamHeader& h) const {
+
+ std::string cc;
+ if (!h.isEmpty()) {
+ if (chr >= h.NumSequences())
+ throw std::invalid_argument( "GenomicRegion::ChrName - not enough targets in BamHeader to cover ref id");
+ else
+ cc = h.IDtoName(chr); // std::string(h->target_name[chr]);
+ } else {
+ cc = chrToString(chr);
+ }
+ return cc;
+ }
+
+
+ std::string GenomicRegion::PointString() const {
+ std::stringstream out;
+ out << chrToString(chr) << ":" << SeqLib::AddCommas<int>(pos1) << "(" << strand << ")";
+ return out.str();
+ }
+
+void GenomicRegion::Pad(int32_t pad) {
+
+ if (-pad*2 > Width())
+ throw std::out_of_range(
+ "GenomicRegion::pad - negative pad values can't obliterate GenomicRegion with val " +
+ tostring(chr) + ":" + tostring(pos1) + "-" + tostring(pos2) +
+ " and pad " + tostring(pad));
+
+ pos1 -= pad;
+ pos2 += pad;
+
+ //if (pad > pos1)
+ // pos1 = 1;
+ //else
+ // pos1 = pos1-pad;
+
+ //const int32_t maxpos = 250000000;
+ //pos2 = std::min(pos2+pad, maxpos); // 2500000000 is dummy for now. should be chr end
+
+}
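+// For example, Pad(10) turns 1:100-200 into 1:90-210, while Pad(-60) on the same region
+// throws, since removing 120 bp would invert a 101 bp interval.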
+
+bool GenomicRegion::operator<(const GenomicRegion& b) const {
+ return (chr < b.chr) || (chr == b.chr && pos1 < b.pos1) || (chr==b.chr && pos1 == b.pos1 && pos2 < b.pos2);
+}
+
+bool GenomicRegion::operator>(const GenomicRegion& b) const {
+ return !(*this == b) && !(*this < b);
+}
+
+bool GenomicRegion::operator==(const GenomicRegion &b) const {
+ return (chr == b.chr && pos1 == b.pos1 && b.pos2 == pos2);
+}
+
+bool GenomicRegion::operator!=(const GenomicRegion &b) const {
+ return !(*this == b);
+}
+
+bool GenomicRegion::operator<=(const GenomicRegion &b) const {
+ return (*this < b || *this == b);
+}
+
+bool GenomicRegion::operator>=(const GenomicRegion &b) const {
+ return (*this > b || *this == b);
+}
+
+ std::string GenomicRegion::ToString() const {
+ return chrToString(chr) + ":" + SeqLib::AddCommas<int>(pos1) + "-" + AddCommas<int>(pos2) + "(" +
+ strand + ")";
+ }
+
+std::ostream& operator<<(std::ostream& out, const GenomicRegion& gr) {
+ out << gr.chrToString(gr.chr) << ":" << SeqLib::AddCommas<int>(gr.pos1) << "-" << AddCommas<int>(gr.pos2) << "(" <<
+ gr.strand << ")";
+ return out;
+}
+
+ GenomicRegion::GenomicRegion(const std::string& reg, const BamHeader& hdr) {
+
+ if (hdr.isEmpty())
+ throw std::invalid_argument("GenomicRegion constructor - supplied empty BamHeader");
+
+ // scrub String
+ //std::string reg2 = SeqLib::scrubString(reg, "chr");
+
+ // use htslib region parsing code
+ int tid, beg, end;
+ const char * q = hts_parse_reg(reg.c_str(), &beg, &end);
+ if (q) {
+ char *tmp = (char*)alloca(q - reg.c_str() + 1); // stack alloc
+ strncpy(tmp, reg.c_str(), q - reg.c_str());
+ tmp[q - reg.c_str()] = 0;
+ tid = hdr.Name2ID(std::string(tmp)); //bam_name2id(h.get(), tmp);
+ if (tid < 0) {
+ std::string inv = "GenomicRegion constructor: Failed to set region for " + reg;
+ throw std::invalid_argument(inv);
+ }
+
+    if (end == INT_MAX) { // whole chromosome given (no coordinate range)
+ tid = hdr.Name2ID(reg);
+ beg = 0;
+ end = hdr.GetSequenceLength(reg);
+ }
+ } else {
+ std::string inv = "GenomicRegion constructor: Failed to set region for " + reg;
+ throw std::invalid_argument(inv);
+ }
+
+ chr = tid;
+ pos1 = beg+1;
+ pos2 = end;
+ strand = '*';
+
+}
+
+// constructor to take a pair of coordinates to define the genomic interval
+GenomicRegion::GenomicRegion(int32_t t_chr, int32_t t_pos1, int32_t t_pos2, char t_strand) {
+
+ if (t_pos2 < t_pos1 )
+ throw std::invalid_argument( "GenomicRegion constructor: end pos must be >= start pos" );
+
+ if ( !(t_strand == '+' || t_strand == '-' || t_strand == '*') )
+ throw std::invalid_argument( "GenomicRegion constructor: strand must be one of +, -, *" );
+
+ chr = t_chr;
+ pos1 = t_pos1;
+ pos2 = t_pos2;
+ strand = t_strand;
+
+}
+ //private
+std::string GenomicRegion::chrToString(int32_t ref) const {
+
+ std::string ref_id;
+ if (ref < 0)
+ ref_id = tostring(ref);
+
+ if (ref == 22)
+ ref_id = "X";
+ else if (ref == 23)
+ ref_id = "Y";
+ else if (ref == 24)
+ ref_id = "M";
+ else if (ref >= 0)
+ ref_id = tostring(ref+1);
+ assert(ref_id != "23");
+ return ref_id;
+}
+
+// checks whether a GenomicRegion is empty
+bool GenomicRegion::IsEmpty() const {
+ return chr == -1 && pos1 == 0 && pos2 == 0;
+}
+
+
+int32_t GenomicRegion::DistanceBetweenStarts(const GenomicRegion &gr) const {
+
+ if (gr.chr != chr)
+ return -1;
+ else
+ return std::abs(pos1 - gr.pos1);//((pos1 > gr.pos1) ? (pos1 - gr.pos1) : (gr.pos1 - pos1));
+
+}
+
+int32_t GenomicRegion::DistanceBetweenEnds(const GenomicRegion &gr) const {
+
+ if (gr.chr != chr)
+ return -1;
+ else
+ return std::abs(pos2 - gr.pos2);
+
+}
+
+
+ /*void GenomicRegion::Random() {
+
+ uint32_t big = rand() % SeqLib::genome_size_XY;
+ //SeqLib::genRandomValue(big, SeqLib::genome_size_XY, seed);
+
+ for (size_t k = 0; k < 25; ++k)
+ if (big < SeqLib::CHR_CLEN[k]) {
+ assert(k > 0);
+ chr = --k;
+ assert(big > SeqLib::CHR_CLEN[chr]);
+ pos1 = big - SeqLib::CHR_CLEN[chr];
+ pos2 = pos1;
+ return;
+ }
+
+ std::cerr << "Value of " << big << " outside of expected range." << std::endl;
+ assert(false);
+
+ }*/
+
+ GenomicRegion::GenomicRegion(const std::string& tchr, const std::string& tpos1, const std::string& tpos2, const SeqLib::BamHeader& hdr)
+ {
+ strand = '*';
+ // convert the pos strings
+ // throws invalid_argument if conversion can't be performed
+ // or throws an out_of_range if it is too big for result
+#ifdef HAVE_C11
+ pos1 = std::stoi(tpos1);
+ pos2 = std::stoi(tpos2);
+#else
+ pos1 = std::atoi(tpos1.c_str());
+ pos2 = std::atoi(tpos2.c_str());
+#endif
+
+ // if no header, assume that it is "standard"
+ if (hdr.isEmpty()) {
+ if (tchr == "X" || tchr == "chrX")
+ chr = 22;
+ else if (tchr == "Y" || tchr == "chrY")
+ chr = 23;
+ else
+#ifdef HAVE_C11
+ chr = std::stoi(SeqLib::scrubString(tchr, "chr")) - 1;
+#else
+      chr = std::atoi(SeqLib::scrubString(tchr, "chr").c_str()) - 1;
+#endif
+ return;
+ } else {
+ chr = hdr.Name2ID(tchr); //bam_name2id(hdr.get(), tchr.c_str());
+ }
+ }
+}
+
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 0000000..925738b
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,7 @@
+noinst_LIBRARIES = libseqlib.a
+
+libseqlib_a_CPPFLAGS = -I../ -I../htslib -Wno-sign-compare
+
+libseqlib_a_SOURCES = FastqReader.cpp BFC.cpp ReadFilter.cpp SeqPlot.cpp jsoncpp.cpp ssw_cpp.cpp ssw.c \
+ GenomicRegion.cpp RefGenome.cpp BamWriter.cpp BamReader.cpp \
+ BWAWrapper.cpp BamRecord.cpp FermiAssembler.cpp BamHeader.cpp
diff --git a/src/Makefile.in b/src/Makefile.in
new file mode 100644
index 0000000..664aab2
--- /dev/null
+++ b/src/Makefile.in
@@ -0,0 +1,679 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+subdir = SeqLib/src
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LIBRARIES = $(noinst_LIBRARIES)
+AR = ar
+ARFLAGS = cru
+libseqlib_a_AR = $(AR) $(ARFLAGS)
+libseqlib_a_LIBADD =
+am_libseqlib_a_OBJECTS = libseqlib_a-FastqReader.$(OBJEXT) \
+ libseqlib_a-BFC.$(OBJEXT) libseqlib_a-ReadFilter.$(OBJEXT) \
+ libseqlib_a-SeqPlot.$(OBJEXT) libseqlib_a-jsoncpp.$(OBJEXT) \
+ libseqlib_a-ssw_cpp.$(OBJEXT) libseqlib_a-ssw.$(OBJEXT) \
+ libseqlib_a-GenomicRegion.$(OBJEXT) \
+ libseqlib_a-RefGenome.$(OBJEXT) \
+ libseqlib_a-BamWriter.$(OBJEXT) \
+ libseqlib_a-BamReader.$(OBJEXT) \
+ libseqlib_a-BWAWrapper.$(OBJEXT) \
+ libseqlib_a-BamRecord.$(OBJEXT) \
+ libseqlib_a-FermiAssembler.$(OBJEXT) \
+ libseqlib_a-BamHeader.$(OBJEXT)
+libseqlib_a_OBJECTS = $(am_libseqlib_a_OBJECTS)
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \
+ -o $@
+SOURCES = $(libseqlib_a_SOURCES)
+DIST_SOURCES = $(libseqlib_a_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CXXFLAGS = @AM_CXXFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build_alias = @build_alias@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host_alias = @host_alias@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+noinst_LIBRARIES = libseqlib.a
+libseqlib_a_CPPFLAGS = -I../ -I../htslib -Wno-sign-compare
+libseqlib_a_SOURCES = FastqReader.cpp BFC.cpp ReadFilter.cpp SeqPlot.cpp jsoncpp.cpp ssw_cpp.cpp ssw.c \
+ GenomicRegion.cpp RefGenome.cpp BamWriter.cpp BamReader.cpp \
+ BWAWrapper.cpp BamRecord.cpp FermiAssembler.cpp BamHeader.cpp
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .cpp .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign SeqLib/src/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign SeqLib/src/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLIBRARIES:
+ -test -z "$(noinst_LIBRARIES)" || rm -f $(noinst_LIBRARIES)
+libseqlib.a: $(libseqlib_a_OBJECTS) $(libseqlib_a_DEPENDENCIES)
+ -rm -f libseqlib.a
+ $(libseqlib_a_AR) libseqlib.a $(libseqlib_a_OBJECTS) $(libseqlib_a_LIBADD)
+ $(RANLIB) libseqlib.a
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-BFC.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-BWAWrapper.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-BamHeader.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-BamReader.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-BamRecord.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-BamWriter.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-FastqReader.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-FermiAssembler.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-GenomicRegion.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-ReadFilter.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-RefGenome.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-SeqPlot.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-jsoncpp.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-ssw.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libseqlib_a-ssw_cpp.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+libseqlib_a-ssw.o: ssw.c
+ at am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT libseqlib_a-ssw.o -MD -MP -MF $(DEPDIR)/libseqlib_a-ssw.Tpo -c -o libseqlib_a-ssw.o `test -f 'ssw.c' || echo '$(srcdir)/'`ssw.c
+ at am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-ssw.Tpo $(DEPDIR)/libseqlib_a-ssw.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ssw.c' object='libseqlib_a-ssw.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o libseqlib_a-ssw.o `test -f 'ssw.c' || echo '$(srcdir)/'`ssw.c
+
+libseqlib_a-ssw.obj: ssw.c
+ at am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT libseqlib_a-ssw.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-ssw.Tpo -c -o libseqlib_a-ssw.obj `if test -f 'ssw.c'; then $(CYGPATH_W) 'ssw.c'; else $(CYGPATH_W) '$(srcdir)/ssw.c'; fi`
+ at am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-ssw.Tpo $(DEPDIR)/libseqlib_a-ssw.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ssw.c' object='libseqlib_a-ssw.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o libseqlib_a-ssw.obj `if test -f 'ssw.c'; then $(CYGPATH_W) 'ssw.c'; else $(CYGPATH_W) '$(srcdir)/ssw.c'; fi`
+
+.cpp.o:
+@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(CXXCOMPILE) -c -o $@ $<
+
+.cpp.obj:
+@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+libseqlib_a-FastqReader.o: FastqReader.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-FastqReader.o -MD -MP -MF $(DEPDIR)/libseqlib_a-FastqReader.Tpo -c -o libseqlib_a-FastqReader.o `test -f 'FastqReader.cpp' || echo '$(srcdir)/'`FastqReader.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-FastqReader.Tpo $(DEPDIR)/libseqlib_a-FastqReader.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FastqReader.cpp' object='libseqlib_a-FastqReader.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-FastqReader.o `test -f 'FastqReader.cpp' || echo '$(srcdir)/'`FastqReader.cpp
+
+libseqlib_a-FastqReader.obj: FastqReader.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-FastqReader.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-FastqReader.Tpo -c -o libseqlib_a-FastqReader.obj `if test -f 'FastqReader.cpp'; then $(CYGPATH_W) 'FastqReader.cpp'; else $(CYGPATH_W) '$(srcdir)/FastqReader.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-FastqReader.Tpo $(DEPDIR)/libseqlib_a-FastqReader.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FastqReader.cpp' object='libseqlib_a-FastqReader.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-FastqReader.obj `if test -f 'FastqReader.cpp'; then $(CYGPATH_W) 'FastqReader.cpp'; else $(CYGPATH_W) '$(srcdir)/FastqReader.cpp'; fi`
+
+libseqlib_a-BFC.o: BFC.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BFC.o -MD -MP -MF $(DEPDIR)/libseqlib_a-BFC.Tpo -c -o libseqlib_a-BFC.o `test -f 'BFC.cpp' || echo '$(srcdir)/'`BFC.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BFC.Tpo $(DEPDIR)/libseqlib_a-BFC.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BFC.cpp' object='libseqlib_a-BFC.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BFC.o `test -f 'BFC.cpp' || echo '$(srcdir)/'`BFC.cpp
+
+libseqlib_a-BFC.obj: BFC.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BFC.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-BFC.Tpo -c -o libseqlib_a-BFC.obj `if test -f 'BFC.cpp'; then $(CYGPATH_W) 'BFC.cpp'; else $(CYGPATH_W) '$(srcdir)/BFC.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BFC.Tpo $(DEPDIR)/libseqlib_a-BFC.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BFC.cpp' object='libseqlib_a-BFC.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BFC.obj `if test -f 'BFC.cpp'; then $(CYGPATH_W) 'BFC.cpp'; else $(CYGPATH_W) '$(srcdir)/BFC.cpp'; fi`
+
+libseqlib_a-ReadFilter.o: ReadFilter.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-ReadFilter.o -MD -MP -MF $(DEPDIR)/libseqlib_a-ReadFilter.Tpo -c -o libseqlib_a-ReadFilter.o `test -f 'ReadFilter.cpp' || echo '$(srcdir)/'`ReadFilter.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-ReadFilter.Tpo $(DEPDIR)/libseqlib_a-ReadFilter.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ReadFilter.cpp' object='libseqlib_a-ReadFilter.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-ReadFilter.o `test -f 'ReadFilter.cpp' || echo '$(srcdir)/'`ReadFilter.cpp
+
+libseqlib_a-ReadFilter.obj: ReadFilter.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-ReadFilter.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-ReadFilter.Tpo -c -o libseqlib_a-ReadFilter.obj `if test -f 'ReadFilter.cpp'; then $(CYGPATH_W) 'ReadFilter.cpp'; else $(CYGPATH_W) '$(srcdir)/ReadFilter.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-ReadFilter.Tpo $(DEPDIR)/libseqlib_a-ReadFilter.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ReadFilter.cpp' object='libseqlib_a-ReadFilter.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-ReadFilter.obj `if test -f 'ReadFilter.cpp'; then $(CYGPATH_W) 'ReadFilter.cpp'; else $(CYGPATH_W) '$(srcdir)/ReadFilter.cpp'; fi`
+
+libseqlib_a-SeqPlot.o: SeqPlot.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-SeqPlot.o -MD -MP -MF $(DEPDIR)/libseqlib_a-SeqPlot.Tpo -c -o libseqlib_a-SeqPlot.o `test -f 'SeqPlot.cpp' || echo '$(srcdir)/'`SeqPlot.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-SeqPlot.Tpo $(DEPDIR)/libseqlib_a-SeqPlot.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SeqPlot.cpp' object='libseqlib_a-SeqPlot.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-SeqPlot.o `test -f 'SeqPlot.cpp' || echo '$(srcdir)/'`SeqPlot.cpp
+
+libseqlib_a-SeqPlot.obj: SeqPlot.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-SeqPlot.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-SeqPlot.Tpo -c -o libseqlib_a-SeqPlot.obj `if test -f 'SeqPlot.cpp'; then $(CYGPATH_W) 'SeqPlot.cpp'; else $(CYGPATH_W) '$(srcdir)/SeqPlot.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-SeqPlot.Tpo $(DEPDIR)/libseqlib_a-SeqPlot.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='SeqPlot.cpp' object='libseqlib_a-SeqPlot.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-SeqPlot.obj `if test -f 'SeqPlot.cpp'; then $(CYGPATH_W) 'SeqPlot.cpp'; else $(CYGPATH_W) '$(srcdir)/SeqPlot.cpp'; fi`
+
+libseqlib_a-jsoncpp.o: jsoncpp.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-jsoncpp.o -MD -MP -MF $(DEPDIR)/libseqlib_a-jsoncpp.Tpo -c -o libseqlib_a-jsoncpp.o `test -f 'jsoncpp.cpp' || echo '$(srcdir)/'`jsoncpp.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-jsoncpp.Tpo $(DEPDIR)/libseqlib_a-jsoncpp.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='jsoncpp.cpp' object='libseqlib_a-jsoncpp.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-jsoncpp.o `test -f 'jsoncpp.cpp' || echo '$(srcdir)/'`jsoncpp.cpp
+
+libseqlib_a-jsoncpp.obj: jsoncpp.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-jsoncpp.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-jsoncpp.Tpo -c -o libseqlib_a-jsoncpp.obj `if test -f 'jsoncpp.cpp'; then $(CYGPATH_W) 'jsoncpp.cpp'; else $(CYGPATH_W) '$(srcdir)/jsoncpp.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-jsoncpp.Tpo $(DEPDIR)/libseqlib_a-jsoncpp.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='jsoncpp.cpp' object='libseqlib_a-jsoncpp.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-jsoncpp.obj `if test -f 'jsoncpp.cpp'; then $(CYGPATH_W) 'jsoncpp.cpp'; else $(CYGPATH_W) '$(srcdir)/jsoncpp.cpp'; fi`
+
+libseqlib_a-ssw_cpp.o: ssw_cpp.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-ssw_cpp.o -MD -MP -MF $(DEPDIR)/libseqlib_a-ssw_cpp.Tpo -c -o libseqlib_a-ssw_cpp.o `test -f 'ssw_cpp.cpp' || echo '$(srcdir)/'`ssw_cpp.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-ssw_cpp.Tpo $(DEPDIR)/libseqlib_a-ssw_cpp.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ssw_cpp.cpp' object='libseqlib_a-ssw_cpp.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-ssw_cpp.o `test -f 'ssw_cpp.cpp' || echo '$(srcdir)/'`ssw_cpp.cpp
+
+libseqlib_a-ssw_cpp.obj: ssw_cpp.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-ssw_cpp.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-ssw_cpp.Tpo -c -o libseqlib_a-ssw_cpp.obj `if test -f 'ssw_cpp.cpp'; then $(CYGPATH_W) 'ssw_cpp.cpp'; else $(CYGPATH_W) '$(srcdir)/ssw_cpp.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-ssw_cpp.Tpo $(DEPDIR)/libseqlib_a-ssw_cpp.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='ssw_cpp.cpp' object='libseqlib_a-ssw_cpp.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-ssw_cpp.obj `if test -f 'ssw_cpp.cpp'; then $(CYGPATH_W) 'ssw_cpp.cpp'; else $(CYGPATH_W) '$(srcdir)/ssw_cpp.cpp'; fi`
+
+libseqlib_a-GenomicRegion.o: GenomicRegion.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-GenomicRegion.o -MD -MP -MF $(DEPDIR)/libseqlib_a-GenomicRegion.Tpo -c -o libseqlib_a-GenomicRegion.o `test -f 'GenomicRegion.cpp' || echo '$(srcdir)/'`GenomicRegion.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-GenomicRegion.Tpo $(DEPDIR)/libseqlib_a-GenomicRegion.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GenomicRegion.cpp' object='libseqlib_a-GenomicRegion.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-GenomicRegion.o `test -f 'GenomicRegion.cpp' || echo '$(srcdir)/'`GenomicRegion.cpp
+
+libseqlib_a-GenomicRegion.obj: GenomicRegion.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-GenomicRegion.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-GenomicRegion.Tpo -c -o libseqlib_a-GenomicRegion.obj `if test -f 'GenomicRegion.cpp'; then $(CYGPATH_W) 'GenomicRegion.cpp'; else $(CYGPATH_W) '$(srcdir)/GenomicRegion.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-GenomicRegion.Tpo $(DEPDIR)/libseqlib_a-GenomicRegion.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='GenomicRegion.cpp' object='libseqlib_a-GenomicRegion.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-GenomicRegion.obj `if test -f 'GenomicRegion.cpp'; then $(CYGPATH_W) 'GenomicRegion.cpp'; else $(CYGPATH_W) '$(srcdir)/GenomicRegion.cpp'; fi`
+
+libseqlib_a-RefGenome.o: RefGenome.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-RefGenome.o -MD -MP -MF $(DEPDIR)/libseqlib_a-RefGenome.Tpo -c -o libseqlib_a-RefGenome.o `test -f 'RefGenome.cpp' || echo '$(srcdir)/'`RefGenome.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-RefGenome.Tpo $(DEPDIR)/libseqlib_a-RefGenome.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RefGenome.cpp' object='libseqlib_a-RefGenome.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-RefGenome.o `test -f 'RefGenome.cpp' || echo '$(srcdir)/'`RefGenome.cpp
+
+libseqlib_a-RefGenome.obj: RefGenome.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-RefGenome.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-RefGenome.Tpo -c -o libseqlib_a-RefGenome.obj `if test -f 'RefGenome.cpp'; then $(CYGPATH_W) 'RefGenome.cpp'; else $(CYGPATH_W) '$(srcdir)/RefGenome.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-RefGenome.Tpo $(DEPDIR)/libseqlib_a-RefGenome.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='RefGenome.cpp' object='libseqlib_a-RefGenome.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-RefGenome.obj `if test -f 'RefGenome.cpp'; then $(CYGPATH_W) 'RefGenome.cpp'; else $(CYGPATH_W) '$(srcdir)/RefGenome.cpp'; fi`
+
+libseqlib_a-BamWriter.o: BamWriter.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BamWriter.o -MD -MP -MF $(DEPDIR)/libseqlib_a-BamWriter.Tpo -c -o libseqlib_a-BamWriter.o `test -f 'BamWriter.cpp' || echo '$(srcdir)/'`BamWriter.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BamWriter.Tpo $(DEPDIR)/libseqlib_a-BamWriter.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BamWriter.cpp' object='libseqlib_a-BamWriter.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BamWriter.o `test -f 'BamWriter.cpp' || echo '$(srcdir)/'`BamWriter.cpp
+
+libseqlib_a-BamWriter.obj: BamWriter.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BamWriter.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-BamWriter.Tpo -c -o libseqlib_a-BamWriter.obj `if test -f 'BamWriter.cpp'; then $(CYGPATH_W) 'BamWriter.cpp'; else $(CYGPATH_W) '$(srcdir)/BamWriter.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BamWriter.Tpo $(DEPDIR)/libseqlib_a-BamWriter.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BamWriter.cpp' object='libseqlib_a-BamWriter.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BamWriter.obj `if test -f 'BamWriter.cpp'; then $(CYGPATH_W) 'BamWriter.cpp'; else $(CYGPATH_W) '$(srcdir)/BamWriter.cpp'; fi`
+
+libseqlib_a-BamReader.o: BamReader.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BamReader.o -MD -MP -MF $(DEPDIR)/libseqlib_a-BamReader.Tpo -c -o libseqlib_a-BamReader.o `test -f 'BamReader.cpp' || echo '$(srcdir)/'`BamReader.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BamReader.Tpo $(DEPDIR)/libseqlib_a-BamReader.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BamReader.cpp' object='libseqlib_a-BamReader.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BamReader.o `test -f 'BamReader.cpp' || echo '$(srcdir)/'`BamReader.cpp
+
+libseqlib_a-BamReader.obj: BamReader.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BamReader.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-BamReader.Tpo -c -o libseqlib_a-BamReader.obj `if test -f 'BamReader.cpp'; then $(CYGPATH_W) 'BamReader.cpp'; else $(CYGPATH_W) '$(srcdir)/BamReader.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BamReader.Tpo $(DEPDIR)/libseqlib_a-BamReader.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BamReader.cpp' object='libseqlib_a-BamReader.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BamReader.obj `if test -f 'BamReader.cpp'; then $(CYGPATH_W) 'BamReader.cpp'; else $(CYGPATH_W) '$(srcdir)/BamReader.cpp'; fi`
+
+libseqlib_a-BWAWrapper.o: BWAWrapper.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BWAWrapper.o -MD -MP -MF $(DEPDIR)/libseqlib_a-BWAWrapper.Tpo -c -o libseqlib_a-BWAWrapper.o `test -f 'BWAWrapper.cpp' || echo '$(srcdir)/'`BWAWrapper.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BWAWrapper.Tpo $(DEPDIR)/libseqlib_a-BWAWrapper.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BWAWrapper.cpp' object='libseqlib_a-BWAWrapper.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BWAWrapper.o `test -f 'BWAWrapper.cpp' || echo '$(srcdir)/'`BWAWrapper.cpp
+
+libseqlib_a-BWAWrapper.obj: BWAWrapper.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BWAWrapper.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-BWAWrapper.Tpo -c -o libseqlib_a-BWAWrapper.obj `if test -f 'BWAWrapper.cpp'; then $(CYGPATH_W) 'BWAWrapper.cpp'; else $(CYGPATH_W) '$(srcdir)/BWAWrapper.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BWAWrapper.Tpo $(DEPDIR)/libseqlib_a-BWAWrapper.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BWAWrapper.cpp' object='libseqlib_a-BWAWrapper.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BWAWrapper.obj `if test -f 'BWAWrapper.cpp'; then $(CYGPATH_W) 'BWAWrapper.cpp'; else $(CYGPATH_W) '$(srcdir)/BWAWrapper.cpp'; fi`
+
+libseqlib_a-BamRecord.o: BamRecord.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BamRecord.o -MD -MP -MF $(DEPDIR)/libseqlib_a-BamRecord.Tpo -c -o libseqlib_a-BamRecord.o `test -f 'BamRecord.cpp' || echo '$(srcdir)/'`BamRecord.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BamRecord.Tpo $(DEPDIR)/libseqlib_a-BamRecord.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BamRecord.cpp' object='libseqlib_a-BamRecord.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BamRecord.o `test -f 'BamRecord.cpp' || echo '$(srcdir)/'`BamRecord.cpp
+
+libseqlib_a-BamRecord.obj: BamRecord.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BamRecord.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-BamRecord.Tpo -c -o libseqlib_a-BamRecord.obj `if test -f 'BamRecord.cpp'; then $(CYGPATH_W) 'BamRecord.cpp'; else $(CYGPATH_W) '$(srcdir)/BamRecord.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BamRecord.Tpo $(DEPDIR)/libseqlib_a-BamRecord.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BamRecord.cpp' object='libseqlib_a-BamRecord.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BamRecord.obj `if test -f 'BamRecord.cpp'; then $(CYGPATH_W) 'BamRecord.cpp'; else $(CYGPATH_W) '$(srcdir)/BamRecord.cpp'; fi`
+
+libseqlib_a-FermiAssembler.o: FermiAssembler.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-FermiAssembler.o -MD -MP -MF $(DEPDIR)/libseqlib_a-FermiAssembler.Tpo -c -o libseqlib_a-FermiAssembler.o `test -f 'FermiAssembler.cpp' || echo '$(srcdir)/'`FermiAssembler.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-FermiAssembler.Tpo $(DEPDIR)/libseqlib_a-FermiAssembler.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FermiAssembler.cpp' object='libseqlib_a-FermiAssembler.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-FermiAssembler.o `test -f 'FermiAssembler.cpp' || echo '$(srcdir)/'`FermiAssembler.cpp
+
+libseqlib_a-FermiAssembler.obj: FermiAssembler.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-FermiAssembler.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-FermiAssembler.Tpo -c -o libseqlib_a-FermiAssembler.obj `if test -f 'FermiAssembler.cpp'; then $(CYGPATH_W) 'FermiAssembler.cpp'; else $(CYGPATH_W) '$(srcdir)/FermiAssembler.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-FermiAssembler.Tpo $(DEPDIR)/libseqlib_a-FermiAssembler.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='FermiAssembler.cpp' object='libseqlib_a-FermiAssembler.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-FermiAssembler.obj `if test -f 'FermiAssembler.cpp'; then $(CYGPATH_W) 'FermiAssembler.cpp'; else $(CYGPATH_W) '$(srcdir)/FermiAssembler.cpp'; fi`
+
+libseqlib_a-BamHeader.o: BamHeader.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BamHeader.o -MD -MP -MF $(DEPDIR)/libseqlib_a-BamHeader.Tpo -c -o libseqlib_a-BamHeader.o `test -f 'BamHeader.cpp' || echo '$(srcdir)/'`BamHeader.cpp
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BamHeader.Tpo $(DEPDIR)/libseqlib_a-BamHeader.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BamHeader.cpp' object='libseqlib_a-BamHeader.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BamHeader.o `test -f 'BamHeader.cpp' || echo '$(srcdir)/'`BamHeader.cpp
+
+libseqlib_a-BamHeader.obj: BamHeader.cpp
+ at am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libseqlib_a-BamHeader.obj -MD -MP -MF $(DEPDIR)/libseqlib_a-BamHeader.Tpo -c -o libseqlib_a-BamHeader.obj `if test -f 'BamHeader.cpp'; then $(CYGPATH_W) 'BamHeader.cpp'; else $(CYGPATH_W) '$(srcdir)/BamHeader.cpp'; fi`
+ at am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libseqlib_a-BamHeader.Tpo $(DEPDIR)/libseqlib_a-BamHeader.Po
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='BamHeader.cpp' object='libseqlib_a-BamHeader.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libseqlib_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libseqlib_a-BamHeader.obj `if test -f 'BamHeader.cpp'; then $(CYGPATH_W) 'BamHeader.cpp'; else $(CYGPATH_W) '$(srcdir)/BamHeader.cpp'; fi`
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-noinstLIBRARIES mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+ clean-noinstLIBRARIES ctags distclean distclean-compile \
+ distclean-generic distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \
+ uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/ReadFilter.cpp b/src/ReadFilter.cpp
new file mode 100644
index 0000000..8a7749f
--- /dev/null
+++ b/src/ReadFilter.cpp
@@ -0,0 +1,873 @@
+#include "SeqLib/ReadFilter.h"
+
+#include <cassert>
+#include "htslib/htslib/khash.h"
+
+//#define QNAME "D0EN0ACXX111207:7:2306:6903:136511"
+//#define QFLAG -1
+
+//#define DEBUG_MINI 1
+
+#ifdef QNAME
+#define DEBUGIV(read, msg) \
+ if ((read).Qname() == QNAME && ((read).AlignmentFlag() == QFLAG || QFLAG == -1)) { std::cerr << (msg) << " read " << (read) << std::endl; }
+#else
+#define DEBUGIV(read, msg)
+#endif
+
+namespace SeqLib {
+
+ namespace Filter {
+ // return if this rule accepts all reads
+ bool AbstractRule::isEvery() const {
+ return read_group.empty() && ins.isEvery() && del.isEvery() && isize.isEvery() &&
+ mapq.isEvery() && len.isEvery() && clip.isEvery() && nm.isEvery() &&
+ nbases.isEvery() && fr.isEvery() &&
+ (subsam_frac >= 1) && xp.isEvery()
+#ifdef HAVE_C11
+ && !aho.count
+#endif
+ ;
+ }
+
+// define what is a valid condition
+/* static const StringSet valid =
+ {
+ "duplicate", "supplementary", "qcfail", "hardclip", "fwd_strand",
+ "rev_strand", "mate_fwd_strand", "mate_rev_strand", "mapped",
+ "mate_mapped", "isize","clip", "length","nm",
+ "mapq", "all", "ff", "xp","fr","rr","rf",
+ "ic", "discordant","motif","nbases","!motif","allflag", "!allflag", "anyflag", "!anyflag",
+ "ins","del", "subsample", "rg"
+};
+
+ static const StringSet allowed_region_annots =
+ { "region","pad", "matelink", "exclude", "rules"};
+
+ static const StringSet allowed_flag_annots =
+ {"duplicate", "supplementary", "qcfail", "hardclip",
+ "fwd_strand", "rev_strand", "mate_fwd_strand", "mate_rev_strand",
+ "mapped", "mate_mapped", "ff", "fr", "rr", "rf", "ic"};
+*/
+bool ReadFilter::isValid(const BamRecord &r) {
+
+ // empty default is pass
+ if (!m_abstract_rules.size())
+ return true;
+
+ for (std::vector<AbstractRule>::iterator it = m_abstract_rules.begin();
+ it != m_abstract_rules.end(); ++it)
+ if (it->isValid(r)) {
+ ++it->m_count; //update this rule counter
+ ++m_count;
+ return true; // it is includable in at least one.
+ }
+
+ return false;
+
+}
+
+ int FlagRule::parse_json_int(const Json::Value& v) {
+
+ if (v.asInt())
+ return v.asInt();
+ throw std::invalid_argument("Failed to parse int tag in JSON");
+ }
+
+ bool convert_to_bool(const Json::Value& value, const std::string& name) {
+
+ Json::Value null(Json::nullValue);
+ Json::Value v = value.get(name, null);
+ // absent -> false; otherwise return the boolean value itself
+ return v != null && v.asBool();
+
+ }
+
+// check whether a BamRecord (or optionally its mate) overlaps the regions
+// contained in these rules
+bool ReadFilter::isReadOverlappingRegion(const BamRecord &r) const {
+
+ // if this is a whole genome rule, it overlaps
+ if (!m_grv.size())
+ return true;
+
+ if (m_grv.CountOverlaps(GenomicRegion(r.ChrID(), r.Position(), r.PositionEnd())))
+ return true;
+
+ if (!m_applies_to_mate)
+ return false;
+ if (m_grv.CountOverlaps(GenomicRegion(r.MateChrID(), r.MatePosition(), r.MatePosition() + r.Length())))
+ return true;
+
+ return false;
+}
+
+// checks which rule a read applies to (using the hierarchy stored in m_regions).
+// if a read does not satisfy a rule it is excluded.
+ bool ReadFilterCollection::isValid(const BamRecord &r) {
+
+ ++m_count_seen;
+
+ DEBUGIV(r, "starting RFC isValid")
+
+ if (m_regions.size() == 0)
+ return true;
+
+ DEBUGIV(r, "starting RFC isValid with non-empty regions")
+
+ bool is_valid = false;
+ bool exclude_hit = false;
+
+ for (std::vector<ReadFilter>::iterator it = m_regions.begin(); it != m_regions.end(); ++it) {
+
+ // only check read validity if it overlaps region
+ if (!it->isReadOverlappingRegion(r))
+ continue;
+
+ // check the region with all its rules
+ if (it->isValid(r)) {
+
+ // if this is excluder region, exclude read
+ if (it->excluder)
+ exclude_hit = true;
+
+ // in case we do fall through, track that we passed here
+ is_valid = true;
+
+ }
+ }
+
+ // found a hit in a rule
+ if (is_valid && !exclude_hit) {
+ ++m_count;
+ return true;
+ }
+
+ return false;
+}
+
+ void ReadFilter::AddRule(const AbstractRule& ar) {
+ m_abstract_rules.push_back(ar);
+ }
+
+ // constructor to make a ReadFilterCollection from a JSON rules script
+ // (either a literal JSON string or a path to a file containing one).
+ // Each region's intervals are stored in a GRC with an interval tree for overlap queries
+ ReadFilterCollection::ReadFilterCollection(const std::string& script, const BamHeader& hdr) : m_count(0), m_count_seen(0) {
+
+ // if the script is a path to a readable file, read its contents into a string
+ std::ifstream iscript(script.c_str());
+ std::stringstream ss;
+ std::string tscript = script;
+ if (iscript.is_open()) {
+ char c = iscript.get();
+ while (iscript.good()) {
+ ss << c;
+ c = iscript.get();
+ }
+ tscript = ss.str();
+ }
+
+ // set up JsonCPP reader and attempt to parse script
+ Json::Value root;
+ Json::Reader reader;
+ if ( !reader.parse(tscript, root)) {
+ if (script.empty()) {
+ std::cerr << "JSON script is empty. Setting default to filter all reads" << std::endl;
+ return;
+ }
+ throw std::invalid_argument("ERROR: failed to parse JSON script");
+ }
+
+ Json::Value null(Json::nullValue);
+
+ int level = 1;
+
+ // assign the global rule if there is one
+ // remove from the rest of the rules
+ Json::Value glob = root.removeMember("global");
+ if (!glob.isNull())
+ rule_all.parseJson(glob);
+
+ // iterate over the regions in the script
+ for (Json::ValueConstIterator regions = root.begin(); regions != root.end(); ++regions) {
+
+ ReadFilter mr;
+
+ // check if mate applies
+ mr.m_applies_to_mate = convert_to_bool(*regions, "matelink");
+
+ // check for region padding
+ int pad = regions->get("pad", 0).asInt();
+
+ // set the region
+ std::string reg;
+ Json::Value v = regions->get("region", null);
+ if (v != null) {
+ reg = v.asString();
+ mr.id = mr.id + reg;
+ }
+
+ // actually parse the region
+ if (reg == "WG" || reg.empty())
+ mr.m_grv.clear(); // ensure it is whole-genome
+ else {
+ GRC regr(reg, hdr);
+ regr.Pad(pad);
+ mr.setRegions(regr);
+ }
+ // debug mr.setRegionFromFile(reg, hdr);
+
+ // check if it is an excluder region
+ mr.excluder = false; // default is no exclude
+ v = regions->get("exclude", null);
+ if (v != null) {
+ mr.excluder = v.asBool();
+ if (mr.excluder)
+ mr.id = mr.id + "_exclude";
+ }
+
+ // set the rules
+ v = regions->get("rules", null);
+ if (!v.size()) {
+ //std::cerr << " !!!! RULES size is zero. !!!! " << std::endl;
+ //exit(EXIT_FAILURE);
+ }
+
+ // loop through the rules
+ for (Json::ValueIterator vv = v.begin(); vv != v.end(); ++vv) {
+ if (*vv != null) {
+ AbstractRule ar = rule_all; // always start with the global rule
+ ar.parseJson(*vv);
+ // add the rule to the region
+ mr.m_abstract_rules.push_back(ar);
+ }
+ }
+
+ // check that the regions have at least one rule
+ // if it doesn't, give it the global accept-all rule
+ if (!mr.m_abstract_rules.size())
+ mr.m_abstract_rules.push_back(rule_all);
+
+ mr.id = tostring(level);
+
+ m_regions.push_back(mr);
+
+ }
+
+ // check that there is at least one non-excluder region.
+ // if not, give global includer
+ bool has_includer = false;
+ for (std::vector<ReadFilter>::const_iterator kk = m_regions.begin(); kk != m_regions.end(); ++kk)
+ if (!kk->excluder)
+ has_includer = true;
+ if (!has_includer) {
+ ReadFilter mr;
+ mr.m_abstract_rules.push_back(rule_all);
+ mr.id = "WG_includer";
+ m_regions.push_back(mr);
+ }
+
+ }
+
+ void ReadFilter::setRegions(const GRC& g) {
+ m_grv = g;
+ m_grv.CreateTreeMap();
+ }
+
+ void ReadFilter::addRegions(const GRC& g) {
+ m_grv.Concat(g);
+ m_grv.MergeOverlappingIntervals();
+ m_grv.CreateTreeMap();
+ }
+
+
+ // print the ReadFilterCollection
+ std::ostream& operator<<(std::ostream &out, const ReadFilterCollection &mr) {
+
+ out << "----------ReadFilterCollection-------------" << std::endl;
+
+ for (std::vector<ReadFilter>::const_iterator it = mr.m_regions.begin(); it != mr.m_regions.end(); ++it)
+ out << *it << std::endl;
+ out << "------------------------------------------";
+ return out;
+
+ }
+
+// print a ReadFilter information
+std::ostream& operator<<(std::ostream &out, const ReadFilter &mr) {
+
+ std::string file_print = !mr.m_grv.size() ? "WHOLE GENOME" : mr.m_region_file;
+ out << (mr.excluder ? "--Exclude Region: " : "--Include Region: ") << file_print;
+ if (mr.m_grv.size()) {
+ //out << " --Size: " << AddCommas<int>(mr.m_width);
+ out << " Matelink: " << (mr.m_applies_to_mate ? "ON" : "OFF");
+ if (mr.m_grv.size() == 1)
+ out << " Region : " << mr.m_grv[0] << std::endl;
+ else
+ out << " " << mr.m_grv.size() << " regions" << std::endl;
+ } else {
+ out << std::endl;
+ }
+
+ for (std::vector<AbstractRule>::const_iterator it = mr.m_abstract_rules.begin(); it != mr.m_abstract_rules.end(); ++it)
+ out << *it << std::endl;
+ return out;
+}
+
+ void ReadFilterCollection::AddReadFilter(const ReadFilter& rf) {
+ m_regions.push_back(rf);
+ }
+
+ ReadFilter::~ReadFilter() {}
+
+ bool Flag::parseJson(const Json::Value& value, const std::string& name) {
+
+ if (value.isMember(name.c_str())) {
+ convert_to_bool(value, name) ? setOn() : setOff();
+ return true;
+ }
+
+ return false;
+
+ }
+
+ void FlagRule::parseJson(const Json::Value& value) {
+
+ Json::Value null(Json::nullValue);
+ if (value.isMember("allflag"))
+ setAllOnFlag(parse_json_int(value.get("allflag", null)));
+ if (value.isMember("!allflag"))
+ setAllOffFlag(parse_json_int(value.get("!allflag", null)));
+ if (value.isMember("anyflag"))
+ setAnyOnFlag(parse_json_int(value.get("anyflag", null)));
+ if (value.isMember("!anyflag"))
+ setAnyOffFlag(parse_json_int(value.get("!anyflag", null)));
+
+ // if any named flag is present, unset 'every' so the rule knows it cannot skip flag checking
+ if (dup.parseJson(value, "duplicate")) every = false;
+ if (supp.parseJson(value, "supplementary")) every = false;
+ if (qcfail.parseJson(value, "qcfail")) every = false;
+ if (hardclip.parseJson(value, "hardclip")) every = false;
+ if (fwd_strand.parseJson(value, "fwd_strand")) every = false;
+ if (mate_rev_strand.parseJson(value, "mate_rev")) every = false;
+ if (mate_fwd_strand.parseJson(value, "mate_fwd")) every = false;
+ if (mate_mapped.parseJson(value, "mate_mapped")) every = false;
+ if (mapped.parseJson(value, "mapped")) every = false;
+ if (ff.parseJson(value, "ff")) every = false;
+ if (fr.parseJson(value, "fr")) every = false;
+ if (rf.parseJson(value, "rf")) every = false;
+ if (rr.parseJson(value, "rr")) every = false;
+ if (ic.parseJson(value, "ic")) every = false;
+
+ }
+
+ void Range::parseJson(const Json::Value& value, const std::string& name) {
+ Json::Value null(Json::nullValue);
+ Json::Value v = value.get(name, null);
+
+ if (v != null) {
+ if (v.size() > 2) {
+ std::cerr << " ERROR. Not expecting array size " << v.size() << " for Range " << name << std::endl;
+ } else {
+ m_every = false;
+ m_inverted = false;
+
+ if (v.isArray()) {
+ m_min = v[0].asInt();
+ m_max = v[1].asInt();
+ } else if (v.isInt()) {
+ m_min = v.asInt();
+ m_max = INT_MAX;
+ } else if (v.isBool()) {
+ m_min = v.asBool() ? 1 : INT_MAX; // if true, [1,MAX], if false [MAX,1] (not 1-MAX)
+ m_max = v.asBool() ? INT_MAX : 1;
+ } else {
+ throw std::invalid_argument("Unexpected type for range flag: " + name);
+ }
+
+ if (m_min > m_max) {
+ m_inverted = true;
+ std::swap(m_min, m_max); // make min always lower
+ }
+ }
+
+ }
+ }
+
+ void AbstractRule::parseJson(const Json::Value& value) {
+
+ // parse read group
+ const std::string rg = "rg";
+ if (value.isMember(rg.c_str())) {
+ Json::Value null(Json::nullValue);
+ Json::Value v = value.get(rg, null);
+ assert(v != null);
+ read_group = v.asString();
+ }
+
+ // set the ID
+ std::vector<std::string> mn = value.getMemberNames();
+ for (std::vector<std::string>::const_iterator i = mn.begin(); i != mn.end(); ++i)
+ id += *i + ";";
+
+ // trimming the trailing ';' would need std::string::pop_back, which is not C++98 compatible
+ //if (id.length())
+ // id.pop_back();
+
+ // parse the flags
+ fr.parseJson(value);
+
+ isize.parseJson(value, "isize");
+ mapq.parseJson(value, "mapq");
+ len.parseJson(value, "length");
+ clip.parseJson(value, "clip");
+ nbases.parseJson(value, "nbases");
+ ins.parseJson(value, "ins");
+ del.parseJson(value, "del");
+ nm.parseJson(value, "nm");
+ xp.parseJson(value, "xp");
+
+ // parse the subsample data
+ parseSubLine(value);
+
+ // parse the motif line
+ parseSeqLine(value);
+
+ }
+
+
+ // main function for determining if a read is valid
+ bool AbstractRule::isValid(const BamRecord &r) {
+
+ DEBUGIV(r, "starting AR:isValid")
+
+ // if the rule accepts everything, pass immediately
+ if (isEvery())
+ return true;
+
+ // check if it is a subsample
+ if (subsam_frac < 1) {
+ uint32_t k = __ac_Wang_hash(__ac_X31_hash_string(r.QnameChar()) ^ subsam_seed);
+ if ((double)(k&0xffffff) / 0x1000000 >= subsam_frac)
+ return false;
+ }
+
+ // check the insert size (e.g. for selecting discordant reads)
+ bool isize_pass = isize.isValid(r.FullInsertSize());
+
+ DEBUGIV("isize_pass: " + isize_pass, r)
+
+ if (!isize_pass) {
+ return false;
+ }
+
+ // check for a matching read group
+ if (!read_group.empty()) {
+ std::string RG = r.ParseReadGroup();
+ if (!RG.empty() && RG != read_group)
+ return false;
+ }
+
+ // check for valid mapping quality
+ if (!mapq.isEvery())
+ if (!mapq.isValid(r.MapQuality()))
+ return false;
+
+ DEBUGIV(r, "mapq pass")
+
+ // check for valid flags
+ if (!fr.isValid(r))
+ return false;
+
+ DEBUGIV(r, "flag pass")
+
+ // check the CIGAR
+ if (!ins.isEvery() || !del.isEvery()) {
+ if (!ins.isValid(r.MaxInsertionBases()))
+ return false;
+ if (!del.isValid(r.MaxDeletionBases()))
+ return false;
+ }
+
+ DEBUGIV(r, "cigar pass")
+
+ // get the sequence as trimmed
+ std::string tseq = r.QualitySequence(); //AddZTag("GV", r.Sequence().substr(startpoint, new_len));
+
+#ifdef HAVE_C11
+ // check for aho corasick motif match
+ if (aho.count) {
+ if (!aho.QueryText(tseq))
+ return false;
+ DEBUGIV(r, "aho pass")
+ }
+#endif
+
+ // check for valid NM
+ if (!nm.isEvery()) {
+ int32_t nm_val = r.GetIntTag("NM");
+ if (!nm.isValid(nm_val))
+ return false;
+ DEBUGIV(r, "NM pass")
+ }
+
+ // check the N bases
+ if (!nbases.isEvery()) {
+ size_t n = r.CountNBases();
+ if (!nbases.isValid(n))
+ return false;
+ DEBUGIV(r, "N bases pass")
+ }
+
+ // check for valid length
+ if (!len.isValid(tseq.length()))
+ return false;
+ DEBUGIV(r, "len pass")
+
+ // check for valid clip
+ int new_clipnum = r.NumClip() - (r.Length() - tseq.length()); // get clips, minus amount trimmed off
+ if (!clip.isValid(new_clipnum))
+ return false;
+ DEBUGIV(r, "clip pass with clip size " + tostring(new_clipnum))
+
+ // check for secondary alignments
+ if (!xp.isEvery()) {
+ if (!xp.isValid(r.CountBWASecondaryAlignments())) {
+ return false;
+ }
+ DEBUGIV(r, "XP pass")
+ }
+
+ DEBUGIV(r, "**** READ ACCEPTED IN AR:ISVALID")
+ return true;
+ }
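The subsample check near the top of AbstractRule::isValid deserves a standalone note: a read is kept when hash(Qname) XOR subsam_seed, reduced to its low 24 bits and scaled into [0,1), falls below subsam_frac, so the decision is deterministic per read name and both mates of a pair are kept or dropped together. Below is a minimal sketch of that decision, assuming only the X31 string hash that khash's __ac_X31_hash_string uses; the extra __ac_Wang_hash mixing step applied upstream is omitted for brevity, so treat it as an illustration rather than a drop-in replacement.

    #include <stdint.h>
    #include <string>

    // X31 string hash (same recurrence as khash's __ac_X31_hash_string)
    static uint32_t x31_hash(const std::string& s) {
      uint32_t h = 0;
      for (size_t i = 0; i < s.size(); ++i)
        h = (h << 5) - h + static_cast<uint32_t>(s[i]);
      return h;
    }

    // keep a deterministic fraction 'frac' of reads, keyed on the read name
    static bool keep_read(const std::string& qname, uint32_t seed, double frac) {
      const uint32_t k = x31_hash(qname) ^ seed;
      return static_cast<double>(k & 0xffffff) / 0x1000000 < frac;
    }
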
+
+ bool FlagRule::isValid(const BamRecord &r) {
+
+ DEBUGIV(r, "flagrule start")
+
+ if (isEvery())
+ return true;
+
+ // if have on or off flag, use that
+ // 0001100 - all flag
+ // 0101000 - flag
+ // -------
+ // 0001000 - should fail all flag. Should pass any flag
+ if (m_all_on_flag && !( (r.AlignmentFlag() & m_all_on_flag) == m_all_on_flag) ) // pass only if every bit in m_all_on_flag is set
+ return false;
+ if (m_all_off_flag && ( (r.AlignmentFlag() & m_all_off_flag) == m_all_off_flag) ) // fail if every bit in m_all_off_flag is set
+ return false;
+
+ // same checks for the "any" variants
+ if (m_any_on_flag && !(r.AlignmentFlag() & m_any_on_flag) ) // pass only if at least one bit in m_any_on_flag is set
+ return false;
+ if (m_any_off_flag && (r.AlignmentFlag() & m_any_off_flag)) // fail if any bit in m_any_off_flag is set
+ return false;
+
+ DEBUGIV(r, "FlagRule::isValid checking named flags")
+
+ if (!dup.isNA())
+ if ((dup.isOff() && r.DuplicateFlag()) || (dup.isOn() && !r.DuplicateFlag()))
+ return false;
+ if (!supp.isNA())
+ if ((supp.isOff() && r.SecondaryFlag()) || (supp.isOn() && !r.SecondaryFlag()))
+ return false;
+ if (!qcfail.isNA())
+ if ((qcfail.isOff() && r.QCFailFlag()) || (qcfail.isOn() && !r.QCFailFlag()))
+ return false;
+ if (!mapped.isNA())
+ if ( (mapped.isOff() && r.MappedFlag()) || (mapped.isOn() && !r.MappedFlag()))
+ return false;
+ if (!mate_mapped.isNA())
+ if ( (mate_mapped.isOff() && r.MateMappedFlag()) || (mate_mapped.isOn() && !r.MateMappedFlag()) )
+ return false;
+
+ // check for hard clips
+ if (!hardclip.isNA()) { // check whether hard-clipped reads should be kept or dropped
+ if (r.CigarSize() > 1) {
+ bool ishclipped = r.NumHardClip() > 0;
+ if ( (ishclipped && hardclip.isOff()) || (!ishclipped && hardclip.isOn()) )
+ return false;
+ }
+ }
+
+ // check for orientation
+ // check first if we need to even look for orientation
+ bool ocheck = !ff.isNA() || !fr.isNA() || !rf.isNA() || !rr.isNA() || !ic.isNA();
+
+ // orientation checks require a mapped pair; if not both mapped, fail
+ if (!r.PairMappedFlag() && ocheck)
+ return false;
+
+ if ( ocheck ) {
+
+ // bool first = r.Position() < r.MatePosition();
+ //bool bfr = (first && (!r.ReverseFlag() && r.MateReverseFlag())) || (!first && r.ReverseFlag() && !r.MateReverseFlag());
+ //bool brr = r.ReverseFlag() && r.MateReverseFlag();
+ //bool brf = (first && (r.ReverseFlag() && !r.MateReverseFlag())) || (!first && !r.ReverseFlag() && r.MateReverseFlag());
+ //bool bff = !r.ReverseFlag() && !r.MateReverseFlag();
+
+ bool bic = r.Interchromosomal();
+
+ int PO = r.PairOrientation();
+
+ // it is FR and it CANNOT be FR (off), or it is not FR and it MUST be FR (on)
+ // orientation is not defined for inter-chromosomal pairs, so exclude those with !ic
+ if (!bic) { // PROCEED IF INTRA-CHROMOSOMAL
+ //if ( (bfr && fr.isOff()) || (!bfr && fr.isOn()))
+ if ( (PO == FRORIENTATION && fr.isOff()) || (PO != FRORIENTATION && fr.isOn()))
+ return false;
+ //if ( (brr && rr.isOff()) || (!brr && rr.isOn()))
+ if ( (PO == RRORIENTATION && rr.isOff()) || (PO != RRORIENTATION && rr.isOn()))
+ return false;
+ //if ( (brf && rf.isOff()) || (!brf && rf.isOn()))
+ if ( (PO == RFORIENTATION && rf.isOff()) || (PO != RFORIENTATION && rf.isOn()))
+ return false;
+ //if ( (bff && ff.isOff()) || (!bff && ff.isOn()))
+ if ( (PO == FFORIENTATION && ff.isOff()) || (PO != FFORIENTATION && ff.isOn()))
+ return false;
+ }
+ if ( (bic && ic.isOff()) || (!bic && ic.isOn()))
+ return false;
+
+ }
+
+ return true;
+
+}
+
+// define how to print
+std::ostream& operator<<(std::ostream &out, const AbstractRule &ar) {
+
+ out << " Rule: ";
+ if (ar.isEvery()) {
+ out << " ALL";
+ } else {
+ if (!ar.read_group.empty())
+ out << "Read Group: " << ar.read_group << " -- ";
+ if (!ar.isize.isEvery())
+ out << "isize:" << ar.isize << " -- " ;
+ if (!ar.mapq.isEvery())
+ out << "mapq:" << ar.mapq << " -- " ;
+ if (!ar.len.isEvery())
+ out << "length:" << ar.len << " -- ";
+ if (!ar.clip.isEvery())
+ out << "clip:" << ar.clip << " -- ";
+ if (!ar.nm.isEvery())
+ out << "nm:" << ar.nm << " -- ";
+ if (!ar.xp.isEvery())
+ out << "xp:" << ar.xp << " -- ";
+ if (!ar.nbases.isEvery())
+ out << "nbases:" << ar.nbases << " -- ";
+ if (!ar.ins.isEvery())
+ out << "ins:" << ar.ins << " -- ";
+ if (!ar.del.isEvery())
+ out << "del:" << ar.del << " -- ";
+ if (ar.subsam_frac < 1)
+ out << "sub:" << ar.subsam_frac << " -- ";
+#ifdef HAVE_C11
+ if (ar.aho.count)
+ out << "motif: " << ar.aho.file << " -- ";
+#endif
+ out << ar.fr;
+ }
+ return out;
+}
+
+// define how to print
+std::ostream& operator<<(std::ostream &out, const FlagRule &fr) {
+
+ if (fr.isEvery()) {
+ out << "Flag: ALL";
+ return out;
+ }
+
+ std::string keep = "Flag ON: ";
+ std::string remo = "Flag OFF: ";
+
+ if (fr.m_all_on_flag)
+ keep += "[(all)" + tostring(fr.m_all_on_flag) + "],";
+ if (fr.m_all_off_flag)
+ remo += "[(all)" + tostring(fr.m_all_off_flag) + "],";
+
+ if (fr.m_any_on_flag)
+ keep += "[(any)" + tostring(fr.m_any_on_flag) + "],";
+ if (fr.m_any_off_flag)
+ remo += "[(any)" + tostring(fr.m_any_off_flag) + "],";
+
+ if (fr.dup.isOff())
+ remo += "duplicate,";
+ if (fr.dup.isOn())
+ keep += "duplicate,";
+
+ if (fr.supp.isOff())
+ remo += "supplementary,";
+ if (fr.supp.isOn())
+ keep += "supplementary,";
+
+ if (fr.qcfail.isOff())
+ remo += "qcfail,";
+ if (fr.qcfail.isOn())
+ keep += "qcfail,";
+
+ if (fr.hardclip.isOff())
+ remo += "hardclip,";
+ if (fr.hardclip.isOn())
+ keep += "hardclip,";
+
+ if (fr.paired.isOff())
+ remo += "paired,";
+ if (fr.paired.isOn())
+ keep += "paired,";
+
+
+
+ if (fr.ic.isOff())
+ remo += "ic,";
+ if (fr.ic.isOn())
+ keep += "ic,";
+
+ if (fr.ff.isOff())
+ remo += "ff,";
+ if (fr.ff.isOn())
+ keep += "ff,";
+
+ if (fr.fr.isOff())
+ remo += "fr,";
+ if (fr.fr.isOn())
+ keep += "fr,";
+
+ if (fr.rr.isOff())
+ remo += "rr,";
+ if (fr.rr.isOn())
+ keep += "rr,";
+
+ if (fr.rf.isOff())
+ remo += "rf,";
+ if (fr.rf.isOn())
+ keep += "rf,";
+
+ if (fr.mapped.isOff())
+ remo += "mapped,";
+ if (fr.mapped.isOn())
+ keep += "mapped,";
+
+ if (fr.mate_mapped.isOff())
+ remo += "mate_mapped,";
+ if (fr.mate_mapped.isOn())
+ keep += "mate_mapped,";
+
+ keep = keep.length() > 10 ? keep.substr(0, keep.length() - 1) : ""; // remove trailing comma
+ remo = remo.length() > 10 ? remo.substr(0, remo.length() - 1) : ""; // remove trailing comma
+
+ if (!keep.empty() && !remo.empty())
+ out << keep << " -- " << remo;
+ else if (!keep.empty())
+ out << keep;
+ else
+ out << remo;
+
+ return out;
+}
+
+// define how to print
+std::ostream& operator<<(std::ostream &out, const Range &r) {
+ if (r.isEvery())
+ out << "ALL";
+ else
+ out << (r.m_inverted ? "NOT " : "") << "[" << r.m_min << "," << (r.m_max == INT_MAX ? "MAX" : tostring(r.m_max)) << "]";
+ return out;
+}
+
+ void AbstractRule::parseSeqLine(const Json::Value& value) {
+
+#ifdef HAVE_C11
+ bool i = false; // invert motif?
+#endif
+ std::string motif_file;
+ Json::Value null(Json::nullValue);
+ if (value.get("motif", null) != null)
+ motif_file = value.get("motif", null).asString();
+ else if (value.get("!motif", null) != null) {
+ motif_file = value.get("!motif", null).asString();
+#ifdef HAVE_C11
+ i = true;
+#endif
+ }
+ else
+ return;
+#ifdef HAVE_C11
+ addMotifRule(motif_file, i);
+#else
+ if (!motif_file.empty())
+ std::cerr << "WARNING: Need to compile with C++11 for Aho-Corasick matching" << std::endl;
+#endif
+
+ return;
+
+ }
+
+#ifdef HAVE_C11
+ void AbstractRule::addMotifRule(const std::string& f, bool inverted) {
+ std::cerr << "...making the AhoCorasick trie from " << f << std::endl;
+ aho.TrieFromFile(f);
+ std::cerr << "...finished making AhoCorasick trie with " << AddCommas(aho.count) << " motifs" << std::endl;
+ aho.inv = inverted;
+ }
+
+ void AhoCorasick::TrieFromFile(const std::string& f) {
+
+ file = f;
+
+ // open the sequence file
+ std::ifstream iss(f.c_str());
+ if (!iss || !read_access_test(f))
+ throw std::runtime_error("AhoCorasick::TrieFromFile - Cannot read file: " + f);
+
+ // make the Aho-Corasick trie
+ std::string pat;
+ while (getline(iss, pat, '\n')) {
+ ++count;
+ AddMotif(pat);
+ }
+ }
+#endif
+
+ void AbstractRule::parseSubLine(const Json::Value& value) {
+ Json::Value null(Json::nullValue);
+ if (value.get("subsample", null) != null)
+ subsam_frac = value.get("subample", null).asDouble();
+ }
+
+GRC ReadFilterCollection::getAllRegions() const
+{
+ GRC out;
+
+ for (std::vector<ReadFilter>::const_iterator i = m_regions.begin(); i != m_regions.end(); ++i)
+ out.Concat(i->m_grv);
+
+ return out;
+}
+
+#ifdef HAVE_C11
+ int AhoCorasick::QueryText(const std::string& t) const {
+ auto matches = aho_trie->parse_text(t);
+ return matches.size();
+ }
+#endif
+
+ }
+}
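A minimal usage sketch for the filter machinery above. The JSON layout mirrors what the parsers in this file consume: named region objects that may carry "region", "pad", "matelink", "exclude" and a "rules" array whose keys ("mapq", "duplicate", "isize", ...) are handled by AbstractRule::parseJson and FlagRule::parseJson. The BamReader calls (Open, Header, GetNextRecord) are assumed from SeqLib/BamReader.h elsewhere in this commit, and the BAM path is a placeholder.

    #include <iostream>
    #include <string>
    #include "SeqLib/BamReader.h"
    #include "SeqLib/ReadFilter.h"

    int main() {
      // whole-genome rule set: keep non-duplicate reads with MAPQ in [10,60]
      const std::string json =
        "{ \"wg_rule\" : { \"rules\" : [ { \"mapq\" : [10, 60], \"duplicate\" : false } ] } }";

      SeqLib::BamReader br;
      if (!br.Open("in.bam"))          // placeholder path
        return 1;

      SeqLib::Filter::ReadFilterCollection rfc(json, br.Header());

      SeqLib::BamRecord rec;
      size_t kept = 0;
      while (br.GetNextRecord(rec))
        if (rfc.isValid(rec))
          ++kept;
      std::cerr << "kept " << kept << " reads" << std::endl;
      return 0;
    }
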
diff --git a/src/RefGenome.cpp b/src/RefGenome.cpp
new file mode 100644
index 0000000..6f7a695
--- /dev/null
+++ b/src/RefGenome.cpp
@@ -0,0 +1,61 @@
+#include "SeqLib/RefGenome.h"
+
+#include <stdexcept>
+#include "SeqLib/SeqLibUtils.h"
+
+namespace SeqLib {
+
+ bool RefGenome::LoadIndex(const std::string& file) {
+
+ // clear the old one
+ if (index)
+ fai_destroy(index);
+
+ index = NULL;
+
+ // check that its readable
+ if (!read_access_test(file)) {
+ return false;
+ //throw std::invalid_argument("RefGenome: file not found - " + file);
+ }
+
+ // load it in
+ index = fai_load(file.c_str());
+
+ if (!index)
+ return false;
+
+ return true;
+
+ }
+
+ std::string RefGenome::QueryRegion(const std::string& chr_name, int32_t p1, int32_t p2) const {
+
+ // check that we have a loaded index
+ if (!index)
+ throw std::invalid_argument("RefGenome::queryRegion index not loaded");
+
+ // check input is OK
+ if (p1 > p2)
+ throw std::invalid_argument("RefGenome::queryRegion p1 must be <= p2");
+ if (p1 < 0)
+ throw std::invalid_argument("RefGenome::queryRegion p1 must be >= 0");
+
+ int len;
+ char * f = faidx_fetch_seq(index, const_cast<char*>(chr_name.c_str()), p1, p2, &len);
+
+ if (!f)
+ throw std::invalid_argument("RefGenome::queryRegion - Could not find valid sequence");
+
+ std::string out(f);
+
+ free(f);
+
+ if (out.empty())
+ throw std::invalid_argument("RefGenome::queryRegion - Returning empty query on " + chr_name + ":" + tostring(p1) + "-" + tostring(p2));
+
+ return (out);
+
+ }
+
+}
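A short usage sketch for RefGenome, assuming an indexed FASTA; the path and contig name are placeholders. Coordinates are 0-based with inclusive endpoints, since QueryRegion passes them straight to htslib's faidx_fetch_seq.

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include "SeqLib/RefGenome.h"

    int main() {
      SeqLib::RefGenome rg;
      if (!rg.LoadIndex("ref.fa"))     // placeholder path to an indexed FASTA
        return 1;
      try {
        // 100 bases: positions 1,000,000 through 1,000,099 (0-based, inclusive)
        std::string seq = rg.QueryRegion("chr1", 1000000, 1000099);
        std::cout << seq << std::endl;
      } catch (const std::invalid_argument& e) {
        std::cerr << e.what() << std::endl;
      }
      return 0;
    }
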
diff --git a/src/SeqPlot.cpp b/src/SeqPlot.cpp
new file mode 100644
index 0000000..5753c02
--- /dev/null
+++ b/src/SeqPlot.cpp
@@ -0,0 +1,85 @@
+#include "SeqLib/SeqPlot.h"
+
+namespace SeqLib {
+
+std::string SeqPlot::PlotAlignmentRecords(const BamRecordVector& brv) const {
+
+ PlottedReadVector plot_vec;
+
+ for (BamRecordVector::const_iterator i = brv.begin(); i != brv.end(); ++i) {
+
+ // get the position in the view window
+ if (i->ChrID() != m_view.chr)
+ continue;
+
+ int pos = i->Position() - m_view.pos1;
+ if (pos < 0)
+ continue;
+
+ if (i->PositionEnd() > m_view.pos2)
+ continue;
+
+ // plot with gaps
+ std::string tseq = i->Sequence();
+ std::string gapped_seq;
+
+ size_t p = i->AlignmentPosition(); // move along on sequence, starting at first non-clipped base
+ Cigar cc = i->GetCigar();
+ for (Cigar::const_iterator c = cc.begin(); c != cc.end(); ++c) {
+ if (c->Type() == 'M') { // match/mismatch: copy the aligned bases from the read
+ assert(p + c->Length() <= tseq.length());
+ gapped_seq += tseq.substr(p, c->Length());
+ } else if (c->Type() == 'D') {
+ gapped_seq += std::string(c->Length(), '-');
+ }
+
+ if (c->Type() == 'I' || c->Type() == 'M')
+ p += c->Length();
+ }
+
+ std::stringstream msg;
+ msg << i->Qname() << ">>>" << (i->ChrID() + 1) << ":" << i->Position();
+
+ // add to the read plot
+ plot_vec.push_back(PlottedRead(pos, gapped_seq, msg.str()));
+
+ }
+
+ // sort them
+ std::sort(plot_vec.begin(), plot_vec.end());
+
+ // make a list of lines
+ PlottedReadLineVector line_vec;
+
+ // plot the reads from the ReadPlot vector
+ for (PlottedReadVector::iterator i = plot_vec.begin(); i != plot_vec.end(); ++i) {
+ bool found = false;
+ for (PlottedReadLineVector::iterator j = line_vec.begin(); j != line_vec.end(); ++j) {
+ if (j->readFits(*i)) { // it fits here
+ j->addRead(&(*i));
+ found = true;
+ break;
+ }
+ }
+ if (!found) { // didn't fit anywhere, so make a new line
+ PlottedReadLine prl;
+ prl.pad = m_pad;
+ prl.contig_len = m_view.Width(); //ac.getSequence().length();
+ prl.addRead(&(*i));
+ line_vec.push_back(prl);
+ }
+ }
+
+ std::stringstream ss;
+
+ // plot the lines. Add contig identifier to each
+ for (PlottedReadLineVector::const_iterator i = line_vec.begin(); i != line_vec.end(); ++i)
+ ss << (*i) << std::endl;
+
+ return ss.str();
+
+
+}
+
+
+}
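To make the gap handling in PlotAlignmentRecords easier to follow in isolation, here is a self-contained sketch of the same CIGAR walk over plain (op, length) pairs; it does not use SeqLib's Cigar type. M copies query bases, D pads with '-', and I consumes query bases without emitting them; clipped bases are skipped by starting at the first aligned base, as AlignmentPosition() does.

    #include <cassert>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    static std::string gapped(const std::string& seq, size_t start,
                              const std::vector<std::pair<char, int> >& cigar) {
      std::string out;
      size_t p = start;                     // current position in the query sequence
      for (size_t i = 0; i < cigar.size(); ++i) {
        const char op = cigar[i].first;
        const size_t len = cigar[i].second;
        if (op == 'M') {                    // match/mismatch: copy query bases
          assert(p + len <= seq.length());
          out += seq.substr(p, len);
        } else if (op == 'D') {             // deletion: pad with gaps
          out += std::string(len, '-');
        }
        if (op == 'I' || op == 'M')         // only I and M consume query bases
          p += len;
      }
      return out;
    }

    int main() {
      std::vector<std::pair<char, int> > cig;
      cig.push_back(std::make_pair('M', 4));
      cig.push_back(std::make_pair('D', 2));
      cig.push_back(std::make_pair('I', 1));
      cig.push_back(std::make_pair('M', 3));
      std::cout << gapped("ACGTTTAA", 0, cig) << std::endl;  // prints "ACGT--TAA"
      return 0;
    }
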
diff --git a/src/jsoncpp.cpp b/src/jsoncpp.cpp
new file mode 100644
index 0000000..62d5bda
--- /dev/null
+++ b/src/jsoncpp.cpp
@@ -0,0 +1,5247 @@
+/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/).
+/// It is intended to be used with #include "json/json.h"
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: LICENSE
+// //////////////////////////////////////////////////////////////////////
+
+/*
+The JsonCpp library's source code, including accompanying documentation,
+tests and demonstration applications, are licensed under the following
+conditions...
+
+The author (Baptiste Lepilleur) explicitly disclaims copyright in all
+jurisdictions which recognize such a disclaimer. In such jurisdictions,
+this software is released into the Public Domain.
+
+In jurisdictions which do not recognize Public Domain property (e.g. Germany as of
+2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is
+released under the terms of the MIT License (see below).
+
+In jurisdictions which recognize Public Domain property, the user of this
+software may choose to accept it either as 1) Public Domain, 2) under the
+conditions of the MIT License (see below), or 3) under the terms of dual
+Public Domain/MIT License conditions described here, as they choose.
+
+The MIT License is about as close to Public Domain as a license can get, and is
+described in clear, concise terms at:
+
+ http://en.wikipedia.org/wiki/MIT_License
+
+The full text of the MIT License follows:
+
+========================================================================
+Copyright (c) 2007-2010 Baptiste Lepilleur
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use, copy,
+modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+========================================================================
+(END LICENSE TEXT)
+
+The MIT license is compatible with both the GPL and commercial
+software, affording one all of the rights of Public Domain with the
+minor nuisance of being required to keep the above copyright notice
+and license text in the source code. Note also that by accepting the
+Public Domain "license" you can re-license your copy using whatever
+license you like.
+
+*/
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: LICENSE
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+#include "json/json.h"
+
+#ifndef JSON_IS_AMALGAMATION
+#error "Compile with -I PATH_TO_JSON_DIRECTORY"
+#endif
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: src/lib_json/json_tool.h
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef LIB_JSONCPP_JSON_TOOL_H_INCLUDED
+#define LIB_JSONCPP_JSON_TOOL_H_INCLUDED
+
+/* This header provides common string manipulation support, such as UTF-8,
+ * portable conversion from/to string...
+ *
+ * It is an internal header that must not be exposed.
+ */
+
+namespace Json {
+
+/// Converts a unicode code-point to UTF-8.
+static inline JSONCPP_STRING codePointToUTF8(unsigned int cp) {
+ JSONCPP_STRING result;
+
+ // based on description from http://en.wikipedia.org/wiki/UTF-8
+
+ if (cp <= 0x7f) {
+ result.resize(1);
+ result[0] = static_cast<char>(cp);
+ } else if (cp <= 0x7FF) {
+ result.resize(2);
+ result[1] = static_cast<char>(0x80 | (0x3f & cp));
+ result[0] = static_cast<char>(0xC0 | (0x1f & (cp >> 6)));
+ } else if (cp <= 0xFFFF) {
+ result.resize(3);
+ result[2] = static_cast<char>(0x80 | (0x3f & cp));
+ result[1] = static_cast<char>(0x80 | (0x3f & (cp >> 6)));
+ result[0] = static_cast<char>(0xE0 | (0xf & (cp >> 12)));
+ } else if (cp <= 0x10FFFF) {
+ result.resize(4);
+ result[3] = static_cast<char>(0x80 | (0x3f & cp));
+ result[2] = static_cast<char>(0x80 | (0x3f & (cp >> 6)));
+ result[1] = static_cast<char>(0x80 | (0x3f & (cp >> 12)));
+ result[0] = static_cast<char>(0xF0 | (0x7 & (cp >> 18)));
+ }
+
+ return result;
+}
+
+/// Returns true if ch is a control character (in range [1,31]).
+static inline bool isControlCharacter(char ch) { return ch > 0 && ch <= 0x1F; }
+
+enum {
+ /// Constant that specify the size of the buffer that must be passed to
+ /// uintToString.
+ uintToStringBufferSize = 3 * sizeof(LargestUInt) + 1
+};
+
+// Defines a char buffer for use with uintToString().
+typedef char UIntToStringBuffer[uintToStringBufferSize];
+
+/** Converts an unsigned integer to string.
+ * @param value Unsigned integer to convert to string
+ * @param current Input/Output string buffer.
+ * Must have at least uintToStringBufferSize chars free.
+ */
+static inline void uintToString(LargestUInt value, char*& current) {
+ *--current = 0;
+ do {
+ *--current = static_cast<char>(value % 10U + static_cast<unsigned>('0'));
+ value /= 10;
+ } while (value != 0);
+}
+
+/** Change ',' to '.' everywhere in buffer.
+ *
+ * We had a sophisticated way, but it did not work in WinCE.
+ * @see https://github.com/open-source-parsers/jsoncpp/pull/9
+ */
+static inline void fixNumericLocale(char* begin, char* end) {
+ while (begin < end) {
+ if (*begin == ',') {
+ *begin = '.';
+ }
+ ++begin;
+ }
+}
+
+} // namespace Json {
+
+#endif // LIB_JSONCPP_JSON_TOOL_H_INCLUDED
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: src/lib_json/json_tool.h
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: src/lib_json/json_reader.cpp
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2011 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include <json/assertions.h>
+#include <json/reader.h>
+#include <json/value.h>
+#include "json_tool.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+#include <utility>
+#include <cstdio>
+#include <cassert>
+#include <cstring>
+#include <istream>
+#include <sstream>
+#include <memory>
+#include <set>
+#include <limits>
+
+#if defined(_MSC_VER)
+#if !defined(WINCE) && defined(__STDC_SECURE_LIB__) && _MSC_VER >= 1500 // VC++ 9.0 and above
+#define snprintf sprintf_s
+#elif _MSC_VER >= 1900 // VC++ 14.0 and above
+#define snprintf std::snprintf
+#else
+#define snprintf _snprintf
+#endif
+#elif defined(__ANDROID__) || defined(__QNXNTO__)
+#define snprintf snprintf
+#elif __cplusplus >= 201103L
+#if !defined(__MINGW32__) && !defined(__CYGWIN__)
+#define snprintf std::snprintf
+#endif
+#endif
+
+#if defined(__QNXNTO__)
+#define sscanf std::sscanf
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1400 // VC++ 8.0
+// Disable warning about strdup being deprecated.
+#pragma warning(disable : 4996)
+#endif
+
+static int const stackLimit_g = 1000;
+static int stackDepth_g = 0; // see readValue()
+
+namespace Json {
+
+#if __cplusplus >= 201103L || (defined(_CPPLIB_VER) && _CPPLIB_VER >= 520)
+typedef std::unique_ptr<CharReader> CharReaderPtr;
+#else
+typedef std::auto_ptr<CharReader> CharReaderPtr;
+#endif
+
+// Implementation of class Features
+// ////////////////////////////////
+
+Features::Features()
+ : allowComments_(true), strictRoot_(false),
+ allowDroppedNullPlaceholders_(false), allowNumericKeys_(false) {}
+
+Features Features::all() { return Features(); }
+
+Features Features::strictMode() {
+ Features features;
+ features.allowComments_ = false;
+ features.strictRoot_ = true;
+ features.allowDroppedNullPlaceholders_ = false;
+ features.allowNumericKeys_ = false;
+ return features;
+}
+
+// Implementation of class Reader
+// ////////////////////////////////
+
+static bool containsNewLine(Reader::Location begin, Reader::Location end) {
+ for (; begin < end; ++begin)
+ if (*begin == '\n' || *begin == '\r')
+ return true;
+ return false;
+}
+
+// Class Reader
+// //////////////////////////////////////////////////////////////////
+
+Reader::Reader()
+ : errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
+ lastValue_(), commentsBefore_(), features_(Features::all()),
+ collectComments_() {}
+
+Reader::Reader(const Features& features)
+ : errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
+ lastValue_(), commentsBefore_(), features_(features), collectComments_() {
+}
+
+bool
+Reader::parse(const std::string& document, Value& root, bool collectComments) {
+ JSONCPP_STRING documentCopy(document.data(), document.data() + document.length());
+ std::swap(documentCopy, document_);
+ const char* begin = document_.c_str();
+ const char* end = begin + document_.length();
+ return parse(begin, end, root, collectComments);
+}
+
+bool Reader::parse(std::istream& sin, Value& root, bool collectComments) {
+ // std::istream_iterator<char> begin(sin);
+ // std::istream_iterator<char> end;
+ // Those would allow streamed input from a file, if parse() were a
+ // template function.
+
+ // Reading the whole stream into a JSONCPP_STRING makes a single copy of
+ // the document; std::string is not reference-counted in C++11 and later,
+ // so that copy cannot be elided here.
+ JSONCPP_STRING doc;
+ std::getline(sin, doc, (char)EOF);
+ return parse(doc.data(), doc.data() + doc.size(), root, collectComments);
+}
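+
+// Usage sketch for this deprecated Reader front end (illustrative values,
+// assuming the public declarations from <json/reader.h> and <json/value.h>):
+//
+//   Json::Reader reader(Json::Features::strictMode());
+//   Json::Value root;
+//   const std::string text = "{\"chrom\": \"1\", \"start\": 100}";
+//   if (!reader.parse(text, root))
+//     std::cerr << reader.getFormattedErrorMessages();
+//   else
+//     std::cout << root["start"].asInt() << std::endl;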
+
+bool Reader::parse(const char* beginDoc,
+ const char* endDoc,
+ Value& root,
+ bool collectComments) {
+ if (!features_.allowComments_) {
+ collectComments = false;
+ }
+
+ begin_ = beginDoc;
+ end_ = endDoc;
+ collectComments_ = collectComments;
+ current_ = begin_;
+ lastValueEnd_ = 0;
+ lastValue_ = 0;
+ commentsBefore_ = "";
+ errors_.clear();
+ while (!nodes_.empty())
+ nodes_.pop();
+ nodes_.push(&root);
+
+ stackDepth_g = 0; // Yes, this is bad coding, but options are limited.
+ bool successful = readValue();
+ Token token;
+ skipCommentTokens(token);
+ if (collectComments_ && !commentsBefore_.empty())
+ root.setComment(commentsBefore_, commentAfter);
+ if (features_.strictRoot_) {
+ if (!root.isArray() && !root.isObject()) {
+ // Set error location to start of doc, ideally should be first token found
+ // in doc
+ token.type_ = tokenError;
+ token.start_ = beginDoc;
+ token.end_ = endDoc;
+ addError(
+ "A valid JSON document must be either an array or an object value.",
+ token);
+ return false;
+ }
+ }
+ return successful;
+}
+
+bool Reader::readValue() {
+ // This is a non-reentrant way to support a stackLimit. Terrible!
+ // But this deprecated class has a security problem: Bad input can
+ // cause a seg-fault. This seems like a fair, binary-compatible way
+ // to prevent the problem.
+ if (stackDepth_g >= stackLimit_g) throwRuntimeError("Exceeded stackLimit in readValue().");
+ ++stackDepth_g;
+
+ Token token;
+ skipCommentTokens(token);
+ bool successful = true;
+
+ if (collectComments_ && !commentsBefore_.empty()) {
+ currentValue().setComment(commentsBefore_, commentBefore);
+ commentsBefore_ = "";
+ }
+
+ switch (token.type_) {
+ case tokenObjectBegin:
+ successful = readObject(token);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ case tokenArrayBegin:
+ successful = readArray(token);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ case tokenNumber:
+ successful = decodeNumber(token);
+ break;
+ case tokenString:
+ successful = decodeString(token);
+ break;
+ case tokenTrue:
+ {
+ Value v(true);
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenFalse:
+ {
+ Value v(false);
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenNull:
+ {
+ Value v;
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenArraySeparator:
+ case tokenObjectEnd:
+ case tokenArrayEnd:
+ if (features_.allowDroppedNullPlaceholders_) {
+ // "Un-read" the current token and mark the current value as a null
+ // token.
+ current_--;
+ Value v;
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(current_ - begin_ - 1);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ } // Else, fall through...
+ default:
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return addError("Syntax error: value, object or array expected.", token);
+ }
+
+ if (collectComments_) {
+ lastValueEnd_ = current_;
+ lastValue_ = &currentValue();
+ }
+
+ --stackDepth_g;
+ return successful;
+}
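+
+// Illustrative note on the stackDepth_g guard above: a pathological document
+// made of thousands of consecutive '[' characters recurses through
+// readArray()/readValue() once per nesting level, so the guard converts what
+// would otherwise be unbounded recursion (and a stack overflow) into a thrown
+// "Exceeded stackLimit in readValue()." runtime error after 1000 levels.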
+
+void Reader::skipCommentTokens(Token& token) {
+ if (features_.allowComments_) {
+ do {
+ readToken(token);
+ } while (token.type_ == tokenComment);
+ } else {
+ readToken(token);
+ }
+}
+
+bool Reader::readToken(Token& token) {
+ skipSpaces();
+ token.start_ = current_;
+ Char c = getNextChar();
+ bool ok = true;
+ switch (c) {
+ case '{':
+ token.type_ = tokenObjectBegin;
+ break;
+ case '}':
+ token.type_ = tokenObjectEnd;
+ break;
+ case '[':
+ token.type_ = tokenArrayBegin;
+ break;
+ case ']':
+ token.type_ = tokenArrayEnd;
+ break;
+ case '"':
+ token.type_ = tokenString;
+ ok = readString();
+ break;
+ case '/':
+ token.type_ = tokenComment;
+ ok = readComment();
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '-':
+ token.type_ = tokenNumber;
+ readNumber();
+ break;
+ case 't':
+ token.type_ = tokenTrue;
+ ok = match("rue", 3);
+ break;
+ case 'f':
+ token.type_ = tokenFalse;
+ ok = match("alse", 4);
+ break;
+ case 'n':
+ token.type_ = tokenNull;
+ ok = match("ull", 3);
+ break;
+ case ',':
+ token.type_ = tokenArraySeparator;
+ break;
+ case ':':
+ token.type_ = tokenMemberSeparator;
+ break;
+ case 0:
+ token.type_ = tokenEndOfStream;
+ break;
+ default:
+ ok = false;
+ break;
+ }
+ if (!ok)
+ token.type_ = tokenError;
+ token.end_ = current_;
+ return true;
+}
+
+void Reader::skipSpaces() {
+ while (current_ != end_) {
+ Char c = *current_;
+ if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
+ ++current_;
+ else
+ break;
+ }
+}
+
+bool Reader::match(Location pattern, int patternLength) {
+ if (end_ - current_ < patternLength)
+ return false;
+ int index = patternLength;
+ while (index--)
+ if (current_[index] != pattern[index])
+ return false;
+ current_ += patternLength;
+ return true;
+}
+
+bool Reader::readComment() {
+ Location commentBegin = current_ - 1;
+ Char c = getNextChar();
+ bool successful = false;
+ if (c == '*')
+ successful = readCStyleComment();
+ else if (c == '/')
+ successful = readCppStyleComment();
+ if (!successful)
+ return false;
+
+ if (collectComments_) {
+ CommentPlacement placement = commentBefore;
+ if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) {
+ if (c != '*' || !containsNewLine(commentBegin, current_))
+ placement = commentAfterOnSameLine;
+ }
+
+ addComment(commentBegin, current_, placement);
+ }
+ return true;
+}
+
+static JSONCPP_STRING normalizeEOL(Reader::Location begin, Reader::Location end) {
+ JSONCPP_STRING normalized;
+ normalized.reserve(static_cast<size_t>(end - begin));
+ Reader::Location current = begin;
+ while (current != end) {
+ char c = *current++;
+ if (c == '\r') {
+ if (current != end && *current == '\n')
+ // convert dos EOL
+ ++current;
+ // convert Mac EOL
+ normalized += '\n';
+ } else {
+ normalized += c;
+ }
+ }
+ return normalized;
+}
+
+void
+Reader::addComment(Location begin, Location end, CommentPlacement placement) {
+ assert(collectComments_);
+ const JSONCPP_STRING& normalized = normalizeEOL(begin, end);
+ if (placement == commentAfterOnSameLine) {
+ assert(lastValue_ != 0);
+ lastValue_->setComment(normalized, placement);
+ } else {
+ commentsBefore_ += normalized;
+ }
+}
+
+bool Reader::readCStyleComment() {
+ while (current_ != end_) {
+ Char c = getNextChar();
+ if (c == '*' && *current_ == '/')
+ break;
+ }
+ return getNextChar() == '/';
+}
+
+bool Reader::readCppStyleComment() {
+ while (current_ != end_) {
+ Char c = getNextChar();
+ if (c == '\n')
+ break;
+ if (c == '\r') {
+ // Consume DOS EOL. It will be normalized in addComment.
+ if (current_ != end_ && *current_ == '\n')
+ getNextChar();
+ // Break on Mac OS 9 EOL.
+ break;
+ }
+ }
+ return true;
+}
+
+void Reader::readNumber() {
+ const char *p = current_;
+ char c = '0'; // stopgap for already consumed character
+ // integral part
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ // fractional part
+ if (c == '.') {
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ }
+ // exponential part
+ if (c == 'e' || c == 'E') {
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ if (c == '+' || c == '-')
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ }
+}
+
+bool Reader::readString() {
+ Char c = '\0';
+ while (current_ != end_) {
+ c = getNextChar();
+ if (c == '\\')
+ getNextChar();
+ else if (c == '"')
+ break;
+ }
+ return c == '"';
+}
+
+bool Reader::readObject(Token& tokenStart) {
+ Token tokenName;
+ JSONCPP_STRING name;
+ Value init(objectValue);
+ currentValue().swapPayload(init);
+ currentValue().setOffsetStart(tokenStart.start_ - begin_);
+ while (readToken(tokenName)) {
+ bool initialTokenOk = true;
+ while (tokenName.type_ == tokenComment && initialTokenOk)
+ initialTokenOk = readToken(tokenName);
+ if (!initialTokenOk)
+ break;
+ if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object
+ return true;
+ name = "";
+ if (tokenName.type_ == tokenString) {
+ if (!decodeString(tokenName, name))
+ return recoverFromError(tokenObjectEnd);
+ } else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) {
+ Value numberName;
+ if (!decodeNumber(tokenName, numberName))
+ return recoverFromError(tokenObjectEnd);
+ name = JSONCPP_STRING(numberName.asCString());
+ } else {
+ break;
+ }
+
+ Token colon;
+ if (!readToken(colon) || colon.type_ != tokenMemberSeparator) {
+ return addErrorAndRecover(
+ "Missing ':' after object member name", colon, tokenObjectEnd);
+ }
+ Value& value = currentValue()[name];
+ nodes_.push(&value);
+ bool ok = readValue();
+ nodes_.pop();
+ if (!ok) // error already set
+ return recoverFromError(tokenObjectEnd);
+
+ Token comma;
+ if (!readToken(comma) ||
+ (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
+ comma.type_ != tokenComment)) {
+ return addErrorAndRecover(
+ "Missing ',' or '}' in object declaration", comma, tokenObjectEnd);
+ }
+ bool finalizeTokenOk = true;
+ while (comma.type_ == tokenComment && finalizeTokenOk)
+ finalizeTokenOk = readToken(comma);
+ if (comma.type_ == tokenObjectEnd)
+ return true;
+ }
+ return addErrorAndRecover(
+ "Missing '}' or object member name", tokenName, tokenObjectEnd);
+}
+
+bool Reader::readArray(Token& tokenStart) {
+ Value init(arrayValue);
+ currentValue().swapPayload(init);
+ currentValue().setOffsetStart(tokenStart.start_ - begin_);
+ skipSpaces();
+ if (*current_ == ']') // empty array
+ {
+ Token endArray;
+ readToken(endArray);
+ return true;
+ }
+ int index = 0;
+ for (;;) {
+ Value& value = currentValue()[index++];
+ nodes_.push(&value);
+ bool ok = readValue();
+ nodes_.pop();
+ if (!ok) // error already set
+ return recoverFromError(tokenArrayEnd);
+
+ Token token;
+ // Accept Comment after last item in the array.
+ ok = readToken(token);
+ while (token.type_ == tokenComment && ok) {
+ ok = readToken(token);
+ }
+ bool badTokenType =
+ (token.type_ != tokenArraySeparator && token.type_ != tokenArrayEnd);
+ if (!ok || badTokenType) {
+ return addErrorAndRecover(
+ "Missing ',' or ']' in array declaration", token, tokenArrayEnd);
+ }
+ if (token.type_ == tokenArrayEnd)
+ break;
+ }
+ return true;
+}
+
+bool Reader::decodeNumber(Token& token) {
+ Value decoded;
+ if (!decodeNumber(token, decoded))
+ return false;
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool Reader::decodeNumber(Token& token, Value& decoded) {
+ // Attempts to parse the number as an integer. If the number is
+ // larger than the maximum supported value of an integer then
+ // we decode the number as a double.
+ Location current = token.start_;
+ bool isNegative = *current == '-';
+ if (isNegative)
+ ++current;
+ // TODO: Help the compiler do the div and mod at compile time or get rid of them.
+ Value::LargestUInt maxIntegerValue =
+ isNegative ? Value::LargestUInt(Value::maxLargestInt) + 1
+ : Value::maxLargestUInt;
+ Value::LargestUInt threshold = maxIntegerValue / 10;
+ Value::LargestUInt value = 0;
+ while (current < token.end_) {
+ Char c = *current++;
+ if (c < '0' || c > '9')
+ return decodeDouble(token, decoded);
+ Value::UInt digit(static_cast<Value::UInt>(c - '0'));
+ if (value >= threshold) {
+ // We've hit or exceeded the max value divided by 10 (rounded down). If
+ // a) we've only just touched the limit, b) this is the last digit, and
+ // c) it's small enough to fit in that rounding delta, we're okay.
+ // Otherwise treat this number as a double to avoid overflow.
+ if (value > threshold || current != token.end_ ||
+ digit > maxIntegerValue % 10) {
+ return decodeDouble(token, decoded);
+ }
+ }
+ value = value * 10 + digit;
+ }
+ if (isNegative && value == maxIntegerValue)
+ decoded = Value::minLargestInt;
+ else if (isNegative)
+ decoded = -Value::LargestInt(value);
+ else if (value <= Value::LargestUInt(Value::maxInt))
+ decoded = Value::LargestInt(value);
+ else
+ decoded = value;
+ return true;
+}
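+
+// Worked example of the overflow guard above (illustrative, 64-bit
+// LargestUInt): maxIntegerValue is 18446744073709551615, so threshold is
+// 1844674407370955161 and maxIntegerValue % 10 is 5. Parsing
+// "18446744073709551616" (2^64) reaches value == threshold with a final
+// digit of 6, which exceeds that remainder of 5, so the token is handed to
+// decodeDouble() instead of being allowed to wrap around.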
+
+bool Reader::decodeDouble(Token& token) {
+ Value decoded;
+ if (!decodeDouble(token, decoded))
+ return false;
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool Reader::decodeDouble(Token& token, Value& decoded) {
+ double value = 0;
+ JSONCPP_STRING buffer(token.start_, token.end_);
+ JSONCPP_ISTRINGSTREAM is(buffer);
+ if (!(is >> value))
+ return addError("'" + JSONCPP_STRING(token.start_, token.end_) +
+ "' is not a number.",
+ token);
+ decoded = value;
+ return true;
+}
+
+bool Reader::decodeString(Token& token) {
+ JSONCPP_STRING decoded_string;
+ if (!decodeString(token, decoded_string))
+ return false;
+ Value decoded(decoded_string);
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool Reader::decodeString(Token& token, JSONCPP_STRING& decoded) {
+ decoded.reserve(static_cast<size_t>(token.end_ - token.start_ - 2));
+ Location current = token.start_ + 1; // skip '"'
+ Location end = token.end_ - 1; // do not include '"'
+ while (current != end) {
+ Char c = *current++;
+ if (c == '"')
+ break;
+ else if (c == '\\') {
+ if (current == end)
+ return addError("Empty escape sequence in string", token, current);
+ Char escape = *current++;
+ switch (escape) {
+ case '"':
+ decoded += '"';
+ break;
+ case '/':
+ decoded += '/';
+ break;
+ case '\\':
+ decoded += '\\';
+ break;
+ case 'b':
+ decoded += '\b';
+ break;
+ case 'f':
+ decoded += '\f';
+ break;
+ case 'n':
+ decoded += '\n';
+ break;
+ case 'r':
+ decoded += '\r';
+ break;
+ case 't':
+ decoded += '\t';
+ break;
+ case 'u': {
+ unsigned int unicode;
+ if (!decodeUnicodeCodePoint(token, current, end, unicode))
+ return false;
+ decoded += codePointToUTF8(unicode);
+ } break;
+ default:
+ return addError("Bad escape sequence in string", token, current);
+ }
+ } else {
+ decoded += c;
+ }
+ }
+ return true;
+}
+
+bool Reader::decodeUnicodeCodePoint(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode) {
+
+ if (!decodeUnicodeEscapeSequence(token, current, end, unicode))
+ return false;
+ if (unicode >= 0xD800 && unicode <= 0xDBFF) {
+ // surrogate pairs
+ if (end - current < 6)
+ return addError(
+ "additional six characters expected to parse unicode surrogate pair.",
+ token,
+ current);
+ unsigned int surrogatePair;
+ if (*(current++) == '\\' && *(current++) == 'u') {
+ if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) {
+ unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
+ } else
+ return false;
+ } else
+ return addError("expecting another \\u token to begin the second half of "
+ "a unicode surrogate pair",
+ token,
+ current);
+ }
+ return true;
+}
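+
+// Worked example of the surrogate-pair arithmetic above (illustrative): the
+// escape "\uD83D\uDE00" first yields unicode == 0xD83D, which lies in the
+// high-surrogate range, so the following "\uDE00" is decoded as the second
+// half and the code point becomes
+//   0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE00 & 0x3FF) == 0x1F600,
+// which codePointToUTF8() then encodes as four UTF-8 bytes.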
+
+bool Reader::decodeUnicodeEscapeSequence(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& ret_unicode) {
+ if (end - current < 4)
+ return addError(
+ "Bad unicode escape sequence in string: four digits expected.",
+ token,
+ current);
+ int unicode = 0;
+ for (int index = 0; index < 4; ++index) {
+ Char c = *current++;
+ unicode *= 16;
+ if (c >= '0' && c <= '9')
+ unicode += c - '0';
+ else if (c >= 'a' && c <= 'f')
+ unicode += c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ unicode += c - 'A' + 10;
+ else
+ return addError(
+ "Bad unicode escape sequence in string: hexadecimal digit expected.",
+ token,
+ current);
+ }
+ ret_unicode = static_cast<unsigned int>(unicode);
+ return true;
+}
+
+bool
+Reader::addError(const JSONCPP_STRING& message, Token& token, Location extra) {
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = extra;
+ errors_.push_back(info);
+ return false;
+}
+
+bool Reader::recoverFromError(TokenType skipUntilToken) {
+ size_t const errorCount = errors_.size();
+ Token skip;
+ for (;;) {
+ if (!readToken(skip))
+ errors_.resize(errorCount); // discard errors caused by recovery
+ if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream)
+ break;
+ }
+ errors_.resize(errorCount);
+ return false;
+}
+
+bool Reader::addErrorAndRecover(const JSONCPP_STRING& message,
+ Token& token,
+ TokenType skipUntilToken) {
+ addError(message, token);
+ return recoverFromError(skipUntilToken);
+}
+
+Value& Reader::currentValue() { return *(nodes_.top()); }
+
+Reader::Char Reader::getNextChar() {
+ if (current_ == end_)
+ return 0;
+ return *current_++;
+}
+
+void Reader::getLocationLineAndColumn(Location location,
+ int& line,
+ int& column) const {
+ Location current = begin_;
+ Location lastLineStart = current;
+ line = 0;
+ while (current < location && current != end_) {
+ Char c = *current++;
+ if (c == '\r') {
+ if (*current == '\n')
+ ++current;
+ lastLineStart = current;
+ ++line;
+ } else if (c == '\n') {
+ lastLineStart = current;
+ ++line;
+ }
+ }
+ // column & line start at 1
+ column = int(location - lastLineStart) + 1;
+ ++line;
+}
+
+JSONCPP_STRING Reader::getLocationLineAndColumn(Location location) const {
+ int line, column;
+ getLocationLineAndColumn(location, line, column);
+ char buffer[18 + 16 + 16 + 1];
+ snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
+ return buffer;
+}
+
+// Deprecated. Preserved for backward compatibility
+JSONCPP_STRING Reader::getFormatedErrorMessages() const {
+ return getFormattedErrorMessages();
+}
+
+JSONCPP_STRING Reader::getFormattedErrorMessages() const {
+ JSONCPP_STRING formattedMessage;
+ for (Errors::const_iterator itError = errors_.begin();
+ itError != errors_.end();
+ ++itError) {
+ const ErrorInfo& error = *itError;
+ formattedMessage +=
+ "* " + getLocationLineAndColumn(error.token_.start_) + "\n";
+ formattedMessage += " " + error.message_ + "\n";
+ if (error.extra_)
+ formattedMessage +=
+ "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n";
+ }
+ return formattedMessage;
+}
+
+std::vector<Reader::StructuredError> Reader::getStructuredErrors() const {
+ std::vector<Reader::StructuredError> allErrors;
+ for (Errors::const_iterator itError = errors_.begin();
+ itError != errors_.end();
+ ++itError) {
+ const ErrorInfo& error = *itError;
+ Reader::StructuredError structured;
+ structured.offset_start = error.token_.start_ - begin_;
+ structured.offset_limit = error.token_.end_ - begin_;
+ structured.message = error.message_;
+ allErrors.push_back(structured);
+ }
+ return allErrors;
+}
+
+bool Reader::pushError(const Value& value, const JSONCPP_STRING& message) {
+ ptrdiff_t const length = end_ - begin_;
+ if(value.getOffsetStart() > length
+ || value.getOffsetLimit() > length)
+ return false;
+ Token token;
+ token.type_ = tokenError;
+ token.start_ = begin_ + value.getOffsetStart();
+ token.end_ = begin_ + value.getOffsetLimit();
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = 0;
+ errors_.push_back(info);
+ return true;
+}
+
+bool Reader::pushError(const Value& value, const JSONCPP_STRING& message, const Value& extra) {
+ ptrdiff_t const length = end_ - begin_;
+ if(value.getOffsetStart() > length
+ || value.getOffsetLimit() > length
+ || extra.getOffsetLimit() > length)
+ return false;
+ Token token;
+ token.type_ = tokenError;
+ token.start_ = begin_ + value.getOffsetStart();
+ token.end_ = begin_ + value.getOffsetLimit();
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = begin_ + extra.getOffsetStart();
+ errors_.push_back(info);
+ return true;
+}
+
+bool Reader::good() const {
+ return !errors_.size();
+}
+
+// exact copy of Features
+class OurFeatures {
+public:
+ static OurFeatures all();
+ bool allowComments_;
+ bool strictRoot_;
+ bool allowDroppedNullPlaceholders_;
+ bool allowNumericKeys_;
+ bool allowSingleQuotes_;
+ bool failIfExtra_;
+ bool rejectDupKeys_;
+ bool allowSpecialFloats_;
+ int stackLimit_;
+}; // OurFeatures
+
+// exact copy of Implementation of class Features
+// ////////////////////////////////
+
+OurFeatures OurFeatures::all() { return OurFeatures(); }
+
+// Implementation of class Reader
+// ////////////////////////////////
+
+// exact copy of Reader, renamed to OurReader
+class OurReader {
+public:
+ typedef char Char;
+ typedef const Char* Location;
+ struct StructuredError {
+ ptrdiff_t offset_start;
+ ptrdiff_t offset_limit;
+ JSONCPP_STRING message;
+ };
+
+ OurReader(OurFeatures const& features);
+ bool parse(const char* beginDoc,
+ const char* endDoc,
+ Value& root,
+ bool collectComments = true);
+ JSONCPP_STRING getFormattedErrorMessages() const;
+ std::vector<StructuredError> getStructuredErrors() const;
+ bool pushError(const Value& value, const JSONCPP_STRING& message);
+ bool pushError(const Value& value, const JSONCPP_STRING& message, const Value& extra);
+ bool good() const;
+
+private:
+ OurReader(OurReader const&); // no impl
+ void operator=(OurReader const&); // no impl
+
+ enum TokenType {
+ tokenEndOfStream = 0,
+ tokenObjectBegin,
+ tokenObjectEnd,
+ tokenArrayBegin,
+ tokenArrayEnd,
+ tokenString,
+ tokenNumber,
+ tokenTrue,
+ tokenFalse,
+ tokenNull,
+ tokenNaN,
+ tokenPosInf,
+ tokenNegInf,
+ tokenArraySeparator,
+ tokenMemberSeparator,
+ tokenComment,
+ tokenError
+ };
+
+ class Token {
+ public:
+ TokenType type_;
+ Location start_;
+ Location end_;
+ };
+
+ class ErrorInfo {
+ public:
+ Token token_;
+ JSONCPP_STRING message_;
+ Location extra_;
+ };
+
+ typedef std::deque<ErrorInfo> Errors;
+
+ bool readToken(Token& token);
+ void skipSpaces();
+ bool match(Location pattern, int patternLength);
+ bool readComment();
+ bool readCStyleComment();
+ bool readCppStyleComment();
+ bool readString();
+ bool readStringSingleQuote();
+ bool readNumber(bool checkInf);
+ bool readValue();
+ bool readObject(Token& token);
+ bool readArray(Token& token);
+ bool decodeNumber(Token& token);
+ bool decodeNumber(Token& token, Value& decoded);
+ bool decodeString(Token& token);
+ bool decodeString(Token& token, JSONCPP_STRING& decoded);
+ bool decodeDouble(Token& token);
+ bool decodeDouble(Token& token, Value& decoded);
+ bool decodeUnicodeCodePoint(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode);
+ bool decodeUnicodeEscapeSequence(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode);
+ bool addError(const JSONCPP_STRING& message, Token& token, Location extra = 0);
+ bool recoverFromError(TokenType skipUntilToken);
+ bool addErrorAndRecover(const JSONCPP_STRING& message,
+ Token& token,
+ TokenType skipUntilToken);
+ void skipUntilSpace();
+ Value& currentValue();
+ Char getNextChar();
+ void
+ getLocationLineAndColumn(Location location, int& line, int& column) const;
+ JSONCPP_STRING getLocationLineAndColumn(Location location) const;
+ void addComment(Location begin, Location end, CommentPlacement placement);
+ void skipCommentTokens(Token& token);
+
+ typedef std::stack<Value*> Nodes;
+ Nodes nodes_;
+ Errors errors_;
+ JSONCPP_STRING document_;
+ Location begin_;
+ Location end_;
+ Location current_;
+ Location lastValueEnd_;
+ Value* lastValue_;
+ JSONCPP_STRING commentsBefore_;
+ int stackDepth_;
+
+ OurFeatures const features_;
+ bool collectComments_;
+}; // OurReader
+
+// complete copy of Read impl, for OurReader
+
+OurReader::OurReader(OurFeatures const& features)
+ : errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
+ lastValue_(), commentsBefore_(),
+ stackDepth_(0),
+ features_(features), collectComments_() {
+}
+
+bool OurReader::parse(const char* beginDoc,
+ const char* endDoc,
+ Value& root,
+ bool collectComments) {
+ if (!features_.allowComments_) {
+ collectComments = false;
+ }
+
+ begin_ = beginDoc;
+ end_ = endDoc;
+ collectComments_ = collectComments;
+ current_ = begin_;
+ lastValueEnd_ = 0;
+ lastValue_ = 0;
+ commentsBefore_ = "";
+ errors_.clear();
+ while (!nodes_.empty())
+ nodes_.pop();
+ nodes_.push(&root);
+
+ stackDepth_ = 0;
+ bool successful = readValue();
+ Token token;
+ skipCommentTokens(token);
+ if (features_.failIfExtra_) {
+ if (token.type_ != tokenError && token.type_ != tokenEndOfStream) {
+ addError("Extra non-whitespace after JSON value.", token);
+ return false;
+ }
+ }
+ if (collectComments_ && !commentsBefore_.empty())
+ root.setComment(commentsBefore_, commentAfter);
+ if (features_.strictRoot_) {
+ if (!root.isArray() && !root.isObject()) {
+ // Set error location to start of doc, ideally should be first token found
+ // in doc
+ token.type_ = tokenError;
+ token.start_ = beginDoc;
+ token.end_ = endDoc;
+ addError(
+ "A valid JSON document must be either an array or an object value.",
+ token);
+ return false;
+ }
+ }
+ return successful;
+}
+
+bool OurReader::readValue() {
+ if (stackDepth_ >= features_.stackLimit_) throwRuntimeError("Exceeded stackLimit in readValue().");
+ ++stackDepth_;
+ Token token;
+ skipCommentTokens(token);
+ bool successful = true;
+
+ if (collectComments_ && !commentsBefore_.empty()) {
+ currentValue().setComment(commentsBefore_, commentBefore);
+ commentsBefore_ = "";
+ }
+
+ switch (token.type_) {
+ case tokenObjectBegin:
+ successful = readObject(token);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ case tokenArrayBegin:
+ successful = readArray(token);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ case tokenNumber:
+ successful = decodeNumber(token);
+ break;
+ case tokenString:
+ successful = decodeString(token);
+ break;
+ case tokenTrue:
+ {
+ Value v(true);
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenFalse:
+ {
+ Value v(false);
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenNull:
+ {
+ Value v;
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenNaN:
+ {
+ Value v(std::numeric_limits<double>::quiet_NaN());
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenPosInf:
+ {
+ Value v(std::numeric_limits<double>::infinity());
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenNegInf:
+ {
+ Value v(-std::numeric_limits<double>::infinity());
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ }
+ break;
+ case tokenArraySeparator:
+ case tokenObjectEnd:
+ case tokenArrayEnd:
+ if (features_.allowDroppedNullPlaceholders_) {
+ // "Un-read" the current token and mark the current value as a null
+ // token.
+ current_--;
+ Value v;
+ currentValue().swapPayload(v);
+ currentValue().setOffsetStart(current_ - begin_ - 1);
+ currentValue().setOffsetLimit(current_ - begin_);
+ break;
+ } // else, fall through ...
+ default:
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return addError("Syntax error: value, object or array expected.", token);
+ }
+
+ if (collectComments_) {
+ lastValueEnd_ = current_;
+ lastValue_ = &currentValue();
+ }
+
+ --stackDepth_;
+ return successful;
+}
+
+void OurReader::skipCommentTokens(Token& token) {
+ if (features_.allowComments_) {
+ do {
+ readToken(token);
+ } while (token.type_ == tokenComment);
+ } else {
+ readToken(token);
+ }
+}
+
+bool OurReader::readToken(Token& token) {
+ skipSpaces();
+ token.start_ = current_;
+ Char c = getNextChar();
+ bool ok = true;
+ switch (c) {
+ case '{':
+ token.type_ = tokenObjectBegin;
+ break;
+ case '}':
+ token.type_ = tokenObjectEnd;
+ break;
+ case '[':
+ token.type_ = tokenArrayBegin;
+ break;
+ case ']':
+ token.type_ = tokenArrayEnd;
+ break;
+ case '"':
+ token.type_ = tokenString;
+ ok = readString();
+ break;
+ case '\'':
+ if (features_.allowSingleQuotes_) {
+ token.type_ = tokenString;
+ ok = readStringSingleQuote();
+ break;
+ } // else continue
+ case '/':
+ token.type_ = tokenComment;
+ ok = readComment();
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ token.type_ = tokenNumber;
+ readNumber(false);
+ break;
+ case '-':
+ if (readNumber(true)) {
+ token.type_ = tokenNumber;
+ } else {
+ token.type_ = tokenNegInf;
+ ok = features_.allowSpecialFloats_ && match("nfinity", 7);
+ }
+ break;
+ case 't':
+ token.type_ = tokenTrue;
+ ok = match("rue", 3);
+ break;
+ case 'f':
+ token.type_ = tokenFalse;
+ ok = match("alse", 4);
+ break;
+ case 'n':
+ token.type_ = tokenNull;
+ ok = match("ull", 3);
+ break;
+ case 'N':
+ if (features_.allowSpecialFloats_) {
+ token.type_ = tokenNaN;
+ ok = match("aN", 2);
+ } else {
+ ok = false;
+ }
+ break;
+ case 'I':
+ if (features_.allowSpecialFloats_) {
+ token.type_ = tokenPosInf;
+ ok = match("nfinity", 7);
+ } else {
+ ok = false;
+ }
+ break;
+ case ',':
+ token.type_ = tokenArraySeparator;
+ break;
+ case ':':
+ token.type_ = tokenMemberSeparator;
+ break;
+ case 0:
+ token.type_ = tokenEndOfStream;
+ break;
+ default:
+ ok = false;
+ break;
+ }
+ if (!ok)
+ token.type_ = tokenError;
+ token.end_ = current_;
+ return true;
+}
+
+void OurReader::skipSpaces() {
+ while (current_ != end_) {
+ Char c = *current_;
+ if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
+ ++current_;
+ else
+ break;
+ }
+}
+
+bool OurReader::match(Location pattern, int patternLength) {
+ if (end_ - current_ < patternLength)
+ return false;
+ int index = patternLength;
+ while (index--)
+ if (current_[index] != pattern[index])
+ return false;
+ current_ += patternLength;
+ return true;
+}
+
+bool OurReader::readComment() {
+ Location commentBegin = current_ - 1;
+ Char c = getNextChar();
+ bool successful = false;
+ if (c == '*')
+ successful = readCStyleComment();
+ else if (c == '/')
+ successful = readCppStyleComment();
+ if (!successful)
+ return false;
+
+ if (collectComments_) {
+ CommentPlacement placement = commentBefore;
+ if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) {
+ if (c != '*' || !containsNewLine(commentBegin, current_))
+ placement = commentAfterOnSameLine;
+ }
+
+ addComment(commentBegin, current_, placement);
+ }
+ return true;
+}
+
+void
+OurReader::addComment(Location begin, Location end, CommentPlacement placement) {
+ assert(collectComments_);
+ const JSONCPP_STRING& normalized = normalizeEOL(begin, end);
+ if (placement == commentAfterOnSameLine) {
+ assert(lastValue_ != 0);
+ lastValue_->setComment(normalized, placement);
+ } else {
+ commentsBefore_ += normalized;
+ }
+}
+
+bool OurReader::readCStyleComment() {
+ while (current_ != end_) {
+ Char c = getNextChar();
+ if (c == '*' && *current_ == '/')
+ break;
+ }
+ return getNextChar() == '/';
+}
+
+bool OurReader::readCppStyleComment() {
+ while (current_ != end_) {
+ Char c = getNextChar();
+ if (c == '\n')
+ break;
+ if (c == '\r') {
+ // Consume DOS EOL. It will be normalized in addComment.
+ if (current_ != end_ && *current_ == '\n')
+ getNextChar();
+ // Break on Mac OS 9 EOL.
+ break;
+ }
+ }
+ return true;
+}
+
+bool OurReader::readNumber(bool checkInf) {
+ const char *p = current_;
+ if (checkInf && p != end_ && *p == 'I') {
+ current_ = ++p;
+ return false;
+ }
+ char c = '0'; // stopgap for already consumed character
+ // integral part
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ // fractional part
+ if (c == '.') {
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ }
+ // exponential part
+ if (c == 'e' || c == 'E') {
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ if (c == '+' || c == '-')
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ while (c >= '0' && c <= '9')
+ c = (current_ = p) < end_ ? *p++ : '\0';
+ }
+ return true;
+}
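+
+// Illustrative note on the checkInf parameter: readToken() calls
+// readNumber(true) after consuming a leading '-'. If the next character is
+// 'I', readNumber() consumes it and returns false, so the token is
+// re-classified as tokenNegInf and, when allowSpecialFloats_ is set,
+// match("nfinity", 7) consumes the rest of "-Infinity".
+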
+bool OurReader::readString() {
+ Char c = 0;
+ while (current_ != end_) {
+ c = getNextChar();
+ if (c == '\\')
+ getNextChar();
+ else if (c == '"')
+ break;
+ }
+ return c == '"';
+}
+
+
+bool OurReader::readStringSingleQuote() {
+ Char c = 0;
+ while (current_ != end_) {
+ c = getNextChar();
+ if (c == '\\')
+ getNextChar();
+ else if (c == '\'')
+ break;
+ }
+ return c == '\'';
+}
+
+bool OurReader::readObject(Token& tokenStart) {
+ Token tokenName;
+ JSONCPP_STRING name;
+ Value init(objectValue);
+ currentValue().swapPayload(init);
+ currentValue().setOffsetStart(tokenStart.start_ - begin_);
+ while (readToken(tokenName)) {
+ bool initialTokenOk = true;
+ while (tokenName.type_ == tokenComment && initialTokenOk)
+ initialTokenOk = readToken(tokenName);
+ if (!initialTokenOk)
+ break;
+ if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object
+ return true;
+ name = "";
+ if (tokenName.type_ == tokenString) {
+ if (!decodeString(tokenName, name))
+ return recoverFromError(tokenObjectEnd);
+ } else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) {
+ Value numberName;
+ if (!decodeNumber(tokenName, numberName))
+ return recoverFromError(tokenObjectEnd);
+ name = numberName.asString();
+ } else {
+ break;
+ }
+
+ Token colon;
+ if (!readToken(colon) || colon.type_ != tokenMemberSeparator) {
+ return addErrorAndRecover(
+ "Missing ':' after object member name", colon, tokenObjectEnd);
+ }
+ if (name.length() >= (1U<<30)) throwRuntimeError("keylength >= 2^30");
+ if (features_.rejectDupKeys_ && currentValue().isMember(name)) {
+ JSONCPP_STRING msg = "Duplicate key: '" + name + "'";
+ return addErrorAndRecover(
+ msg, tokenName, tokenObjectEnd);
+ }
+ Value& value = currentValue()[name];
+ nodes_.push(&value);
+ bool ok = readValue();
+ nodes_.pop();
+ if (!ok) // error already set
+ return recoverFromError(tokenObjectEnd);
+
+ Token comma;
+ if (!readToken(comma) ||
+ (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
+ comma.type_ != tokenComment)) {
+ return addErrorAndRecover(
+ "Missing ',' or '}' in object declaration", comma, tokenObjectEnd);
+ }
+ bool finalizeTokenOk = true;
+ while (comma.type_ == tokenComment && finalizeTokenOk)
+ finalizeTokenOk = readToken(comma);
+ if (comma.type_ == tokenObjectEnd)
+ return true;
+ }
+ return addErrorAndRecover(
+ "Missing '}' or object member name", tokenName, tokenObjectEnd);
+}
+
+bool OurReader::readArray(Token& tokenStart) {
+ Value init(arrayValue);
+ currentValue().swapPayload(init);
+ currentValue().setOffsetStart(tokenStart.start_ - begin_);
+ skipSpaces();
+ if (*current_ == ']') // empty array
+ {
+ Token endArray;
+ readToken(endArray);
+ return true;
+ }
+ int index = 0;
+ for (;;) {
+ Value& value = currentValue()[index++];
+ nodes_.push(&value);
+ bool ok = readValue();
+ nodes_.pop();
+ if (!ok) // error already set
+ return recoverFromError(tokenArrayEnd);
+
+ Token token;
+ // Accept Comment after last item in the array.
+ ok = readToken(token);
+ while (token.type_ == tokenComment && ok) {
+ ok = readToken(token);
+ }
+ bool badTokenType =
+ (token.type_ != tokenArraySeparator && token.type_ != tokenArrayEnd);
+ if (!ok || badTokenType) {
+ return addErrorAndRecover(
+ "Missing ',' or ']' in array declaration", token, tokenArrayEnd);
+ }
+ if (token.type_ == tokenArrayEnd)
+ break;
+ }
+ return true;
+}
+
+bool OurReader::decodeNumber(Token& token) {
+ Value decoded;
+ if (!decodeNumber(token, decoded))
+ return false;
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool OurReader::decodeNumber(Token& token, Value& decoded) {
+ // Attempts to parse the number as an integer. If the number is
+ // larger than the maximum supported value of an integer then
+ // we decode the number as a double.
+ Location current = token.start_;
+ bool isNegative = *current == '-';
+ if (isNegative)
+ ++current;
+ // TODO: Help the compiler do the div and mod at compile time or get rid of them.
+ Value::LargestUInt maxIntegerValue =
+ isNegative ? Value::LargestUInt(-Value::minLargestInt)
+ : Value::maxLargestUInt;
+ Value::LargestUInt threshold = maxIntegerValue / 10;
+ Value::LargestUInt value = 0;
+ while (current < token.end_) {
+ Char c = *current++;
+ if (c < '0' || c > '9')
+ return decodeDouble(token, decoded);
+ Value::UInt digit(static_cast<Value::UInt>(c - '0'));
+ if (value >= threshold) {
+ // We've hit or exceeded the max value divided by 10 (rounded down). If
+ // a) we've only just touched the limit, b) this is the last digit, and
+ // c) it's small enough to fit in that rounding delta, we're okay.
+ // Otherwise treat this number as a double to avoid overflow.
+ if (value > threshold || current != token.end_ ||
+ digit > maxIntegerValue % 10) {
+ return decodeDouble(token, decoded);
+ }
+ }
+ value = value * 10 + digit;
+ }
+ if (isNegative)
+ decoded = -Value::LargestInt(value);
+ else if (value <= Value::LargestUInt(Value::maxInt))
+ decoded = Value::LargestInt(value);
+ else
+ decoded = value;
+ return true;
+}
+
+bool OurReader::decodeDouble(Token& token) {
+ Value decoded;
+ if (!decodeDouble(token, decoded))
+ return false;
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool OurReader::decodeDouble(Token& token, Value& decoded) {
+ double value = 0;
+ const int bufferSize = 32;
+ int count;
+ ptrdiff_t const length = token.end_ - token.start_;
+
+ // Sanity check to avoid buffer overflow exploits.
+ if (length < 0) {
+ return addError("Unable to parse token length", token);
+ }
+ size_t const ulength = static_cast<size_t>(length);
+
+ // Avoid using a string constant for the format control string given to
+ // sscanf, as this can cause hard to debug crashes on OS X. See here for more
+ // info:
+ //
+ // http://developer.apple.com/library/mac/#DOCUMENTATION/DeveloperTools/gcc-4.0.1/gcc/Incompatibilities.html
+ char format[] = "%lf";
+
+ if (length <= bufferSize) {
+ Char buffer[bufferSize + 1];
+ memcpy(buffer, token.start_, ulength);
+ buffer[length] = 0;
+ count = sscanf(buffer, format, &value);
+ } else {
+ JSONCPP_STRING buffer(token.start_, token.end_);
+ count = sscanf(buffer.c_str(), format, &value);
+ }
+
+ if (count != 1)
+ return addError("'" + JSONCPP_STRING(token.start_, token.end_) +
+ "' is not a number.",
+ token);
+ decoded = value;
+ return true;
+}
+
+bool OurReader::decodeString(Token& token) {
+ JSONCPP_STRING decoded_string;
+ if (!decodeString(token, decoded_string))
+ return false;
+ Value decoded(decoded_string);
+ currentValue().swapPayload(decoded);
+ currentValue().setOffsetStart(token.start_ - begin_);
+ currentValue().setOffsetLimit(token.end_ - begin_);
+ return true;
+}
+
+bool OurReader::decodeString(Token& token, JSONCPP_STRING& decoded) {
+ decoded.reserve(static_cast<size_t>(token.end_ - token.start_ - 2));
+ Location current = token.start_ + 1; // skip '"'
+ Location end = token.end_ - 1; // do not include '"'
+ while (current != end) {
+ Char c = *current++;
+ if (c == '"')
+ break;
+ else if (c == '\\') {
+ if (current == end)
+ return addError("Empty escape sequence in string", token, current);
+ Char escape = *current++;
+ switch (escape) {
+ case '"':
+ decoded += '"';
+ break;
+ case '/':
+ decoded += '/';
+ break;
+ case '\\':
+ decoded += '\\';
+ break;
+ case 'b':
+ decoded += '\b';
+ break;
+ case 'f':
+ decoded += '\f';
+ break;
+ case 'n':
+ decoded += '\n';
+ break;
+ case 'r':
+ decoded += '\r';
+ break;
+ case 't':
+ decoded += '\t';
+ break;
+ case 'u': {
+ unsigned int unicode;
+ if (!decodeUnicodeCodePoint(token, current, end, unicode))
+ return false;
+ decoded += codePointToUTF8(unicode);
+ } break;
+ default:
+ return addError("Bad escape sequence in string", token, current);
+ }
+ } else {
+ decoded += c;
+ }
+ }
+ return true;
+}
+
+bool OurReader::decodeUnicodeCodePoint(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& unicode) {
+
+ if (!decodeUnicodeEscapeSequence(token, current, end, unicode))
+ return false;
+ if (unicode >= 0xD800 && unicode <= 0xDBFF) {
+ // surrogate pairs
+ if (end - current < 6)
+ return addError(
+ "additional six characters expected to parse unicode surrogate pair.",
+ token,
+ current);
+ unsigned int surrogatePair;
+ if (*(current++) == '\\' && *(current++) == 'u') {
+ if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) {
+ unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
+ } else
+ return false;
+ } else
+ return addError("expecting another \\u token to begin the second half of "
+ "a unicode surrogate pair",
+ token,
+ current);
+ }
+ return true;
+}
+
+bool OurReader::decodeUnicodeEscapeSequence(Token& token,
+ Location& current,
+ Location end,
+ unsigned int& ret_unicode) {
+ if (end - current < 4)
+ return addError(
+ "Bad unicode escape sequence in string: four digits expected.",
+ token,
+ current);
+ int unicode = 0;
+ for (int index = 0; index < 4; ++index) {
+ Char c = *current++;
+ unicode *= 16;
+ if (c >= '0' && c <= '9')
+ unicode += c - '0';
+ else if (c >= 'a' && c <= 'f')
+ unicode += c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ unicode += c - 'A' + 10;
+ else
+ return addError(
+ "Bad unicode escape sequence in string: hexadecimal digit expected.",
+ token,
+ current);
+ }
+ ret_unicode = static_cast<unsigned int>(unicode);
+ return true;
+}
+
+bool
+OurReader::addError(const JSONCPP_STRING& message, Token& token, Location extra) {
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = extra;
+ errors_.push_back(info);
+ return false;
+}
+
+bool OurReader::recoverFromError(TokenType skipUntilToken) {
+ size_t errorCount = errors_.size();
+ Token skip;
+ for (;;) {
+ if (!readToken(skip))
+ errors_.resize(errorCount); // discard errors caused by recovery
+ if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream)
+ break;
+ }
+ errors_.resize(errorCount);
+ return false;
+}
+
+bool OurReader::addErrorAndRecover(const JSONCPP_STRING& message,
+ Token& token,
+ TokenType skipUntilToken) {
+ addError(message, token);
+ return recoverFromError(skipUntilToken);
+}
+
+Value& OurReader::currentValue() { return *(nodes_.top()); }
+
+OurReader::Char OurReader::getNextChar() {
+ if (current_ == end_)
+ return 0;
+ return *current_++;
+}
+
+void OurReader::getLocationLineAndColumn(Location location,
+ int& line,
+ int& column) const {
+ Location current = begin_;
+ Location lastLineStart = current;
+ line = 0;
+ while (current < location && current != end_) {
+ Char c = *current++;
+ if (c == '\r') {
+ if (*current == '\n')
+ ++current;
+ lastLineStart = current;
+ ++line;
+ } else if (c == '\n') {
+ lastLineStart = current;
+ ++line;
+ }
+ }
+ // column & line start at 1
+ column = int(location - lastLineStart) + 1;
+ ++line;
+}
+
+JSONCPP_STRING OurReader::getLocationLineAndColumn(Location location) const {
+ int line, column;
+ getLocationLineAndColumn(location, line, column);
+ char buffer[18 + 16 + 16 + 1];
+ snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
+ return buffer;
+}
+
+JSONCPP_STRING OurReader::getFormattedErrorMessages() const {
+ JSONCPP_STRING formattedMessage;
+ for (Errors::const_iterator itError = errors_.begin();
+ itError != errors_.end();
+ ++itError) {
+ const ErrorInfo& error = *itError;
+ formattedMessage +=
+ "* " + getLocationLineAndColumn(error.token_.start_) + "\n";
+ formattedMessage += " " + error.message_ + "\n";
+ if (error.extra_)
+ formattedMessage +=
+ "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n";
+ }
+ return formattedMessage;
+}
+
+std::vector<OurReader::StructuredError> OurReader::getStructuredErrors() const {
+ std::vector<OurReader::StructuredError> allErrors;
+ for (Errors::const_iterator itError = errors_.begin();
+ itError != errors_.end();
+ ++itError) {
+ const ErrorInfo& error = *itError;
+ OurReader::StructuredError structured;
+ structured.offset_start = error.token_.start_ - begin_;
+ structured.offset_limit = error.token_.end_ - begin_;
+ structured.message = error.message_;
+ allErrors.push_back(structured);
+ }
+ return allErrors;
+}
+
+bool OurReader::pushError(const Value& value, const JSONCPP_STRING& message) {
+ ptrdiff_t length = end_ - begin_;
+ if(value.getOffsetStart() > length
+ || value.getOffsetLimit() > length)
+ return false;
+ Token token;
+ token.type_ = tokenError;
+ token.start_ = begin_ + value.getOffsetStart();
+ token.end_ = begin_ + value.getOffsetLimit();
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = 0;
+ errors_.push_back(info);
+ return true;
+}
+
+bool OurReader::pushError(const Value& value, const JSONCPP_STRING& message, const Value& extra) {
+ ptrdiff_t length = end_ - begin_;
+ if(value.getOffsetStart() > length
+ || value.getOffsetLimit() > length
+ || extra.getOffsetLimit() > length)
+ return false;
+ Token token;
+ token.type_ = tokenError;
+ token.start_ = begin_ + value.getOffsetStart();
+ token.end_ = begin_ + value.getOffsetLimit();
+ ErrorInfo info;
+ info.token_ = token;
+ info.message_ = message;
+ info.extra_ = begin_ + extra.getOffsetStart();
+ errors_.push_back(info);
+ return true;
+}
+
+bool OurReader::good() const {
+ return !errors_.size();
+}
+
+
+class OurCharReader : public CharReader {
+ bool const collectComments_;
+ OurReader reader_;
+public:
+ OurCharReader(
+ bool collectComments,
+ OurFeatures const& features)
+ : collectComments_(collectComments)
+ , reader_(features)
+ {}
+ bool parse(
+ char const* beginDoc, char const* endDoc,
+ Value* root, JSONCPP_STRING* errs) JSONCPP_OVERRIDE {
+ bool ok = reader_.parse(beginDoc, endDoc, *root, collectComments_);
+ if (errs) {
+ *errs = reader_.getFormattedErrorMessages();
+ }
+ return ok;
+ }
+};
+
+CharReaderBuilder::CharReaderBuilder()
+{
+ setDefaults(&settings_);
+}
+CharReaderBuilder::~CharReaderBuilder()
+{}
+CharReader* CharReaderBuilder::newCharReader() const
+{
+ bool collectComments = settings_["collectComments"].asBool();
+ OurFeatures features = OurFeatures::all();
+ features.allowComments_ = settings_["allowComments"].asBool();
+ features.strictRoot_ = settings_["strictRoot"].asBool();
+ features.allowDroppedNullPlaceholders_ = settings_["allowDroppedNullPlaceholders"].asBool();
+ features.allowNumericKeys_ = settings_["allowNumericKeys"].asBool();
+ features.allowSingleQuotes_ = settings_["allowSingleQuotes"].asBool();
+ features.stackLimit_ = settings_["stackLimit"].asInt();
+ features.failIfExtra_ = settings_["failIfExtra"].asBool();
+ features.rejectDupKeys_ = settings_["rejectDupKeys"].asBool();
+ features.allowSpecialFloats_ = settings_["allowSpecialFloats"].asBool();
+ return new OurCharReader(collectComments, features);
+}
+static void getValidReaderKeys(std::set<JSONCPP_STRING>* valid_keys)
+{
+ valid_keys->clear();
+ valid_keys->insert("collectComments");
+ valid_keys->insert("allowComments");
+ valid_keys->insert("strictRoot");
+ valid_keys->insert("allowDroppedNullPlaceholders");
+ valid_keys->insert("allowNumericKeys");
+ valid_keys->insert("allowSingleQuotes");
+ valid_keys->insert("stackLimit");
+ valid_keys->insert("failIfExtra");
+ valid_keys->insert("rejectDupKeys");
+ valid_keys->insert("allowSpecialFloats");
+}
+bool CharReaderBuilder::validate(Json::Value* invalid) const
+{
+ Json::Value my_invalid;
+ if (!invalid) invalid = &my_invalid; // so we do not need to test for NULL
+ Json::Value& inv = *invalid;
+ std::set<JSONCPP_STRING> valid_keys;
+ getValidReaderKeys(&valid_keys);
+ Value::Members keys = settings_.getMemberNames();
+ size_t n = keys.size();
+ for (size_t i = 0; i < n; ++i) {
+ JSONCPP_STRING const& key = keys[i];
+ if (valid_keys.find(key) == valid_keys.end()) {
+ inv[key] = settings_[key];
+ }
+ }
+ return 0u == inv.size();
+}
+Value& CharReaderBuilder::operator[](JSONCPP_STRING key)
+{
+ return settings_[key];
+}
+// static
+void CharReaderBuilder::strictMode(Json::Value* settings)
+{
+//! [CharReaderBuilderStrictMode]
+ (*settings)["allowComments"] = false;
+ (*settings)["strictRoot"] = true;
+ (*settings)["allowDroppedNullPlaceholders"] = false;
+ (*settings)["allowNumericKeys"] = false;
+ (*settings)["allowSingleQuotes"] = false;
+ (*settings)["stackLimit"] = 1000;
+ (*settings)["failIfExtra"] = true;
+ (*settings)["rejectDupKeys"] = true;
+ (*settings)["allowSpecialFloats"] = false;
+//! [CharReaderBuilderStrictMode]
+}
+// static
+void CharReaderBuilder::setDefaults(Json::Value* settings)
+{
+//! [CharReaderBuilderDefaults]
+ (*settings)["collectComments"] = true;
+ (*settings)["allowComments"] = true;
+ (*settings)["strictRoot"] = false;
+ (*settings)["allowDroppedNullPlaceholders"] = false;
+ (*settings)["allowNumericKeys"] = false;
+ (*settings)["allowSingleQuotes"] = false;
+ (*settings)["stackLimit"] = 1000;
+ (*settings)["failIfExtra"] = false;
+ (*settings)["rejectDupKeys"] = false;
+ (*settings)["allowSpecialFloats"] = false;
+//! [CharReaderBuilderDefaults]
+}
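+
+// Usage sketch for CharReaderBuilder (illustrative values, assuming C++11 and
+// the public declarations from <json/reader.h>):
+//
+//   Json::CharReaderBuilder builder;
+//   builder["rejectDupKeys"] = true;   // tighten one setting beyond defaults
+//   Json::Value root;
+//   JSONCPP_STRING errs;
+//   const std::string doc = "{\"n\": 42}";
+//   std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
+//   if (!reader->parse(doc.data(), doc.data() + doc.size(), &root, &errs))
+//     std::cerr << errs;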
+
+//////////////////////////////////
+// global functions
+
+bool parseFromStream(
+ CharReader::Factory const& fact, JSONCPP_ISTREAM& sin,
+ Value* root, JSONCPP_STRING* errs)
+{
+ JSONCPP_OSTRINGSTREAM ssin;
+ ssin << sin.rdbuf();
+ JSONCPP_STRING doc = ssin.str();
+ char const* begin = doc.data();
+ char const* end = begin + doc.size();
+ // Note that we do not actually need a null-terminator.
+ CharReaderPtr const reader(fact.newCharReader());
+ return reader->parse(begin, end, root, errs);
+}
+
+JSONCPP_ISTREAM& operator>>(JSONCPP_ISTREAM& sin, Value& root) {
+ CharReaderBuilder b;
+ JSONCPP_STRING errs;
+ bool ok = parseFromStream(b, sin, &root, &errs);
+ if (!ok) {
+ fprintf(stderr,
+ "Error from reader: %s",
+ errs.c_str());
+
+ throwRuntimeError(errs);
+ }
+ return sin;
+}
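+
+// Usage sketch for the stream-based entry points above (illustrative;
+// "regions.json" is a hypothetical file name):
+//
+//   std::ifstream in("regions.json");
+//   Json::Value root;
+//   Json::CharReaderBuilder builder;
+//   JSONCPP_STRING errs;
+//   if (!Json::parseFromStream(builder, in, &root, &errs))
+//     std::cerr << errs;          // parse errors reported through errs
+//   // or, letting malformed input throw instead:
+//   // in >> root;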
+
+} // namespace Json
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: src/lib_json/json_reader.cpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: src/lib_json/json_valueiterator.inl
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2007-2010 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+// included by json_value.cpp
+
+namespace Json {
+
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// class ValueIteratorBase
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+
+ValueIteratorBase::ValueIteratorBase()
+ : current_(), isNull_(true) {
+}
+
+ValueIteratorBase::ValueIteratorBase(
+ const Value::ObjectValues::iterator& current)
+ : current_(current), isNull_(false) {}
+
+Value& ValueIteratorBase::deref() const {
+ return current_->second;
+}
+
+void ValueIteratorBase::increment() {
+ ++current_;
+}
+
+void ValueIteratorBase::decrement() {
+ --current_;
+}
+
+ValueIteratorBase::difference_type
+ValueIteratorBase::computeDistance(const SelfType& other) const {
+#ifdef JSON_USE_CPPTL_SMALLMAP
+ return other.current_ - current_;
+#else
+ // Iterators for a null value are initialized with the default
+ // constructor, which sets current_ to the default std::map::iterator.
+ // Since begin() and end() are then two instances of the default
+ // std::map::iterator, they cannot be compared directly, so that
+ // comparison is handled explicitly here.
+ if (isNull_ && other.isNull_) {
+ return 0;
+ }
+
+ // std::distance is not portable here (it does not compile with the
+ // RogueWave STL that ships as the default with Sun Studio 12), so a
+ // hand-rolled version suitable for non-random-access iterators is used
+ // instead:
+ // return difference_type( std::distance( current_, other.current_ ) );
+ difference_type myDistance = 0;
+ for (Value::ObjectValues::iterator it = current_; it != other.current_;
+ ++it) {
+ ++myDistance;
+ }
+ return myDistance;
+#endif
+}
+
+bool ValueIteratorBase::isEqual(const SelfType& other) const {
+ if (isNull_) {
+ return other.isNull_;
+ }
+ return current_ == other.current_;
+}
+
+void ValueIteratorBase::copy(const SelfType& other) {
+ current_ = other.current_;
+ isNull_ = other.isNull_;
+}
+
+Value ValueIteratorBase::key() const {
+ const Value::CZString czstring = (*current_).first;
+ if (czstring.data()) {
+ if (czstring.isStaticString())
+ return Value(StaticString(czstring.data()));
+ return Value(czstring.data(), czstring.data() + czstring.length());
+ }
+ return Value(czstring.index());
+}
+
+UInt ValueIteratorBase::index() const {
+ const Value::CZString czstring = (*current_).first;
+ if (!czstring.data())
+ return czstring.index();
+ return Value::UInt(-1);
+}
+
+JSONCPP_STRING ValueIteratorBase::name() const {
+ char const* keey;
+ char const* end;
+ keey = memberName(&end);
+ if (!keey) return JSONCPP_STRING();
+ return JSONCPP_STRING(keey, end);
+}
+
+char const* ValueIteratorBase::memberName() const {
+ const char* cname = (*current_).first.data();
+ return cname ? cname : "";
+}
+
+char const* ValueIteratorBase::memberName(char const** end) const {
+ const char* cname = (*current_).first.data();
+ if (!cname) {
+ *end = NULL;
+ return NULL;
+ }
+ *end = cname + (*current_).first.length();
+ return cname;
+}
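+
+// Usage sketch for the accessors above (illustrative member names): iterating
+// an objectValue and reading each member name through the iterator:
+//
+//   Json::Value obj(Json::objectValue);
+//   obj["sample"] = "NA12878";
+//   obj["coverage"] = 30;
+//   for (Json::Value::const_iterator it = obj.begin(); it != obj.end(); ++it)
+//     std::cout << it.name() << " -> " << *it << "\n";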
+
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// class ValueConstIterator
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+
+ValueConstIterator::ValueConstIterator() {}
+
+ValueConstIterator::ValueConstIterator(
+ const Value::ObjectValues::iterator& current)
+ : ValueIteratorBase(current) {}
+
+ValueConstIterator::ValueConstIterator(ValueIterator const& other)
+ : ValueIteratorBase(other) {}
+
+ValueConstIterator& ValueConstIterator::
+operator=(const ValueIteratorBase& other) {
+ copy(other);
+ return *this;
+}
+
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// class ValueIterator
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+
+ValueIterator::ValueIterator() {}
+
+ValueIterator::ValueIterator(const Value::ObjectValues::iterator& current)
+ : ValueIteratorBase(current) {}
+
+ValueIterator::ValueIterator(const ValueConstIterator& other)
+ : ValueIteratorBase(other) {
+ throwRuntimeError("ConstIterator to Iterator should never be allowed.");
+}
+
+ValueIterator::ValueIterator(const ValueIterator& other)
+ : ValueIteratorBase(other) {}
+
+ValueIterator& ValueIterator::operator=(const SelfType& other) {
+ copy(other);
+ return *this;
+}
+
+} // namespace Json
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: src/lib_json/json_valueiterator.inl
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: src/lib_json/json_value.cpp
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2011 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include <json/assertions.h>
+#include <json/value.h>
+#include <json/writer.h>
+#endif // if !defined(JSON_IS_AMALGAMATION)
+#include <math.h>
+#include <sstream>
+#include <utility>
+#include <cstring>
+#include <cassert>
+#ifdef JSON_USE_CPPTL
+#include <cpptl/conststring.h>
+#endif
+#include <cstddef> // size_t
+#include <algorithm> // min()
+
+#define JSON_ASSERT_UNREACHABLE assert(false)
+
+namespace Json {
+
+// This is a workaround to avoid the static initialization of Value::null.
+// kNull must be word-aligned to avoid crashing on ARM. We use an alignment of
+// 8 (instead of 4) as a bit of future-proofing.
+#if defined(__ARMEL__)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#else
+#define ALIGNAS(byte_alignment)
+#endif
+static const unsigned char ALIGNAS(8) kNull[sizeof(Value)] = { 0 };
+const unsigned char& kNullRef = kNull[0];
+const Value& Value::null = reinterpret_cast<const Value&>(kNullRef);
+const Value& Value::nullRef = null;
+
+const Int Value::minInt = Int(~(UInt(-1) / 2));
+const Int Value::maxInt = Int(UInt(-1) / 2);
+const UInt Value::maxUInt = UInt(-1);
+#if defined(JSON_HAS_INT64)
+const Int64 Value::minInt64 = Int64(~(UInt64(-1) / 2));
+const Int64 Value::maxInt64 = Int64(UInt64(-1) / 2);
+const UInt64 Value::maxUInt64 = UInt64(-1);
+// The constant is hard-coded because some compilers have trouble
+// converting Value::maxUInt64 to a double correctly (AIX/xlC).
+// Assumes that UInt64 is a 64-bit integer.
+static const double maxUInt64AsDouble = 18446744073709551615.0;
+#endif // defined(JSON_HAS_INT64)
+const LargestInt Value::minLargestInt = LargestInt(~(LargestUInt(-1) / 2));
+const LargestInt Value::maxLargestInt = LargestInt(LargestUInt(-1) / 2);
+const LargestUInt Value::maxLargestUInt = LargestUInt(-1);
+
+#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+template <typename T, typename U>
+static inline bool InRange(double d, T min, U max) {
+ // The casts can lose precision, but we are looking only for
+ // an approximate range. Might fail on edge cases though. ~cdunn
+ //return d >= static_cast<double>(min) && d <= static_cast<double>(max);
+ return d >= min && d <= max;
+}
+#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+static inline double integerToDouble(Json::UInt64 value) {
+ return static_cast<double>(Int64(value / 2)) * 2.0 + static_cast<double>(Int64(value & 1));
+}
+
+template <typename T> static inline double integerToDouble(T value) {
+ return static_cast<double>(value);
+}
+
+template <typename T, typename U>
+static inline bool InRange(double d, T min, U max) {
+ return d >= integerToDouble(min) && d <= integerToDouble(max);
+}
+#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
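+
+// Illustrative sketch (not part of jsoncpp): in the
+// JSON_USE_INT64_DOUBLE_CONVERSION branch above, integerToDouble() halves the
+// UInt64, converts, doubles, and adds the low bit back, because a direct
+// UInt64-to-double conversion is unreliable on some compilers (see the
+// maxUInt64AsDouble note earlier). For example:
+//   Json::UInt64 v = 0xFFFFFFFFFFFFFFFFULL;        // 2^64 - 1
+//   double d = integerToDouble(v);                 // ~1.8446744073709552e19
+//   bool fits = InRange(d, Value::minInt64, Value::maxInt64);   // false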
+
+/** Duplicates the specified string value.
+ * @param value Pointer to the string to duplicate. Must be zero-terminated if
+ * length is "unknown".
+ * @param length Length of the value. If equal to "unknown", it is
+ * computed using strlen(value).
+ * @return Pointer to the duplicated string.
+ */
+static inline char* duplicateStringValue(const char* value,
+ size_t length)
+{
+ // Avoid an integer overflow in the call to malloc below by limiting length
+ // to a sane value.
+ if (length >= static_cast<size_t>(Value::maxInt))
+ length = Value::maxInt - 1;
+
+ char* newString = static_cast<char*>(malloc(length + 1));
+ if (newString == NULL) {
+ throwRuntimeError(
+ "in Json::Value::duplicateStringValue(): "
+ "Failed to allocate string value buffer");
+ }
+ memcpy(newString, value, length);
+ newString[length] = 0;
+ return newString;
+}
+
+/* Record the length as a prefix.
+ */
+static inline char* duplicateAndPrefixStringValue(
+ const char* value,
+ unsigned int length)
+{
+ // Avoid an integer overflow in the call to malloc below by limiting length
+ // to a sane value.
+ JSON_ASSERT_MESSAGE(length <= static_cast<unsigned>(Value::maxInt) - sizeof(unsigned) - 1U,
+ "in Json::Value::duplicateAndPrefixStringValue(): "
+ "length too big for prefixing");
+ unsigned actualLength = length + static_cast<unsigned>(sizeof(unsigned)) + 1U;
+ char* newString = static_cast<char*>(malloc(actualLength));
+ if (newString == 0) {
+ throwRuntimeError(
+ "in Json::Value::duplicateAndPrefixStringValue(): "
+ "Failed to allocate string value buffer");
+ }
+ *reinterpret_cast<unsigned*>(newString) = length;
+ memcpy(newString + sizeof(unsigned), value, length);
+ newString[actualLength - 1U] = 0; // to avoid buffer over-run accidents by users later
+ return newString;
+}
+inline static void decodePrefixedString(
+ bool isPrefixed, char const* prefixed,
+ unsigned* length, char const** value)
+{
+ if (!isPrefixed) {
+ *length = static_cast<unsigned>(strlen(prefixed));
+ *value = prefixed;
+ } else {
+ *length = *reinterpret_cast<unsigned const*>(prefixed);
+ *value = prefixed + sizeof(unsigned);
+ }
+}
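+
+// Illustrative sketch (not part of jsoncpp): the "prefixed" layout produced by
+// duplicateAndPrefixStringValue() and read back by decodePrefixedString() is
+//   [ unsigned length ][ length raw bytes ][ '\0' ]
+// so strings containing embedded NUL bytes survive a round trip, e.g.:
+//   char* p = duplicateAndPrefixStringValue("a\0b", 3);
+//   unsigned len; char const* s;
+//   decodePrefixedString(true, p, &len, &s);   // len == 3, s -> the 3 bytes
+//   releasePrefixedStringValue(p);             // defined just below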
+/** Free the string duplicated by duplicateStringValue()/duplicateAndPrefixStringValue().
+ */
+#if JSONCPP_USING_SECURE_MEMORY
+static inline void releasePrefixedStringValue(char* value) {
+ unsigned length = 0;
+ char const* valueDecoded;
+ decodePrefixedString(true, value, &length, &valueDecoded);
+ size_t const size = sizeof(unsigned) + length + 1U;
+ memset(value, 0, size);
+ free(value);
+}
+static inline void releaseStringValue(char* value, unsigned length) {
+ // length==0 => the length was not recorded; fall back to strlen()
+ size_t size = (length==0) ? strlen(value) : length;
+ memset(value, 0, size);
+ free(value);
+}
+#else // !JSONCPP_USING_SECURE_MEMORY
+static inline void releasePrefixedStringValue(char* value) {
+ free(value);
+}
+static inline void releaseStringValue(char* value, unsigned) {
+ free(value);
+}
+#endif // JSONCPP_USING_SECURE_MEMORY
+
+} // namespace Json
+
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// ValueInternals...
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+#if !defined(JSON_IS_AMALGAMATION)
+
+#include "json_valueiterator.inl"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+
+namespace Json {
+
+Exception::Exception(JSONCPP_STRING const& msg)
+ : msg_(msg)
+{}
+Exception::~Exception() throw()
+{}
+char const* Exception::what() const throw()
+{
+ return msg_.c_str();
+}
+RuntimeError::RuntimeError(JSONCPP_STRING const& msg)
+ : Exception(msg)
+{}
+LogicError::LogicError(JSONCPP_STRING const& msg)
+ : Exception(msg)
+{}
+JSONCPP_NORETURN void throwRuntimeError(JSONCPP_STRING const& msg)
+{
+ throw RuntimeError(msg);
+}
+JSONCPP_NORETURN void throwLogicError(JSONCPP_STRING const& msg)
+{
+ throw LogicError(msg);
+}
+
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// class Value::CommentInfo
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+
+Value::CommentInfo::CommentInfo() : comment_(0)
+{}
+
+Value::CommentInfo::~CommentInfo() {
+ if (comment_)
+ releaseStringValue(comment_, 0u);
+}
+
+void Value::CommentInfo::setComment(const char* text, size_t len) {
+ if (comment_) {
+ releaseStringValue(comment_, 0u);
+ comment_ = 0;
+ }
+ JSON_ASSERT(text != 0);
+ JSON_ASSERT_MESSAGE(
+ text[0] == '\0' || text[0] == '/',
+ "in Json::Value::setComment(): Comments must start with /");
+ // It seems that /**/ style comments are acceptable as well.
+ comment_ = duplicateStringValue(text, len);
+}
+
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// class Value::CZString
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+
+// Note: when a string is stored, policy_ records whether it was duplicated
+// (and therefore must be freed) or refers to static storage.
+
+Value::CZString::CZString(ArrayIndex aindex) : cstr_(0), index_(aindex) {}
+
+Value::CZString::CZString(char const* str, unsigned ulength, DuplicationPolicy allocate)
+ : cstr_(str) {
+ // allocate != duplicate
+ storage_.policy_ = allocate & 0x3;
+ storage_.length_ = ulength & 0x3FFFFFFF;
+}
+
+Value::CZString::CZString(const CZString& other) {
+ cstr_ = (other.storage_.policy_ != noDuplication && other.cstr_ != 0
+ ? duplicateStringValue(other.cstr_, other.storage_.length_)
+ : other.cstr_);
+ storage_.policy_ = static_cast<unsigned>(other.cstr_
+ ? (static_cast<DuplicationPolicy>(other.storage_.policy_) == noDuplication
+ ? noDuplication : duplicate)
+ : static_cast<DuplicationPolicy>(other.storage_.policy_)) & 3U;
+ storage_.length_ = other.storage_.length_;
+}
+
+#if JSON_HAS_RVALUE_REFERENCES
+Value::CZString::CZString(CZString&& other)
+ : cstr_(other.cstr_), index_(other.index_) {
+ other.cstr_ = nullptr;
+}
+#endif
+
+Value::CZString::~CZString() {
+ if (cstr_ && storage_.policy_ == duplicate) {
+ releaseStringValue(const_cast<char*>(cstr_), storage_.length_ + 1u); // +1 for the null terminator; included for completeness, though not strictly necessary
+ }
+}
+
+void Value::CZString::swap(CZString& other) {
+ std::swap(cstr_, other.cstr_);
+ std::swap(index_, other.index_);
+}
+
+Value::CZString& Value::CZString::operator=(CZString other) {
+ swap(other);
+ return *this;
+}
+
+bool Value::CZString::operator<(const CZString& other) const {
+ if (!cstr_) return index_ < other.index_;
+ //return strcmp(cstr_, other.cstr_) < 0;
+ // Assume both are strings.
+ unsigned this_len = this->storage_.length_;
+ unsigned other_len = other.storage_.length_;
+ unsigned min_len = std::min(this_len, other_len);
+ JSON_ASSERT(this->cstr_ && other.cstr_);
+ int comp = memcmp(this->cstr_, other.cstr_, min_len);
+ if (comp < 0) return true;
+ if (comp > 0) return false;
+ return (this_len < other_len);
+}
+
+bool Value::CZString::operator==(const CZString& other) const {
+ if (!cstr_) return index_ == other.index_;
+ //return strcmp(cstr_, other.cstr_) == 0;
+ // Assume both are strings.
+ unsigned this_len = this->storage_.length_;
+ unsigned other_len = other.storage_.length_;
+ if (this_len != other_len) return false;
+ JSON_ASSERT(this->cstr_ && other.cstr_);
+ int comp = memcmp(this->cstr_, other.cstr_, this_len);
+ return comp == 0;
+}
+
+ArrayIndex Value::CZString::index() const { return index_; }
+
+//const char* Value::CZString::c_str() const { return cstr_; }
+const char* Value::CZString::data() const { return cstr_; }
+unsigned Value::CZString::length() const { return storage_.length_; }
+bool Value::CZString::isStaticString() const { return storage_.policy_ == noDuplication; }
+
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// class Value::Value
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////
+
+/*! \internal Default constructor initialization must be equivalent to:
+ * memset( this, 0, sizeof(Value) )
+ * This optimization is used in ValueInternalMap fast allocator.
+ */
+Value::Value(ValueType vtype) {
+ initBasic(vtype);
+ switch (vtype) {
+ case nullValue:
+ break;
+ case intValue:
+ case uintValue:
+ value_.int_ = 0;
+ break;
+ case realValue:
+ value_.real_ = 0.0;
+ break;
+ case stringValue:
+ value_.string_ = 0;
+ break;
+ case arrayValue:
+ case objectValue:
+ value_.map_ = new ObjectValues();
+ break;
+ case booleanValue:
+ value_.bool_ = false;
+ break;
+ default:
+ JSON_ASSERT_UNREACHABLE;
+ }
+}
+
+Value::Value(Int value) {
+ initBasic(intValue);
+ value_.int_ = value;
+}
+
+Value::Value(UInt value) {
+ initBasic(uintValue);
+ value_.uint_ = value;
+}
+#if defined(JSON_HAS_INT64)
+Value::Value(Int64 value) {
+ initBasic(intValue);
+ value_.int_ = value;
+}
+Value::Value(UInt64 value) {
+ initBasic(uintValue);
+ value_.uint_ = value;
+}
+#endif // defined(JSON_HAS_INT64)
+
+Value::Value(double value) {
+ initBasic(realValue);
+ value_.real_ = value;
+}
+
+Value::Value(const char* value) {
+ initBasic(stringValue, true);
+ value_.string_ = duplicateAndPrefixStringValue(value, static_cast<unsigned>(strlen(value)));
+}
+
+Value::Value(const char* beginValue, const char* endValue) {
+ initBasic(stringValue, true);
+ value_.string_ =
+ duplicateAndPrefixStringValue(beginValue, static_cast<unsigned>(endValue - beginValue));
+}
+
+Value::Value(const JSONCPP_STRING& value) {
+ initBasic(stringValue, true);
+ value_.string_ =
+ duplicateAndPrefixStringValue(value.data(), static_cast<unsigned>(value.length()));
+}
+
+Value::Value(const StaticString& value) {
+ initBasic(stringValue);
+ value_.string_ = const_cast<char*>(value.c_str());
+}
+
+#ifdef JSON_USE_CPPTL
+Value::Value(const CppTL::ConstString& value) {
+ initBasic(stringValue, true);
+ value_.string_ = duplicateAndPrefixStringValue(value, static_cast<unsigned>(value.length()));
+}
+#endif
+
+Value::Value(bool value) {
+ initBasic(booleanValue);
+ value_.bool_ = value;
+}
+
+Value::Value(Value const& other)
+ : type_(other.type_), allocated_(false),
+ comments_(0), start_(other.start_), limit_(other.limit_)
+{
+ switch (type_) {
+ case nullValue:
+ case intValue:
+ case uintValue:
+ case realValue:
+ case booleanValue:
+ value_ = other.value_;
+ break;
+ case stringValue:
+ if (other.value_.string_ && other.allocated_) {
+ unsigned len;
+ char const* str;
+ decodePrefixedString(other.allocated_, other.value_.string_,
+ &len, &str);
+ value_.string_ = duplicateAndPrefixStringValue(str, len);
+ allocated_ = true;
+ } else {
+ value_.string_ = other.value_.string_;
+ allocated_ = false;
+ }
+ break;
+ case arrayValue:
+ case objectValue:
+ value_.map_ = new ObjectValues(*other.value_.map_);
+ break;
+ default:
+ JSON_ASSERT_UNREACHABLE;
+ }
+ if (other.comments_) {
+ comments_ = new CommentInfo[numberOfCommentPlacement];
+ for (int comment = 0; comment < numberOfCommentPlacement; ++comment) {
+ const CommentInfo& otherComment = other.comments_[comment];
+ if (otherComment.comment_)
+ comments_[comment].setComment(
+ otherComment.comment_, strlen(otherComment.comment_));
+ }
+ }
+}
+
+#if JSON_HAS_RVALUE_REFERENCES
+// Move constructor
+Value::Value(Value&& other) {
+ initBasic(nullValue);
+ swap(other);
+}
+#endif
+
+Value::~Value() {
+ switch (type_) {
+ case nullValue:
+ case intValue:
+ case uintValue:
+ case realValue:
+ case booleanValue:
+ break;
+ case stringValue:
+ if (allocated_)
+ releasePrefixedStringValue(value_.string_);
+ break;
+ case arrayValue:
+ case objectValue:
+ delete value_.map_;
+ break;
+ default:
+ JSON_ASSERT_UNREACHABLE;
+ }
+
+ if (comments_)
+ delete[] comments_;
+
+ value_.uint_ = 0;
+}
+
+Value& Value::operator=(Value other) {
+ swap(other);
+ return *this;
+}
+
+void Value::swapPayload(Value& other) {
+ ValueType temp = type_;
+ type_ = other.type_;
+ other.type_ = temp;
+ std::swap(value_, other.value_);
+ int temp2 = allocated_;
+ allocated_ = other.allocated_;
+ other.allocated_ = temp2 & 0x1;
+}
+
+void Value::swap(Value& other) {
+ swapPayload(other);
+ std::swap(comments_, other.comments_);
+ std::swap(start_, other.start_);
+ std::swap(limit_, other.limit_);
+}
+
+ValueType Value::type() const { return type_; }
+
+int Value::compare(const Value& other) const {
+ if (*this < other)
+ return -1;
+ if (*this > other)
+ return 1;
+ return 0;
+}
+
+bool Value::operator<(const Value& other) const {
+ int typeDelta = type_ - other.type_;
+ if (typeDelta)
+ return typeDelta < 0 ? true : false;
+ switch (type_) {
+ case nullValue:
+ return false;
+ case intValue:
+ return value_.int_ < other.value_.int_;
+ case uintValue:
+ return value_.uint_ < other.value_.uint_;
+ case realValue:
+ return value_.real_ < other.value_.real_;
+ case booleanValue:
+ return value_.bool_ < other.value_.bool_;
+ case stringValue:
+ {
+ if ((value_.string_ == 0) || (other.value_.string_ == 0)) {
+ if (other.value_.string_) return true;
+ else return false;
+ }
+ unsigned this_len;
+ unsigned other_len;
+ char const* this_str;
+ char const* other_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ decodePrefixedString(other.allocated_, other.value_.string_, &other_len, &other_str);
+ unsigned min_len = std::min(this_len, other_len);
+ JSON_ASSERT(this_str && other_str);
+ int comp = memcmp(this_str, other_str, min_len);
+ if (comp < 0) return true;
+ if (comp > 0) return false;
+ return (this_len < other_len);
+ }
+ case arrayValue:
+ case objectValue: {
+ int delta = int(value_.map_->size() - other.value_.map_->size());
+ if (delta)
+ return delta < 0;
+ return (*value_.map_) < (*other.value_.map_);
+ }
+ default:
+ JSON_ASSERT_UNREACHABLE;
+ }
+ return false; // unreachable
+}
+
+bool Value::operator<=(const Value& other) const { return !(other < *this); }
+
+bool Value::operator>=(const Value& other) const { return !(*this < other); }
+
+bool Value::operator>(const Value& other) const { return other < *this; }
+
+bool Value::operator==(const Value& other) const {
+ // if ( type_ != other.type_ )
+ // GCC 2.95.3 says:
+ // attempt to take address of bit-field structure member `Json::Value::type_'
+ // Beats me, but a temp solves the problem.
+ int temp = other.type_;
+ if (type_ != temp)
+ return false;
+ switch (type_) {
+ case nullValue:
+ return true;
+ case intValue:
+ return value_.int_ == other.value_.int_;
+ case uintValue:
+ return value_.uint_ == other.value_.uint_;
+ case realValue:
+ return value_.real_ == other.value_.real_;
+ case booleanValue:
+ return value_.bool_ == other.value_.bool_;
+ case stringValue:
+ {
+ if ((value_.string_ == 0) || (other.value_.string_ == 0)) {
+ return (value_.string_ == other.value_.string_);
+ }
+ unsigned this_len;
+ unsigned other_len;
+ char const* this_str;
+ char const* other_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ decodePrefixedString(other.allocated_, other.value_.string_, &other_len, &other_str);
+ if (this_len != other_len) return false;
+ JSON_ASSERT(this_str && other_str);
+ int comp = memcmp(this_str, other_str, this_len);
+ return comp == 0;
+ }
+ case arrayValue:
+ case objectValue:
+ return value_.map_->size() == other.value_.map_->size() &&
+ (*value_.map_) == (*other.value_.map_);
+ default:
+ JSON_ASSERT_UNREACHABLE;
+ }
+ return false; // unreachable
+}
+
+bool Value::operator!=(const Value& other) const { return !(*this == other); }
+
+const char* Value::asCString() const {
+ JSON_ASSERT_MESSAGE(type_ == stringValue,
+ "in Json::Value::asCString(): requires stringValue");
+ if (value_.string_ == 0) return 0;
+ unsigned this_len;
+ char const* this_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ return this_str;
+}
+
+#if JSONCPP_USING_SECURE_MEMORY
+unsigned Value::getCStringLength() const {
+ JSON_ASSERT_MESSAGE(type_ == stringValue,
+ "in Json::Value::asCString(): requires stringValue");
+ if (value_.string_ == 0) return 0;
+ unsigned this_len;
+ char const* this_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ return this_len;
+}
+#endif
+
+bool Value::getString(char const** str, char const** cend) const {
+ if (type_ != stringValue) return false;
+ if (value_.string_ == 0) return false;
+ unsigned length;
+ decodePrefixedString(this->allocated_, this->value_.string_, &length, str);
+ *cend = *str + length;
+ return true;
+}
+
+JSONCPP_STRING Value::asString() const {
+ switch (type_) {
+ case nullValue:
+ return "";
+ case stringValue:
+ {
+ if (value_.string_ == 0) return "";
+ unsigned this_len;
+ char const* this_str;
+ decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str);
+ return JSONCPP_STRING(this_str, this_len);
+ }
+ case booleanValue:
+ return value_.bool_ ? "true" : "false";
+ case intValue:
+ return valueToString(value_.int_);
+ case uintValue:
+ return valueToString(value_.uint_);
+ case realValue:
+ return valueToString(value_.real_);
+ default:
+ JSON_FAIL_MESSAGE("Type is not convertible to string");
+ }
+}
+
+#ifdef JSON_USE_CPPTL
+CppTL::ConstString Value::asConstString() const {
+ unsigned len;
+ char const* str;
+ decodePrefixedString(allocated_, value_.string_,
+ &len, &str);
+ return CppTL::ConstString(str, len);
+}
+#endif
+
+Value::Int Value::asInt() const {
+ switch (type_) {
+ case intValue:
+ JSON_ASSERT_MESSAGE(isInt(), "LargestInt out of Int range");
+ return Int(value_.int_);
+ case uintValue:
+ JSON_ASSERT_MESSAGE(isInt(), "LargestUInt out of Int range");
+ return Int(value_.uint_);
+ case realValue:
+ JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt, maxInt),
+ "double out of Int range");
+ return Int(value_.real_);
+ case nullValue:
+ return 0;
+ case booleanValue:
+ return value_.bool_ ? 1 : 0;
+ default:
+ break;
+ }
+ JSON_FAIL_MESSAGE("Value is not convertible to Int.");
+}
+
+Value::UInt Value::asUInt() const {
+ switch (type_) {
+ case intValue:
+ JSON_ASSERT_MESSAGE(isUInt(), "LargestInt out of UInt range");
+ return UInt(value_.int_);
+ case uintValue:
+ JSON_ASSERT_MESSAGE(isUInt(), "LargestUInt out of UInt range");
+ return UInt(value_.uint_);
+ case realValue:
+ JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt),
+ "double out of UInt range");
+ return UInt(value_.real_);
+ case nullValue:
+ return 0;
+ case booleanValue:
+ return value_.bool_ ? 1 : 0;
+ default:
+ break;
+ }
+ JSON_FAIL_MESSAGE("Value is not convertible to UInt.");
+}
+
+#if defined(JSON_HAS_INT64)
+
+Value::Int64 Value::asInt64() const {
+ switch (type_) {
+ case intValue:
+ return Int64(value_.int_);
+ case uintValue:
+ JSON_ASSERT_MESSAGE(isInt64(), "LargestUInt out of Int64 range");
+ return Int64(value_.uint_);
+ case realValue:
+ JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt64, maxInt64),
+ "double out of Int64 range");
+ return Int64(value_.real_);
+ case nullValue:
+ return 0;
+ case booleanValue:
+ return value_.bool_ ? 1 : 0;
+ default:
+ break;
+ }
+ JSON_FAIL_MESSAGE("Value is not convertible to Int64.");
+}
+
+Value::UInt64 Value::asUInt64() const {
+ switch (type_) {
+ case intValue:
+ JSON_ASSERT_MESSAGE(isUInt64(), "LargestInt out of UInt64 range");
+ return UInt64(value_.int_);
+ case uintValue:
+ return UInt64(value_.uint_);
+ case realValue:
+ JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt64),
+ "double out of UInt64 range");
+ return UInt64(value_.real_);
+ case nullValue:
+ return 0;
+ case booleanValue:
+ return value_.bool_ ? 1 : 0;
+ default:
+ break;
+ }
+ JSON_FAIL_MESSAGE("Value is not convertible to UInt64.");
+}
+#endif // if defined(JSON_HAS_INT64)
+
+LargestInt Value::asLargestInt() const {
+#if defined(JSON_NO_INT64)
+ return asInt();
+#else
+ return asInt64();
+#endif
+}
+
+LargestUInt Value::asLargestUInt() const {
+#if defined(JSON_NO_INT64)
+ return asUInt();
+#else
+ return asUInt64();
+#endif
+}
+
+double Value::asDouble() const {
+ switch (type_) {
+ case intValue:
+ return static_cast<double>(value_.int_);
+ case uintValue:
+#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+ return static_cast<double>(value_.uint_);
+#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+ return integerToDouble(value_.uint_);
+#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+ case realValue:
+ return value_.real_;
+ case nullValue:
+ return 0.0;
+ case booleanValue:
+ return value_.bool_ ? 1.0 : 0.0;
+ default:
+ break;
+ }
+ JSON_FAIL_MESSAGE("Value is not convertible to double.");
+}
+
+float Value::asFloat() const {
+ switch (type_) {
+ case intValue:
+ return static_cast<float>(value_.int_);
+ case uintValue:
+#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+ return static_cast<float>(value_.uint_);
+#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+ // This can fail (silently?) if the value is bigger than MAX_FLOAT.
+ return static_cast<float>(integerToDouble(value_.uint_));
+#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+ case realValue:
+ return static_cast<float>(value_.real_);
+ case nullValue:
+ return 0.0;
+ case booleanValue:
+ return value_.bool_ ? 1.0f : 0.0f;
+ default:
+ break;
+ }
+ JSON_FAIL_MESSAGE("Value is not convertible to float.");
+}
+
+bool Value::asBool() const {
+ switch (type_) {
+ case booleanValue:
+ return value_.bool_;
+ case nullValue:
+ return false;
+ case intValue:
+ return value_.int_ ? true : false;
+ case uintValue:
+ return value_.uint_ ? true : false;
+ case realValue:
+ // This is kind of strange. Not recommended.
+ return (value_.real_ != 0.0) ? true : false;
+ default:
+ break;
+ }
+ JSON_FAIL_MESSAGE("Value is not convertible to bool.");
+}
+
+bool Value::isConvertibleTo(ValueType other) const {
+ switch (other) {
+ case nullValue:
+ return (isNumeric() && asDouble() == 0.0) ||
+ (type_ == booleanValue && value_.bool_ == false) ||
+ (type_ == stringValue && asString() == "") ||
+ (type_ == arrayValue && value_.map_->size() == 0) ||
+ (type_ == objectValue && value_.map_->size() == 0) ||
+ type_ == nullValue;
+ case intValue:
+ return isInt() ||
+ (type_ == realValue && InRange(value_.real_, minInt, maxInt)) ||
+ type_ == booleanValue || type_ == nullValue;
+ case uintValue:
+ return isUInt() ||
+ (type_ == realValue && InRange(value_.real_, 0, maxUInt)) ||
+ type_ == booleanValue || type_ == nullValue;
+ case realValue:
+ return isNumeric() || type_ == booleanValue || type_ == nullValue;
+ case booleanValue:
+ return isNumeric() || type_ == booleanValue || type_ == nullValue;
+ case stringValue:
+ return isNumeric() || type_ == booleanValue || type_ == stringValue ||
+ type_ == nullValue;
+ case arrayValue:
+ return type_ == arrayValue || type_ == nullValue;
+ case objectValue:
+ return type_ == objectValue || type_ == nullValue;
+ }
+ JSON_ASSERT_UNREACHABLE;
+ return false;
+}
+
+/// Number of values in array or object
+ArrayIndex Value::size() const {
+ switch (type_) {
+ case nullValue:
+ case intValue:
+ case uintValue:
+ case realValue:
+ case booleanValue:
+ case stringValue:
+ return 0;
+ case arrayValue: // size of the array is highest index + 1
+ if (!value_.map_->empty()) {
+ ObjectValues::const_iterator itLast = value_.map_->end();
+ --itLast;
+ return (*itLast).first.index() + 1;
+ }
+ return 0;
+ case objectValue:
+ return ArrayIndex(value_.map_->size());
+ }
+ JSON_ASSERT_UNREACHABLE;
+ return 0; // unreachable;
+}
+
+bool Value::empty() const {
+ if (isNull() || isArray() || isObject())
+ return size() == 0u;
+ else
+ return false;
+}
+
+bool Value::operator!() const { return isNull(); }
+
+void Value::clear() {
+ JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == arrayValue ||
+ type_ == objectValue,
+ "in Json::Value::clear(): requires complex value");
+ start_ = 0;
+ limit_ = 0;
+ switch (type_) {
+ case arrayValue:
+ case objectValue:
+ value_.map_->clear();
+ break;
+ default:
+ break;
+ }
+}
+
+void Value::resize(ArrayIndex newSize) {
+ JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == arrayValue,
+ "in Json::Value::resize(): requires arrayValue");
+ if (type_ == nullValue)
+ *this = Value(arrayValue);
+ ArrayIndex oldSize = size();
+ if (newSize == 0)
+ clear();
+ else if (newSize > oldSize)
+ (*this)[newSize - 1];
+ else {
+ for (ArrayIndex index = newSize; index < oldSize; ++index) {
+ value_.map_->erase(index);
+ }
+ JSON_ASSERT(size() == newSize);
+ }
+}
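+
+// Illustrative sketch (not part of jsoncpp): resize() grows an array by
+// touching the last index (intermediate indices are created lazily on first
+// access) and shrinks it by erasing trailing indices, e.g.:
+//   Json::Value arr(Json::arrayValue);
+//   arr.resize(3);    // size() == 3; only index 2 exists in the map so far
+//   arr[1] = 42;      // index 1 is created on first access
+//   arr.resize(2);    // drops index 2; size() == 2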
+
+Value& Value::operator[](ArrayIndex index) {
+ JSON_ASSERT_MESSAGE(
+ type_ == nullValue || type_ == arrayValue,
+ "in Json::Value::operator[](ArrayIndex): requires arrayValue");
+ if (type_ == nullValue)
+ *this = Value(arrayValue);
+ CZString key(index);
+ ObjectValues::iterator it = value_.map_->lower_bound(key);
+ if (it != value_.map_->end() && (*it).first == key)
+ return (*it).second;
+
+ ObjectValues::value_type defaultValue(key, nullRef);
+ it = value_.map_->insert(it, defaultValue);
+ return (*it).second;
+}
+
+Value& Value::operator[](int index) {
+ JSON_ASSERT_MESSAGE(
+ index >= 0,
+ "in Json::Value::operator[](int index): index cannot be negative");
+ return (*this)[ArrayIndex(index)];
+}
+
+const Value& Value::operator[](ArrayIndex index) const {
+ JSON_ASSERT_MESSAGE(
+ type_ == nullValue || type_ == arrayValue,
+ "in Json::Value::operator[](ArrayIndex)const: requires arrayValue");
+ if (type_ == nullValue)
+ return nullRef;
+ CZString key(index);
+ ObjectValues::const_iterator it = value_.map_->find(key);
+ if (it == value_.map_->end())
+ return nullRef;
+ return (*it).second;
+}
+
+const Value& Value::operator[](int index) const {
+ JSON_ASSERT_MESSAGE(
+ index >= 0,
+ "in Json::Value::operator[](int index) const: index cannot be negative");
+ return (*this)[ArrayIndex(index)];
+}
+
+void Value::initBasic(ValueType vtype, bool allocated) {
+ type_ = vtype;
+ allocated_ = allocated;
+ comments_ = 0;
+ start_ = 0;
+ limit_ = 0;
+}
+
+// Access an object value by name, create a null member if it does not exist.
+// @pre Type of '*this' is object or null.
+// @param key is null-terminated.
+Value& Value::resolveReference(const char* key) {
+ JSON_ASSERT_MESSAGE(
+ type_ == nullValue || type_ == objectValue,
+ "in Json::Value::resolveReference(): requires objectValue");
+ if (type_ == nullValue)
+ *this = Value(objectValue);
+ CZString actualKey(
+ key, static_cast<unsigned>(strlen(key)), CZString::noDuplication); // NOTE!
+ ObjectValues::iterator it = value_.map_->lower_bound(actualKey);
+ if (it != value_.map_->end() && (*it).first == actualKey)
+ return (*it).second;
+
+ ObjectValues::value_type defaultValue(actualKey, nullRef);
+ it = value_.map_->insert(it, defaultValue);
+ Value& value = (*it).second;
+ return value;
+}
+
+// @param key is not null-terminated.
+Value& Value::resolveReference(char const* key, char const* cend)
+{
+ JSON_ASSERT_MESSAGE(
+ type_ == nullValue || type_ == objectValue,
+ "in Json::Value::resolveReference(key, end): requires objectValue");
+ if (type_ == nullValue)
+ *this = Value(objectValue);
+ CZString actualKey(
+ key, static_cast<unsigned>(cend-key), CZString::duplicateOnCopy);
+ ObjectValues::iterator it = value_.map_->lower_bound(actualKey);
+ if (it != value_.map_->end() && (*it).first == actualKey)
+ return (*it).second;
+
+ ObjectValues::value_type defaultValue(actualKey, nullRef);
+ it = value_.map_->insert(it, defaultValue);
+ Value& value = (*it).second;
+ return value;
+}
+
+Value Value::get(ArrayIndex index, const Value& defaultValue) const {
+ const Value* value = &((*this)[index]);
+ return value == &nullRef ? defaultValue : *value;
+}
+
+bool Value::isValidIndex(ArrayIndex index) const { return index < size(); }
+
+Value const* Value::find(char const* key, char const* cend) const
+{
+ JSON_ASSERT_MESSAGE(
+ type_ == nullValue || type_ == objectValue,
+ "in Json::Value::find(key, end, found): requires objectValue or nullValue");
+ if (type_ == nullValue) return NULL;
+ CZString actualKey(key, static_cast<unsigned>(cend-key), CZString::noDuplication);
+ ObjectValues::const_iterator it = value_.map_->find(actualKey);
+ if (it == value_.map_->end()) return NULL;
+ return &(*it).second;
+}
+const Value& Value::operator[](const char* key) const
+{
+ Value const* found = find(key, key + strlen(key));
+ if (!found) return nullRef;
+ return *found;
+}
+Value const& Value::operator[](JSONCPP_STRING const& key) const
+{
+ Value const* found = find(key.data(), key.data() + key.length());
+ if (!found) return nullRef;
+ return *found;
+}
+
+Value& Value::operator[](const char* key) {
+ return resolveReference(key, key + strlen(key));
+}
+
+Value& Value::operator[](const JSONCPP_STRING& key) {
+ return resolveReference(key.data(), key.data() + key.length());
+}
+
+Value& Value::operator[](const StaticString& key) {
+ return resolveReference(key.c_str());
+}
+
+#ifdef JSON_USE_CPPTL
+Value& Value::operator[](const CppTL::ConstString& key) {
+ return resolveReference(key.c_str(), key.end_c_str());
+}
+Value const& Value::operator[](CppTL::ConstString const& key) const
+{
+ Value const* found = find(key.c_str(), key.end_c_str());
+ if (!found) return nullRef;
+ return *found;
+}
+#endif
+
+Value& Value::append(const Value& value) { return (*this)[size()] = value; }
+
+Value Value::get(char const* key, char const* cend, Value const& defaultValue) const
+{
+ Value const* found = find(key, cend);
+ return !found ? defaultValue : *found;
+}
+Value Value::get(char const* key, Value const& defaultValue) const
+{
+ return get(key, key + strlen(key), defaultValue);
+}
+Value Value::get(JSONCPP_STRING const& key, Value const& defaultValue) const
+{
+ return get(key.data(), key.data() + key.length(), defaultValue);
+}
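+
+// Illustrative sketch (not part of jsoncpp, example keys are made up):
+// non-const operator[] creates a null member for a missing key (via
+// resolveReference), while get(), find() and the const overloads only look
+// the key up:
+//   Json::Value obj(Json::objectValue);
+//   Json::Value port = obj.get("port", 8080);   // 8080; "port" is NOT inserted
+//   obj["port"] = 8080;                         // inserts the member
+//   bool has = obj.isMember("port");            // true (isMember is below)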
+
+
+bool Value::removeMember(const char* key, const char* cend, Value* removed)
+{
+ if (type_ != objectValue) {
+ return false;
+ }
+ CZString actualKey(key, static_cast<unsigned>(cend-key), CZString::noDuplication);
+ ObjectValues::iterator it = value_.map_->find(actualKey);
+ if (it == value_.map_->end())
+ return false;
+ *removed = it->second;
+ value_.map_->erase(it);
+ return true;
+}
+bool Value::removeMember(const char* key, Value* removed)
+{
+ return removeMember(key, key + strlen(key), removed);
+}
+bool Value::removeMember(JSONCPP_STRING const& key, Value* removed)
+{
+ return removeMember(key.data(), key.data() + key.length(), removed);
+}
+Value Value::removeMember(const char* key)
+{
+ JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == objectValue,
+ "in Json::Value::removeMember(): requires objectValue");
+ if (type_ == nullValue)
+ return nullRef;
+
+ Value removed; // null
+ removeMember(key, key + strlen(key), &removed);
+ return removed; // still null if removeMember() did nothing
+}
+Value Value::removeMember(const JSONCPP_STRING& key)
+{
+ return removeMember(key.c_str());
+}
+
+bool Value::removeIndex(ArrayIndex index, Value* removed) {
+ if (type_ != arrayValue) {
+ return false;
+ }
+ CZString key(index);
+ ObjectValues::iterator it = value_.map_->find(key);
+ if (it == value_.map_->end()) {
+ return false;
+ }
+ *removed = it->second;
+ ArrayIndex oldSize = size();
+ // shift all following items one slot to the left, into the place of the removed one
+ for (ArrayIndex i = index; i < (oldSize - 1); ++i){
+ CZString keey(i);
+ (*value_.map_)[keey] = (*this)[i + 1];
+ }
+ // erase the last one ("leftover")
+ CZString keyLast(oldSize - 1);
+ ObjectValues::iterator itLast = value_.map_->find(keyLast);
+ value_.map_->erase(itLast);
+ return true;
+}
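+
+// Illustrative sketch (not part of jsoncpp): removeIndex() keeps array indices
+// contiguous by shifting later elements down before erasing the last slot:
+//   Json::Value arr(Json::arrayValue);
+//   arr.append("a"); arr.append("b"); arr.append("c");
+//   Json::Value removed;
+//   arr.removeIndex(1, &removed);   // removed == "b", arr is now ["a", "c"]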
+
+#ifdef JSON_USE_CPPTL
+Value Value::get(const CppTL::ConstString& key,
+ const Value& defaultValue) const {
+ return get(key.c_str(), key.end_c_str(), defaultValue);
+}
+#endif
+
+bool Value::isMember(char const* key, char const* cend) const
+{
+ Value const* value = find(key, cend);
+ return NULL != value;
+}
+bool Value::isMember(char const* key) const
+{
+ return isMember(key, key + strlen(key));
+}
+bool Value::isMember(JSONCPP_STRING const& key) const
+{
+ return isMember(key.data(), key.data() + key.length());
+}
+
+#ifdef JSON_USE_CPPTL
+bool Value::isMember(const CppTL::ConstString& key) const {
+ return isMember(key.c_str(), key.end_c_str());
+}
+#endif
+
+Value::Members Value::getMemberNames() const {
+ JSON_ASSERT_MESSAGE(
+ type_ == nullValue || type_ == objectValue,
+ "in Json::Value::getMemberNames(), value must be objectValue");
+ if (type_ == nullValue)
+ return Value::Members();
+ Members members;
+ members.reserve(value_.map_->size());
+ ObjectValues::const_iterator it = value_.map_->begin();
+ ObjectValues::const_iterator itEnd = value_.map_->end();
+ for (; it != itEnd; ++it) {
+ members.push_back(JSONCPP_STRING((*it).first.data(),
+ (*it).first.length()));
+ }
+ return members;
+}
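+
+// Illustrative sketch (not part of jsoncpp, example keys are made up):
+// getMemberNames() returns a snapshot of the object's keys, ordered by the
+// CZString comparison above:
+//   Json::Value obj;
+//   obj["host"] = "localhost";
+//   obj["port"] = 8080;
+//   Json::Value::Members keys = obj.getMemberNames();   // {"host", "port"}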
+//
+//# ifdef JSON_USE_CPPTL
+// EnumMemberNames
+// Value::enumMemberNames() const
+//{
+// if ( type_ == objectValue )
+// {
+// return CppTL::Enum::any( CppTL::Enum::transform(
+// CppTL::Enum::keys( *(value_.map_), CppTL::Type<const CZString &>() ),
+// MemberNamesTransform() ) );
+// }
+// return EnumMemberNames();
+//}
+//
+//
+// EnumValues
+// Value::enumValues() const
+//{
+// if ( type_ == objectValue || type_ == arrayValue )
+// return CppTL::Enum::anyValues( *(value_.map_),
+// CppTL::Type<const Value &>() );
+// return EnumValues();
+//}
+//
+//# endif
+
+static bool IsIntegral(double d) {
+ double integral_part;
+ return modf(d, &integral_part) == 0.0;
+}
+
+bool Value::isNull() const { return type_ == nullValue; }
+
+bool Value::isBool() const { return type_ == booleanValue; }
+
+bool Value::isInt() const {
+ switch (type_) {
+ case intValue:
+ return value_.int_ >= minInt && value_.int_ <= maxInt;
+ case uintValue:
+ return value_.uint_ <= UInt(maxInt);
+ case realValue:
+ return value_.real_ >= minInt && value_.real_ <= maxInt &&
+ IsIntegral(value_.real_);
+ default:
+ break;
+ }
+ return false;
+}
+
+bool Value::isUInt() const {
+ switch (type_) {
+ case intValue:
+ return value_.int_ >= 0 && LargestUInt(value_.int_) <= LargestUInt(maxUInt);
+ case uintValue:
+ return value_.uint_ <= maxUInt;
+ case realValue:
+ return value_.real_ >= 0 && value_.real_ <= maxUInt &&
+ IsIntegral(value_.real_);
+ default:
+ break;
+ }
+ return false;
+}
+
+bool Value::isInt64() const {
+#if defined(JSON_HAS_INT64)
+ switch (type_) {
+ case intValue:
+ return true;
+ case uintValue:
+ return value_.uint_ <= UInt64(maxInt64);
+ case realValue:
+ // Note that maxInt64 (= 2^63 - 1) is not exactly representable as a
+ // double, so double(maxInt64) will be rounded up to 2^63. Therefore we
+ // require the value to be strictly less than the limit.
+ return value_.real_ >= double(minInt64) &&
+ value_.real_ < double(maxInt64) && IsIntegral(value_.real_);
+ default:
+ break;
+ }
+#endif // JSON_HAS_INT64
+ return false;
+}
+
+bool Value::isUInt64() const {
+#if defined(JSON_HAS_INT64)
+ switch (type_) {
+ case intValue:
+ return value_.int_ >= 0;
+ case uintValue:
+ return true;
+ case realValue:
+ // Note that maxUInt64 (= 2^64 - 1) is not exactly representable as a
+ // double, so double(maxUInt64) will be rounded up to 2^64. Therefore we
+ // require the value to be strictly less than the limit.
+ return value_.real_ >= 0 && value_.real_ < maxUInt64AsDouble &&
+ IsIntegral(value_.real_);
+ default:
+ break;
+ }
+#endif // JSON_HAS_INT64
+ return false;
+}
+
+bool Value::isIntegral() const {
+#if defined(JSON_HAS_INT64)
+ return isInt64() || isUInt64();
+#else
+ return isInt() || isUInt();
+#endif
+}
+
+bool Value::isDouble() const { return type_ == realValue || isIntegral(); }
+
+bool Value::isNumeric() const { return isIntegral() || isDouble(); }
+
+bool Value::isString() const { return type_ == stringValue; }
+
+bool Value::isArray() const { return type_ == arrayValue; }
+
+bool Value::isObject() const { return type_ == objectValue; }
+
+void Value::setComment(const char* comment, size_t len, CommentPlacement placement) {
+ if (!comments_)
+ comments_ = new CommentInfo[numberOfCommentPlacement];
+ if ((len > 0) && (comment[len-1] == '\n')) {
+ // Always discard trailing newline, to aid indentation.
+ len -= 1;
+ }
+ comments_[placement].setComment(comment, len);
+}
+
+void Value::setComment(const char* comment, CommentPlacement placement) {
+ setComment(comment, strlen(comment), placement);
+}
+
+void Value::setComment(const JSONCPP_STRING& comment, CommentPlacement placement) {
+ setComment(comment.c_str(), comment.length(), placement);
+}
+
+bool Value::hasComment(CommentPlacement placement) const {
+ return comments_ != 0 && comments_[placement].comment_ != 0;
+}
+
+JSONCPP_STRING Value::getComment(CommentPlacement placement) const {
+ if (hasComment(placement))
+ return comments_[placement].comment_;
+ return "";
+}
+
+void Value::setOffsetStart(ptrdiff_t start) { start_ = start; }
+
+void Value::setOffsetLimit(ptrdiff_t limit) { limit_ = limit; }
+
+ptrdiff_t Value::getOffsetStart() const { return start_; }
+
+ptrdiff_t Value::getOffsetLimit() const { return limit_; }
+
+JSONCPP_STRING Value::toStyledString() const {
+ StyledWriter writer;
+ return writer.write(*this);
+}
+
+Value::const_iterator Value::begin() const {
+ switch (type_) {
+ case arrayValue:
+ case objectValue:
+ if (value_.map_)
+ return const_iterator(value_.map_->begin());
+ break;
+ default:
+ break;
+ }
+ return const_iterator();
+}
+
+Value::const_iterator Value::end() const {
+ switch (type_) {
+ case arrayValue:
+ case objectValue:
+ if (value_.map_)
+ return const_iterator(value_.map_->end());
+ break;
+ default:
+ break;
+ }
+ return const_iterator();
+}
+
+Value::iterator Value::begin() {
+ switch (type_) {
+ case arrayValue:
+ case objectValue:
+ if (value_.map_)
+ return iterator(value_.map_->begin());
+ break;
+ default:
+ break;
+ }
+ return iterator();
+}
+
+Value::iterator Value::end() {
+ switch (type_) {
+ case arrayValue:
+ case objectValue:
+ if (value_.map_)
+ return iterator(value_.map_->end());
+ break;
+ default:
+ break;
+ }
+ return iterator();
+}
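+
+// Illustrative sketch (not part of jsoncpp): begin()/end() iterate both arrays
+// and objects; for object members the iterator exposes the key via name():
+//   for (Json::Value::const_iterator it = obj.begin(); it != obj.end(); ++it)
+//     std::cout << it.name() << ": " << (*it).asString() << "\n";
+// (assumes obj is an object whose members are all strings)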
+
+// class PathArgument
+// //////////////////////////////////////////////////////////////////
+
+PathArgument::PathArgument() : key_(), index_(), kind_(kindNone) {}
+
+PathArgument::PathArgument(ArrayIndex index)
+ : key_(), index_(index), kind_(kindIndex) {}
+
+PathArgument::PathArgument(const char* key)
+ : key_(key), index_(), kind_(kindKey) {}
+
+PathArgument::PathArgument(const JSONCPP_STRING& key)
+ : key_(key.c_str()), index_(), kind_(kindKey) {}
+
+// class Path
+// //////////////////////////////////////////////////////////////////
+
+Path::Path(const JSONCPP_STRING& path,
+ const PathArgument& a1,
+ const PathArgument& a2,
+ const PathArgument& a3,
+ const PathArgument& a4,
+ const PathArgument& a5) {
+ InArgs in;
+ in.push_back(&a1);
+ in.push_back(&a2);
+ in.push_back(&a3);
+ in.push_back(&a4);
+ in.push_back(&a5);
+ makePath(path, in);
+}
+
+void Path::makePath(const JSONCPP_STRING& path, const InArgs& in) {
+ const char* current = path.c_str();
+ const char* end = current + path.length();
+ InArgs::const_iterator itInArg = in.begin();
+ while (current != end) {
+ if (*current == '[') {
+ ++current;
+ if (*current == '%')
+ addPathInArg(path, in, itInArg, PathArgument::kindIndex);
+ else {
+ ArrayIndex index = 0;
+ for (; current != end && *current >= '0' && *current <= '9'; ++current)
+ index = index * 10 + ArrayIndex(*current - '0');
+ args_.push_back(index);
+ }
+ if (current == end || *current++ != ']')
+ invalidPath(path, int(current - path.c_str()));
+ } else if (*current == '%') {
+ addPathInArg(path, in, itInArg, PathArgument::kindKey);
+ ++current;
+ } else if (*current == '.') {
+ ++current;
+ } else {
+ const char* beginName = current;
+ while (current != end && !strchr("[.", *current))
+ ++current;
+ args_.push_back(JSONCPP_STRING(beginName, current));
+ }
+ }
+}
+
+void Path::addPathInArg(const JSONCPP_STRING& /*path*/,
+ const InArgs& in,
+ InArgs::const_iterator& itInArg,
+ PathArgument::Kind kind) {
+ if (itInArg == in.end()) {
+ // Error: missing argument %d
+ } else if ((*itInArg)->kind_ != kind) {
+ // Error: bad argument type
+ } else {
+ args_.push_back(**itInArg);
+ }
+}
+
+void Path::invalidPath(const JSONCPP_STRING& /*path*/, int /*location*/) {
+ // Error: invalid path.
+}
+
+const Value& Path::resolve(const Value& root) const {
+ const Value* node = &root;
+ for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it) {
+ const PathArgument& arg = *it;
+ if (arg.kind_ == PathArgument::kindIndex) {
+ if (!node->isArray() || !node->isValidIndex(arg.index_)) {
+ // Error: unable to resolve path (array value expected at position...
+ }
+ node = &((*node)[arg.index_]);
+ } else if (arg.kind_ == PathArgument::kindKey) {
+ if (!node->isObject()) {
+ // Error: unable to resolve path (object value expected at position...)
+ }
+ node = &((*node)[arg.key_]);
+ if (node == &Value::nullRef) {
+ // Error: unable to resolve path (object has no member named '' at
+ // position...)
+ }
+ }
+ }
+ return *node;
+}
+
+Value Path::resolve(const Value& root, const Value& defaultValue) const {
+ const Value* node = &root;
+ for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it) {
+ const PathArgument& arg = *it;
+ if (arg.kind_ == PathArgument::kindIndex) {
+ if (!node->isArray() || !node->isValidIndex(arg.index_))
+ return defaultValue;
+ node = &((*node)[arg.index_]);
+ } else if (arg.kind_ == PathArgument::kindKey) {
+ if (!node->isObject())
+ return defaultValue;
+ node = &((*node)[arg.key_]);
+ if (node == &Value::nullRef)
+ return defaultValue;
+ }
+ }
+ return *node;
+}
+
+Value& Path::make(Value& root) const {
+ Value* node = &root;
+ for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it) {
+ const PathArgument& arg = *it;
+ if (arg.kind_ == PathArgument::kindIndex) {
+ if (!node->isArray()) {
+ // Error: node is not an array at position ...
+ }
+ node = &((*node)[arg.index_]);
+ } else if (arg.kind_ == PathArgument::kindKey) {
+ if (!node->isObject()) {
+ // Error: node is not an object at position...
+ }
+ node = &((*node)[arg.key_]);
+ }
+ }
+ return *node;
+}
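+
+// Illustrative sketch (not part of jsoncpp, example keys are made up): a Path
+// selects a nested value, using '.' for object members and '[n]' for array
+// indices:
+//   Json::Value root;
+//   root["servers"][0u]["host"] = "localhost";
+//   Json::Path path(".servers[0].host");
+//   Json::Value host = path.resolve(root, "unknown");   // "localhost"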
+
+} // namespace Json
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: src/lib_json/json_value.cpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+// //////////////////////////////////////////////////////////////////////
+// Beginning of content of file: src/lib_json/json_writer.cpp
+// //////////////////////////////////////////////////////////////////////
+
+// Copyright 2011 Baptiste Lepilleur
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include <json/writer.h>
+#include "json_tool.h"
+#endif // if !defined(JSON_IS_AMALGAMATION)
+#include <iomanip>
+#include <memory>
+#include <sstream>
+#include <utility>
+#include <set>
+#include <cassert>
+#include <cstring>
+#include <cstdio>
+
+#if defined(_MSC_VER) && _MSC_VER >= 1200 && _MSC_VER < 1800 // Between VC++ 6.0 and VC++ 11.0
+#include <float.h>
+#define isfinite _finite
+#elif defined(__sun) && defined(__SVR4) //Solaris
+#if !defined(isfinite)
+#include <ieeefp.h>
+#define isfinite finite
+#endif
+#elif defined(_AIX)
+#if !defined(isfinite)
+#include <math.h>
+#define isfinite finite
+#endif
+#elif defined(__hpux)
+#if !defined(isfinite)
+#if defined(__ia64) && !defined(finite)
+#define isfinite(x) ((sizeof(x) == sizeof(float) ? \
+ _Isfinitef(x) : _IsFinite(x)))
+#else
+#include <math.h>
+#define isfinite finite
+#endif
+#endif
+#else
+#include <cmath>
+#if !(defined(__QNXNTO__)) // QNX already defines isfinite
+#define isfinite std::isfinite
+#endif
+#endif
+
+#if defined(_MSC_VER)
+#if !defined(WINCE) && defined(__STDC_SECURE_LIB__) && _MSC_VER >= 1500 // VC++ 9.0 and above
+#define snprintf sprintf_s
+#elif _MSC_VER >= 1900 // VC++ 14.0 and above
+#define snprintf std::snprintf
+#else
+#define snprintf _snprintf
+#endif
+#elif defined(__ANDROID__) || defined(__QNXNTO__)
+#define snprintf snprintf
+#elif __cplusplus >= 201103L
+#if !defined(__MINGW32__) && !defined(__CYGWIN__)
+#define snprintf std::snprintf
+#endif
+#endif
+
+#if defined(__BORLANDC__)
+#include <float.h>
+#define isfinite _finite
+#define snprintf _snprintf
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1400 // VC++ 8.0
+// Disable warning about strdup being deprecated.
+#pragma warning(disable : 4996)
+#endif
+
+namespace Json {
+
+#if __cplusplus >= 201103L || (defined(_CPPLIB_VER) && _CPPLIB_VER >= 520)
+typedef std::unique_ptr<StreamWriter> StreamWriterPtr;
+#else
+typedef std::auto_ptr<StreamWriter> StreamWriterPtr;
+#endif
+
+static bool containsControlCharacter(const char* str) {
+ while (*str) {
+ if (isControlCharacter(*(str++)))
+ return true;
+ }
+ return false;
+}
+
+static bool containsControlCharacter0(const char* str, unsigned len) {
+ char const* end = str + len;
+ while (end != str) {
+ if (isControlCharacter(*str) || 0==*str)
+ return true;
+ ++str;
+ }
+ return false;
+}
+
+JSONCPP_STRING valueToString(LargestInt value) {
+ UIntToStringBuffer buffer;
+ char* current = buffer + sizeof(buffer);
+ if (value == Value::minLargestInt) {
+ uintToString(LargestUInt(Value::maxLargestInt) + 1, current);
+ *--current = '-';
+ } else if (value < 0) {
+ uintToString(LargestUInt(-value), current);
+ *--current = '-';
+ } else {
+ uintToString(LargestUInt(value), current);
+ }
+ assert(current >= buffer);
+ return current;
+}
+
+JSONCPP_STRING valueToString(LargestUInt value) {
+ UIntToStringBuffer buffer;
+ char* current = buffer + sizeof(buffer);
+ uintToString(value, current);
+ assert(current >= buffer);
+ return current;
+}
+
+#if defined(JSON_HAS_INT64)
+
+JSONCPP_STRING valueToString(Int value) {
+ return valueToString(LargestInt(value));
+}
+
+JSONCPP_STRING valueToString(UInt value) {
+ return valueToString(LargestUInt(value));
+}
+
+#endif // # if defined(JSON_HAS_INT64)
+
+JSONCPP_STRING valueToString(double value, bool useSpecialFloats, unsigned int precision) {
+ // Allocate a buffer that is more than large enough to hold the requested
+ // number of significant digits.
+ char buffer[32];
+ int len = -1;
+
+ char formatString[6];
+ sprintf(formatString, "%%.%dg", precision);
+
+ // Print into the buffer. We need not request the alternative representation
+ // that always has a decimal point because JSON doesn't distinguish the
+ // concepts of reals and integers.
+ if (isfinite(value)) {
+ len = snprintf(buffer, sizeof(buffer), formatString, value);
+ } else {
+ // Per IEEE 754, NaN never compares equal to itself, so (value != value) detects NaN
+ if (value != value) {
+ len = snprintf(buffer, sizeof(buffer), useSpecialFloats ? "NaN" : "null");
+ } else if (value < 0) {
+ len = snprintf(buffer, sizeof(buffer), useSpecialFloats ? "-Infinity" : "-1e+9999");
+ } else {
+ len = snprintf(buffer, sizeof(buffer), useSpecialFloats ? "Infinity" : "1e+9999");
+ }
+ // For these special values we do not need to call fixNumericLocale, but it is cheap.
+ }
+ assert(len >= 0);
+ fixNumericLocale(buffer, buffer + len);
+ return buffer;
+}
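+
+// Illustrative sketch (not part of jsoncpp): how non-finite doubles are
+// rendered depends on useSpecialFloats, which the calling writer selects:
+//   valueToString(std::numeric_limits<double>::quiet_NaN(), false, 17); // "null"
+//   valueToString(std::numeric_limits<double>::quiet_NaN(), true, 17);  // "NaN"
+//   valueToString(-std::numeric_limits<double>::infinity(), false, 17); // "-1e+9999"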
+
+JSONCPP_STRING valueToString(double value) { return valueToString(value, false, 17); }
+
+JSONCPP_STRING valueToString(bool value) { return value ? "true" : "false"; }
+
+JSONCPP_STRING valueToQuotedString(const char* value) {
+ if (value == NULL)
+ return "";
+ // Not sure how to handle unicode...
+ if (strpbrk(value, "\"\\\b\f\n\r\t") == NULL &&
+ !containsControlCharacter(value))
+ return JSONCPP_STRING("\"") + value + "\"";
+ // We have to walk value and escape any special characters.
+ // Appending to JSONCPP_STRING is not efficient, but this should be rare.
+ // (Note: forward slashes are *not* rare, but I am not escaping them.)
+ JSONCPP_STRING::size_type maxsize =
+ strlen(value) * 2 + 3; // allescaped+quotes+NULL
+ JSONCPP_STRING result;
+ result.reserve(maxsize); // to avoid lots of mallocs
+ result += "\"";
+ for (const char* c = value; *c != 0; ++c) {
+ switch (*c) {
+ case '\"':
+ result += "\\\"";
+ break;
+ case '\\':
+ result += "\\\\";
+ break;
+ case '\b':
+ result += "\\b";
+ break;
+ case '\f':
+ result += "\\f";
+ break;
+ case '\n':
+ result += "\\n";
+ break;
+ case '\r':
+ result += "\\r";
+ break;
+ case '\t':
+ result += "\\t";
+ break;
+ // case '/':
+ // Even though \/ is considered a legal escape in JSON, a bare
+ // slash is also legal, so I see no reason to escape it.
+ // (I hope I am not misunderstanding something.)
+ // blep notes: actually escaping \/ may be useful in javascript to avoid </
+ // sequence.
+ // Should add a flag to allow this compatibility mode and prevent this
+ // sequence from occurring.
+ default:
+ if (isControlCharacter(*c)) {
+ JSONCPP_OSTRINGSTREAM oss;
+ oss << "\\u" << std::hex << std::uppercase << std::setfill('0')
+ << std::setw(4) << static_cast<int>(*c);
+ result += oss.str();
+ } else {
+ result += *c;
+ }
+ break;
+ }
+ }
+ result += "\"";
+ return result;
+}
+
+// https://github.com/upcaste/upcaste/blob/master/src/upcore/src/cstring/strnpbrk.cpp
+static char const* strnpbrk(char const* s, char const* accept, size_t n) {
+ assert((s || !n) && accept);
+
+ char const* const end = s + n;
+ for (char const* cur = s; cur < end; ++cur) {
+ int const c = *cur;
+ for (char const* a = accept; *a; ++a) {
+ if (*a == c) {
+ return cur;
+ }
+ }
+ }
+ return NULL;
+}
+static JSONCPP_STRING valueToQuotedStringN(const char* value, unsigned length) {
+ if (value == NULL)
+ return "";
+ // Not sure how to handle unicode...
+ if (strnpbrk(value, "\"\\\b\f\n\r\t", length) == NULL &&
+ !containsControlCharacter0(value, length))
+ return JSONCPP_STRING("\"") + value + "\"";
+ // We have to walk value and escape any special characters.
+ // Appending to JSONCPP_STRING is not efficient, but this should be rare.
+ // (Note: forward slashes are *not* rare, but I am not escaping them.)
+ JSONCPP_STRING::size_type maxsize =
+ length * 2 + 3; // allescaped+quotes+NULL
+ JSONCPP_STRING result;
+ result.reserve(maxsize); // to avoid lots of mallocs
+ result += "\"";
+ char const* end = value + length;
+ for (const char* c = value; c != end; ++c) {
+ switch (*c) {
+ case '\"':
+ result += "\\\"";
+ break;
+ case '\\':
+ result += "\\\\";
+ break;
+ case '\b':
+ result += "\\b";
+ break;
+ case '\f':
+ result += "\\f";
+ break;
+ case '\n':
+ result += "\\n";
+ break;
+ case '\r':
+ result += "\\r";
+ break;
+ case '\t':
+ result += "\\t";
+ break;
+ // case '/':
+ // Even though \/ is considered a legal escape in JSON, a bare
+ // slash is also legal, so I see no reason to escape it.
+ // (I hope I am not misunderstanding something.)
+ // blep notes: actually escaping \/ may be useful in javascript to avoid </
+ // sequence.
+ // Should add a flag to allow this compatibility mode and prevent this
+ // sequence from occurring.
+ default:
+ if ((isControlCharacter(*c)) || (*c == 0)) {
+ JSONCPP_OSTRINGSTREAM oss;
+ oss << "\\u" << std::hex << std::uppercase << std::setfill('0')
+ << std::setw(4) << static_cast<int>(*c);
+ result += oss.str();
+ } else {
+ result += *c;
+ }
+ break;
+ }
+ }
+ result += "\"";
+ return result;
+}
+
+// Class Writer
+// //////////////////////////////////////////////////////////////////
+Writer::~Writer() {}
+
+// Class FastWriter
+// //////////////////////////////////////////////////////////////////
+
+FastWriter::FastWriter()
+ : yamlCompatiblityEnabled_(false), dropNullPlaceholders_(false),
+ omitEndingLineFeed_(false) {}
+
+void FastWriter::enableYAMLCompatibility() { yamlCompatiblityEnabled_ = true; }
+
+void FastWriter::dropNullPlaceholders() { dropNullPlaceholders_ = true; }
+
+void FastWriter::omitEndingLineFeed() { omitEndingLineFeed_ = true; }
+
+JSONCPP_STRING FastWriter::write(const Value& root) {
+ document_ = "";
+ writeValue(root);
+ if (!omitEndingLineFeed_)
+ document_ += "\n";
+ return document_;
+}
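+
+// Illustrative sketch (not part of jsoncpp): FastWriter emits a compact,
+// single-line document; the toggles above adjust the output:
+//   Json::Value root;
+//   root["enabled"] = true;
+//   Json::FastWriter writer;
+//   writer.omitEndingLineFeed();
+//   JSONCPP_STRING out = writer.write(root);   // {"enabled":true}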
+
+void FastWriter::writeValue(const Value& value) {
+ switch (value.type()) {
+ case nullValue:
+ if (!dropNullPlaceholders_)
+ document_ += "null";
+ break;
+ case intValue:
+ document_ += valueToString(value.asLargestInt());
+ break;
+ case uintValue:
+ document_ += valueToString(value.asLargestUInt());
+ break;
+ case realValue:
+ document_ += valueToString(value.asDouble());
+ break;
+ case stringValue:
+ {
+ // Is NULL possible for value.string_?
+ char const* str;
+ char const* end;
+ bool ok = value.getString(&str, &end);
+ if (ok) document_ += valueToQuotedStringN(str, static_cast<unsigned>(end-str));
+ break;
+ }
+ case booleanValue:
+ document_ += valueToString(value.asBool());
+ break;
+ case arrayValue: {
+ document_ += '[';
+ ArrayIndex size = value.size();
+ for (ArrayIndex index = 0; index < size; ++index) {
+ if (index > 0)
+ document_ += ',';
+ writeValue(value[index]);
+ }
+ document_ += ']';
+ } break;
+ case objectValue: {
+ Value::Members members(value.getMemberNames());
+ document_ += '{';
+ for (Value::Members::iterator it = members.begin(); it != members.end();
+ ++it) {
+ const JSONCPP_STRING& name = *it;
+ if (it != members.begin())
+ document_ += ',';
+ document_ += valueToQuotedStringN(name.data(), static_cast<unsigned>(name.length()));
+ document_ += yamlCompatiblityEnabled_ ? ": " : ":";
+ writeValue(value[name]);
+ }
+ document_ += '}';
+ } break;
+ }
+}
+
+// Class StyledWriter
+// //////////////////////////////////////////////////////////////////
+
+StyledWriter::StyledWriter()
+ : rightMargin_(74), indentSize_(3), addChildValues_() {}
+
+JSONCPP_STRING StyledWriter::write(const Value& root) {
+ document_ = "";
+ addChildValues_ = false;
+ indentString_ = "";
+ writeCommentBeforeValue(root);
+ writeValue(root);
+ writeCommentAfterValueOnSameLine(root);
+ document_ += "\n";
+ return document_;
+}
+
+void StyledWriter::writeValue(const Value& value) {
+ switch (value.type()) {
+ case nullValue:
+ pushValue("null");
+ break;
+ case intValue:
+ pushValue(valueToString(value.asLargestInt()));
+ break;
+ case uintValue:
+ pushValue(valueToString(value.asLargestUInt()));
+ break;
+ case realValue:
+ pushValue(valueToString(value.asDouble()));
+ break;
+ case stringValue:
+ {
+ // Is NULL possible for value.string_?
+ char const* str;
+ char const* end;
+ bool ok = value.getString(&str, &end);
+ if (ok) pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end-str)));
+ else pushValue("");
+ break;
+ }
+ case booleanValue:
+ pushValue(valueToString(value.asBool()));
+ break;
+ case arrayValue:
+ writeArrayValue(value);
+ break;
+ case objectValue: {
+ Value::Members members(value.getMemberNames());
+ if (members.empty())
+ pushValue("{}");
+ else {
+ writeWithIndent("{");
+ indent();
+ Value::Members::iterator it = members.begin();
+ for (;;) {
+ const JSONCPP_STRING& name = *it;
+ const Value& childValue = value[name];
+ writeCommentBeforeValue(childValue);
+ writeWithIndent(valueToQuotedString(name.c_str()));
+ document_ += " : ";
+ writeValue(childValue);
+ if (++it == members.end()) {
+ writeCommentAfterValueOnSameLine(childValue);
+ break;
+ }
+ document_ += ',';
+ writeCommentAfterValueOnSameLine(childValue);
+ }
+ unindent();
+ writeWithIndent("}");
+ }
+ } break;
+ }
+}
+
+void StyledWriter::writeArrayValue(const Value& value) {
+ unsigned size = value.size();
+ if (size == 0)
+ pushValue("[]");
+ else {
+ bool isArrayMultiLine = isMultineArray(value);
+ if (isArrayMultiLine) {
+ writeWithIndent("[");
+ indent();
+ bool hasChildValue = !childValues_.empty();
+ unsigned index = 0;
+ for (;;) {
+ const Value& childValue = value[index];
+ writeCommentBeforeValue(childValue);
+ if (hasChildValue)
+ writeWithIndent(childValues_[index]);
+ else {
+ writeIndent();
+ writeValue(childValue);
+ }
+ if (++index == size) {
+ writeCommentAfterValueOnSameLine(childValue);
+ break;
+ }
+ document_ += ',';
+ writeCommentAfterValueOnSameLine(childValue);
+ }
+ unindent();
+ writeWithIndent("]");
+ } else // output on a single line
+ {
+ assert(childValues_.size() == size);
+ document_ += "[ ";
+ for (unsigned index = 0; index < size; ++index) {
+ if (index > 0)
+ document_ += ", ";
+ document_ += childValues_[index];
+ }
+ document_ += " ]";
+ }
+ }
+}
+
+bool StyledWriter::isMultineArray(const Value& value) {
+ ArrayIndex const size = value.size();
+ bool isMultiLine = size * 3 >= rightMargin_;
+ childValues_.clear();
+ for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) {
+ const Value& childValue = value[index];
+ isMultiLine = ((childValue.isArray() || childValue.isObject()) &&
+ childValue.size() > 0);
+ }
+ if (!isMultiLine) // check if line length > max line length
+ {
+ childValues_.reserve(size);
+ addChildValues_ = true;
+ ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
+ for (ArrayIndex index = 0; index < size; ++index) {
+ if (hasCommentForValue(value[index])) {
+ isMultiLine = true;
+ }
+ writeValue(value[index]);
+ lineLength += static_cast<ArrayIndex>(childValues_[index].length());
+ }
+ addChildValues_ = false;
+ isMultiLine = isMultiLine || lineLength >= rightMargin_;
+ }
+ return isMultiLine;
+}
+
+void StyledWriter::pushValue(const JSONCPP_STRING& value) {
+ if (addChildValues_)
+ childValues_.push_back(value);
+ else
+ document_ += value;
+}
+
+void StyledWriter::writeIndent() {
+ if (!document_.empty()) {
+ char last = document_[document_.length() - 1];
+ if (last == ' ') // already indented
+ return;
+ if (last != '\n') // Comments may add new-line
+ document_ += '\n';
+ }
+ document_ += indentString_;
+}
+
+void StyledWriter::writeWithIndent(const JSONCPP_STRING& value) {
+ writeIndent();
+ document_ += value;
+}
+
+void StyledWriter::indent() { indentString_ += JSONCPP_STRING(indentSize_, ' '); }
+
+void StyledWriter::unindent() {
+ assert(indentString_.size() >= indentSize_);
+ indentString_.resize(indentString_.size() - indentSize_);
+}
+
+void StyledWriter::writeCommentBeforeValue(const Value& root) {
+ if (!root.hasComment(commentBefore))
+ return;
+
+ document_ += "\n";
+ writeIndent();
+ const JSONCPP_STRING& comment = root.getComment(commentBefore);
+ JSONCPP_STRING::const_iterator iter = comment.begin();
+ while (iter != comment.end()) {
+ document_ += *iter;
+ if (*iter == '\n' &&
+ (iter != comment.end() && *(iter + 1) == '/'))
+ writeIndent();
+ ++iter;
+ }
+
+ // Comments are stripped of trailing newlines, so add one here
+ document_ += "\n";
+}
+
+void StyledWriter::writeCommentAfterValueOnSameLine(const Value& root) {
+ if (root.hasComment(commentAfterOnSameLine))
+ document_ += " " + root.getComment(commentAfterOnSameLine);
+
+ if (root.hasComment(commentAfter)) {
+ document_ += "\n";
+ document_ += root.getComment(commentAfter);
+ document_ += "\n";
+ }
+}
+
+bool StyledWriter::hasCommentForValue(const Value& value) {
+ return value.hasComment(commentBefore) ||
+ value.hasComment(commentAfterOnSameLine) ||
+ value.hasComment(commentAfter);
+}
+
+// Class StyledStreamWriter
+// //////////////////////////////////////////////////////////////////
+
+StyledStreamWriter::StyledStreamWriter(JSONCPP_STRING indentation)
+ : document_(NULL), rightMargin_(74), indentation_(indentation),
+ addChildValues_() {}
+
+void StyledStreamWriter::write(JSONCPP_OSTREAM& out, const Value& root) {
+ document_ = &out;
+ addChildValues_ = false;
+ indentString_ = "";
+ indented_ = true;
+ writeCommentBeforeValue(root);
+ if (!indented_) writeIndent();
+ indented_ = true;
+ writeValue(root);
+ writeCommentAfterValueOnSameLine(root);
+ *document_ << "\n";
+ document_ = NULL; // Forget the stream, for safety.
+}
+
+void StyledStreamWriter::writeValue(const Value& value) {
+ switch (value.type()) {
+ case nullValue:
+ pushValue("null");
+ break;
+ case intValue:
+ pushValue(valueToString(value.asLargestInt()));
+ break;
+ case uintValue:
+ pushValue(valueToString(value.asLargestUInt()));
+ break;
+ case realValue:
+ pushValue(valueToString(value.asDouble()));
+ break;
+ case stringValue:
+ {
+ // Is NULL possible for value.string_?
+ char const* str;
+ char const* end;
+ bool ok = value.getString(&str, &end);
+ if (ok) pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end-str)));
+ else pushValue("");
+ break;
+ }
+ case booleanValue:
+ pushValue(valueToString(value.asBool()));
+ break;
+ case arrayValue:
+ writeArrayValue(value);
+ break;
+ case objectValue: {
+ Value::Members members(value.getMemberNames());
+ if (members.empty())
+ pushValue("{}");
+ else {
+ writeWithIndent("{");
+ indent();
+ Value::Members::iterator it = members.begin();
+ for (;;) {
+ const JSONCPP_STRING& name = *it;
+ const Value& childValue = value[name];
+ writeCommentBeforeValue(childValue);
+ writeWithIndent(valueToQuotedString(name.c_str()));
+ *document_ << " : ";
+ writeValue(childValue);
+ if (++it == members.end()) {
+ writeCommentAfterValueOnSameLine(childValue);
+ break;
+ }
+ *document_ << ",";
+ writeCommentAfterValueOnSameLine(childValue);
+ }
+ unindent();
+ writeWithIndent("}");
+ }
+ } break;
+ }
+}
+
+void StyledStreamWriter::writeArrayValue(const Value& value) {
+ unsigned size = value.size();
+ if (size == 0)
+ pushValue("[]");
+ else {
+ bool isArrayMultiLine = isMultineArray(value);
+ if (isArrayMultiLine) {
+ writeWithIndent("[");
+ indent();
+ bool hasChildValue = !childValues_.empty();
+ unsigned index = 0;
+ for (;;) {
+ const Value& childValue = value[index];
+ writeCommentBeforeValue(childValue);
+ if (hasChildValue)
+ writeWithIndent(childValues_[index]);
+ else {
+ if (!indented_) writeIndent();
+ indented_ = true;
+ writeValue(childValue);
+ indented_ = false;
+ }
+ if (++index == size) {
+ writeCommentAfterValueOnSameLine(childValue);
+ break;
+ }
+ *document_ << ",";
+ writeCommentAfterValueOnSameLine(childValue);
+ }
+ unindent();
+ writeWithIndent("]");
+ } else // output on a single line
+ {
+ assert(childValues_.size() == size);
+ *document_ << "[ ";
+ for (unsigned index = 0; index < size; ++index) {
+ if (index > 0)
+ *document_ << ", ";
+ *document_ << childValues_[index];
+ }
+ *document_ << " ]";
+ }
+ }
+}
+
+bool StyledStreamWriter::isMultineArray(const Value& value) {
+ ArrayIndex const size = value.size();
+ bool isMultiLine = size * 3 >= rightMargin_;
+ childValues_.clear();
+ for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) {
+ const Value& childValue = value[index];
+ isMultiLine = ((childValue.isArray() || childValue.isObject()) &&
+ childValue.size() > 0);
+ }
+ if (!isMultiLine) // check if line length > max line length
+ {
+ childValues_.reserve(size);
+ addChildValues_ = true;
+ ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
+ for (ArrayIndex index = 0; index < size; ++index) {
+ if (hasCommentForValue(value[index])) {
+ isMultiLine = true;
+ }
+ writeValue(value[index]);
+ lineLength += static_cast<ArrayIndex>(childValues_[index].length());
+ }
+ addChildValues_ = false;
+ isMultiLine = isMultiLine || lineLength >= rightMargin_;
+ }
+ return isMultiLine;
+}
+
+void StyledStreamWriter::pushValue(const JSONCPP_STRING& value) {
+ if (addChildValues_)
+ childValues_.push_back(value);
+ else
+ *document_ << value;
+}
+
+void StyledStreamWriter::writeIndent() {
+ // blep intended this to look at the so-far-written string
+ // to determine whether we are already indented, but
+ // with a stream we cannot do that. So we rely on some saved state.
+ // The caller checks indented_.
+ *document_ << '\n' << indentString_;
+}
+
+void StyledStreamWriter::writeWithIndent(const JSONCPP_STRING& value) {
+ if (!indented_) writeIndent();
+ *document_ << value;
+ indented_ = false;
+}
+
+void StyledStreamWriter::indent() { indentString_ += indentation_; }
+
+void StyledStreamWriter::unindent() {
+ assert(indentString_.size() >= indentation_.size());
+ indentString_.resize(indentString_.size() - indentation_.size());
+}
+
+void StyledStreamWriter::writeCommentBeforeValue(const Value& root) {
+ if (!root.hasComment(commentBefore))
+ return;
+
+ if (!indented_) writeIndent();
+ const JSONCPP_STRING& comment = root.getComment(commentBefore);
+ JSONCPP_STRING::const_iterator iter = comment.begin();
+ while (iter != comment.end()) {
+ *document_ << *iter;
+ if (*iter == '\n' &&
+ (iter != comment.end() && *(iter + 1) == '/'))
+ // writeIndent(); // would include newline
+ *document_ << indentString_;
+ ++iter;
+ }
+ indented_ = false;
+}
+
+void StyledStreamWriter::writeCommentAfterValueOnSameLine(const Value& root) {
+ if (root.hasComment(commentAfterOnSameLine))
+ *document_ << ' ' << root.getComment(commentAfterOnSameLine);
+
+ if (root.hasComment(commentAfter)) {
+ writeIndent();
+ *document_ << root.getComment(commentAfter);
+ }
+ indented_ = false;
+}
+
+bool StyledStreamWriter::hasCommentForValue(const Value& value) {
+ return value.hasComment(commentBefore) ||
+ value.hasComment(commentAfterOnSameLine) ||
+ value.hasComment(commentAfter);
+}
+
+//////////////////////////
+// BuiltStyledStreamWriter
+
+/// Scoped enums are not available until C++11.
+struct CommentStyle {
+ /// Decide whether to write comments.
+ enum Enum {
+ None, ///< Drop all comments.
+ Most, ///< Recover odd behavior of previous versions (not implemented yet).
+ All ///< Keep all comments.
+ };
+};
+
+struct BuiltStyledStreamWriter : public StreamWriter
+{
+ BuiltStyledStreamWriter(
+ JSONCPP_STRING const& indentation,
+ CommentStyle::Enum cs,
+ JSONCPP_STRING const& colonSymbol,
+ JSONCPP_STRING const& nullSymbol,
+ JSONCPP_STRING const& endingLineFeedSymbol,
+ bool useSpecialFloats,
+ unsigned int precision);
+ int write(Value const& root, JSONCPP_OSTREAM* sout) JSONCPP_OVERRIDE;
+private:
+ void writeValue(Value const& value);
+ void writeArrayValue(Value const& value);
+ bool isMultineArray(Value const& value);
+ void pushValue(JSONCPP_STRING const& value);
+ void writeIndent();
+ void writeWithIndent(JSONCPP_STRING const& value);
+ void indent();
+ void unindent();
+ void writeCommentBeforeValue(Value const& root);
+ void writeCommentAfterValueOnSameLine(Value const& root);
+ static bool hasCommentForValue(const Value& value);
+
+ typedef std::vector<JSONCPP_STRING> ChildValues;
+
+ ChildValues childValues_;
+ JSONCPP_STRING indentString_;
+ unsigned int rightMargin_;
+ JSONCPP_STRING indentation_;
+ CommentStyle::Enum cs_;
+ JSONCPP_STRING colonSymbol_;
+ JSONCPP_STRING nullSymbol_;
+ JSONCPP_STRING endingLineFeedSymbol_;
+ bool addChildValues_ : 1;
+ bool indented_ : 1;
+ bool useSpecialFloats_ : 1;
+ unsigned int precision_;
+};
+BuiltStyledStreamWriter::BuiltStyledStreamWriter(
+ JSONCPP_STRING const& indentation,
+ CommentStyle::Enum cs,
+ JSONCPP_STRING const& colonSymbol,
+ JSONCPP_STRING const& nullSymbol,
+ JSONCPP_STRING const& endingLineFeedSymbol,
+ bool useSpecialFloats,
+ unsigned int precision)
+ : rightMargin_(74)
+ , indentation_(indentation)
+ , cs_(cs)
+ , colonSymbol_(colonSymbol)
+ , nullSymbol_(nullSymbol)
+ , endingLineFeedSymbol_(endingLineFeedSymbol)
+ , addChildValues_(false)
+ , indented_(false)
+ , useSpecialFloats_(useSpecialFloats)
+ , precision_(precision)
+{
+}
+int BuiltStyledStreamWriter::write(Value const& root, JSONCPP_OSTREAM* sout)
+{
+ sout_ = sout;
+ addChildValues_ = false;
+ indented_ = true;
+ indentString_ = "";
+ writeCommentBeforeValue(root);
+ if (!indented_) writeIndent();
+ indented_ = true;
+ writeValue(root);
+ writeCommentAfterValueOnSameLine(root);
+ *sout_ << endingLineFeedSymbol_;
+ sout_ = NULL;
+ return 0;
+}
+void BuiltStyledStreamWriter::writeValue(Value const& value) {
+ switch (value.type()) {
+ case nullValue:
+ pushValue(nullSymbol_);
+ break;
+ case intValue:
+ pushValue(valueToString(value.asLargestInt()));
+ break;
+ case uintValue:
+ pushValue(valueToString(value.asLargestUInt()));
+ break;
+ case realValue:
+ pushValue(valueToString(value.asDouble(), useSpecialFloats_, precision_));
+ break;
+ case stringValue:
+ {
+    // Is NULL possible for value.string_?
+ char const* str;
+ char const* end;
+ bool ok = value.getString(&str, &end);
+ if (ok) pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end-str)));
+ else pushValue("");
+ break;
+ }
+ case booleanValue:
+ pushValue(valueToString(value.asBool()));
+ break;
+ case arrayValue:
+ writeArrayValue(value);
+ break;
+ case objectValue: {
+ Value::Members members(value.getMemberNames());
+ if (members.empty())
+ pushValue("{}");
+ else {
+ writeWithIndent("{");
+ indent();
+ Value::Members::iterator it = members.begin();
+ for (;;) {
+ JSONCPP_STRING const& name = *it;
+ Value const& childValue = value[name];
+ writeCommentBeforeValue(childValue);
+ writeWithIndent(valueToQuotedStringN(name.data(), static_cast<unsigned>(name.length())));
+ *sout_ << colonSymbol_;
+ writeValue(childValue);
+ if (++it == members.end()) {
+ writeCommentAfterValueOnSameLine(childValue);
+ break;
+ }
+ *sout_ << ",";
+ writeCommentAfterValueOnSameLine(childValue);
+ }
+ unindent();
+ writeWithIndent("}");
+ }
+ } break;
+ }
+}
+
+void BuiltStyledStreamWriter::writeArrayValue(Value const& value) {
+ unsigned size = value.size();
+ if (size == 0)
+ pushValue("[]");
+ else {
+ bool isMultiLine = (cs_ == CommentStyle::All) || isMultineArray(value);
+ if (isMultiLine) {
+ writeWithIndent("[");
+ indent();
+ bool hasChildValue = !childValues_.empty();
+ unsigned index = 0;
+ for (;;) {
+ Value const& childValue = value[index];
+ writeCommentBeforeValue(childValue);
+ if (hasChildValue)
+ writeWithIndent(childValues_[index]);
+ else {
+ if (!indented_) writeIndent();
+ indented_ = true;
+ writeValue(childValue);
+ indented_ = false;
+ }
+ if (++index == size) {
+ writeCommentAfterValueOnSameLine(childValue);
+ break;
+ }
+ *sout_ << ",";
+ writeCommentAfterValueOnSameLine(childValue);
+ }
+ unindent();
+ writeWithIndent("]");
+ } else // output on a single line
+ {
+ assert(childValues_.size() == size);
+ *sout_ << "[";
+ if (!indentation_.empty()) *sout_ << " ";
+ for (unsigned index = 0; index < size; ++index) {
+ if (index > 0)
+ *sout_ << ", ";
+ *sout_ << childValues_[index];
+ }
+ if (!indentation_.empty()) *sout_ << " ";
+ *sout_ << "]";
+ }
+ }
+}
+
+bool BuiltStyledStreamWriter::isMultineArray(Value const& value) {
+ ArrayIndex const size = value.size();
+ bool isMultiLine = size * 3 >= rightMargin_;
+ childValues_.clear();
+ for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) {
+ Value const& childValue = value[index];
+ isMultiLine = ((childValue.isArray() || childValue.isObject()) &&
+ childValue.size() > 0);
+ }
+ if (!isMultiLine) // check if line length > max line length
+ {
+ childValues_.reserve(size);
+ addChildValues_ = true;
+ ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
+ for (ArrayIndex index = 0; index < size; ++index) {
+ if (hasCommentForValue(value[index])) {
+ isMultiLine = true;
+ }
+ writeValue(value[index]);
+ lineLength += static_cast<ArrayIndex>(childValues_[index].length());
+ }
+ addChildValues_ = false;
+ isMultiLine = isMultiLine || lineLength >= rightMargin_;
+ }
+ return isMultiLine;
+}
+
+void BuiltStyledStreamWriter::pushValue(JSONCPP_STRING const& value) {
+ if (addChildValues_)
+ childValues_.push_back(value);
+ else
+ *sout_ << value;
+}
+
+void BuiltStyledStreamWriter::writeIndent() {
+ // blep intended this to look at the so-far-written string
+ // to determine whether we are already indented, but
+ // with a stream we cannot do that. So we rely on some saved state.
+ // The caller checks indented_.
+
+ if (!indentation_.empty()) {
+ // In this case, drop newlines too.
+ *sout_ << '\n' << indentString_;
+ }
+}
+
+void BuiltStyledStreamWriter::writeWithIndent(JSONCPP_STRING const& value) {
+ if (!indented_) writeIndent();
+ *sout_ << value;
+ indented_ = false;
+}
+
+void BuiltStyledStreamWriter::indent() { indentString_ += indentation_; }
+
+void BuiltStyledStreamWriter::unindent() {
+ assert(indentString_.size() >= indentation_.size());
+ indentString_.resize(indentString_.size() - indentation_.size());
+}
+
+void BuiltStyledStreamWriter::writeCommentBeforeValue(Value const& root) {
+ if (cs_ == CommentStyle::None) return;
+ if (!root.hasComment(commentBefore))
+ return;
+
+ if (!indented_) writeIndent();
+ const JSONCPP_STRING& comment = root.getComment(commentBefore);
+ JSONCPP_STRING::const_iterator iter = comment.begin();
+ while (iter != comment.end()) {
+ *sout_ << *iter;
+ if (*iter == '\n' &&
+ (iter != comment.end() && *(iter + 1) == '/'))
+ // writeIndent(); // would write extra newline
+ *sout_ << indentString_;
+ ++iter;
+ }
+ indented_ = false;
+}
+
+void BuiltStyledStreamWriter::writeCommentAfterValueOnSameLine(Value const& root) {
+ if (cs_ == CommentStyle::None) return;
+ if (root.hasComment(commentAfterOnSameLine))
+ *sout_ << " " + root.getComment(commentAfterOnSameLine);
+
+ if (root.hasComment(commentAfter)) {
+ writeIndent();
+ *sout_ << root.getComment(commentAfter);
+ }
+}
+
+// static
+bool BuiltStyledStreamWriter::hasCommentForValue(const Value& value) {
+ return value.hasComment(commentBefore) ||
+ value.hasComment(commentAfterOnSameLine) ||
+ value.hasComment(commentAfter);
+}
+
+///////////////
+// StreamWriter
+
+StreamWriter::StreamWriter()
+ : sout_(NULL)
+{
+}
+StreamWriter::~StreamWriter()
+{
+}
+StreamWriter::Factory::~Factory()
+{}
+StreamWriterBuilder::StreamWriterBuilder()
+{
+ setDefaults(&settings_);
+}
+StreamWriterBuilder::~StreamWriterBuilder()
+{}
+StreamWriter* StreamWriterBuilder::newStreamWriter() const
+{
+ JSONCPP_STRING indentation = settings_["indentation"].asString();
+ JSONCPP_STRING cs_str = settings_["commentStyle"].asString();
+ bool eyc = settings_["enableYAMLCompatibility"].asBool();
+ bool dnp = settings_["dropNullPlaceholders"].asBool();
+ bool usf = settings_["useSpecialFloats"].asBool();
+ unsigned int pre = settings_["precision"].asUInt();
+ CommentStyle::Enum cs = CommentStyle::All;
+ if (cs_str == "All") {
+ cs = CommentStyle::All;
+ } else if (cs_str == "None") {
+ cs = CommentStyle::None;
+ } else {
+ throwRuntimeError("commentStyle must be 'All' or 'None'");
+ }
+ JSONCPP_STRING colonSymbol = " : ";
+ if (eyc) {
+ colonSymbol = ": ";
+ } else if (indentation.empty()) {
+ colonSymbol = ":";
+ }
+ JSONCPP_STRING nullSymbol = "null";
+ if (dnp) {
+ nullSymbol = "";
+ }
+ if (pre > 17) pre = 17;
+ JSONCPP_STRING endingLineFeedSymbol = "";
+ return new BuiltStyledStreamWriter(
+ indentation, cs,
+ colonSymbol, nullSymbol, endingLineFeedSymbol, usf, pre);
+}
+static void getValidWriterKeys(std::set<JSONCPP_STRING>* valid_keys)
+{
+ valid_keys->clear();
+ valid_keys->insert("indentation");
+ valid_keys->insert("commentStyle");
+ valid_keys->insert("enableYAMLCompatibility");
+ valid_keys->insert("dropNullPlaceholders");
+ valid_keys->insert("useSpecialFloats");
+ valid_keys->insert("precision");
+}
+bool StreamWriterBuilder::validate(Json::Value* invalid) const
+{
+ Json::Value my_invalid;
+ if (!invalid) invalid = &my_invalid; // so we do not need to test for NULL
+ Json::Value& inv = *invalid;
+ std::set<JSONCPP_STRING> valid_keys;
+ getValidWriterKeys(&valid_keys);
+ Value::Members keys = settings_.getMemberNames();
+ size_t n = keys.size();
+ for (size_t i = 0; i < n; ++i) {
+ JSONCPP_STRING const& key = keys[i];
+ if (valid_keys.find(key) == valid_keys.end()) {
+ inv[key] = settings_[key];
+ }
+ }
+ return 0u == inv.size();
+}
+Value& StreamWriterBuilder::operator[](JSONCPP_STRING key)
+{
+ return settings_[key];
+}
+// static
+void StreamWriterBuilder::setDefaults(Json::Value* settings)
+{
+ //! [StreamWriterBuilderDefaults]
+ (*settings)["commentStyle"] = "All";
+ (*settings)["indentation"] = "\t";
+ (*settings)["enableYAMLCompatibility"] = false;
+ (*settings)["dropNullPlaceholders"] = false;
+ (*settings)["useSpecialFloats"] = false;
+ (*settings)["precision"] = 17;
+ //! [StreamWriterBuilderDefaults]
+}
+
+JSONCPP_STRING writeString(StreamWriter::Factory const& builder, Value const& root) {
+ JSONCPP_OSTRINGSTREAM sout;
+ StreamWriterPtr const writer(builder.newStreamWriter());
+ writer->write(root, &sout);
+ return sout.str();
+}
+
+JSONCPP_OSTREAM& operator<<(JSONCPP_OSTREAM& sout, Value const& root) {
+ StreamWriterBuilder builder;
+ StreamWriterPtr const writer(builder.newStreamWriter());
+ writer->write(root, &sout);
+ return sout;
+}
+
+} // namespace Json
+
+// //////////////////////////////////////////////////////////////////////
+// End of content of file: src/lib_json/json_writer.cpp
+// //////////////////////////////////////////////////////////////////////
+
+
+
+
+
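
The writer half of the bundled jsoncpp amalgamation above is driven through
StreamWriterBuilder settings ("indentation", "commentStyle", "precision", ...).
A minimal usage sketch, assuming only the bundled json/json.h header; the keys
and values written here are illustrative, not part of SeqLib:

// Sketch: serialize a Json::Value with the StreamWriterBuilder defined above.
#include <iostream>
#include <string>
#include "json/json.h"

int main() {
  Json::Value root;
  root["region"] = "chr1:100-200";   // example payload
  root["fraction"] = 0.25;

  Json::StreamWriterBuilder builder;
  builder["indentation"] = "  ";     // overrides the "\t" default from setDefaults()
  builder["commentStyle"] = "None";  // drop comments on output

  const std::string doc = Json::writeString(builder, root);
  std::cout << doc << std::endl;
  return 0;
}
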
diff --git a/src/non_api/BamStats.cpp b/src/non_api/BamStats.cpp
new file mode 100644
index 0000000..f86d326
--- /dev/null
+++ b/src/non_api/BamStats.cpp
@@ -0,0 +1,111 @@
+#include "SeqLib/BamStats.h"
+
+#include <cmath>
+
+//#define DEBUG_STATS 1
+namespace SeqLib {
+
+BamReadGroup::BamReadGroup(const std::string& name) : reads(0), supp(0), unmap(0), qcfail(0),
+ duplicate(0), mate_unmap(0), m_name(name)
+{
+
+ mapq = Histogram(0,100,1);
+ nm = Histogram(0,100,1);
+ isize = Histogram(-2,2000,10);
+ clip = Histogram(0,100,5);
+ phred = Histogram(0,100,1);
+ len = Histogram(0,250,1);
+
+}
+
+ std::ostream& operator<<(std::ostream& out, const BamStats& qc) {
+ out << "ReadGroup\tReadCount\tSupplementary\tUnmapped\tMateUnmapped\tQCFailed\tDuplicate\tMappingQuality\tNM\tInsertSize\tClippedBases\tMeanPhredScore\tReadLength" << std::endl;
+ for (auto& i : qc.m_group_map)
+ out << i.second << std::endl;
+ return out;
+ }
+
+ std::ostream& operator<<(std::ostream& out, const BamReadGroup& qc) {
+ std::string sep = "\t";
+ out << qc.m_name << sep << qc.reads << sep <<
+ qc.supp << sep <<
+ qc.unmap << sep <<
+ qc.mate_unmap << sep <<
+ qc.qcfail << sep <<
+ qc.duplicate << sep <<
+ qc.mapq.toFileString() << sep <<
+ qc.nm.toFileString() << sep <<
+ qc.isize.toFileString() << sep <<
+ qc.clip.toFileString() << sep <<
+ qc.phred.toFileString() << sep <<
+ qc.len.toFileString();
+ return out;
+ }
+
+void BamReadGroup::addRead(BamRecord &r)
+{
+
+ ++reads;
+ if (r.SecondaryFlag())
+ ++supp;
+ if (r.QCFailFlag())
+ ++qcfail;
+ if (r.DuplicateFlag())
+ ++duplicate;
+ if (!r.MappedFlag())
+ ++unmap;
+ if (!r.MateMappedFlag())
+ ++mate_unmap;
+
+ int mapqr = r.MapQuality();
+ if (mapqr >=0 && mapqr <= 100)
+ mapq.addElem(mapqr);
+
+  int32_t this_nm = r.GetIntTag("NM");
+ //r_get_int32_tag(r, "NM", this_nm);
+ if (this_nm <= 100)
+ nm.addElem(this_nm);
+
+ int32_t isizer = -1;
+ if (!r.PairMappedFlag())
+ isizer = -2;
+ else if (!r.Interchromosomal())
+ isizer = std::abs(r.InsertSize());
+ isize.addElem(isizer);
+
+ int32_t c = r.NumClip();
+ //r_get_clip(r,c);
+ clip.addElem(c);
+
+ len.addElem(r.Length());
+
+ phred.addElem((int)r.MeanPhred());
+
+}
+
+void BamStats::addRead(BamRecord &r)
+{
+
+ // get the read group
+ std::string rg = r.GetZTag("RG");
+ if (rg.empty()) // try grabbing from QNAME
+ rg = "QNAMED_" + r.ParseReadGroup();
+
+#ifdef DEBUG_STATS
+ std::cout << "got read group tag " << rg << std::endl;
+#endif
+
+ std::unordered_map<std::string, BamReadGroup>::iterator ff = m_group_map.find(rg);
+
+ if (ff == m_group_map.end())
+ {
+ m_group_map[rg] = BamReadGroup(rg);
+ m_group_map[rg].addRead(r);
+ }
+ else
+ {
+ ff->second.addRead(r);
+ }
+}
+
+}
diff --git a/src/non_api/BamStats.h b/src/non_api/BamStats.h
new file mode 100644
index 0000000..7cf1bf8
--- /dev/null
+++ b/src/non_api/BamStats.h
@@ -0,0 +1,86 @@
+#ifndef SEQLIB_BAMSTATS_H__
+#define SEQLIB_BAMSTATS_H__
+
+#include <unordered_map>
+#include <cstdint>
+#include <iostream>
+
+#include "SeqLib/Histogram.h"
+#include "SeqLib/BamRecord.h"
+
+namespace SeqLib{
+
+  /** Store information pertaining to a given read group
+   *
+   * This class collects statistics on the number of reads, supplementary reads, unmapped reads, QC-fail reads and duplicate reads.
+   * It also creates Histogram objects to store counts of: mapping quality, NM, insert size, clipped bases, mean Phred score and read length.
+   */
+class BamReadGroup {
+
+ friend class BamStats;
+
+ public:
+
+ /** Construct an empty BamReadGroup */
+ BamReadGroup() {}
+
+ /** Construct an empty BamReadGroup for the specified read group
+ * @param name Name of the read group
+ */
+ BamReadGroup(const std::string& name);
+
+ /** Display basic information about this read group
+ */
+ friend std::ostream& operator<<(std::ostream& out, const BamReadGroup& rg);
+
+ /** Add a BamRecord to this read group */
+ void addRead(BamRecord &r);
+
+ private:
+
+ size_t reads;
+ size_t supp;
+ size_t unmap;
+ size_t qcfail;
+ size_t duplicate;
+ size_t mate_unmap;
+
+ Histogram mapq;
+ Histogram nm;
+ Histogram isize;
+ Histogram clip;
+ Histogram phred;
+ Histogram len;
+
+ std::string m_name;
+
+};
+
+/** Class to store statistics on a BAM file.
+ *
+ * BamStats currently stores a map of BamReadGroup objects. BAM statistics
+ * are collected on a per-read-group basis, but can be output in aggregate. See
+ * BamReadGroup for a description of the relevant BAM statistics.
+ */
+class BamStats
+{
+
+ public:
+
+  /** Loop through the BamReadGroup objects and print them */
+ friend std::ostream& operator<<(std::ostream& out, const BamStats& qc);
+
+ /** Add a read by finding which read group it belongs to and calling the
+ * addRead function for that BamReadGroup.
+ */
+ void addRead(BamRecord &r);
+
+ private:
+
+ std::unordered_map<std::string, BamReadGroup> m_group_map;
+
+};
+
+}
+
+#endif
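
BamStats is filled one record at a time and printed per read group. A short
sketch of how this non-API helper might be driven; the BAM path is a
placeholder, the include path depends on how the non_api sources are wired in,
and the SeqLib::BamReader/BamRecord calls are assumed from the public headers
elsewhere in this release:

// Illustrative only: tabulate per-read-group statistics for one BAM.
#include <iostream>
#include "SeqLib/BamReader.h"
#include "BamStats.h"            // src/non_api header added in this commit

int main() {
  SeqLib::BamReader br;
  if (!br.Open("test.bam"))      // placeholder path
    return 1;

  SeqLib::BamStats stats;
  SeqLib::BamRecord r;
  while (br.GetNextRecord(r))    // assumed SeqLib reader API
    stats.addRead(r);

  std::cout << stats;            // header line plus one row per read group
  return 0;
}
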
diff --git a/src/non_api/Fractions.cpp b/src/non_api/Fractions.cpp
new file mode 100644
index 0000000..6ac8a3b
--- /dev/null
+++ b/src/non_api/Fractions.cpp
@@ -0,0 +1,80 @@
+#include "SeqLib/Fractions.h"
+
+namespace SeqLib {
+
+ FracRegion::FracRegion(const std::string& c, const std::string& p1, const std::string& p2, bam_hdr_t * h, const std::string& f) : SeqLib::GenomicRegion(c, p1, p2, h)
+ {
+ // convert frac to double
+ try {
+ frac = std::stod(f);
+ } catch (...) {
+ std::cerr << "FracRegion::FracRegion - Error converting fraction " << f << " to double " << std::endl;
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ size_t Fractions::size() const {
+ return m_frc.size();
+ }
+
+
+ std::ostream& operator<<(std::ostream& out, const FracRegion& f) {
+ out << f.chr << ":" << SeqLib::AddCommas<int32_t>(f.pos1) << "-" <<
+ SeqLib::AddCommas<int32_t>(f.pos2) << " Frac: " << f.frac;
+ return out;
+ }
+
+void Fractions::readFromBed(const std::string& file, bam_hdr_t * h) {
+
+ std::ifstream iss(file.c_str());
+ if (!iss || file.length() == 0) {
+ std::cerr << "BED file does not exist: " << file << std::endl;
+ exit(EXIT_FAILURE);
+ }
+
+ std::string line;
+ std::string curr_chr = "-1";
+ while (std::getline(iss, line, '\n')) {
+
+ size_t counter = 0;
+ std::string chr, pos1, pos2, f;
+ std::istringstream iss_line(line);
+ std::string val;
+
+ if (line.find("#") == std::string::npos) {
+ while(std::getline(iss_line, val, '\t')) {
+ switch (counter) {
+ case 0 : chr = val; break;
+ case 1 : pos1 = val; break;
+ case 2 : pos2 = val; break;
+ case 3 : f = val; break;
+ }
+ if (counter >= 3)
+ break;
+ ++counter;
+
+ if (chr != curr_chr) {
+ //std::cerr << "...reading from BED - chr" << chr << std::endl;
+ curr_chr = chr;
+ }
+
+ }
+
+ // construct the GenomicRegion
+ FracRegion ff(chr, pos1, pos2, h, f);
+
+ if (ff.valid()) {
+ //gr.pad(pad);
+ //m_grv.push_back(gr);
+ m_frc.add(ff);
+ }
+
+ //}
+ } // end "keep" conditional
+ } // end main while
+
+
+
+}
+
+}
diff --git a/src/non_api/Fractions.h b/src/non_api/Fractions.h
new file mode 100644
index 0000000..4ca7fa4
--- /dev/null
+++ b/src/non_api/Fractions.h
@@ -0,0 +1,46 @@
+#ifndef SNOWMAN_FRACTIONS_H__
+#define SNOWMAN_FRACTIONS_H__
+
+#include "SeqLib/GenomicRegionCollection.h"
+#include <string>
+
+namespace SeqLib {
+
+ /** @brief Extension of GenomicRegion with a fraction of reads to keep on an interval
+ *
+   * Used in conjunction with Fractions for selectively sub-sampling BAM files.
+ */
+class FracRegion : public SeqLib::GenomicRegion {
+
+ public:
+
+ FracRegion() {}
+
+ FracRegion(const std::string& c, const std::string& p1, const std::string& p2, bam_hdr_t * h, const std::string& f);
+
+ friend std::ostream& operator<<(std::ostream& out, const FracRegion& f);
+
+ double frac;
+};
+
+ /** @brief Genomic intervals and associated sampling fractions
+ */
+class Fractions {
+
+ public:
+
+ Fractions() {}
+
+ size_t size() const;
+
+  void readFromBed(const std::string& file, bam_hdr_t * h);
+
+ SeqLib::GenomicRegionCollection<FracRegion> m_frc;
+
+ private:
+
+};
+
+}
+
+#endif
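
Fractions::readFromBed expects a four-column, tab-separated BED-like file
(chrom, start, end, fraction). A sketch of loading one; the file names are
placeholders, the include path is assumed, and the header comes from plain
htslib calls:

// Illustrative only: load per-region sampling fractions from a BED file.
#include <iostream>
#include "htslib/sam.h"
#include "Fractions.h"                        // src/non_api header added in this commit

int main() {
  samFile* in = sam_open("test.bam", "r");    // placeholder path
  if (!in) return 1;
  bam_hdr_t* h = sam_hdr_read(in);            // sequence dictionary for name -> tid lookup

  SeqLib::Fractions f;
  f.readFromBed("regions.frac.bed", h);       // rows like "chr1<TAB>0<TAB>10000<TAB>0.25"
  std::cout << "loaded " << f.size() << " weighted regions" << std::endl;

  bam_hdr_destroy(h);
  sam_close(in);
  return 0;
}
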
diff --git a/src/non_api/Histogram.cpp b/src/non_api/Histogram.cpp
new file mode 100644
index 0000000..f0735df
--- /dev/null
+++ b/src/non_api/Histogram.cpp
@@ -0,0 +1,220 @@
+#include "SeqLib/Histogram.h"
+#include "SeqLib/SeqLibUtils.h"
+#include <fstream>
+#include <cmath>
+#include <algorithm>
+#include <sstream>
+
+#define BINARY_SEARCH 1
+
+#define DEBUG_HISTOGRAM
+
+namespace SeqLib {
+
+Histogram::Histogram(const int32_t& start, const int32_t& end, const uint32_t& width)
+{
+
+  if (end <= start)
+ throw std::invalid_argument("Histogram end must be > start");
+
+ Bin bin;
+ bin.bounds.first = start;
+
+  int32_t next_end = start + width - 1; // -1 so that width=1 gives bounds.first == bounds.second
+
+ while (next_end < end)
+ {
+ // finish this bin
+ bin.bounds.second = next_end;
+ m_bins.push_back(bin);
+ m_ind.push_back(bin.bounds.first); // make the index for the lower bound
+
+ // start a new one
+ bin.bounds.first = next_end+1;
+ next_end += width;
+ }
+
+ // finish the last bin
+ bin.bounds.second = end;
+ m_bins.push_back(bin);
+ m_ind.push_back(bin.bounds.first);
+
+ // add a final bin
+ //bin.bounds.first = end+1;
+ //bin.bounds.second = INT_MAX;
+ //m_bins.push_back(bin);
+ //m_ind.push_back(bin.bounds.first);
+
+}
+
+void Histogram::toCSV(std::ofstream &fs) {
+
+ for (auto& i : m_bins)
+ fs << i << std::endl;
+
+}
+void Histogram::removeElem(const int32_t& elem) {
+ --m_bins[retrieveBinID(elem)];
+}
+
+void Histogram::addElem(const int32_t& elem) {
+ ++m_bins[retrieveBinID(elem)];
+}
+
+std::string Histogram::toFileString() const {
+ std::stringstream ss;
+ for (auto& i : m_bins)
+ if (i.m_count)
+ ss << i.bounds.first << "_" << i.bounds.second << "_" << i.m_count << ",";
+ std::string out = ss.str();
+  if (!out.empty())
+    out.pop_back(); // trim off the trailing comma
+ return(out);
+
+}
+
+size_t Histogram::retrieveBinID(const int32_t& elem) const {
+
+ if (elem < m_bins[0].bounds.first)
+ {
+#ifdef DEBUG_HISTOGRAM
+      std::cerr << "retrieveBinID: elem of value " << elem << " is below min bin " << m_bins[0] << std::endl;
+ exit(1);
+#endif
+ return 0;
+ }
+
+ if (elem > m_bins.back().bounds.second)
+ {
+#ifdef DEBUG_HISTOGRAM
+      std::cerr << "retrieveBinID: elem of value " << elem << " is above max bin " << m_bins.back() << std::endl;
+ exit(1);
+#endif
+      return m_bins.size() - 1;
+ }
+
+
+ if (m_bins[0].contains(elem))
+ return 0;
+ if (m_bins.back().contains(elem))
+    return m_bins.size() - 1;
+
+#ifdef BINARY_SEARCH
+ // binary search
+ std::vector<int32_t>::const_iterator it = std::upper_bound(m_ind.begin(), m_ind.end(), elem);
+ size_t i = it - m_ind.begin()-1;
+ assert(i < m_ind.size());
+ return i;
+#else
+ for (size_t i = 0; i < m_bins.size(); i++) {
+ if (m_bins[i].contains(elem)) {
+ return i;
+ }
+ }
+#endif
+ std::cerr << "bin not found for element " << elem << std::endl;
+ return 0;
+}
+
+void Histogram::initialize(size_t num_bins, std::vector<int32_t>* pspanv, size_t min_bin_width) {
+
+  // ensure that the spans are sorted
+ std::sort(pspanv->begin(), pspanv->end());
+
+ // fill the histogram bins with matrix pairs (pre-sorted by distance)
+ Bin bin;
+
+ // get number of inter-chr events
+ size_t intra = 0;
+ for (auto& i : *pspanv)
+ if (i != INTERCHR)
+ intra++;
+
+ int bin_cut = 0;
+ try {
+ bin_cut = floor((double)intra / (double)num_bins);
+ if (bin_cut == 0)
+ throw 1;
+ } catch(...) {
+    std::cerr << "Error in determining bin cut. Not enough events or too many bins?" << std::endl;
+ std::cerr << "Events: " << pspanv->size() << " Num Bins " << num_bins << " quantile count (hist height) " << bin_cut << std::endl;
+ }
+
+ std::cout << "...Events per bin: " << bin_cut << " num bins " << num_bins << std::endl;
+
+  int32_t last_span = 0;
+ size_t tcount = 0; // count events put into bins
+
+ // iterate over spans
+ for (auto& span : *pspanv) {
+ if (span != INTERCHR) {
+
+ ++tcount;
+
+ // moved into a new bin? (or done?)
+ if (bin.getCount() > bin_cut && span != last_span && (last_span - bin.bounds.first) >= min_bin_width) {
+
+ // finalize, save old bin
+ bin.bounds.second = last_span;
+ m_bins.push_back(bin);
+
+ // new bin
+ bin.bounds.first = last_span+1;
+ bin.m_count = 0;
+
+ }
+ ++bin;
+ if (bin.getCount() >= bin_cut) {
+ last_span = span;
+ }
+
+ //update the size of current bin
+ bin.bounds.second = span;
+ }
+ }
+ // add the last bin
+ bin.bounds.second = INTERCHR-1;
+ m_bins.push_back(bin);
+
+ // add a bin for interchr events
+ bin.bounds = {INTERCHR, INTERCHR};
+ bin.m_count = pspanv->size() - intra;
+ m_bins.push_back(bin);
+
+ // make the indices of lower bound
+ for (auto& i : m_bins)
+ m_ind.push_back(i.bounds.first);
+
+ if (m_bins.size() != (num_bins+1)) {
+ //std::cout << " bin cut " << bin_cut << std::endl;
+ //std::cout << " num bins " << num_bins << " bins.size() " << m_bins.size() << std::endl;
+ //assert(bins.size() == (num_bins+1));
+ }
+
+}
+
+bool Bin::operator < (const Bin& b) const {
+ return (bounds.first < b.bounds.first || (bounds.first==b.bounds.first && bounds.second < b.bounds.second));
+
+}
+
+bool Bin::contains(const int32_t& elem) const {
+
+ return (elem >= bounds.first && elem <= bounds.second);
+
+
+}
+
+Bin& Bin::operator++()
+{
+ ++m_count;
+ return *this;
+}
+
+
+Bin& Bin::operator--() {
+ assert(m_count > 0);
+ --m_count;
+ return *this;
+}
+
+}
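
retrieveBinID above resolves an element to its bin with std::upper_bound over
m_ind, the sorted vector of bin lower bounds. The same lookup in isolation,
with made-up bins and independent of the Bin class:

// Stand-alone illustration of the upper_bound bin lookup used by retrieveBinID.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // lower bounds of bins [0,9], [10,19], [20,29], [30,39], [40,...]
  std::vector<int32_t> lower;
  lower.push_back(0); lower.push_back(10); lower.push_back(20);
  lower.push_back(30); lower.push_back(40);

  int32_t elem = 27;
  std::vector<int32_t>::const_iterator it =
      std::upper_bound(lower.begin(), lower.end(), elem);   // first bound > elem
  size_t bin = static_cast<size_t>(it - lower.begin()) - 1; // step back to the containing bin
  assert(bin < lower.size());

  std::cout << "element " << elem << " falls in bin " << bin << std::endl; // bin 2
  return 0;
}
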
diff --git a/src/non_api/Histogram.h b/src/non_api/Histogram.h
new file mode 100644
index 0000000..e8c6ec5
--- /dev/null
+++ b/src/non_api/Histogram.h
@@ -0,0 +1,171 @@
+#ifndef SEQLIB_HISTOGRAM_H__
+#define SEQLIB_HISTOGRAM_H__
+
+#include <iostream>
+#include <cassert>
+#include <string>
+#include <utility>
+#include <vector>
+#include <fstream>
+
+#include "IntervalTree.h"
+
+class Bin;
+
+typedef TInterval<Bin> BinInterval;
+typedef TIntervalTree<Bin> BinIntervalTree;
+typedef std::vector<BinInterval> BinIntervalVector;
+
+#define INTERCHR 250000000
+
+namespace SeqLib {
+
+ class Histogram;
+
+/** Stores one bin in a Histogram
+ */
+class Bin {
+
+ friend class Histogram;
+
+ public:
+
+ /** Construct a new object with 0 count and range [0,1]
+ */
+ Bin() : m_count(0)
+ {
+ bounds = {0,1};
+ }
+
+ /** Output the bin in format "start range, end range, count"
+ */
+ friend std::ostream& operator<<(std::ostream &out, const Bin &b) {
+ out << b.bounds.first << "," << b.bounds.second << "," << b.m_count;
+ return out;
+ }
+
+ /** Return the number of counts in this histogram bin
+ */
+ int32_t getCount() const { return m_count; }
+
+  /** Check if this bin contains a value
+   * @param elem Value to check against this bin's range
+   * @return true if the value is within the range
+   */
+  bool contains(const int32_t& elem) const;
+
+ /** Define bin comparison operator by location of left bound, then right */
+ bool operator < (const Bin& b) const;
+
+ /** Decrement the histogram bin by one.
+ * Note that this is the prefix version only
+ */
+ Bin& operator--();
+
+ /** Increment the histogram bin by one.
+ * Note that this is the prefix version only
+ */
+ Bin& operator++();
+
+ private:
+ int32_t m_count;
+    std::pair<int32_t, int32_t> bounds; // inclusive [lower, upper] range of this bin
+};
+
+/** Class to store a histogram of numeric values.
+ *
+ * The bins of the Histogram need not be uniformly spaced: their ranges are set
+ * either by the fixed-width constructor or by partitioning the spans the histogram
+ * tabulates into uniform quantiles via Histogram::initialize(). As elements are
+ * added and removed, this initial bin definition remains constant.
+ */
+class Histogram {
+
+ private:
+
+ std::vector<int32_t> m_ind;
+
+ public:
+
+ std::vector<Bin> m_bins;
+ /** Construct an empty histogram
+ */
+ Histogram() {}
+
+ /** Construct a new histogram with bins spaced evenly
+ * @param start Min value covered
+ * @param end Max value covered
+ * @param width Fixed bin width
+ * @exception Throws an invalid_argument if end <= start
+ */
+ Histogram(const int32_t& start, const int32_t& end, const uint32_t& width);
+
+ std::string toFileString() const;
+
+ friend std::ostream& operator<<(std::ostream &out, const Histogram &h) {
+ for (auto& i : h.m_bins)
+ out << i << std::endl;
+ return out;
+ }
+
+  /** Return an iterator to the first bin lower bound
+   */
+  std::vector<int32_t>::iterator begin() { return m_ind.begin(); }
+
+  /** Return an iterator one past the last bin lower bound
+   */
+  std::vector<int32_t>::iterator end() { return m_ind.end(); }
+
+  /** Initialize the histogram bins by partitioning a vector of numeric values into roughly equal-count quantiles
+   */
+  void initialize(size_t num_bins, std::vector<int32_t>* pspanv, size_t min_bin_width = 0);
+
+ /** Add an element to the histogram
+ * @param elem Length of event to add
+ */
+ void addElem(const int32_t &elem);
+
+  /** Remove an element from the histogram
+   * @param elem Length of event to remove
+   */
+ void removeElem(const int32_t &elem);
+
+ /** Output to CSV file like: bin_start,bin_end,count
+ */
+ void toCSV(std::ofstream &fs);
+
+ /** Return the total number of elements in the Histogram
+ */
+ int totalCount() const {
+ int tot = 0;
+ for (auto& i : m_bins)
+ tot += i.getCount();
+ return tot;
+ }
+
+ /** Get count for a histogram bin
+ * @param i Bin index
+ * @return number of events in histogram bin
+ */
+ int32_t binCount(size_t i) { return m_bins[i].getCount(); }
+
+ /** Get number of bins in histogram
+ * @return Number of bins in histogram
+ */
+ size_t numBins() { return m_bins.size(); }
+
+ /** Find bin corresponding to a span
+ * @param elem Event length
+ * @return Bin containing event length
+ */
+ size_t retrieveBinID(const int32_t& elem) const;
+
+};
+
+}
+#endif
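
A short sketch of the fixed-width constructor and the element counters declared
above; the insert sizes are made up and the include path is assumed:

// Illustrative only: count a few insert sizes in 10-wide bins.
#include <cstdint>
#include <iostream>
#include "Histogram.h"                 // src/non_api header added in this commit

int main() {
  SeqLib::Histogram h(0, 1000, 10);    // bins [0,9], [10,19], ..., last bin ends at 1000

  const int32_t isizes[] = {150, 151, 287, 455, 702};
  for (size_t i = 0; i < sizeof(isizes)/sizeof(isizes[0]); ++i)
    h.addElem(isizes[i]);

  std::cout << "bins: " << h.numBins()
            << " total: " << h.totalCount() << std::endl;
  std::cout << h.toFileString() << std::endl;  // e.g. "150_159_2,280_289_1,..."
  return 0;
}
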
diff --git a/src/non_api/STCoverage.cpp b/src/non_api/STCoverage.cpp
new file mode 100644
index 0000000..ac14266
--- /dev/null
+++ b/src/non_api/STCoverage.cpp
@@ -0,0 +1,178 @@
+#include "SeqLib/STCoverage.h"
+#include "SeqLib/SeqLibCommon.h"
+#include <stdexcept>
+#include <algorithm>
+
+namespace SeqLib {
+
+ void STCoverage::clear() {
+ m_map.clear();
+ }
+
+ void STCoverage::settleCoverage() {
+ GRC tmp = m_grc;
+ m_grc.mergeOverlappingIntervals();
+ }
+
+ STCoverage::STCoverage(const SeqLib::GenomicRegion& gr) {
+ m_gr = gr;
+ v = uint16_sp(new std::vector<uint16_t>(gr.width(),0));
+ }
+
+ uint16_t STCoverage::maxCov() const {
+ return (*std::max_element(v->begin(), v->end()));
+ }
+
+ /*void STCoverage::addRead2(const BamRecord& r) {
+
+ int p = r.Position();
+ int e = r.PositionEnd();
+
+ if (p < 0 || e < 0)
+ return;
+
+ if (p < m_gr.pos1 || e > m_gr.pos2)
+ return;
+
+ if (r.ChrID() != m_gr.chr)
+ return;
+
+ assert(p - m_gr.pos1 < v->size());
+ ++v[p - m_gr.pos1];
+ }
+ */
+ void STCoverage::addRead(const BamRecord &r, int buff, bool full_length) {
+
+ //m_settled = false;
+ //m_grc.add(GenomicRegion(r.ChrID(), r.Position(), r.PositionEnd()));
+
+ // out of bounds
+ //if (r.Position() < m_gr.pos1 || r.PositionEnd() > m_gr.pos2 || r.ChrID() != m_gr.chr)
+ // return;
+
+ //int p = std::min(r.Position() - m_gr.pos1, m_gr.pos2);
+ //int e = std::min(r.PositionEnd() - m_gr.pos1, m_gr.pos2);
+ int p = -1;
+ int e = -1;
+
+ if (full_length) {
+ Cigar c = r.GetCigar();
+ // get beginning
+ if (c.size() && c[0].RawType() == BAM_CSOFT_CLIP)
+ p = std::max((int32_t)0, r.Position() - (int32_t)c[0].Length()); // get prefixing S
+ else
+ p = r.Position();
+ // get end
+ if (c.size() && c.back().RawType() == BAM_CSOFT_CLIP)
+ e = r.PositionEnd() + c.back().Length();
+ else
+ e = r.PositionEnd();
+ }
+ else {
+ p = r.Position() + buff;
+ e = r.PositionEnd() - buff;
+ }
+
+ if (p < 0 || e < 0)
+ return;
+
+ // if we don't have an empty map for this, add
+ if (r.ChrID() >= (int)m_map.size()) {
+ int k = m_map.size();
+ while (k <= r.ChrID()) {
+ m_map.push_back(CovMap());
+ //m_map.back().reserve(reserve_size);
+ ++k;
+ }
+ }
+
+ assert(e - p < 1e6); // limit on read length
+ assert(r.ChrID() >= 0);
+ assert(r.ChrID() < (int)m_map.size());
+
+ try {
+ while (p <= e) {
+ //CovMap::iterator iter = m_map.find(p);
+ ++(m_map[r.ChrID()][p]); // add one to this position
+ ++p;
+ //if (v->at(p) < 60000) // 60000 is roughly int16 lim
+ // v->at(p)++;
+ //++p;
+
+ }
+ } catch (std::out_of_range &oor) {
+ std::cerr << "Position " << p << " on tid " << r.ChrID()
+ << " is greater than expected max of " << v->size() << " -- skipping" << std::endl;
+
+ }
+
+ }
+
+ std::ostream& operator<<(std::ostream &out, const STCoverage &c) {
+ out << "Region " << c.m_gr << " v.size() " << c.v->size() << std::endl;
+ return out;
+ }
+
+ void STCoverage::ToBedgraph(std::ofstream * o, const bam_hdr_t * h) const {
+
+ //settleCoverage();
+
+    // uninitialized so nothing to do
+ if (m_gr.chr == -1 || v->size() == 0)
+ return;
+
+ size_t curr_start = 0;
+ size_t curr_val = v->at(0);
+ for (size_t i = 0; i < v->size(); ++i) {
+ if (v->at(i) != curr_val) {
+ (*o) << m_gr.ChrName(h) << "\t" << (curr_start + m_gr.pos1) << "\t" << (i+m_gr.pos1) << "\t" << curr_val << std::endl;
+ curr_start = i;
+ curr_val = v->at(i);
+ }
+ }
+
+ // need to dump last one
+ if ( (curr_start+1) != v->size())
+ (*o) << m_gr.ChrName(h) << "\t" << (curr_start + m_gr.pos1) << "\t" << (v->size()+m_gr.pos1-1) << "\t" << curr_val << std::endl;
+ }
+
+ int STCoverage::getCoverageAtPosition(int chr, int pos) const {
+
+ //CovMapMap::iterator it = m_map.find(chr);
+ //if (it == m_map.end())
+ // return 0;
+ if (chr >= (int)m_map.size())
+ return 0;
+
+ //std::cerr << " MAP " << std::endl;
+ //for (auto& i : m_map)
+ // std::cerr << i.first << " " << i.second << std::endl;
+
+ //if (!m_settled)
+ // settleCoverage();
+
+ //if (pos < m_gr.pos1 || pos > m_gr.pos2) {
+ //std::cerr << "Coverage query out of bounds for location " << m_gr.chr << ":" << pos << std::endl;
+ // return 0;
+ //}
+
+ //size_t q = pos - m_gr.pos1;
+ //if (q >= v->size()) {
+ // std::cerr << "Coverage query out of bounds for location " << m_gr.chr << ":" << pos << " with pos-start of " << q << " attempt on v of size " << v->size() << std::endl;
+ // return 0;
+ //}
+ //return it->second[pos];
+
+ CovMap::const_iterator ff = m_map[chr].find(pos);
+ if (ff == m_map[chr].end()) {
+ return 0;
+ }
+
+ return ff->second;
+
+ //return (v->at(q));
+
+
+}
+
+}
diff --git a/src/non_api/STCoverage.h b/src/non_api/STCoverage.h
new file mode 100644
index 0000000..963569d
--- /dev/null
+++ b/src/non_api/STCoverage.h
@@ -0,0 +1,88 @@
+#ifndef SNOWMAN_SEQLIB_COVERAGE_H__
+#define SNOWMAN_SEQLIB_COVERAGE_H__
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+#include <cstdint>
+#include <vector>
+#include <cassert>
+#include <iostream>
+#include <memory>
+
+#include "htslib/hts.h"
+#include "htslib/sam.h"
+#include "htslib/bgzf.h"
+#include "htslib/kstring.h"
+
+#include "SeqLib/BamRecord.h"
+#include "SeqLib/GenomicRegion.h"
+#include "SeqLib/GenomicRegionCollection.h"
+
+typedef std::shared_ptr<std::vector<uint16_t>> uint16_sp;
+typedef std::unordered_map<int,int> CovMap;
+//typedef std::unordered_map<int,CovMap> CovMapMap;
+
+namespace SeqLib {
+
+
+
+ /** Hold base-pair or binned coverage across an interval or genome
+ *
+ * Currently stores coverage as an unordered_map.
+ */
+class STCoverage {
+
+ private:
+
+ GRC m_grc;
+ GenomicRegion m_gr;
+
+ //CovMapMap m_map;
+ std::vector<CovMap> m_map;
+
+ uint16_sp v;
+
+
+ public:
+
+ /** Clear the coverage map */
+ void clear();
+
+
+  /** Merge overlapping intervals in the internally tracked region collection */
+  void settleCoverage();
+
+  /** Add a read to this coverage track
+   * @param r Read whose aligned span is counted
+   * @param buff Number of bases to trim from each end of the read before counting
+   * @param full_length If true, extend the counted span across soft-clipped bases */
+  void addRead(const BamRecord &r, int buff, bool full_length);
+
+ /** Make a new coverage object at interval gr */
+ STCoverage(const GenomicRegion& gr);
+
+ uint16_t maxCov() const;
+
+ /** Make an empty coverage */
+ STCoverage() {}
+
+  /*! Add two coverage objects together to get total coverage
+ *
+ * @param cov Coverage object to add to current object
+ */
+ //void combineCoverage(Coverage &cov);
+
+  /** Write the coverage track to a stream in bedgraph format */
+  void ToBedgraph(std::ofstream * o, const bam_hdr_t * h) const;
+
+  /** Print a short summary of this coverage object */
+ friend std::ostream& operator<<(std::ostream &out, const STCoverage &c);
+
+ /** Return the coverage count at a position */
+ int getCoverageAtPosition(int chr, int pos) const;
+
+};
+
+}
+#endif
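
STCoverage accumulates per-position depth into one map per chromosome; addRead
takes an end-trimming buffer and a flag for counting soft-clipped bases. A
hedged sketch, reusing the same assumed SeqLib::BamReader calls as the BamStats
example above, with placeholder paths and an assumed include path:

// Illustrative only: build a coverage track and query a single position.
#include <iostream>
#include "SeqLib/BamReader.h"
#include "STCoverage.h"              // src/non_api header added in this commit

int main() {
  SeqLib::BamReader br;
  if (!br.Open("test.bam"))          // placeholder path
    return 1;

  SeqLib::STCoverage cov;
  SeqLib::BamRecord r;
  while (br.GetNextRecord(r))        // assumed SeqLib reader API
    cov.addRead(r, 0, false);        // no end trimming, ignore soft-clipped bases

  // depth at tid 0, 0-based position 1000000
  std::cout << cov.getCoverageAtPosition(0, 1000000) << std::endl;
  return 0;
}
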
diff --git a/src/non_api/snowtools.cpp b/src/non_api/snowtools.cpp
new file mode 100644
index 0000000..28387de
--- /dev/null
+++ b/src/non_api/snowtools.cpp
@@ -0,0 +1,139 @@
+#include "json/json.h"
+#include "json/json-forwards.h"
+
+#include "SeqLib/MiniRules2.h"
+
+#include <cstdio>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+using namespace std;
+
+const std::vector<std::string> m_propertyNames = {"reference", "mapQuality", "isReverseStrand"};
+
+bool ParseFilterObject(const string& filterName, const Json::Value& filterObject) {
+
+ // filter object parsing variables
+ Json::Value null(Json::nullValue);
+ Json::Value propertyValue;
+
+ // store results
+ map<string, string> propertyTokens;
+
+ // iterate over known properties
+ vector<string>::const_iterator propertyNameIter = m_propertyNames.begin();
+ vector<string>::const_iterator propertyNameEnd = m_propertyNames.end();
+ for ( ; propertyNameIter != propertyNameEnd; ++propertyNameIter ) {
+ const string& propertyName = (*propertyNameIter);
+
+ // if property defined in filter, add to token list
+ propertyValue = filterObject.get(propertyName, null);
+ if ( propertyValue != null ) {
+ std::cerr << "[" << propertyName << "] -- " << propertyValue.asString() << std::endl;
+ propertyTokens.insert( make_pair(propertyName, propertyValue.asString()) );
+ }
+ }
+
+  // add this filter to the engine
+ //m_filterEngine.addFilter(filterName);
+
+ // add token list to this filter
+ //return AddPropertyTokensToFilter(filterName, propertyTokens);
+ return true;
+}
+
+const string GetScriptContents(string script) {
+
+ // open script for reading
+ FILE* inFile = fopen(script.c_str(), "rb");
+ if ( !inFile ) {
+ cerr << "bamtools filter ERROR: could not open script: "
+ << script << " for reading" << endl;
+ return string();
+ }
+
+ // read in entire script contents
+ char buffer[1024];
+ ostringstream docStream("");
+ while ( true ) {
+
+ // peek ahead, make sure there is data available
+ char ch = fgetc(inFile);
+ ungetc(ch, inFile);
+ if( feof(inFile) )
+ break;
+
+ // read next block of data
+ if ( fgets(buffer, 1024, inFile) == 0 ) {
+ cerr << "bamtools filter ERROR: could not read script contents" << endl;
+ return string();
+ }
+
+ docStream << buffer;
+ }
+
+ // close script file
+ fclose(inFile);
+
+ // import buffer contents to document, return
+ return docStream.str();
+}
+
+int main(int argc, char** argv) {
+
+ std::string script = "/xchip/gistic/Jeremiah/GIT/SeqLib/test.json";
+ const string document = GetScriptContents(script);
+
+ // set up JsonCPP reader and attempt to parse script
+ Json::Value root;
+ Json::Reader reader;
+ if ( !reader.parse(document, root) ) {
+ // use built-in error reporting mechanism to alert user what was wrong with the script
+ cerr << "bamtools filter ERROR: failed to parse script - see error message(s) below" << endl;
+    return 1;
+ }
+
+ // see if root object contains multiple filters
+ const Json::Value filters = root["filters"];
+
+ cerr << " root size " << root.size() << std::endl;
+ cerr << " filters size " << filters.size() << std::endl;
+
+ // iterate over any filters found
+ int filterIndex = 0;
+ Json::Value::const_iterator filtersIter = filters.begin();
+ Json::Value::const_iterator filtersEnd = filters.end();
+ for ( ; filtersIter != filtersEnd; ++filtersIter, ++filterIndex ) {
+ Json::Value filter = (*filtersIter);
+
+ // convert filter index to string
+ string filterName;
+
+ // if id tag supplied
+ const Json::Value id = filter["id"];
+ if ( !id.isNull() )
+ filterName = id.asString();
+
+
+ // use array index
+ else {
+ stringstream convert;
+ convert << filterIndex;
+ filterName = convert.str();
+ }
+
+ cerr << "filter " << filterName << std::endl;
+ // create & parse filter
+ bool success = true;
+ success &= ParseFilterObject(filterName, filter);
+ }
+
+ /// make mini rules
+  cerr << " ... making mini rules " << endl;
+
+ SeqLib::MiniRulesCollection mrc(script);
+
+
+}
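
The test program above expects a JSON document with a top-level "filters"
array whose entries may carry an "id" plus any of the property names listed in
m_propertyNames. A minimal in-memory example of that shape, parsed the same way
main() does; the rule values are made up:

// Illustrative only: the document layout snowtools.cpp expects, parsed from a string.
#include <iostream>
#include <string>
#include "json/json.h"

int main() {
  const std::string doc =
    "{ \"filters\" : ["
    "   { \"id\" : \"good_pairs\", \"mapQuality\" : \">=30\", \"isReverseStrand\" : \"false\" },"
    "   { \"reference\" : \"chr1\" }"
    "] }";

  Json::Value root;
  Json::Reader reader;
  if (!reader.parse(doc, root))
    return 1;

  const Json::Value filters = root["filters"];
  std::cout << "parsed " << filters.size() << " filters" << std::endl;  // prints 2
  return 0;
}
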
diff --git a/src/seqtools/Makefile.am b/src/seqtools/Makefile.am
new file mode 100644
index 0000000..665af44
--- /dev/null
+++ b/src/seqtools/Makefile.am
@@ -0,0 +1,4 @@
+bin_PROGRAMS = seqtools
+seqtools_SOURCES = seqtools.cpp
+seqtools_LDADD=../libseqlib.a ../../htslib/libhts.a ../../bwa/libbwa.a ../../fermi-lite/libfml.a $(LDFLAGS)
+seqtools_CPPFLAGS=-I../../htslib -I../..
diff --git a/src/seqtools/Makefile.in b/src/seqtools/Makefile.in
new file mode 100644
index 0000000..d3e622e
--- /dev/null
+++ b/src/seqtools/Makefile.in
@@ -0,0 +1,592 @@
+# Makefile.in generated by automake 1.15 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2014 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+bin_PROGRAMS = seqtools$(EXEEXT)
+subdir = src/seqtools
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)"
+PROGRAMS = $(bin_PROGRAMS)
+am_seqtools_OBJECTS = seqtools-seqtools.$(OBJEXT)
+seqtools_OBJECTS = $(am_seqtools_OBJECTS)
+am__DEPENDENCIES_1 =
+seqtools_DEPENDENCIES = ../libseqlib.a ../../htslib/libhts.a \
+ ../../bwa/libbwa.a ../../fermi-lite/libfml.a \
+ $(am__DEPENDENCIES_1)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 =
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+AM_V_CXX = $(am__v_CXX_@AM_V@)
+am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
+am__v_CXX_0 = @echo " CXX " $@;
+am__v_CXX_1 =
+CXXLD = $(CXX)
+CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \
+ -o $@
+AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
+am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
+am__v_CXXLD_0 = @echo " CXXLD " $@;
+am__v_CXXLD_1 =
+SOURCES = $(seqtools_SOURCES)
+DIST_SOURCES = $(seqtools_SOURCES)
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CXXFLAGS = @AM_CXXFLAGS@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build_alias = @build_alias@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host_alias = @host_alias@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+seqtools_SOURCES = seqtools.cpp
+seqtools_LDADD = ../libseqlib.a ../../htslib/libhts.a ../../bwa/libbwa.a ../../fermi-lite/libfml.a $(LDFLAGS)
+seqtools_CPPFLAGS = -I../../htslib -I../..
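+# Illustrative expansion of the three settings above (hypothetical; the real
+# commands depend on what ./configure substitutes into the rules further down):
+#   $(CXX) ... -I../../htslib -I../.. -c -o seqtools-seqtools.o seqtools.cpp
+#   $(CXX) ... -o seqtools seqtools-seqtools.o ../libseqlib.a ../../htslib/libhts.a \
+#     ../../bwa/libbwa.a ../../fermi-lite/libfml.a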
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .cpp .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/seqtools/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/seqtools/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-binPROGRAMS: $(bin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
+ fi; \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed 's/$(EXEEXT)$$//' | \
+ while read p p1; do if test -f $$p \
+ ; then echo "$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n;h' \
+ -e 's|.*|.|' \
+ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+ sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) files[d] = files[d] " " $$1; \
+ else { print "f", $$3 "/" $$4, $$1; } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+ $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-binPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+ -e 's/$$/$(EXEEXT)/' \
+ `; \
+ test -n "$$list" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+clean-binPROGRAMS:
+ -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS)
+
+seqtools$(EXEEXT): $(seqtools_OBJECTS) $(seqtools_DEPENDENCIES) $(EXTRA_seqtools_DEPENDENCIES)
+ @rm -f seqtools$(EXEEXT)
+ $(AM_V_CXXLD)$(CXXLINK) $(seqtools_OBJECTS) $(seqtools_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/seqtools-seqtools.Po@am__quote@
+
+.cpp.o:
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $<
+
+.cpp.obj:
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+seqtools-seqtools.o: seqtools.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seqtools_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seqtools-seqtools.o -MD -MP -MF $(DEPDIR)/seqtools-seqtools.Tpo -c -o seqtools-seqtools.o `test -f 'seqtools.cpp' || echo '$(srcdir)/'`seqtools.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/seqtools-seqtools.Tpo $(DEPDIR)/seqtools-seqtools.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='seqtools.cpp' object='seqtools-seqtools.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seqtools_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seqtools-seqtools.o `test -f 'seqtools.cpp' || echo '$(srcdir)/'`seqtools.cpp
+
+seqtools-seqtools.obj: seqtools.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seqtools_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT seqtools-seqtools.obj -MD -MP -MF $(DEPDIR)/seqtools-seqtools.Tpo -c -o seqtools-seqtools.obj `if test -f 'seqtools.cpp'; then $(CYGPATH_W) 'seqtools.cpp'; else $(CYGPATH_W) '$(srcdir)/seqtools.cpp'; fi`
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/seqtools-seqtools.Tpo $(DEPDIR)/seqtools-seqtools.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	$(AM_V_CXX)source='seqtools.cpp' object='seqtools-seqtools.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(seqtools_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o seqtools-seqtools.obj `if test -f 'seqtools.cpp'; then $(CYGPATH_W) 'seqtools.cpp'; else $(CYGPATH_W) '$(srcdir)/seqtools.cpp'; fi`
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS)
+installdirs:
+ for dir in "$(DESTDIR)$(bindir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binPROGRAMS
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \
+ clean-binPROGRAMS clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-compile distclean-generic distclean-tags \
+ distdir dvi dvi-am html html-am info info-am install \
+ install-am install-binPROGRAMS install-data install-data-am \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-compile mostlyclean-generic pdf pdf-am \
+ ps ps-am tags tags-am uninstall uninstall-am \
+ uninstall-binPROGRAMS
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/seqtools/seqtools.cpp b/src/seqtools/seqtools.cpp
new file mode 100644
index 0000000..cf6aeab
--- /dev/null
+++ b/src/seqtools/seqtools.cpp
@@ -0,0 +1,351 @@
+#include <getopt.h>
+#include <iostream>
+#include <string>
+#include <sstream>
+#include <cassert>
+
+#include "SeqLib/BFC.h"
+#include "SeqLib/FastqReader.h"
+#include "SeqLib/BamReader.h"
+#include "SeqLib/BamWriter.h"
+#include "SeqLib/BWAWrapper.h"
+#include "SeqLib/FermiAssembler.h"
+
+void kt_pipeline(int n_threads, void *(*func)(void*, int, void*), void *shared_data, int n_steps);
+
+#define AUTHOR "Jeremiah Wala <jwala at broadinstitute.org>"
+
+static const char *SEQTOOLS_USAGE_MESSAGE =
+"Program: seqtools \n"
+"Contact: Jeremiah Wala [ jwala at broadinstitute.org ]\n"
+"Usage: seqtools <command> [options]\n\n"
+"Commands:\n"
+" bfc Error correction from a BAM or fasta, direct to re-aligned BAM\n"
+" fml FermiKit assembly (with error correction), direct to re-aligned BAM\n"
+"\nReport bugs to jwala at broadinstitute.org \n\n";
+
+static const char *BFC_USAGE_MESSAGE =
+"Program: seqtools bfc \n"
+"Contact: Jeremiah Wala [ jwala at broadinstitute.org ]\n"
+"Usage: seqtools bfc [options]\n\n"
+"Commands:\n"
+" --verbose, -v Set verbose output\n"
+" --fasta, -f Output stream should be a FASTA (no realignment)\n"
+" --bam, -b Output stream should be a BAM (not SAM)\n"
+" --cram, -C Output stream should be a CRAM (not SAM)\n"
+" --infasta, -F <file> Input a FASTA insted of BAM/SAM/CRAM stream\n"
+" --reference, -G <file> Reference genome if using BWA-MEM realignment\n"
+"\nReport bugs to jwala at broadinstitute.org \n\n";
+
+static const char *FML_USAGE_MESSAGE =
+"Program: seqtools fml \n"
+"Contact: Jeremiah Wala [ jwala at broadinstitute.org ]\n"
+"Usage: seqtools fml [options]\n\n"
+"Description: Extract sequences and assemble and realign contigs\n"
+"Commands:\n"
+" --verbose, -v Set verbose output\n"
+" --fasta, -f Output stream should be a FASTA (no realignment)\n"
+" --bam, -b Output stream should be a BAM (not SAM)\n"
+" --cram, -C Output stream should be a CRAM (not SAM)\n"
+" --infasta, -F <file> Input a FASTA insted of BAM/SAM/CRAM stream\n"
+" --reference, -G <file> Reference genome if using BWA-MEM realignment\n"
+"\nReport bugs to jwala at broadinstitute.org \n\n";
+
+void runbfc(int argc, char** argv);
+void runfml(int argc, char** argv);
+void parseOptions(int argc, char** argv, const char* msg);
+
+namespace opt {
+
+ static bool verbose = false;
+ static char mode = 's';
+ static std::string input;
+ static std::string reference = "/seq/references/Homo_sapiens_assembly19/v1/Homo_sapiens_assembly19.fasta";
+ static std::string fasta; // input is a fasta
+ static std::string target; // input target sequence
+}
+
+static const char* shortopts = "hbfvCG:F:T:";
+static const struct option longopts[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "verbose", no_argument, NULL, 'v' },
+ { "bam", no_argument, NULL, 'b' },
+ { "cram", no_argument, NULL, 'C' },
+ { "fasta", no_argument, NULL, 'f' },
+ { "infasta", required_argument, NULL, 'F' },
+ { "reference", required_argument, NULL, 'G' },
+ { "target", required_argument, NULL, 'T' },
+ { NULL, 0, NULL, 0 }
+};
+
+int main(int argc, char** argv) {
+
+ if (argc <= 1) {
+ std::cerr << SEQTOOLS_USAGE_MESSAGE;
+ return 0;
+ } else {
+ std::string command(argv[1]);
+ if (command == "help" || command == "--help") {
+ std::cerr << SEQTOOLS_USAGE_MESSAGE;
+ return 0;
+ } else if (command == "bfc") {
+ runbfc(argc -1, argv + 1);
+ } else if (command == "fml") {
+ runfml(argc -1, argv + 1);
+ } else {
+ std::cerr << SEQTOOLS_USAGE_MESSAGE;
+ return 0;
+ }
+ }
+
+ return 0;
+
+}
+
+void runfml(int argc, char** argv) {
+
+ parseOptions(argc, argv, FML_USAGE_MESSAGE);
+
+ SeqLib::FermiAssembler fml;
+
+ if (!opt::fasta.empty()) {
+ SeqLib::FastqReader f(opt::fasta);
+
+ SeqLib::UnalignedSequenceVector usv;
+ std::string qn, seq;
+ SeqLib::UnalignedSequence u;
+ if (opt::verbose)
+ std::cerr << "...reading fasta/fastq file " << opt::fasta << std::endl;
+ while (f.GetNextSequence(u))
+ fml.AddRead(u);
+
+ if (opt::verbose)
+ std::cerr << "...read in " << SeqLib::AddCommas(fml.NumSequences()) << " sequences" << std::endl;
+
+ } else {
+
+ SeqLib::BamReader br;
+ if (!br.Open(opt::input == "-" ? "-" : opt::input))
+ exit(EXIT_FAILURE);
+
+ if (opt::verbose)
+ std::cerr << "...opened " << opt::input << std::endl;
+ SeqLib::BamRecord rec;
+ SeqLib::BamRecordVector brv;
+ size_t count = 0;
+ while(br.GetNextRecord(rec)) {
+ if (++count % 1000000 == 0 && opt::verbose)
+ std::cerr << "...at read " << SeqLib::AddCommas(count) << " " << rec.Brief() << std::endl;
+ brv.push_back(rec); //rec.Sequence().c_str(), rec.Qualities().c_str(), rec.Qname().c_str());
+ }
+ fml.AddReads(brv);
+
+ }
+
+ if (opt::verbose)
+ std::cerr << "...error correcting " << std::endl;
+ fml.CorrectReads();
+
+ if (opt::verbose)
+ std::cerr << "...assembling " << std::endl;
+ fml.PerformAssembly();
+
+ // retrieve the reads
+ std::vector<std::string> contigs = fml.GetContigs();
+
+ size_t count = 0;
+ if (opt::mode == 'f') {
+ for (std::vector<std::string>::const_iterator i = contigs.begin(); i != contigs.end(); ++i)
+ std::cout << ">contig" << ++count << std::endl << *i << std::endl;
+ return;
+ }
+
+ SeqLib::BamWriter bw;
+ if (opt::mode == 'b')
+ bw = SeqLib::BamWriter(SeqLib::BAM);
+ else if (opt::mode == 's')
+ bw = SeqLib::BamWriter(SeqLib::SAM);
+ else if (opt::mode == 'C') {
+ bw = SeqLib::BamWriter(SeqLib::CRAM);
+ bw.SetCramReference(opt::reference);
+ }
+ else {
+ std::cerr << "Unrecognized output stream mode " << opt::mode << std::endl;
+ exit(EXIT_FAILURE);
+ }
+
+ if (!bw.Open("-"))
+ exit(EXIT_FAILURE);
+
+ SeqLib::BWAWrapper bwa;
+ if (!bwa.LoadIndex(opt::reference)) {
+ std::cerr << "Failed to load index for BWA-MEM from: " << opt::reference << std::endl;
+ exit(EXIT_FAILURE);
+ }
+
+ bw.SetHeader(bwa.HeaderFromIndex());
+ bw.WriteHeader();
+
+ if (opt::verbose)
+ std::cerr << "...aligning contig with BWA-MEM" << std::endl;
+
+ // run through and read
+ std::stringstream ss;
+ for (std::vector<std::string>::const_iterator i = contigs.begin(); i != contigs.end(); ++i) {
+ SeqLib::BamRecordVector brv;
+ const bool hardclip = false;
+ const double frac = 0.9;
+ ss << "contig" << ++count;
+ const int max_secondary = 10;
+ bwa.AlignSequence(*i, ss.str(), brv, hardclip, frac, max_secondary);
+ ss.str(std::string());
+ for (SeqLib::BamRecordVector::iterator r = brv.begin();
+ r != brv.end(); ++r) {
+ bw.WriteRecord(*r);
+ }
+ }
+
+}
+
+void runbfc(int argc, char** argv) {
+
+ parseOptions(argc, argv, BFC_USAGE_MESSAGE);
+
+ SeqLib::BFC b;
+
+ if (!opt::fasta.empty()) {
+ // read in a fasta file
+ SeqLib::FastqReader f(opt::fasta);
+
+ SeqLib::UnalignedSequence u;
+ while (f.GetNextSequence(u)) {
+ if (!b.AddSequence(u.Seq.c_str(), u.Qual.c_str(), u.Name.c_str())) {
+ std::cerr << "Error adding sequence from fasta: " << u.Seq << std::endl;
+ exit(EXIT_FAILURE);
+ }
+ }
+ } else { //if (opt::mode == 'b' || opt::mode == 's' || opt::mode == 'C') {
+ SeqLib::BamReader br;
+ if (!br.Open(opt::input == "-" ? "-" : opt::input))
+ exit(EXIT_FAILURE);
+ if (opt::verbose)
+ std::cerr << "...opened " << opt::input << std::endl;
+ SeqLib::BamRecord rec;
+ size_t count = 0;
+ while(br.GetNextRecord(rec)) {
+ if (++count % 1000000 == 0 && opt::verbose)
+ std::cerr << "...at read " << SeqLib::AddCommas(count) << " " << rec.Brief() << std::endl;
+ b.AddSequence(rec); //rec.Sequence().c_str(), rec.Qualities().c_str(), rec.Qname().c_str());
+ }
+ }
+
+ if (opt::verbose)
+ std::cerr << "...read in " << SeqLib::AddCommas(b.NumSequences()) << " sequences" << std::endl;
+
+ if (!b.Train()) {
+ std::cerr << "Training failed on " << b.NumSequences() << std::endl;
+ exit(EXIT_FAILURE);
+ }
+ if (opt::verbose)
+ std::cerr << "...finished training " << SeqLib::AddCommas(b.NumSequences()) << " sequences" << std::endl;
+ if (!b.ErrorCorrect()) {
+ std::cerr << "Correction failed on " << b.NumSequences() << std::endl;
+ exit(EXIT_FAILURE);
+ }
+ if (opt::verbose)
+ std::cerr << "...finished correcting " << SeqLib::AddCommas(b.NumSequences()) << " sequences" << std::endl;
+
+ SeqLib::UnalignedSequenceVector u;
+ b.GetSequences(u);
+ if (opt::verbose)
+ std::cerr << "nseqs: " << u.size()
+ << " kcov: " << b.GetKCov()
+ << " kmer: " << b.GetKMer() << std::endl;
+
+
+ if (opt::mode == 'f') {
+ for (SeqLib::UnalignedSequenceVector::const_iterator i = u.begin();
+ i != u.end(); ++i) {
+ std::cout << ">" << i->Name << std::endl << i->Seq << std::endl;
+ }
+ return;
+ }
+
+ SeqLib::BamWriter bw;
+ if (opt::mode == 'b')
+ bw = SeqLib::BamWriter(SeqLib::BAM);
+ else if (opt::mode == 's')
+ bw = SeqLib::BamWriter(SeqLib::SAM);
+ else if (opt::mode == 'C') {
+ bw = SeqLib::BamWriter(SeqLib::CRAM);
+ bw.SetCramReference(opt::reference);
+ }
+ else {
+ std::cerr << "Unrecognized output stream mode " << opt::mode << std::endl;
+ exit(EXIT_FAILURE);
+ }
+
+ if (!bw.Open("-"))
+ exit(EXIT_FAILURE);
+
+ SeqLib::BWAWrapper bwa;
+ if (opt::verbose)
+ std::cerr << "...loading reference genome" << std::endl;
+ if (!bwa.LoadIndex(opt::reference)) {
+ std::cerr << "Failed to load index for BWA-MEM from: " << opt::reference << std::endl;
+ exit(EXIT_FAILURE);
+ }
+
+ bw.SetHeader(bwa.HeaderFromIndex());
+ bw.WriteHeader();
+
+ if (opt::verbose)
+ std::cerr << "...realigning corrected sequences with BWA-MEM" << std::endl;
+ // run through and read
+ for (SeqLib::UnalignedSequenceVector::const_iterator i = u.begin(); i != u.end(); ++i) {
+ SeqLib::BamRecordVector brv;
+ const bool hardclip = false;
+ const double frac = 0.9;
+ const int max_secondary = 10;
+ bwa.AlignSequence(i->Seq, i->Name, brv, hardclip, frac, max_secondary);
+ for (SeqLib::BamRecordVector::iterator r = brv.begin();
+ r != brv.end(); ++r) {
+ if (!i->Qual.empty())
+ r->SetQualities(i->Qual, 33);
+ bw.WriteRecord(*r);
+ }
+ }
+
+}
+// parse the command line options
+void parseOptions(int argc, char** argv, const char* msg) {
+
+ bool die = false;
+ bool help = false;
+
+ // get the first argument as input
+ if (argc > 1)
+ opt::input = std::string(argv[1]);
+
+ for (int c; (c = getopt_long(argc, argv, shortopts, longopts, NULL)) != -1;) { // int, not char: getopt_long returns int
+ std::istringstream arg(optarg != NULL ? optarg : "");
+ switch (c) {
+ case 'h': help = true; break;
+ case 'v': opt::verbose = true; break;
+ case 'f': opt::mode = 'f'; break;
+ case 'F': arg >> opt::fasta; break;
+ case 'b': opt::mode = 'b'; break;
+ case 'C': opt::mode = 'C'; break;
+ case 'T': arg >> opt::target; break;
+ case 'G': arg >> opt::reference; break;
+ default: die = true;
+ }
+ }
+
+ if (die || help || (opt::input.empty() && opt::fasta.empty())) {
+ std::cerr << "\n" << msg;
+ if (die)
+ exit(EXIT_FAILURE);
+ else
+ exit(EXIT_SUCCESS);
+ }
+}
diff --git a/src/ssw.c b/src/ssw.c
new file mode 100644
index 0000000..213d486
--- /dev/null
+++ b/src/ssw.c
@@ -0,0 +1,884 @@
+/* The MIT License
+
+ Copyright (c) 2012-2015 Boston College.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/* Contact: Mengyao Zhao <zhangmp at bc.edu> */
+
+/*
+ * ssw.c
+ *
+ * Created by Mengyao Zhao on 6/22/10.
+ * Copyright 2010 Boston College. All rights reserved.
+ * Version 0.1.4
+ * Last revision by Mengyao Zhao on 02/11/16.
+ *
+ */
+
+#include <emmintrin.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include "SeqLib/ssw.h"
+
+#ifdef __GNUC__
+#define LIKELY(x) __builtin_expect((x),1)
+#define UNLIKELY(x) __builtin_expect((x),0)
+#else
+#define LIKELY(x) (x)
+#define UNLIKELY(x) (x)
+#endif
+
+/* Convert the coordinate in the scoring matrix into the coordinate in one line of the band. */
+#define set_u(u, w, i, j) { int x=(i)-(w); x=x>0?x:0; (u)=(j)-x+1; }
+
+/* Convert the coordinate in the direction matrix into the coordinate in one line of the band. */
+#define set_d(u, w, i, j, p) { int x=(i)-(w); x=x>0?x:0; x=(j)-x; (u)=x*3+p; }
+
+/*! @function
+ @abstract Round an integer to the next closest power-2 integer.
+ @param x integer to be rounded (in place)
+ @discussion x will be modified.
+ */
+#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
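+/* Illustrative values (not part of the original source): kroundup32 turns 20 into 32 and
+   leaves 64 (already a power of two) and 1 unchanged; the initial decrement is what makes
+   exact powers of two map to themselves. */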
+
+typedef struct {
+ uint16_t score;
+ int32_t ref; //0-based position
+ int32_t read; //alignment ending position on read, 0-based
+} alignment_end;
+
+typedef struct {
+ uint32_t* seq;
+ int32_t length;
+} cigar;
+
+struct _profile{
+ __m128i* profile_byte; // 0: none
+ __m128i* profile_word; // 0: none
+ const int8_t* read;
+ const int8_t* mat;
+ int32_t readLen;
+ int32_t n;
+ uint8_t bias;
+};
+
+/* Generate query profile: rearrange the query sequence & calculate the weight of match/mismatch. */
+static __m128i* qP_byte (const int8_t* read_num,
+ const int8_t* mat,
+ const int32_t readLen,
+ const int32_t n, /* the edge length of the square matrix mat */
+ uint8_t bias) {
+
+ int32_t segLen = (readLen + 15) / 16; /* Split the 128 bit register into 16 pieces.
+ Each piece is 8 bit. Split the read into 16 segments.
+ Calculate 16 segments in parallel.
+ */
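+ /* Illustrative layout (added for clarity, not in the original source): for readLen = 35,
+    segLen = 3, so lane k of segment s holds query position s + k*segLen; e.g. segment 0
+    covers positions 0, 3, 6, ..., 45, and any position >= readLen is padded with the bias. */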
+ __m128i* vProfile = (__m128i*)malloc(n * segLen * sizeof(__m128i));
+ int8_t* t = (int8_t*)vProfile;
+ int32_t nt, i, j, segNum;
+
+ /* Generate query profile: rearrange the query sequence & calculate the weight of match/mismatch */
+ for (nt = 0; LIKELY(nt < n); nt ++) {
+ for (i = 0; i < segLen; i ++) {
+ j = i;
+ for (segNum = 0; LIKELY(segNum < 16) ; segNum ++) {
+ *t++ = j>= readLen ? bias : mat[nt * n + read_num[j]] + bias;
+ j += segLen;
+ }
+ }
+ }
+ return vProfile;
+}
+
+/* Striped Smith-Waterman
+ Record the highest score of each reference position.
+ Return the alignment score and ending position of the best alignment, 2nd best alignment, etc.
+ Gap begin and gap extension are different.
+ weight_match > 0, all other weights < 0.
+ The returned positions are 0-based.
+ */
+static alignment_end* sw_sse2_byte (const int8_t* ref,
+ int8_t ref_dir, // 0: forward ref; 1: reverse ref
+ int32_t refLen,
+ int32_t readLen,
+ const uint8_t weight_gapO, /* will be used as - */
+ const uint8_t weight_gapE, /* will be used as - */
+ const __m128i* vProfile,
+ uint8_t terminate, /* the best alignment score: used to terminate
+ the matrix calculation when locating the
+ alignment beginning point. If this score
+ is set to 0, it will not be used */
+ uint8_t bias, /* Shift 0 point to a positive value. */
+ int32_t maskLen) {
+
+#define max16(m, vm) (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 8)); \
+ (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 4)); \
+ (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 2)); \
+ (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 1)); \
+ (m) = _mm_extract_epi16((vm), 0)
+
+ uint8_t max = 0; /* the max alignment score */
+ int32_t end_read = readLen - 1;
+ int32_t end_ref = -1; /* 0-based best alignment ending point; initialized to -1 (not aligned). */
+ int32_t segLen = (readLen + 15) / 16; /* number of segment */
+
+ /* array to record the largest score of each reference position */
+ uint8_t* maxColumn = (uint8_t*) calloc(refLen, 1);
+
+ /* array to record the alignment read ending position of the largest score of each reference position */
+ int32_t* end_read_column = (int32_t*) calloc(refLen, sizeof(int32_t));
+
+ /* Define 16 byte 0 vector. */
+ __m128i vZero = _mm_set1_epi32(0);
+
+ __m128i* pvHStore = (__m128i*) calloc(segLen, sizeof(__m128i));
+ __m128i* pvHLoad = (__m128i*) calloc(segLen, sizeof(__m128i));
+ __m128i* pvE = (__m128i*) calloc(segLen, sizeof(__m128i));
+ __m128i* pvHmax = (__m128i*) calloc(segLen, sizeof(__m128i));
+
+ int32_t i, j;
+ /* 16 byte insertion begin vector */
+ __m128i vGapO = _mm_set1_epi8(weight_gapO);
+
+ /* 16 byte insertion extension vector */
+ __m128i vGapE = _mm_set1_epi8(weight_gapE);
+
+ /* 16 byte bias vector */
+ __m128i vBias = _mm_set1_epi8(bias);
+
+ __m128i vMaxScore = vZero; /* Trace the highest score of the whole SW matrix. */
+ __m128i vMaxMark = vZero; /* Trace the highest score till the previous column. */
+ __m128i vTemp;
+ int32_t edge, begin = 0, end = refLen, step = 1;
+
+ /* outer loop to process the reference sequence */
+ if (ref_dir == 1) {
+ begin = refLen - 1;
+ end = -1;
+ step = -1;
+ }
+ for (i = begin; LIKELY(i != end); i += step) {
+//fprintf(stderr, "%d", ref[i]);
+ int32_t cmp;
+ __m128i e, vF = vZero, vMaxColumn = vZero; /* Initialize F value to 0.
+ Any errors to vH values will be corrected in the Lazy_F loop.
+ */
+// max16(maxColumn[i], vMaxColumn);
+// fprintf(stderr, "middle[%d]: %d\n", i, maxColumn[i]);
+
+ __m128i vH = pvHStore[segLen - 1];
+ vH = _mm_slli_si128 (vH, 1); /* Shift the 128-bit value in vH left by 1 byte. */
+ const __m128i* vP = vProfile + ref[i] * segLen; /* Right part of the vProfile */
+
+ /* Swap the 2 H buffers. */
+ __m128i* pv = pvHLoad;
+ pvHLoad = pvHStore;
+ pvHStore = pv;
+
+ /* inner loop to process the query sequence */
+ for (j = 0; LIKELY(j < segLen); ++j) {
+ vH = _mm_adds_epu8(vH, _mm_load_si128(vP + j));
+ vH = _mm_subs_epu8(vH, vBias); /* vH will be always > 0 */
+ // max16(maxColumn[i], vH);
+ // fprintf(stderr, "H[%d]: %d\n", i, maxColumn[i]);
+// int8_t* t;
+// int32_t ti;
+//for (t = (int8_t*)&vH, ti = 0; ti < 16; ++ti) fprintf(stderr, "%d\t", *t++);
+
+ /* Get max from vH, vE and vF. */
+ e = _mm_load_si128(pvE + j);
+ vH = _mm_max_epu8(vH, e);
+ vH = _mm_max_epu8(vH, vF);
+ vMaxColumn = _mm_max_epu8(vMaxColumn, vH);
+
+ // max16(maxColumn[i], vMaxColumn);
+ // fprintf(stderr, "middle[%d]: %d\n", i, maxColumn[i]);
+// for (t = (int8_t*)&vMaxColumn, ti = 0; ti < 16; ++ti) fprintf(stderr, "%d\t", *t++);
+
+ /* Save vH values. */
+ _mm_store_si128(pvHStore + j, vH);
+
+ /* Update vE value. */
+ vH = _mm_subs_epu8(vH, vGapO); /* saturation arithmetic, result >= 0 */
+ e = _mm_subs_epu8(e, vGapE);
+ e = _mm_max_epu8(e, vH);
+ _mm_store_si128(pvE + j, e);
+
+ /* Update vF value. */
+ vF = _mm_subs_epu8(vF, vGapE);
+ vF = _mm_max_epu8(vF, vH);
+
+ /* Load the next vH. */
+ vH = _mm_load_si128(pvHLoad + j);
+ }
+
+ /* Lazy_F loop: has been revised to disallow adjacent insertion and then deletion, so don't update E(i, j); learned from SWPS3 */
+ /* reset pointers to the start of the saved data */
+ j = 0;
+ vH = _mm_load_si128 (pvHStore + j);
+
+ /* the computed vF value is for the given column. since */
+ /* we are at the end, we need to shift the vF value over */
+ /* to the next column. */
+ vF = _mm_slli_si128 (vF, 1);
+ vTemp = _mm_subs_epu8 (vH, vGapO);
+ vTemp = _mm_subs_epu8 (vF, vTemp);
+ vTemp = _mm_cmpeq_epi8 (vTemp, vZero);
+ cmp = _mm_movemask_epi8 (vTemp);
+
+ while (cmp != 0xffff)
+ {
+ vH = _mm_max_epu8 (vH, vF);
+ vMaxColumn = _mm_max_epu8(vMaxColumn, vH);
+ _mm_store_si128 (pvHStore + j, vH);
+ vF = _mm_subs_epu8 (vF, vGapE);
+ j++;
+ if (j >= segLen)
+ {
+ j = 0;
+ vF = _mm_slli_si128 (vF, 1);
+ }
+ vH = _mm_load_si128 (pvHStore + j);
+
+ vTemp = _mm_subs_epu8 (vH, vGapO);
+ vTemp = _mm_subs_epu8 (vF, vTemp);
+ vTemp = _mm_cmpeq_epi8 (vTemp, vZero);
+ cmp = _mm_movemask_epi8 (vTemp);
+ }
+
+ vMaxScore = _mm_max_epu8(vMaxScore, vMaxColumn);
+ vTemp = _mm_cmpeq_epi8(vMaxMark, vMaxScore);
+ cmp = _mm_movemask_epi8(vTemp);
+ if (cmp != 0xffff) {
+ uint8_t temp;
+ vMaxMark = vMaxScore;
+ max16(temp, vMaxScore);
+ vMaxScore = vMaxMark;
+
+ if (LIKELY(temp > max)) {
+ max = temp;
+ if (max + bias >= 255) break; //overflow
+ end_ref = i;
+
+ /* Store the column with the highest alignment score in order to trace the alignment ending position on read. */
+ for (j = 0; LIKELY(j < segLen); ++j) pvHmax[j] = pvHStore[j];
+ }
+ }
+
+ /* Record the max score of current column. */
+ max16(maxColumn[i], vMaxColumn);
+ // fprintf(stderr, "maxColumn[%d]: %d\n", i, maxColumn[i]);
+ if (maxColumn[i] == terminate) break;
+ }
+
+ /* Trace the alignment ending position on read. */
+ uint8_t *t = (uint8_t*)pvHmax;
+ int32_t column_len = segLen * 16;
+ for (i = 0; LIKELY(i < column_len); ++i, ++t) {
+ int32_t temp;
+ if (*t == max) {
+ temp = i / 16 + i % 16 * segLen;
+ if (temp < end_read) end_read = temp;
+ }
+ }
+
+ free(pvHmax);
+ free(pvE);
+ free(pvHLoad);
+ free(pvHStore);
+
+ /* Find the most likely 2nd best alignment. */
+ alignment_end* bests = (alignment_end*) calloc(2, sizeof(alignment_end));
+ bests[0].score = max + bias >= 255 ? 255 : max;
+ bests[0].ref = end_ref;
+ bests[0].read = end_read;
+
+ bests[1].score = 0;
+ bests[1].ref = 0;
+ bests[1].read = 0;
+
+ edge = (end_ref - maskLen) > 0 ? (end_ref - maskLen) : 0;
+ for (i = 0; i < edge; i ++) {
+// fprintf (stderr, "maxColumn[%d]: %d\n", i, maxColumn[i]);
+ if (maxColumn[i] > bests[1].score) {
+ bests[1].score = maxColumn[i];
+ bests[1].ref = i;
+ }
+ }
+ edge = (end_ref + maskLen) > refLen ? refLen : (end_ref + maskLen);
+ for (i = edge + 1; i < refLen; i ++) {
+// fprintf (stderr, "refLen: %d\tmaxColumn[%d]: %d\n", refLen, i, maxColumn[i]);
+ if (maxColumn[i] > bests[1].score) {
+ bests[1].score = maxColumn[i];
+ bests[1].ref = i;
+ }
+ }
+
+ free(maxColumn);
+ free(end_read_column);
+ return bests;
+}
+
+static __m128i* qP_word (const int8_t* read_num,
+ const int8_t* mat,
+ const int32_t readLen,
+ const int32_t n) {
+
+ int32_t segLen = (readLen + 7) / 8;
+ __m128i* vProfile = (__m128i*)malloc(n * segLen * sizeof(__m128i));
+ int16_t* t = (int16_t*)vProfile;
+ int32_t nt, i, j;
+ int32_t segNum;
+
+ /* Generate query profile: rearrange the query sequence & calculate the weight of match/mismatch */
+ for (nt = 0; LIKELY(nt < n); nt ++) {
+ for (i = 0; i < segLen; i ++) {
+ j = i;
+ for (segNum = 0; LIKELY(segNum < 8) ; segNum ++) {
+ *t++ = j>= readLen ? 0 : mat[nt * n + read_num[j]];
+ j += segLen;
+ }
+ }
+ }
+ return vProfile;
+}
+
+static alignment_end* sw_sse2_word (const int8_t* ref,
+ int8_t ref_dir, // 0: forward ref; 1: reverse ref
+ int32_t refLen,
+ int32_t readLen,
+ const uint8_t weight_gapO, /* will be used as - */
+ const uint8_t weight_gapE, /* will be used as - */
+ const __m128i* vProfile,
+ uint16_t terminate,
+ int32_t maskLen) {
+
+#define max8(m, vm) (vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 8)); \
+ (vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 4)); \
+ (vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 2)); \
+ (m) = _mm_extract_epi16((vm), 0)
+
+ uint16_t max = 0; /* the max alignment score */
+ int32_t end_read = readLen - 1;
+ int32_t end_ref = 0; /* 1-based best alignment ending point; initialized to 0 (not aligned). */
+ int32_t segLen = (readLen + 7) / 8; /* number of segment */
+
+ /* array to record the largest score of each reference position */
+ uint16_t* maxColumn = (uint16_t*) calloc(refLen, 2);
+
+ /* array to record the alignment read ending position of the largest score of each reference position */
+ int32_t* end_read_column = (int32_t*) calloc(refLen, sizeof(int32_t));
+
+ /* Define 16 byte 0 vector. */
+ __m128i vZero = _mm_set1_epi32(0);
+
+ __m128i* pvHStore = (__m128i*) calloc(segLen, sizeof(__m128i));
+ __m128i* pvHLoad = (__m128i*) calloc(segLen, sizeof(__m128i));
+ __m128i* pvE = (__m128i*) calloc(segLen, sizeof(__m128i));
+ __m128i* pvHmax = (__m128i*) calloc(segLen, sizeof(__m128i));
+
+ int32_t i, j, k;
+ /* 16 byte insertion begin vector */
+ __m128i vGapO = _mm_set1_epi16(weight_gapO);
+
+ /* 16 byte insertion extension vector */
+ __m128i vGapE = _mm_set1_epi16(weight_gapE);
+
+ __m128i vMaxScore = vZero; /* Trace the highest score of the whole SW matrix. */
+ __m128i vMaxMark = vZero; /* Trace the highest score till the previous column. */
+ __m128i vTemp;
+ int32_t edge, begin = 0, end = refLen, step = 1;
+
+ /* outer loop to process the reference sequence */
+ if (ref_dir == 1) {
+ begin = refLen - 1;
+ end = -1;
+ step = -1;
+ }
+ for (i = begin; LIKELY(i != end); i += step) {
+ int32_t cmp;
+ __m128i e, vF = vZero; /* Initialize F value to 0.
+ Any errors to vH values will be corrected in the Lazy_F loop.
+ */
+ __m128i vH = pvHStore[segLen - 1];
+ vH = _mm_slli_si128 (vH, 2); /* Shift the 128-bit value in vH left by 2 bytes. */
+
+ /* Swap the 2 H buffers. */
+ __m128i* pv = pvHLoad;
+
+ __m128i vMaxColumn = vZero; /* vMaxColumn is used to record the max values of column i. */
+
+ const __m128i* vP = vProfile + ref[i] * segLen; /* Right part of the vProfile */
+ pvHLoad = pvHStore;
+ pvHStore = pv;
+
+ /* inner loop to process the query sequence */
+ for (j = 0; LIKELY(j < segLen); j ++) {
+ vH = _mm_adds_epi16(vH, _mm_load_si128(vP + j));
+
+ /* Get max from vH, vE and vF. */
+ e = _mm_load_si128(pvE + j);
+ vH = _mm_max_epi16(vH, e);
+ vH = _mm_max_epi16(vH, vF);
+ vMaxColumn = _mm_max_epi16(vMaxColumn, vH);
+
+ /* Save vH values. */
+ _mm_store_si128(pvHStore + j, vH);
+
+ /* Update vE value. */
+ vH = _mm_subs_epu16(vH, vGapO); /* saturation arithmetic, result >= 0 */
+ e = _mm_subs_epu16(e, vGapE);
+ e = _mm_max_epi16(e, vH);
+ _mm_store_si128(pvE + j, e);
+
+ /* Update vF value. */
+ vF = _mm_subs_epu16(vF, vGapE);
+ vF = _mm_max_epi16(vF, vH);
+
+ /* Load the next vH. */
+ vH = _mm_load_si128(pvHLoad + j);
+ }
+
+ /* Lazy_F loop: has been revised to disallow adjacent insertion and then deletion, so don't update E(i, j); learned from SWPS3 */
+ for (k = 0; LIKELY(k < 8); ++k) {
+ vF = _mm_slli_si128 (vF, 2);
+ for (j = 0; LIKELY(j < segLen); ++j) {
+ vH = _mm_load_si128(pvHStore + j);
+ vH = _mm_max_epi16(vH, vF);
+ vMaxColumn = _mm_max_epi16(vMaxColumn, vH); //newly added line
+ _mm_store_si128(pvHStore + j, vH);
+ vH = _mm_subs_epu16(vH, vGapO);
+ vF = _mm_subs_epu16(vF, vGapE);
+ if (UNLIKELY(! _mm_movemask_epi8(_mm_cmpgt_epi16(vF, vH)))) goto end;
+ }
+ }
+
+end:
+ vMaxScore = _mm_max_epi16(vMaxScore, vMaxColumn);
+ vTemp = _mm_cmpeq_epi16(vMaxMark, vMaxScore);
+ cmp = _mm_movemask_epi8(vTemp);
+ if (cmp != 0xffff) {
+ uint16_t temp;
+ vMaxMark = vMaxScore;
+ max8(temp, vMaxScore);
+ vMaxScore = vMaxMark;
+
+ if (LIKELY(temp > max)) {
+ max = temp;
+ end_ref = i;
+ for (j = 0; LIKELY(j < segLen); ++j) pvHmax[j] = pvHStore[j];
+ }
+ }
+
+ /* Record the max score of current column. */
+ max8(maxColumn[i], vMaxColumn);
+ if (maxColumn[i] == terminate) break;
+ }
+
+ /* Trace the alignment ending position on read. */
+ uint16_t *t = (uint16_t*)pvHmax;
+ int32_t column_len = segLen * 8;
+ for (i = 0; LIKELY(i < column_len); ++i, ++t) {
+ int32_t temp;
+ if (*t == max) {
+ temp = i / 8 + i % 8 * segLen;
+ if (temp < end_read) end_read = temp;
+ }
+ }
+
+ free(pvHmax);
+ free(pvE);
+ free(pvHLoad);
+ free(pvHStore);
+
+ /* Find the most likely 2nd best alignment. */
+ alignment_end* bests = (alignment_end*) calloc(2, sizeof(alignment_end));
+ bests[0].score = max;
+ bests[0].ref = end_ref;
+ bests[0].read = end_read;
+
+ bests[1].score = 0;
+ bests[1].ref = 0;
+ bests[1].read = 0;
+
+ edge = (end_ref - maskLen) > 0 ? (end_ref - maskLen) : 0;
+ for (i = 0; i < edge; i ++) {
+ if (maxColumn[i] > bests[1].score) {
+ bests[1].score = maxColumn[i];
+ bests[1].ref = i;
+ }
+ }
+ edge = (end_ref + maskLen) > refLen ? refLen : (end_ref + maskLen);
+ for (i = edge; i < refLen; i ++) {
+ if (maxColumn[i] > bests[1].score) {
+ bests[1].score = maxColumn[i];
+ bests[1].ref = i;
+ }
+ }
+
+ free(maxColumn);
+ free(end_read_column);
+ return bests;
+}
+
+static cigar* banded_sw (const int8_t* ref,
+ const int8_t* read,
+ int32_t refLen,
+ int32_t readLen,
+ int32_t score,
+ const uint32_t weight_gapO, /* will be used as - */
+ const uint32_t weight_gapE, /* will be used as - */
+ int32_t band_width,
+ const int8_t* mat, /* pointer to the weight matrix */
+ int32_t n) {
+
+ uint32_t *c = (uint32_t*)malloc(16 * sizeof(uint32_t)), *c1;
+ int32_t i, j, e, f, temp1, temp2, s = 16, s1 = 8, l, max = 0;
+ int64_t s2 = 1024;
+ char op, prev_op;
+ int32_t width, width_d, *h_b, *e_b, *h_c;
+ int8_t *direction, *direction_line;
+ cigar* result = (cigar*)malloc(sizeof(cigar));
+ h_b = (int32_t*)malloc(s1 * sizeof(int32_t));
+ e_b = (int32_t*)malloc(s1 * sizeof(int32_t));
+ h_c = (int32_t*)malloc(s1 * sizeof(int32_t));
+ direction = (int8_t*)malloc(s2 * sizeof(int8_t));
+
+ do {
+ width = band_width * 2 + 3, width_d = band_width * 2 + 1;
+ while (width >= s1) {
+ ++s1;
+ kroundup32(s1);
+ h_b = (int32_t*)realloc(h_b, s1 * sizeof(int32_t));
+ e_b = (int32_t*)realloc(e_b, s1 * sizeof(int32_t));
+ h_c = (int32_t*)realloc(h_c, s1 * sizeof(int32_t));
+ }
+ while (width_d * readLen * 3 >= s2) {
+ ++s2;
+ kroundup32(s2);
+ if (s2 < 0) {
+ fprintf(stderr, "Alignment score and position are not consensus.\n");
+ exit(1);
+ }
+ direction = (int8_t*)realloc(direction, s2 * sizeof(int8_t));
+ }
+ direction_line = direction;
+ for (j = 1; LIKELY(j < width - 1); j ++) h_b[j] = 0;
+ for (i = 0; LIKELY(i < readLen); i ++) {
+ int32_t beg = 0, end = refLen - 1, u = 0, edge;
+ j = i - band_width; beg = beg > j ? beg : j; // band start
+ j = i + band_width; end = end < j ? end : j; // band end
+ edge = end + 1 < width - 1 ? end + 1 : width - 1;
+ f = h_b[0] = e_b[0] = h_b[edge] = e_b[edge] = h_c[0] = 0;
+ direction_line = direction + width_d * i * 3;
+
+ for (j = beg; LIKELY(j <= end); j ++) {
+ int32_t b, e1, f1, d, de, df, dh;
+ set_u(u, band_width, i, j); set_u(e, band_width, i - 1, j);
+ set_u(b, band_width, i, j - 1); set_u(d, band_width, i - 1, j - 1);
+ set_d(de, band_width, i, j, 0);
+ set_d(df, band_width, i, j, 1);
+ set_d(dh, band_width, i, j, 2);
+
+ temp1 = i == 0 ? -weight_gapO : h_b[e] - weight_gapO;
+ temp2 = i == 0 ? -weight_gapE : e_b[e] - weight_gapE;
+ e_b[u] = temp1 > temp2 ? temp1 : temp2;
+ //fprintf(stderr, "de: %d\twidth_d: %d\treadLen: %d\ts2:%lu\n", de, width_d, readLen, s2);
+ direction_line[de] = temp1 > temp2 ? 3 : 2;
+
+ temp1 = h_c[b] - weight_gapO;
+ temp2 = f - weight_gapE;
+ f = temp1 > temp2 ? temp1 : temp2;
+ direction_line[df] = temp1 > temp2 ? 5 : 4;
+
+ e1 = e_b[u] > 0 ? e_b[u] : 0;
+ f1 = f > 0 ? f : 0;
+ temp1 = e1 > f1 ? e1 : f1;
+ temp2 = h_b[d] + mat[ref[j] * n + read[i]];
+ h_c[u] = temp1 > temp2 ? temp1 : temp2;
+
+ if (h_c[u] > max) max = h_c[u];
+
+ if (temp1 <= temp2) direction_line[dh] = 1;
+ else direction_line[dh] = e1 > f1 ? direction_line[de] : direction_line[df];
+ }
+ for (j = 1; j <= u; j ++) h_b[j] = h_c[j];
+ }
+ band_width *= 2;
+ } while (LIKELY(max < score));
+ band_width /= 2;
+
+ // trace back
+ i = readLen - 1;
+ j = refLen - 1;
+ e = 0; // Count the number of M, D or I.
+ l = 0; // record length of current cigar
+ op = prev_op = 'M';
+ temp2 = 2; // h
+ while (LIKELY(i > 0)) {
+ set_d(temp1, band_width, i, j, temp2);
+ switch (direction_line[temp1]) {
+ case 1:
+ --i;
+ --j;
+ temp2 = 2;
+ direction_line -= width_d * 3;
+ op = 'M';
+ break;
+ case 2:
+ --i;
+ temp2 = 0; // e
+ direction_line -= width_d * 3;
+ op = 'I';
+ break;
+ case 3:
+ --i;
+ temp2 = 2;
+ direction_line -= width_d * 3;
+ op = 'I';
+ break;
+ case 4:
+ --j;
+ temp2 = 1;
+ op = 'D';
+ break;
+ case 5:
+ --j;
+ temp2 = 2;
+ op = 'D';
+ break;
+ default:
+ fprintf(stderr, "Trace back error: %d.\n", direction_line[temp1 - 1]);
+ free(direction);
+ free(h_c);
+ free(e_b);
+ free(h_b);
+ free(c);
+ free(result);
+ return 0;
+ }
+ if (op == prev_op) ++e;
+ else {
+ ++l;
+ while (l >= s) {
+ ++s;
+ kroundup32(s);
+ c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
+ }
+ c[l - 1] = to_cigar_int(e, prev_op);
+ prev_op = op;
+ e = 1;
+ }
+ }
+ if (op == 'M') {
+ ++l;
+ while (l >= s) {
+ ++s;
+ kroundup32(s);
+ c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
+ }
+ c[l - 1] = to_cigar_int(e + 1, op);
+ }else {
+ l += 2;
+ while (l >= s) {
+ ++s;
+ kroundup32(s);
+ c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
+ }
+ c[l - 2] = to_cigar_int(e, op);
+ c[l - 1] = to_cigar_int(1, 'M');
+ }
+
+ // reverse cigar
+ c1 = (uint32_t*)malloc(l * sizeof(uint32_t));
+ s = 0;
+ e = l - 1;
+ while (LIKELY(s <= e)) {
+ c1[s] = c[e];
+ c1[e] = c[s];
+ ++ s;
+ -- e;
+ }
+ result->seq = c1;
+ result->length = l;
+
+ free(direction);
+ free(h_c);
+ free(e_b);
+ free(h_b);
+ free(c);
+ return result;
+}
+
+static int8_t* seq_reverse(const int8_t* seq, int32_t end) /* end is 0-based alignment ending position */
+{
+ int8_t* reverse = (int8_t*)calloc(end + 1, sizeof(int8_t));
+ int32_t start = 0;
+ while (LIKELY(start <= end)) {
+ reverse[start] = seq[end];
+ reverse[end] = seq[start];
+ ++ start;
+ -- end;
+ }
+ return reverse;
+}
+
+s_profile* ssw_init (const int8_t* read, const int32_t readLen, const int8_t* mat, const int32_t n, const int8_t score_size) {
+ s_profile* p = (s_profile*)calloc(1, sizeof(struct _profile));
+ p->profile_byte = 0;
+ p->profile_word = 0;
+ p->bias = 0;
+
+ if (score_size == 0 || score_size == 2) {
+ /* Find the bias to use in the substitution matrix */
+ int32_t bias = 0, i;
+ for (i = 0; i < n*n; i++) if (mat[i] < bias) bias = mat[i];
+ bias = abs(bias);
+
+ p->bias = bias;
+ p->profile_byte = qP_byte (read, mat, readLen, n, bias);
+ }
+ if (score_size == 1 || score_size == 2) p->profile_word = qP_word (read, mat, readLen, n);
+ p->read = read;
+ p->mat = mat;
+ p->readLen = readLen;
+ p->n = n;
+ return p;
+}
+
+void init_destroy (s_profile* p) {
+ free(p->profile_byte);
+ free(p->profile_word);
+ free(p);
+}
+
+s_align* ssw_align (const s_profile* prof,
+ const int8_t* ref,
+ int32_t refLen,
+ const uint8_t weight_gapO,
+ const uint8_t weight_gapE,
+ const uint8_t flag, // (from high to low) bit 5: return the best alignment beginning position; 6: if (ref_end1 - ref_begin1 <= filterd) && (read_end1 - read_begin1 <= filterd), return cigar; 7: if max score >= filters, return cigar; 8: always return cigar; if 6 & 7 are both set, only return cigar when both filters are fulfilled
+ const uint16_t filters,
+ const int32_t filterd,
+ const int32_t maskLen) {
+
+ alignment_end* bests = 0, *bests_reverse = 0;
+ __m128i* vP = 0;
+ int32_t word = 0, band_width = 0, readLen = prof->readLen;
+ int8_t* read_reverse = 0;
+ cigar* path;
+ s_align* r = (s_align*)calloc(1, sizeof(s_align));
+ r->ref_begin1 = -1;
+ r->read_begin1 = -1;
+ r->cigar = 0;
+ r->cigarLen = 0;
+ if (maskLen < 15) {
+ fprintf(stderr, "When maskLen < 15, the function ssw_align doesn't return 2nd best alignment information.\n");
+ }
+
+ // Find the alignment scores and ending positions
+ if (prof->profile_byte) {
+ bests = sw_sse2_byte(ref, 0, refLen, readLen, weight_gapO, weight_gapE, prof->profile_byte, -1, prof->bias, maskLen);
+ if (prof->profile_word && bests[0].score == 255) {
+ free(bests);
+ bests = sw_sse2_word(ref, 0, refLen, readLen, weight_gapO, weight_gapE, prof->profile_word, -1, maskLen);
+ word = 1;
+ } else if (bests[0].score == 255) {
+ fprintf(stderr, "Please set 2 to the score_size parameter of the function ssw_init, otherwise the alignment results will be incorrect.\n");
+ free(r);
+ return NULL;
+ }
+ }else if (prof->profile_word) {
+ bests = sw_sse2_word(ref, 0, refLen, readLen, weight_gapO, weight_gapE, prof->profile_word, -1, maskLen);
+ word = 1;
+ }else {
+ fprintf(stderr, "Please call the function ssw_init before ssw_align.\n");
+ free(r);
+ return NULL;
+ }
+ r->score1 = bests[0].score;
+ r->ref_end1 = bests[0].ref;
+//fprintf(stderr, "0based ref_end: %d\n", r->ref_end1);
+ r->read_end1 = bests[0].read;
+ if (maskLen >= 15) {
+ r->score2 = bests[1].score;
+ r->ref_end2 = bests[1].ref;
+ } else {
+ r->score2 = 0;
+ r->ref_end2 = -1;
+ }
+ free(bests);
+ if (flag == 0 || (flag == 2 && r->score1 < filters)) goto end;
+
+ // Find the beginning position of the best alignment.
+ read_reverse = seq_reverse(prof->read, r->read_end1);
+ if (word == 0) {
+ vP = qP_byte(read_reverse, prof->mat, r->read_end1 + 1, prof->n, prof->bias);
+ bests_reverse = sw_sse2_byte(ref, 1, r->ref_end1 + 1, r->read_end1 + 1, weight_gapO, weight_gapE, vP, r->score1, prof->bias, maskLen);
+ } else {
+ vP = qP_word(read_reverse, prof->mat, r->read_end1 + 1, prof->n);
+ bests_reverse = sw_sse2_word(ref, 1, r->ref_end1 + 1, r->read_end1 + 1, weight_gapO, weight_gapE, vP, r->score1, maskLen);
+ }
+ free(vP);
+ free(read_reverse);
+ r->ref_begin1 = bests_reverse[0].ref;
+ r->read_begin1 = r->read_end1 - bests_reverse[0].read;
+ free(bests_reverse);
+ if ((7&flag) == 0 || ((2&flag) != 0 && r->score1 < filters) || ((4&flag) != 0 && (r->ref_end1 - r->ref_begin1 > filterd || r->read_end1 - r->read_begin1 > filterd))) goto end;
+
+ // Generate cigar.
+ refLen = r->ref_end1 - r->ref_begin1 + 1;
+ readLen = r->read_end1 - r->read_begin1 + 1;
+ band_width = abs(refLen - readLen) + 1;
+ path = banded_sw(ref + r->ref_begin1, prof->read + r->read_begin1, refLen, readLen, r->score1, weight_gapO, weight_gapE, band_width, prof->mat, prof->n);
+ if (path == 0) {
+ free(r);
+ r = NULL;
+ }
+ else {
+ r->cigar = path->seq;
+ r->cigarLen = path->length;
+ free(path);
+ }
+
+end:
+ return r;
+}
+
+void align_destroy (s_align* a) {
+ free(a->cigar);
+ free(a);
+}
+/*
+inline char cigar_int_to_op(uint32_t cigar_int) {
+ return UNLIKELY((cigar_int & 0xfU) > 8) ? 'M': MAPSTR[cigar_int & 0xfU];
+}
+
+
+inline uint32_t cigar_int_to_len (uint32_t cigar_int)
+{
+ return cigar_int >> BAM_CIGAR_SHIFT;
+}*/
diff --git a/src/ssw_cpp.cpp b/src/ssw_cpp.cpp
new file mode 100644
index 0000000..b7018d3
--- /dev/null
+++ b/src/ssw_cpp.cpp
@@ -0,0 +1,477 @@
+#include "SeqLib/ssw_cpp.h"
+#include "SeqLib/ssw.h"
+
+#include <sstream>
+
+namespace {
+
+static const int8_t kBaseTranslation[128] = {
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ // A C G
+ 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
+ // T
+ 4, 4, 4, 4, 3, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ // a c g
+ 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
+ // t
+ 4, 4, 4, 4, 3, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
+};
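+
+// Illustrative reading of the table above (added for clarity): 'A'/'a' map to 0, 'C'/'c' to 1,
+// 'G'/'g' to 2, 'T'/'t' to 3, and 'N' (like most other byte values) to 4, i.e. the last
+// row/column of the 5x5 scoring matrix built below.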
+
+void BuildSwScoreMatrix(const uint8_t& match_score,
+ const uint8_t& mismatch_penalty,
+ int8_t* matrix) {
+
+ // The score matrix looks like
+ // // A, C, G, T, N
+ // score_matrix_ = { 2, -2, -2, -2, -2, // A
+ // -2, 2, -2, -2, -2, // C
+ // -2, -2, 2, -2, -2, // G
+ // -2, -2, -2, 2, -2, // T
+ // -2, -2, -2, -2, -2};// N
+
+ int id = 0;
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ matrix[id] = ((i == j) ? match_score : static_cast<int8_t>(-mismatch_penalty));
+ ++id;
+ }
+ matrix[id] = static_cast<int8_t>(-mismatch_penalty); // For N
+ ++id;
+ }
+
+ for (int i = 0; i < 5; ++i)
+ matrix[id++] = static_cast<int8_t>(-mismatch_penalty); // For N
+
+}
+
+void ConvertAlignment(const s_align& s_al,
+ const int& query_len,
+ StripedSmithWaterman::Alignment* al) {
+ al->sw_score = s_al.score1;
+ al->sw_score_next_best = s_al.score2;
+ al->ref_begin = s_al.ref_begin1;
+ al->ref_end = s_al.ref_end1;
+ al->query_begin = s_al.read_begin1;
+ al->query_end = s_al.read_end1;
+ al->ref_end_next_best = s_al.ref_end2;
+
+ al->cigar.clear();
+ al->cigar_string.clear();
+
+ if (s_al.cigarLen > 0) {
+ std::ostringstream cigar_string;
+ if (al->query_begin > 0) {
+ uint32_t cigar = to_cigar_int(al->query_begin, 'S');
+ al->cigar.push_back(cigar);
+ cigar_string << al->query_begin << 'S';
+ }
+
+ for (int i = 0; i < s_al.cigarLen; ++i) {
+ al->cigar.push_back(s_al.cigar[i]);
+ cigar_string << cigar_int_to_len(s_al.cigar[i]) << cigar_int_to_op(s_al.cigar[i]);
+ }
+
+ int end = query_len - al->query_end - 1;
+ if (end > 0) {
+ uint32_t cigar = to_cigar_int(end, 'S');
+ al->cigar.push_back(cigar);
+ cigar_string << end << 'S';
+ }
+
+ al->cigar_string = cigar_string.str();
+ } // end if
+}
+
+// @Function:
+// Calculate the length of the previous cigar operator
+// and store it in new_cigar and new_cigar_string.
+// Clean up in_M (false), in_X (false), length_M (0), and length_X(0).
+void CleanPreviousMOperator(
+ bool* in_M,
+ bool* in_X,
+ uint32_t* length_M,
+ uint32_t* length_X,
+ std::vector<uint32_t>* new_cigar,
+ std::ostringstream* new_cigar_string) {
+ if (*in_M) {
+ uint32_t match = to_cigar_int(*length_M, '=');
+ new_cigar->push_back(match);
+ (*new_cigar_string) << *length_M << '=';
+ } else if (*in_X){ //in_X
+ uint32_t match = to_cigar_int(*length_X, 'X');
+ new_cigar->push_back(match);
+ (*new_cigar_string) << *length_X << 'X';
+ }
+
+ // Clean up
+ *in_M = false;
+ *in_X = false;
+ *length_M = 0;
+ *length_X = 0;
+}
+
+// @Function:
+// 1. Calculate the number of mismatches (inserted and deleted bases are counted
+//    as well).
+// 2. Rewrite the cigar: split each 'M' operation into matches ('=') and
+//    mismatches ('X'). Note that SSW itself does not differentiate matches and
+//    mismatches.
+// @Return:
+// The number of mismatches.
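+//
+// A small worked example (hypothetical sequences; assumes the cigar returned by
+// ssw for this segment is a single 4M operation):
+//   ref = ACCT vs query = ACGT  ->  rewritten cigar "2=1X1=", mismatch count 1;
+//   a 1-base insertion in the query would additionally emit "1I" and raise the
+//   count to 2.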
+int CalculateNumberMismatch(
+ StripedSmithWaterman::Alignment* al,
+ int8_t const *ref,
+ int8_t const *query,
+ const int& query_len) {
+
+ ref += al->ref_begin;
+ query += al->query_begin;
+ int mismatch_length = 0;
+
+ std::vector<uint32_t> new_cigar;
+ std::ostringstream new_cigar_string;
+
+ if (al->query_begin > 0) {
+ uint32_t cigar = to_cigar_int(al->query_begin, 'S');
+ new_cigar.push_back(cigar);
+ new_cigar_string << al->query_begin << 'S';
+ }
+
+ bool in_M = false; // currently inside a run of matching bases
+ bool in_X = false; // currently inside a run of mismatching bases
+ uint32_t length_M = 0;
+ uint32_t length_X = 0;
+
+ for (unsigned int i = 0; i < al->cigar.size(); ++i) {
+ char op = cigar_int_to_op(al->cigar[i]);
+ uint32_t length = cigar_int_to_len(al->cigar[i]);
+ if (op == 'M') {
+ for (uint32_t j = 0; j < length; ++j) {
+ if (*ref != *query) {
+ ++mismatch_length;
+ if (in_M) { // the previous base was a match, but the current one is a mismatch
+ uint32_t match = to_cigar_int(length_M, '=');
+ new_cigar.push_back(match);
+ new_cigar_string << length_M << '=';
+ }
+ length_M = 0;
+ ++length_X;
+ in_M = false;
+ in_X = true;
+ } else { // *ref == *query
+ if (in_X) { // the previous base was a mismatch, but the current one is a match
+ uint32_t match = to_cigar_int(length_X, 'X');
+ new_cigar.push_back(match);
+ new_cigar_string << length_X << 'X';
+ }
+ ++length_M;
+ length_X = 0;
+ in_M = true;
+ in_X = false;
+ } // end of if (*ref != *query)
+ ++ref;
+ ++query;
+ }
+ } else if (op == 'I') {
+ query += length;
+ mismatch_length += length;
+ CleanPreviousMOperator(&in_M, &in_X, &length_M, &length_X, &new_cigar, &new_cigar_string);
+ new_cigar.push_back(al->cigar[i]);
+ new_cigar_string << length << 'I';
+ } else if (op == 'D') {
+ ref += length;
+ mismatch_length += length;
+ CleanPreviousMOperator(&in_M, &in_X, &length_M, &length_X, &new_cigar, &new_cigar_string);
+ new_cigar.push_back(al->cigar[i]);
+ new_cigar_string << length << 'D';
+ }
+ }
+
+ CleanPreviousMOperator(&in_M, &in_X, &length_M, &length_X, &new_cigar, &new_cigar_string);
+
+ int end = query_len - al->query_end - 1;
+ if (end > 0) {
+ uint32_t cigar = to_cigar_int(end, 'S');
+ new_cigar.push_back(cigar);
+ new_cigar_string << end << 'S';
+ }
+
+ al->cigar_string.clear();
+ al->cigar.clear();
+ al->cigar_string = new_cigar_string.str();
+ al->cigar = new_cigar;
+
+ return mismatch_length;
+}
+
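+// Translate the high-level Filter options into the bit flags understood by
+// ssw_align (see the flag documentation in ssw.h): 0x08 requests alignment
+// begin positions, and 0x0f additionally forces cigar reporting.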
+void SetFlag(const StripedSmithWaterman::Filter& filter, uint8_t* flag) {
+ if (filter.report_begin_position) *flag |= 0x08;
+ if (filter.report_cigar) *flag |= 0x0f;
+}
+
+// http://www.cplusplus.com/faq/sequences/arrays/sizeof-array/#cpp
+template <typename T, size_t N>
+inline size_t SizeOfArray( const T(&)[ N ] )
+{
+ return N;
+}
+
+} // namespace
+
+
+
+namespace StripedSmithWaterman {
+
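+// Typical use of this wrapper (a minimal sketch; the sequences are made up,
+// error handling is omitted, and <iostream>/<string> are assumed):
+//
+//   StripedSmithWaterman::Aligner aligner;      // defaults: match 2, mismatch 2,
+//                                               // gap open 3, gap extend 1
+//   StripedSmithWaterman::Filter filter;
+//   StripedSmithWaterman::Alignment alignment;
+//   const std::string ref   = "CAGCCTTTCTGACCCGGAAATC";
+//   const std::string query = "CTGACCCGGTAAATC";
+//   if (aligner.Align(query.c_str(), ref.c_str(), ref.size(), filter, &alignment))
+//     std::cout << alignment.cigar_string << " score=" << alignment.sw_score << "\n";
+//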
+Aligner::Aligner(void)
+ : score_matrix_(NULL)
+ , score_matrix_size_(5)
+ , translation_matrix_(NULL)
+ , match_score_(2)
+ , mismatch_penalty_(2)
+ , gap_opening_penalty_(3)
+ , gap_extending_penalty_(1)
+ , translated_reference_(NULL)
+ , reference_length_(0)
+{
+ BuildDefaultMatrix();
+}
+
+Aligner::Aligner(
+ const uint8_t& match_score,
+ const uint8_t& mismatch_penalty,
+ const uint8_t& gap_opening_penalty,
+ const uint8_t& gap_extending_penalty)
+
+ : score_matrix_(NULL)
+ , score_matrix_size_(5)
+ , translation_matrix_(NULL)
+ , match_score_(match_score)
+ , mismatch_penalty_(mismatch_penalty)
+ , gap_opening_penalty_(gap_opening_penalty)
+ , gap_extending_penalty_(gap_extending_penalty)
+ , translated_reference_(NULL)
+ , reference_length_(0)
+{
+ BuildDefaultMatrix();
+}
+
+Aligner::Aligner(const int8_t* score_matrix,
+ const int& score_matrix_size,
+ const int8_t* translation_matrix,
+ const int& translation_matrix_size)
+
+ : score_matrix_(NULL)
+ , score_matrix_size_(score_matrix_size)
+ , translation_matrix_(NULL)
+ , match_score_(2)
+ , mismatch_penalty_(2)
+ , gap_opening_penalty_(3)
+ , gap_extending_penalty_(1)
+ , translated_reference_(NULL)
+ , reference_length_(0)
+{
+ score_matrix_ = new int8_t[score_matrix_size_ * score_matrix_size_];
+ memcpy(score_matrix_, score_matrix, sizeof(int8_t) * score_matrix_size_ * score_matrix_size_);
+ translation_matrix_ = new int8_t[translation_matrix_size];
+ memcpy(translation_matrix_, translation_matrix, sizeof(int8_t) * translation_matrix_size);
+}
+
+
+Aligner::~Aligner(void){
+ Clear();
+}
+
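+// Store a translated copy of the reference so that the two-argument
+// Align(query, filter, alignment) overload can be called repeatedly against
+// the same reference. Returns the number of bases actually stored.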
+int Aligner::SetReferenceSequence(const char* seq, const int& length) {
+
+ int len = 0;
+ if (translation_matrix_) {
+ // calculate the valid length
+ //int calculated_ref_length = static_cast<int>(strlen(seq));
+ //int valid_length = (calculated_ref_length > length)
+ // ? length : calculated_ref_length;
+ int valid_length = length;
+ // delete the current buffer
+ CleanReferenceSequence();
+ // allocate a new buffer
+ translated_reference_ = new int8_t[valid_length];
+
+ len = TranslateBase(seq, valid_length, translated_reference_);
+ } else {
+ // no translation matrix available; leave the stored reference empty (len stays 0)
+ }
+
+ reference_length_ = len;
+ return len;
+
+
+}
+
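+// Encode an ASCII sequence into the integer alphabet expected by ssw_init,
+// using the current translation matrix; returns the number of bases written.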
+int Aligner::TranslateBase(const char* bases, const int& length,
+ int8_t* translated) const {
+
+ const char* ptr = bases;
+ int len = 0;
+ for (int i = 0; i < length; ++i) {
+ translated[i] = translation_matrix_[(int) *ptr];
+ ++ptr;
+ ++len;
+ }
+
+ return len;
+}
+
+
+bool Aligner::Align(const char* query, const Filter& filter,
+ Alignment* alignment) const
+{
+ if (!translation_matrix_) return false;
+ if (reference_length_ == 0) return false;
+
+ int query_len = strlen(query);
+ if (query_len == 0) return false;
+ int8_t* translated_query = new int8_t[query_len];
+ TranslateBase(query, query_len, translated_query);
+
+ const int8_t score_size = 2;
+ s_profile* profile = ssw_init(translated_query, query_len, score_matrix_,
+ score_matrix_size_, score_size);
+
+ uint8_t flag = 0;
+ SetFlag(filter, &flag);
+ s_align* s_al = ssw_align(profile, translated_reference_, reference_length_,
+ static_cast<int>(gap_opening_penalty_),
+ static_cast<int>(gap_extending_penalty_),
+ flag, filter.score_filter, filter.distance_filter, query_len);
+
+ if (!s_al) { // ssw_align can return NULL (e.g. when cigar construction fails)
+ delete [] translated_query;
+ init_destroy(profile);
+ return false;
+ }
+
+ alignment->Clear();
+ ConvertAlignment(*s_al, query_len, alignment);
+ alignment->mismatches = CalculateNumberMismatch(&*alignment, translated_reference_, translated_query, query_len);
+
+
+ // Free memory
+ delete [] translated_query;
+ align_destroy(s_al);
+ init_destroy(profile);
+
+ return true;
+}
+
+
+bool Aligner::Align(const char* query, const char* ref, const int& ref_len,
+ const Filter& filter, Alignment* alignment) const
+{
+ if (!translation_matrix_) return false;
+
+ int query_len = strlen(query);
+ if (query_len == 0) return false;
+ int8_t* translated_query = new int8_t[query_len];
+ TranslateBase(query, query_len, translated_query);
+
+ // calculate the valid length
+ //int calculated_ref_length = static_cast<int>(strlen(ref));
+ //int valid_ref_len = (calculated_ref_length > ref_len)
+ // ? ref_len : calculated_ref_length;
+ int valid_ref_len = ref_len;
+ int8_t* translated_ref = new int8_t[valid_ref_len];
+ TranslateBase(ref, valid_ref_len, translated_ref);
+
+
+ const int8_t score_size = 2;
+ s_profile* profile = ssw_init(translated_query, query_len, score_matrix_,
+ score_matrix_size_, score_size);
+
+ uint8_t flag = 0;
+ SetFlag(filter, &flag);
+ s_align* s_al = ssw_align(profile, translated_ref, valid_ref_len,
+ static_cast<int>(gap_opening_penalty_),
+ static_cast<int>(gap_extending_penalty_),
+ flag, filter.score_filter, filter.distance_filter, query_len);
+
+ if (!s_al) { // ssw_align can return NULL (e.g. when cigar construction fails)
+ delete [] translated_query;
+ delete [] translated_ref;
+ init_destroy(profile);
+ return false;
+ }
+
+ alignment->Clear();
+ ConvertAlignment(*s_al, query_len, alignment);
+ alignment->mismatches = CalculateNumberMismatch(&*alignment, translated_ref, translated_query, query_len);
+
+ // Free memory
+ delete [] translated_query;
+ delete [] translated_ref;
+ align_destroy(s_al);
+ init_destroy(profile);
+
+ return true;
+}
+
+void Aligner::Clear(void) {
+ ClearMatrices();
+ CleanReferenceSequence();
+}
+
+void Aligner::SetAllDefault(void) {
+ score_matrix_size_ = 5;
+ match_score_ = 2;
+ mismatch_penalty_ = 2;
+ gap_opening_penalty_ = 3;
+ gap_extending_penalty_ = 1;
+ reference_length_ = 0;
+}
+
+bool Aligner::ReBuild(void) {
+ if (translation_matrix_) return false;
+
+ SetAllDefault();
+ BuildDefaultMatrix();
+
+ return true;
+}
+
+bool Aligner::ReBuild(
+ const uint8_t& match_score,
+ const uint8_t& mismatch_penalty,
+ const uint8_t& gap_opening_penalty,
+ const uint8_t& gap_extending_penalty) {
+ if (translation_matrix_) return false;
+
+ SetAllDefault();
+
+ match_score_ = match_score;
+ mismatch_penalty_ = mismatch_penalty;
+ gap_opening_penalty_ = gap_opening_penalty;
+ gap_extending_penalty_ = gap_extending_penalty;
+
+ BuildDefaultMatrix();
+
+ return true;
+}
+
+bool Aligner::ReBuild(
+ const int8_t* score_matrix,
+ const int& score_matrix_size,
+ const int8_t* translation_matrix,
+ const int& translation_matrix_size) {
+
+ ClearMatrices();
+ score_matrix_size_ = score_matrix_size; // keep the stored size in sync with the supplied matrix
+ score_matrix_ = new int8_t[score_matrix_size_ * score_matrix_size_];
+ memcpy(score_matrix_, score_matrix, sizeof(int8_t) * score_matrix_size_ * score_matrix_size_);
+ translation_matrix_ = new int8_t[translation_matrix_size];
+ memcpy(translation_matrix_, translation_matrix, sizeof(int8_t) * translation_matrix_size);
+
+ return true;
+}
+
+void Aligner::BuildDefaultMatrix(void) {
+ ClearMatrices();
+ score_matrix_ = new int8_t[score_matrix_size_ * score_matrix_size_];
+ BuildSwScoreMatrix(match_score_, mismatch_penalty_, score_matrix_);
+ translation_matrix_ = new int8_t[SizeOfArray(kBaseTranslation)];
+ memcpy(translation_matrix_, kBaseTranslation, sizeof(int8_t) * SizeOfArray(kBaseTranslation));
+}
+
+void Aligner::ClearMatrices(void) {
+ delete [] score_matrix_;
+ score_matrix_ = NULL;
+
+ delete [] translation_matrix_;
+ translation_matrix_ = NULL;
+}
+} // namespace StripedSmithWaterman
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/libseqlib.git