[med-svn] [hinge] 01/04: Imported Upstream version 0.41+dfsg
Afif Elghraoui
afif at moszumanska.debian.org
Tue May 23 05:08:09 UTC 2017
This is an automated email from the git hooks/post-receive script.
afif pushed a commit to branch master
in repository hinge.
commit 3e4bb183220cf74459d114aab446890b373915ce
Author: Afif Elghraoui <afif at ghraoui.name>
Date: Fri Oct 7 18:45:13 2016 -0700
Imported Upstream version 0.41+dfsg
---
.gitignore | 11 +
.gitmodules | 18 +
.travis.yml | 29 +
CMakeLists.txt | 3 +
LICENSE | 33 +
README.md | 190 +
demo/NCTC9657_demo/run.sh | 30 +
demo/ecoli_demo/run.sh | 41 +
docker/README.md | 3 +
docker/centos6/Dockerfile | 30 +
docker/ubuntu12/Dockerfile | 23 +
docker/ubuntu14/Dockerfile | 22 +
misc/Falcon_ecoli_shortened.png | Bin 0 -> 147088 bytes
misc/High_level_overview.png | Bin 0 -> 51033 bytes
misc/ecoli_shortened.png | Bin 0 -> 62462 bytes
scripts/Visualise_graph.py | 82 +
scripts/add_groundtruth.py | 61 +
scripts/add_groundtruth_json.py | 52 +
scripts/clip_ends.py | 36 +
scripts/compute_n50_from_draft.py | 134 +
scripts/condense_graph.py | 159 +
scripts/condense_graph_and_annotate.py | 197 +
scripts/condense_graph_annotate_clip_ends.py | 252 ++
scripts/condense_graph_create_gfa_compute_n50.py | 233 +
scripts/condense_graph_with_gt.py | 191 +
scripts/connected.py | 73 +
scripts/correct_head.py | 42 +
scripts/create_bandage_file.py | 58 +
scripts/create_hgraph.py | 56 +
scripts/create_hgraph_nogt.py | 42 +
scripts/download_NCTC_pipeline.py | 50 +
scripts/draft_assembly.py | 36 +
scripts/draft_assembly_not_perfect.py | 36 +
scripts/draw2.py | 191 +
scripts/draw2_pileup.py | 125 +
scripts/draw2_pileup_region.py | 134 +
scripts/draw2_pileup_w_repeat.py | 158 +
scripts/draw_pileup_region.py | 152 +
scripts/get_NCTC_json.py | 65 +
scripts/get_consensus_gfa.py | 110 +
scripts/get_draft_annotation.py | 368 ++
scripts/get_draft_path.py | 519 +++
scripts/get_draft_path_norevcomp.py | 516 +++
scripts/interface_utils.py | 105 +
scripts/longest_path.py | 63 +
scripts/merge_hinges.py | 606 +++
scripts/parallel_draw.sh | 19 +
scripts/parallel_draw_large.sh | 16 +
scripts/parse.py | 44 +
scripts/parse_alignment.py | 26 +
scripts/parse_qv.py | 15 +
scripts/parse_read.py | 28 +
scripts/pileup.ipynb | 227 +
scripts/pipeline_consensus.py | 102 +
scripts/pipeline_consensus_norevcomp.py | 102 +
scripts/pipeline_nctc.py | 87 +
scripts/pruning_and_clipping.py | 1290 ++++++
scripts/pruning_and_clipping2.py | 1219 +++++
scripts/random_condensation.py | 287 ++
scripts/repeat_annotate_reads.py | 101 +
scripts/run_mapping.py | 41 +
scripts/run_mapping2.py | 44 +
scripts/run_mapping3.py | 44 +
scripts/run_parse_alignment.py | 18 +
scripts/run_parse_read.py | 20 +
scripts/unitig.py | 130 +
src/CMakeLists.txt | 47 +
src/consensus/CMakeLists.txt | 10 +
src/consensus/consensus.cpp | 230 +
src/consensus/draft.cpp | 992 +++++
src/consensus/draft_chopper.cpp | 1005 +++++
src/consensus/io_base.cpp | 275 ++
src/filter/CMakeLists.txt | 4 +
src/filter/filter.cpp | 1109 +++++
src/include/DB.h | 449 ++
src/include/INIReader.h | 53 +
src/include/LAInterface.h | 234 +
src/include/QV.h | 125 +
src/include/align.h | 372 ++
src/include/cmdline.h | 809 ++++
src/include/common.h | 307 ++
src/include/ini.h | 83 +
src/include/kseq.h | 256 ++
src/include/paf.h | 63 +
src/layout/CMakeLists.txt | 9 +
src/layout/hinging.cpp | 2056 +++++++++
src/lib/CMakeLists.txt | 23 +
src/lib/DB.c | 1712 +++++++
src/lib/DW_banded.c | 319 ++
src/lib/INIReader.cpp | 81 +
src/lib/LAInterface.cpp | 4794 ++++++++++++++++++++
src/lib/QV.c | 1406 ++++++
src/lib/align.c | 5149 ++++++++++++++++++++++
src/lib/falcon.c | 804 ++++
src/lib/ini.c | 187 +
src/lib/kmer_lookup.c | 589 +++
src/lib/paf.c | 92 +
src/test/CMakeLists.txt | 2 +
src/test/LAInterface_consensus_test.cpp | 146 +
src/test/LAInterface_test.cpp | 88 +
src/test/LAInterface_test1.cpp | 20 +
src/test/LAInterface_test_2DB.cpp | 90 +
src/test/omp_test.c | 31 +
utils/build.sh | 22 +
utils/clean.sh | 22 +
utils/compile.sh | 14 +
utils/nominal.ini | 31 +
utils/run.sh | 28 +
utils/setup.sh | 7 +
utils/test.sh | 31 +
utils/update.sh | 25 +
111 files changed, 33396 insertions(+)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..1f5e5ee
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+data
+.DS_Store
+*.pyc
+src/build
+src/.idea/
+scripts/.ipynb_checkpoints/
+scripts/figures/
+build
+notebook
+demo
+.idea
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..c0eaa56
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,18 @@
+[submodule "DALIGNER"]
+ path = thirdparty/DALIGNER
+ url = https://github.com/thegenemyers/DALIGNER.git
+[submodule "DAZZ_DB"]
+ path = thirdparty/DAZZ_DB
+ url = https://github.com/Eureka22/DAZZ_DB.git
+[submodule "DEXTRACTOR"]
+ path = thirdparty/DEXTRACTOR
+ url = https://github.com/thegenemyers/DEXTRACTOR.git
+[submodule "DASCRUBBER"]
+ path = thirdparty/DASCRUBBER
+ url = https://github.com/thegenemyers/DASCRUBBER.git
+[submodule "graphmap"]
+ path = graphmap
+ url = https://github.com/isovic/graphmap.git
+[submodule "src/spdlog"]
+ path = src/spdlog
+ url = https://github.com/gabime/spdlog.git
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..2ec13a2
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,29 @@
+language: cpp
+
+compiler: gcc
+
+sudo: required
+
+install: sudo apt-get install build-essential &&
+ sudo apt-get install libboost-dev &&
+ sudo apt-get install libboost-all-dev &&
+ wget http://www.cmake.org/files/v3.2/cmake-3.2.2.tar.gz --no-check-certificate &&
+ tar xf cmake-3.2.2.tar.gz &&
+ cd cmake-3.2.2 &&
+ ./configure &&
+ make -j 8 &&
+ sudo make install &&
+ cmake --version &&
+ pwd &&
+ cd .. &&
+ sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y &&
+ sudo apt-get update; sudo apt-get install gcc-4.8 g++-4.8 -y --force-yes&&
+ gcc-4.8 --version &&
+ which gcc-4.8
+
+script: ./utils/build.sh
+
+notifications:
+ email:
+ - xf1280 at gmail.com
+ - govinda.kamath at gmail.com
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..1bae40c
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,3 @@
+cmake_minimum_required(VERSION 3.2)
+project(AwAssembler)
+add_subdirectory(src bin)
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..fdd3efc
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,33 @@
+Copyright (c) 2016, Govinda Kamath, Fei Xia, Ilan Shomorony, Thomas Courtade and
+David Tse. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ · Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ · Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+ · The name of HINGE may not be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ · This project uses open source code from Dr. Eugene W. Myers, Jason Chin and
+ Heng Li, their code is protected by their licenses which are explicitly contained
+ in the source files. Redistributions of source code and binary must also reproduce
+ their copyright notice.
+
+ THIS SOFTWARE IS PROVIDED BY Govinda Kamath, Fei Xia, Ilan Shomorony, Thomas Courtade
+ and David Tse ”AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Govinda Kamath, Fei Xia, Ilan Shomorony,
+ Thomas Courtade, and David Tse BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ For any issues regarding this software and its use, contact Fei Xia at xf1280 at gmail.com.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a3234fb
--- /dev/null
+++ b/README.md
@@ -0,0 +1,190 @@
+# HINGE
+Software accompanying "HINGE: Long-Read Assembly Achieves Optimal Repeat Resolution"
+
+- Preprint: http://biorxiv.org/content/early/2016/08/01/062117
+
+- An IPython notebook to reproduce the results in the paper can be found in this [repository](https://github.com/govinda-kamath/HINGE-analyses).
+
+CI Status: ![image](https://travis-ci.org/fxia22/HINGE.svg?branch=master)
+
+
+
+## Introduction
+
+HINGE is a long read assembler based on an idea called _hinging_.
+
+## Pipeline Overview
+
+HINGE is an OLC (Overlap-Layout-Consensus) assembler. The overall idea of the pipeline is shown below.
+
+![image](misc/High_level_overview.png)
+
+At a high level, the algorithm can be thought of as a variation of the classical greedy algorithm.
+The main difference from the greedy algorithm is that rather than each read having a single successor
+and a single predecessor, we allow a small subset of reads to have a higher number of successors/predecessors.
+This subset is identified by a process called _hinging_, which helps us recover the graph structure
+directly during assembly.
+
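+A toy Python sketch of this idea (purely illustrative; the actual implementation is the C++ code under `src/`, and the names `greedy_successors`, `overlaps` and `hinges` below are made up for this example): ordinary reads keep only their best-scoring extension, while reads flagged as hinges keep all of them.
+
+```
+def greedy_successors(overlaps, hinges):
+    # overlaps: read id -> list of (candidate successor, overlap score)
+    # hinges:   set of read ids allowed to keep more than one successor
+    successors = {}
+    for read, candidates in overlaps.items():
+        ranked = sorted(candidates, key=lambda c: c[1], reverse=True)
+        if read in hinges:
+            successors[read] = [r for r, _ in ranked]            # hinged read: keep every extension
+        else:
+            successors[read] = [ranked[0][0]] if ranked else []  # classical greedy: best extension only
+    return successors
+```
+
+In the real pipeline, the hinged reads are the ones flagged during read filtering, and the layout works on the overlaps reported by DALIGNER.
+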
+Another significant difference from the HGAP and Falcon pipelines is that HINGE does not have a pre-assembly or read-correction step.
+
+
+
+## Algorithm Details
+
+### Read filtering
+The read-filtering step discards reads that contain a long chimeric stretch in the middle, as well as reads that are too short.
+Reads that are allowed a higher number of predecessors/successors are also identified here.
+This is implemented in `filter/filter.cpp`.
+
+### Layout
+The layout step is implemented in `layout/hinging.cpp` and is carried out by a variant of the greedy algorithm.
+
+The graph output by the layout stage is post-processed by running `scripts/pruning_and_clipping.py`.
+This post-processing removes dead ends and Z-structures from the graph, enabling easy condensation.
+One of its outputs is a GraphML file representing the backbone graph, which can then be analyzed, visualized, etc.
+
+
+## Parameters
+
+In the pipeline described above, most programs take not only input and output files as arguments but also a configuration file in INI format. This file contains parameters for each step in the pipeline; their usage and effects are explained below (a short example of reading these values follows the parameter lists):
+
+
+### [filter]
+- length_threshold = 6500; // Length threshold for reads to be considered in the backbone
+- quality_threshold = 0.23; // Quality threshold for edges to be considered in the backbone
+- n_iter = 2; // Number of filtering iterations. Several iterations are needed because filtering reads removes some edges, and filtering edges may in turn remove some reads (if a read's last edge is filtered). Typically 2-3 iterations are enough.
+- aln_threshold = 2500; // Length of alignment for edges to be considered in the backbone
+- min_cov = 5; // Minimal coverage for a segment to be considered not chimeric/adaptor
+- cut_off = 200; // A parameter for identifying a long chimeric stretch in the middle of a read
+- theta = 300; // Tolerance of the overhang length when looking for a right extension
+
+
+### [running]
+- n_proc = 12; // number of CPUs for layout step
+
+### [draft]
+- min_cov = 10; //obsolete
+- trim = 200; //obsolete
+- edge_safe = 100; //obsolete
+- tspace = 900; //space between new "trace points"
+
+
+### [consensus]
+- min_length = 2000; // Minimal length of reads used for final consensus
+- trim_end = 200; // Trim ends for alignments for final consensus
+- best_n = 1; // If one read has multiple alignments with the backbone assembly, choose the longest n segments for consensus.
+- quality_threshold = 0.23; // alignment quality threshold
+
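+These sections correspond to `utils/nominal.ini`. As a quick way to inspect the values from Python, here is a minimal sketch (assuming the shipped file parses with the standard-library `ConfigParser`; the binaries themselves read it through the bundled C++ `INIReader`):
+
+```
+import ConfigParser  # Python 2.7 standard library (configparser on Python 3)
+
+cfg = ConfigParser.ConfigParser()
+cfg.read('utils/nominal.ini')
+
+# print every parameter of the sections described above
+for section in ['filter', 'running', 'draft', 'consensus']:
+    if cfg.has_section(section):
+        print section, dict(cfg.items(section))
+```
+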
+# Installation
+
+## Dependencies
+- g++ 4.9
+- cmake 3.x
+- libhdf5
+- boost
+- Python 2.7
+
+The following python packages are necessary:
+- numpy
+- ujson
+- colormap
+- easydev.tools
+
+This software is still at the prototype stage, so it is not well packaged; however, it is designed in a modular fashion so that different combinations of methods can be tested.
+
+Installing the software is very easy.
+
+```
+git clone https://github.com/fxia22/HINGE.git
+git submodule init
+git submodule update
+./utils/build.sh
+```
+
+# Running
+
+In order to call the programs from anywhere, we suggest exporting the directory containing the binaries to your environment; you can do that by sourcing the script `setup.sh`. The parameters are initialised in `utils/nominal.ini`. The path to `nominal.ini` has to be specified when running the scripts.
+
+A demo run for assembling the E. coli genome is the following:
+
+```
+source utils/setup.sh
+mkdir data/ecoli
+cd data/ecoli
+# reads.fasta should be in data/ecoli
+fasta2DB ecoli reads.fasta
+DBsplit -x500 -s100 ecoli
+HPC.daligner -t5 ecoli | csh -v
+# alternatively, you can put output of HPC.daligner to a bash file and edit it to support
+rm ecoli.*.ecoli.*
+LAmerge ecoli.las ecoli.+([[:digit:]]).las
+rm ecoli.*.las # we only need ecoli.las
+DASqv -c100 ecoli ecoli.las
+
+# Run filter
+
+mkdir log
+Reads_filter --db ecoli --las ecoli.las -x ecoli --config <path-to-nominal.ini>
+
+# Run layout
+
+hinging --db ecoli --las ecoli.las -x ecoli --config <path-to-nominal.ini> -o ecoli
+
+# Run postprocessing
+
+python pruning_and_clipping.py ecoli.edges.hinges ecoli.hinge.list <identifier-of-run>
+
+
+# get draft assembly
+
+get_draft_path.py <working directory> ecoli ecoli<identifier-of-run>.G2.graphml
+draft_assembly --db ecoli --las ecoli.las --prefix ecoli --config <path-to-nominal.ini> --out ecoli.draft
+
+
+# get consensus assembly
+
+correct_head.py ecoli.draft.fasta ecoli.draft.pb.fasta draft_map.txt
+fasta2DB draft ecoli.draft.pb.fasta
+HPC.daligner draft ecoli | zsh -v
+LAmerge draft.ecoli.las draft.ecoli.*.las
+consensus draft ecoli draft.ecoli.las ecoli.consensus.fasta utils/nominal.ini
+get_consensus_gfa.py <working directory> ecoli ecoli.consensus.fasta
+
+#results should be in ecoli_consensus.gfa
+```
+
+## Analysis of Results
+
+### showing ground truth on graph
+Some programs are intended for debugging and observation. For example, one can get the ground truth by mapping the reads to the reference, which produces `ecoli.ecoli.ref.las`.
+
+This `las` file can be parsed into a JSON file for other programs to use.
+
+```
+run_mapping.py ecoli ecoli.ref ecoli.ecoli.ref.las 1-$
+```
+
+In the pruning step, if `ecoli.mapping.json` exists, the output `graphml` file will contain the ground-truth information.
+
+### drawing alignment graphs and mapping graphs
+Draw a read, for example read 60947, and output the figure to the `sample` folder (add 1 to the read ID, as LAshow counts from 1):
+
+```
+draw2.py ecoli ecoli.las 60948 sample 100
+```
+
+Draw the pileup on the draft assembly, given a region (start, end):
+
+```
+draw2_pileup_region.py 3600000 4500000
+```
+
+# Results:
+
+For the E. coli 160X dataset, after shortening the reads to a mean length of 3500 (with a variance of 1500), the graph structure is preserved.
+
+
+![image](misc/ecoli_shortened.png)
+
+Results on the bacterial genomes of the [NCTC 3000](http://www.sanger.ac.uk/resources/downloads/bacteria/nctc/) project can be found at [web.stanford.edu/~gkamath/NCTC/report.html](https://web.stanford.edu/~gkamath/NCTC/report.html)
diff --git a/demo/NCTC9657_demo/run.sh b/demo/NCTC9657_demo/run.sh
new file mode 100644
index 0000000..2c36c03
--- /dev/null
+++ b/demo/NCTC9657_demo/run.sh
@@ -0,0 +1,30 @@
+correct_head.py NCTC9657_reads.fasta reads.pb.fasta map.txt
+fasta2DB NCTC9657 reads.pb.fasta
+
+DBsplit NCTC9657
+
+HPC.daligner NCTC9657 | bash -v
+
+rm NCTC9657.*.NCTC9657.*.las
+LAmerge NCTC9657.las NCTC9657.[0-9].las
+DASqv -c100 NCTC9657 NCTC9657.las
+
+mkdir log
+
+Reads_filter --db NCTC9657 --las NCTC9657.las -x NCTC9657 --config ../../utils/nominal.ini
+hinging --db NCTC9657 --las NCTC9657.las -x NCTC9657 --config ../../utils/nominal.ini -o NCTC9657
+
+pruning_and_clipping.py NCTC9657.edges.hinges NCTC9657.hinge.list demo
+
+get_draft_path.py $PWD NCTC9657 NCTC9657demo.G2.graphml
+draft_assembly --db NCTC9657 --las NCTC9657.las --prefix NCTC9657 --config ../../utils/nominal.ini --out NCTC9657.draft
+
+correct_head.py NCTC9657.draft.fasta NCTC9657.draft.pb.fasta draft_map.txt
+fasta2DB draft NCTC9657.draft.pb.fasta
+HPC.daligner draft NCTC9657 | bash -v
+
+rm draft.*.NCTC9657.*.las
+LAmerge draft.NCTC9657.las draft.NCTC9657.*.las
+consensus draft NCTC9657 draft.NCTC9657.las NCTC9657.consensus.fasta ../../utils/nominal.ini
+
+get_consensus_gfa.py $PWD NCTC9657 NCTC9657demo.G2.graphml NCTC9657.consensus.fasta
diff --git a/demo/ecoli_demo/run.sh b/demo/ecoli_demo/run.sh
new file mode 100644
index 0000000..52105a5
--- /dev/null
+++ b/demo/ecoli_demo/run.sh
@@ -0,0 +1,41 @@
+#wget http://gembox.cbcb.umd.edu/mhap/raw/ecoli_p4_filtered.fastq.gz
+#gunzip ecoli_p4_filtered.fastq.gz
+
+seqtk seq -a ecoli_p4_filtered.fastq > reads.fasta
+correct_head.py reads.fasta reads.pb.fasta map.txt
+fasta2DB ecoli reads.pb.fasta
+
+
+DBsplit ecoli
+
+HPC.daligner ecoli | bash -v
+
+rm ecoli.*.ecoli.*.las
+LAmerge ecoli.las ecoli.[0-9].las
+DASqv -c100 ecoli ecoli.las
+
+mkdir log
+
+
+
+Reads_filter --db ecoli --las "ecoli.*.las" -x ecoli --config ../../utils/nominal.ini
+hinging --db ecoli --las ecoli.las -x ecoli --config ../../utils/nominal.ini -o ecoli
+
+pruning_and_clipping.py ecoli.edges.hinges ecoli.hinge.list demo
+
+get_draft_path.py $PWD ecoli ecolidemo.G2.graphml
+draft_assembly --db ecoli --las ecoli.las --prefix ecoli --config ../../utils/nominal.ini --out ecoli.draft
+
+
+
+correct_head.py ecoli.draft.fasta ecoli.draft.pb.fasta draft_map.txt
+fasta2DB draft ecoli.draft.pb.fasta
+
+HPC.daligner ecoli draft | bash -v
+
+# rm draft.*.ecoli.*.las
+# LAmerge draft.ecoli.las draft.ecoli.*.las
+
+consensus draft ecoli draft.ecoli.las ecoli.consensus.fasta ../../utils/nominal.ini
+
+get_consensus_gfa.py $PWD ecoli ecolidemo.G2.graphml ecoli.consensus.fasta
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000..40c2d3f
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,3 @@
+# Docker Image Build Guide
+This folder contains Dockerfiles to build HINGE for certain Linux distributions. To use one, copy the Dockerfile to the root directory of the repository and run `docker build .`
+
diff --git a/docker/centos6/Dockerfile b/docker/centos6/Dockerfile
new file mode 100644
index 0000000..e10a5af
--- /dev/null
+++ b/docker/centos6/Dockerfile
@@ -0,0 +1,30 @@
+FROM centos:6
+
+
+RUN rpm --import http://ftp.scientificlinux.org/linux/scientific/5x/x86_64/RPM-GPG-KEYs/RPM-GPG-KEY-cern
+
+RUN yum install wget -y
+
+RUN wget http://people.centos.org/tru/devtools-2/devtools-2.repo -O /etc/yum.repos.d/devtools-2.repo
+RUN yum install devtoolset-2-gcc devtoolset-2-binutils -y
+RUN yum install devtoolset-2-gcc-c++ devtoolset-2-gcc-gfortran -y
+RUN source /opt/rh/devtoolset-2/enable
+
+ENV PATH=$PATH:/opt/rh/devtoolset-2/root/usr/bin/
+
+RUN wget http://www.cmake.org/files/v3.2/cmake-3.2.2.tar.gz --no-check-certificate && tar xf cmake-3.2.2.tar.gz
+RUN cd cmake-3.2.2 && ./configure && make && make install
+
+RUN wget http://sourceforge.net/projects/boost/files/boost/1.55.0/boost_1_55_0.tar.gz --no-check-certificate
+RUN tar -xvzf boost_1_55_0.tar.gz
+WORKDIR /boost_1_55_0/
+RUN ./bootstrap.sh --with-libraries=graph
+RUN ./b2 install
+
+RUN yum install zlib-devel -y
+
+RUN ln -s /opt/rh/devtoolset-2/root/usr/bin/gcc /usr/bin/gcc-4.8
+RUN ln -s /opt/rh/devtoolset-2/root/usr/bin/g++ /usr/bin/g++-4.8
+ADD . /hinge/
+WORKDIR /hinge/
+RUN ./utils/build.sh
diff --git a/docker/ubuntu12/Dockerfile b/docker/ubuntu12/Dockerfile
new file mode 100644
index 0000000..c15af73
--- /dev/null
+++ b/docker/ubuntu12/Dockerfile
@@ -0,0 +1,23 @@
+FROM ubuntu:12.04
+
+RUN apt-get update
+
+RUN apt-get install zlibc zlib1g zlib1g-dev -y
+RUN apt-get install software-properties-common python-software-properties -y
+RUN apt-get install build-essential wget -y
+RUN apt-get install libboost-graph-dev -y
+
+RUN wget http://www.cmake.org/files/v3.2/cmake-3.2.2.tar.gz --no-check-certificate && tar xf cmake-3.2.2.tar.gz
+RUN cd cmake-3.2.2 && ./configure && make && make install
+
+RUN cmake --version
+
+RUN add-apt-repository ppa:ubuntu-toolchain-r/test -y
+RUN apt-get update; apt-get install gcc-4.8 g++-4.8 -y
+RUN gcc-4.8 --version
+RUN which gcc-4.8
+
+
+ADD . /hinge/
+WORKDIR /hinge/
+RUN ./utils/build.sh
diff --git a/docker/ubuntu14/Dockerfile b/docker/ubuntu14/Dockerfile
new file mode 100644
index 0000000..89e9b37
--- /dev/null
+++ b/docker/ubuntu14/Dockerfile
@@ -0,0 +1,22 @@
+FROM ubuntu:14.04
+
+RUN apt-get update
+
+RUN apt-get install zlibc zlib1g zlib1g-dev -y
+RUN apt-get install software-properties-common -y
+RUN apt-get install build-essential wget -y
+RUN apt-get install libboost-graph-dev -y
+
+RUN wget http://www.cmake.org/files/v3.2/cmake-3.2.2.tar.gz --no-check-certificate && tar xf cmake-3.2.2.tar.gz
+RUN cd cmake-3.2.2 && ./configure && make && make install
+
+RUN cmake --version
+
+RUN add-apt-repository ppa:ubuntu-toolchain-r/test -y
+RUN apt-get update; apt-get install gcc-4.8 g++-4.8 -y
+RUN gcc-4.8 --version
+RUN which gcc-4.8
+
+ADD . /hinge/
+WORKDIR /hinge/
+RUN ./utils/build.sh
diff --git a/misc/Falcon_ecoli_shortened.png b/misc/Falcon_ecoli_shortened.png
new file mode 100644
index 0000000..ea06143
Binary files /dev/null and b/misc/Falcon_ecoli_shortened.png differ
diff --git a/misc/High_level_overview.png b/misc/High_level_overview.png
new file mode 100644
index 0000000..52aa2ff
Binary files /dev/null and b/misc/High_level_overview.png differ
diff --git a/misc/ecoli_shortened.png b/misc/ecoli_shortened.png
new file mode 100644
index 0000000..60b2a05
Binary files /dev/null and b/misc/ecoli_shortened.png differ
diff --git a/scripts/Visualise_graph.py b/scripts/Visualise_graph.py
new file mode 100755
index 0000000..6eadb95
--- /dev/null
+++ b/scripts/Visualise_graph.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+# In[1]:
+
+import networkx as nx
+import sys
+
+# In[2]:
+if len(sys.argv) < 2:
+ print "wrong usage.\n python Visualise_graph.py graph_edge_file [list_of_hinges]"
+ sys.exit(1)
+
+vertices=set()
+with open (sys.argv[1]) as f:
+
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ if len(lines1) < 5:
+ continue
+ #vertices.add(lines1[0])
+ #vertices.add(str(lines1[1]))
+ #vertices.add(str(lines1[0])+"_" + lines1[3])
+ #vertices.add(str(lines1[1])+"_" + lines1[4])
+
+
+# In[3]:
+
+len(vertices)
+
+
+# In[4]:
+
+G = nx.DiGraph()
+for vertex in vertices:
+ G.add_node(vertex)
+
+
+# In[5]:
+
+with open (sys.argv[1]) as f:
+ for lines in f:
+ lines1=lines.split()
+ print lines1
+ if len(lines1) < 5:
+ continue
+ #print lines1
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4], hinge_edge=int(lines1[5]))
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=int(lines1[5]))
+
+try:
+ in_hinges = set()
+ out_hinges = set()
+ with open (sys.argv[2]) as f:
+
+ for lines in f:
+ lines1=lines.split()
+
+ if lines1[2] == '1':
+ in_hinges.add(lines1[0]+'_0')
+ out_hinges.add(lines1[0]+'_1')
+ elif lines1[2] == '-1':
+ in_hinges.add(lines1[0]+'_1')
+ out_hinges.add(lines1[0]+'_0')
+
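+ # hinge annotation on nodes: 100 = in- and out-hinge, 10 = in-hinge only, -10 = out-hinge only, 0 = not a hinge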
+ for node in G.nodes():
+ if node in in_hinges and node in out_hinges:
+ G.node[node]['hinge']=100
+ elif node in in_hinges:
+ G.node[node]['hinge']=10
+ elif node in out_hinges:
+ G.node[node]['hinge']=-10
+ else:
+ G.node[node]['hinge']=0
+except:
+ pass
+
+nx.write_graphml(G, './out.graphml')
+
+# In[ ]:
+
+
+
diff --git a/scripts/add_groundtruth.py b/scripts/add_groundtruth.py
new file mode 100644
index 0000000..b1eba74
--- /dev/null
+++ b/scripts/add_groundtruth.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+
+graphml_file = sys.argv[1]
+groundtruth_file = sys.argv[2]
+graphml_file_w_groundtruth = sys.argv[3]
+try:
+ chromosome_to_consider= int(sys.argv[4])
+except:
+ chromosome_to_consider=None
+
+g = nx.read_graphml(graphml_file)
+
+print nx.info(g)
+
+mapping_dict = {}
+
+with open(groundtruth_file,'r') as f:
+ for num, line in enumerate(f.readlines()):
+ m = map(int, line.strip().split())
+ # mapping_dict[num] = [min(m), max(m), int(m[0]>m[1])]
+ mapping_dict[num] = [m[2],m[3],m[1]]
+
+#print mapping_dict
+
+max_len=0
+for num in mapping_dict.keys():
+ max_len=max(max_len,len(str(mapping_dict[num][1])))  # number of digits of the stored end coordinate
+
+
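+# pow_mov is a power of ten larger than any alignment coordinate, so that
+# chromosome*pow_mov + position below packs both values into a single attribute.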
+pow_mov=10**(max_len+1)
+for node in g.nodes():
+ #print node
+ try:
+ nodeid = int(node.split('_')[0])
+ #print nodeid
+ rev = int(node.split('_')[1])
+ if chromosome_to_consider != None:
+ g.node[node]['chromosome'] = 0
+ if mapping_dict[nodeid][2]==chromosome_to_consider:
+ g.node[node]['chromosome'] = mapping_dict[nodeid][2]+1
+ else:
+ g.node[node]['chromosome'] = mapping_dict[nodeid][2]+1
+
+ if rev == 0:
+ g.node[node]['aln_end'] = mapping_dict[nodeid][2]*pow_mov+ mapping_dict[nodeid][1]
+ g.node[node]['aln_start'] = mapping_dict[nodeid][2]*pow_mov + mapping_dict[nodeid][0]
+ # g.node[node]['aln_strand'] = mapping_dict[nodeid][2]
+ else:
+ g.node[node]['aln_end'] = mapping_dict[nodeid][2]*pow_mov + mapping_dict[nodeid][1]
+ g.node[node]['aln_start'] = mapping_dict[nodeid][2]*pow_mov+ mapping_dict[nodeid][0]
+ # g.node[node]['aln_strand'] = 1-mapping_dict[nodeid][2]
+
+ except:
+ pass
+
+nx.write_graphml(g, graphml_file_w_groundtruth)
+
+
diff --git a/scripts/add_groundtruth_json.py b/scripts/add_groundtruth_json.py
new file mode 100644
index 0000000..449bc69
--- /dev/null
+++ b/scripts/add_groundtruth_json.py
@@ -0,0 +1,52 @@
+import networkx as nx
+import sys
+import json
+
+graphml_file = sys.argv[1]
+groundtruth_file = sys.argv[2]
+graphml_file_w_groundtruth = sys.argv[3]
+
+g = nx.read_graphml(graphml_file)
+
+print nx.info(g)
+
+with open(groundtruth_file) as f:
+ read_dict=json.load(f)
+
+max_len=0
+for read in read_dict:
+ for aln_info in read_dict[read]:
+ try:
+ max_len=max(max_len,len(str(aln_info[0])))
+ max_len=max(max_len,len(str(aln_info[1])))
+ except:
+ print
+ raise
+
+pow_mov=10**(max_len+1)
+
+for node in g.nodes():
+ #print node
+ nodeid = node.split('_')[0]
+ #print nodeid
+ rev = int(node.split('_')[1])
+ if rev==1:
+ nodeid+="'"
+
+ if nodeid in read_dict:
+ g.node[node]['chr'] = read_dict[nodeid][0][2]
+ g.node[node]['aln_end'] = pow_mov*read_dict[nodeid][0][2]+max(read_dict[nodeid][0][0],read_dict[nodeid][0][1])
+# g.node[node]['aln_start'] = pow_mov*read_dict[nodeid][0][2]+min(read_dict[nodeid][0][0],read_dict[nodeid][0][1])
+# g.node[node]['repeat']=0
+# if len (read_dict[nodeid]) >1 :
+# g.node[node]['repeat']=1
+# chrom_maps=set([aln[3] for aln in read_dict[nodeid]])
+# if len (chrom_maps) > 1:
+# g.node[node]['repeat']=10
+ else:
+ g.node[node]['chr'] = -1
+ g.node[node]['aln_end'] = -1
+# g.node[node]['aln_start'] = -1
+# g.node[node]['repeat']=-1
+
+nx.write_graphml(g, graphml_file_w_groundtruth)
diff --git a/scripts/clip_ends.py b/scripts/clip_ends.py
new file mode 100644
index 0000000..da6f93f
--- /dev/null
+++ b/scripts/clip_ends.py
@@ -0,0 +1,36 @@
+import sys
+
+chr_lengths={}
+
+ground_truth=sys.argv[1]
+graph_file=sys.argv[2]
+out_file=sys.argv[2]+'.clipped'
+
+with open(ground_truth) as f:
+ for line in f:
+ m = map(int, line.strip().split())
+ chr_lengths.setdefault(m[1],0)
+ chr_lengths[m[1]]= max(chr_lengths[m[1]], max(m[2],m[3]))
+
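+# Reads aligning within CHR_THR bases of either end of their chromosome are collected
+# in reads_to_kill; graph edges touching any such read are dropped below.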
+CHR_THR=20000
+
+reads_to_kill=set()
+
+with open(ground_truth) as f:
+ for line in f:
+ m = map(int, line.strip().split())
+ read_left=min(m[2],m[3])
+ read_right=max(m[2],m[3])
+ read_chr=m[1]
+ if read_left < CHR_THR:
+ reads_to_kill.add(m[0])
+ if read_right > chr_lengths[read_chr] - CHR_THR:
+ reads_to_kill.add(m[0])
+
+with open(graph_file) as f:
+ with open(out_file, 'w') as g:
+ for line in f:
+ line1=line.split()
+ if int(line1[0])in reads_to_kill or int(line1[1]) in reads_to_kill:
+ continue
+ g.write(line)
diff --git a/scripts/compute_n50_from_draft.py b/scripts/compute_n50_from_draft.py
new file mode 100644
index 0000000..42f3c74
--- /dev/null
+++ b/scripts/compute_n50_from_draft.py
@@ -0,0 +1,134 @@
+import sys
+import os
+import networkx as nx
+from Bio import SeqIO
+
+
+
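+# comp_n50: N50-style statistic of a list of contig lengths. A length L is a candidate
+# if the contigs no longer than L and the contigs no shorter than L each cover at least
+# half of the total length; the smallest and largest candidates are averaged
+# (hence the min_n50/max_n50 bookkeeping below).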
+def comp_n50(contig_vec):
+ if len(contig_vec) == 0:
+ return 0
+ sorted_lengths = sorted(contig_vec)
+
+ total_length = sum(contig_vec)
+
+ half_length = 0.5*total_length
+
+ min_n50 = sorted_lengths[-1]
+ max_n50 = 0
+
+ for i in range(len(sorted_lengths)):
+ sum_1 = sum(sorted_lengths[0:i+1])
+ sum_2 = sum(sorted_lengths[i:])
+ if sum_1 >= half_length and sum_2 >= half_length:
+ min_n50 = min(sorted_lengths[i],min_n50)
+ max_n50 = max(sorted_lengths[i],max_n50)
+
+ return 0.5*(min_n50+max_n50)
+
+
+hinging_n50 = -1
+hinging_comp_n50 = -1
+hgap_n50 = -1
+
+count = 0
+count1 = 0
+count2 = 0
+
+
+data_dict = {}
+
+
+
+
+fullpath = '/data/pacbio_assembly/pb_data/NCTC/'
+
+for nctc_name in os.listdir(fullpath):
+ if 'NCTC' not in nctc_name:
+ continue
+
+ mypath = fullpath+nctc_name
+
+ if not os.path.isdir(mypath):
+ continue
+
+ mypath = mypath+'/'
+
+ count += 1
+
+ hinging_n50 = -1
+ hinging_comp_n50 = -1
+ hgap_n50 = -1
+
+
+ data_dict[nctc_name] = []
+
+ draft_file = [x for x in os.listdir(mypath) if 'draft.graphml' in x]
+
+ try:
+
+ # flname = sys.argv[1]
+ g = nx.read_graphml(mypath+draft_file[0])
+
+ contig_lengths = []
+ component_lengths = []
+
+
+ for u in g.nodes():
+ contig_lengths.append(len(g.node[u]['segment']))
+
+ for c in nx.weakly_connected_components(g):
+ # we use set() so that we do not double-count two reverse-complementary contigs
+ # in the same component
+ component_lengths.append(sum(set([len(g.node[u]['segment']) for u in c])))
+
+ component_lengths = set(component_lengths)
+
+ hinging_n50 = comp_n50(contig_lengths)
+ hinging_comp_n50 = comp_n50(component_lengths)
+
+ count1+=1
+
+ except:
+ pass
+
+ # print "contig n50: "+str(comp_n50(contig_lengths))
+ # print "component n50: "+str(comp_n50(component_lengths))
+
+
+ hgap_file = [x for x in os.listdir(mypath) if 'hgap.fasta' in x]
+
+
+ try:
+
+ hgap_file = hgap_file[0]
+ hgap_contigs = [len(x) for x in SeqIO.parse(open(mypath+hgap_file),'fasta')]
+
+ hgap_n50 = comp_n50(hgap_contigs)
+
+ count2+=1
+
+ except:
+ pass
+
+
+ with open(mypath+nctc_name+'.n50','w') as f:
+ f.write('hinging'+'\t'+str(hinging_n50)+'\n')
+ f.write('hinging_comp'+'\t'+str(hinging_comp_n50)+'\n')
+ f.write('hgap'+'\t'+str(hgap_n50)+'\n')
+
+ data_dict[nctc_name] = [hinging_n50,hinging_comp_n50,hgap_n50]
+
+ print count
+ print count1
+ print count2
+
+
+
+with open(fullpath+'computed.n50','w') as f:
+ for nctc_name in data_dict:
+ vec = data_dict[nctc_name]
+ f.write(nctc_name+'\t'+str(vec[0])+'\t'+str(vec[1])+'\t'+str(vec[2])+'\n')
+
+
+
diff --git a/scripts/condense_graph.py b/scripts/condense_graph.py
new file mode 100644
index 0000000..7ec4dea
--- /dev/null
+++ b/scripts/condense_graph.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+
+def merge_simple_path(g):
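+ # Collapse chains in_node -> node -> out_node into one merged node whenever all three
+ # are distinct and the intermediate in/out degrees are exactly 1 (i.e. a simple path).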
+ for node in g.nodes():
+ #print g.in_degree(node), g.out_degree(node)
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+
+def merge_two_nodes(g):
+ for node in g.nodes():
+ if g.in_degree(node) == 1 and g.out_degree(node) == 0:
+ in_node = g.in_edges(node)[0][0]
+ if g.out_degree(in_node) == 1:
+ if in_node != node:
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ g.add_node(str(node_id),
+ count = g.node[in_node]['count'] + g.node[node]['count'],
+ read = g.node[in_node]['read'] + '_' + g.node[node]['read'],
+ #aln_chr = g.node[node]['aln_chr']
+ )
+ g.remove_node(in_node)
+ g.remove_node(node)
+
+
+def merge_path(g,in_node,node,out_node):
+ #ov1 = find_overlap(g.node[in_node]['bases'], g.node[node]['bases'])
+ #ov2 = find_overlap(g.node[node]['bases'], g.node[out_node]['bases'])
+
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ #length = g.node[node]['length'] + g.node[in_node]['length'] + g.node[out_node]['length'] - ov1 - ov2
+ #cov = (g.node[in_node]['cov'] * g.node[in_node]['length'] + g.node[node]['cov'] * g.node[node]['length'] + \
+ #g.node[out_node]['cov'] * g.node[out_node]['length'])/float(length)
+ #bases = g.node[in_node]['bases'][:-ov1] + g.node[node]['bases'] + g.node[out_node]['bases'][ov2:]
+
+ g.add_node(str(node_id),
+ count = g.node[in_node]['count'] + g.node[node]['count'] + g.node[out_node]['count'],
+ read = g.node[in_node]['read'] + '_' + g.node[node]['read'] + '_' +g.node[out_node]['read'],
+ #aln_chr = g.node[node]['aln_chr']
+ )
+ #g.add_node(str(node_id)+'-', bases = reverse_comp_bases(bases), length = length, cov = cov)
+
+ for edge in g.in_edges(in_node):
+ g.add_edge(edge[0],str(node_id))
+
+ for edge in g.out_edges(out_node):
+ g.add_edge(str(node_id),edge[1])
+
+
+ g.remove_node(in_node)
+ g.remove_node(node)
+ g.remove_node(out_node)
+
+def input1(flname):
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ if len(lines1) < 5:
+ continue
+ #print lines1
+ g.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4], hinge_edge=int(lines1[5]))
+ g.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=int(lines1[5]))
+ return g
+
+def input2(flname):
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ g.add_edge(lines1[0], lines1[1])
+ return g
+
+def run(filename, n_iter):
+
+
+ f=open(filename)
+ line1=f.readline()
+ print line1
+ f.close()
+ if len(line1.split()) !=2:
+ g=input1(filename)
+ else:
+ g=input2(filename)
+
+
+
+ print nx.info(g)
+
+
+ for node in g.nodes():
+ g.node[node]['count'] = 1
+ g.node[node]['read'] = node
+
+
+ degree_sequence=sorted(g.degree().values(),reverse=True)
+ print Counter(degree_sequence)
+ for i in range(n_iter):
+ for node in g.nodes():
+ if g.in_degree(node) == 0:
+ g.remove_node(node)
+
+ print nx.info(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+
+ g.graph['aval'] = 1000000000
+
+ for i in range(5):
+ merge_simple_path(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ try:
+ import ujson
+ mapping = ujson.load(open(filename.split('.')[0]+'.mapping.json'))
+
+ print 'get mapping'
+
+ for node in g.nodes():
+ #print node
+ if mapping.has_key(node):
+ g.node[node]['aln_start'] = mapping[node][0]
+ g.node[node]['aln_end'] = mapping[node][1]
+ g.node[node]['aln_strand'] = mapping[node][2]
+ else:
+ g.node[node]['aln_start'] = 0
+ g.node[node]['aln_end'] = 0
+ g.node[node]['aln_strand'] = 0
+
+ except:
+ pass
+
+ nx.write_graphml(g, filename.split('.')[0]+'_condensed.graphml')
+
+ print nx.number_weakly_connected_components(g)
+ print nx.number_strongly_connected_components(g)
+
+
+filename = sys.argv[1]
+run(filename, 5)
diff --git a/scripts/condense_graph_and_annotate.py b/scripts/condense_graph_and_annotate.py
new file mode 100644
index 0000000..4b4ee5c
--- /dev/null
+++ b/scripts/condense_graph_and_annotate.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+import json
+
+LENGTH_THRESHOLD=10 #Connected components with less than LENGTH_THRESHOLD reads are thrown away
+
+
+def merge_simple_path(g):
+ for node in g.nodes():
+ #print g.in_degree(node), g.out_degree(node)
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ merge_path(g,in_node,node,out_node)
+
+
+def merge_two_nodes(g):
+ for node in g.nodes():
+ if g.in_degree(node) == 1 and g.out_degree(node) == 0:
+ in_node = g.in_edges(node)[0][0]
+ if g.out_degree(in_node) == 1:
+ if in_node != node:
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ g.add_node(str(node_id),
+ count = g.node[in_node]['count'] + g.node[node]['count'],
+ read = g.node[in_node]['read'] + ':' + g.node[node]['read'],
+ #aln_chr = g.node[node]['aln_chr']
+ )
+ g.remove_node(in_node)
+ g.remove_node(node)
+
+
+def merge_path(g,in_node,node,out_node):
+ #ov1 = find_overlap(g.node[in_node]['bases'], g.node[node]['bases'])
+ #ov2 = find_overlap(g.node[node]['bases'], g.node[out_node]['bases'])
+
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ #length = g.node[node]['length'] + g.node[in_node]['length'] + g.node[out_node]['length'] - ov1 - ov2
+ #cov = (g.node[in_node]['cov'] * g.node[in_node]['length'] + g.node[node]['cov'] * g.node[node]['length'] + \
+ #g.node[out_node]['cov'] * g.node[out_node]['length'])/float(length)
+ #bases = g.node[in_node]['bases'][:-ov1] + g.node[node]['bases'] + g.node[out_node]['bases'][ov2:]
+
+ g.add_node(str(node_id),
+ count = g.node[in_node]['count'] + g.node[node]['count'] + g.node[out_node]['count'],
+ read = g.node[in_node]['read'] + ':' + g.node[node]['read'] + ':' +g.node[out_node]['read'],
+ #aln_chr = g.node[node]['aln_chr']
+ )
+ #g.add_node(str(node_id)+'-', bases = reverse_comp_bases(bases), length = length, cov = cov)
+ #print g.node[str(node_id)]['chr']
+
+ for edge in g.in_edges(in_node):
+ g.add_edge(edge[0],str(node_id))
+
+ for edge in g.out_edges(out_node):
+ g.add_edge(str(node_id),edge[1])
+
+
+ g.remove_node(in_node)
+ g.remove_node(node)
+ g.remove_node(out_node)
+
+def input1(flname):
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ if len(lines1) < 5:
+ continue
+ #print lines1
+ g.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4], hinge_edge=int(lines1[5]))
+ g.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=int(lines1[5]))
+ return g
+
+def input2(flname):
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ g.add_edge(lines1[0], lines1[1])
+ return g
+
+def run(filename, gt_file, n_iter):
+
+
+ f=open(filename)
+ line1=f.readline()
+ print line1
+ f.close()
+ if len(line1.split()) !=2:
+ g=input1(filename)
+ else:
+ g=input2(filename)
+
+ print str(len(g.nodes())) + " vertices in graph to begin with."
+
+ connected_components=[x for x in nx.weakly_connected_components(g)]
+ for component in connected_components:
+ if len(component) < LENGTH_THRESHOLD:
+ g.remove_nodes_from(component)
+
+ print str(len(g.nodes())) + " vertices in graph after removing components of fewer than "+str(LENGTH_THRESHOLD)+" nodes."
+
+ read_to_chr_map={}
+
+ if gt_file.split('.')[-1]=='json':
+ with open(gt_file,'r') as f:
+ tmp_map=json.load(f)
+ for read in tmp_map:
+ readid=int(read.strip("'"))
+ read_to_chr_map[readid] = int(tmp_map[read][0][2])
+ else:
+ with open(gt_file,'r') as f:
+ for num, line in enumerate(f.readlines()):
+ m = map(int, line.strip().split())
+ read_to_chr_map[m[0]]=m[1]
+
+ nodes_seen=set([x.split("_")[0] for x in g.nodes()])
+
+ for node in nodes_seen:
+ read_to_chr_map.setdefault(int(node),-1)
+
+ #print nx.info(g)
+ print "Num reads read : "+str(len(read_to_chr_map))
+
+ for node in g.nodes():
+ nodeid=int(node.split('_')[0])
+
+ g.node[node]['count'] = 1
+ g.node[node]['read'] = node
+ #print str(nodeid), node,g.node[node]['chr']
+
+
+ degree_sequence=sorted(g.degree().values(),reverse=True)
+ print Counter(degree_sequence)
+ for i in range(n_iter):
+ for node in g.nodes():
+ if g.in_degree(node) == 0:
+ g.remove_node(node)
+
+ print nx.info(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+
+ g.graph['aval'] = 1000000000
+
+ for i in range(5):
+ merge_simple_path(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ h=nx.DiGraph()
+ h.add_nodes_from(g)
+ h.add_edges_from(g.edges())
+ for node in g.nodes():
+ reads_in_node=[int(x.split('_')[0]) for x in g.node[node]['read'].split(':')]
+ try:
+ chr_in_node=map(lambda x: read_to_chr_map[x], reads_in_node)
+ except:
+ print reads_in_node,g.node[node]['read']
+ return
+ chr_in_node_set=set(chr_in_node)
+ if len(chr_in_node_set) ==1:
+ h.node[node]['chr']=chr_in_node[0]
+ else:
+ h.node[node]['chr']=':'.join(map(str,chr_in_node))
+
+ h.node[node]['count']=g.node[node]['count']
+ try:
+ h.node[node]['read']=g.node[node]['read']
+ except:
+ pass
+
+
+ nx.write_graphml(h, filename.split('.')[0]+'_condensed_annotated.graphml')
+
+ print nx.number_weakly_connected_components(h)
+ print nx.number_strongly_connected_components(h)
+
+#
+
+filename = sys.argv[1]
+gt_file=sys.argv[2]
+run(filename, gt_file,5)
diff --git a/scripts/condense_graph_annotate_clip_ends.py b/scripts/condense_graph_annotate_clip_ends.py
new file mode 100644
index 0000000..950cf16
--- /dev/null
+++ b/scripts/condense_graph_annotate_clip_ends.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+
+
+
+def merge_simple_path(g):
+ for node in g.nodes():
+ #print g.in_degree(node), g.out_degree(node)
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ merge_path(g,in_node,node,out_node)
+
+
+def merge_two_nodes(g):
+ for node in g.nodes():
+ if g.in_degree(node) == 1 and g.out_degree(node) == 0:
+ in_node = g.in_edges(node)[0][0]
+ if g.out_degree(in_node) == 1:
+ if in_node != node:
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ g.add_node(str(node_id),
+ count = g.node[in_node]['count'] + g.node[node]['count'],
+ read = g.node[in_node]['read'] + ':' + g.node[node]['read'],
+ #aln_chr = g.node[node]['aln_chr']
+ )
+ g.remove_node(in_node)
+ g.remove_node(node)
+
+
+def merge_path(g,in_node,node,out_node):
+ #ov1 = find_overlap(g.node[in_node]['bases'], g.node[node]['bases'])
+ #ov2 = find_overlap(g.node[node]['bases'], g.node[out_node]['bases'])
+
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ #length = g.node[node]['length'] + g.node[in_node]['length'] + g.node[out_node]['length'] - ov1 - ov2
+ #cov = (g.node[in_node]['cov'] * g.node[in_node]['length'] + g.node[node]['cov'] * g.node[node]['length'] + \
+ #g.node[out_node]['cov'] * g.node[out_node]['length'])/float(length)
+ #bases = g.node[in_node]['bases'][:-ov1] + g.node[node]['bases'] + g.node[out_node]['bases'][ov2:]
+
+ g.add_node(str(node_id),
+ count = g.node[in_node]['count'] + g.node[node]['count'] + g.node[out_node]['count'],
+ read = g.node[in_node]['read'] + ':' + g.node[node]['read'] + ':' +g.node[out_node]['read'],
+ #aln_chr = g.node[node]['aln_chr']
+ )
+ #g.add_node(str(node_id)+'-', bases = reverse_comp_bases(bases), length = length, cov = cov)
+ #print g.node[str(node_id)]['chr']
+
+ for edge in g.in_edges(in_node):
+ g.add_edge(edge[0],str(node_id),st_pc=g.edge[edge[0]][edge[1]]['st_pc'],end_pc=g.edge[edge[0]][edge[1]]['end_pc'])
+
+
+ for edge in g.out_edges(out_node):
+ g.add_edge(str(node_id),edge[1],st_pc=g.edge[edge[0]][edge[1]]['st_pc'],end_pc=g.edge[edge[0]][edge[1]]['end_pc'])
+
+
+ g.remove_node(in_node)
+ g.remove_node(node)
+ g.remove_node(out_node)
+
+def input1(flname):
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ if len(lines1) < 5:
+ continue
+ #print lines1
+ g.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4], hinge_edge=int(lines1[5]))
+ g.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=int(lines1[5]))
+ return g
+
+def input2(flname):
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ g.add_edge(lines1[0], lines1[1])
+ return g
+
+def run(filename, gt_file, n_iter):
+
+
+ f=open(filename)
+ line1=f.readline()
+ print line1
+ f.close()
+ if len(line1.split()) !=2:
+ g=input1(filename)
+ else:
+ g=input2(filename)
+
+ read_to_chr_map={}
+ pos_dict = {}
+ mapping_dict = {}
+
+ chr_lengths = {}
+ for chr in range(14):
+ chr_lengths[chr] = 1000
+
+ with open(gt_file,'r') as f:
+ for num, line in enumerate(f.readlines()):
+ m = map(int, line.strip().split())
+ # mapping_dict[num] = [min(m), max(m), int(m[0]>m[1])]
+ read_to_chr_map[m[0]]= str(m[1])
+ mapping_dict[num] = m[1]
+ pos_dict[num] = [min(m[2],m[3]),max(m[2],m[3])]
+ # pos_dict[num] = [m[2],m[3],int(m[2]>m[3])]
+ chr_lengths[m[1]] = max(chr_lengths[m[1]],max(m[2],m[3]))
+
+
+ print nx.info(g)
+
+ print "Chromosome lenghts:"
+ print chr_lengths
+
+ margin = 10000
+
+ del_count = 0
+
+
+ #print nx.info(g)
+ print "Num reads read : "+str(len(read_to_chr_map))
+
+ for cur_edge in g.edges():
+ node0=int(cur_edge[0].split('_')[0])
+ node1=int(cur_edge[1].split('_')[0])
+ # g.edge[cur_edge[0]][cur_edge[1]]['st_pc'] = "{0:.2f}".format(1.0*pos_dict[node0][1]/chr_lengths[mapping_dict[node0]])
+ # g.edge[cur_edge[0]][cur_edge[1]]['end_pc'] = "{0:.2f}".format(1.0*pos_dict[node1][0]/chr_lengths[mapping_dict[node1]])
+
+ # st_pc is the "start percentage"; i.e., the percent location of edge[0] on its original chromosome
+ # end_pc is the "end percentage"; i.e., the percent location of edge[1] on its original chromosome
+
+ g.edge[cur_edge[0]][cur_edge[1]]['st_pc'] = 1.0*pos_dict[node0][1]/chr_lengths[mapping_dict[node0]]
+ g.edge[cur_edge[0]][cur_edge[1]]['end_pc'] = 1.0*pos_dict[node1][0]/chr_lengths[mapping_dict[node1]]
+
+
+ for node in g.nodes():
+ nodeid=int(node.split('_')[0])
+
+ if pos_dict[nodeid][0] < margin:
+ g.remove_node(node)
+ del_count += 1
+ continue
+
+ if pos_dict[nodeid][1] > chr_lengths[mapping_dict[nodeid]] - margin:
+ g.remove_node(node)
+ del_count += 1
+ continue
+
+ g.node[node]['count'] = 1
+ g.node[node]['read'] = node
+ #print str(nodeid), node,g.node[node]['chr']
+
+ print "Deleted nodes: "+str(del_count)
+
+
+ degree_sequence=sorted(g.degree().values(),reverse=True)
+ print Counter(degree_sequence)
+ for i in range(n_iter):
+ for node in g.nodes():
+ if g.in_degree(node) == 0:
+ g.remove_node(node)
+
+ print nx.info(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+
+ g.graph['aval'] = 1000000000
+
+ for i in range(5):
+ merge_simple_path(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ h=nx.DiGraph()
+ h.add_nodes_from(g)
+ h.add_edges_from(g.edges())
+
+ for cur_edge in h.edges():
+ h.edge[cur_edge[0]][cur_edge[1]]['st_pc'] = g.edge[cur_edge[0]][cur_edge[1]]['st_pc']
+ h.edge[cur_edge[0]][cur_edge[1]]['end_pc'] = g.edge[cur_edge[0]][cur_edge[1]]['end_pc']
+
+ # h = g.copy()
+
+ for node in g.nodes():
+ reads_in_node=[int(x.split('_')[0]) for x in g.node[node]['read'].split(':')]
+ try:
+ chr_in_node=map(lambda x: read_to_chr_map[x], reads_in_node)
+ except:
+ print reads_in_node,g.node[node]['read']
+ return
+ chr_in_node_set=set(chr_in_node)
+ if len(chr_in_node_set) ==1:
+ h.node[node]['chr']=chr_in_node[0]
+ else:
+ h.node[node]['chr']= ':'.join(chr_in_node)
+
+ h.node[node]['count']=g.node[node]['count']
+ try:
+ h.node[node]['read']=g.node[node]['read']
+ except:
+ pass
+
+
+ try:
+ import ujson
+ mapping = ujson.load(open(filename.split('.')[0]+'.mapping.json'))
+
+ print 'get mapping'
+
+ for node in h.nodes():
+ #print node
+ if mapping.has_key(node):
+ h.node[node]['aln_start'] = mapping[node][0]
+ h.node[node]['aln_end'] = mapping[node][1]
+ h.node[node]['aln_strand'] = mapping[node][2]
+ else:
+ h.node[node]['aln_start'] = 0
+ h.node[node]['aln_end'] = 0
+ h.node[node]['aln_strand'] = 0
+
+ except:
+ pass
+
+
+
+ nx.write_graphml(h, filename.split('.')[0]+'_condensed_annotated.graphml')
+ nx.write_graphml(g, filename.split('.')[0]+'_G_condensed_annotated.graphml')
+
+ print nx.number_weakly_connected_components(h)
+ print nx.number_strongly_connected_components(h)
+
+#
+
+filename = sys.argv[1]
+gt_file=sys.argv[2]
+run(filename, gt_file,5)
diff --git a/scripts/condense_graph_create_gfa_compute_n50.py b/scripts/condense_graph_create_gfa_compute_n50.py
new file mode 100644
index 0000000..23700d1
--- /dev/null
+++ b/scripts/condense_graph_create_gfa_compute_n50.py
@@ -0,0 +1,233 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+
+
+# This script condenses the graph down, creates a gfa file for the condensed graph, and computes the contig N50
+
+# python condense_graph_create_gfa_compute_n50.py ecoli.edges
+
+# The conditions in lines 23 and 24 are meant to prevent nodes corresponding to different strands from being merged
+# (and should be commented out if this is not desired, or if a json is not available)
+
+
+def merge_simple_path(g):
+ for node in g.nodes():
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ if g.node[in_node]['aln_strand']==g.node[node]['aln_strand'] or max(g.node[in_node]['aln_strand'],g.node[node]['aln_strand']) == 5:
+ if g.node[out_node]['aln_strand']==g.node[node]['aln_strand'] or max(g.node[out_node]['aln_strand'],g.node[node]['aln_strand']) == 5:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+def merge_path(g,in_node,node,out_node):
+ #ov1 = find_overlap(g.node[in_node]['bases'], g.node[node]['bases'])
+ #ov2 = find_overlap(g.node[node]['bases'], g.node[out_node]['bases'])
+
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ #length = g.node[node]['length'] + g.node[in_node]['length'] + g.node[out_node]['length'] - ov1 - ov2
+ #cov = (g.node[in_node]['cov'] * g.node[in_node]['length'] + g.node[node]['cov'] * g.node[node]['length'] + \
+ #g.node[out_node]['cov'] * g.node[out_node]['length'])/float(length)
+ #bases = g.node[in_node]['bases'][:-ov1] + g.node[node]['bases'] + g.node[out_node]['bases'][ov2:]
+
+ overlap1 = g.edge[in_node][node][0]['overlap']
+ overlap2 = g.edge[node][out_node][0]['overlap']
+
+ length0 = g.node[in_node]['length']
+ length1 = g.node[node]['length']
+ length2 = g.node[out_node]['length']
+
+
+ if overlap1 > min(length0,length1):
+ print "problem here:"
+ print overlap1, length0, length1
+
+
+ g.add_node(str(node_id),length = length0+length1+length2 - overlap1 - overlap2, aln_strand = g.node[node]['aln_strand'])
+ #g.add_node(str(node_id)+'-', bases = reverse_comp_bases(bases), length = length, cov = cov)
+
+ for cur_edge in g.in_edges(in_node):
+
+ # print g.edge[cur_edge[0]][cur_edge[1]][0]['overlap']
+
+ g.add_edge(cur_edge[0],str(node_id),overlap = g.edge[cur_edge[0]][cur_edge[1]][0]['overlap'])
+
+ for cur_edge in g.out_edges(out_node):
+ g.add_edge(str(node_id),cur_edge[1],overlap = g.edge[cur_edge[0]][cur_edge[1]][0]['overlap'])
+
+
+
+ g.remove_node(in_node)
+ g.remove_node(node)
+ g.remove_node(out_node)
+
+def comp_n50(contig_vec):
+ if len(contig_vec) == 0:
+ return 0
+ sorted_lengths = sorted(contig_vec)
+
+ total_length = sum(contig_vec)
+
+ half_length = 0.5*total_length
+
+ min_n50 = sorted_lengths[-1]
+ max_n50 = 0
+
+ for i in range(len(sorted_lengths)):
+ #if len(sorted_lengths) % 2 == 0:
+ # sum_1 = sum(sorted_lengths[0:i])
+ # sum_2 = sum(sorted_lengths[i:])
+ #else:
+ # sum_1 = sum(sorted_lengths[0:i+1])
+ # sum_2 = sum(sorted_lengths[i:])
+ sum_1 = sum(sorted_lengths[0:i+1])
+ sum_2 = sum(sorted_lengths[i:])
+ if sum_1 >= half_length and sum_2 >= half_length:
+ min_n50 = min(sorted_lengths[i],min_n50)
+ max_n50 = max(sorted_lengths[i],max_n50)
+
+ # print "Min N50: "+str(min_n50)
+ # print "Max N50: "+str(max_n50)
+
+ return 0.5*(min_n50+max_n50)
+
+
+
+def de_clip(filename, n_iter):
+
+ g = nx.MultiDiGraph()
+
+ # count = 0
+
+ with open(filename,'r') as f:
+ for line in f.xreadlines():
+ l = line.strip().split()
+ #print l2
+ g.add_edge(l[0],l[1],overlap=int(l[2])/2)
+ # if count < 10:
+ # print l[0], l[1], l[2]
+ # count += 1
+
+ node0start = int(l[7][1:])
+ node0end = int(l[8][:-1])
+
+ g.node[l[0]]['length'] = node0end - node0start
+
+ node1start = int(l[9][1:])
+ node1end = int(l[10][:-1])
+
+ g.node[l[1]]['length'] = node1end - node1start
+
+
+ print nx.info(g)
+
+ try:
+ import ujson
+ mapping = ujson.load(open(filename.split('.')[0]+'.mapping.json'))
+
+ # print mapping
+
+ print 'get mapping'
+
+ for node in g.nodes():
+ #print node
+ if mapping.has_key(node):
+
+ # alnstart = int(mapping[node][0])
+ # alnend = int(mapping[node][1])
+
+ # g.node[node]['length'] = abs(alnend-alnstart)
+ # print abs(alnend-alnstart)
+
+ g.node[node]['aln_strand'] = mapping[node][3]
+
+ # g.node[node]['aln_start'] = mapping[node][0]
+ # g.node[node]['aln_end'] = mapping[node][1]
+ # g.node[node]['aln_strand'] = mapping[node][2]
+ else:
+ # g.node[node]['length'] = 5000
+ g.node[node]['aln_strand'] = 5
+ # print "this happened"
+ # g.node[node]['aln_start'] = 0
+ # g.node[node]['aln_end'] = 0
+ # g.node[node]['aln_strand'] = 0
+
+ except:
+ pass
+
+
+
+ degree_sequence=sorted(g.degree().values(),reverse=True)
+ print Counter(degree_sequence)
+ for i in range(n_iter):
+ for node in g.nodes():
+ if g.degree(node) < 2:
+ g.remove_node(node)
+
+ print nx.info(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+
+ g.graph['aval'] = 1000000000
+
+ for i in range(5):
+ merge_simple_path(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+
+
+ nx.write_graphml(g, filename.split('.')[0]+'.graphml')
+
+ print nx.number_weakly_connected_components(g)
+ print nx.number_strongly_connected_components(g)
+
+
+ # Next we create the gfa file
+
+
+ outputfile = filename.split('.')[0]+'.gfa'
+ with open(outputfile, 'w') as fout:
+
+ for cur_node in g.nodes():
+
+ node_length = g.node[cur_node]['length']
+ node_str = 'A'*node_length
+ node_str = node_str + '\n'
+
+ fout.write("NODE "+str(cur_node)+' 0 0 0 0 0\n')
+ fout.write(node_str)
+ fout.write(node_str)
+ # print "NODE "+str(node)
+
+ for arc in g.edges():
+ fout.write("ARC "+str(arc[0])+' '+str(arc[1])+' 0\n')
+
+
+
+ # Compute N50
+
+ contig_lengths = []
+
+ for cur_node in g.nodes():
+ contig_lengths.append(g.node[cur_node]['length'])
+
+ print "N50 = "+str(comp_n50(contig_lengths))
+
+
+
+
+
+filename = sys.argv[1]
+de_clip(filename, 5)
diff --git a/scripts/condense_graph_with_gt.py b/scripts/condense_graph_with_gt.py
new file mode 100644
index 0000000..cbee3a4
--- /dev/null
+++ b/scripts/condense_graph_with_gt.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+
+
+
+def merge_simple_path(g):
+ for node in g.nodes():
+ #print g.in_degree(node), g.out_degree(node)
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ if g.node[in_node]['chr']==g.node[node]['chr'] and g.node[out_node]['chr']==g.node[node]['chr']:
+ #print g.node[in_node]['chr'],g.node[node]['chr'],g.node[out_node]['chr']
+ merge_path(g,in_node,node,out_node)
+
+
+def merge_two_nodes(g):
+ for node in g.nodes():
+ if g.in_degree(node) == 1 and g.out_degree(node) == 0:
+ in_node = g.in_edges(node)[0][0]
+ if g.out_degree(in_node) == 1:
+ if in_node != node:
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ g.add_node(str(node_id),
+ chr=g.node[node]['chr'],
+ count = g.node[in_node]['count'] + g.node[node]['count'],
+ read = g.node[in_node]['read'] + '_' + g.node[node]['read'],
+ #aln_chr = g.node[node]['aln_chr']
+ )
+ g.remove_node(in_node)
+ g.remove_node(node)
+
+
+def merge_path(g,in_node,node,out_node):
+ #ov1 = find_overlap(g.node[in_node]['bases'], g.node[node]['bases'])
+ #ov2 = find_overlap(g.node[node]['bases'], g.node[out_node]['bases'])
+
+ node_id = g.graph['aval']
+ g.graph['aval'] += 1
+ #length = g.node[node]['length'] + g.node[in_node]['length'] + g.node[out_node]['length'] - ov1 - ov2
+ #cov = (g.node[in_node]['cov'] * g.node[in_node]['length'] + g.node[node]['cov'] * g.node[node]['length'] + \
+ #g.node[out_node]['cov'] * g.node[out_node]['length'])/float(length)
+ #bases = g.node[in_node]['bases'][:-ov1] + g.node[node]['bases'] + g.node[out_node]['bases'][ov2:]
+
+ g.add_node(str(node_id),
+ chr=g.node[node]['chr'],
+ count = g.node[in_node]['count'] + g.node[node]['count'] + g.node[out_node]['count'],
+ read = g.node[in_node]['read'] + '_' + g.node[node]['read'] + '_' +g.node[out_node]['read'],
+ #aln_chr = g.node[node]['aln_chr']
+ )
+ #g.add_node(str(node_id)+'-', bases = reverse_comp_bases(bases), length = length, cov = cov)
+ #print g.node[str(node_id)]['chr']
+
+ for edge in g.in_edges(in_node):
+ g.add_edge(edge[0],str(node_id))
+
+ for edge in g.out_edges(out_node):
+ g.add_edge(str(node_id),edge[1])
+
+
+ g.remove_node(in_node)
+ g.remove_node(node)
+ g.remove_node(out_node)
+
+def input1(flname):
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ if len(lines1) < 5:
+ continue
+ #print lines1
+ g.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4], hinge_edge=int(lines1[5]))
+ g.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=int(lines1[5]))
+ return g
+
+def input2(flname):
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ g.add_edge(lines1[0], lines1[1])
+ return g
+
+def run(filename, gt_file, n_iter):
+
+
+ f=open(filename)
+ line1=f.readline()
+ print line1
+ f.close()
+ if len(line1.split()) !=2:
+ g=input1(filename)
+ else:
+ g=input2(filename)
+
+ mapping_dict = {}
+
+ with open(gt_file,'r') as f:
+ for num, line in enumerate(f.readlines()):
+ m = map(int, line.strip().split())
+ # mapping_dict[num] = [min(m), max(m), int(m[0]>m[1])]
+ mapping_dict[num] = m[1]
+
+ print nx.info(g)
+
+
+ for node in g.nodes():
+ nodeid=int(node.split('_')[0])
+
+ g.node[node]['count'] = 1
+ g.node[node]['chr']=mapping_dict[nodeid]
+ g.node[node]['read'] = node
+ #print str(nodeid), node,g.node[node]['chr']
+
+
+ degree_sequence=sorted(g.degree().values(),reverse=True)
+ print Counter(degree_sequence)
+ for i in range(n_iter):
+ for node in g.nodes():
+ if g.in_degree(node) == 0:
+ g.remove_node(node)
+
+ print nx.info(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+
+ g.graph['aval'] = 1000000000
+
+ for i in range(5):
+ merge_simple_path(g)
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ h=nx.DiGraph()
+ h.add_nodes_from(g)
+ h.add_edges_from(g.edges())
+ for node in g.nodes():
+ h.node[node]['count']=g.node[node]['count']
+ h.node[node]['chr']=g.node[node]['chr']
+ try:
+ h.node[node]['read']=g.node[node]['read']
+ except:
+ pass
+
+
+ try:
+ import ujson
+ mapping = ujson.load(open(filename.split('.')[0]+'.mapping.json'))
+
+ print 'get mapping'
+
+ for node in h.nodes():
+ #print node
+ if mapping.has_key(node):
+ h.node[node]['aln_start'] = mapping[node][0]
+ h.node[node]['aln_end'] = mapping[node][1]
+ h.node[node]['aln_strand'] = mapping[node][2]
+ else:
+ h.node[node]['aln_start'] = 0
+ h.node[node]['aln_end'] = 0
+ h.node[node]['aln_strand'] = 0
+
+ except:
+ pass
+
+
+
+ nx.write_graphml(h, filename.split('.')[0]+'_condensed.graphml')
+
+ print nx.number_weakly_connected_components(h)
+ print nx.number_strongly_connected_components(h)
+
+#
+
+filename = sys.argv[1]
+gt_file=sys.argv[2]
+run(filename, gt_file,5)
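The condensation in run() relies on merge_simple_path(), which only collapses interior nodes of a chain, i.e. nodes with in-degree 1 and out-degree 1 whose neighbours also chain linearly. A toy sketch of just that degree test (not the upstream routine; it drops the 'chr' bookkeeping):

    import networkx as nx

    toy = nx.DiGraph()
    toy.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd')])   # a -> b -> c -> d

    # 'b' and 'c' are interior nodes of the chain, so a pass of
    # chain contraction would merge them into a single condensed node.
    interior = [n for n in toy.nodes()
                if toy.in_degree(n) == 1 and toy.out_degree(n) == 1]
    print sorted(interior)   # ['b', 'c']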
diff --git a/scripts/connected.py b/scripts/connected.py
new file mode 100755
index 0000000..d1efb57
--- /dev/null
+++ b/scripts/connected.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+
+def longest_path(G):
+ dist = {} # stores [node, distance] pair
+ for node in nx.topological_sort(G):
+ # pairs of dist,node for all incoming edges
+ pairs = [(dist[v][0]+1,v) for v in G.pred[node]]
+ if pairs:
+ dist[node] = max(pairs)
+ else:
+ dist[node] = (0, node)
+ node,(length,_) = max(dist.items(), key=lambda x:x[1])
+ path = []
+ while length > 0:
+ path.append(node)
+ length,node = dist[node]
+ return list(reversed(path))
+
+
+filename = sys.argv[1]
+
+
+g = nx.DiGraph()
+
+with open(filename,'r') as f:
+ for line in f.xreadlines():
+ g.add_edge(*(line.strip().split('->')))
+
+
+print nx.info(g)
+degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+print Counter(degree_sequence)
+
+for i in range(15):
+ for node in g.nodes():
+ if g.in_degree(node) == 0:
+ g.remove_node(node)
+
+ print nx.info(g)
+
+#print nx.is_directed_acyclic_graph(g)
+#print list(nx.simple_cycles(g))
+degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+print Counter(degree_sequence)
+
+#print nx.diameter(g)
+
+def rev(string):
+ if string[-1] == '\'':
+ return string[:-1]
+ else:
+ return string+'\''
+
+#for edge in g.edges():
+# g.add_edge(rev(edge[1]), rev(edge[0]))
+ #print edge
+ #print rev(edge[1]), rev(edge[0])
+
+print nx.info(g)
+print [len(item) for item in nx.weakly_connected_components(g)]
+
+
+nx.write_graphml(g, filename.split('.')[0]+'.graphml')
+
+with open(sys.argv[2],'w') as f:
+ for edge in nx.dfs_edges(g):
+ f.write('{} {}\n'.format(edge[0],edge[1]))
+
+f.close()
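longest_path() above is the standard dynamic program over a topological order: each node stores the length of the longest incoming chain plus a back-pointer, and the path is recovered by walking the back-pointers from the best node. A hedged usage sketch on a made-up DAG, assuming the function definition above is in scope:

    import networkx as nx

    dag = nx.DiGraph()
    dag.add_edges_from([('a', 'b'), ('b', 'c'), ('a', 'c'), ('c', 'd')])

    # Distances along the topological order: a=0, b=1, c=2 (via b), d=3,
    # so the recovered path is the longest chain of edges in the DAG.
    print longest_path(dag)   # ['a', 'b', 'c', 'd']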
diff --git a/scripts/correct_head.py b/scripts/correct_head.py
new file mode 100755
index 0000000..3a0f429
--- /dev/null
+++ b/scripts/correct_head.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+import sys, os
+from pbcore.io import FastaIO
+
+def run(reader, writer, lookupfile):
+ with open (lookupfile,'w') as f:
+ for i,record in enumerate(reader):
+ seq_length = len(record.sequence)
+
+ zmw = i+1
+ old_header=record.header
+
+ if seq_length < 30:
+ new_header = 'Deleted'
+ f.write(old_header+'\t'+new_header+'\n')
+ continue
+ #bounds = record.header.split('/')[-1]
+ #start, end = [int(k) for k in bounds.split('_')]
+ start = 0
+ new_end = start + seq_length
+
+ new_header = "m000_000/{zmw}/{start}_{end}".format(zmw=zmw, start=start, end=new_end)
+ f.write(old_header+'\t'+new_header+'\n')
+
+ writer.writeRecord(new_header, record.sequence)
+
+def main(iname, ofile, lookupfile):
+ reader = FastaIO.FastaReader(iname)
+ writer = FastaIO.FastaWriter(ofile)
+ run(reader, writer,lookupfile)
+
+if __name__ == '__main__':
+ iname, oname, lookupfile = sys.argv[1:4]
+ ofile = open(oname, 'w')
+ try:
+ main(iname, ofile, lookupfile)
+ except:
+ # clean up (for make)
+ ofile.close()
+ os.unlink(oname)
+ raise
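correct_head.py rewrites FASTA headers into the movie/ZMW/range form that the downstream DAZZLER tooling expects, using a fixed fake movie name. A minimal sketch of just that naming convention, without pbcore (the sequence here is made up):

    seq = 'ACGTACGTACGTACGTACGTACGTACGTACGT'   # hypothetical read
    zmw = 1                                    # 1-based read index
    start = 0
    end = start + len(seq)

    new_header = "m000_000/{zmw}/{start}_{end}".format(zmw=zmw, start=start, end=end)
    print new_header   # m000_000/1/0_32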
diff --git a/scripts/create_bandage_file.py b/scripts/create_bandage_file.py
new file mode 100755
index 0000000..441d59b
--- /dev/null
+++ b/scripts/create_bandage_file.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+import sys
+import os
+
+
+
+def run(inputfile,outputfile):
+
+
+ nodes = {}
+ arcs = {}
+
+ with open(inputfile) as file:
+
+ for line in file:
+
+ line_str = line[:-1]
+ split_str = line_str.split(' ')
+
+ node0 = int(split_str[0])
+ node1 = int(split_str[1])
+
+ # print node0,node1
+
+ nodes[node0] = 1
+ nodes[node1] = 1
+
+ if node0 < node1:
+ arcs[tuple([node0,node1])] = 1
+ else:
+ arcs[tuple([node1,node0])] = 1
+
+
+
+ with open(outputfile, 'w') as fout:
+
+ for node in nodes:
+ fout.write("NODE "+str(node)+' 0 0 0 0 0\n')
+ fout.write('AAA\n')
+ fout.write('AAA\n')
+ # print "NODE "+str(node)
+
+ for arc in arcs:
+ fout.write("ARC "+str(arc[0])+' '+str(arc[1])+' 0\n')
+
+
+
+
+
+def main():
+
+ run(sys.argv[1],sys.argv[2])
+ return
+
+
+if __name__ == '__main__':
+ main()
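A small usage example for run() above (file names are hypothetical): every distinct read id in the edge list becomes one NODE stub followed by two placeholder 'AAA' sequence lines, and every edge becomes one ARC line with the smaller id first.

    # Hypothetical two-edge input.
    with open('toy.edges', 'w') as f:
        f.write('0 1\n1 2\n')

    run('toy.edges', 'toy.bandage')   # assumes run() from create_bandage_file.py is in scope

    # toy.bandage now holds NODE stubs for 0, 1 and 2, plus the lines
    # "ARC 0 1 0" and "ARC 1 2 0".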
diff --git a/scripts/create_hgraph.py b/scripts/create_hgraph.py
new file mode 100644
index 0000000..3f79e49
--- /dev/null
+++ b/scripts/create_hgraph.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import random
+import sys
+from collections import Counter
+import json
+
+
+
+# This script creates a graphml file from the hgraph file
+
+
+def read_graph(filename,gt_file):
+
+ with open(gt_file) as f:
+ read_dict=json.load(f)
+ g = nx.DiGraph()
+
+ with open (filename) as f:
+ for lines in f:
+ lines1=lines.split()
+ g.add_node(lines1[0] + "_" + lines1[2])
+ g.add_node(lines1[1] + "_" + lines1[3])
+ if lines1[0] in read_dict:
+ g.node[lines1[0] + "_" + lines1[2]]['aln_start']=min(read_dict[lines1[0]][0][0],read_dict[lines1[0]][0][1])
+ g.node[lines1[0] + "_" + lines1[2]]['aln_end']=max(read_dict[lines1[0]][0][0],read_dict[lines1[0]][0][1])
+ else:
+ g.node[lines1[0] + "_" + lines1[2]]['aln_start']=0
+ g.node[lines1[0] + "_" + lines1[2]]['aln_end']=0
+ if lines1[1] in read_dict:
+ g.node[lines1[1] + "_" + lines1[3]]['aln_start']=min(read_dict[lines1[1]][0][0],read_dict[lines1[1]][0][1])
+ g.node[lines1[1] + "_" + lines1[3]]['aln_end']=max(read_dict[lines1[1]][0][0],read_dict[lines1[1]][0][1])
+ else:
+ g.node[lines1[1] + "_" + lines1[3]]['aln_start']=0
+ g.node[lines1[1] + "_" + lines1[3]]['aln_end']=0
+
+ g.node[lines1[0] + "_" + lines1[2]]['active']=1
+ g.node[lines1[1] + "_" + lines1[3]]['active']=int(lines1[4])
+ g.add_edge(lines1[0] + "_" + lines1[2], lines1[1] + "_" + lines1[3])
+
+
+ nx.write_graphml(g, filename.split('.')[0]+'_hgraph.graphml')
+
+ print nx.number_weakly_connected_components(g)
+ print nx.number_strongly_connected_components(g)
+
+
+if __name__ == "__main__":
+
+ read_graph(sys.argv[1],sys.argv[2])
+
+
+
+
+
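Each hgraph record is parsed into two vertices named '<read id>_<field>', which the rest of the pipeline treats as read plus orientation, with the fifth field flagging whether the second endpoint is active. A toy parse of one hypothetical line:

    line = "12 57 0 1 1"           # hypothetical hgraph record
    fields = line.split()

    node_a = fields[0] + "_" + fields[2]   # '12_0'
    node_b = fields[1] + "_" + fields[3]   # '57_1'
    active = int(fields[4])                # 1
    print node_a, node_b, active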
diff --git a/scripts/create_hgraph_nogt.py b/scripts/create_hgraph_nogt.py
new file mode 100644
index 0000000..ed522c9
--- /dev/null
+++ b/scripts/create_hgraph_nogt.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+
+import networkx as nx
+import random
+import sys
+from collections import Counter
+
+
+
+
+# This script creates a graphml file from the hgraph file
+
+
+def read_graph(filename):
+
+ g = nx.DiGraph()
+
+ with open (filename) as f:
+ for lines in f:
+ lines1=lines.split()
+ g.add_node(lines1[0] + "_" + lines1[2])
+ g.add_node(lines1[1] + "_" + lines1[3])
+
+ g.node[lines1[0] + "_" + lines1[2]]['active']=1
+ g.node[lines1[1] + "_" + lines1[3]]['active']=int(lines1[4])
+ g.add_edge(lines1[0] + "_" + lines1[2], lines1[1] + "_" + lines1[3])
+
+
+ nx.write_graphml(g, filename.split('.')[0]+'_hgraph.graphml')
+
+ print nx.number_weakly_connected_components(g)
+ print nx.number_strongly_connected_components(g)
+
+
+if __name__ == "__main__":
+
+ read_graph(sys.argv[1])
+
+
+
+
+
diff --git a/scripts/download_NCTC_pipeline.py b/scripts/download_NCTC_pipeline.py
new file mode 100644
index 0000000..536c0dd
--- /dev/null
+++ b/scripts/download_NCTC_pipeline.py
@@ -0,0 +1,50 @@
+import json
+import os
+import sys
+import subprocess
+
+base_dir = '/data/pacbio_assembly/pb_data/NCTC/'
+bact_dict = json.load(open(base_dir+'NCTC.json'))
+
+#bacterium_of_interest='NCTC7972'
+
+bacterium_of_interest=sys.argv[1]
+
+if len(sys.argv) > 2:
+ bact_dict=json.load(open(sys.argv[2]))
+
+bact_name="_".join(bact_dict[bacterium_of_interest]['Species'][0].split())
+
+cmd_base = 'ascp -QT -l 1000m -i /data/pacbio_assembly/pb_data/asperaweb_id_dsa.openssh era-fasp at fasp.ega.ebi.ac.uk:vol1/'
+dest_dir = base_dir+bacterium_of_interest+'/'
+
+os.system('mkdir -p '+dest_dir)
+
+for run, file_list in bact_dict[bacterium_of_interest]['file_paths'].items():
+ for file_path in file_list:
+ cmd = cmd_base+file_path+' '+dest_dir
+ print cmd
+ os.system(cmd)
+
+dest_fasta_name = dest_dir+bact_name
+
+dextract_cmd = 'dextract -o'+dest_fasta_name
+
+bax_files = [x for x in os.listdir(dest_dir) if x.endswith('.bax.h5')]
+
+for bax_file in bax_files:
+ dextract_cmd += " " + dest_dir+bax_file
+
+print dextract_cmd
+
+try:
+ subprocess.check_output(dextract_cmd.split())
+ print 'dextract done. deleting .bax.h5 files'
+ os.system('rm '+dest_dir+'*.bax.h5')
+ print 'removing .quiva files'
+ os.system('rm '+dest_dir+'*.quiva')
+except:
+ print 'error'
+
+
+
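The cleanup above shells out to rm with wildcards via os.system. A hedged alternative sketch of the same step using glob and os.remove (not what the upstream script does, just a quieter equivalent; the directory is made up):

    import glob
    import os

    dest_dir = '/tmp/NCTC_example/'   # hypothetical download directory

    for pattern in ('*.bax.h5', '*.quiva'):
        for path in glob.glob(os.path.join(dest_dir, pattern)):
            os.remove(path)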
diff --git a/scripts/draft_assembly.py b/scripts/draft_assembly.py
new file mode 100755
index 0000000..b568b76
--- /dev/null
+++ b/scripts/draft_assembly.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+
+def linearize(filename):
+ graph_name = filename.split('.')[0]+'.graphml'
+ g = nx.read_graphml(graph_name)
+
+ print nx.info(g)
+
+ # take the largest strongly connected component
+
+ con = list(nx.strongly_connected_component_subgraphs(g))
+
+ con.sort(key = lambda x:len(x), reverse = True)
+ print [len(item) for item in con]
+
+ print nx.info(con[0])
+
+ dfs_edges = list(nx.dfs_edges(con[0]))
+
+
+ dfs_edges.append((dfs_edges[-1][-1], dfs_edges[0][0]))
+
+ #print dfs_edges
+
+ with open(filename.split('.')[0]+'.linear.edges', 'w') as f:
+ for item in dfs_edges:
+ f.write(item[0] + ' ' + item[1] + ' ' + str(con[0].edge[item[0]][item[1]]['ew'])+'\n')
+
+
+
+filename = sys.argv[1]
+linearize(filename)
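linearize() keeps only the largest strongly connected component before walking its DFS edges. A toy sketch of that selection step on a made-up graph:

    import networkx as nx

    g = nx.DiGraph()
    g.add_edges_from([('a', 'b'), ('b', 'a'),                 # 2-cycle
                      ('c', 'd'), ('d', 'e'), ('e', 'c')])    # 3-cycle

    con = sorted(nx.strongly_connected_components(g), key=len, reverse=True)
    print [len(c) for c in con]   # [3, 2] -> the 3-cycle is kept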
diff --git a/scripts/draft_assembly_not_perfect.py b/scripts/draft_assembly_not_perfect.py
new file mode 100755
index 0000000..b568b76
--- /dev/null
+++ b/scripts/draft_assembly_not_perfect.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+
+def linearize(filename):
+ graph_name = filename.split('.')[0]+'.graphml'
+ g = nx.read_graphml(graph_name)
+
+ print nx.info(g)
+
+ # take the largest strongly connected component
+
+ con = list(nx.strongly_connected_component_subgraphs(g))
+
+ con.sort(key = lambda x:len(x), reverse = True)
+ print [len(item) for item in con]
+
+ print nx.info(con[0])
+
+ dfs_edges = list(nx.dfs_edges(con[0]))
+
+
+ dfs_edges.append((dfs_edges[-1][-1], dfs_edges[0][0]))
+
+ #print dfs_edges
+
+ with open(filename.split('.')[0]+'.linear.edges', 'w') as f:
+ for item in dfs_edges:
+ f.write(item[0] + ' ' + item[1] + ' ' + str(con[0].edge[item[0]][item[1]]['ew'])+'\n')
+
+
+
+filename = sys.argv[1]
+linearize(filename)
diff --git a/scripts/draw2.py b/scripts/draw2.py
new file mode 100755
index 0000000..81f00b7
--- /dev/null
+++ b/scripts/draw2.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+#from ipywidgets.widgets import interact
+import interface_utils as util
+import sys
+import os
+import linereader
+os.environ['PATH'] += ':/data/pacbio_assembly/AwesomeAssembler/DALIGNER'
+#print os.popen("export").read()
+
+Qvd = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
+ 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
+ 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D',
+ 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
+ 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
+ 'Y']
+Qvv = range(len(Qvd))[::-1]
+
+QVdict = dict(zip(Qvd,Qvv))
+
+
+dbname = sys.argv[1]
+lasname = sys.argv[2]
+n = int(sys.argv[3])
+path = os.getcwd()+'/'
+coveragename = path + dbname + '.coverage.txt'
+
+aln = []
+
+coveragefile = linereader.copen(coveragename)
+
+coverage = coveragefile.getline(n)
+cov = coverage.split()[2:]
+covx = []
+covy = []
+for item in cov:
+ data = item.split(',')
+ covx.append(int(data[0]))
+ covy.append(int(data[1]))
+
+qv = list(util.get_QV(path+dbname, [n]))[0]
+qx = []
+qy = []
+ts = int(sys.argv[5])
+
+if len(sys.argv) < 7:
+ rev = 0
+else:
+ rev = int(sys.argv[6])
+
+print 'rev', rev
+
+for i in range(len(qv)):
+ qx.append(i*ts)
+ qy.append(QVdict[qv[i]])
+
+for item in util.get_alignments2(path+dbname,path+lasname,[n]):
+ aln.append(item)
+
+if (len(aln) == 0):
+ sys.exit()
+#print aln[0:5]
+
+aln.sort(key = lambda x:x[2])
+
+alns = []
+current_b = aln[0][2]
+aln_group = []
+
+for item in aln:
+ if current_b != item[2]:
+ alns.append(aln_group)
+ aln_group = []
+ aln_group.append(item)
+ current_b = item[2]
+ else:
+ aln_group.append(item)
+
+num = len(alns)
+print len(aln), len(alns)
+
+#print [len(item) for item in alns]
+#print [item[0:3] for item in aln]
+
+alns.sort(key = lambda x:min([item[3] for item in x]))
+
+#size_chunk = num/grid_size
+#for i in range(grid_size):
+# aln[i*size_chunk:min((i+1)*size_chunk, num)] = sorted(aln[i*size_chunk:min((i+1)*size_chunk, num)],key = lambda x: x[4]-x[3] ,reverse=True)
+
+fig = plt.figure(figsize = (15,10))
+plt.axes()
+ax1 = plt.subplot2grid((6,6), (0, 0), colspan=6, rowspan=4)
+ax2 = plt.subplot2grid((6,6), (4, 0), colspan=6, rowspan=1, sharex = ax1)
+ax3 = plt.subplot2grid((6,6), (5, 0), colspan=6, rowspan=1, sharex = ax1)
+
+#plt.gca().axes.get_yaxis().set_visible(False)
+l = aln[0][5]
+tip = l/200
+ed = l/50
+grid_size = 1.0
+ax1.set_xlim(-2000,l+2000)
+ax1.set_ylim(-5,num*grid_size)
+
+if rev == 0:
+ points = [[0,0], [l,0], [l+tip,grid_size/4], [l,grid_size/2], [0,grid_size/2]]
+else:
+ points = [[0,0], [-tip,grid_size/4], [0,grid_size/2], [l,grid_size/2], [l,0]]
+
+
+
+#rectangle = plt.Rectangle((0, 0), l, 5, fc='r',ec = 'none')
+polygon = plt.Polygon(points,fc = 'r', ec = 'none', alpha = 0.6)
+ax1.add_patch(polygon)
+
+dotted_line = plt.Line2D((0, 0), (0, num*grid_size ),ls='-.')
+ax1.add_line(dotted_line)
+
+dotted_line2 = plt.Line2D((l, l), (0, num*grid_size ),ls='-.')
+ax1.add_line(dotted_line2)
+
+for i,aln_group in enumerate(alns):
+ for item in aln_group:
+ if rev == 0:
+ abpos = item[3]
+ aepos = item[4]
+ bbpos = item[6]
+ bepos = item[7]
+ blen = item[8]
+ strand = item[0]
+ else:
+ aepos = l - item[3]
+ abpos = l - item[4]
+ blen = item[8]
+ bbpos = blen - item[7]
+ bepos = blen - item[6]
+ strand = item[0]
+ if strand == 'n':
+ strand = 'c'
+ else:
+ strand = 'n'
+
+
+ points_start = []
+ points_end = []
+
+ if strand == 'n':
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos + ed+ tip, (i+1)*grid_size + grid_size/4], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size]]
+ else:
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2], [abpos - tip, (i+1)*grid_size + grid_size/4]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size],[abpos-tip, (i+1)*grid_size+grid_size/4], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2],[abpos-ed-tip, (i+1)*grid_size+grid_size/4], [abpos-ed, (i+1)*grid_size]]
+
+ polygon = plt.Polygon(points,fc = 'b', ec = 'none', alpha = 0.6)
+ polygon.set_url('aln_svg' + str(item[2])+'.svg')
+ ax1.add_patch(polygon)
+
+ if points_end != []:
+ polygon2 = plt.Polygon(points_end,fc = 'g', ec = 'none', alpha = 0.6)
+ ax1.add_patch(polygon2)
+
+ if points_start != []:
+ polygon2 = plt.Polygon(points_start,fc = 'g', ec = 'none', alpha = 0.6)
+ ax1.add_patch(polygon2)
+
+
+
+
+if rev == 1:
+ covx = [l -item for item in covx]
+ qx = [l - item for item in qx]
+
+ax2.plot(covx, covy)
+ax3.plot(qx, qy)
+
+plt.xlabel('position')
+ax1.set_ylabel('pile-o-gram')
+ax2.set_ylabel('coverage')
+ax3.set_ylabel('i-qv')
+
+
+plt.savefig(path + sys.argv[4] + '/aln_svg' + str(n) + '_' + str(rev)+ '.svg')
diff --git a/scripts/draw2_pileup.py b/scripts/draw2_pileup.py
new file mode 100755
index 0000000..374c636
--- /dev/null
+++ b/scripts/draw2_pileup.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from ipywidgets.widgets import interact
+import interface_utils as util
+import sys
+
+import os
+os.environ['PATH'] += ':/data/pacbio_assembly/AwesomeAssembler/DALIGNER'
+#print os.popen("export").read()
+
+n = (sys.argv[1])
+rst = []
+with open(n) as f:
+ for line in f:
+ tmp = line.strip().split()
+ t1 = tmp[0]
+ if t1[-1] == '\'':
+ t1 = t1[:-1]
+ t2 = tmp[1]
+ if t2[-1] == '\'':
+ t2 = t2[:-1]
+ rst.append((int(t1)+1, int(tmp[2])))
+ #rst.append(int(t2)+1)
+
+#rst = range(1,1399)
+path = '/data/pacbio_assembly/AwesomeAssembler/data/'
+aln = []
+for i,e in enumerate(rst):
+ n = e[0]
+ print i,n
+ li = list(util.get_alignments_mapping(path+'ecoli', path + 'ecoli.ref', path +'ecoli.ecoli.ref.las', [n]))
+ if (len(li) > 0):
+ item = sorted(li, key=lambda x:x[4] - x[3], reverse = True)[0]
+ aln.append(item)
+
+print aln[0:20]
+
+#aln.sort(key = lambda x:x[2])
+
+alns = []
+current_b = aln[0][2]
+aln_group = []
+
+for item in aln:
+ if current_b != item[2]:
+ alns.append(aln_group)
+ aln_group = []
+ aln_group.append(item)
+ current_b = item[2]
+ else:
+ aln_group.append(item)
+
+num = len(alns)
+print len(aln), len(alns)
+
+#print [len(item) for item in alns]
+#print [item[0:3] for item in aln]
+
+#alns.sort(key = lambda x:min([item[3] for item in x]))
+
+#size_chunk = num/grid_size
+#for i in range(grid_size):
+# aln[i*size_chunk:min((i+1)*size_chunk, num)] = sorted(aln[i*size_chunk:min((i+1)*size_chunk, num)],key = lambda x: x[4]-x[3] ,reverse=True)
+
+plt.figure(figsize = (15,10))
+plt.axes()
+#plt.gca().axes.get_yaxis().set_visible(False)
+l = aln[0][5]
+tip = l/5000
+ed = l/2000
+grid_size = 1.0
+plt.xlim(-2000,l+2000)
+plt.ylim(-5,num*grid_size)
+
+points = [[0,0], [l,0], [l+tip,grid_size/4], [l,grid_size/2], [0,grid_size/2]]
+#rectangle = plt.Rectangle((0, 0), l, 5, fc='r',ec = 'none')
+polygon = plt.Polygon(points,fc = 'r', ec = 'none', alpha = 0.6)
+plt.gca().add_patch(polygon)
+
+dotted_line = plt.Line2D((0, 0), (0, num*grid_size ),ls='-.')
+plt.gca().add_line(dotted_line)
+
+dotted_line2 = plt.Line2D((l, l), (0, num*grid_size ),ls='-.')
+plt.gca().add_line(dotted_line2)
+
+for i,aln_group in enumerate(alns):
+ for item in aln_group:
+ abpos = item[3]
+ aepos = item[4]
+ bbpos = item[6]
+ bepos = item[7]
+ blen = item[8]
+ strand = item[0]
+ points_start = []
+ points_end = []
+
+ if strand == 'n':
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos + ed+ tip, (i+1)*grid_size + grid_size/4], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size]]
+ else:
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2], [abpos - tip, (i+1)*grid_size + grid_size/4]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size],[abpos-tip, (i+1)*grid_size+grid_size/4], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2],[abpos-ed-tip, (i+1)*grid_size+grid_size/4], [abpos-ed, (i+1)*grid_size]]
+
+ polygon = plt.Polygon(points,fc = 'b', ec = 'none', alpha = 0.6)
+ polygon.set_url("http://shannon.stanford.edu:5000/aln" + str(item[2]+1) + ".pdf")
+ plt.gca().add_patch(polygon)
+
+ if points_end != []:
+ polygon2 = plt.Polygon(points_end,fc = 'g', ec = 'none', alpha = 0.6)
+ plt.gca().add_patch(polygon2)
+
+ if points_start != []:
+ polygon2 = plt.Polygon(points_start,fc = 'g', ec = 'none', alpha = 0.6)
+ plt.gca().add_patch(polygon2)
+
+plt.savefig('mapping/map.' + str(n)+ '.svg')
diff --git a/scripts/draw2_pileup_region.py b/scripts/draw2_pileup_region.py
new file mode 100755
index 0000000..56e1fb8
--- /dev/null
+++ b/scripts/draw2_pileup_region.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from ipywidgets.widgets import interact
+import interface_utils as util
+import sys
+
+import os
+os.environ['PATH'] += ':/data/pacbio_assembly/AwesomeAssembler/DALIGNER'
+#print os.popen("export").read()
+
+left = int(sys.argv[1])
+right = int(sys.argv[2])
+
+
+#rst = range(1,1399)
+path = '/data/pacbio_assembly/AwesomeAssembler/data/ecoli/'
+aln = []
+
+bb = []
+with open('ecoli.linear.edges') as f:
+ for line in f:
+ e = line.split(" ")[0]
+ if e[-1] == '\'':
+ e = e[:-1]
+
+ bb.append(int(e))
+
+print bb
+
+bb = set(bb)
+
+
+for i,item in enumerate(util.get_alignments_mapping2(path+'draft', path +'ecoli', path +'draft.ecoli.las')):
+ if i%2000 == 0:
+ print i, item
+
+ if item[3] >= left and item[4] <= right:
+ aln.append(item)
+
+
+
+
+
+
+
+
+print 'number:',len(aln)
+aln.sort(key = lambda x:x[2])
+
+alns = []
+current_b = aln[0][2]
+aln_group = []
+
+for item in aln:
+ if current_b != item[2]:
+ alns.append(aln_group)
+ aln_group = []
+ aln_group.append(item)
+ current_b = item[2]
+ else:
+ aln_group.append(item)
+
+num = len(alns)
+
+print len(aln), len(alns)
+
+alns.sort(key = lambda x:min([item[3] for item in x]))
+
+
+plt.figure(figsize = (15,10))
+plt.axes()
+#plt.gca().axes.get_yaxis().set_visible(False)
+#l = aln[0][5]
+tip = (right-left)/5000
+ed = (right-left)/2000
+grid_size = 1.0
+plt.xlim(left-2000,right+2000)
+plt.ylim(-5,num*grid_size)
+
+points = [[left,0], [right,0], [right+tip,grid_size/4], [right,grid_size/2], [left,grid_size/2]]
+#rectangle = plt.Rectangle((0, 0), l, 5, fc='r',ec = 'none')
+polygon = plt.Polygon(points,fc = 'r', ec = 'none', alpha = 0.6)
+plt.gca().add_patch(polygon)
+
+dotted_line = plt.Line2D((left, left), (0, num*grid_size ),ls='-.')
+plt.gca().add_line(dotted_line)
+
+dotted_line2 = plt.Line2D((right, right), (0, num*grid_size ),ls='-.')
+plt.gca().add_line(dotted_line2)
+
+for i,aln_group in enumerate(alns):
+ for item in aln_group:
+ abpos = item[3]
+ aepos = item[4]
+ bbpos = item[6]
+ bepos = item[7]
+ blen = item[8]
+ strand = item[0]
+ points_start = []
+ points_end = []
+
+ if strand == 'n':
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos + ed+ tip, (i+1)*grid_size + grid_size/4], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size]]
+ else:
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2], [abpos - tip, (i+1)*grid_size + grid_size/4]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size],[abpos-tip, (i+1)*grid_size+grid_size/4], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2],[abpos-ed-tip, (i+1)*grid_size+grid_size/4], [abpos-ed, (i+1)*grid_size]]
+
+ if item[2] in bb:
+ polygon = plt.Polygon(points,fc = 'r', ec = 'none', alpha = 0.8)
+ else:
+ polygon = plt.Polygon(points,fc = 'b', ec = 'none', alpha = 0.6)
+
+ polygon.set_url("http://shannon.stanford.edu:5000/aln" + str(item[2]+1) + ".pdf")
+ plt.gca().add_patch(polygon)
+
+ if points_end != []:
+ polygon2 = plt.Polygon(points_end,fc = 'g', ec = 'none', alpha = 0.6)
+ plt.gca().add_patch(polygon2)
+
+ if points_start != []:
+ polygon2 = plt.Polygon(points_start,fc = 'g', ec = 'none', alpha = 0.6)
+ plt.gca().add_patch(polygon2)
+
+plt.savefig('mapping/map.' + str(left) +'_'+ str(right)+ '.svg')
diff --git a/scripts/draw2_pileup_w_repeat.py b/scripts/draw2_pileup_w_repeat.py
new file mode 100755
index 0000000..3408665
--- /dev/null
+++ b/scripts/draw2_pileup_w_repeat.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from ipywidgets.widgets import interact
+import interface_utils as util
+import sys
+
+import os
+os.environ['PATH'] += ':/data/pacbio_assembly/AwesomeAssembler/DALIGNER'
+#print os.popen("export").read()
+path = os.environ['PWD'] + '/' #/data/pacbio_assembly/AwesomeAssembler/data/'
+
+n = (sys.argv[1])
+rst = []
+with open(n) as f:
+ for line in f:
+ rst.append(int(line.strip()))
+
+
+rep = {}
+with open(path + 'ecoli.repeat.txt') as f:
+ for line in f:
+ l = map(int, line.strip().split())
+ if len(l) > 1:
+ for i in range((len(l) - 1) / 2):
+ if not rep.has_key(l[0]):
+ rep[l[0]] = []
+ rep[l[0]].append((l[2*i+1], l[2*i+2]))
+
+
+#rst = range(1,1399)
+aln = []
+for i,e in enumerate(rst):
+ n = e
+ print i,n
+ li = list(util.get_alignments_mapping(path+'ecoli', path + 'ecoli.ref', path +'ecoli.ecoli.ref.las', [n]))
+ if (len(li) > 0):
+ item = sorted(li, key=lambda x:x[4] - x[3], reverse = True)
+ for l in item:
+ aln.append(l)
+
+print aln[0:20]
+
+
+#aln.sort(key = lambda x:x[2])
+
+alns = []
+current_b = aln[0][2]
+aln_group = []
+
+for item in aln:
+ if current_b != item[2]:
+ aln_group.sort(key = lambda x:x[4]-x[3], reverse = True)
+ alns.append(aln_group)
+ aln_group = []
+ aln_group.append(item)
+ current_b = item[2]
+ else:
+ aln_group.append(item)
+
+num = len(alns)
+print len(aln), len(alns)
+
+#print [len(item) for item in alns]
+#print [item[0:3] for item in aln]
+
+alns.sort(key = lambda x:x[0][3])
+
+#size_chunk = num/grid_size
+#for i in range(grid_size):
+# aln[i*size_chunk:min((i+1)*size_chunk, num)] = sorted(aln[i*size_chunk:min((i+1)*size_chunk, num)],key = lambda x: x[4]-x[3] ,reverse=True)
+
+plt.figure(figsize = (15,10))
+plt.axes()
+#plt.gca().axes.get_yaxis().set_visible(False)
+l = aln[0][5]
+tip = l/5000
+ed = l/2000
+grid_size = 1.0
+plt.xlim(-2000,l+2000)
+plt.ylim(-5,num*grid_size)
+
+points = [[0,0], [l,0], [l+tip,grid_size/4], [l,grid_size/2], [0,grid_size/2]]
+#rectangle = plt.Rectangle((0, 0), l, 5, fc='r',ec = 'none')
+polygon = plt.Polygon(points,fc = 'r', ec = 'none', alpha = 0.6)
+plt.gca().add_patch(polygon)
+
+dotted_line = plt.Line2D((0, 0), (0, num*grid_size ),ls='-.')
+plt.gca().add_line(dotted_line)
+
+dotted_line2 = plt.Line2D((l, l), (0, num*grid_size ),ls='-.')
+plt.gca().add_line(dotted_line2)
+
+for i,aln_group in enumerate(alns):
+ for item in aln_group:
+ abpos = item[3]
+ aepos = item[4]
+ bbpos = item[6]
+ bepos = item[7]
+ blen = item[8]
+ strand = item[0]
+ points_start = []
+ points_end = []
+ rid = item[2]
+ abpos = abpos - bbpos
+ aepos = aepos + (blen - bepos)
+
+ if strand == 'n':
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos + ed+ tip, (i+1)*grid_size + grid_size/4], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size]]
+ else:
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2], [abpos - tip, (i+1)*grid_size + grid_size/4]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size],[abpos-tip, (i+1)*grid_size+grid_size/4], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2],[abpos-ed-tip, (i+1)*grid_size+grid_size/4], [abpos-ed, (i+1)*grid_size]]
+
+
+ polygon = plt.Polygon(points,fc = 'b', ec = 'none', alpha = 0.6)
+ polygon.set_url("http://shannon.stanford.edu:5000/aln" + str(item[2]+1) + ".pdf")
+ plt.gca().add_patch(polygon)
+
+ #if points_end != []:
+ # polygon2 = plt.Polygon(points_end,fc = 'g', ec = 'none', alpha = 0.6)
+ # plt.gca().add_patch(polygon2)
+ #
+ #if points_start != []:
+ # polygon2 = plt.Polygon(points_start,fc = 'g', ec = 'none', alpha = 0.6)
+ # plt.gca().add_patch(polygon2)
+
+
+ if rep.has_key(rid):
+ for item in rep[rid]:
+ s = item[0]
+ e = item[1]
+ if item[0] == -1:
+ s = 0
+ if item[1] == -1:
+ e = blen
+
+
+ if strand != 'n':
+ s = blen - s
+ e = blen - e
+
+ points = [[abpos + s, (i+1)*grid_size], [abpos + e, (i+1)*grid_size], [abpos + e, (i+1)*grid_size+grid_size/2], [abpos + s, (i+1)*grid_size+grid_size/2]]
+
+ polygon2 = plt.Polygon(points,fc = 'y', ec = 'none', alpha = 0.8)
+ plt.gca().add_patch(polygon2)
+
+
+
+plt.savefig('mapping/map.svg')
diff --git a/scripts/draw_pileup_region.py b/scripts/draw_pileup_region.py
new file mode 100755
index 0000000..31dfd11
--- /dev/null
+++ b/scripts/draw_pileup_region.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from ipywidgets.widgets import interact
+import interface_utils as util
+import sys
+
+import os
+os.environ['PATH'] += ':~/AwesomeAssembler/DALIGNER'
+#print os.popen("export").read()
+
+left = int(sys.argv[1])
+right = int(sys.argv[2])
+
+ref = sys.argv[3]
+read = sys.argv[4]
+las = sys.argv[5]
+contig = sys.argv[6]
+length_th = int(sys.argv[7])
+
+#path = '/data/pacbio_assembly/AwesomeAssembler/data/ecoli/'
+aln = []
+
+#bb = []
+#with open('ecoli.linear.edges') as f:
+# for line in f:
+# e = line.split(" ")[0]
+# if e[-1] == '\'':
+# e = e[:-1]
+#
+# bb.append(int(e))
+#
+#print bb
+#
+#bb = set(bb)
+
+
+for i,item in enumerate(util.get_alignments_mapping3(ref, read, las, contig)):
+ if i%2000 == 0:
+ print i, item
+
+ if item[3] >= left and item[4] <= right and item[4] - item[3] > length_th:
+ aln.append(item)
+
+
+
+covy = np.zeros((right - left, ))
+for item in aln:
+ covy[item[3] - left : item[4] - left] += 1
+
+covx = np.arange(left, right)
+
+
+print 'number:',len(aln)
+aln.sort(key = lambda x:x[2])
+
+alns = []
+current_b = aln[0][2]
+aln_group = []
+
+for item in aln:
+ if current_b != item[2]:
+ alns.append(aln_group)
+ aln_group = []
+ aln_group.append(item)
+ current_b = item[2]
+ else:
+ aln_group.append(item)
+
+num = len(alns)
+
+print len(aln), len(alns)
+
+alns.sort(key = lambda x:min([item[3] for item in x]))
+
+
+
+fig = plt.figure(figsize = (15,10))
+plt.axes()
+ax1 = plt.subplot2grid((6,6), (0, 0), colspan=6, rowspan=4)
+ax2 = plt.subplot2grid((6,6), (4, 0), colspan=6, rowspan=1, sharex = ax1)
+
+
+#plt.gca().axes.get_yaxis().set_visible(False)
+#l = aln[0][5]
+tip = (right-left)/5000
+ed = (right-left)/2000
+grid_size = 1.0
+ax1.set_xlim(left-2000,right+2000)
+ax1.set_ylim(-5,num*grid_size)
+
+points = [[left,0], [right,0], [right+tip,grid_size/4], [right,grid_size/2], [left,grid_size/2]]
+#rectangle = plt.Rectangle((0, 0), l, 5, fc='r',ec = 'none')
+polygon = plt.Polygon(points,fc = 'r', ec = 'none', alpha = 0.6)
+ax1.add_patch(polygon)
+
+dotted_line = plt.Line2D((left, left), (0, num*grid_size ),ls='-.')
+ax1.add_line(dotted_line)
+
+dotted_line2 = plt.Line2D((right, right), (0, num*grid_size ),ls='-.')
+ax1.add_line(dotted_line2)
+
+for i,aln_group in enumerate(alns):
+ for item in aln_group:
+ abpos = item[3]
+ aepos = item[4]
+ bbpos = item[6]
+ bepos = item[7]
+ blen = item[8]
+ strand = item[0]
+ points_start = []
+ points_end = []
+
+ if strand == 'n':
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos + tip, (i+1)*grid_size + grid_size/4], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos + ed+ tip, (i+1)*grid_size + grid_size/4], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size]]
+ else:
+ points = [[abpos, (i+1)*grid_size], [aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [abpos, (i+1)*grid_size+grid_size/2], [abpos - tip, (i+1)*grid_size + grid_size/4]]
+ if (bepos < blen):
+ points_end = [[aepos, (i+1)*grid_size], [aepos, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size+grid_size/2], [aepos+ed, (i+1)*grid_size]]
+ if (bbpos > 0):
+ points_start = [[abpos, (i+1)*grid_size],[abpos-tip, (i+1)*grid_size+grid_size/4], [abpos, (i+1)*grid_size+grid_size/2], [abpos-ed, (i+1)*grid_size+grid_size/2],[abpos-ed-tip, (i+1)*grid_size+grid_size/4], [abpos-ed, (i+1)*grid_size]]
+
+ #if item[2] in bb:
+ # polygon = plt.Polygon(points,fc = 'r', ec = 'none', alpha = 0.8)
+ #else:
+ polygon = plt.Polygon(points,fc = 'b', ec = 'none', alpha = 0.6)
+
+ polygon.set_url("http://shannon.stanford.edu:5000/aln" + str(item[2]+1) + ".pdf")
+ ax1.add_patch(polygon)
+
+ if points_end != []:
+ polygon2 = plt.Polygon(points_end,fc = 'g', ec = 'none', alpha = 0.6)
+ ax1.add_patch(polygon2)
+
+ if points_start != []:
+ polygon2 = plt.Polygon(points_start,fc = 'g', ec = 'none', alpha = 0.6)
+ ax1.add_patch(polygon2)
+
+
+ax2.plot(covx, covy)
+plt.xlabel('position')
+ax1.set_ylabel('pile-o-gram')
+ax2.set_ylabel('coverage')
+
+
+plt.savefig('mapping/map.' + str(contig) + '_' + str(left) +'_'+ str(right)+ '.svg')
diff --git a/scripts/get_NCTC_json.py b/scripts/get_NCTC_json.py
new file mode 100644
index 0000000..bba89c4
--- /dev/null
+++ b/scripts/get_NCTC_json.py
@@ -0,0 +1,65 @@
+import urllib2
+from bs4 import BeautifulSoup
+import json
+
+response = urllib2.urlopen('http://www.sanger.ac.uk/resources/downloads/bacteria/nctc/')
+html = response.read()
+
+soup=BeautifulSoup(html)
+table = soup.find("table")
+
+headings = [th.get_text() for th in table.find("tr").find_all("th")]
+
+dataset={}
+for row in table.find_all("tr")[1:]:
+ #
+ print row
+ row1= [td.get_text() for td in row.find_all("td")]
+ print row1
+ metadata={}
+ cellname=''
+ for i, td in enumerate(row.find_all("td")):
+ #print metadata
+ link=td.find('a')
+ # print i, td
+
+
+ if i==1:
+ cellname=td.get_text()
+ print cellname
+
+ if i==3:
+ # print td
+# ERR_soup=BeautifulSoup(td)
+ ERR_links=[]
+ potential_links = td.findAll('a')
+ # print potential_links
+ for potential_link in td.findAll('a'):
+
+ ERR_links.append((potential_link.text, potential_link['href']))
+ metadata[headings[i]]=ERR_links
+ continue
+ if link != None:
+ link=link.get('href')
+ metadata[headings[i]]=(td.get_text(),link)
+
+ list_of_files={}
+ for run in metadata[headings[3]]:
+ link_to_go=run[1]
+ response1 = urllib2.urlopen(link_to_go+"&display=xml")
+ xml = response1.read()
+ xmlsoup = BeautifulSoup(xml)
+ fllist=[]
+ for data_block in xmlsoup.findAll('data_block'):
+ for files in data_block.findAll('files'):
+ for fle in files.findAll('file'):
+ fllist.append(fle['filename'])
+ list_of_files[run[0]]=fllist
+ metadata['file_paths']=list_of_files
+# print xml
+ dataset[cellname]=metadata
+
+
+with open('NCTC.json', 'w') as outfile:
+ json.dump(dataset, outfile)
+
diff --git a/scripts/get_consensus_gfa.py b/scripts/get_consensus_gfa.py
new file mode 100755
index 0000000..ac9decf
--- /dev/null
+++ b/scripts/get_consensus_gfa.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import subprocess
+from parse_read import *
+import numpy as np
+import networkx as nx
+import itertools
+
+
+
+filedir = sys.argv[1]
+filename = sys.argv[2]
+consensus_name = sys.argv[3]
+in_graphml_name = filedir + '/' + filename +'_draft.graphml'
+map_filename = filedir + '/draft_map.txt'
+
+
+g = nx.read_graphml(in_graphml_name)
+
+
+gfaname = filedir + '/' + filename +'_consensus.gfa'
+cols = np.loadtxt(map_filename, dtype=str,usecols=(1,))
+del_contigs = np.nonzero(cols == 'Deleted')[0]
+
+
+# consensus_contigs = []
+# i = 0
+# try:
+# with open(consensus_name) as f:
+# for line in f:
+# if line[0] != '>':
+# consensus_contigs.append(line.strip())
+# i += 1
+# while i in set(del_contigs):
+# consensus_contigs.append('')
+# print len()
+# i += 1
+# except:
+# pass
+
+del_contig_ptr = 0
+cols = np.loadtxt(map_filename, dtype=str,usecols=(1,))
+del_contigs = np.nonzero(cols == 'Deleted')[0]
+
+consensus_contigs = []
+i = 0
+with open(consensus_name) as f:
+ for line in f:
+ if line[0] != '>':
+ while del_contig_ptr < len(del_contigs) :
+ if len(consensus_contigs) == del_contigs[del_contig_ptr]:
+ consensus_contigs.append('')
+ del_contig_ptr += 1
+ else:
+ break
+ consensus_contigs.append(line.strip())
+ i += 1
+
+
+nodes_to_keep = [x for x in g.nodes() if consensus_contigs[g.node[x]['contig_id']] != '' ]
+h = g.subgraph(nodes_to_keep)
+
+
+# for i, vert in enumerate(h.nodes()):
+# print i, vert
+# try:
+# print i,len(h.node[vert]['path']), len(h.node[vert]['segment']), len(consensus_contigs[i])
+# except:
+# print len(h.nodes()), len(consensus_contigs)
+# raise
+
+print 'Number of contigs'
+print len(consensus_contigs), len(h.nodes())
+# print [len(x) for x in consensus_contigs]
+
+
+with open(gfaname,'w') as f:
+ f.write("H\tVN:Z:1.0\n")
+ for j,vert in enumerate(h.nodes()):
+
+ i = h.node[vert]['contig_id']
+ # print j, i
+
+ seg = consensus_contigs[i]
+ print(len(seg))
+ seg_line = "S\t"+vert+"\t"+seg + '\n'
+ f.write(seg_line)
+ for edge in h.edges():
+ edge_line = "L\t"+edge[0]+"\t+\t"+edge[1]+"\t+\t0M\n"
+ f.write(edge_line)
+
+#last = h.nodes()[-1]
+#print h.node[last]
+#path_last = h.node[last]['path']
+
+
+
+#for i in range(len(path_last)-1):
+# read_a = path_last[i]
+# read_b = path_last[i+1]
+# print read_a, read_b, in_graph.edge[read_a][read_b]
+
+# for i,node in enumerate(h.nodes()):
+# h.node[node]['path'] = ';'.join(h.node[node]['path'])
+# nx.write_graphml(h,out_graphml_name)
+
+
+
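The GFA written above uses only three record types: one H header, one S line per kept contig, and one L line per edge with a 0M overlap. A toy sketch of the same layout with made-up segment names and sequences:

    segments = {'1': 'ACGT', '2': 'GGCC'}
    links = [('1', '2')]

    with open('toy_consensus.gfa', 'w') as f:
        f.write("H\tVN:Z:1.0\n")
        for name, seq in segments.items():
            f.write("S\t" + name + "\t" + seq + "\n")
        for a, b in links:
            # Overlap field left as 0M, mirroring the script above.
            f.write("L\t" + a + "\t+\t" + b + "\t+\t0M\n")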
diff --git a/scripts/get_draft_annotation.py b/scripts/get_draft_annotation.py
new file mode 100755
index 0000000..27b0896
--- /dev/null
+++ b/scripts/get_draft_annotation.py
@@ -0,0 +1,368 @@
+#!/usr/bin/env python
+import sys
+import os
+import subprocess
+from parse_read import *
+import numpy as np
+import networkx as nx
+import itertools
+
+NCTCname = sys.argv[1]
+filename = '/data/pacbio_assembly/pb_data/NCTC/'+NCTCname+'/'+NCTCname
+graphml_path = sys.argv[2]
+
+in_graph = nx.read_graphml(graphml_path)
+
+reads = sorted(list(set([int(x.split("_")[0].lstrip("B")) for x in in_graph.nodes()])))
+
+dbshow_reads = ' '.join([str(x+1) for x in reads])
+
+DBshow_cmd = "DBshow "+filename+' '+dbshow_reads
+stream = subprocess.Popen(DBshow_cmd.split(),
+ stdout=subprocess.PIPE,bufsize=1)
+reads_queried = parse_read(stream.stdout)
+read_dict = {}
+for read_id,read in itertools.izip(reads,reads_queried):
+ rdlen = len(read[1])
+# print read
+ read_dict[read_id] = read
+
+complement = {'A':'T','C': 'G','T':'A', 'G':'C','a':'t','t':'a','c':'g','g':'c'}
+
+def reverse_complement(string):
+ return "".join(map(lambda x:complement[x],reversed(string)))
+
+def get_string(path):
+ #print path
+ ret_str = ''
+ for itm in path:
+# print itm
+ read_id,rd_orientation = itm[0].split("_")
+ if rd_orientation == '1':
+ assert itm[1][0] >= itm[1][1]
+# print itm
+ str_st = itm[1][1]
+ str_end = itm[1][0]
+ read_str = read_dict[int(read_id.lstrip("B"))][1][str_st:str_end]
+ else:
+ assert itm[1][0] <= itm[1][1]
+ str_st = itm[1][0]
+ str_end = itm[1][1]
+ read_str = reverse_complement(read_dict[int(read_id.lstrip("B"))][1][str_st:str_end])
+# print str_st,str_end
+# print read_id
+# print read_dict[int(read_id)][str_st:str_end]
+# print read_str
+ print 'read len',len(read_str)
+ ret_str += read_str
+ print len(path), len(ret_str)
+ return ret_str
+
+
+
+
+vertices_of_interest = set([x for x in in_graph if in_graph.in_degree(x) != 1 or in_graph.out_degree(x) != 1])
+
+read_tuples = {}
+
+for vert in vertices_of_interest:
+
+ vert_id, vert_or = vert.split("_")
+ if vert_or == '1':
+ continue
+ vert_len = len(read_dict[int(vert_id)][1])
+# print vert_len
+ read_starts = [(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]
+ read_ends = [(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]
+ if read_starts:
+ read_start = max(read_starts)
+ else:
+ read_start = 0
+ if read_ends:
+ read_end = min(read_ends)
+ else:
+ read_end = vert_len
+ read_tuples[vert] = (read_start,read_end)
+ print read_starts, read_ends, vert
+
+
+for vert in vertices_of_interest:
+
+ vert_id, vert_or = vert.split("_")
+ if vert_or == '1':
+ read_tuples[vert] = read_tuples[vert_id+"_0"]
+
+
+start_vertices = [x for x in vertices_of_interest if in_graph.in_degree(x) == 0 or in_graph.out_degree(x) > 1]
+h = nx.DiGraph()
+
+read_tuples_raw = {}
+for vertex in vertices_of_interest:
+ successors = in_graph.successors(vertex)
+ if successors:
+ succ = successors[0]
+ d = in_graph.get_edge_data(vertex,succ)
+ read_tuples_raw[vertex] = (d['read_a_start_raw'], d['read_a_end_raw'])
+ else:
+ predecessors = in_graph.predecessors(vertex)
+ if not len(predecessors) == 0:
+ pred = predecessors[0]
+ d = in_graph.get_edge_data(pred,vertex)
+ read_tuples_raw[vertex] = (d['read_b_start_raw'], d['read_b_end_raw'])
+ else:
+ read_tuples_raw[vertex] = (0,0)
+
+
+
+for vertex in vertices_of_interest:
+ h.add_node(vertex)
+ if vertex.split("_")[1] == '0':
+ path_var = [(vertex,(read_tuples[vertex][0], read_tuples[vertex][1]))]
+ else:
+ path_var = [(vertex,(read_tuples[vertex][1], read_tuples[vertex][0]))]
+ #print path_var
+ segment = get_string(path_var)
+ h.node[vertex]['start_read'] = path_var[0][1][0]
+ h.node[vertex]['end_read'] = path_var[0][1][1]
+ h.node[vertex]['path'] = [vertex]
+ h.node[vertex]['segment'] = segment
+
+vertices_used = set([x for x in h.nodes()])
+contig_no = 1
+for start_vertex in vertices_of_interest:
+ first_out_vertices = in_graph.successors(start_vertex)
+ print start_vertex, first_out_vertices
+ for vertex in first_out_vertices:
+ predecessor = start_vertex
+ start_vertex_id,start_vertex_or = start_vertex.split("_")
+ cur_vertex = vertex
+ if start_vertex_or == '0':
+ cur_path = [(start_vertex,(read_tuples[start_vertex][1],
+ in_graph.edge[start_vertex][cur_vertex]['read_a_start']))]
+ elif start_vertex_or == '1':
+ cur_path = [(start_vertex,(read_tuples[start_vertex][0],
+ in_graph.edge[start_vertex][cur_vertex]['read_a_start']))]
+
+ while cur_vertex not in vertices_of_interest:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+
+ stop_vertex_id, stop_vertex_or = cur_vertex.split("_")
+ if stop_vertex_or == '0':
+ cur_path.append((cur_vertex,(in_graph.edge[predecessor][cur_vertex]['read_b_start'],
+ read_tuples[cur_vertex][0])))
+ elif stop_vertex_or == '1':
+ cur_path.append((cur_vertex,(in_graph.edge[predecessor][cur_vertex]['read_b_start'],
+ read_tuples[cur_vertex][1])))
+
+
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+# print cur_path
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ h.node[node_name]['start_read'] = path_var[0][1][0]
+ h.node[node_name]['end_read'] = path_var[-1][1][1]
+ h.node[node_name]['segment'] = get_string(cur_path)
+ h.add_edges_from([(start_vertex,node_name),(node_name,cur_vertex)])
+# paths.append(cur_path)
+
+#print read_tuples
+
+while set(in_graph.nodes())-vertices_used:
+ vert = list(set(in_graph.nodes())-vertices_used)[0]
+ vert_id,vert_or = vert.split("_")
+ if vert_or == '0':
+ read_start = min( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ read_end = max( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ vertRC = vert_id+"_1"
+ else:
+ read_start = max( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ read_end = min( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ vertRC = vert_id+"_0"
+
+ successor_start = in_graph.successors(vert)[0]
+ d = in_graph.get_edge_data(vert,successor_start)
+ read_tuples_raw[vert] = (d['read_a_start_raw'], d['read_a_end_raw'])
+
+ successor_start = in_graph.successors(vertRC)[0]
+ d = in_graph.get_edge_data(vertRC,successor_start)
+ read_tuples_raw[vertRC] = (d['read_a_start_raw'], d['read_a_end_raw'])
+
+ h.add_node(vert)
+ node_path = [vert]
+ h.node[vert]['path'] = node_path
+ h.node[vert]['start_read'] = read_start
+ h.node[vert]['end_read'] = read_end
+ h.node[vert]['segment'] = get_string([(vert,(read_start, read_end))])
+ vertices_used.add(vert)
+
+ first_out_vertices = in_graph.successors(vert)
+ for vertex in first_out_vertices:
+ predecessor = vert
+ cur_vertex = vertex
+ cur_path = []
+ while cur_vertex != vert:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+# print cur_path
+
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ h.node[node_name]['start_read'] = path_var[0][1][0]
+ h.node[node_name]['end_read'] = path_var[-1][1][1]
+ h.node[node_name]['segment'] = get_string(cur_path)
+ h.add_edges_from([(vert,node_name),(node_name,vert)])
+
+ if vertRC not in vertices_used:
+ h.add_node(vertRC)
+ h.node[vertRC]['segment'] = get_string([(vertRC,(read_end, read_start))])
+ h.node[vertRC]['path'] = [vertRC]
+ h.node[vertRC]['start_read'] = read_end
+ h.node[vertRC]['end_read'] = read_start
+
+ vertices_used.add(vertRC)
+ first_out_vertices = in_graph.successors(vertRC)
+ for vertex in first_out_vertices:
+ predecessor = vertRC
+ cur_vertex = vertex
+ cur_path = []
+ while cur_vertex != vertRC:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+ # print cur_path
+
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ h.node[node_name]['start_read'] = path_var[0][1][0]
+ h.node[node_name]['end_read'] = path_var[-1][1][1]
+ h.node[node_name]['segment'] = get_string(cur_path)
+ print len(cur_path)
+ h.add_edges_from([(vertRC,node_name),(node_name,vertRC)])
+
+
+
+outfile = '/data/pacbio_assembly/pb_data/NCTC/'+NCTCname+'/'+NCTCname + ".edges.list"
+
+vert_to_merge = [x for x in h.nodes() if len(h.successors(x)) == 1 and len(h.predecessors(h.successors(x)[0])) == 1 and
+ len(nx.node_connected_component(h.to_undirected(), x)) > 2]
+
+while True:
+
+ vert_to_merge = [x for x in h.nodes() if len(h.successors(x)) == 1 and len(h.predecessors(h.successors(x)[0])) == 1 and
+ len(nx.node_connected_component(h.to_undirected(), x)) > 2]
+
+ if not vert_to_merge:
+ break
+ vert = vert_to_merge[0]
+ #print vert,
+ succ = h.successors(vert)[0]
+ preds = h.predecessors(vert)
+ h.node[succ]['segment'] = h.node[vert]['segment'] + h.node[succ]['segment']
+ h.node[succ]['path'] = h.node[vert]['path'] + h.node[succ]['path'][1:]
+ for pred in preds:
+ #print pred, succ
+ h.add_edges_from([(pred,succ)])
+ h.remove_edge(pred,vert)
+ h.remove_edge(vert,succ)
+ h.remove_node(vert)
+
+for i, vert in enumerate(h.nodes()):
+ print i,len(h.node[vert]['path'])
+
+with open(outfile, 'w') as f:
+ for i,node in enumerate(h.nodes()):
+ #print node
+ #print h.node[node]
+ path = h.node[node]['path']
+
+ f.write('>Unitig%d\n'%(i))
+ if len(path) == 1:
+ #print path[0]
+ f.write(' '.join([path[0].split('_')[0], path[0].split('_')[1], str(read_tuples_raw[path[0]][0]), str(read_tuples_raw[path[0]][1])]) + '\n')
+ for j in range(len(path)-1):
+ nodeA = path[j].lstrip("B")
+ nodeB = path[j+1].lstrip("B")
+
+ d = in_graph.get_edge_data(path[j],path[j+1])
+
+ f.write('%s %s %s %s %d %d %d %d %d\n'%(nodeA.split('_')[0],nodeA.split('_')[1] , nodeB.split('_')[0],
+ nodeB.split('_')[1], -d['read_a_start_raw'] + d['read_a_end_raw'] - d['read_b_start_raw'] + d['read_b_end_raw'],
+ d['read_a_start_raw'], d['read_a_end_raw'], d['read_b_start_raw'], d['read_b_end_raw']))
+
+out_graphml_name = '/data/pacbio_assembly/pb_data/NCTC/'+NCTCname+'/'+NCTCname+'_draft.graphml'
+
+
+
+gfaname = '/data/pacbio_assembly/pb_data/NCTC/'+NCTCname+'/'+NCTCname+'_draft_python.gfa'
+consensus_name = sys.argv[3]
+
+consensus_contigs = []
+try:
+ with open(consensus_name) as f:
+ for line in f:
+ if line[0] != '>':
+ consensus_contigs.append(line.strip())
+except:
+ pass
+for i, vert in enumerate(h.nodes()):
+ print i,len(h.node[vert]['path']), len(h.node[vert]['segment']), len(consensus_contigs[i])
+
+
+with open(gfaname,'w') as f:
+ f.write("H\tVN:Z:1.0\n")
+ for i,vert in enumerate(h.nodes()):
+ if len(consensus_contigs) > 0:
+ seg = consensus_contigs[i]
+ else:
+ seg = h.node[vert]['segment']
+
+ seg_line = "S\t"+vert+"\t"+seg + '\n'
+ f.write(seg_line)
+ for edge in h.edges():
+ edge_line = "L\t"+edge[0]+"\t+\t"+edge[1]+"\t+\t0M\n"
+ f.write(edge_line)
+
+#last = h.nodes()[-1]
+#print h.node[last]
+#path_last = h.node[last]['path']
+
+
+
+#for i in range(len(path_last)-1):
+# read_a = path_last[i]
+# read_b = path_last[i+1]
+# print read_a, read_b, in_graph.edge[read_a][read_b]
+
+for i,node in enumerate(h.nodes()):
+ h.node[node]['path'] = ';'.join(h.node[node]['path'])
+nx.write_graphml(h,out_graphml_name)
+
+
+
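get_string() above stitches contigs by slicing each read on its path and reverse-complementing reads used in the '1' orientation. A quick sanity check of the complement table it relies on (the input sequence is made up):

    complement = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C',
                  'a': 't', 't': 'a', 'c': 'g', 'g': 'c'}

    def reverse_complement(string):
        return "".join(map(lambda x: complement[x], reversed(string)))

    print reverse_complement('ACCGT')   # ACGGT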
diff --git a/scripts/get_draft_path.py b/scripts/get_draft_path.py
new file mode 100755
index 0000000..14a8f73
--- /dev/null
+++ b/scripts/get_draft_path.py
@@ -0,0 +1,519 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import subprocess
+from parse_read import *
+import numpy as np
+import networkx as nx
+import itertools
+from pbcore.io import FastaIO
+
+filedir = sys.argv[1]
+filename = sys.argv[2]
+graphml_path = sys.argv[3]
+
+in_graph = nx.read_graphml(graphml_path)
+
+reads = sorted(list(set([int(x.split("_")[0].lstrip("B")) for x in in_graph.nodes()])))
+
+dbshow_reads = ' '.join([str(x+1) for x in reads])
+
+DBshow_cmd = "DBshow "+ filedir+'/'+ filename+' '+dbshow_reads
+stream = subprocess.Popen(DBshow_cmd.split(),
+ stdout=subprocess.PIPE,bufsize=1)
+reads_queried = parse_read(stream.stdout)
+read_dict = {}
+for read_id,read in itertools.izip(reads,reads_queried):
+ rdlen = len(read[1])
+# print read
+ read_dict[read_id] = read
+
+complement = {'A':'T','C': 'G','T':'A', 'G':'C','a':'t','t':'a','c':'g','g':'c'}
+
+
+def rev_node(node):
+ node_id = node.split('_')[0]
+ return node_id + '_' + str(1-int(node.split('_')[1]))
+
+
+
+def reverse_complement(string):
+ return "".join(map(lambda x:complement[x],reversed(string)))
+
+def get_string(path):
+ # print path
+ ret_str = ''
+ for itm in path:
+ # print itm
+ read_id,rd_orientation = itm[0].split("_")
+ if rd_orientation == '1':
+ assert itm[1][0] >= itm[1][1]
+ str_st = itm[1][1]
+ str_end = itm[1][0]
+ read_str = read_dict[int(read_id.lstrip("B"))][1][str_st:str_end]
+ else:
+
+ assert itm[1][0] <= itm[1][1]
+ str_st = itm[1][0]
+ str_end = itm[1][1]
+ read_str = reverse_complement(read_dict[int(read_id.lstrip("B"))][1][str_st:str_end])
+# print str_st,str_end
+# print read_id
+# print read_dict[int(read_id)][str_st:str_end]
+# print read_str
+ # print 'read len',len(read_str)
+ ret_str += read_str
+ # print len(path), len(ret_str)
+ return ret_str
+
+
+
+# the following loop removes start/end inconsistencies created in pruning and clipping
+for vert in in_graph:
+
+ vert_id, vert_or = vert.split("_")
+ if vert_or == '1':
+ continue
+
+ read_starts = [(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]
+ read_starts.append(0)
+ read_ends = [(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]
+ read_ends.append(100000)
+
+ if max(read_starts) > min(read_ends):
+
+ for pred in in_graph.predecessors(vert):
+ in_graph.remove_edge(pred,vert)
+ in_graph.remove_edge(rev_node(vert),rev_node(pred))
+
+
+
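+# vertices of interest are the branch points and path endpoints: nodes whose in-degree or out-degree is not exactly 1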
+vertices_of_interest = set([x for x in in_graph if in_graph.in_degree(x) != 1 or in_graph.out_degree(x) != 1])
+
+read_tuples = {}
+
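+# for each forward-oriented vertex of interest, record a trimmed (start, end) interval on its read:
+# start is the largest position at which an incoming overlap starts, end is the smallest position at which an outgoing overlap starts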
+for vert in vertices_of_interest:
+
+ vert_id, vert_or = vert.split("_")
+ if vert_or == '1':
+ continue
+ vert_len = len(read_dict[int(vert_id)][1])
+# print vert_len
+ read_starts = [(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]
+ read_ends = [(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]
+ if read_starts:
+ read_start = max(read_starts)
+ else:
+ read_start = 0
+ if read_ends:
+ read_end = min(read_ends)
+ else:
+ read_end = vert_len
+ read_tuples[vert] = (read_start,read_end)
+ # print read_starts, read_ends, vert
+
+
+for vert in vertices_of_interest:
+
+ vert_id, vert_or = vert.split("_")
+ if vert_or == '1':
+ read_tuples[vert] = read_tuples[vert_id+"_0"]
+
+
+start_vertices = [x for x in vertices_of_interest if in_graph.in_degree(x) == 0 or in_graph.out_degree(x) > 1]
+h = nx.DiGraph()
+
+read_tuples_raw = {}
+for vertex in vertices_of_interest:
+ successors = in_graph.successors(vertex)
+ if successors:
+ succ = successors[0]
+ d = in_graph.get_edge_data(vertex,succ)
+ read_tuples_raw[vertex] = (d['read_a_start_raw'], d['read_a_end_raw'])
+ else:
+ predecessors = in_graph.predecessors(vertex)
+ if not len(predecessors) == 0:
+ pred = predecessors[0]
+ d = in_graph.get_edge_data(pred,vertex)
+ read_tuples_raw[vertex] = (d['read_b_start_raw'], d['read_b_end_raw'])
+ else:
+ read_tuples_raw[vertex] = (0,0)
+
+
+for vertex in vertices_of_interest:
+ h.add_node(vertex)
+ if vertex.split("_")[1] == '0':
+ path_var = [(vertex,(read_tuples[vertex][0], read_tuples[vertex][1]))]
+ else:
+ path_var = [(vertex,(read_tuples[vertex][1], read_tuples[vertex][0]))]
+ #print path_var
+ segment = get_string(path_var)
+ h.node[vertex]['start_read'] = path_var[0][1][0]
+ h.node[vertex]['end_read'] = path_var[0][1][1]
+ h.node[vertex]['path'] = [vertex]
+ h.node[vertex]['segment'] = segment
+
+vertices_used = set([x for x in h.nodes()])
+contig_no = 1
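+# walk each unbranched path leaving a vertex of interest until the next vertex of interest is reached,
+# collapsing the intermediate reads into a single contig node of h with their concatenated sequence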
+for start_vertex in vertices_of_interest:
+ first_out_vertices = in_graph.successors(start_vertex)
+ # print start_vertex, first_out_vertices
+ for vertex in first_out_vertices:
+ predecessor = start_vertex
+ start_vertex_id,start_vertex_or = start_vertex.split("_")
+ cur_vertex = vertex
+ if start_vertex_or == '0':
+ cur_path = [(start_vertex,(read_tuples[start_vertex][1],
+ in_graph.edge[start_vertex][cur_vertex]['read_a_start']))]
+ elif start_vertex_or == '1':
+ cur_path = [(start_vertex,(read_tuples[start_vertex][0],
+ in_graph.edge[start_vertex][cur_vertex]['read_a_start']))]
+
+ while cur_vertex not in vertices_of_interest:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+
+ stop_vertex_id, stop_vertex_or = cur_vertex.split("_")
+ if stop_vertex_or == '0':
+ cur_path.append((cur_vertex,(in_graph.edge[predecessor][cur_vertex]['read_b_start'],
+ read_tuples[cur_vertex][0])))
+ elif stop_vertex_or == '1':
+ cur_path.append((cur_vertex,(in_graph.edge[predecessor][cur_vertex]['read_b_start'],
+ read_tuples[cur_vertex][1])))
+
+
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+# print cur_path
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ h.node[node_name]['start_read'] = cur_path[0][1][0]
+ h.node[node_name]['end_read'] = cur_path[-1][1][1]
+ h.node[node_name]['segment'] = get_string(cur_path)
+ h.add_edges_from([(start_vertex,node_name),(node_name,cur_vertex)])
+# paths.append(cur_path)
+
+#print read_tuples
+
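+# any node still unused at this point lies on a cycle that contains no vertex of interest (an isolated
+# circular component); pick one node per cycle as an anchor and walk the cycle, and its reverse complement,
+# to build circular contigs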
+while set(in_graph.nodes())-vertices_used:
+ vert = list(set(in_graph.nodes())-vertices_used)[0]
+ vert_id,vert_or = vert.split("_")
+ if vert_or == '0':
+ read_start = min( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ read_end = max( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ vertRC = vert_id+"_1"
+ else:
+ read_start = max( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ read_end = min( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ vertRC = vert_id+"_0"
+
+ successor_start = in_graph.successors(vert)[0]
+ d = in_graph.get_edge_data(vert,successor_start)
+ read_tuples_raw[vert] = (d['read_a_start_raw'], d['read_a_end_raw'])
+
+ successor_start = in_graph.successors(vertRC)[0]
+ d = in_graph.get_edge_data(vertRC,successor_start)
+ read_tuples_raw[vertRC] = (d['read_a_start_raw'], d['read_a_end_raw'])
+
+ h.add_node(vert)
+ node_path = [vert]
+ h.node[vert]['path'] = node_path
+ h.node[vert]['start_read'] = read_start
+ h.node[vert]['end_read'] = read_end
+ h.node[vert]['segment'] = get_string([(vert,(read_start, read_end))])
+ vertices_used.add(vert)
+
+ first_out_vertices = in_graph.successors(vert)
+ for vertex in first_out_vertices:
+ predecessor = vert
+ cur_vertex = vertex
+ cur_path = []
+ while cur_vertex != vert:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+# print cur_path
+
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ try:
+ h.node[node_name]['start_read'] = cur_path[0][1][0]
+ h.node[node_name]['end_read'] = cur_path[-1][1][1]
+ except:
+ print path_var
+ raise
+ h.node[node_name]['segment'] = get_string(cur_path)
+ h.add_edges_from([(vert,node_name),(node_name,vert)])
+
+ if vertRC not in vertices_used:
+ h.add_node(vertRC)
+ h.node[vertRC]['segment'] = get_string([(vertRC,(read_end, read_start))])
+ h.node[vertRC]['path'] = [vertRC]
+ h.node[vertRC]['start_read'] = read_end
+ h.node[vertRC]['end_read'] = read_start
+
+ vertices_used.add(vertRC)
+ first_out_vertices = in_graph.successors(vertRC)
+ for vertex in first_out_vertices:
+ predecessor = vertRC
+ cur_vertex = vertex
+ cur_path = []
+ while cur_vertex != vertRC:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+ # print cur_path
+
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ h.node[node_name]['start_read'] = cur_path[0][1][0]
+ h.node[node_name]['end_read'] = cur_path[-1][1][1]
+ h.node[node_name]['segment'] = get_string(cur_path)
+ # print len(cur_path)
+ h.add_edges_from([(vertRC,node_name),(node_name,vertRC)])
+
+
+
+outfile = filedir + '/' + filename + ".edges.list"
+# outfile_norevcomp = filedir + '/' + filename + ".norevcomp.edges.list"
+
+
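+# merge simple chains: a node whose single successor has no other predecessor is folded into that successor
+# (sequences and paths are concatenated)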
+vert_to_merge = [x for x in h.nodes() if len(h.successors(x)) == 1 and len(h.predecessors(h.successors(x)[0])) == 1 and
+ x != h.successors(x)[0] ]
+
+# while True:
+
+
+for vert in vert_to_merge:
+
+    # re-check the merge condition, since earlier merges in this loop may have changed the graph
+    if len(h.successors(vert)) != 1 or len(h.predecessors(h.successors(vert)[0])) != 1 or vert == h.successors(vert)[0]:
+        continue
+
+
+ succ = h.successors(vert)[0]
+ preds = h.predecessors(vert)
+
+ if succ in preds:
+ continue
+
+ h.node[succ]['segment'] = h.node[vert]['segment'] + h.node[succ]['segment']
+ h.node[succ]['path'] = h.node[vert]['path'] + h.node[succ]['path'][1:]
+
+ for pred in preds:
+ #print pred, succ
+ h.add_edges_from([(pred,succ)])
+ h.remove_edge(pred,vert)
+
+ h.remove_edge(vert,succ)
+ h.remove_node(vert)
+
+
+
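+# map every contig node to its reverse-complement partner by looking up the reversed, orientation-flipped read path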
+path_to_vert = {}
+RCmap = {}
+
+for i, vert in enumerate(h.nodes()):
+ path = h.node[vert]['path']
+ path_to_vert[':'.join(path)] = vert
+
+for path in path_to_vert:
+ path_to_search = ':'.join(list(reversed([ x.split('_')[0]+'_'+str(1-int(x.split('_')[1])) for x in path.split(':')])))
+ RCmap[path_to_vert[path]] = path_to_vert[path_to_search]
+
+# print path_to_vert
+
+
+# print RCmap
+# print [x for x in h.edges()]
+
+
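+# a two-node component whose nodes point at each other is a circular contig split across two pieces;
+# merge each such pair into a single node with a self-loop, and do the same for its reverse-complement copy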
+vert_to_merge = [x for x in h.nodes() if len(h.successors(x)) == 1 and len(h.predecessors(h.successors(x)[0])) == 1 and
+ x != h.successors(x)[0] and len(h.successors(h.successors(x)[0])) == 1 and h.successors(h.successors(x)[0])[0]== x
+ and len(nx.node_connected_component(h.to_undirected(), x)) == 2]
+
+
+for vert in vert_to_merge:
+
+ if vert not in h.nodes():
+ continue
+
+ if len(h.successors(vert)) == 1 and h.successors(vert)[0] == vert:
+ continue
+
+ succ = h.successors(vert)[0]
+
+ # print vert, succ
+
+ vertRC = RCmap[vert]
+ # print vert, vertRC
+
+ predRC = h.predecessors(vertRC)[0]
+
+ # print h.node[vert]['path']
+ # print h.node[succ]['path']
+
+ h.node[succ]['segment'] = h.node[vert]['segment'] + h.node[succ]['segment']
+ h.node[predRC]['segment'] = h.node[predRC]['segment'] + h.node[vertRC]['segment']
+
+ h.node[succ]['path'] = h.node[vert]['path'] + h.node[succ]['path']
+ h.node[predRC]['path'] = h.node[predRC]['path'] + h.node[vertRC]['path']
+
+ # print vert, succ, predRC, vertRC
+
+ h.add_edges_from([(succ,succ)])
+ h.add_edges_from([(predRC,predRC)])
+
+ h.remove_node(vert)
+ h.remove_node(vertRC)
+
+
+
+
+
+for i, vert in enumerate(h.nodes()):
+ pass
+ #print i,len(h.node[vert]['path'])
+
+cnt = 0
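+# write the draft contigs as an edges.list file: a '>Unitig' header per contig, followed by one line per
+# consecutive read pair giving the two read ids and orientations, the total raw overlap length on the two
+# reads, and the raw start/end coordinates of the overlap on each read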
+with open(outfile, 'w') as f:
+ for i,node in enumerate(h.nodes()):
+ #print node
+ #print h.node[node]
+ path = h.node[node]['path']
+ h.node[node]['contig_id'] = cnt
+ cnt += 1
+ f.write('>Unitig%d\n'%(i))
+ if len(path) == 1:
+ #print path[0]
+ f.write(' '.join([path[0].split('_')[0], path[0].split('_')[1], str(read_tuples_raw[path[0]][0]), str(read_tuples_raw[path[0]][1])]) + '\n')
+ for j in range(len(path)-1):
+ nodeA = path[j].lstrip("B")
+ nodeB = path[j+1].lstrip("B")
+
+ d = in_graph.get_edge_data(path[j],path[j+1])
+ try:
+ f.write('%s %s %s %s %d %d %d %d %d\n'%(nodeA.split('_')[0],nodeA.split('_')[1] , nodeB.split('_')[0],
+ nodeB.split('_')[1], -d['read_a_start_raw'] + d['read_a_end_raw'] - d['read_b_start_raw'] + d['read_b_end_raw'],
+ d['read_a_start_raw'], d['read_a_end_raw'], d['read_b_start_raw'], d['read_b_end_raw']))
+ except:
+ print "in error"
+ print nodeB
+ print node
+ print h.node[node]['start_read']
+ print h.node[node]['end_read']
+ print h.node[node]['path']
+ print len(h.node[node]['segment'])
+ print d
+ raise
+
+
+# one_sided_contigs = []
+
+observed_paths = []
+cnt = 0
+
+out_graphml_name = filedir + '/' + filename +'_draft.graphml'
+
+
+gfaname = filedir + '/' + filename+ '_draft_python.gfa'
+if len(sys.argv) > 3:
+ consensus_name = sys.argv[3]
+else:
+ consensus_name = ''
+
+consensus_contigs = []
+try:
+ with open(consensus_name) as f:
+ for line in f:
+ if line[0] != '>':
+ consensus_contigs.append(line.strip())
+except:
+ pass
+# for i, vert in enumerate(h.nodes()):
+# print i,len(h.node[vert]['path']), len(h.node[vert]['segment']), len(consensus_contigs[i])
+
+
+one_sided_contigs = []
+
+observed_paths = []
+
+for i, vert in enumerate(h.nodes()):
+ path = [x.split('_')[0] for x in h.node[vert]['path']]
+ path_to_search = list(reversed(path))
+ if path_to_search not in observed_paths:
+ observed_paths.append(path)
+ one_sided_contigs.append(h.node[vert]['segment'])
+
+
+
+# commented out the block below so that the non-reverse-complemented contigs are not produced here
+
+# out_nonrevcomp_name = filedir + '/' + filename +'_nonrevcomp.fasta'
+# writer = FastaIO.FastaWriter(out_nonrevcomp_name)
+# for i, ctg in enumerate(one_sided_contigs):
+# print i, len(ctg)
+# new_header = str(i)
+# writer.writeRecord(new_header, ctg)
+
+
+
+
+
+
+#last = h.nodes()[-1]
+#print h.node[last]
+#path_last = h.node[last]['path']
+
+
+
+#for i in range(len(path_last)-1):
+# read_a = path_last[i]
+# read_b = path_last[i+1]
+# print read_a, read_b, in_graph.edge[read_a][read_b]
+
+for i,node in enumerate(h.nodes()):
+ h.node[node]['path'] = ';'.join(h.node[node]['path'])
+nx.write_graphml(h,out_graphml_name)
+
+
+# with open(gfaname,'w') as f:
+# f.write("H\tVN:Z:1.0\n")
+# for i,vert in enumerate(h.nodes()):
+# seg = h.node[vert]['segment']
+# print len(seg)
+
+# seg_line = "S\t"+vert+"\t"+seg + '\n'
+# f.write(seg_line)
+# for edge in h.edges():
+# edge_line = "L\t"+edge[0]+"\t+\t"+edge[1]+"\t+\t0M\n"
+# f.write(edge_line)
+
+
diff --git a/scripts/get_draft_path_norevcomp.py b/scripts/get_draft_path_norevcomp.py
new file mode 100644
index 0000000..09dab9d
--- /dev/null
+++ b/scripts/get_draft_path_norevcomp.py
@@ -0,0 +1,516 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import subprocess
+from parse_read import *
+import numpy as np
+import networkx as nx
+import itertools
+from pbcore.io import FastaIO
+
+filedir = sys.argv[1]
+filename = sys.argv[2]
+graphml_path = sys.argv[3]
+
+in_graph = nx.read_graphml(graphml_path)
+
+reads = sorted(list(set([int(x.split("_")[0].lstrip("B")) for x in in_graph.nodes()])))
+
+dbshow_reads = ' '.join([str(x+1) for x in reads])
+
+DBshow_cmd = "DBshow "+ filedir+'/'+ filename+' '+dbshow_reads
+stream = subprocess.Popen(DBshow_cmd.split(),
+ stdout=subprocess.PIPE,bufsize=1)
+reads_queried = parse_read(stream.stdout)
+read_dict = {}
+for read_id,read in itertools.izip(reads,reads_queried):
+ rdlen = len(read[1])
+# print read
+ read_dict[read_id] = read
+
+complement = {'A':'T','C': 'G','T':'A', 'G':'C','a':'t','t':'a','c':'g','g':'c'}
+
+def reverse_complement(string):
+ return "".join(map(lambda x:complement[x],reversed(string)))
+
+def get_string(path):
+ # print path
+ ret_str = ''
+ for itm in path:
+ # print itm
+ read_id,rd_orientation = itm[0].split("_")
+ if rd_orientation == '1':
+ assert itm[1][0] >= itm[1][1]
+ str_st = itm[1][1]
+ str_end = itm[1][0]
+ read_str = read_dict[int(read_id.lstrip("B"))][1][str_st:str_end]
+ else:
+
+ assert itm[1][0] <= itm[1][1]
+ str_st = itm[1][0]
+ str_end = itm[1][1]
+ read_str = reverse_complement(read_dict[int(read_id.lstrip("B"))][1][str_st:str_end])
+# print str_st,str_end
+# print read_id
+# print read_dict[int(read_id)][str_st:str_end]
+# print read_str
+ # print 'read len',len(read_str)
+ ret_str += read_str
+ # print len(path), len(ret_str)
+ return ret_str
+
+
+
+
+vertices_of_interest = set([x for x in in_graph if in_graph.in_degree(x) != 1 or in_graph.out_degree(x) != 1])
+
+read_tuples = {}
+
+for vert in vertices_of_interest:
+
+ vert_id, vert_or = vert.split("_")
+ if vert_or == '1':
+ continue
+ vert_len = len(read_dict[int(vert_id)][1])
+# print vert_len
+ read_starts = [(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]
+ read_ends = [(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]
+ if read_starts:
+ read_start = max(read_starts)
+ else:
+ read_start = 0
+ if read_ends:
+ read_end = min(read_ends)
+ else:
+ read_end = vert_len
+ read_tuples[vert] = (read_start,read_end)
+ # print read_starts, read_ends, vert
+
+
+for vert in vertices_of_interest:
+
+ vert_id, vert_or = vert.split("_")
+ if vert_or == '1':
+ read_tuples[vert] = read_tuples[vert_id+"_0"]
+
+
+start_vertices = [x for x in vertices_of_interest if in_graph.in_degree(x) == 0 or in_graph.out_degree(x) > 1]
+h = nx.DiGraph()
+
+read_tuples_raw = {}
+for vertex in vertices_of_interest:
+ successors = in_graph.successors(vertex)
+ if successors:
+ succ = successors[0]
+ d = in_graph.get_edge_data(vertex,succ)
+ read_tuples_raw[vertex] = (d['read_a_start_raw'], d['read_a_end_raw'])
+ else:
+ predecessors = in_graph.predecessors(vertex)
+ if not len(predecessors) == 0:
+ pred = predecessors[0]
+ d = in_graph.get_edge_data(pred,vertex)
+ read_tuples_raw[vertex] = (d['read_b_start_raw'], d['read_b_end_raw'])
+ else:
+ read_tuples_raw[vertex] = (0,0)
+
+
+for vertex in vertices_of_interest:
+ h.add_node(vertex)
+ if vertex.split("_")[1] == '0':
+ path_var = [(vertex,(read_tuples[vertex][0], read_tuples[vertex][1]))]
+ else:
+ path_var = [(vertex,(read_tuples[vertex][1], read_tuples[vertex][0]))]
+ #print path_var
+ segment = get_string(path_var)
+ h.node[vertex]['start_read'] = path_var[0][1][0]
+ h.node[vertex]['end_read'] = path_var[0][1][1]
+ h.node[vertex]['path'] = [vertex]
+ h.node[vertex]['segment'] = segment
+
+vertices_used = set([x for x in h.nodes()])
+contig_no = 1
+for start_vertex in vertices_of_interest:
+ first_out_vertices = in_graph.successors(start_vertex)
+ # print start_vertex, first_out_vertices
+ for vertex in first_out_vertices:
+ predecessor = start_vertex
+ start_vertex_id,start_vertex_or = start_vertex.split("_")
+ cur_vertex = vertex
+ if start_vertex_or == '0':
+ cur_path = [(start_vertex,(read_tuples[start_vertex][1],
+ in_graph.edge[start_vertex][cur_vertex]['read_a_start']))]
+ elif start_vertex_or == '1':
+ cur_path = [(start_vertex,(read_tuples[start_vertex][0],
+ in_graph.edge[start_vertex][cur_vertex]['read_a_start']))]
+
+ while cur_vertex not in vertices_of_interest:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+
+ stop_vertex_id, stop_vertex_or = cur_vertex.split("_")
+ if stop_vertex_or == '0':
+ cur_path.append((cur_vertex,(in_graph.edge[predecessor][cur_vertex]['read_b_start'],
+ read_tuples[cur_vertex][0])))
+ elif stop_vertex_or == '1':
+ cur_path.append((cur_vertex,(in_graph.edge[predecessor][cur_vertex]['read_b_start'],
+ read_tuples[cur_vertex][1])))
+
+
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+# print cur_path
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ h.node[node_name]['start_read'] = cur_path[0][1][0]
+ h.node[node_name]['end_read'] = cur_path[-1][1][1]
+ h.node[node_name]['segment'] = get_string(cur_path)
+ h.add_edges_from([(start_vertex,node_name),(node_name,cur_vertex)])
+# paths.append(cur_path)
+
+#print read_tuples
+
+while set(in_graph.nodes())-vertices_used:
+ vert = list(set(in_graph.nodes())-vertices_used)[0]
+ vert_id,vert_or = vert.split("_")
+ if vert_or == '0':
+ read_start = min( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ read_end = max( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ vertRC = vert_id+"_1"
+ else:
+ read_start = max( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ read_end = min( min([(in_graph.edge[x][vert]['read_b_start']) for x in in_graph.predecessors(vert)]),
+ max([(in_graph.edge[vert][x]['read_a_start']) for x in in_graph.successors(vert)]))
+ vertRC = vert_id+"_0"
+
+ successor_start = in_graph.successors(vert)[0]
+ d = in_graph.get_edge_data(vert,successor_start)
+ read_tuples_raw[vert] = (d['read_a_start_raw'], d['read_a_end_raw'])
+
+ successor_start = in_graph.successors(vertRC)[0]
+ d = in_graph.get_edge_data(vertRC,successor_start)
+ read_tuples_raw[vertRC] = (d['read_a_start_raw'], d['read_a_end_raw'])
+
+ h.add_node(vert)
+ node_path = [vert]
+ h.node[vert]['path'] = node_path
+ h.node[vert]['start_read'] = read_start
+ h.node[vert]['end_read'] = read_end
+ h.node[vert]['segment'] = get_string([(vert,(read_start, read_end))])
+ vertices_used.add(vert)
+
+ first_out_vertices = in_graph.successors(vert)
+ for vertex in first_out_vertices:
+ predecessor = vert
+ cur_vertex = vertex
+ cur_path = []
+ while cur_vertex != vert:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+# print cur_path
+
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ try:
+ h.node[node_name]['start_read'] = cur_path[0][1][0]
+ h.node[node_name]['end_read'] = cur_path[-1][1][1]
+ except:
+ print path_var
+ raise
+ h.node[node_name]['segment'] = get_string(cur_path)
+ h.add_edges_from([(vert,node_name),(node_name,vert)])
+
+ if vertRC not in vertices_used:
+ h.add_node(vertRC)
+ h.node[vertRC]['segment'] = get_string([(vertRC,(read_end, read_start))])
+ h.node[vertRC]['path'] = [vertRC]
+ h.node[vertRC]['start_read'] = read_end
+ h.node[vertRC]['end_read'] = read_start
+
+ vertices_used.add(vertRC)
+ first_out_vertices = in_graph.successors(vertRC)
+ for vertex in first_out_vertices:
+ predecessor = vertRC
+ cur_vertex = vertex
+ cur_path = []
+ while cur_vertex != vertRC:
+ successor = in_graph.successors(cur_vertex)[0]
+ start_point = in_graph.edge[predecessor][cur_vertex]['read_b_start']
+ end_point = in_graph.edge[cur_vertex][successor]['read_a_start']
+ cur_path.append((cur_vertex,(start_point,end_point)))
+ vertices_used.add(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+ node_name = str(contig_no)
+ h.add_node(node_name)
+ contig_no += 1
+ # print cur_path
+
+ node_path = [x[0] for x in cur_path]
+ h.node[node_name]['path'] = node_path
+ h.node[node_name]['start_read'] = cur_path[0][1][0]
+ h.node[node_name]['end_read'] = cur_path[-1][1][1]
+ h.node[node_name]['segment'] = get_string(cur_path)
+ # print len(cur_path)
+ h.add_edges_from([(vertRC,node_name),(node_name,vertRC)])
+
+
+
+outfile = filedir + '/' + filename + ".edges.list"
+outfile_norevcomp = filedir + '/' + filename + ".norevcomp.edges.list"
+
+
+vert_to_merge = [x for x in h.nodes() if len(h.successors(x)) == 1 and len(h.predecessors(h.successors(x)[0])) == 1 and
+ x != h.successors(x)[0] and
+ len(nx.node_connected_component(h.to_undirected(), x)) > 2]
+
+while True:
+
+ vert_to_merge = [x for x in h.nodes() if len(h.successors(x)) == 1 and len(h.predecessors(h.successors(x)[0])) == 1 and
+ x != h.successors(x)[0] and
+ len(nx.node_connected_component(h.to_undirected(), x)) > 2]
+
+ if not vert_to_merge:
+ break
+ vert = vert_to_merge[0]
+ #print vert,
+ succ = h.successors(vert)[0]
+ preds = h.predecessors(vert)
+ h.node[succ]['segment'] = h.node[vert]['segment'] + h.node[succ]['segment']
+ h.node[succ]['path'] = h.node[vert]['path'] + h.node[succ]['path'][1:]
+
+ for pred in preds:
+ #print pred, succ
+ h.add_edges_from([(pred,succ)])
+ h.remove_edge(pred,vert)
+
+ h.remove_edge(vert,succ)
+ h.remove_node(vert)
+
+path_to_vert = {}
+RCmap = {}
+
+for i, vert in enumerate(h.nodes()):
+ path = h.node[vert]['path']
+ path_to_vert[':'.join(path)] = vert
+
+for path in path_to_vert:
+ path_to_search = ':'.join(list(reversed([ x.split('_')[0]+'_'+str(1-int(x.split('_')[1])) for x in path.split(':')])))
+ RCmap[path_to_vert[path]] = path_to_vert[path_to_search]
+
+# print path_to_vert
+
+
+# print RCmap
+# print [x for x in h.edges()]
+
+while True:
+    vert_to_merge = [x for x in h.nodes() if len(h.successors(x)) == 1 and len(h.predecessors(h.successors(x)[0])) == 1 and
+    x != h.successors(x)[0] and len(h.successors(h.successors(x)[0])) == 1 and h.successors(h.successors(x)[0])[0] == x and
+    len(nx.node_connected_component(h.to_undirected(), x)) == 2]
+
+ if not vert_to_merge:
+ break
+
+ vert = vert_to_merge[0]
+ succ = h.successors(vert)[0]
+
+ # print vert, succ
+
+ vertRC = RCmap[vert]
+ # print vert, vertRC
+
+ predRC = h.predecessors(vertRC)[0]
+
+ # print h.node[vert]['path']
+ # print h.node[succ]['path']
+
+ h.node[succ]['segment'] = h.node[vert]['segment'] + h.node[succ]['segment']
+ h.node[predRC]['segment'] = h.node[predRC]['segment'] + h.node[vertRC]['segment']
+
+ h.node[succ]['path'] = h.node[vert]['path'] + h.node[succ]['path']
+ h.node[predRC]['path'] = h.node[predRC]['path'] + h.node[vertRC]['path']
+
+ # print vert, succ, predRC, vertRC
+
+ h.add_edges_from([(succ,succ)])
+ h.add_edges_from([(predRC,predRC)])
+
+ h.remove_node(vert)
+ h.remove_node(vertRC)
+
+
+
+
+for i, vert in enumerate(h.nodes()):
+ print i,len(h.node[vert]['path'])
+
+# with open(outfile, 'w') as f:
+# for i,node in enumerate(h.nodes()):
+# #print node
+# #print h.node[node]
+# path = h.node[node]['path']
+
+# f.write('>Unitig%d\n'%(i))
+# if len(path) == 1:
+# #print path[0]
+# f.write(' '.join([path[0].split('_')[0], path[0].split('_')[1], str(read_tuples_raw[path[0]][0]), str(read_tuples_raw[path[0]][1])]) + '\n')
+# for j in range(len(path)-1):
+# nodeA = path[j].lstrip("B")
+# nodeB = path[j+1].lstrip("B")
+
+# d = in_graph.get_edge_data(path[j],path[j+1])
+# try:
+# f.write('%s %s %s %s %d %d %d %d %d\n'%(nodeA.split('_')[0],nodeA.split('_')[1] , nodeB.split('_')[0],
+# nodeB.split('_')[1], -d['read_a_start_raw'] + d['read_a_end_raw'] - d['read_b_start_raw'] + d['read_b_end_raw'],
+# d['read_a_start_raw'], d['read_a_end_raw'], d['read_b_start_raw'], d['read_b_end_raw']))
+# except:
+# print "in error"
+# print nodeB
+# print node
+# print h.node[node]['start_read']
+# print h.node[node]['end_read']
+# print h.node[node]['path']
+# print len(h.node[node]['segment'])
+# print d
+# raise
+
+
+# one_sided_contigs = []
+
+observed_paths = []
+cnt = 0
+
+
+out_graphml_name = filedir + '/' + filename +'_draft.graphml'
+
+
+gfaname = filedir + '/' + filename+ '_draft_python.gfa'
+if len(sys.argv) > 3:
+ consensus_name = sys.argv[3]
+else:
+ consensus_name = ''
+
+consensus_contigs = []
+try:
+ with open(consensus_name) as f:
+ for line in f:
+ if line[0] != '>':
+ consensus_contigs.append(line.strip())
+except:
+ pass
+# for i, vert in enumerate(h.nodes()):
+# print i,len(h.node[vert]['path']), len(h.node[vert]['segment']), len(consensus_contigs[i])
+
+
+one_sided_contigs = []
+
+observed_paths = []
+
+vertices_to_keep = []
+
+for i, vert in enumerate(h.nodes()):
+ path = [x.split('_')[0] for x in h.node[vert]['path']]
+ path_to_search = list(reversed(path))
+ if path_to_search not in observed_paths:
+ observed_paths.append(path)
+ one_sided_contigs.append(h.node[vert]['segment'])
+ vertices_to_keep.append(vert)
+
+
+
+# commented out the block below so that the non-reverse-complemented contigs are not produced here
+
+# out_nonrevcomp_name = filedir + '/' + filename +'_nonrevcomp.fasta'
+# writer = FastaIO.FastaWriter(out_nonrevcomp_name)
+# for i, ctg in enumerate(one_sided_contigs):
+# print i, len(ctg)
+# new_header = str(i)
+# writer.writeRecord(new_header, ctg)
+
+
+
+
+with open(outfile, 'w') as f:
+ for i,node in enumerate(h.nodes()):
+ #print node
+ #print h.node[node]
+ path = h.node[node]['path']
+
+ if node in vertices_to_keep:
+
+ f.write('>Unitig%d\n'%(cnt))
+ print "Writing contig number"
+ print cnt
+ cnt += 1
+ if len(path) == 1:
+ #print path[0]
+ f.write(' '.join([path[0].split('_')[0], path[0].split('_')[1], str(read_tuples_raw[path[0]][0]), str(read_tuples_raw[path[0]][1])]) + '\n')
+ for j in range(len(path)-1):
+ nodeA = path[j].lstrip("B")
+ nodeB = path[j+1].lstrip("B")
+
+ d = in_graph.get_edge_data(path[j],path[j+1])
+ try:
+ f.write('%s %s %s %s %d %d %d %d %d\n'%(nodeA.split('_')[0],nodeA.split('_')[1] , nodeB.split('_')[0],
+ nodeB.split('_')[1], -d['read_a_start_raw'] + d['read_a_end_raw'] - d['read_b_start_raw'] + d['read_b_end_raw'],
+ d['read_a_start_raw'], d['read_a_end_raw'], d['read_b_start_raw'], d['read_b_end_raw']))
+ except:
+ print "in error"
+ # print nodeB
+ # print node
+ # print h.node[node]['start_read']
+ # print h.node[node]['end_read']
+ # print h.node[node]['path']
+ # print len(h.node[node]['segment'])
+ print d
+ print in_graph
+ raise
+
+
+#last = h.nodes()[-1]
+#print h.node[last]
+#path_last = h.node[last]['path']
+
+
+
+#for i in range(len(path_last)-1):
+# read_a = path_last[i]
+# read_b = path_last[i+1]
+# print read_a, read_b, in_graph.edge[read_a][read_b]
+
+for i,node in enumerate(h.nodes()):
+ h.node[node]['path'] = ';'.join(h.node[node]['path'])
+nx.write_graphml(h,out_graphml_name)
+
+
+# with open(gfaname,'w') as f:
+# f.write("H\tVN:Z:1.0\n")
+# for i,vert in enumerate(h.nodes()):
+# seg = h.node[vert]['segment']
+# print len(seg)
+
+# seg_line = "S\t"+vert+"\t"+seg + '\n'
+# f.write(seg_line)
+# for edge in h.edges():
+# edge_line = "L\t"+edge[0]+"\t+\t"+edge[1]+"\t+\t0M\n"
+# f.write(edge_line)
+
+
diff --git a/scripts/interface_utils.py b/scripts/interface_utils.py
new file mode 100755
index 0000000..bb9234a
--- /dev/null
+++ b/scripts/interface_utils.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import subprocess
+from parse_read import *
+from parse_alignment import *
+from parse_qv import *
+
+#filename = sys.argv[1]
+#readarg = sys.argv[2]
+
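+# thin wrappers around the DBshow/DBdump/LAshow/LA4Awesome command-line tools; each call streams the
+# tool's stdout through the corresponding parser and returns a generator over the parsed records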
+def get_reads(filename, readlist):
+ stream = subprocess.Popen(["DBshow", filename] + map(str,readlist),
+ stdout=subprocess.PIPE,bufsize=1)
+ reads = parse_read(stream.stdout) # generator
+ return reads
+
+def get_QV(filename, readlist):
+ stream = subprocess.Popen(["DBdump", filename, '-i'] + map(str,readlist),
+ stdout=subprocess.PIPE,bufsize=1)
+ qv = parse_qv(stream.stdout) # generator
+ return qv
+
+
+def get_alignments(filename, readlist):
+ stream = subprocess.Popen(["LAshow", filename,filename]+ map(str,readlist),
+ stdout=subprocess.PIPE,bufsize=1)
+ alignments = parse_alignment(stream.stdout) # generator
+ return alignments
+
+
+def get_alignments2(filename, alignmentname, readlist):
+ stream = subprocess.Popen(["LA4Awesome", filename, filename, alignmentname]+ map(str,readlist),
+ stdout=subprocess.PIPE,bufsize=1)
+ alignments = parse_alignment2(stream.stdout) # generator
+ return alignments
+
+
+def get_alignments_mapping(filename, ref, alignmentname, readlist):
+ stream = subprocess.Popen(["LA4Awesome", filename, ref, alignmentname]+ map(str,readlist)+ ['-F'],
+ stdout=subprocess.PIPE,bufsize=1)
+ alignments = parse_alignment2(stream.stdout) # generator
+ return alignments
+
+def get_alignments_mapping2(ref, filename, alignmentname):
+ print ref,filename,alignmentname
+ stream = subprocess.Popen(["LA4Awesome", ref, filename, alignmentname],
+ stdout=subprocess.PIPE,bufsize=1)
+ alignments = parse_alignment2(stream.stdout) # generator
+ return alignments
+
+
+
+def get_alignments_mapping3(ref, filename, alignmentname, contig_no):
+ print ref,filename,alignmentname
+ stream = subprocess.Popen(["LA4Awesome", ref, filename, alignmentname, contig_no],
+ stdout=subprocess.PIPE,bufsize=1)
+ alignments = parse_alignment2(stream.stdout) # generator
+ return alignments
+
+def get_all_reads(filename):
+ stream = subprocess.Popen(["DBshow", filename],
+ stdout=subprocess.PIPE,bufsize=1)
+ reads = parse_read(stream.stdout) # generator
+ return reads
+
+def get_all_alignments(filename):
+ stream = subprocess.Popen(["LAshow", filename, filename ],
+ stdout=subprocess.PIPE,bufsize=1)
+ alignments = parse_alignment(stream.stdout) # generator
+ return alignments
+
+def get_all_alignments2(filename, alignmentname):
+ stream = subprocess.Popen(["LA4Awesome", filename, filename, alignmentname ],
+ stdout=subprocess.PIPE,bufsize=1)
+ alignments = parse_alignment2(stream.stdout) # generator
+ return alignments
+
+def get_all_reads_in_alignment_with_one(filename,read):
+ this_read = get_reads(filename,[read])
+ alignments = list(get_alignments(filename,[read]))
+ readlist = map(lambda x:x[2],alignments)
+ print readlist
+ other_reads = get_reads(filename,readlist)
+
+ return [list(this_read), list(other_reads), alignments] # note that this is not a generator
+
+
+# test
+#for item in get_reads('G',[1]):
+# print item
+
+#for item in get_alignments('G',[1]):
+# print item
+
+#for item in get_alignments2('G','G.1.las',[1]):
+# print item
+
+#for item in get_all_reads_in_alignment_with_one('G',1):
+# print item
+
+#for item in get_reads('G', [1,2,3]):
+# print item
+
diff --git a/scripts/longest_path.py b/scripts/longest_path.py
new file mode 100755
index 0000000..ad583a3
--- /dev/null
+++ b/scripts/longest_path.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import sys
+from collections import Counter
+
+def longest_path(G):
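+    # longest path in a DAG: dynamic programming over a topological order, then backtracking from the farthest node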
+ dist = {} # stores [node, distance] pair
+ for node in nx.topological_sort(G):
+ # pairs of dist,node for all incoming edges
+ pairs = [(dist[v][0]+1,v) for v in G.pred[node]]
+ if pairs:
+ dist[node] = max(pairs)
+ else:
+ dist[node] = (0, node)
+ node,(length,_) = max(dist.items(), key=lambda x:x[1])
+ path = []
+ while length > 0:
+ path.append(node)
+ length,node = dist[node]
+ return list(reversed(path))
+
+
+filename = sys.argv[1]
+
+
+g = nx.DiGraph()
+
+with open(filename,'r') as f:
+ for line in f.xreadlines():
+ g.add_edge(*(line.strip().split('->')))
+
+
+print nx.info(g)
+degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+print Counter(degree_sequence)
+
+for i in range(7):
+ for node in g.nodes():
+ if g.in_degree(node) == 0:
+ g.remove_node(node)
+
+ print nx.info(g)
+
+degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+print Counter(degree_sequence)
+
+
+def rev(string):
+ if string[-1] == '\'':
+ return string[:-1]
+ else:
+ return string+'\''
+
+for edge in g.edges():
+ g.add_edge(rev(edge[1]), rev(edge[0]))
+ #print edge
+ #print rev(edge[1]), rev(edge[0])
+
+print nx.info(g)
+nx.write_graphml(g, filename.split('.')[0]+'.graphml')
+#print(list(nx.dfs_edges(g,sys.argv[2])))
+#p=nx.shortest_path(g)
diff --git a/scripts/merge_hinges.py b/scripts/merge_hinges.py
new file mode 100644
index 0000000..e6c42b1
--- /dev/null
+++ b/scripts/merge_hinges.py
@@ -0,0 +1,606 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import random
+import sys
+from collections import Counter
+import ujson
+
+
+
+def dead_end_clipping(G,threshold):
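+    # clip dead-end spurs: from every source (and, symmetrically, every sink) follow the unbranched path
+    # and delete it if it is at most `threshold` nodes long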
+# H=nx.DiGraph()
+ H = G.copy()
+ start_nodes = set([x for x in H.nodes() if H.in_degree(x) ==0])
+
+ for st_node in start_nodes:
+ cur_path = [st_node]
+
+ if len(H.successors(st_node)) == 1:
+ cur_node = H.successors(st_node)[0]
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1 and len(cur_path) < threshold + 2:
+ cur_path.append(cur_node)
+ cur_node = H.successors(cur_node)[0]
+
+
+ if len(cur_path) <= threshold:
+ for vertex in cur_path:
+ H.remove_node(vertex)
+
+ end_nodes = set([x for x in H.nodes() if H.out_degree(x) ==0])
+
+ for end_node in end_nodes:
+ cur_path = [end_node]
+ if len(H.predecessors(end_node)) == 1:
+ cur_node = H.predecessors(end_node)[0]
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1 and len(cur_path) < threshold + 2:
+ cur_path.append(cur_node)
+ cur_node = H.predecessors(cur_node)[0]
+
+ if len(cur_path) <= threshold:
+ for vertex in cur_path:
+ H.remove_node(vertex)
+
+ return H
+
+
+
+# In[9]:
+
+def z_clipping(G,threshold,in_hinges,out_hinges,print_z = False):
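+    # remove short 'z' paths: an unbranched path of at most `threshold` edges that leaves a node with
+    # out-degree > 1 and rejoins a node with in-degree > 1 is deleted, unless either endpoint is a protected hinge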
+ H = G.copy()
+
+ start_nodes = set([x for x in H.nodes() if H.out_degree(x) > 1 and x not in out_hinges])
+
+ for st_node in start_nodes:
+ for sec_node in H.successors(st_node):
+
+ if H.out_degree(st_node) == 1:
+ break
+
+ cur_node = sec_node
+ cur_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+ cur_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ if len(cur_path) <= threshold and H.in_degree(cur_node) > 1 and H.out_degree(st_node) > 1 and cur_node not in in_hinges:
+ if print_z:
+ print cur_path
+
+ for edge in cur_path:
+ H.remove_edge(edge[0],edge[1])
+ for j in range(len(cur_path)-1):
+ H.remove_node(cur_path[j][1])
+
+ end_nodes = set([x for x in H.nodes() if H.in_degree(x) > 1 and x not in in_hinges])
+
+ for end_node in end_nodes:
+ for sec_node in H.predecessors(end_node):
+
+ if H.in_degree(end_node) == 1:
+ break
+
+
+ cur_node = sec_node
+ cur_path = [[cur_node,end_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+ cur_path.append([H.predecessors(cur_node)[0],cur_node])
+ cur_node = H.predecessors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ if len(cur_path) <= threshold and H.out_degree(cur_node) > 1 and H.in_degree(end_node) > 1 and cur_node not in out_hinges:
+ if print_z:
+ print cur_path
+ for edge in cur_path:
+ H.remove_edge(edge[0],edge[1])
+ for j in range(len(cur_path)-1):
+ H.remove_node(cur_path[j][0])
+
+ return H
+
+
+
+
+
+def merge_path(g,in_node,node,out_node):
+
+ g.add_edge(in_node,out_node,hinge_edge = -1,false_positive = 0)
+ g.remove_node(node)
+
+
+
+def merge_a_to_b(g,node_a,node_b):
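+    # collapse node_a into node_b: every neighbour of node_a is reconnected to node_b (the new edges are
+    # marked as hinge edges) and node_a is removed from the graph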
+
+ if node_a not in g.nodes() or node_b not in g.nodes():
+ return
+
+ for node in g.predecessors(node_a):
+ if node != node_b:
+ g.add_edge(node,node_b,hinge_edge = 1,false_positive = 0)
+
+ for node in g.successors(node_a):
+ if node != node_b:
+ g.add_edge(node_b,node,hinge_edge = 1,false_positive = 0)
+
+ g.remove_node(node_a)
+
+
+def random_condensation(G,n_nodes):
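+    # randomly contract interior nodes of unbranched chains (in- and out-degree 1, with unbranched neighbours),
+    # skipping nodes incident to a false_positive edge, until at most n_nodes remain or max_iter attempts are spent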
+
+ g = G.copy()
+
+ max_iter = 20000
+ iter_cnt = 0
+
+ while len(g.nodes()) > n_nodes and iter_cnt < max_iter:
+
+ iter_cnt += 1
+
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ #print in_node, node, out_node
+# merge_path(g,in_node,node,out_node)
+
+ bad_node=False
+ for in_edge in g.in_edges(node):
+ if g.edge[in_edge[0]][in_edge[1]]['false_positive']==1:
+ bad_node=True
+ for out_edge in g.out_edges(node):
+ if g.edge[out_edge[0]][out_edge[1]]['false_positive']==1:
+ bad_node=True
+ if not bad_node:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+ if iter_cnt >= max_iter:
+ print "couldn't finish sparsification"+str(len(g.nodes()))
+
+ return g
+
+
+
+def add_groundtruth(g,json_file,in_hinges,out_hinges):
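+    # annotate every node with its alignment interval on the reference (taken from the json mapping) and flag
+    # hinge nodes; an edge is then marked false_positive unless the reference intervals of its two reads overlap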
+
+ mapping = ujson.load(json_file)
+
+ print 'getting mapping'
+ mapped_nodes=0
+ print str(len(mapping))
+ print str(len(g.nodes()))
+
+ slack = 500
+
+
+ for node in g.nodes():
+ # print node
+ node_base=node.split("_")[0]
+ # print node_base
+
+ #print node
+ if mapping.has_key(node_base):
+ g.node[node]['aln_start'] = min (mapping[node_base][0][0],mapping[node_base][0][1])
+ g.node[node]['aln_end'] = max(mapping[node_base][0][1],mapping[node_base][0][0])
+# g.node[node]['chr'] = mapping[node_base][0][2]
+ mapped_nodes+=1
+ else:
+ # pass
+ g.node[node]['aln_start'] = 0
+ g.node[node]['aln_end'] = 0
+# g.node[node]['aln_strand'] = 0
+
+ if node in in_hinges or node in out_hinges:
+ g.node[node]['hinge'] = 1
+ else:
+ g.node[node]['hinge'] = 0
+
+
+ for edge in g.edges_iter():
+ in_node=edge[0]
+ out_node=edge[1]
+
+# if ((g.node[in_node]['aln_start'] < g.node[out_node]['aln_start'] and
+# g.node[out_node]['aln_start'] < g.node[in_node]['aln_end']) or
+# (g.node[in_node]['aln_start'] < g.node[out_node]['aln_end'] and
+# g.node[out_node]['aln_end'] < g.node[in_node]['aln_end'])):
+# g.edge[in_node][out_node]['false_positive']=0
+# else:
+# g.edge[in_node][out_node]['false_positive']=1
+
+
+
+ if ((g.node[in_node]['aln_start'] < g.node[out_node]['aln_start'] and
+ g.node[out_node]['aln_start'] < g.node[in_node]['aln_end']) or
+ (g.node[in_node]['aln_start'] < g.node[out_node]['aln_end'] and
+ g.node[out_node]['aln_end'] < g.node[in_node]['aln_end'])):
+ g.edge[in_node][out_node]['false_positive']=0
+ else:
+ g.edge[in_node][out_node]['false_positive']=1
+
+ return g
+
+
+
+
+
+
+def read_graph(edges_file,hg_file,gt_file, hinge_file):
+
+
+ prefix = edges_file.split('.')[0]
+
+ with open(gt_file) as f:
+ read_dict = ujson.load(f)
+
+ g = nx.DiGraph()
+
+ hinge_nodes = []
+ hinge_pos = {}
+
+
+ with open (hinge_file) as f:
+ for lines in f:
+ lines1=lines.split()
+ hinge_nodes.append(lines1[0] + "_0_" + lines1[1])
+ hinge_nodes.append(lines1[0] + "_1_" + lines1[1])
+
+
+ # if lines1[0] not in hinge_pos:
+ # hinge_pos[lines]
+ # hinge_pos[lines1[0]] = lines1[1]
+
+
+ # with open (hg_file) as f:
+ # for lines in f:
+ # lines1=lines.split()
+ # g.add_node(lines1[0] + "_" + lines1[2])
+ # g.add_node(lines1[1] + "_" + lines1[3])
+ # if lines1[0] in read_dict:
+ # g.node[lines1[0] + "_" + lines1[2]]['aln_start']=min(read_dict[lines1[0]][0][0],read_dict[lines1[0]][0][1])
+ # g.node[lines1[0] + "_" + lines1[2]]['aln_end']=max(read_dict[lines1[0]][0][0],read_dict[lines1[0]][0][1])
+ # else:
+ # g.node[lines1[0] + "_" + lines1[2]]['aln_start']=0
+ # g.node[lines1[0] + "_" + lines1[2]]['aln_end']=0
+ # if lines1[1] in read_dict:
+ # g.node[lines1[1] + "_" + lines1[3]]['aln_start']=min(read_dict[lines1[1]][0][0],read_dict[lines1[1]][0][1])
+ # g.node[lines1[1] + "_" + lines1[3]]['aln_end']=max(read_dict[lines1[1]][0][0],read_dict[lines1[1]][0][1])
+ # else:
+ # g.node[lines1[1] + "_" + lines1[3]]['aln_start']=0
+ # g.node[lines1[1] + "_" + lines1[3]]['aln_end']=0
+
+ # if lines1[0] in hinge_nodes:
+ # g.node[lines1[0] + "_" + lines1[2]]['active']=2
+ # else:
+ # g.node[lines1[0] + "_" + lines1[2]]['active']=1
+
+ # if lines1[1] in hinge_nodes:
+ # g.node[lines1[1] + "_" + lines1[3]]['active']=2
+ # else:
+ # g.node[lines1[1] + "_" + lines1[3]]['active']=int(lines1[4])
+
+
+ # g.add_edge(lines1[0] + "_" + lines1[2], lines1[1] + "_" + lines1[3], rev = int(lines1[5]))
+
+
+ # need to construct double stranded hinge graph, so that proper mapping can be found
+
+ with open (hg_file) as f:
+ for lines in f:
+ lines1=lines.split()
+
+ nodeA0 = lines1[0] + "_0_"+ lines1[2]
+ nodeA1 = lines1[0] + "_1_"+ lines1[2]
+ nodeB0 = lines1[1] + "_0_"+ lines1[3]
+ nodeB1 = lines1[1] + "_1_"+ lines1[3]
+
+ nodeA0short = lines1[0] + "_0"
+ nodeA1short = lines1[0] + "_1"
+ nodeB0short = lines1[1] + "_0"
+ nodeB1short = lines1[1] + "_1"
+
+ g.add_node(nodeA0)
+ g.add_node(nodeA1)
+ g.add_node(nodeB0)
+ g.add_node(nodeB1)
+
+ if nodeA0short not in hinge_pos:
+ hinge_pos[nodeA0short] = [int(lines1[2])]
+ hinge_pos[nodeA1short] = [int(lines1[2])]
+ elif lines1[2] not in hinge_pos[nodeA0short]:
+ hinge_pos[nodeA0short].append(int(lines1[2]))
+ hinge_pos[nodeA1short].append(int(lines1[2]))
+
+            if nodeB0short not in hinge_pos:
+ hinge_pos[nodeB0short] = [int(lines1[3])]
+ hinge_pos[nodeB1short] = [int(lines1[3])]
+ elif lines1[3] not in hinge_pos[nodeB0short]:
+ hinge_pos[nodeB0short].append(int(lines1[3]))
+ hinge_pos[nodeB1short].append(int(lines1[3]))
+
+
+ if lines1[0] in read_dict:
+ g.node[lines1[0] + "_0_"+ lines1[2]]['aln_start']=min(read_dict[lines1[0]][0][0],read_dict[lines1[0]][0][1])
+ g.node[lines1[0] + "_0_"+ lines1[2]]['aln_end']=max(read_dict[lines1[0]][0][0],read_dict[lines1[0]][0][1])
+ g.node[lines1[0] + "_1_"+ lines1[2]]['aln_start']=min(read_dict[lines1[0]][0][0],read_dict[lines1[0]][0][1])
+ g.node[lines1[0] + "_1_"+ lines1[2]]['aln_end']=max(read_dict[lines1[0]][0][0],read_dict[lines1[0]][0][1])
+ else:
+ g.node[lines1[0] + "_0_"+ lines1[2]]['aln_start']=0
+ g.node[lines1[0] + "_0_"+ lines1[2]]['aln_end']=0
+ g.node[lines1[0] + "_1_"+ lines1[2]]['aln_start']=0
+ g.node[lines1[0] + "_1_"+ lines1[2]]['aln_end']=0
+ if lines1[1] in read_dict:
+ g.node[lines1[1] + "_0_"+ lines1[3]]['aln_start']=min(read_dict[lines1[1]][0][0],read_dict[lines1[1]][0][1])
+ g.node[lines1[1] + "_0_"+ lines1[3]]['aln_end']=max(read_dict[lines1[1]][0][0],read_dict[lines1[1]][0][1])
+ g.node[lines1[1] + "_1_"+ lines1[3]]['aln_start']=min(read_dict[lines1[1]][0][0],read_dict[lines1[1]][0][1])
+ g.node[lines1[1] + "_1_"+ lines1[3]]['aln_end']=max(read_dict[lines1[1]][0][0],read_dict[lines1[1]][0][1])
+ else:
+ g.node[lines1[1] + "_0_"+ lines1[3]]['aln_start']=0
+ g.node[lines1[1] + "_0_"+ lines1[3]]['aln_end']=0
+ g.node[lines1[1] + "_1_"+ lines1[3]]['aln_start']=0
+ g.node[lines1[1] + "_1_"+ lines1[3]]['aln_end']=0
+
+ if nodeA0 in hinge_nodes:
+ g.node[nodeA0]['active']=2
+ g.node[nodeA1]['active']=2
+ else:
+ g.node[nodeA0]['active']=1
+ g.node[nodeA1]['active']=1
+
+ if nodeB0 in hinge_nodes:
+ g.node[nodeB0]['active']=2
+ g.node[nodeB1]['active']=2
+ else:
+ g.node[nodeB0]['active']=int(lines1[4])
+ g.node[nodeB1]['active']=int(lines1[4])
+
+
+ if int(lines1[5]) == 1: # reverse match
+ g.add_edge(nodeA0, nodeB1)
+ g.add_edge(nodeA1, nodeB0)
+ else:
+ g.add_edge(nodeA0,nodeB0)
+ g.add_edge(nodeA1,nodeB1)
+
+
+
+ # nx.write_graphml(g, filename.split('.')[0]+'_hgraph.graphml')
+
+ # for c in nx.connected_components(g):
+ # print len(c)
+
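+    # within every sufficiently large weakly connected component of the hinge graph, pick a sink hinge
+    # (preferring out-degree-0 active nodes with the most incoming edges) and map every hinge of the
+    # component onto it; smaller components are deactivated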
+ hinge_mapping = {}
+
+ for c in nx.weakly_connected_components(g):
+
+ if len(c) > 10:
+
+ component_sink = -1
+
+ for node in c:
+ if g.out_degree(node) == 0 and g.node[node]['active']== 2 and component_sink == -1:
+ component_sink = node
+ elif g.out_degree(node) == 0 and g.node[node]['active']== 2 and g.in_degree(node) > g.in_degree(component_sink):
+ component_sink = node
+
+ if component_sink != -1:
+ g.node[component_sink]['active'] = 3
+ else:
+ component_sink = list(c)[0]
+
+ # sink_shortname = component_sink.split('_')[0]
+
+ for node in c:
+ hinge_mapping[node] = component_sink
+
+ else:
+
+ for node in c:
+ g.node[node]['active'] = -1
+
+
+ nx.write_graphml(g, hg_file.split('.')[0]+'_hgraph2.graphml')
+
+
+ # print nx.number_weakly_connected_components(g)
+ # print nx.number_strongly_connected_components(g)
+
+
+ G = nx.DiGraph()
+
+ merging = 1
+
+ if merging == 0:
+
+ with open (edges_file) as f:
+ for lines in f:
+ lines1=lines.split()
+
+ if len(lines1) < 6:
+ continue
+
+ if int(lines1[5]) != 0:
+
+ if int(lines1[5]) == 1:
+ # nodeB_id = lines1[1]+"_"+lines1[4]
+ # hingepos = int(lines1[6])
+ nodeA_id = lines1[0] + "_" + lines1[3]
+
+ hinge_node = lines1[1]+"_"+lines1[4] + '_' + lines1[6]
+
+ print hinge_node
+
+ eff_hinge = hinge_mapping[hinge_node]
+
+ eff_b = eff_hinge.split('_')
+
+ if eff_b[0] + "_" + eff_b[1] != lines1[0] + "_" + lines1[3]:
+ G.add_edge(lines1[0] + "_" + lines1[3], eff_b[0] + "_" + eff_b[1],hinge_edge=1)
+ G.add_edge(eff_b[0] + "_" + str(1-int(eff_b[1])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=1)
+ else:
+
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4],hinge_edge=1)
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=1)
+
+
+ elif int(lines1[5]) == -1:
+
+ hinge_node = lines1[0]+"_"+lines1[3] + '_' + lines1[6]
+
+ eff_hinge = hinge_mapping[hinge_node]
+
+ eff_b = eff_hinge.split('_')
+
+ if eff_b[0] + "_" + eff_b[1] != lines1[1] + "_" + lines1[4] :
+ G.add_edge(eff_b[0] + "_" + eff_b[1], lines1[1] + "_" + lines1[4] ,hinge_edge=1)
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), eff_b[0] + "_" + str(1-int(eff_b[1])),hinge_edge=1)
+ else:
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4],hinge_edge=1)
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=1)
+
+
+ # if nodeA_id in hinge_pos:
+ # print "found A"
+ # else:
+ # print "didnt find A"
+
+
+ # if nodeB_id in hinge_pos:
+
+ # print "Node B of hinged match IS in hinge_pos"
+
+ # hinge_found = False
+
+ # hingepos = hinge_pos[nodeB_id][0]
+ # for candidate_pos in hinge_pos[nodeB_id]:
+ # if (abs(candidate_pos - int(lines1[8][1:])) < 200):
+ # hingepos = candidate_pos
+
+ # print "Matching hinge found"
+
+ # hinge_found = True
+
+ # if not hinge_found:
+ # print "not found"
+ # print lines1[8][1:], hinge_pos[nodeB_id]
+
+
+ # hinge_node = nodeB_id + '_' + str(hingepos)
+ # eff_hinge = hinge_mapping[hinge_node]
+
+ # eff_b = eff_hinge.split('_')
+
+ # G.add_edge(lines1[0] + "_" + lines1[3], eff_b[0] + "_" + eff_b[1],hinge_edge=1)
+ # G.add_edge(eff_b[0] + "_" + str(1-int(eff_b[1])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=1)
+
+
+ else:
+
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4],hinge_edge=int(lines1[5]))
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=int(lines1[5]))
+
+
+
+ else:
+
+ to_be_merged = []
+
+ with open (edges_file) as f:
+ for lines in f:
+ lines1=lines.split()
+
+ if len(lines1) < 6:
+ continue
+
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4],hinge_edge=int(lines1[5]))
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=int(lines1[5]))
+
+
+ if int(lines1[5]) != 0:
+
+ if int(lines1[5]) == 1:
+
+ to_be_merged.append([lines1[1],lines1[6]])
+
+ elif int(lines1[5]) == -1:
+
+ to_be_merged.append([lines1[0],lines1[6]])
+
+ for pair in to_be_merged:
+
+ sink_node_long = hinge_mapping[pair[0]+'_0_'+pair[1]]
+ sink_node = sink_node_long.split('_')[0]+'_'+sink_node_long.split('_')[1]
+
+ if pair[0]+'_0' != sink_node:
+ merge_a_to_b(G,pair[0]+'_0',sink_node)
+
+ sink_node_long = hinge_mapping[pair[0]+'_1_'+pair[1]]
+ sink_node = sink_node_long.split('_')[0]+'_'+sink_node_long.split('_')[1]
+
+ if pair[0]+'_1' != sink_node:
+ merge_a_to_b(G,pair[0]+'_1',sink_node)
+
+
+ in_hinges = set()
+ out_hinges = set()
+
+ with open (hinge_file) as f:
+
+ for lines in f:
+ lines1=lines.split()
+
+ if lines1[2] == '1':
+ in_hinges.add(lines1[0]+'_0')
+ out_hinges.add(lines1[0]+'_1')
+ elif lines1[2] == '-1':
+ in_hinges.add(lines1[0]+'_1')
+ out_hinges.add(lines1[0]+'_0')
+
+
+ json_file = open(gt_file)
+ add_groundtruth(G,json_file,in_hinges,out_hinges)
+
+
+ G0 = G.copy()
+
+ nx.write_graphml(G0, prefix+'.'+'G0_merged'+'.graphml')
+
+
+ G0s = random_condensation(G0,3500)
+
+ nx.write_graphml(G0s, prefix+'.'+'G0s_merged'+'.graphml')
+
+
+ G1=dead_end_clipping(G0,10)
+
+ G1=z_clipping(G1,5,in_hinges,out_hinges)
+
+ nx.write_graphml(G1, prefix+'.'+'G1_merged'+'.graphml')
+
+
+ Gs = random_condensation(G1,2500)
+
+ nx.write_graphml(Gs, prefix+'.'+'Gs_merged'+'.graphml')
+
+
+
+if __name__ == "__main__":
+
+ read_graph(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
+
+
+
+
+
diff --git a/scripts/parallel_draw.sh b/scripts/parallel_draw.sh
new file mode 100755
index 0000000..e304c12
--- /dev/null
+++ b/scripts/parallel_draw.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+echo "Bash version ${BASH_VERSION}..."
+for i in $(seq 4000 1 20000)
+ do
+ echo drawing read $i
+    num1=$(ps -ef | grep 'python draw2.py' | wc -l)
+ num2=$(ps -ef | grep 'LA4Awesome' | wc -l)
+ num=$(( $num1 + $num2 ))
+ echo $num running
+ while [ $num -gt 60 ]
+ do
+ sleep 5
+ echo waiting, $num running
+        num1=$(ps -ef | grep 'python draw2.py' | wc -l)
+ num2=$(ps -ef | grep 'LA4Awesome' | wc -l)
+ num=$(( $num1 + $num2 ))
+ done
+ python draw2.py $i &
+ done
diff --git a/scripts/parallel_draw_large.sh b/scripts/parallel_draw_large.sh
new file mode 100755
index 0000000..99f45bd
--- /dev/null
+++ b/scripts/parallel_draw_large.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+echo "Bash version ${BASH_VERSION}..."
+for i in $(seq 1 1 100)
+ do
+ echo drawing read $i
+ num1=$(ps -ef | grep 'python draw.py' | wc -l)
+ num2=$(ps -ef | grep 'LA4Awesome' | wc -l)
+ num=$(( $num1 + $num2 ))
+ echo $num running
+ while [ $num -gt 12 ]
+ do
+      sleep 5
+      echo waiting, $num running
+      num1=$(ps -ef | grep 'python draw.py' | wc -l)
+      num2=$(ps -ef | grep 'LA4Awesome' | wc -l)
+      num=$(( $num1 + $num2 ))
+ done
+ python draw.py $i &
+ done
\ No newline at end of file
diff --git a/scripts/parse.py b/scripts/parse.py
new file mode 100755
index 0000000..bb88341
--- /dev/null
+++ b/scripts/parse.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+import sys
+min_len_aln = 1000
+
+
+def get_seq_records(f, min_cov_aln=10, max_n_read=500, config=None):
+    # group the streamed records into (seqs, seed_id, config) tuples, one per seed read;
+    # min_cov_aln, max_n_read and config are not defined elsewhere in this script, so the
+    # defaults here are placeholders that callers are expected to override
+    seqs = []
+    read_ids = set()
+    seed_id = None
+    for l in f:
+        l = l.strip().split()
+        if len(l) != 2:
+            continue
+
+        read_id = l[0]
+        seq = l[1]
+
+        print read_id,seq
+
+        #if len(seq) > max_len:
+        #    seq = seq[:max_len-1]
+
+        if read_id not in ("+", "-", "*"):
+            if len(seq) >= min_len_aln:
+                if len(seqs) == 0:
+                    seqs.append(seq) #the "seed"
+                    seed_id = l[0]
+                if read_id not in read_ids: #avoiding using the same read twice; the seed is used again here by design
+                    seqs.append(seq)
+                    read_ids.add(read_id)
+        elif l[0] == "+":
+            if len(seqs) >= min_cov_aln:
+                seqs = seqs[:1] + sorted(seqs[1:], key=lambda x: -len(x))
+                yield (seqs[:max_n_read], seed_id, config)
+                #seqs_data.append( (seqs, seed_id) )
+            seqs = []
+            read_ids = set()
+            seed_id = None
+        elif l[0] == "*":
+            seqs = []
+            read_ids = set()
+            seed_id = None
+        elif l[0] == "-":
+            #yield (seqs, seed_id)
+            #seqs_data.append( (seqs, seed_id) )
+            break
+
+
+if __name__ == "__main__":
+    with sys.stdin as f:
+        for record in get_seq_records(f):
+            pass
diff --git a/scripts/parse_alignment.py b/scripts/parse_alignment.py
new file mode 100755
index 0000000..b992d59
--- /dev/null
+++ b/scripts/parse_alignment.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+import sys
+import re
+
+
+def parse_alignment(stream = sys.stdin):
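+    # parse LAshow output: strip the decorative punctuation from each line and yield
+    # [strand, a_read, b_read, coordinates...] with the numeric fields converted to int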
+ with stream as f:
+ for l in f:
+ sub = re.sub('[\[\].x:<difstraep()]',' ',l.strip())
+ sub = re.sub(',','',sub)
+ lst = sub.split()[:-1]
+ if len(lst) == 9:
+ yield [lst[2]] + map(int, lst[0:2] + lst[3:])
+
+
+def parse_alignment2(stream = sys.stdin):
+ with stream as f:
+ for l in f:
+
+ sub = re.sub('[\[\].x:<difstraep()/]',' ',l.strip())
+ sub = re.sub(',','',sub)
+ lst = sub.split()[:-1]
+ if len(lst) == 11:
+ yield [lst[2]] + map(int, lst[0:2] + lst[3:])
+
diff --git a/scripts/parse_qv.py b/scripts/parse_qv.py
new file mode 100755
index 0000000..2c97f9a
--- /dev/null
+++ b/scripts/parse_qv.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+import sys
+
+def parse_qv(stream = sys.stdin):
+ with stream as f:
+ for l in f:
+ if l[0] == 'I':
+ yield(l.split()[-1])
+ else:
+ pass
+
+#for qv in parse_qv():
+# print qv
+# do whatever you want to do with the reads
diff --git a/scripts/parse_read.py b/scripts/parse_read.py
new file mode 100755
index 0000000..ce8f530
--- /dev/null
+++ b/scripts/parse_read.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+import sys
+
+def parse_read(stream = sys.stdin):
+ sid = ''
+ seq = ''
+ with stream as f:
+ for l in f:
+ if l[0] == '>':
+ if sid == '':
+ sid = l[1:].strip()
+ else:
+ tsid = sid
+ tseq = seq
+ seq = ''
+ sid = l[1:].strip()
+ yield (tsid,tseq)
+
+ else:
+ seq += l.strip()
+
+ yield(sid,seq)
+
+
+#for read in parse_read():
+# print read
+# do whatever you want to do with the reads
diff --git a/scripts/pileup.ipynb b/scripts/pileup.ipynb
new file mode 100644
index 0000000..a10e6cb
--- /dev/null
+++ b/scripts/pileup.ipynb
@@ -0,0 +1,227 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "%load_ext autoreload\n",
+ "%autoreload 2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "from ipywidgets.widgets import interact "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import interface_utils as util"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.environ['PATH'] += ':/data/pacbio_assembly/AwesomeAssembler/DALIGNER'\n",
+ "#print os.popen(\"export\").read()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "path = '/data/pacbio_assembly/AwesomeAssembler/data/'\n",
+ "aln = []\n",
+ "for item in util.get_alignments2(path+'ecoli',path+'ecoli.las',[2]):\n",
+ " aln.append(item)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[['n', 0, 104, 170, 7424, 7424, 4749, 11779, 13726, 1294, 7],\n",
+ " ['c', 0, 757, 206, 7424, 7424, 3568, 10592, 15876, 1242, 7],\n",
+ " ['n', 0, 978, 214, 1850, 7424, 9996, 11675, 11675, 392, 1],\n",
+ " ['n', 0, 1183, 1183, 3057, 7424, 156, 2065, 5115, 359, 2],\n",
+ " ['n', 0, 1183, 3057, 6052, 7424, 2151, 5115, 5115, 517, 3]]"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "aln[0:5]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA28AAAJMCAYAAABtgJ7QAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAH9lJREFUeJzt3X2sJfV5H/DvLLt4AVO2BGdZXpy1ApZZlCoEFdK6ESdt\nSpaoxq5UYVdyldaoUoQbrCZtvPiPzkWqGoNUpaDI7ouTAlYhQYpiJ4JgXtKD1D/CKhUk2LAGJON6\nb8xisPxShTi83P4xs9rD+m737O6ZM/Ob8/lIV3fO3HP2/nbEiOe788wzCQAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAMEAvJvnzJE8m2d/uOzfJI0meS/Jwkh0z778lyfNJDiS5dmb/lUmebn92R6crBgAA\nWEFfSxPWZt2e5Nfa7U8m+XS7vSfJU0m2Jdmd5IUkVfuz/UmuarcfTLK3m+UCAACMy9YTeG911Ovr\nk1zT [...]
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x7feb3eb9f250>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "aln.sort(key = lambda x:x[3])\n",
+ "num = len(aln)\n",
+ "#size_chunk = num/10\n",
+ "#for i in range(10):\n",
+ "# aln[i*size_chunk:min((i+1)*size_chunk, num)] = sorted(aln[i*size_chunk:min((i+1)*size_chunk, num)],key = lambda x: x[4]-x[3] ,reverse=True)\n",
+ "\n",
+ "plt.figure(figsize = (15,10))\n",
+ "plt.axes()\n",
+ "l = aln[0][5]\n",
+ "plt.xlim(-2000,l+2000)\n",
+ "plt.ylim(-5,num*10)\n",
+ "points = [[0,0], [l,0], [l+40,2.5], [l,5], [0,5]]\n",
+ "#rectangle = plt.Rectangle((0, 0), l, 5, fc='r',ec = 'none')\n",
+ "polygon = plt.Polygon(points,fc = 'r', ec = 'none')\n",
+ "plt.gca().add_patch(polygon)\n",
+ "\n",
+ "dotted_line = plt.Line2D((0, 0), (0, num*10 ),ls='-.') \n",
+ "plt.gca().add_line(dotted_line)\n",
+ "\n",
+ "dotted_line2 = plt.Line2D((l, l), (0, num*10 ),ls='-.') \n",
+ "plt.gca().add_line(dotted_line2)\n",
+ "\n",
+ "for i,item in enumerate(aln):\n",
+ " abpos = item[3]\n",
+ " aepos = item[4]\n",
+ " bbpos = item[6]\n",
+ " bepos = item[7]\n",
+ " blen = item[8]\n",
+ " strand = item[0]\n",
+    "    points_start = []\n",
+ " points_end = []\n",
+ " tip = l/200\n",
+ " ed = l/50\n",
+ " \n",
+ " if strand == 'n':\n",
+ " points = [[abpos, (i+1)*10], [aepos, (i+1)*10], [aepos + tip, (i+1)*10 + 2.5], [aepos, (i+1)*10+5], [abpos, (i+1)*10+5]]\n",
+ " if (bepos < blen):\n",
+ " points_end = [[aepos, (i+1)*10], [aepos + tip, (i+1)*10 + 2.5], [aepos, (i+1)*10+5], [aepos+ed, (i+1)*10+5], [aepos + ed+ tip, (i+1)*10 + 2.5], [aepos+ed, (i+1)*10]]\n",
+ " if (bbpos > 0):\n",
+ " points_start = [[abpos, (i+1)*10], [abpos, (i+1)*10+5], [abpos-ed, (i+1)*10+5], [abpos-ed, (i+1)*10]]\n",
+ " else:\n",
+ " points = [[abpos, (i+1)*10], [aepos, (i+1)*10], [aepos, (i+1)*10+5], [abpos, (i+1)*10+5], [abpos - tip, (i+1)*10 + 2.5]]\n",
+ " if (bepos < blen):\n",
+ " points_end = [[aepos, (i+1)*10], [aepos, (i+1)*10+5], [aepos+ed, (i+1)*10+5], [aepos+ed, (i+1)*10]]\n",
+ " if (bbpos > 0):\n",
+ " points_start = [[abpos, (i+1)*10],[abpos-tip, (i+1)*10+2.5], [abpos, (i+1)*10+5], [abpos-ed, (i+1)*10+5],[abpos-ed-tip, (i+1)*10+2.5], [abpos-ed, (i+1)*10]]\n",
+ " \n",
+ " polygon = plt.Polygon(points,fc = 'b', ec = 'none')\n",
+ " plt.gca().add_patch(polygon)\n",
+ " \n",
+ " if points_end != []:\n",
+ " polygon2 = plt.Polygon(points_end,fc = 'g', ec = 'none')\n",
+ " plt.gca().add_patch(polygon2)\n",
+ " \n",
+ "\n",
+ " if points_start != []:\n",
+ " polygon2 = plt.Polygon(points_start,fc = 'g', ec = 'none')\n",
+ " plt.gca().add_patch(polygon2)\n",
+ " \n",
+ " plt.savefig('test.pdf')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "233"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(aln)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 2",
+ "language": "python",
+ "name": "python2"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 2
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython2",
+ "version": "2.7.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/scripts/pipeline_consensus.py b/scripts/pipeline_consensus.py
new file mode 100644
index 0000000..c1a5e7d
--- /dev/null
+++ b/scripts/pipeline_consensus.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import subprocess
+
+
+if len(sys.argv) >= 2:
+ bact_id = sys.argv[1]
+
+
+ini_path = 'nominal.ini'
+if len(sys.argv) >= 3:
+ ini_path = sys.argv[2]
+
+
+run_identifier = 'A'
+if len(sys.argv) >= 4:
+ run_identifier = sys.argv[3]
+
+graphml_file = bact_id+run_identifier+'.G2.graphml'
+
+
+# This is used to start the pipeline in the middle
+st_point = 0
+if len(sys.argv) >= 5:
+ st_point = int(sys.argv[4])
+
+
+# This is used to stop the pipeline in the middle
+end_point = 20
+if len(sys.argv) >= 6:
+ end_point = int(sys.argv[5])
+
+
+
+base_path = './'
+
+if st_point <= 1 and end_point >= 1:
+ draft_path_cmd = 'get_draft_path.py '+base_path+' '+ bact_id+' '+graphml_file
+ print '1: '+draft_path_cmd
+ subprocess.check_output(draft_path_cmd,cwd=base_path, shell=True)
+
+
+if st_point <= 2 and end_point >= 2:
+ draft_assembly_cmd = 'draft_assembly --db '+bact_id+' --las '+bact_id+'.las --prefix '+bact_id+' --config '+ini_path+' --out '+bact_id+'.draft'
+ print '2: '+draft_assembly_cmd
+ subprocess.check_output(draft_assembly_cmd,cwd=base_path, shell=True)
+
+
+if st_point <= 3 and end_point >= 3:
+ corr_head_cmd = 'correct_head.py '+bact_id+'.draft.fasta '+bact_id+'.draft.pb.fasta draft_map.txt'
+ print '3: '+corr_head_cmd
+ subprocess.check_output(corr_head_cmd,cwd=base_path, shell=True)
+
+
+if st_point <= 4 and end_point >= 4:
+ subprocess.call("rm -f draft.db",shell=True,cwd=base_path)
+ fasta2DB_cmd = "fasta2DB draft "+base_path+bact_id+'.draft.pb.fasta'
+ print '4: '+fasta2DB_cmd
+ subprocess.check_output(fasta2DB_cmd.split(),cwd=base_path)
+
+if st_point <= 5 and end_point >= 5:
+ subprocess.call("rm -f draft.*.las",shell=True,cwd=base_path)
+ mapper_cmd = "HPCmapper draft "+bact_id
+ print '5: '+mapper_cmd
+ subprocess.call(mapper_cmd.split(),stdout=open(base_path+'draft_consensus.sh','w') , cwd=base_path)
+
+
+if st_point <= 6 and end_point >= 6:
+ # modify_cmd = """awk '{gsub("daligner -A -k20 -h50 -e.85","daligner -A",$0); print $0}' draft_consensus.sh"""
+ modify_cmd = ['awk','{gsub("daligner -A -k20 -h50 -e.85","daligner -A",$0); print $0}','draft_consensus.sh']
+ print '6: '+"""awk '{gsub("daligner -A -k20 -h50 -e.85","daligner -A",$0); print $0}' draft_consensus.sh"""
+ subprocess.call(modify_cmd,stdout=open(base_path+'draft_consensus2.sh','w') , cwd=base_path)
+
+
+if st_point <= 7 and end_point >= 7:
+    mapper_shell_cmd = "csh -v draft_consensus2.sh"
+ print '7: '+mapper_shell_cmd
+ subprocess.check_output(mapper_shell_cmd.split(), cwd=base_path)
+
+if st_point <= 8 and end_point >= 8:
+ # remove_cmd = 'rm -f nonrevcompdraft.'+bact_id+'.*.las'
+ # subprocess.call(remove_cmd,shell=True,cwd=base_path)
+ LAmerge_cmd = "LAmerge draft."+bact_id+".las "+'draft.'+bact_id+'.[0-9].las'
+ print '8: '+LAmerge_cmd
+ subprocess.check_output(LAmerge_cmd,cwd=base_path,shell=True)
+
+if st_point <= 9 and end_point >= 9:
+ consensus_cmd = 'consensus draft '+bact_id+' draft.'+bact_id+'.las '+bact_id+'.consensus.fasta '+ini_path
+ print '9: '+consensus_cmd
+ subprocess.check_output(consensus_cmd,cwd=base_path,shell=True)
+
+
+if st_point <= 10 and end_point >= 10:
+ gfa_cmd = 'get_consensus_gfa.py '+base_path+ ' '+ bact_id+ ' '+bact_id+'.consensus.fasta'
+ print '10: '+gfa_cmd
+ subprocess.check_output(gfa_cmd,cwd=base_path,shell=True)
+
+
+
+
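+# Hypothetical invocation (argument values are placeholders):
+#   python pipeline_consensus.py <bact_id> [ini_path] [run_identifier] [start_step] [end_step]
+#   e.g. python pipeline_consensus.py ecoli nominal.ini A 1 10
+# assumes <bact_id>.db, <bact_id>.las and <bact_id><run_identifier>.G2.graphml
+# already exist in the working directory.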
diff --git a/scripts/pipeline_consensus_norevcomp.py b/scripts/pipeline_consensus_norevcomp.py
new file mode 100644
index 0000000..b3afe3f
--- /dev/null
+++ b/scripts/pipeline_consensus_norevcomp.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+
+import sys
+import os
+import subprocess
+
+
+if len(sys.argv) >= 2:
+ bact_id = sys.argv[1]
+
+
+ini_path = 'nominal.ini'
+if len(sys.argv) >= 3:
+ ini_path = sys.argv[2]
+
+
+run_identifier = 'A'
+if len(sys.argv) >= 4:
+ run_identifier = sys.argv[3]
+
+graphml_file = bact_id+run_identifier+'.G2.graphml'
+
+
+# This is used to start the pipeline in the middle
+st_point = 0
+if len(sys.argv) >= 5:
+ st_point = int(sys.argv[4])
+
+
+# This is used to stop the pipeline in the middle
+end_point = 20
+if len(sys.argv) >= 6:
+ end_point = int(sys.argv[5])
+
+
+
+base_path = './'
+
+if st_point <= 1 and end_point >= 1:
+ draft_path_cmd = 'get_draft_path_norevcomp.py '+base_path+' '+ bact_id+' '+graphml_file
+ print '1: '+draft_path_cmd
+ subprocess.check_output(draft_path_cmd,cwd=base_path, shell=True)
+
+
+if st_point <= 2 and end_point >= 2:
+ draft_assembly_cmd = 'draft_assembly --db '+bact_id+' --las '+bact_id+'.las --prefix '+bact_id+' --config '+ini_path+' --out '+bact_id+'.draft'
+ print '2: '+draft_assembly_cmd
+ subprocess.check_output(draft_assembly_cmd,cwd=base_path, shell=True)
+
+
+if st_point <= 3 and end_point >= 3:
+ corr_head_cmd = 'correct_head.py '+bact_id+'.draft.fasta '+bact_id+'.draft.pb.fasta draft_map.txt'
+ print '3: '+corr_head_cmd
+ subprocess.check_output(corr_head_cmd,cwd=base_path, shell=True)
+
+
+if st_point <= 4 and end_point >= 4:
+ subprocess.call("rm -f draft.db",shell=True,cwd=base_path)
+ fasta2DB_cmd = "fasta2DB draft "+base_path+bact_id+'.draft.pb.fasta'
+ print '4: '+fasta2DB_cmd
+ subprocess.check_output(fasta2DB_cmd.split(),cwd=base_path)
+
+if st_point <= 5 and end_point >= 5:
+ subprocess.call("rm -f draft.*.las",shell=True,cwd=base_path)
+ mapper_cmd = "HPCmapper draft "+bact_id
+ print '5: '+mapper_cmd
+ subprocess.call(mapper_cmd.split(),stdout=open(base_path+'draft_consensus.sh','w') , cwd=base_path)
+
+
+
+if st_point <= 6 and end_point >= 6:
+ # modify_cmd = """awk '{gsub("daligner -A -k20 -h50 -e.85","daligner -A",$0); print $0}' draft_consensus.sh"""
+ modify_cmd = ['awk','{gsub("daligner -A -k20 -h50 -e.85","daligner -A",$0); print $0}','draft_consensus.sh']
+ print '6: '+"""awk '{gsub("daligner -A -k20 -h50 -e.85","daligner -A",$0); print $0}' draft_consensus.sh"""
+ subprocess.call(modify_cmd,stdout=open(base_path+'draft_consensus2.sh','w') , cwd=base_path)
+
+
+
+if st_point <= 7 and end_point >= 7:
+ mapper_shell_cmd = "csh -v draft_consensus2.sh"
+ print '7: '+mapper_shell_cmd
+ subprocess.check_output(mapper_shell_cmd.split(), cwd=base_path)
+
+
+if st_point <= 8 and end_point >= 8:
+ # remove_cmd = 'rm -f nonrevcompdraft.'+bact_id+'.*.las'
+ # subprocess.call(remove_cmd,shell=True,cwd=base_path)
+ LAmerge_cmd = "LAmerge draft."+bact_id+".las "+'draft.'+bact_id+'.[0-9].las'
+ print '8: '+LAmerge_cmd
+ subprocess.check_output(LAmerge_cmd,cwd=base_path,shell=True)
+
+
+if st_point <= 9 and end_point >= 9:
+ consensus_cmd = 'consensus draft '+bact_id+' draft.'+bact_id+'.las '+bact_id+'.norevcomp_consensus.fasta '+ini_path
+ print '9: '+consensus_cmd
+ subprocess.check_output(consensus_cmd,cwd=base_path,shell=True)
+
+
+
+
+
diff --git a/scripts/pipeline_nctc.py b/scripts/pipeline_nctc.py
new file mode 100644
index 0000000..0942812
--- /dev/null
+++ b/scripts/pipeline_nctc.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import subprocess
+
+bact_name = "ecoli"
+
+if len(sys.argv) >= 2:
+ bact_id = sys.argv[1]
+
+st_point = 0
+if len(sys.argv) >= 3:
+ st_point = int(sys.argv[2])
+
+base_path='/data/pacbio_assembly/pb_data/NCTC/'+bact_id+"/"
+
+
+if len(sys.argv) >= 4:
+ base_path = sys.argv[3]
+
+
+
+fasta_names = [x for x in os.listdir(base_path) if x.endswith('.fasta')]
+assert len(fasta_names)==1
+
+fasta_name = fasta_names[0]
+bact_name = fasta_name.split('.fasta')[0]
+
+print bact_name
+
+
+if st_point <= 1:
+ subprocess.call("rm -f *.db",shell=True,cwd=base_path)
+ fasta2DB_cmd = "fasta2DB "+bact_name+' '+base_path+fasta_name
+ print fasta2DB_cmd
+ subprocess.check_output(fasta2DB_cmd.split(),cwd=base_path)
+
+if st_point <= 2:
+ DBsplit_cmd = "DBsplit -x500 -s100 "+bact_name
+ print DBsplit_cmd
+ subprocess.check_output(DBsplit_cmd.split(),cwd=base_path)
+
+if st_point <= 3:
+ subprocess.call("rm -f *.las",shell=True,cwd=base_path)
+ daligner_cmd = "HPCdaligner -t5 "+bact_name
+ daligner_shell_cmd = "csh -v daligner_cmd.sh"
+ print daligner_cmd
+ p = subprocess.call(daligner_cmd.split(),stdout=open(base_path+'daligner_cmd.sh','w') , cwd=base_path)
+    p2 = subprocess.check_output(daligner_shell_cmd.split(), cwd=base_path)
+
+if st_point <= 4:
+ remove_cmd = "rm "+base_path+bact_name+".*."+bact_name+".*"
+ print remove_cmd
+ os.system(remove_cmd)
+
+if st_point <= 5:
+ LAmerge_cmd = "LAmerge "+bact_name+".las "+bact_name+".*.las"
+ print LAmerge_cmd
+ subprocess.check_output(LAmerge_cmd,cwd=base_path,shell=True)
+
+if st_point <= 6:
+ remove_cmd2 = "rm "+base_path+bact_name+".*.las"
+ os.system(remove_cmd2)
+
+if st_point <= 7:
+ os.system("mkdir -p "+base_path+"log")
+
+if st_point <= 8:
+ DASqv_cmd = "DASqv -c100 "+bact_name+" "+bact_name+".las"
+ subprocess.check_output(DASqv_cmd.split(),cwd=base_path)
+
+if st_point <= 9:
+ Reads_filter_cmd = "Reads_filter --db "+bact_name+" --las "+bact_name+".las -x "+bact_name+" --config ~/AwesomeAssembler/utils/nominal.ini"
+ print Reads_filter_cmd
+ subprocess.check_output(Reads_filter_cmd,cwd=base_path, shell=True)
+
+if st_point <= 10:
+ hinging_cmd = "hinging --db "+bact_name+" --las "+bact_name+".las -x "+bact_name+" --config ~/AwesomeAssembler/utils/nominal.ini -o "+bact_name
+ print hinging_cmd
+ subprocess.check_output(hinging_cmd, cwd=base_path, shell=True)
+
+if st_point <= 11:
+ pruning_cmd = "python ~/AwesomeAssembler/scripts/pruning_and_clipping.py "+bact_name+".edges.hinges "+bact_name+".hinge.list A"
+ print pruning_cmd
+ subprocess.check_output(pruning_cmd, cwd=base_path, shell=True)
+
+
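+# Hypothetical invocation (paths are placeholders):
+#   python pipeline_nctc.py <NCTC id> [start_step] [base_path]
+# base_path is assumed to contain exactly one .fasta file; the steps above then run
+# fasta2DB/DBsplit/daligner, DASqv, Reads_filter, hinging and pruning_and_clipping on it.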
diff --git a/scripts/pruning_and_clipping.py b/scripts/pruning_and_clipping.py
new file mode 100755
index 0000000..93bbc2d
--- /dev/null
+++ b/scripts/pruning_and_clipping.py
@@ -0,0 +1,1290 @@
+#!/usr/bin/env python
+
+# coding: utf-8
+
+# In[115]:
+
+import networkx as nx
+import random
+import sys
+import numpy as np
+import ujson
+from colormap import rgb2hex
+import operator
+import matplotlib.colors
+# print G.number_of_edges(),G.number_of_nodes()
+
+
+# In[3]:
+
+def write_graph(G,flname):
+ with open(flname,'w') as f:
+ for edge in G.edges_iter():
+ f.write(str(edge[0])+'\t'+str(edge[1])+'\n')
+
+
+# In[4]:
+
+def write_graph2(G,Ginfo,flname):
+
+ count_no = 0
+ count_yes = 0
+
+ with open(flname,'w') as f:
+ for edge in G.edges_iter():
+
+
+ if (edge[0],edge[1]) not in Ginfo:
+ count_no += 1
+ print "not found"
+ continue
+ else:
+ count_yes += 1
+
+
+# line = Ginfo[(edge[0],edge[1])]
+# line_sp = line.split(' ')
+
+# f.write(str(edge[0])+' '+str(edge[1]))
+# for j in range(2,len(line_sp)):
+# f.write(' '+line_sp[j])
+
+ f.write(Ginfo[(edge[0],edge[1])]+'\n')
+
+ print count_no, count_yes
+
+
+
+
+
+
+
+# In[7]:
+
+def prune_graph(graph,in_hinges,out_hinges,reverse=False):
+
+ H=nx.DiGraph()
+ if reverse:
+ G=nx.reverse(graph,copy=True)
+ else:
+ G=graph
+ start_nodes = [x for x in G.nodes() if G.in_degree(x) ==0]
+
+ in_hinges = list(in_hinges.intersection(set(G.nodes())))
+ out_hinges = list(out_hinges.intersection(set(G.nodes())))
+
+ if reverse:
+ for node in in_hinges:
+ for successor in G.successors(node):
+# H.add_edge(node,successor)
+ H.add_node(successor)
+ for node in out_hinges:
+ H.add_node(node)
+ else:
+ for node in out_hinges:
+ for successor in G.successors(node):
+# H.add_edge(node,successor)
+ H.add_node(successor)
+ for node in in_hinges:
+ H.add_node(node)
+ map(H.add_node,start_nodes)
+ all_vertices=set(G.nodes())
+ current_vertices=set(H.nodes())
+ undiscovered_vertices=all_vertices-current_vertices
+ last_discovered_vertices=current_vertices
+ while undiscovered_vertices:
+ discovered_vertices_set=set([x for node in last_discovered_vertices
+ for x in G.successors(node)
+ if x not in current_vertices])
+ for vertex in discovered_vertices_set:
+ for v_predecessor in G.predecessors(vertex):
+ if v_predecessor in current_vertices:
+ H.add_edge(v_predecessor,vertex)
+ break
+ current_vertices=current_vertices.union(discovered_vertices_set)
+# print len(undiscovered_vertices)
+ if len(discovered_vertices_set)==0:
+ print last_discovered_vertices
+ print 'did not reach all nodes'
+ print 'size of G: '+str(len(G.nodes()))
+ print 'size of H: '+str(len(H.nodes()))
+# return H
+
+ rand_node = list(undiscovered_vertices)[0]
+
+ discovered_vertices_set.add(rand_node)
+
+
+ last_discovered_vertices=discovered_vertices_set
+ undiscovered_vertices=all_vertices-current_vertices
+# if reverse:
+# for vertex in out_hinges:
+# for v_predecessor in G.predecessors(vertex):
+# H.add_edge(v_predecessor,vertex)
+# else:
+# for vertex in in_hinges:
+# for v_predecessor in G.predecessors(vertex):
+# H.add_edge(v_predecessor,vertex)
+ if reverse:
+ for node in in_hinges:
+ for successor in G.successors(node):
+ H.add_edge(node,successor)
+ for node in out_hinges:
+ for predecessor in G.predecessors(node):
+ H.add_edge(predecessor,node)
+ else:
+ for node in out_hinges:
+ for successor in G.successors(node):
+ H.add_edge(node,successor)
+ for node in in_hinges:
+ for predecessor in G.predecessors(node):
+ H.add_edge(predecessor,node)
+ if reverse:
+ return nx.reverse(H)
+ return H
+
+
+# In[8]:
+
+def dead_end_clipping(G,threshold):
+# H=nx.DiGraph()
+ H = G.copy()
+ start_nodes = set([x for x in H.nodes() if H.in_degree(x) ==0])
+
+ for st_node in start_nodes:
+ cur_path = [st_node]
+
+ if len(H.successors(st_node)) == 1:
+ cur_node = H.successors(st_node)[0]
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1 and len(cur_path) < threshold + 2:
+ cur_path.append(cur_node)
+ cur_node = H.successors(cur_node)[0]
+
+
+ if len(cur_path) <= threshold:
+ for vertex in cur_path:
+ H.remove_node(vertex)
+
+ end_nodes = set([x for x in H.nodes() if H.out_degree(x) ==0])
+
+ for end_node in end_nodes:
+ cur_path = [end_node]
+ if len(H.predecessors(end_node)) == 1:
+ cur_node = H.predecessors(end_node)[0]
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1 and len(cur_path) < threshold + 2:
+ cur_path.append(cur_node)
+ cur_node = H.predecessors(cur_node)[0]
+
+ if len(cur_path) <= threshold:
+ for vertex in cur_path:
+ H.remove_node(vertex)
+
+ return H
+
+
+
+def rev_node(node):
+ node_id = node.split('_')[0]
+
+ return node_id + '_' + str(1-int(node.split('_')[1]))
+
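+# Nodes are named "<read id>_<orientation>", so e.g. rev_node('42_0') == '42_1':
+# the same read taken on the opposite strand (illustrative read id).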
+
+def dead_end_clipping_sym(G,threshold,print_debug = False):
+# H=nx.DiGraph()
+ H = G.copy()
+ start_nodes = set([x for x in H.nodes() if H.in_degree(x) ==0])
+
+ for st_node in start_nodes:
+
+ if st_node not in H.nodes():
+ continue
+
+ cur_path = [st_node]
+
+ cur_node = st_node
+ if print_debug:
+ print '----0'
+ print st_node
+
+ if len(H.successors(st_node)) == 1:
+ cur_node = H.successors(st_node)[0]
+
+ if print_debug:
+ print '----1'
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1 and len(cur_path) < threshold + 2:
+ cur_path.append(cur_node)
+
+ if print_debug:
+ print cur_node
+
+ cur_node = H.successors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+
+ if print_debug:
+ print '----2'
+ print cur_path
+
+
+ if len(cur_path) <= threshold and (H.in_degree(cur_node) > 1 or H.out_degree(cur_node) == 0):
+ for vertex in cur_path:
+ # try:
+ if print_debug:
+ print 'about to delete ',vertex,rev_node(vertex)
+ H.remove_node(vertex)
+ H.remove_node(rev_node(vertex))
+ # except:
+ # pass
+ if print_debug:
+ print 'deleted ',vertex,rev_node(vertex)
+
+
+ return H
+
+
+
+# In[9]:
+
+
+# This function is no longer used. See z_clipping_sym
+def z_clipping(G,threshold,in_hinges,out_hinges,print_z = False):
+ H = G.copy()
+
+ start_nodes = set([x for x in H.nodes() if H.out_degree(x) > 1 and x not in out_hinges])
+
+ for st_node in start_nodes:
+ for sec_node in H.successors(st_node):
+
+ if H.out_degree(st_node) == 1:
+ break
+
+ cur_node = sec_node
+ cur_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+ cur_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ if len(cur_path) <= threshold and H.in_degree(cur_node) > 1 and H.out_degree(st_node) > 1 and cur_node not in in_hinges:
+ if print_z:
+ print cur_path
+
+ for edge in cur_path:
+ H.remove_edge(edge[0],edge[1])
+ for j in range(len(cur_path)-1):
+ H.remove_node(cur_path[j][1])
+
+ end_nodes = set([x for x in H.nodes() if H.in_degree(x) > 1 and x not in in_hinges])
+
+ for end_node in end_nodes:
+ for sec_node in H.predecessors(end_node):
+
+ if H.in_degree(end_node) == 1:
+ break
+
+
+ cur_node = sec_node
+ cur_path = [[cur_node,end_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+ cur_path.append([H.predecessors(cur_node)[0],cur_node])
+ cur_node = H.predecessors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ if len(cur_path) <= threshold and H.out_degree(cur_node) > 1 and H.in_degree(end_node) > 1 and cur_node not in out_hinges:
+ if print_z:
+ print cur_path
+ for edge in cur_path:
+ H.remove_edge(edge[0],edge[1])
+ for j in range(len(cur_path)-1):
+ H.remove_node(cur_path[j][0])
+
+ return H
+
+
+
+def z_clipping_sym(G,threshold,in_hinges,out_hinges,print_z = False):
+
+ H = G.copy()
+ G0 = G.copy()
+
+ start_nodes = set([x for x in H.nodes() if H.out_degree(x) > 1 and x not in out_hinges])
+
+ for st_node in start_nodes:
+
+ try: # need this because we are deleting nodes inside loop
+ H.successors(st_node)
+ except:
+ continue
+
+ for sec_node in H.successors(st_node):
+
+ if H.out_degree(st_node) == 1:
+ break
+
+ cur_node = sec_node
+ cur_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+
+ cur_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ if len(cur_path) <= threshold and H.in_degree(cur_node) > 1 and H.out_degree(st_node) > 1 and cur_node not in in_hinges:
+ if print_z:
+ print cur_path
+
+ for edge in cur_path:
+
+ G0.edge[edge[0]][edge[1]]['z'] = 1
+ G0.edge[rev_node(edge[1])][rev_node(edge[0])]['z'] = 1
+
+ try:
+ H.remove_edge(edge[0],edge[1])
+ H.remove_edge(rev_node(edge[1]),rev_node(edge[0]))
+
+ except:
+ pass
+
+ for j in range(len(cur_path)-1):
+
+ G0.node[cur_path[j][1]]['z'] = 1
+ G0.node[rev_node(cur_path[j][1])]['z'] = 1
+
+ try:
+ H.remove_node(cur_path[j][1])
+ H.remove_node(rev_node(cur_path[j][1]))
+
+ except:
+ pass
+
+
+ return H, G0
+
+
+
+
+
+
+# In[48]:
+
+def merge_path(g,in_node,node,out_node):
+
+ # g.add_edge(in_node,out_node,hinge_edge = -1,false_positive = 0)
+
+ if g.edge[in_node][node]['intersection'] == 1 and g.edge[node][out_node]['intersection'] == 1:
+ g.add_edge(in_node,out_node,hinge_edge = -1,intersection = 1,z=0)
+ else:
+ g.add_edge(in_node,out_node,hinge_edge = -1,intersection = 0,z=0)
+
+ g.remove_node(node)
+
+
+
+# In[121]:
+
+def random_condensation(G,n_nodes,check_gt = False):
+
+ g = G.copy()
+
+ max_iter = 20000
+ iter_cnt = 0
+
+ while len(g.nodes()) > n_nodes and iter_cnt < max_iter:
+
+ iter_cnt += 1
+
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ #print in_node, node, out_node
+# merge_path(g,in_node,node,out_node)
+
+ bad_node=False
+ if check_gt:
+ for in_edge in g.in_edges(node):
+ if g.edge[in_edge[0]][in_edge[1]]['false_positive']==1:
+ bad_node=True
+ for out_edge in g.out_edges(node):
+ if g.edge[out_edge[0]][out_edge[1]]['false_positive']==1:
+ bad_node=True
+ if not bad_node:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+ if iter_cnt >= max_iter:
+        print "couldn't finish sparsification: "+str(len(g.nodes()))
+
+ return g
+
+
+
+def random_condensation_sym(G,n_nodes,check_gt = False):
+
+ g = G.copy()
+
+ max_iter = 20000
+ iter_cnt = 0
+
+ while len(g.nodes()) > n_nodes and iter_cnt < max_iter:
+
+ iter_cnt += 1
+
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ #print in_node, node, out_node
+# merge_path(g,in_node,node,out_node)
+
+ bad_node=False
+ if check_gt:
+ for in_edge in g.in_edges(node):
+ if g.edge[in_edge[0]][in_edge[1]]['false_positive']==1:
+ bad_node=True
+ for out_edge in g.out_edges(node):
+ if g.edge[out_edge[0]][out_edge[1]]['false_positive']==1:
+ bad_node=True
+ if not bad_node:
+ #print in_node, node, out_node
+
+ try:
+ merge_path(g,in_node,node,out_node)
+ merge_path(g,rev_node(out_node),rev_node(node),rev_node(in_node))
+ except:
+ pass
+
+ if iter_cnt >= max_iter:
+        print "couldn't finish sparsification: "+str(len(g.nodes()))
+
+ return g
+
+
+# In[118]:
+
+def random_condensation2(g,n_nodes):
+
+    g = g.copy()
+
+
+ max_iter = 20000
+ iter_cnt = 0
+
+ while len(g.nodes()) > n_nodes and iter_cnt < max_iter:
+
+ iter_cnt += 1
+
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ base_node=node.split("_")[0]
+            orientation = node.split("_")[1]
+            # if orientation=='1':
+ # node2=base_node+'_0'
+ # else:
+ # node2=base_node+'_1'
+
+ # print node,node2
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+
+ if g.node[node]['hinge']==0 and g.node[in_node]['hinge']==0 and g.node[out_node]['hinge']==0:
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ bad_node=False
+ # print g.in_edges(node)
+ # print g.edge[g.in_edges(node)[0][0]][g.in_edges(node)[0][1]]
+ # print g.out_edges(node)
+
+
+ for in_edge in g.in_edges(node):
+ if g.edge[in_edge[0]][in_edge[1]]['false_positive']==1:
+ bad_node=True
+ for out_edge in g.out_edges(node):
+ if g.edge[out_edge[0]][out_edge[1]]['false_positive']==1:
+ bad_node=True
+ if not bad_node:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+
+
+ if iter_cnt >= max_iter:
+ print "couldn't finish sparsification: "+str(len(g.nodes()))
+
+
+ return g
+
+
+
+
+def bubble_bursting_sym(H,threshold,print_bubble = False):
+
+ start_nodes = set([x for x in H.nodes() if H.out_degree(x) == 2])
+
+ for st_node in start_nodes:
+
+ try: # need this because we are deleting nodes inside loop
+ H.successors(st_node)[1]
+ except:
+ continue
+
+ sec_node = H.successors(st_node)[0]
+
+ cur_node = sec_node
+ cur_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+
+ cur_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ end_node0 = cur_node
+ cur_node = H.successors(st_node)[1]
+ alt_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+
+ alt_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(alt_path) > threshold + 1:
+ break
+
+
+ if len(cur_path) <= threshold and len(alt_path) <= threshold and end_node0 == cur_node:
+
+ if print_bubble:
+ print 'found bubble'
+
+ for edge in cur_path:
+
+ # try:
+ H.remove_edge(edge[0],edge[1])
+ H.remove_edge(rev_node(edge[1]),rev_node(edge[0]))
+
+ # except:
+ # pass
+
+ for j in range(len(cur_path)-1):
+
+ # try:
+ H.remove_node(cur_path[j][1])
+ H.remove_node(rev_node(cur_path[j][1]))
+
+ # except:
+ # pass
+
+
+ return H
+
+
+def resolve_rep(g,rep_path,in_node,out_node):
+
+ prefix = 'B'
+
+ g.add_edge(in_node,prefix + rep_path[0],
+ read_a_start=g.edge[in_node][rep_path[0]]['read_a_start'],
+ read_a_end=g.edge[in_node][rep_path[0]]['read_a_end'],
+ read_b_start=g.edge[in_node][rep_path[0]]['read_b_start'],
+ read_b_end=g.edge[in_node][rep_path[0]]['read_b_end'],
+ read_a_start_raw=g.edge[in_node][rep_path[0]]['read_a_start_raw'],
+ read_a_end_raw=g.edge[in_node][rep_path[0]]['read_a_end_raw'],
+ read_b_start_raw=g.edge[in_node][rep_path[0]]['read_b_start_raw'],
+ read_b_end_raw=g.edge[in_node][rep_path[0]]['read_b_end_raw'])
+ g.remove_edge(in_node,rep_path[0])
+
+ g.add_edge(prefix+rep_path[-1],out_node,
+ read_a_start=g.edge[rep_path[-1]][out_node]['read_a_start'],
+ read_a_end=g.edge[rep_path[-1]][out_node]['read_a_end'],
+ read_b_start=g.edge[rep_path[-1]][out_node]['read_b_start'],
+ read_b_end=g.edge[rep_path[-1]][out_node]['read_b_end'],
+ read_a_start_raw=g.edge[rep_path[-1]][out_node]['read_a_start_raw'],
+ read_a_end_raw=g.edge[rep_path[-1]][out_node]['read_a_end_raw'],
+ read_b_start_raw=g.edge[rep_path[-1]][out_node]['read_b_start_raw'],
+ read_b_end_raw=g.edge[rep_path[-1]][out_node]['read_b_end_raw'])
+ g.remove_edge(rep_path[-1],out_node)
+
+
+ g.add_edge(rev_node(prefix + rep_path[0]),rev_node(in_node),
+ read_a_start=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_a_start'],
+ read_a_end=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_a_end'],
+ read_b_start=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_b_start'],
+ read_b_end=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_b_end'],
+ read_a_start_raw=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_a_start_raw'],
+ read_a_end_raw=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_a_end_raw'],
+ read_b_start_raw=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_b_start_raw'],
+ read_b_end_raw=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_b_end_raw'])
+ g.remove_edge(rev_node(rep_path[0]),rev_node(in_node))
+ g.add_edge(rev_node(out_node),rev_node(prefix+rep_path[-1]),
+ read_a_start=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_a_start'],
+ read_a_end=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_a_end'],
+ read_b_start=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_b_start'],
+ read_b_end=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_b_end'],
+ read_a_start_raw=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_a_start_raw'],
+ read_a_end_raw=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_a_end_raw'],
+ read_b_start_raw=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_b_start_raw'],
+ read_b_end_raw=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_b_end_raw'])
+ g.remove_edge(rev_node(out_node),rev_node(rep_path[-1]))
+
+
+
+
+ for i in range(0,len(rep_path)-1):
+ g.add_edge(prefix+rep_path[i],prefix+rep_path[i+1],
+ read_a_start=g.edge[rep_path[i]][rep_path[i+1]]['read_a_start'],
+ read_a_end=g.edge[rep_path[i]][rep_path[i+1]]['read_a_end'],
+ read_b_start=g.edge[rep_path[i]][rep_path[i+1]]['read_b_start'],
+ read_b_end=g.edge[rep_path[i]][rep_path[i+1]]['read_b_end'],
+ read_a_start_raw=g.edge[rep_path[i]][rep_path[i+1]]['read_a_start_raw'],
+ read_a_end_raw=g.edge[rep_path[i]][rep_path[i+1]]['read_a_end_raw'],
+ read_b_start_raw=g.edge[rep_path[i]][rep_path[i+1]]['read_b_start_raw'],
+ read_b_end_raw=g.edge[rep_path[i]][rep_path[i+1]]['read_b_end_raw'])
+ g.add_edge(rev_node(prefix+rep_path[i+1]),rev_node(prefix+rep_path[i]),
+ read_a_start=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_a_start'],
+ read_a_end=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_a_end'],
+ read_b_start=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_b_start'],
+ read_b_end=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_b_end'],
+ read_a_start_raw=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_a_start_raw'],
+ read_a_end_raw=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_a_end_raw'],
+ read_b_start_raw=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_b_start_raw'],
+ read_b_end_raw=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_b_end_raw'])
+
+
+
+
+def loop_resolution(g,max_nodes,flank,print_debug = False):
+
+ starting_nodes = [x for x in g.nodes() if g.out_degree(x) == 2]
+
+ if print_debug:
+ print '----'
+ print starting_nodes
+
+ tandem = []
+
+ for st_node in starting_nodes:
+
+
+ if g.out_degree(st_node) != 2:
+ continue
+
+ if print_debug:
+ print '----'
+ print st_node
+
+
+ for first_node in g.successors(st_node):
+
+
+ if g.out_degree(st_node) != 2:
+ continue
+
+ if print_debug:
+ print '----'
+ print first_node
+
+ other_successor = [x for x in g.successors(st_node) if x != first_node][0]
+
+ next_node = first_node
+ if print_debug:
+ print 'going on loop'
+
+ node_cnt = 0
+ while g.in_degree(next_node) == 1 and g.out_degree(next_node) == 1 and node_cnt < max_nodes:
+ node_cnt += 1
+ in_node = next_node
+ next_node = g.successors(next_node)[0]
+
+ first_node_of_repeat = next_node
+
+ if g.in_degree(next_node) == 2:
+ prev_node = [x for x in g.predecessors(next_node) if x != in_node][0]
+ node_cnt = 0
+ while g.in_degree(prev_node) == 1 and g.out_degree(prev_node) == 1:
+ node_cnt += 1
+ prev_node = g.predecessors(prev_node)[0]
+ if node_cnt >= flank:
+ break
+ if node_cnt < flank: # and prev_node != st_node:
+ continue
+
+
+ next_node = other_successor
+ node_cnt = 0
+ while g.in_degree(next_node) == 1 and g.out_degree(next_node) == 1:
+ node_cnt += 1
+ next_node = g.successors(next_node)[0]
+ if node_cnt >= flank:
+ break
+
+ if node_cnt < flank: # and next_node != first_node_of_repeat:
+ continue
+
+ rep = [first_node_of_repeat]
+ next_node = first_node_of_repeat
+
+ node_cnt = 0
+
+ if g.in_degree(next_node) == 2 and g.out_degree(next_node) == 1:
+ next_double_node = g.successors(next_node)[0]
+ rep.append(next_double_node)
+ else:
+ next_double_node = next_node
+
+ while g.in_degree(next_double_node) == 1 and g.out_degree(next_double_node) == 1 and node_cnt < max_nodes:
+ node_cnt += 1
+ next_double_node = g.successors(next_double_node)[0]
+ rep.append(next_double_node)
+
+
+ if next_double_node == st_node:
+ if print_debug:
+ print 'success!'
+ print 'rep is:'
+ print rep
+ print 'in_node and other_successor:'
+ print in_node, other_successor
+ resolve_rep(g,rep,in_node,other_successor)
+ # print next_double_node
+
+ if node_cnt < 5:
+
+ tandem.append(rep)
+
+
+
+ continue
+
+ if len(tandem) > 0:
+ with open('tandem.txt', 'w') as tandemout:
+ for rep in tandem:
+ tandemout.write(str(rep))
+
+
+ return g
+
+
+
+
+# In[72]:
+
+
+def add_groundtruth(g,json_file,in_hinges,out_hinges):
+
+ mapping = ujson.load(json_file)
+
+ print 'getting mapping'
+ mapped_nodes=0
+ print str(len(mapping))
+ print str(len(g.nodes()))
+
+ slack = 500
+ max_chr = 0
+
+ chr_length_dict = {}
+
+ for node in g.nodes():
+ # print node
+ node_base=node.split("_")[0]
+ # print node_base
+
+ #print node
+ g.node[node]['normpos'] = 0
+ if mapping.has_key(node_base):
+ g.node[node]['chr'] = mapping[node_base][0][2]+1
+ g.node[node]['aln_start'] = min (mapping[node_base][0][0],mapping[node_base][0][1])
+ g.node[node]['aln_end'] = max(mapping[node_base][0][1],mapping[node_base][0][0])
+
+
+ # max_chr = max(g.node[node]['chr'],max_chr)
+ # mapped_nodes+=1
+ else:
+ # pass
+ g.node[node]['chr'] = 0
+ g.node[node]['aln_start'] = 1
+ g.node[node]['aln_end'] = 1
+# g.node[node]['aln_strand'] = 0
+
+ if node in in_hinges or node in out_hinges:
+ g.node[node]['hinge'] = 1
+ else:
+ g.node[node]['hinge'] = 0
+
+ if g.node[node]['chr'] in chr_length_dict:
+ chr_length_dict[g.node[node]['chr']] = max(g.node[node]['aln_end'], chr_length_dict[g.node[node]['chr']])
+ else:
+ chr_length_dict[g.node[node]['chr']] = max(g.node[node]['aln_end'], 1)
+
+ chr_list = sorted(chr_length_dict.items(), key=operator.itemgetter(1), reverse=True)
+
+ max_chr_len1 = max([g.node[x]['aln_end'] for x in g.nodes()])
+ max_chr_multiplier = 10**len(str(max_chr_len1))
+ print [x for x in chr_list]
+ chr_set =[x [0] for x in chr_list]
+ print chr_set
+ # red_bk = 102
+ # green_bk = 102
+ # blue_bk = 102
+ colour_list = ['red', 'lawngreen', 'deepskyblue', 'deeppink', 'darkorange', 'purple', 'gold', 'mediumblue', 'saddlebrown', 'darkgreen']
+ for colour in colour_list:
+ print matplotlib.colors.colorConverter.to_rgb(colour)
+ for index, chrom in enumerate(chr_set):
+ node_set = set([x for x in g.nodes() if g.node[x]['chr'] == chrom])
+ print chrom
+
+
+ max_chr_len = max([g.node[x]['aln_end'] for x in g.nodes() if g.node[x]['chr'] == chrom])
+ # max_chr_multiplier = 10**len(str(max_chr_len))
+
+
+ if index < 10:
+ rgb_tuple = matplotlib.colors.colorConverter.to_rgb(colour_list[index])
+ red = int(255*rgb_tuple[0])
+ green = int(255*rgb_tuple[1])
+ blue = int(255*rgb_tuple[2])
+ else:
+ red = random.randint(0,255)
+ # green = random.randint(0,255)
+ blue = random.randint(0,255)
+ brightness = 200
+ green = max(0,min( 255,brightness - int((0.2126 *red + 0.0722 *blue)/0.7152 )))
+
+ red_bk = max(red-100,0)
+ blue_bk = max(blue-100,0)
+ green_bk = max(green-100,0)
+
+ print red,blue,green
+ for node in node_set:
+ g.node[node]['normpos'] = g.node[node]['chr'] * max_chr_multiplier + (g.node[node]['aln_end']/float(max_chr_len))*max_chr_multiplier
+            lamda = g.node[node]['aln_end']/float(max_chr_len)
+ nd_red = (1-lamda)*red + lamda*red_bk
+ nd_green = (1-lamda)*green + lamda*green_bk
+ nd_blue = (1-lamda)*blue + lamda*blue_bk
+ g.node[node]['color'] = rgb2hex(nd_red, nd_green, nd_blue)
+ g.node[node]['color_r'] = nd_red
+ g.node[node]['color_g'] = nd_green
+ g.node[node]['color_b'] = nd_blue
+
+ # max_chr_len = len(str(max_chr))
+
+ # div_num = float(10**(max_chr_len))
+
+ # for node in g.nodes():
+ # g.node[node]['normpos'] = (g.node[node]['chr'] + g.node[node]['aln_end']/float(chr_length_dict[g.node[node]['chr']]))/div_num
+
+ for edge in g.edges_iter():
+ in_node=edge[0]
+ out_node=edge[1]
+
+# if ((g.node[in_node]['aln_start'] < g.node[out_node]['aln_start'] and
+# g.node[out_node]['aln_start'] < g.node[in_node]['aln_end']) or
+# (g.node[in_node]['aln_start'] < g.node[out_node]['aln_end'] and
+# g.node[out_node]['aln_end'] < g.node[in_node]['aln_end'])):
+# g.edge[in_node][out_node]['false_positive']=0
+# else:
+# g.edge[in_node][out_node]['false_positive']=1
+
+
+ if ((g.node[in_node]['aln_start'] < g.node[out_node]['aln_start'] and
+ g.node[out_node]['aln_start'] < g.node[in_node]['aln_end']) or
+ (g.node[in_node]['aln_start'] < g.node[out_node]['aln_end'] and
+ g.node[out_node]['aln_end'] < g.node[in_node]['aln_end'])):
+ g.edge[in_node][out_node]['false_positive']=0
+ else:
+ g.edge[in_node][out_node]['false_positive']=1
+
+ return g
+
+
+def mark_skipped_edges(G,skipped_name):
+
+ with open (skipped_name) as f:
+ for lines in f:
+ lines1=lines.split()
+
+ if len(lines1) < 5:
+ continue
+
+ e1 = (lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4])
+
+ if e1 in G.edges():
+ G.edge[lines1[0] + "_" + lines1[3]][lines1[1] + "_" + lines1[4]]['skipped'] = 1
+ G.edge[lines1[1] + "_" + str(1-int(lines1[4]))][lines1[0] + "_" + str(1-int(lines1[3]))]['skipped'] = 1
+
+
+
+
+
+def add_annotation(g,in_hinges,out_hinges):
+
+ for node in g.nodes():
+
+ if node in in_hinges:
+ g.node[node]['hinge'] = 1
+ elif node in out_hinges:
+ g.node[node]['hinge'] = -1
+ else:
+ g.node[node]['hinge'] = 0
+
+ return g
+
+
+
+def connect_strands(g):
+
+ for node in g.nodes():
+ revnode = rev_node(node)
+ g.add_edge(node,revnode)
+ g.add_edge(revnode,node)
+
+ return g
+
+
+
+
+
+def create_bidirected(g):
+
+ h = nx.DiGraph()
+
+ for u in g.nodes():
+
+ for successor in g.successors(u):
+
+ tail_id, tail_orientation = u.split('_')
+ head_id, head_orientation = successor.split('_')
+
+ h.add_edge(tail_id,head_id,tail_or = int(tail_orientation),head_or = int(head_orientation),
+ read_a_start=g.edge[u][successor]['read_a_start'],
+ read_a_end=g.edge[u][successor]['read_a_end'],
+ read_b_start=g.edge[u][successor]['read_b_start'],
+ read_b_end=g.edge[u][successor]['read_b_end'])
+
+
+ st_nodes = [x for x in g if g.in_degree(x) != 1 or g.out_degree(x) > 1]
+
+ for st_node in st_nodes:
+
+ for sec_node in g.successors(st_node):
+
+ cur_node = st_node
+ cur_id = cur_node.split('_')[0]
+ next_node = sec_node
+ next_id = next_node.split('_')[0]
+
+ if next_id in h.successors(cur_id) and cur_id in h.successors(next_id):
+ h.remove_edge(next_id,cur_id)
+
+ while g.in_degree(next_node) == 1 and g.out_degree(next_node) == 1:
+
+ cur_node = next_node
+ cur_id = cur_node.split('_')[0]
+ next_node = g.successors(next_node)[0]
+ next_id = next_node.split('_')[0]
+ # else:
+ # print 'not in h'
+
+ if next_id in h.successors(cur_id) and cur_id in h.successors(next_id):
+ h.remove_edge(next_id,cur_id)
+ else:
+ break
+
+
+ return h
+
+
+
+
+def create_bidirected2(g):
+
+ h = nx.DiGraph()
+
+ for u in g.nodes():
+
+ for successor in g.successors(u):
+
+ tail_id, tail_orientation = u.split('_')
+ head_id, head_orientation = successor.split('_')
+
+ h.add_edge(tail_id,head_id)
+
+ # h.add_edge(tail_id,head_id,tail_or = int(tail_orientation),head_or = int(head_orientation),
+ # read_a_start=g.edge[u][successor]['read_a_start'],
+ # read_a_end=g.edge[u][successor]['read_a_end'],
+ # read_b_start=g.edge[u][successor]['read_b_start'],
+ # read_b_end=g.edge[u][successor]['read_b_end'])
+
+
+ st_nodes = [x for x in g if g.in_degree(x) != 1 or g.out_degree(x) > 1]
+
+ for st_node in st_nodes:
+
+ for sec_node in g.successors(st_node):
+
+ cur_node = st_node
+ cur_id = cur_node.split('_')[0]
+ next_node = sec_node
+ next_id = next_node.split('_')[0]
+
+ if next_id in h.successors(cur_id) and cur_id in h.successors(next_id):
+ h.remove_edge(next_id,cur_id)
+
+ while g.in_degree(next_node) == 1 and g.out_degree(next_node) == 1:
+
+ cur_node = next_node
+ cur_id = cur_node.split('_')[0]
+ next_node = g.successors(next_node)[0]
+ next_id = next_node.split('_')[0]
+ # else:
+ # print 'not in h'
+
+ if next_id in h.successors(cur_id) and cur_id in h.successors(next_id):
+ h.remove_edge(next_id,cur_id)
+ else:
+ break
+
+
+ return g
+
+
+
+
+def write_graphml(g,prefix,suffix,suffix1):
+ h = g.copy()
+ connect_strands(h)
+    nx.write_graphml(h, prefix+suffix+'.'+suffix1+'.graphml')
+
+
+
+flname = sys.argv[1]
+# flname = '../pb_data/ecoli_shortened/ecoli4/ecolii2.edges.hinges'
+
+prefix = flname.split('.')[0]
+
+hingesname = sys.argv[2]
+# hingesname = '../pb_data/ecoli_shortened/ecoli4/ecolii2.hinge.list'
+
+
+suffix = sys.argv[3]
+
+if len(sys.argv)==5:
+ json_file = open(sys.argv[4])
+else:
+ json_file = None
+# path = '../pb_data/ecoli_shortened/ecoli4/'
+# suffix = 'i2'
+
+
+
+# In[116]:
+
+G = nx.DiGraph()
+
+Ginfo = {}
+
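+# Each line of the .edges.hinges file is assumed to be whitespace separated as
+#   <read a> <read b> <match length> <orientation a> <orientation b> <hinge flag>
+# followed by bracketed alignment coordinates; the last four bracketed fields are
+# taken as the raw (untrimmed) coordinates.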
+with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+
+ if len(lines1) < 5:
+ continue
+
+ e1 = (lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4])
+ # print lines1
+ # e1_match1 = abs(int(lines1[6].lstrip('['))-int(lines1[7].rstrip(']')))
+ # e1_match2 = abs(int(lines1[8].lstrip('['))-int(lines1[9].rstrip(']')))
+ e1_match_len = int(lines1[2])
+ ra_start = int(lines1[6].lstrip('['))
+ ra_end = int(lines1[7].rstrip(']'))
+ rb_start = int(lines1[8].lstrip('['))
+ rb_end = int(lines1[9].rstrip(']'))
+
+ ra_start_raw = int(lines1[-4].lstrip('['))
+ ra_end_raw = int(lines1[-3].rstrip(']'))
+ rb_start_raw = int(lines1[-2].lstrip('['))
+ rb_end_raw = int(lines1[-1].rstrip(']'))
+
+
+ if e1 in G.edges():
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4],
+ hinge_edge=int(lines1[5]),intersection=1,length=e1_match_len,z=0,
+ read_a_start=ra_start,read_a_end=ra_end,
+ read_b_start=rb_start,read_b_end=rb_end,
+ read_a_start_raw=ra_start_raw,read_a_end_raw=ra_end_raw,
+ read_b_start_raw=rb_start_raw,read_b_end_raw=rb_end_raw)
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),
+ hinge_edge=int(lines1[5]),intersection=1,length=e1_match_len,z=0,
+ read_a_start=rb_start,read_a_end=rb_end,
+ read_b_start=ra_start,read_b_end=ra_end,
+ read_a_start_raw=rb_start_raw,read_a_end_raw=rb_end_raw,
+ read_b_start_raw=ra_start_raw,read_b_end_raw=ra_end_raw)
+ else:
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4],
+ hinge_edge=int(lines1[5]),intersection=0,length=e1_match_len,z=0,
+ read_a_start=ra_start,read_a_end=ra_end,
+ read_b_start=rb_start,read_b_end=rb_end,
+ read_a_start_raw=ra_start_raw,read_a_end_raw=ra_end_raw,
+ read_b_start_raw=rb_start_raw,read_b_end_raw=rb_end_raw)
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),
+ hinge_edge=int(lines1[5]),intersection=0,length=e1_match_len,z=0,
+ read_a_start=rb_start,read_a_end=rb_end,
+ read_b_start=ra_start,read_b_end=ra_end,
+ read_a_start_raw=rb_start_raw,read_a_end_raw=rb_end_raw,
+ read_b_start_raw=ra_start_raw,read_b_end_raw=ra_end_raw)
+
+
+
+ towrite = lines1[0] + "_" + lines1[3] +' '+ lines1[1] + "_" + lines1[4] +' '+ lines1[2]+' '+str(int(lines1[11][:-1])-int(lines1[10][1:]))+' '+str(int(lines1[13][:-1])-int(lines1[12][1:]))
+ Ginfo[(lines1[0] + "_" + lines1[3],lines1[1] + "_" + lines1[4])] = towrite
+
+ towrite= lines1[1] + "_" + str(1-int(lines1[4])) +' '+ lines1[0] + "_" + str(1-int(lines1[3])) +' '+ lines1[2]+' '+str(int(lines1[13][:-1])-int(lines1[12][1:]))+' '+str(int(lines1[11][:-1])-int(lines1[10][1:]))
+ Ginfo[(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])))] = towrite
+
+
+nx.write_graphml(G, prefix+suffix+'.'+'G00'+'.graphml')
+
+
+
+vertices=set()
+
+in_hinges = set()
+out_hinges = set()
+
+with open (hingesname) as f:
+
+ for lines in f:
+ lines1=lines.split()
+
+ if lines1[2] == '1':
+ in_hinges.add(lines1[0]+'_0')
+ out_hinges.add(lines1[0]+'_1')
+ elif lines1[2] == '-1':
+ in_hinges.add(lines1[0]+'_1')
+ out_hinges.add(lines1[0]+'_0')
+
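+# The hinge list is assumed to hold whitespace-separated lines whose first field is a
+# read id and whose third field is +1 or -1; the sign picks which orientation of the
+# read is registered as an in-hinge and which as an out-hinge.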
+
+
+
+add_annotation(G,in_hinges,out_hinges)
+
+# try:
+mark_skipped_edges(G,flname.split('.')[0] + '.edges.skipped')
+# except:
+# print "some error here"
+# pass
+
+
+
+# json_file = open('../pb_data/ecoli_shortened/ecoli4/ecoli.mapping.1.json')
+
+
+if json_file is not None:
+ add_groundtruth(G,json_file,in_hinges,out_hinges)
+
+
+# In[ ]:
+
+G0 = G.copy()
+
+# Actual pruning, clipping and z deletion occurs below
+
+
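+# Sketch of the cleaning sequence below (thresholds are the ones used here, not tuned):
+#   1. dead_end_clipping_sym(.,10): drop dangling paths of up to 10 nodes (and their reverse complements)
+#   2. z_clipping_sym(.,6): cut short "Z" branches of up to 6 edges, marking them in G0
+#   3. bubble_bursting_sym(.,10): collapse simple bubbles whose arms are at most 10 edges each
+#   4. dead_end_clipping_sym(.,5): clip any short dead ends exposed by the previous steps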
+G0 = dead_end_clipping_sym(G0,10)
+
+# G1=z_clipping_sym(G1,5,in_hinges,out_hinges)
+G1,G0 = z_clipping_sym(G0,6,set(),set())
+# G1=z_clipping_sym(G1,5,in_hinges,out_hinges)
+# G1=z_clipping_sym(G1,5,in_hinges,out_hinges)
+# G1=z_clipping_sym(G1,5,in_hinges,out_hinges)
+
+
+G1 = bubble_bursting_sym(G1,10)
+
+G1 = dead_end_clipping_sym(G1,5)
+
+nx.write_graphml(G0, prefix+suffix+'.'+'G0'+'.graphml')
+nx.write_graphml(G1, prefix+suffix+'.'+'G1'+'.graphml')
+
+
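+# Gs and G2s below are randomly condensed to at most 1000 nodes for easier visualisation;
+# G2 is additionally passed through loop_resolution (max_nodes=500, flank=50) first.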
+G2 = G1.copy()
+
+Gs = random_condensation_sym(G1,1000)
+
+
+loop_resolution(G2,500,50)
+
+G2s = random_condensation_sym(G2,1000)
+
+
+
+
+
+nx.write_graphml(G2, prefix+suffix+'.'+'G2'+'.graphml')
+
+nx.write_graphml(Gs, prefix+suffix+'.'+'Gs'+'.graphml')
+
+nx.write_graphml(G2s, prefix+suffix+'.'+'G2s'+'.graphml')
+
+Gc = connect_strands(Gs)
+
+nx.write_graphml(Gc, prefix+suffix+'.'+'Gc'+'.graphml')
+
+G2c = connect_strands(G2s)
+
+nx.write_graphml(G2c, prefix+suffix+'.'+'G2c'+'.graphml')
+
+# G2b = create_bidirected2(G2)
+
+# nx.write_graphml(G2b, prefix+suffix+'.'+'G2b'+'.graphml')
+
+
+
+
+# H=prune_graph(G1,in_hinges,out_hinges)
+# H=dead_end_clipping(H,5)
+
+
+# I=prune_graph(H,in_hinges,out_hinges,True)
+# I=dead_end_clipping(I,5)
+
+
+# Gs = random_condensation(G1,2000)
+# nx.write_graphml(Gs, path+'G'+suffix+'.graphml')
+# write_graph(Gs,path+'G'+suffix+'.txt')
+
+# Hs = random_condensation(H,2500)
+# nx.write_graphml(Hs, path+'H'+suffix+'.graphml')
+# write_graph(Hs,path+'H'+suffix+'.txt')
+
+# Is = random_condensation(I,2500)
+# nx.write_graphml(Is, path+'I'+suffix+'.graphml')
+# write_graph(Is,path+'I'+suffix+'.txt')
+
+
+
diff --git a/scripts/pruning_and_clipping2.py b/scripts/pruning_and_clipping2.py
new file mode 100755
index 0000000..a420d1e
--- /dev/null
+++ b/scripts/pruning_and_clipping2.py
@@ -0,0 +1,1219 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# In[115]:
+
+import networkx as nx
+import random
+import sys
+import numpy as np
+import ujson
+from colormap import rgb2hex
+
+
+# print G.number_of_edges(),G.number_of_nodes()
+
+
+# In[3]:
+
+def write_graph(G,flname):
+ with open(flname,'w') as f:
+ for edge in G.edges_iter():
+ f.write(str(edge[0])+'\t'+str(edge[1])+'\n')
+
+
+# In[4]:
+
+def write_graph2(G,Ginfo,flname):
+
+ count_no = 0
+ count_yes = 0
+
+ with open(flname,'w') as f:
+ for edge in G.edges_iter():
+
+
+ if (edge[0],edge[1]) not in Ginfo:
+ count_no += 1
+ print "not found"
+ continue
+ else:
+ count_yes += 1
+
+
+# line = Ginfo[(edge[0],edge[1])]
+# line_sp = line.split(' ')
+
+# f.write(str(edge[0])+' '+str(edge[1]))
+# for j in range(2,len(line_sp)):
+# f.write(' '+line_sp[j])
+
+ f.write(Ginfo[(edge[0],edge[1])]+'\n')
+
+ print count_no, count_yes
+
+
+
+
+
+
+
+# In[7]:
+
+def prune_graph(graph,in_hinges,out_hinges,reverse=False):
+
+ H=nx.DiGraph()
+ if reverse:
+ G=nx.reverse(graph,copy=True)
+ else:
+ G=graph
+ start_nodes = [x for x in G.nodes() if G.in_degree(x) ==0]
+
+ in_hinges = list(in_hinges.intersection(set(G.nodes())))
+ out_hinges = list(out_hinges.intersection(set(G.nodes())))
+
+ if reverse:
+ for node in in_hinges:
+ for successor in G.successors(node):
+# H.add_edge(node,successor)
+ H.add_node(successor)
+ for node in out_hinges:
+ H.add_node(node)
+ else:
+ for node in out_hinges:
+ for successor in G.successors(node):
+# H.add_edge(node,successor)
+ H.add_node(successor)
+ for node in in_hinges:
+ H.add_node(node)
+ map(H.add_node,start_nodes)
+ all_vertices=set(G.nodes())
+ current_vertices=set(H.nodes())
+ undiscovered_vertices=all_vertices-current_vertices
+ last_discovered_vertices=current_vertices
+ while undiscovered_vertices:
+ discovered_vertices_set=set([x for node in last_discovered_vertices
+ for x in G.successors(node)
+ if x not in current_vertices])
+ for vertex in discovered_vertices_set:
+ for v_predecessor in G.predecessors(vertex):
+ if v_predecessor in current_vertices:
+ H.add_edge(v_predecessor,vertex)
+ break
+ current_vertices=current_vertices.union(discovered_vertices_set)
+# print len(undiscovered_vertices)
+ if len(discovered_vertices_set)==0:
+ print last_discovered_vertices
+ print 'did not reach all nodes'
+ print 'size of G: '+str(len(G.nodes()))
+ print 'size of H: '+str(len(H.nodes()))
+# return H
+
+ rand_node = list(undiscovered_vertices)[0]
+
+ discovered_vertices_set.add(rand_node)
+
+
+ last_discovered_vertices=discovered_vertices_set
+ undiscovered_vertices=all_vertices-current_vertices
+# if reverse:
+# for vertex in out_hinges:
+# for v_predecessor in G.predecessors(vertex):
+# H.add_edge(v_predecessor,vertex)
+# else:
+# for vertex in in_hinges:
+# for v_predecessor in G.predecessors(vertex):
+# H.add_edge(v_predecessor,vertex)
+ if reverse:
+ for node in in_hinges:
+ for successor in G.successors(node):
+ H.add_edge(node,successor)
+ for node in out_hinges:
+ for predecessor in G.predecessors(node):
+ H.add_edge(predecessor,node)
+ else:
+ for node in out_hinges:
+ for successor in G.successors(node):
+ H.add_edge(node,successor)
+ for node in in_hinges:
+ for predecessor in G.predecessors(node):
+ H.add_edge(predecessor,node)
+ if reverse:
+ return nx.reverse(H)
+ return H
+
+
+# In[8]:
+
+def dead_end_clipping(G,threshold):
+# H=nx.DiGraph()
+ H = G.copy()
+ start_nodes = set([x for x in H.nodes() if H.in_degree(x) ==0])
+
+ for st_node in start_nodes:
+ cur_path = [st_node]
+
+ if len(H.successors(st_node)) == 1:
+ cur_node = H.successors(st_node)[0]
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1 and len(cur_path) < threshold + 2:
+ cur_path.append(cur_node)
+ cur_node = H.successors(cur_node)[0]
+
+
+ if len(cur_path) <= threshold:
+ for vertex in cur_path:
+ H.remove_node(vertex)
+
+ end_nodes = set([x for x in H.nodes() if H.out_degree(x) ==0])
+
+ for end_node in end_nodes:
+ cur_path = [end_node]
+ if len(H.predecessors(end_node)) == 1:
+ cur_node = H.predecessors(end_node)[0]
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1 and len(cur_path) < threshold + 2:
+ cur_path.append(cur_node)
+ cur_node = H.predecessors(cur_node)[0]
+
+ if len(cur_path) <= threshold:
+ for vertex in cur_path:
+ H.remove_node(vertex)
+
+ return H
+
+
+
+def rev_node(node):
+ node_id = node.split('_')[0]
+
+ return node_id + '_' + str(1-int(node.split('_')[1]))
+
+
+def dead_end_clipping_sym(G,threshold,print_debug = False):
+# H=nx.DiGraph()
+ H = G.copy()
+ start_nodes = set([x for x in H.nodes() if H.in_degree(x) ==0])
+
+ for st_node in start_nodes:
+
+ if st_node not in H.nodes():
+ continue
+
+ cur_path = [st_node]
+
+ if print_debug:
+ print '----0'
+ print st_node
+
+ if len(H.successors(st_node)) == 1:
+
+ cur_node = H.successors(st_node)[0]
+
+ if print_debug:
+ print '----1'
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1 and len(cur_path) < threshold + 2:
+ cur_path.append(cur_node)
+
+ if print_debug:
+ print cur_node
+
+ cur_node = H.successors(cur_node)[0]
+
+ if print_debug:
+ print '----2'
+ print cur_path
+
+
+ if len(cur_path) <= threshold:
+ for vertex in cur_path:
+ # try:
+ if print_debug:
+ print 'about to delete ',vertex,rev_node(vertex)
+ H.remove_node(vertex)
+ H.remove_node(rev_node(vertex))
+ # except:
+ # pass
+ if print_debug:
+ print 'deleted ',vertex,rev_node(vertex)
+
+
+ return H
+
+
+
+# In[9]:
+
+
+# This function is no longer used. See z_clipping_sym
+def z_clipping(G,threshold,in_hinges,out_hinges,print_z = False):
+ H = G.copy()
+
+ start_nodes = set([x for x in H.nodes() if H.out_degree(x) > 1 and x not in out_hinges])
+
+ for st_node in start_nodes:
+ for sec_node in H.successors(st_node):
+
+ if H.out_degree(st_node) == 1:
+ break
+
+ cur_node = sec_node
+ cur_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+ cur_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ if len(cur_path) <= threshold and H.in_degree(cur_node) > 1 and H.out_degree(st_node) > 1 and cur_node not in in_hinges:
+ if print_z:
+ print cur_path
+
+ for edge in cur_path:
+ H.remove_edge(edge[0],edge[1])
+ for j in range(len(cur_path)-1):
+ H.remove_node(cur_path[j][1])
+
+ end_nodes = set([x for x in H.nodes() if H.in_degree(x) > 1 and x not in in_hinges])
+
+ for end_node in end_nodes:
+ for sec_node in H.predecessors(end_node):
+
+ if H.in_degree(end_node) == 1:
+ break
+
+
+ cur_node = sec_node
+ cur_path = [[cur_node,end_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+ cur_path.append([H.predecessors(cur_node)[0],cur_node])
+ cur_node = H.predecessors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ if len(cur_path) <= threshold and H.out_degree(cur_node) > 1 and H.in_degree(end_node) > 1 and cur_node not in out_hinges:
+ if print_z:
+ print cur_path
+ for edge in cur_path:
+ H.remove_edge(edge[0],edge[1])
+ for j in range(len(cur_path)-1):
+ H.remove_node(cur_path[j][0])
+
+ return H
+
+
+
+def z_clipping_sym(G,threshold,in_hinges,out_hinges,print_z = False):
+
+ H = G.copy()
+ G0 = G.copy()
+
+ start_nodes = set([x for x in H.nodes() if H.out_degree(x) > 1 and x not in out_hinges])
+
+ for st_node in start_nodes:
+
+ try: # need this because we are deleting nodes inside loop
+ H.successors(st_node)
+ except:
+ continue
+
+ for sec_node in H.successors(st_node):
+
+ if H.out_degree(st_node) == 1:
+ break
+
+ cur_node = sec_node
+ cur_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+
+ cur_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ if len(cur_path) <= threshold and H.in_degree(cur_node) > 1 and H.out_degree(st_node) > 1 and cur_node not in in_hinges:
+ if print_z:
+ print cur_path
+
+ for edge in cur_path:
+
+ G0.edge[edge[0]][edge[1]]['z'] = 1
+ G0.edge[rev_node(edge[1])][rev_node(edge[0])]['z'] = 1
+
+ try:
+ H.remove_edge(edge[0],edge[1])
+ H.remove_edge(rev_node(edge[1]),rev_node(edge[0]))
+
+ except:
+ pass
+
+ for j in range(len(cur_path)-1):
+
+ G0.node[cur_path[j][1]]['z'] = 1
+ G0.node[rev_node(cur_path[j][1])]['z'] = 1
+
+ try:
+ H.remove_node(cur_path[j][1])
+ H.remove_node(rev_node(cur_path[j][1]))
+
+ except:
+ pass
+
+
+ return H, G0
+
+
+
+
+
+
+# In[48]:
+
+def merge_path(g,in_node,node,out_node):
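+    # Contract a chain node: connect in_node directly to out_node (propagating the
+    # 'intersection' attribute only when both original edges were intersection edges)
+    # and delete `node` from the graph.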
+
+ # g.add_edge(in_node,out_node,hinge_edge = -1,false_positive = 0)
+
+ if g.edge[in_node][node]['intersection'] == 1 and g.edge[node][out_node]['intersection'] == 1:
+ g.add_edge(in_node,out_node,hinge_edge = -1,intersection = 1,z=0)
+ else:
+ g.add_edge(in_node,out_node,hinge_edge = -1,intersection = 0,z=0)
+
+ g.remove_node(node)
+
+
+
+# In[121]:
+
+def random_condensation(G,n_nodes,check_gt = False):
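+    # Sparsify the graph down to roughly n_nodes nodes: repeatedly pick a random node
+    # with in-degree 1 and out-degree 1 whose neighbours also form a simple chain and
+    # contract it with merge_path. With check_gt, nodes touching an edge marked
+    # false_positive are never contracted, so ground-truth violations stay visible.
+    # Gives up after max_iter random draws.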
+
+ g = G.copy()
+
+ max_iter = 20000
+ iter_cnt = 0
+
+ while len(g.nodes()) > n_nodes and iter_cnt < max_iter:
+
+ iter_cnt += 1
+
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ #print in_node, node, out_node
+# merge_path(g,in_node,node,out_node)
+
+ bad_node=False
+ if check_gt:
+ for in_edge in g.in_edges(node):
+ if g.edge[in_edge[0]][in_edge[1]]['false_positive']==1:
+ bad_node=True
+ for out_edge in g.out_edges(node):
+ if g.edge[out_edge[0]][out_edge[1]]['false_positive']==1:
+ bad_node=True
+ if not bad_node:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+ if iter_cnt >= max_iter:
+ print "couldn't finish sparsification"+str(len(g.nodes()))
+
+ return g
+
+
+
+def random_condensation_sym(G,n_nodes,check_gt = False):
+
+ g = G.copy()
+
+ max_iter = 20000
+ iter_cnt = 0
+
+ while len(g.nodes()) > n_nodes and iter_cnt < max_iter:
+
+ iter_cnt += 1
+
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ #print in_node, node, out_node
+# merge_path(g,in_node,node,out_node)
+
+ bad_node=False
+ if check_gt:
+ for in_edge in g.in_edges(node):
+ if g.edge[in_edge[0]][in_edge[1]]['false_positive']==1:
+ bad_node=True
+ for out_edge in g.out_edges(node):
+ if g.edge[out_edge[0]][out_edge[1]]['false_positive']==1:
+ bad_node=True
+ if not bad_node:
+ #print in_node, node, out_node
+
+ try:
+ merge_path(g,in_node,node,out_node)
+ merge_path(g,rev_node(out_node),rev_node(node),rev_node(in_node))
+ except:
+ pass
+
+ if iter_cnt >= max_iter:
+ print "couldn't finish sparsification"+str(len(g.nodes()))
+
+ return g
+
+
+# In[118]:
+
+def random_condensation2(g,n_nodes):
+
+    g = g.copy()
+
+
+ max_iter = 20000
+ iter_cnt = 0
+
+ while len(g.nodes()) > n_nodes and iter_cnt < max_iter:
+
+ iter_cnt += 1
+
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ base_node=node.split("_")[0]
+            orientation = node.split("_")[1]
+            # if orientation=='1':
+ # node2=base_node+'_0'
+ # else:
+ # node2=base_node+'_1'
+
+ # print node,node2
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+
+ if g.node[node]['hinge']==0 and g.node[in_node]['hinge']==0 and g.node[out_node]['hinge']==0:
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ bad_node=False
+ # print g.in_edges(node)
+ # print g.edge[g.in_edges(node)[0][0]][g.in_edges(node)[0][1]]
+ # print g.out_edges(node)
+
+
+ for in_edge in g.in_edges(node):
+ if g.edge[in_edge[0]][in_edge[1]]['false_positive']==1:
+ bad_node=True
+ for out_edge in g.out_edges(node):
+ if g.edge[out_edge[0]][out_edge[1]]['false_positive']==1:
+ bad_node=True
+ if not bad_node:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+
+
+ if iter_cnt >= max_iter:
+ print "couldn't finish sparsification: "+str(len(g.nodes()))
+
+
+ return g
+
+
+
+
+def bubble_bursting_sym(H,threshold,print_bubble = False):
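+    # Burst simple bubbles: from each node with out-degree 2, follow both outgoing
+    # chains of in/out-degree-1 nodes for at most `threshold` steps; if the two chains
+    # reconverge at the same node, delete one of the branches (and its
+    # reverse-complement twin) from H.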
+
+ start_nodes = set([x for x in H.nodes() if H.out_degree(x) == 2])
+
+ for st_node in start_nodes:
+
+ try: # need this because we are deleting nodes inside loop
+ H.successors(st_node)[1]
+ except:
+ continue
+
+ sec_node = H.successors(st_node)[0]
+
+ cur_node = sec_node
+ cur_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+
+ cur_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(cur_path) > threshold + 1:
+ break
+
+ end_node0 = cur_node
+ cur_node = H.successors(st_node)[1]
+ alt_path = [[st_node,cur_node]]
+
+ while H.in_degree(cur_node) == 1 and H.out_degree(cur_node) == 1:
+
+ alt_path.append([cur_node,H.successors(cur_node)[0]])
+ cur_node = H.successors(cur_node)[0]
+
+ if len(alt_path) > threshold + 1:
+ break
+
+
+ if len(cur_path) <= threshold and len(alt_path) <= threshold and end_node0 == cur_node:
+
+ if print_bubble:
+ print 'found bubble'
+
+ for edge in cur_path:
+
+ # try:
+ H.remove_edge(edge[0],edge[1])
+ H.remove_edge(rev_node(edge[1]),rev_node(edge[0]))
+
+ # except:
+ # pass
+
+ for j in range(len(cur_path)-1):
+
+ # try:
+ H.remove_node(cur_path[j][1])
+ H.remove_node(rev_node(cur_path[j][1]))
+
+ # except:
+ # pass
+
+
+ return H
+
+
+def resolve_rep(g,rep_path,in_node,out_node):
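+    # Separate one traversal of a repeat: make a 'B'-prefixed copy of every node on
+    # rep_path, reroute in_node into the copy and the copy into out_node (carrying the
+    # alignment coordinates of the original edges over to the new ones), and mirror the
+    # same rewiring on the reverse-complement strand. The original repeat nodes keep
+    # their remaining incident edges.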
+
+ prefix = 'B'
+
+ g.add_edge(in_node,prefix + rep_path[0],
+ read_a_start=g.edge[in_node][rep_path[0]]['read_a_start'],
+ read_a_end=g.edge[in_node][rep_path[0]]['read_a_end'],
+ read_b_start=g.edge[in_node][rep_path[0]]['read_b_start'],
+ read_b_end=g.edge[in_node][rep_path[0]]['read_b_end'])
+ g.remove_edge(in_node,rep_path[0])
+
+ g.add_edge(prefix+rep_path[-1],out_node,
+ read_a_start=g.edge[rep_path[-1]][out_node]['read_a_start'],
+ read_a_end=g.edge[rep_path[-1]][out_node]['read_a_end'],
+ read_b_start=g.edge[rep_path[-1]][out_node]['read_b_start'],
+ read_b_end=g.edge[rep_path[-1]][out_node]['read_b_end'])
+ g.remove_edge(rep_path[-1],out_node)
+
+
+ g.add_edge(rev_node(prefix + rep_path[0]),rev_node(in_node),
+ read_a_start=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_a_start'],
+ read_a_end=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_a_end'],
+ read_b_start=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_b_start'],
+ read_b_end=g.edge[rev_node(rep_path[0])][rev_node(in_node)]['read_b_end'])
+ g.remove_edge(rev_node(rep_path[0]),rev_node(in_node))
+ g.add_edge(rev_node(out_node),rev_node(prefix+rep_path[-1]),
+ read_a_start=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_a_start'],
+ read_a_end=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_a_end'],
+ read_b_start=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_b_start'],
+ read_b_end=g.edge[rev_node(out_node)][rev_node(rep_path[-1])]['read_b_end'])
+ g.remove_edge(rev_node(out_node),rev_node(rep_path[-1]))
+
+
+
+
+ for i in range(0,len(rep_path)-1):
+ g.add_edge(prefix+rep_path[i],prefix+rep_path[i+1],
+ read_a_start=g.edge[rep_path[i]][rep_path[i+1]]['read_a_start'],
+ read_a_end=g.edge[rep_path[i]][rep_path[i+1]]['read_a_end'],
+ read_b_start=g.edge[rep_path[i]][rep_path[i+1]]['read_b_start'],
+ read_b_end=g.edge[rep_path[i]][rep_path[i+1]]['read_b_end'])
+ g.add_edge(rev_node(prefix+rep_path[i+1]),rev_node(prefix+rep_path[i]),
+ read_a_start=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_a_start'],
+ read_a_end=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_a_end'],
+ read_b_start=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_b_start'],
+ read_b_end=g.edge[rev_node(rep_path[i+1])][rev_node(rep_path[i])]['read_b_end'])
+
+
+
+
+def loop_resolution(g,max_nodes,flank,print_debug = False):
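+    # Resolve repeats traversed as a loop: for each node with out-degree 2, walk one
+    # branch to the entry of the repeat (the first node with in-degree 2), require that
+    # the repeat's other entry and the start node's other branch either carry at least
+    # `flank` unambiguous nodes or connect directly back to this structure, then follow
+    # the repeat for at most max_nodes steps; if it returns to the start node, unroll
+    # it with resolve_rep.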
+
+ starting_nodes = [x for x in g.nodes() if g.out_degree(x) == 2]
+
+ if print_debug:
+ print '----'
+ print starting_nodes
+
+ for st_node in starting_nodes:
+
+
+ if g.out_degree(st_node) != 2:
+ continue
+
+ if print_debug:
+ print '----'
+ print st_node
+
+
+ for first_node in g.successors(st_node):
+
+
+ if g.out_degree(st_node) != 2:
+ continue
+
+ if print_debug:
+ print '----'
+ print first_node
+
+ other_successor = [x for x in g.successors(st_node) if x != first_node][0]
+
+ next_node = first_node
+ if print_debug:
+ print 'going on loop'
+
+            in_node = st_node  # predecessor of next_node in case the loop below does not run
+            node_cnt = 0
+ while g.in_degree(next_node) == 1 and g.out_degree(next_node) == 1 and node_cnt < max_nodes:
+ node_cnt += 1
+ in_node = next_node
+ next_node = g.successors(next_node)[0]
+
+ first_node_of_repeat = next_node
+
+ if g.in_degree(next_node) == 2:
+ prev_node = [x for x in g.predecessors(next_node) if x != in_node][0]
+ node_cnt = 0
+ while g.in_degree(prev_node) == 1 and g.out_degree(prev_node) == 1:
+ node_cnt += 1
+ prev_node = g.predecessors(prev_node)[0]
+ if node_cnt >= flank:
+ break
+ if node_cnt < flank and prev_node != st_node:
+ continue
+
+
+ next_node = other_successor
+ node_cnt = 0
+ while g.in_degree(next_node) == 1 and g.out_degree(next_node) == 1:
+ node_cnt += 1
+ next_node = g.successors(next_node)[0]
+ if node_cnt >= flank:
+ break
+
+ if node_cnt < flank and next_node != first_node_of_repeat:
+ continue
+
+ rep = [first_node_of_repeat]
+ next_node = first_node_of_repeat
+
+ node_cnt = 0
+
+ if g.in_degree(next_node) == 2 and g.out_degree(next_node) == 1:
+ next_double_node = g.successors(next_node)[0]
+ rep.append(next_double_node)
+ else:
+ next_double_node = next_node
+
+ while g.in_degree(next_double_node) == 1 and g.out_degree(next_double_node) == 1 and node_cnt < max_nodes:
+ node_cnt += 1
+ next_double_node = g.successors(next_double_node)[0]
+ rep.append(next_double_node)
+
+
+ if next_double_node == st_node:
+ if print_debug:
+ print 'success!'
+ print 'rep is:'
+ print rep
+ print 'in_node and other_successor:'
+ print in_node, other_successor
+ resolve_rep(g,rep,in_node,other_successor)
+ # print next_double_node
+
+ continue
+
+
+ return g
+
+
+
+
+# In[72]:
+
+
+def add_groundtruth(g,json_file,in_hinges,out_hinges):
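+    # Annotate the graph with ground truth from a read-to-reference mapping (ujson):
+    # each node gets its chromosome and alignment interval (or chr 0 if unmapped), a
+    # hinge flag, and an RGB colour chosen per chromosome and shaded by position; each
+    # edge whose two reads do not overlap on the reference is marked false_positive.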
+
+ mapping = ujson.load(json_file)
+
+ print 'getting mapping'
+ mapped_nodes=0
+ print str(len(mapping))
+ print str(len(g.nodes()))
+
+ slack = 500
+ max_chr = 0
+
+ chr_length_dict = {}
+
+ for node in g.nodes():
+ # print node
+ node_base=node.split("_")[0]
+ # print node_base
+
+ #print node
+ # g.node[node]['normpos'] = 0.0
+ if mapping.has_key(node_base):
+ g.node[node]['chr'] = mapping[node_base][0][2]+1
+ g.node[node]['aln_start'] = min (mapping[node_base][0][0],mapping[node_base][0][1])
+ g.node[node]['aln_end'] = max(mapping[node_base][0][1],mapping[node_base][0][0])
+
+
+ # max_chr = max(g.node[node]['chr'],max_chr)
+ # mapped_nodes+=1
+ else:
+ # pass
+ g.node[node]['chr'] = 0
+ g.node[node]['aln_start'] = 1
+ g.node[node]['aln_end'] = 1
+# g.node[node]['aln_strand'] = 0
+
+ if node in in_hinges or node in out_hinges:
+ g.node[node]['hinge'] = 1
+ else:
+ g.node[node]['hinge'] = 0
+
+ if g.node[node]['chr'] in chr_length_dict:
+ chr_length_dict[g.node[node]['chr']] = max(g.node[node]['aln_end'], chr_length_dict[g.node[node]['chr']])
+ else:
+ chr_length_dict[g.node[node]['chr']] = max(g.node[node]['aln_end'], 1)
+
+ chr_set = set([g.node[x]['chr'] for x in g.nodes()])
+ red_bk = 102
+ green_bk = 102
+ blue_bk = 102
+ for chrom in chr_set:
+ node_set = set([x for x in g.nodes() if g.node[x]['chr'] == chrom])
+
+ max_chr_len = float(max([g.node[x]['aln_end'] for x in g.nodes() if g.node[x]['chr'] == chrom]))
+
+ red = random.randint(0,255)
+ green = random.randint(0,255)
+ blue = random.randint(0,255)
+ for node in node_set:
+ lamda = (g.node[node]['aln_end']/max_chr_len)**3
+ nd_red = (1-lamda)*red + lamda*red_bk
+ nd_green = (1-lamda)*green + lamda*green_bk
+ nd_blue = (1-lamda)*blue + lamda*blue_bk
+ g.node[node]['color_r'] = nd_red
+ g.node[node]['color_g'] = nd_green
+ g.node[node]['color_b'] = nd_blue
+
+
+
+ # max_chr_len = len(str(max_chr))
+
+ # div_num = float(10**(max_chr_len))
+
+ # for node in g.nodes():
+ # g.node[node]['normpos'] = (g.node[node]['chr'] + g.node[node]['aln_end']/float(chr_length_dict[g.node[node]['chr']]))/div_num
+
+ for edge in g.edges_iter():
+ in_node=edge[0]
+ out_node=edge[1]
+
+# if ((g.node[in_node]['aln_start'] < g.node[out_node]['aln_start'] and
+# g.node[out_node]['aln_start'] < g.node[in_node]['aln_end']) or
+# (g.node[in_node]['aln_start'] < g.node[out_node]['aln_end'] and
+# g.node[out_node]['aln_end'] < g.node[in_node]['aln_end'])):
+# g.edge[in_node][out_node]['false_positive']=0
+# else:
+# g.edge[in_node][out_node]['false_positive']=1
+
+
+ if ((g.node[in_node]['aln_start'] < g.node[out_node]['aln_start'] and
+ g.node[out_node]['aln_start'] < g.node[in_node]['aln_end']) or
+ (g.node[in_node]['aln_start'] < g.node[out_node]['aln_end'] and
+ g.node[out_node]['aln_end'] < g.node[in_node]['aln_end'])):
+ g.edge[in_node][out_node]['false_positive']=0
+ else:
+ g.edge[in_node][out_node]['false_positive']=1
+
+ return g
+
+
+def mark_skipped_edges(G,skipped_name):
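+    # Mark edges listed in the .edges.skipped file (and their reverse-complement
+    # twins) with attribute skipped = 1.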
+
+ with open (skipped_name) as f:
+ for lines in f:
+ lines1=lines.split()
+
+ if len(lines1) < 5:
+ continue
+
+ e1 = (lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4])
+
+ if e1 in G.edges():
+ G.edge[lines1[0] + "_" + lines1[3]][lines1[1] + "_" + lines1[4]]['skipped'] = 1
+ G.edge[lines1[1] + "_" + str(1-int(lines1[4]))][lines1[0] + "_" + str(1-int(lines1[3]))]['skipped'] = 1
+
+
+
+
+
+def add_annotation(g,in_hinges,out_hinges):
+
+ for node in g.nodes():
+
+ if node in in_hinges:
+ g.node[node]['hinge'] = 1
+ elif node in out_hinges:
+ g.node[node]['hinge'] = -1
+ else:
+ g.node[node]['hinge'] = 0
+
+ return g
+
+
+
+def connect_strands(g):
+
+ for node in g.nodes():
+ revnode = rev_node(node)
+ g.add_edge(node,revnode)
+ g.add_edge(revnode,node)
+
+ return g
+
+
+
+
+
+def create_bidirected(g):
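+    # Collapse the two-node-per-read (strand-doubled) graph into a bidirected graph
+    # keyed by read id, recording the orientation of each edge endpoint, then walk
+    # chains from branching nodes and drop the redundant mirror edge wherever both
+    # directions are present.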
+
+ h = nx.DiGraph()
+
+ for u in g.nodes():
+
+ for successor in g.successors(u):
+
+ tail_id, tail_orientation = u.split('_')
+ head_id, head_orientation = successor.split('_')
+
+ h.add_edge(tail_id,head_id,tail_or = int(tail_orientation),head_or = int(head_orientation),
+ read_a_start=g.edge[u][successor]['read_a_start'],
+ read_a_end=g.edge[u][successor]['read_a_end'],
+ read_b_start=g.edge[u][successor]['read_b_start'],
+ read_b_end=g.edge[u][successor]['read_b_end'])
+
+
+ st_nodes = [x for x in g if g.in_degree(x) != 1 or g.out_degree(x) > 1]
+
+ for st_node in st_nodes:
+
+ for sec_node in g.successors(st_node):
+
+ cur_node = st_node
+ cur_id = cur_node.split('_')[0]
+ next_node = sec_node
+ next_id = next_node.split('_')[0]
+
+ if next_id in h.successors(cur_id) and cur_id in h.successors(next_id):
+ h.remove_edge(next_id,cur_id)
+
+ while g.in_degree(next_node) == 1 and g.out_degree(next_node) == 1:
+
+ cur_node = next_node
+ cur_id = cur_node.split('_')[0]
+ next_node = g.successors(next_node)[0]
+ next_id = next_node.split('_')[0]
+ # else:
+ # print 'not in h'
+
+ if next_id in h.successors(cur_id) and cur_id in h.successors(next_id):
+ h.remove_edge(next_id,cur_id)
+ else:
+ break
+
+
+ return h
+
+
+
+
+def create_bidirected2(g):
+
+ h = nx.DiGraph()
+
+ for u in g.nodes():
+
+ for successor in g.successors(u):
+
+ tail_id, tail_orientation = u.split('_')
+ head_id, head_orientation = successor.split('_')
+
+ h.add_edge(tail_id,head_id)
+
+ # h.add_edge(tail_id,head_id,tail_or = int(tail_orientation),head_or = int(head_orientation),
+ # read_a_start=g.edge[u][successor]['read_a_start'],
+ # read_a_end=g.edge[u][successor]['read_a_end'],
+ # read_b_start=g.edge[u][successor]['read_b_start'],
+ # read_b_end=g.edge[u][successor]['read_b_end'])
+
+
+ st_nodes = [x for x in g if g.in_degree(x) != 1 or g.out_degree(x) > 1]
+
+ for st_node in st_nodes:
+
+ for sec_node in g.successors(st_node):
+
+ cur_node = st_node
+ cur_id = cur_node.split('_')[0]
+ next_node = sec_node
+ next_id = next_node.split('_')[0]
+
+ if next_id in h.successors(cur_id) and cur_id in h.successors(next_id):
+ h.remove_edge(next_id,cur_id)
+
+ while g.in_degree(next_node) == 1 and g.out_degree(next_node) == 1:
+
+ cur_node = next_node
+ cur_id = cur_node.split('_')[0]
+ next_node = g.successors(next_node)[0]
+ next_id = next_node.split('_')[0]
+ # else:
+ # print 'not in h'
+
+ if next_id in h.successors(cur_id) and cur_id in h.successors(next_id):
+ h.remove_edge(next_id,cur_id)
+ else:
+ break
+
+
+ return g
+
+
+
+
+def write_graphml(g,prefix,suffix,suffix1):
+ h = g.copy()
+ connect_strands(h)
+    nx.write_graphml(h, prefix+suffix+'.'+suffix1+'.graphml')
+
+
+
+flname = sys.argv[1]
+# flname = '../pb_data/ecoli_shortened/ecoli4/ecolii2.edges.hinges'
+
+prefix = flname.split('.')[0]
+
+hingesname = sys.argv[2]
+# hingesname = '../pb_data/ecoli_shortened/ecoli4/ecolii2.hinge.list'
+
+
+suffix = sys.argv[3]
+
+if len(sys.argv)==5:
+ json_file = open(sys.argv[4])
+else:
+ json_file = None
+# path = '../pb_data/ecoli_shortened/ecoli4/'
+# suffix = 'i2'
+
+
+
+# In[116]:
+
+G = nx.DiGraph()
+
+Ginfo = {}
+
+with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+
+ if len(lines1) < 5:
+ continue
+
+ e1 = (lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4])
+ # print lines1
+ # e1_match1 = abs(int(lines1[6].lstrip('['))-int(lines1[7].rstrip(']')))
+ # e1_match2 = abs(int(lines1[8].lstrip('['))-int(lines1[9].rstrip(']')))
+ e1_match_len = int(lines1[2])
+ ra_start = int(lines1[6].lstrip('['))
+ ra_end = int(lines1[7].rstrip(']'))
+ rb_start = int(lines1[8].lstrip('['))
+ rb_end = int(lines1[9].rstrip(']'))
+
+ ra_start_raw = int(lines1[-4].lstrip('['))
+ ra_end_raw = int(lines1[-3].rstrip(']'))
+ rb_start_raw = int(lines1[-2].lstrip('['))
+ rb_end_raw = int(lines1[-1].rstrip(']'))
+
+
+ if e1 in G.edges():
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4],
+ hinge_edge=int(lines1[5]),intersection=1,length=e1_match_len,z=0,
+ read_a_start=ra_start,read_a_end=ra_end,
+ read_b_start=rb_start,read_b_end=rb_end,
+ read_a_start_raw=ra_start_raw,read_a_end_raw=ra_end_raw,
+ read_b_start_raw=rb_start_raw,read_b_end_raw=rb_end_raw)
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),
+ hinge_edge=int(lines1[5]),intersection=1,length=e1_match_len,z=0,
+ read_a_start=ra_start,read_a_end=ra_end,
+ read_b_start=rb_start,read_b_end=rb_end,
+ read_a_start_raw=ra_start_raw,read_a_end_raw=ra_end_raw,
+ read_b_start_raw=rb_start_raw,read_b_end_raw=rb_end_raw)
+ else:
+ G.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4],
+ hinge_edge=int(lines1[5]),intersection=0,length=e1_match_len,z=0,
+ read_a_start=ra_start,read_a_end=ra_end,
+ read_b_start=rb_start,read_b_end=rb_end,
+ read_a_start_raw=ra_start_raw,read_a_end_raw=ra_end_raw,
+ read_b_start_raw=rb_start_raw,read_b_end_raw=rb_end_raw)
+ G.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),
+ hinge_edge=int(lines1[5]),intersection=0,length=e1_match_len,z=0,
+ read_a_start=ra_start,read_a_end=ra_end,
+ read_b_start=rb_start,read_b_end=rb_end,
+ read_a_start_raw=ra_start_raw,read_a_end_raw=ra_end_raw,
+ read_b_start_raw=rb_start_raw,read_b_end_raw=rb_end_raw)
+
+
+
+ towrite = lines1[0] + "_" + lines1[3] +' '+ lines1[1] + "_" + lines1[4] +' '+ lines1[2]+' '+str(int(lines1[11][:-1])-int(lines1[10][1:]))+' '+str(int(lines1[13][:-1])-int(lines1[12][1:]))
+ Ginfo[(lines1[0] + "_" + lines1[3],lines1[1] + "_" + lines1[4])] = towrite
+
+ towrite= lines1[1] + "_" + str(1-int(lines1[4])) +' '+ lines1[0] + "_" + str(1-int(lines1[3])) +' '+ lines1[2]+' '+str(int(lines1[13][:-1])-int(lines1[12][1:]))+' '+str(int(lines1[11][:-1])-int(lines1[10][1:]))
+ Ginfo[(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])))] = towrite
+
+
+
+
+vertices=set()
+
+in_hinges = set()
+out_hinges = set()
+
+with open (hingesname) as f:
+
+ for lines in f:
+ lines1=lines.split()
+
+ if lines1[2] == '1':
+ in_hinges.add(lines1[0]+'_0')
+ out_hinges.add(lines1[0]+'_1')
+ elif lines1[2] == '-1':
+ in_hinges.add(lines1[0]+'_1')
+ out_hinges.add(lines1[0]+'_0')
+
+
+
+
+add_annotation(G,in_hinges,out_hinges)
+
+# try:
+mark_skipped_edges(G,flname.split('.')[0] + '.edges.skipped')
+# except:
+# print "some error here"
+# pass
+
+
+
+# json_file = open('../pb_data/ecoli_shortened/ecoli4/ecoli.mapping.1.json')
+
+
+if json_file!= None:
+ add_groundtruth(G,json_file,in_hinges,out_hinges)
+
+
+# In[ ]:
+
+G0 = G.copy()
+
+# Actual pruning, clipping and z deletion occurs below
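+#   1. dead_end_clipping_sym(G0,10): drop dead-end chains of up to 10 nodes
+#   2. z_clipping_sym(G0,6,...):     remove z-shaped spurious connections of up to 6 nodes
+#   3. bubble_bursting_sym(G1,10):   collapse simple bubbles of up to 10 nodes
+#   4. dead_end_clipping_sym(G1,5):  a final dead-end pass
+# followed by loop_resolution and random_condensation_sym; the intermediate graphs
+# (G0, G1, G2, Gs, G2s, Gc, G2c) are written out as GraphML.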
+
+
+G0 = dead_end_clipping_sym(G0,10)
+
+# G1=z_clipping_sym(G1,5,in_hinges,out_hinges)
+G1,G0 = z_clipping_sym(G0,6,set(),set())
+# G1=z_clipping_sym(G1,5,in_hinges,out_hinges)
+# G1=z_clipping_sym(G1,5,in_hinges,out_hinges)
+# G1=z_clipping_sym(G1,5,in_hinges,out_hinges)
+
+
+G1 = bubble_bursting_sym(G1,10)
+
+G1 = dead_end_clipping_sym(G1,5)
+
+nx.write_graphml(G0, prefix+suffix+'.'+'G0'+'.graphml')
+nx.write_graphml(G1, prefix+suffix+'.'+'G1'+'.graphml')
+
+
+G2 = G1.copy()
+
+Gs = random_condensation_sym(G1,1000)
+
+
+loop_resolution(G2,500,50)
+
+G2s = random_condensation_sym(G2,1000)
+
+
+
+
+
+nx.write_graphml(G2, prefix+suffix+'.'+'G2'+'.graphml')
+
+nx.write_graphml(Gs, prefix+suffix+'.'+'Gs'+'.graphml')
+
+nx.write_graphml(G2s, prefix+suffix+'.'+'G2s'+'.graphml')
+
+Gc = connect_strands(Gs)
+
+nx.write_graphml(Gc, prefix+suffix+'.'+'Gc'+'.graphml')
+
+G2c = connect_strands(G2s)
+
+nx.write_graphml(G2c, prefix+suffix+'.'+'G2c'+'.graphml')
+
+# G2b = create_bidirected2(G2)
+
+# nx.write_graphml(G2b, prefix+suffix+'.'+'G2b'+'.graphml')
+
+
+
+
+# H=prune_graph(G1,in_hinges,out_hinges)
+# H=dead_end_clipping(H,5)
+
+
+# I=prune_graph(H,in_hinges,out_hinges,True)
+# I=dead_end_clipping(I,5)
+
+
+# Gs = random_condensation(G1,2000)
+# nx.write_graphml(Gs, path+'G'+suffix+'.graphml')
+# write_graph(Gs,path+'G'+suffix+'.txt')
+
+# Hs = random_condensation(H,2500)
+# nx.write_graphml(Hs, path+'H'+suffix+'.graphml')
+# write_graph(Hs,path+'H'+suffix+'.txt')
+
+# Is = random_condensation(I,2500)
+# nx.write_graphml(Is, path+'I'+suffix+'.graphml')
+# write_graph(Is,path+'I'+suffix+'.txt')
+
+
+
diff --git a/scripts/random_condensation.py b/scripts/random_condensation.py
new file mode 100644
index 0000000..30f909f
--- /dev/null
+++ b/scripts/random_condensation.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+
+import networkx as nx
+import random
+import sys
+from collections import Counter
+
+
+
+# This script does a random condensation of the graph down to 2000 nodes
+
+# python random_condensation.py ecoli.edges 2000
+
+# It also keeps the ground truth on the graph through the condensation steps (if a json file is available)
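+
+# As written, __main__ below also reads a hinge list (argv[3]) and a ground-truth
+# json (argv[4]), so a full invocation looks like (file names illustrative):
+# python random_condensation.py ecoli.edges 2000 ecoli.hinge.list ecoli.mapping.json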
+
+
+
+def merge_path(g,in_node,node,out_node):
+
+ g.add_edge(in_node,out_node,hinge_edge = -1,false_positive = 0)
+ g.remove_node(node)
+
+
+def input1(flname):
+
+ print "input1"
+
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ if len(lines1) < 5:
+ continue
+ #print lines1
+ g.add_edge(lines1[0] + "_" + lines1[3], lines1[1] + "_" + lines1[4], hinge_edge=int(lines1[5]))
+ g.add_edge(lines1[1] + "_" + str(1-int(lines1[4])), lines1[0] + "_" + str(1-int(lines1[3])),hinge_edge=int(lines1[5]))
+ return g
+
+def input2(flname):
+
+ print "input2"
+
+ g = nx.DiGraph()
+ with open (flname) as f:
+ for lines in f:
+ lines1=lines.split()
+ #print lines1
+ g.add_edge(lines1[0], lines1[1])
+ return g
+
+
+
+def input3(flname):
+
+ print "input3"
+ # g = nx.DiGraph()
+    g = nx.read_graphml(flname)
+    return g
+
+
+def de_clip(filename, n_nodes, hinge_list,gt_file):
+
+ n_iter = 5
+
+
+ f=open(filename)
+ line1=f.readline()
+ print line1
+ f.close()
+
+ extension = filename.split('.')[-1]
+
+ if extension == 'graphml':
+ g=input3(filename)
+ elif len(line1.split()) !=2:
+ g=input1(filename)
+ else:
+ g=input2(filename)
+
+
+ print nx.info(g)
+ degree_sequence=sorted(g.degree().values(),reverse=True)
+ print Counter(degree_sequence)
+
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+ try:
+ import ujson
+ mapping = ujson.load(open(gt_file))
+
+ print 'getting mapping'
+ mapped_nodes=0
+ print str(len(mapping))
+ print str(len(g.nodes()))
+ for node in g.nodes():
+ # print node
+ node_base=node.split("_")[0]
+ # print node_base
+
+ #print node
+ if mapping.has_key(node_base):
+ g.node[node]['aln_start'] = min (mapping[node_base][0][0],mapping[node_base][0][1])
+ g.node[node]['aln_end'] = max(mapping[node_base][0][1],mapping[node_base][0][0])
+ g.node[node]['chr'] = mapping[node_base][0][2]
+ mapped_nodes+=1
+ else:
+ # pass
+ g.node[node]['aln_start'] = 0
+ g.node[node]['aln_end'] = 0
+ g.node[node]['aln_strand'] = 0
+
+
+ for edge in g.edges_iter():
+ in_node=edge[0]
+ out_node=edge[1]
+ # print 'akjdfakjhfakljh'
+ if ((g.node[in_node]['aln_start'] < g.node[out_node]['aln_start'] and
+ g.node[out_node]['aln_start'] < g.node[in_node]['aln_end']) or
+ (g.node[in_node]['aln_start'] < g.node[out_node]['aln_end'] and
+ g.node[out_node]['aln_end'] < g.node[in_node]['aln_end'])):
+ g.edge[in_node][out_node]['false_positive']=0
+ else:
+ g.edge[in_node][out_node]['false_positive']=1
+
+ except:
+ raise
+ # print "json "+filename.split('.')[0]+'.mapping.json'+" not found. exiting."
+
+ print hinge_list
+
+ print str(mapped_nodes)+" out of " +str(len(g.nodes()))+" nodes mapped."
+
+ # for i in range(5):
+ # merge_simple_path(g)
+ # degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ # print Counter(degree_sequence)
+
+ in_hinges = set()
+ out_hinges = set()
+ num_iter=10000
+ iter_done=0
+ if hinge_list != None:
+ print "Found hinge list."
+ with open(hinge_list,'r') as f:
+ for lines in f:
+ lines1=lines.split()
+
+ if lines1[2] == '1':
+ in_hinges.add(lines1[0]+'_0')
+ out_hinges.add(lines1[0]+'_1')
+ elif lines1[2] == '-1':
+ in_hinges.add(lines1[0]+'_1')
+ out_hinges.add(lines1[0]+'_0')
+
+ print str(len(in_hinges))+' hinges found.'
+
+ for node in g.nodes():
+ if node in in_hinges and node in out_hinges:
+ g.node[node]['hinge']=100
+ elif node in in_hinges:
+ g.node[node]['hinge']=10
+ elif node in out_hinges:
+ g.node[node]['hinge']=-10
+ else:
+ g.node[node]['hinge']=0
+
+ while len(g.nodes()) > n_nodes and iter_done < num_iter :
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+ iter_done+=1
+ # print iter_done
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ base_node=node.split("_")[0]
+            orientation = node.split("_")[1]
+            # if orientation=='1':
+ # node2=base_node+'_0'
+ # else:
+ # node2=base_node+'_1'
+
+ # print node,node2
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+
+ if g.node[node]['hinge']==0 and g.node[in_node]['hinge']==0 and g.node[out_node]['hinge']==0:
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ bad_node=False
+ # print g.in_edges(node)
+ # print g.edge[g.in_edges(node)[0][0]][g.in_edges(node)[0][1]]
+ # print g.out_edges(node)
+ for in_edge in g.in_edges(node):
+ if g.edge[in_edge[0]][in_edge[1]]['false_positive']==1:
+ bad_node=True
+ for out_edge in g.out_edges(node):
+ if g.edge[out_edge[0]][out_edge[1]]['false_positive']==1:
+ bad_node=True
+ if not bad_node:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+
+ # print g.edge[edge1[0]][edge1[1]]['hinge_edge']
+
+ for nd in g.nodes():
+ if len(nd.split("_"))==1:
+ print nd + " in trouble"
+ # in_node = g.in_edges(node2)[0][0]
+ # out_node = g.out_edges(node2)[0][1]
+ # if g.node[node2]['hinge']==0 and g.node[in_node]['hinge']==0 and g.node[out_node]['hinge']==0:
+ # if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ # if in_node != node2 and out_node != node2 and in_node != out_node:
+ # bad_node=False
+ # for in_edge in g.in_edges(node2):
+ # if g.edge[in_edge]==1:
+ # bad_node=True
+ # for out_edge in g.out_edges(node2):
+ # if g.edge[out_edge]==1:
+ # bad_node=True
+ # if not bad_node:
+ # #print in_node, node, out_node
+ # merge_path(g,in_node,node2,out_node)
+
+
+ # for nd in g.nodes():
+ # print nd
+
+ else:
+ while len(g.nodes()) > n_nodes:
+
+ node = g.nodes()[random.randrange(len(g.nodes()))]
+
+
+
+ if g.in_degree(node) == 1 and g.out_degree(node) == 1:
+
+ # assert g.in_degree(node2) == 1 and g.out_degree(node2) == 1
+ # edge_1 = g.out_edges(node)[0]
+ # edge_2 = g.in_edges(node)[0]
+
+ edge1 = g.out_edges(node)[0]
+ edge2 = g.in_edges(node)[0]
+
+ # print g.edge[edge1[0]][edge1[1]]['hinge_edge']
+
+ if (g.edge[edge1[0]][edge1[1]]['hinge_edge'] == -1 and g.edge[edge2[0]][edge2[1]]['hinge_edge'] == -1):
+
+ in_node = g.in_edges(node)[0][0]
+ out_node = g.out_edges(node)[0][1]
+ if g.out_degree(in_node) == 1 and g.in_degree(out_node) == 1:
+ if in_node != node and out_node != node and in_node != out_node:
+ #print in_node, node, out_node
+ merge_path(g,in_node,node,out_node)
+
+
+
+
+
+
+
+
+ degree_sequence=sorted(nx.degree(g).values(),reverse=True)
+ print Counter(degree_sequence)
+
+
+ nx.write_graphml(g, filename.split('.')[0]+'.sparse3.graphml')
+
+ print nx.number_weakly_connected_components(g)
+ print nx.number_strongly_connected_components(g)
+
+
+if __name__ == "__main__":
+ filename = sys.argv[1]
+ try :
+ hinge_list=sys.argv[3]
+ print "Found hinge list."
+ except:
+ hinge_list=None
+ print "in except "+hinge_list
+
+ de_clip(filename, int(sys.argv[2]),hinge_list, sys.argv[4])
+
+
+
+
+
diff --git a/scripts/repeat_annotate_reads.py b/scripts/repeat_annotate_reads.py
new file mode 100644
index 0000000..ecfeae7
--- /dev/null
+++ b/scripts/repeat_annotate_reads.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+import sys
+import os
+
+
+
+def reverse_complement(bases):
+ rev_comp={'A':'T','C':'G','G':'C','T':'A','N':'N'}
+ return ''.join(map(lambda x :rev_comp[x],bases[::-1]))
+
+def run(multifasta_path,intermediate_repeat_file_path, gt_file_path, gt_annotated_file_path):
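+    # Three steps: (1) read the reference chromosomes from the multifasta file,
+    # (2) run `mummer -maxmatch -b -c -l 1000` of the reference against itself to find
+    # repeats of at least 1000 bp (forward and reverse-complement) and sanity-check
+    # them against the sequence, (3) copy the ground-truth file, appending a 0/1
+    # column that flags reads whose alignment interval intersects a repeat.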
+
+ ##Read chromosomes from multifasta file
+ chrom={}
+ cur_chrom=''
+ start=True
+ chr_num=0
+ with open(multifasta_path,'r') as f:
+ for lines in f:
+ if lines[0]=='>':
+
+ if start:
+ start=False
+ chr_num=int(lines.split()[0][1:])-1
+ print 'detect chr '+ str(chr_num)
+ else:
+ chrom[chr_num]=cur_chrom
+ print len(cur_chrom)
+ chr_num=int(lines.split()[0][1:])-1
+ print 'detect chr '+ str(chr_num)
+ cur_chrom=''
+ else:
+ cur_chrom+=lines.strip()
+ print len(cur_chrom)
+ chrom[chr_num]=cur_chrom
+
+ ##Run mummer to get repeats
+ mummer_cmd='mummer -maxmatch -b -c -l 1000 -L '+multifasta_path+' '+multifasta_path +' > '+intermediate_repeat_file_path
+ os.system(mummer_cmd)
+
+
+ #Put repeats discovered by mummer in right form
+ chr_num=0
+ chr_repeats={}
+ rev_com=False
+ with open (intermediate_repeat_file_path) as f:
+ for line in f:
+ if line[0]=='>':
+ line1=line.strip().split()
+ chr_num=int(line1[1])-1
+ #print len(line1)
+ if len(line1)==6:
+ rev_com=True
+ chr_len=int(line1[5])
+ else:
+ rev_com=False
+ chr_len=int(line1[4])
+ else:
+ line1=line.strip().split()
+ chr2_num=int(line1[0])-1
+ chr2_start=int(line1[1])-1
+ chr1_start=int(line1[2])-1
+ rep_len=int(line1[3])
+ if not rev_com:
+ if not (chrom[chr_num][chr1_start:chr1_start+rep_len]
+ ==chrom[chr2_num][chr2_start:chr2_start+rep_len]):
+ print chr_num+1,line
+ if chr1_start==0 and rep_len==chr_len:
+ continue
+ chr_repeats.setdefault(chr_num,[]).append((chr1_start,chr1_start+rep_len))
+ else:
+ if not (chrom[chr_num][chr1_start-rep_len+1:chr1_start+1]
+ == reverse_complement(chrom[chr2_num][chr2_start:chr2_start+rep_len])):
+ print chr_num+1,line,rev_com
+ chr_repeats.setdefault(chr_num,[]).append((chr1_start-rep_len+1,chr1_start+1))
+
+ #Go through gt file and annotate reads that intersect with repeats.
+ with open(gt_file_path) as f:
+ with open(gt_annotated_file_path,'w') as g:
+ for line in f:
+ line1=line.split()
+ cr=int(line1[1])
+ rd_st=int(line1[2])
+ rd_end=int(line1[3])
+ is_repeat=0
+            for tup in chr_repeats.get(cr,[]):
+ if ((rd_st >= tup[0] and rd_st <= tup[1]) or
+ (rd_end >= tup[0] and rd_end <= tup[1])):
+ is_repeat=1
+ line2=line.strip()+"\t"+str(is_repeat)+"\n"
+ g.write(line2)
+
+if __name__ == '__main__':
+ multifasta_path=sys.argv[1]
+ gt_file_path=sys.argv[2]
+ gt_annotated_file_path=sys.argv[3]
+ intermediate_repeat_file_path='./repeats_discovered.txt'
+ if len(sys.argv) > 4:
+ intermediate_repeat_file_path=sys.argv[4]
+    run(multifasta_path,intermediate_repeat_file_path, gt_file_path, gt_annotated_file_path)
diff --git a/scripts/run_mapping.py b/scripts/run_mapping.py
new file mode 100755
index 0000000..e77c5dd
--- /dev/null
+++ b/scripts/run_mapping.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+import sys
+import os
+import subprocess
+from parse_read import *
+from parse_alignment import *
+
+filename,filename2 = sys.argv[1:3]
+alignmentname = sys.argv[3]
+readarg = sys.argv[4]
+
+
+stream = subprocess.Popen(["LA4Awesome", filename, filename2 , alignmentname ,readarg, '-F'],
+ stdout=subprocess.PIPE, bufsize=1)
+
+alignments = parse_alignment2(stream.stdout) # generator
+
+d = {}
+for alignment in alignments:
+ if not d.has_key(alignment[2]):
+ d[alignment[2]] = []
+ d[alignment[2]].append([alignment[0],alignment[3],alignment[4], alignment[6], alignment[7], alignment[1]])
+
+#print d
+
+mapping = {}
+
+for key,value in d.items():
+ value.sort(key = lambda x:x[2]-x[1], reverse=True)
+ aln = value[0]
+
+ if aln[0] == 'n':
+ mapping[str(key)] = (aln[1], aln[2],aln[-1], 0)
+ mapping[str(key)+'\''] = (aln[2], aln[1],aln[-1], 1)
+ else:
+ mapping[str(key)] = (aln[2], aln[1], aln[-1], 1)
+ mapping[str(key)+'\''] = (aln[1], aln[2], aln[-1], 0)
+
+#print mapping
+import ujson
+ujson.dump(mapping,open(filename+'.mapping.json','w'))
diff --git a/scripts/run_mapping2.py b/scripts/run_mapping2.py
new file mode 100755
index 0000000..247b427
--- /dev/null
+++ b/scripts/run_mapping2.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+import sys
+import os
+import subprocess
+from parse_read import *
+from parse_alignment import *
+
+filename,filename2 = sys.argv[1:3]
+alignmentname = sys.argv[3]
+readarg = sys.argv[4]
+k = int(sys.argv[5])
+
+
+stream = subprocess.Popen(["LA4Awesome", filename, filename2 , alignmentname ,readarg],
+ stdout=subprocess.PIPE, bufsize=1)
+
+alignments = parse_alignment2(stream.stdout) # generator
+
+d = {}
+for alignment in alignments:
+ if not d.has_key(alignment[2]):
+ d[alignment[2]] = []
+ d[alignment[2]].append([alignment[0],alignment[3],alignment[4], alignment[6], alignment[7], alignment[1]])
+
+#print d
+
+mapping = {}
+
+for key,value in d.items():
+ value.sort(key = lambda x:x[2]-x[1], reverse=True)
+ alns = value[:k]
+ max_val=alns[0][2]-alns[0][1]
+ for aln in alns:
+ if aln[2]-aln[1] > max_val/2.:
+ if not mapping.has_key(str(key)):
+ mapping[str(key)] = [(aln[1], aln[2],aln[-1], 1-int(aln[0] == 'n'))]
+ # mapping[str(key)+'\''] = [(aln[2], aln[1],aln[-1], int(aln[0] == 'n'))]
+ else:
+ mapping[str(key)].append((aln[1], aln[2],aln[-1], 1-int(aln[0] == 'n')))
+ # mapping[str(key)+'\''].append((aln[2], aln[1],aln[-1], int(aln[0] == 'n')))
+
+#print mapping
+import ujson
+ujson.dump(mapping,open(filename2+'.mapping.'+str(k)+'.json','w'))
diff --git a/scripts/run_mapping3.py b/scripts/run_mapping3.py
new file mode 100755
index 0000000..c8e9254
--- /dev/null
+++ b/scripts/run_mapping3.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+import sys
+import os
+import subprocess
+from parse_read import *
+from parse_alignment import *
+
+filename,filename2 = sys.argv[1:3]
+alignmentname = sys.argv[3]
+readarg = sys.argv[4]
+k = int(sys.argv[5])
+
+
+stream = subprocess.Popen(["LA4Awesome", filename, filename2 , alignmentname ,readarg],
+ stdout=subprocess.PIPE, bufsize=1)
+
+alignments = parse_alignment2(stream.stdout) # generator
+
+d = {}
+for alignment in alignments:
+ if not d.has_key(alignment[2]):
+ d[alignment[2]] = []
+ d[alignment[2]].append([alignment[0],alignment[3],alignment[4], alignment[6], alignment[7], alignment[1]])
+
+#print d
+
+mapping = {}
+
+for key,value in d.items():
+ value.sort(key = lambda x:x[2]-x[1], reverse=True)
+    alns = value[:k]
+ if len(alns) > 0:
+ alns = [item for item in alns if (item[2] - item[1]) > (alns[0][2] - alns[0][1])/2]
+ for aln in alns:
+ if not mapping.has_key(str(key)):
+ mapping[str(key)] = [(aln[1], aln[2],aln[-1], 1-int(aln[0] == 'n'))]
+ mapping[str(key)+'\''] = [(aln[2], aln[1],aln[-1], int(aln[0] == 'n'))]
+ else:
+ mapping[str(key)].append((aln[1], aln[2],aln[-1], 1-int(aln[0] == 'n')))
+ mapping[str(key)+'\''].append((aln[2], aln[1],aln[-1], int(aln[0] == 'n')))
+
+#print mapping
+import ujson
+ujson.dump(mapping,open(filename2+'.mapping.json','w'))
diff --git a/scripts/run_parse_alignment.py b/scripts/run_parse_alignment.py
new file mode 100755
index 0000000..52645da
--- /dev/null
+++ b/scripts/run_parse_alignment.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+import sys
+import os
+import subprocess
+from parse_read import *
+from parse_alignment import *
+
+filename = sys.argv[1]
+readarg = sys.argv[2]
+
+
+stream = subprocess.Popen(["LAshow", filename , filename ,readarg],
+ stdout=subprocess.PIPE, bufsize=1)
+
+alignments = parse_alignment(stream.stdout) # generator
+
+for alignment in alignments:
+ print alignment
diff --git a/scripts/run_parse_read.py b/scripts/run_parse_read.py
new file mode 100755
index 0000000..ff6f546
--- /dev/null
+++ b/scripts/run_parse_read.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import subprocess
+from parse_read import *
+
+filename = sys.argv[1]
+readarg = sys.argv[2]
+
+
+stream = subprocess.Popen(["DBshow", filename ,readarg],
+ stdout=subprocess.PIPE,bufsize=1)
+
+reads = parse_read(stream.stdout) # generator
+
+for read in reads:
+ print read
+
+#print result
diff --git a/scripts/unitig.py b/scripts/unitig.py
new file mode 100755
index 0000000..98a7580
--- /dev/null
+++ b/scripts/unitig.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+import networkx as nx
+import sys
+import itertools
+
+filename = sys.argv[1]
+outfile = filename.split('.')[0] + ".edges.list"
+
+g = nx.read_graphml(filename)
+print nx.info(g)
+
+
+def get_circle(g,node,vertices_of_interest):
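+    # Follow unique successors from `node` until we return to it, collecting the
+    # cycle; every vertex on the way is expected to have exactly one successor.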
+ cur_path = [node]
+ cur_vertex = g.successors(node)[0]
+
+ i = 0
+ while cur_vertex != node:
+ cur_path.append(cur_vertex)
+ try:
+ assert len(g.successors(cur_vertex)) == 1
+ except:
+ print g.successors(cur_vertex), cur_vertex, node
+ print cur_vertex in vertices_of_interest
+ raise
+ successor = g.successors(cur_vertex)[0]
+ cur_vertex = successor
+
+ cur_path.append(cur_vertex)
+
+
+
+ return cur_path
+
+
+def get_unitigs(g):
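+    # Extract unitigs (maximal unbranched paths): start a path at every vertex whose
+    # in- or out-degree differs from 1 and extend it until the next such vertex; any
+    # nodes still unused afterwards lie on isolated cycles and are recovered with
+    # get_circle.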
+ paths = []
+ num_paths = 0
+ node_set = set(g.nodes())
+
+
+ vertices_of_interest = set([x for x in g if g.in_degree(x) != 1 or g.out_degree(x) != 1])
+ vertices_used = set(vertices_of_interest)
+ for start_vertex in vertices_of_interest:
+ first_out_vertices = g.successors(start_vertex)
+ print first_out_vertices
+ for vertex in first_out_vertices:
+ cur_path = [start_vertex]
+ cur_vertex = vertex
+ while cur_vertex not in vertices_of_interest:
+ successor = g.successors(cur_vertex)[0]
+ cur_path.append(cur_vertex)
+ predecessor = cur_vertex
+ cur_vertex = successor
+
+ cur_path.append(cur_vertex)
+ vertices_used = vertices_used.union(set(cur_path))
+ paths.append(cur_path)
+
+ print len(node_set)
+ print len(vertices_used)
+
+ while len(node_set-vertices_used) > 0:
+ node = list(node_set-vertices_used)[0]
+ # print list(node_set-vertices_used)
+ # # print vertices_of_interest
+ # # print len(node_set-vertices_used)
+ # break
+ path = get_circle(g, node, vertices_of_interest)
+ vertices_used = vertices_used.union(set(path))
+ if len(path) > 1:
+ paths.append(path)
+ print len(paths)
+ # print paths
+ print "paths"
+ return paths
+
+
+paths = get_unitigs(g)
+
+print len(paths)
+
+h = nx.DiGraph()
+for i, path in enumerate(paths):
+ h.add_node(i)
+ h.node[i]['path'] = path
+
+vertices_of_interest = set([x for x in g if g.in_degree(x) != 1 or g.out_degree(x) != 1])
+for vertex in vertices_of_interest:
+ successors = [x for x in h.nodes() if h.node[x]['path'][0] == vertex]
+ predecessors = [x for x in h.nodes() if h.node[x]['path'][-1] == vertex]
+ print successors,predecessors
+ assert len(successors)==1 or len(predecessors)==1
+ for succ, pred in itertools.product(successors,predecessors):
+ h.add_edge(pred,succ)
+
+ # if vertex.split('_') == '0':
+ # if len(predecessors) == 1:
+ # for succ in successors:
+ # rel_suc = h.node[succ]['path'][1]
+ # d = g.get_edge_data(vertex,rel_suc)
+ # h.edge[predecessors[0]][succ]['start_pos'] = d['read_a_end']
+ # h.edge[predecessors[0]][succ]['weight'] = d['read_a_end_raw'] - d['read_a_start']
+ # if len(successors) == 1:
+ # for pred in predecessors:
+ # rel_pred = h.node[pred]['path'][-2]
+ # d = g.get_edge_data(rel_pred,vertex)
+ # h.edge[predecessors[0]][succ][''] = d['read_a_end_raw'] - d['read_a_start']
+
+
+with open(outfile, 'w') as f:
+ for i,path in enumerate(paths):
+ f.write('>Unitig%d\n'%(i))
+ for j in range(len(path)-1):
+ nodeA = path[j].lstrip("B")
+ nodeB = path[j+1].lstrip("B")
+
+ d = g.get_edge_data(path[j],path[j+1])
+
+ f.write('%s %s %s %s %d %d %d %d %d\n'%(nodeA.split('_')[0],nodeA.split('_')[1] , nodeB.split('_')[0],
+ nodeB.split('_')[1], -d['read_a_start_raw'] + d['read_a_end_raw'] - d['read_b_start_raw'] + d['read_b_end_raw'],
+ d['read_a_start_raw'], d['read_a_end_raw'], d['read_b_start_raw'], d['read_b_end_raw']))
+
+
+
+ f.close()
+
+
+
+
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
new file mode 100644
index 0000000..271b6e2
--- /dev/null
+++ b/src/CMakeLists.txt
@@ -0,0 +1,47 @@
+cmake_minimum_required(VERSION 3.2)
+
+#if (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
+# set(WINDOWS TRUE)
+#elseif (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+# set(LINUX TRUE)
+# message( "Linux Detected, using gcc48")
+# set(CMAKE_C_COMPILER /usr/bin/gcc-4.8)
+# set(CMAKE_CXX_COMPILER /usr/bin/g++-4.8)
+#elseif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+# set(MACOSX TRUE)
+# set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build")
+# message( "OS X Detected, using gcc49")
+# set(CMAKE_C_COMPILER /usr/local/bin/gcc-4.9)
+# set(CMAKE_CXX_COMPILER /usr/local/bin/g++-4.9)
+# set(CMAKE_INCLUDE_CURRENT_DIR ON)
+# set(CMAKE_AUTOMOC ON)
+#endif()
+
+
+
+set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++11")
+set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fopenmp")
+set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
+
+include_directories(include)
+
+## Libraries
+
+add_subdirectory(lib lib)
+
+add_subdirectory(spdlog)
+
+#add_subdirectory(ogdf)
+
+
+## Executables
+
+add_subdirectory(filter filter)
+
+add_subdirectory(consensus consensus)
+
+add_subdirectory(layout layout)
+
+## Tests
+
+add_subdirectory(test)
diff --git a/src/consensus/CMakeLists.txt b/src/consensus/CMakeLists.txt
new file mode 100644
index 0000000..9ac757f
--- /dev/null
+++ b/src/consensus/CMakeLists.txt
@@ -0,0 +1,10 @@
+cmake_minimum_required(VERSION 3.2)
+
+add_executable(draft_assembly draft)
+target_link_libraries(draft_assembly LAInterface ini falcon spdlog)
+
+add_executable(consensus consensus.cpp)
+target_link_libraries(consensus LAInterface falcon ini)
+
+add_executable(io io_base)
+target_link_libraries(io LAInterface ini spdlog)
diff --git a/src/consensus/consensus.cpp b/src/consensus/consensus.cpp
new file mode 100644
index 0000000..86efdd7
--- /dev/null
+++ b/src/consensus/consensus.cpp
@@ -0,0 +1,230 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include <iostream>
+#include <fstream>
+#include <tuple>
+#include <string>
+#include <algorithm>
+#include <map>
+#include <unordered_map>
+extern "C" {
+#include "common.h"
+}
+#include "INIReader.h"
+
+
+#define LAST_READ_SYMBOL '$'
+
+bool compare_overlap(LAlignment * ovl1, LAlignment * ovl2) {
+ return ((ovl1->aepos - ovl1->abpos + ovl1->bepos - ovl1->bbpos) > (ovl2->aepos - ovl2->abpos + ovl2->bepos - ovl2->bbpos));
+}
+
+
+static int ORDER(const void *l, const void *r) {
+ int x = *((int32 *) l);
+ int y = *((int32 *) r);
+ return (x - y);
+}
+
+
+static char ToU[4] = { 'A', 'C', 'G', 'T' };
+
+int main(int argc, char *argv[]) {
+
+ std::string name_db1 = std::string(argv[1]);
+ std::string name_db2 = std::string(argv[2]);
+ std::string name_las = std::string(argv[3]);
+ char * name_out = argv[4];
+ char * name_config = argv[5];
+
+ std::ofstream out(name_out);
+
+ INIReader reader(name_config);
+
+ if (reader.ParseError() < 0) {
+ std::cout << "Can't load "<<name_config<<std::endl;
+ return 1;
+ }
+
+
+ int LENGTH_THRESHOLD = reader.GetInteger("consensus", "min_length", -1);
+ printf("length threshold:%d\n", LENGTH_THRESHOLD);
+ //std::cout<<name_db1 << " " << name_db2 << " " << name_las <<std::endl;
+
+ LAInterface la;
+ //std::cout << "hello" << std::endl;
+ Read *test_read;
+
+ la.openDB2(name_db1, name_db2);
+
+ int n_reads = la.getReadNumber2();
+ int n_contigs = la.getReadNumber();
+
+ std::cout<<"# Contigs:" << n_contigs << std::endl;
+ std::cout<<"# Reads:" << n_reads << std::endl;
+
+
+ la.openAlignmentFile(name_las);
+
+ int n_alns = la.getAlignmentNumber();
+
+ std::cout<<"# Alignments:" << n_alns << std::endl;
+
+
+ std::vector<LAlignment *> res;
+ la.resetAlignment();
+ la.getAlignment(res, 0, n_alns); // get all alignments
+
+
+ std::vector<std::vector<LAlignment *>> idx;
+
+ printf("%d\n", res.size());
+
+ for (int i = 0; i < n_contigs; i++)
+ idx.push_back(std::vector<LAlignment *>());
+
+ for (int i = 0; i < n_alns; i++) {
+ idx[res[i]->read_A_id_].push_back(res[i]);
+ }
+
+ for (int i = 0; i < n_contigs; i++) {
+ std::sort(idx[i].begin(), idx[i].end(), compare_overlap);
+ printf("%d %d\n", i, idx[i].size());
+ }
+
+ std::cout << "Getting read lengths" << std::endl;
+ std::vector<Read *> reads_vec;
+ la.getRead(reads_vec, 0, n_contigs);
+ for (int i = 0; i < n_contigs; i++){
+ std::cout << i << "\t" << (reads_vec[i]->bases).size() << std::endl;
+ }
+
+ std::cout << "Building consensus sequences..." << std::endl;
+
+ for (int i = 0; i < n_contigs; i++) {
+
+
+
+ int k = 0;
+ for (k = 0; k < idx[i].size(); k++)
+ if (idx[i][k]->aepos - idx[i][k]->abpos < LENGTH_THRESHOLD)
+ break;
+
+ int seq_count = k;
+
+ std::cout << "Contig " << i << ": " << seq_count << " reads" << std::endl;
+
+ if (seq_count == 0) {
+ out << ">Consensus" << i << std::endl;
+ out << reads_vec[i]->bases << std::endl;
+ continue;
+ }
+
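+        // Per-position voting: contig_base_scores[j] counts votes for A,C,G,T,'-' at
+        // contig position j from every alignment; insertion_score/insertion_base_scores
+        // track single-base insertions between positions; cov_depth is the coverage.
+        // The consensus below emits the majority base unless '-' wins (a deletion), and
+        // inserts the most common inserted base where insertions exceed half the coverage.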
+ std::vector<std::vector<int>> contig_base_scores;
+
+ std::vector<int> insertion_score (idx[i][0]->alen,0);
+ std::vector<std::vector<int>> insertion_base_scores; // handling single insertions only
+
+ std::vector<int> cov_depth (idx[i][0]->alen,0);
+
+ std::vector<int> zero_scores (5,0); // scores for A,C,G,T,- are initialized at 0
+ for (int j = 0; j < idx[i][0]->alen; j++) {
+ contig_base_scores.push_back(zero_scores);
+ insertion_base_scores.push_back(zero_scores);
+ }
+
+ for (int j = 0; j < seq_count ; j ++) {
+
+ la.recoverAlignment(idx[i][j]);
+ std::pair<std::string, std::string> alignment = la.getAlignmentTags(idx[i][j]);
+
+ int pos_in_contig = idx[i][j]->abpos;
+
+ for (int m = 0; m < alignment.first.length(); m++) {
+
+ unsigned int base = -1;
+ switch (alignment.second[m]) {
+ case 'A': base = 0; break;
+ case 'C': base = 1; break;
+ case 'G': base = 2; break;
+ case 'T': base = 3; break;
+ case '-': base = 4; break;
+ }
+
+ if (alignment.first[m] != '-') {
+ contig_base_scores[pos_in_contig][base]++;
+ cov_depth[pos_in_contig]++;
+ pos_in_contig++;
+ }
+ else {
+ insertion_score[pos_in_contig]++;
+ insertion_base_scores[pos_in_contig][base]++;
+ }
+
+ }
+
+ }
+
+ int good_bases = 0;
+ int insertions = 0; // insertion here means that a base is inserted in the consensus
+ int deletions = 0; // deletion here means that the base from the draft is deleted in the consensus
+
+ int consensus_length = 0;
+
+ out << ">Consensus" << i << std::endl;
+
+ for (int j=0; j < idx[i][0]->alen ; j++) {
+
+ unsigned int max_base = 0;
+
+ for (int b=1; b<5; b++) {
+ if (contig_base_scores[j][b] > contig_base_scores[j][max_base]) max_base = b;
+ }
+ if (max_base < 4) {
+ out << ToU[max_base];
+ good_bases++;
+ consensus_length++;
+ }
+ else {
+ deletions++;
+ }
+
+ if (insertion_score[j] > cov_depth[j]/2) {
+ unsigned int max_insertion_base = 0;
+ for (int b=1; b<4; b++) {
+ if (insertion_base_scores[j][b] > insertion_base_scores[j][max_insertion_base]) max_insertion_base = b;
+ }
+ out << ToU[max_insertion_base];
+ consensus_length++;
+ insertions++;
+ }
+
+ }
+ out << std::endl;
+
+
+ printf("Good bases: %d/%d\n",good_bases,idx[i][0]->alen);
+ printf("Insertions: %d/%d\n",insertions,idx[i][0]->alen);
+ printf("Deletions: %d/%d\n",deletions,idx[i][0]->alen);
+ printf("Consensus length: %d\n",consensus_length);
+
+
+ }
+
+
+
+
+ la.closeDB(); //close database*/
+ return 0;
+}
diff --git a/src/consensus/draft.cpp b/src/consensus/draft.cpp
new file mode 100644
index 0000000..a4bc8e1
--- /dev/null
+++ b/src/consensus/draft.cpp
@@ -0,0 +1,992 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <unordered_map>
+#include <algorithm>
+#include <fstream>
+#include <sstream>
+#include <iostream>
+#include <set>
+#include <omp.h>
+#include <tuple>
+#include <iomanip>
+
+#include "spdlog/spdlog.h"
+#include "cmdline.h"
+#include "INIReader.h"
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include <utility>
+#include <boost/graph/adjacency_list.hpp>
+#include <boost/graph/connected_components.hpp>
+
+extern "C" {
+#include "common.h"
+}
+
+
+#define LAST_READ_SYMBOL '$'
+
+#define HINGED_EDGE 1
+#define UNHINGED_EDGE -1
+#define REVERSE_COMPLEMENT_MATCH 1
+#define SAME_DIRECTION_MATCH 0
+
+using namespace boost;
+
+typedef adjacency_list <vecS, vecS, undirectedS> Graph;
+typedef std::tuple<Node, Node, int> Edge_w;
+typedef std::pair<Node, Node> Edge_nw;
+
+
+static int ORDER(const void *l, const void *r) {
+ int x = *((int32 *) l);
+ int y = *((int32 *) r);
+ return (x - y);
+}
+
+
+
+std::vector<int> get_mapping(std::string aln_tag1, std::string aln_tag2) {
+ int pos = 0;
+ int count = 0;
+ int count2 = 0;
+
+ std::vector<int> ret;
+ while (pos < aln_tag1.size()) {
+ if (aln_tag1[pos] != '-') {
+ ret.push_back(count2);
+ count ++;
+ }
+ if (aln_tag2[pos] != '-') {
+ count2 ++;
+ }
+ pos++;
+ }
+ return ret;
+}
+
+
+
+std::string reverse_complement(std::string seq) {
+ static std::map<char, char> m = {{'a','t'}, {'c','g'}, {'g','c'}, {'t','a'}, {'A','T'}, {'C','G'}, {'T','A'}, {'G','C'}, {'n','n'}, {'N', 'N'}, {'-', '-'}};
+ std::reverse(seq.begin(), seq.end());
+ for (int i = 0; i < seq.size(); i++) {
+ seq[i] = m[seq[i]];
+ }
+ return seq;
+}
+
+
+
+std::ostream& operator<<(std::ostream& out, const MatchType value){
+ static std::map<MatchType, std::string> strings;
+ if (strings.size() == 0){
+#define INSERT_ELEMENT(p) strings[p] = #p
+ INSERT_ELEMENT(FORWARD);
+ INSERT_ELEMENT(BACKWARD);
+ INSERT_ELEMENT(ACOVERB);
+ INSERT_ELEMENT(BCOVERA);
+ INSERT_ELEMENT(INTERNAL);
+ INSERT_ELEMENT(UNDEFINED);
+ INSERT_ELEMENT(NOT_ACTIVE);
+#undef INSERT_ELEMENT
+ }
+ return out << strings[value];
+}
+
+std::vector<std::string> &split(const std::string &s, char delim, std::vector<std::string> &elems) {
+ std::stringstream ss(s);
+ std::string item;
+ while (std::getline(ss, item, delim)) {
+ elems.push_back(item);
+ }
+ return elems;
+}
+
+
+std::vector<std::string> split(const std::string &s, char delim) {
+ std::vector<std::string> elems;
+ split(s, delim, elems);
+ return elems;
+}
+
+
+
+bool compare_overlap(LOverlap * ovl1, LOverlap * ovl2) {
+ return ((ovl1->read_A_match_end_ - ovl1->read_A_match_start_
+ + ovl1->read_B_match_end_ - ovl1->read_B_match_start_) >
+ (ovl2->read_A_match_end_ - ovl2->read_A_match_start_
+ + ovl2->read_B_match_end_ - ovl2->read_B_match_start_));
+}
+
+
+bool compare_overlap_weight(LOverlap * ovl1, LOverlap * ovl2) {
+ return (ovl1->weight > ovl2->weight);
+}
+
+
+bool compare_overlap_abpos(LOverlap * ovl1, LOverlap * ovl2) {
+ return ovl1->read_A_match_start_ < ovl2->read_A_match_start_;
+}
+
+bool compare_overlap_aepos(LOverlap * ovl1, LOverlap * ovl2) {
+ return ovl1->read_A_match_start_ > ovl2->read_A_match_start_;
+}
+
+
+int main(int argc, char *argv[]) {
+
+ cmdline::parser cmdp;
+ cmdp.add<std::string>("db", 'b', "db file name", false, "");
+ cmdp.add<std::string>("las", 'l', "las file name", false, "");
+ cmdp.add<std::string>("paf", 'p', "paf file name", false, "");
+ cmdp.add<std::string>("config", 'c', "configuration file name", false, "");
+ cmdp.add<std::string>("fasta", 'f', "fasta file name", false, "");
+ cmdp.add<std::string>("prefix", 'x', "(intermediate output) input file prefix", true, "");
+ cmdp.add<std::string>("out", 'o', "final output file name", true, "");
+ cmdp.add<std::string>("log", 'g', "log folder name", false, "log");
+ cmdp.add<std::string>("path", 0, "path file name", false, "path");
+ cmdp.add("debug", '\0', "debug mode");
+
+// cmdp.add<std::string>("restrictreads",'r',"restrict to reads in the file",false,"");
+
+
+ cmdp.parse_check(argc, argv);
+
+ LAInterface la;
+ const char *name_db = cmdp.get<std::string>("db").c_str(); //.db file of reads to load
+ const char *name_las = cmdp.get<std::string>("las").c_str();//.las file of alignments
+ const char *name_paf = cmdp.get<std::string>("paf").c_str();
+ const char *name_fasta = cmdp.get<std::string>("fasta").c_str();
+ const char *name_config = cmdp.get<std::string>("config").c_str();//name of the configuration file, in INI format
+ std::string out = cmdp.get<std::string>("prefix");
+ std::string out_name = cmdp.get<std::string>("out");
+ std::string path_name = cmdp.get<std::string>("path");
+// const char * name_restrict = cmdp.get<std::string>("restrictreads").c_str();
+
+
+ std::string name_mask = out + ".mas";
+ std::string name_max = out + ".max";
+ std::string name_homo = out + ".homologous.txt";
+ std::string name_rep = out + ".repeat.txt";
+ std::string name_hg = out + ".hinges.txt";
+ std::string name_cov = out + ".coverage.txt";
+ std::string name_garbage = out + ".garbage.txt";
+ std::string name_contained = out + ".contained.txt";
+ std::string name_deadend = out_name + ".deadends.txt";
+
+
+ std::ofstream deadend_out(name_deadend);
+ std::ofstream maximal_reads(name_max);
+ std::ofstream garbage_out(name_garbage);
+ std::ofstream contained_out(name_contained);
+ std::ifstream homo(name_homo);
+ std::vector<int> homo_reads;
+
+
+ bool delete_telomere = false; // TODO: command line option to set this true
+
+ int read_id;
+ while (homo >> read_id) homo_reads.push_back(read_id);
+
+
+ namespace spd = spdlog;
+
+ //auto console = spd::stdout_logger_mt("console");
+ std::vector<spdlog::sink_ptr> sinks;
+ sinks.push_back(std::make_shared<spdlog::sinks::stdout_sink_st>());
+ sinks.push_back(
+ std::make_shared<spdlog::sinks::daily_file_sink_st>(cmdp.get<std::string>("log") + "/log", "txt", 23, 59));
+ auto console = std::make_shared<spdlog::logger>("log", std::begin(sinks), std::end(sinks));
+ spdlog::register_logger(console);
+
+ console->info("draft consensus");
+
+ if (cmdp.exist("debug")) {
+ char *buff = (char *) malloc(sizeof(char) * 2000);
+ getwd(buff);
+ console->info("current user {}, current working directory {}", getlogin(), buff);
+ free(buff);
+ }
+
+ console->info("name of db: {}, name of .las file {}", name_db, name_las);
+ console->info("name of fasta: {}, name of .paf file {}", name_fasta, name_paf);
+ console->info("filter files prefix: {}", out);
+ console->info("output prefix: {}", out_name);
+
+
+ std::ifstream ini_file(name_config);
+ std::string str((std::istreambuf_iterator<char>(ini_file)),
+ std::istreambuf_iterator<char>());
+
+ console->info("Parameters passed in \n{}", str);
+
+ if (strlen(name_db) > 0)
+ la.openDB(name_db);
+
+
+ if (strlen(name_las) > 0)
+ la.openAlignmentFile(name_las);
+
+ int64 n_aln = 0;
+
+ if (strlen(name_las) > 0) {
+ n_aln = la.getAlignmentNumber();
+ console->info("Load alignments from {}", name_las);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ int n_read;
+ if (strlen(name_db) > 0)
+ n_read = la.getReadNumber();
+
+ std::vector<Read *> reads; //Vector of pointers to all reads
+
+ if (strlen(name_fasta) > 0) {
+ n_read = la.loadFASTA(name_fasta, reads);
+ }
+
+ console->info("# Reads: {}", n_read); // output some statistics
+
+ std::vector<LOverlap *> aln;//Vector of pointers to all alignments
+
+ if (strlen(name_las) > 0) {
+ la.resetAlignment();
+ la.getOverlap(aln, 0, n_aln);
+ }
+
+ if (strlen(name_paf) > 0) {
+ n_aln = la.loadPAF(std::string(name_paf), aln);
+ console->info("Load alignments from {}", name_paf);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ if (n_aln == 0) {
+ console->error("No alignments!");
+ return 1;
+ }
+
+
+ if (strlen(name_db) > 0) {
+ la.getRead(reads, 0, n_read);
+ }
+
+ console->info("Input data finished");
+
+ INIReader reader(name_config);
+
+ if (reader.ParseError() < 0) {
+ console->warn("Can't load {}", name_config);
+ return 1;
+ }
+
+ int LENGTH_THRESHOLD = int(reader.GetInteger("filter", "length_threshold", -1));
+ double QUALITY_THRESHOLD = reader.GetReal("filter", "quality_threshold", 0.0);
+ int N_ITER = (int) reader.GetInteger("filter", "n_iter", -1);
+ int ALN_THRESHOLD = (int) reader.GetInteger("filter", "aln_threshold", -1);
+ int MIN_COV = (int) reader.GetInteger("filter", "min_cov", -1);
+ int CUT_OFF = (int) reader.GetInteger("filter", "cut_off", -1);
+ int THETA = (int) reader.GetInteger("filter", "theta", -1);
+ int THETA2 = (int) reader.GetInteger("filter", "theta2", 0);
+ int N_PROC = (int) reader.GetInteger("running", "n_proc", 4);
+ int HINGE_SLACK = (int) reader.GetInteger("layout", "hinge_slack", 1000);
+ //This is the amount by which a forward overlap
+ //must be longer than a forward internal overlap to be preferred while
+ //building a graph.
+ int HINGE_TOLERANCE = (int) reader.GetInteger("layout", "hinge_tolerance", 150);
+ //This is how far an overlap must start from a hinge to be considered an internal
+ //overlap.
+ int KILL_HINGE_OVERLAP_ALLOWANCE = (int) reader.GetInteger("layout", "kill_hinge_overlap", 300);
+ int KILL_HINGE_INTERNAL_ALLOWANCE = (int) reader.GetInteger("layout", "kill_hinge_internal", 40);
+
+ int MATCHING_HINGE_SLACK = (int) reader.GetInteger("layout", "matching_hinge_slack", 200);
+
+ int NUM_EVENTS_TELOMERE = (int) reader.GetInteger("layout", "num_events_telomere", 7);
+
+ int MIN_CONNECTED_COMPONENT_SIZE = (int) reader.GetInteger("layout", "min_connected_component_size", 8);
+
+
+ int MIN_COV2 = reader.GetInteger("draft", "min_cov", -1);
+ int EDGE_TRIM = reader.GetInteger("draft", "trim", -1);
+ int EDGE_SAFE = reader.GetInteger("draft", "edge_safe", -1);
+ int TSPACE = reader.GetInteger("draft", "tspace", -1);
+ int STEP = reader.GetInteger("draft", "step", -1);
+
+ console->info("LENGTH_THRESHOLD = {}", LENGTH_THRESHOLD);
+ console->info("QUALITY_THRESHOLD = {}", QUALITY_THRESHOLD);
+ console->info("ALN_THRESHOLD = {}", ALN_THRESHOLD);
+ console->info("MIN_COV = {}", MIN_COV);
+ console->info("CUT_OFF = {}", CUT_OFF);
+ console->info("THETA = {}", THETA);
+ console->info("N_ITER = {}", N_ITER);
+ console->info("THETA2 = {}", THETA2);
+ console->info("N_PROC = {}", N_PROC);
+ console->info("HINGE_SLACK = {}", HINGE_SLACK);
+ console->info("HINGE_TOLERANCE = {}", HINGE_TOLERANCE);
+ console->info("KILL_HINGE_OVERLAP_ALLOWANCE = {}", KILL_HINGE_OVERLAP_ALLOWANCE);
+ console->info("KILL_HINGE_INTERNAL_ALLOWANCE = {}", KILL_HINGE_INTERNAL_ALLOWANCE);
+ console->info("MATCHING_HINGE_SLACK = {}", MATCHING_HINGE_SLACK);
+ console->info("MIN_CONNECTED_COMPONENT_SIZE = {}", MIN_CONNECTED_COMPONENT_SIZE);
+
+
+ omp_set_num_threads(N_PROC);
+ std::vector<Edge_w> edgelist, edgelist_ms; // save output to edgelist
+ std::vector<std::unordered_map<int, std::vector<LOverlap *> > > idx_ab;
+
+
+ for (int i = 0; i < n_read; i++) {
+ //An initialisation for loop
+ //TODO Preallocate memory. Much more efficient.
+ idx_ab.push_back(std::unordered_map<int, std::vector<LOverlap *> >());
+ }
+
+ for (int i = 0; i < aln.size(); i++) {
+ idx_ab[aln[i]->read_A_id_][aln[i]->read_B_id_] = std::vector<LOverlap *>();
+ }
+
+ for (int i = 0; i < aln.size(); i++) {
+ idx_ab[aln[i]->read_A_id_][aln[i]->read_B_id_].push_back(aln[i]);
+ }
+
+
+ std::unordered_map<int, std::vector<LOverlap *> > idx3; // this is the pileup
+ std::vector<std::set<int> > has_overlap(n_read);
+ std::unordered_map<int, std::unordered_map<int, std::vector<LOverlap *> > > idx;
+
+
+ for (int i = 0; i < n_read; i++) {
+ //has_overlap[i] = std::set<int>();
+ idx3[i] = std::vector<LOverlap *>();
+ }
+
+ //for (int i = 0; i < aln.size(); i++)
+ // if (aln[i]->active)
+ // idx[std::pair<int, int>(aln[i]->aid, aln[i]->bid)] = std::vector<LOverlap *>();
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ idx[aln[i]->read_A_id_][aln[i]->read_B_id_] = std::vector<LOverlap *>();
+ }
+ }
+
+
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ has_overlap[aln[i]->read_A_id_].insert(aln[i]->read_B_id_);
+ }
+ }
+
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ idx3[aln[i]->read_A_id_].push_back(aln[i]);
+ }
+ }
+
+
+ std::cout << "add data" << std::endl;
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ idx[aln[i]->read_A_id_][aln[i]->read_B_id_].push_back(aln[i]);
+ }
+ }
+ std::cout << "add data" << std::endl;
+
+
+ std::string name_input= out + ".edges.list";
+ std::ifstream edges_file(name_input);
+
+ std::string name_output = out_name + ".fasta";
+ std::ofstream out_fa(name_output);
+
+ int num_contig = 0;
+ int num_one_read_contig = 0;
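+    // Each block in <prefix>.edges.list describes one backbone path. Lines with
+    // six or more space-separated fields are edges: read A id, read A strand,
+    // read B id, read B strand, and the combined match length used as the edge
+    // weight (any further fields are ignored here). Strand 0 means the read is
+    // used as stored, anything else means reverse complement. A four-field line
+    // (read id, strand, start, end) is written out directly as a one-read
+    // contig, and a single-field line ends the current block. For example
+    // (hypothetical values), "12 0 57 1 3310 FORWARD" would be an edge and
+    // "12 0 150 8000" a one-read contig built from bases 150-8000 of read 12.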
+ while (true) {
+ if (edges_file.eof()) break;
+ edgelist.clear();
+ std::string edge_line;
+ while (!edges_file.eof()) {
+ std::getline(edges_file, edge_line);
+ //std::cout << edge_line << std::endl;
+
+ std::vector<std::string> tokens = split(edge_line, ' ');
+
+ if (tokens.size() == 1) {
+ break;
+ }
+ //std::cout << tokens.size() << std::endl;
+
+ Node node0;
+ Node node1;
+ int w;
+ if (tokens.size() > 5 ) {
+ node0.id = std::stoi(tokens[0]);
+ node0.strand = std::stoi(tokens[1]);
+
+ node1.id = std::stoi(tokens[2]);
+                node1.strand = std::stoi(tokens[3]);
+
+ w = std::stoi(tokens[4]);
+ edgelist.push_back(std::make_tuple(node0, node1, w));
+ }
+
+
+ if (tokens.size() == 4) {
+ out_fa << ">OneReadContig" << num_one_read_contig << std::endl;
+
+
+
+ int node_id = std::stoi(tokens[0]);
+ int node_strand = std::stoi(tokens[1]);
+ int from = std::stoi(tokens[2]);
+ int to = std::stoi(tokens[3]);
+
+
+ std::string current_seq;
+
+
+ if (node_strand == 0) current_seq = reads[node_id]->bases;
+ else current_seq = reverse_complement(reads[node_id]->bases);
+
+ out_fa << current_seq.substr(from, to-from) << std::endl;
+
+ num_one_read_contig++;
+ }
+ }
+
+ std::cout << "list size:" << edgelist.size() << std::endl;
+ if (edgelist.size() == 0) continue;
+
+
+ std::vector<LAlignment *> full_alns;
+ std::vector<LAlignment *> selected;
+ std::unordered_map<int, std::vector<LAlignment *>> idx_aln;
+ la.resetAlignment();
+ std::vector<int> range;
+
+ for (int i = 0; i < edgelist.size(); i++) {
+ range.push_back(std::get<0>(edgelist[i]).id);
+ idx_aln[std::get<0>(edgelist[i]).id] = std::vector<LAlignment *>();
+ }
+
+ std::sort(range.begin(), range.end());
+
+ la.getAlignment(full_alns, range);
+
+ for (auto i:full_alns) {
+ idx_aln[i->read_A_id_].push_back(i);
+ }
+
+ for (int i = 0; i < edgelist.size(); i++) {
+ int aid = std::get<0>(edgelist[i]).id;
+ int bid = std::get<1>(edgelist[i]).id;
+ bool found = false;
+ for (int j = 0; j < idx_aln[std::get<0>(edgelist[i]).id].size(); j++) {
+ //printf("%d %d %d %d\n",bid, idx_aln[aid][j]->bid, idx_aln[aid][j]->read_A_match_end_ - idx_aln[aid][j]->read_A_match_start_, std::get<2>(edgelist[i]));
+ if ((idx_aln[aid][j]->read_B_id_ == bid) and \
+ (idx_aln[aid][j]->aepos - idx_aln[aid][j]->abpos + idx_aln[aid][j]->bepos - idx_aln[aid][j]->bbpos == std::get<2>(edgelist[i]))) {
+ selected.push_back(idx_aln[aid][j]);
+ found = true;
+ break;
+ }
+ }
+ }
+
+ std::cout << "selected:" << selected.size() << std::endl;
+
+ std::unordered_map<int, std::unordered_map<int, std::pair<std::string, std::string> > > aln_tags_map;
+ std::vector<std::pair<std::string, std::string> > aln_tags_list;
+ std::vector<std::pair<std::string, std::string> > aln_tags_list_true_strand;
+
+
+ for (int i = 0; i < selected.size(); i++) {
+ la.recoverAlignment(selected[i]);
+ //printf("%d %d\n",selected[i]->tlen, selected[i]->trace_pts_len);
+ std::pair<std::string, std::string> res = la.getAlignmentTags(selected[i]);
+ aln_tags_map[selected[i]->read_A_id_][selected[i]->read_B_id_] = res;
+ aln_tags_list.push_back(res);
+ }
+
+
+
+ std::string sequence = "";
+
+ std::vector<LOverlap *> bedges;
+ std::vector<std::string> breads;
+
+ std::vector<std::vector<std::pair<int, int> > > pitfalls;
+
+
+ range.clear();
+ for (int i = 0; i < edgelist.size(); i++) {
+ range.push_back(std::get<0>(edgelist[i]).id);
+ }
+
+ std::vector<std::vector<int> *> coverages;
+
+ for (int i = 0; i < range.size(); i++) {
+ int aread = range[i];
+ if (idx3[aread].size() > 0) {
+ std::vector<int> *res = la.getCoverage(idx3[aread]);
+ std::vector<std::pair<int, int> > *res2 = la.lowCoverageRegions(*res, MIN_COV2);
+ //delete res;
+ coverages.push_back(res);
+ //printf("%d %d: (%d %d) ", i, aread, 0, idx3[aread][0]->alen);
+ //for (int j = 0; j < res2->size(); j++) {
+ // printf("[%d %d] ", res2->at(j).first, res2->at(j).second);
+ //}
+ //printf("\n");
+ pitfalls.push_back(*res2);
+ delete res2;
+ }
+ }
+
+
+ /***
+ * Prepare the data
+ */
+
+ for (int i = 0; i < edgelist.size(); i++) {
+
+ std::vector<LOverlap *> currentalns = idx[std::get<0>(edgelist[i]).id][std::get<1>(edgelist[i]).id];
+
+ LOverlap *currentaln = NULL;
+
+ for (int j = 0; j < currentalns.size(); j++) {
+ //std::cout << std::get<0>(edgelist[i]).id << " " << std::get<1>(edgelist[i]).id << " " << currentalns[j]->match_type_ << std::endl;
+ if (currentalns[j]->read_A_match_end_ - currentalns[j]->read_A_match_start_ + currentalns[j]->read_B_match_end_ - currentalns[j]->read_B_match_start_ ==
+ std::get<2>(edgelist[i]))
+ currentaln = currentalns[j];
+ }
+
+ if (currentaln == NULL) exit(1);
+ //currentaln->show();
+
+ std::string current_seq;
+ std::string next_seq;
+
+ std::string aln_tags1;
+ std::string aln_tags2;
+
+
+ if (std::get<0>(edgelist[i]).strand == 0)
+ current_seq = reads[std::get<0>(edgelist[i]).id]->bases;
+ else
+ current_seq = reverse_complement(reads[std::get<0>(edgelist[i]).id]->bases);
+
+ if (std::get<0>(edgelist[i]).strand == 0) {
+ aln_tags1 = aln_tags_list[i].first;
+ aln_tags2 = aln_tags_list[i].second;
+ } else {
+ aln_tags1 = reverse_complement(aln_tags_list[i].first);
+ aln_tags2 = reverse_complement(aln_tags_list[i].second);
+ }
+
+ aln_tags_list_true_strand.push_back(std::pair<std::string, std::string>(aln_tags1, aln_tags2));
+
+ if (std::get<1>(edgelist[i]).strand == 0)
+ next_seq = reads[std::get<1>(edgelist[i]).id]->bases;
+ else
+ next_seq = reverse_complement(reads[std::get<1>(edgelist[i]).id]->bases);
+
+ int abpos, aepos, alen, bbpos, bepos, blen, aes, aee, bes, bee;
+
+ alen = currentaln->alen;
+ blen = currentaln->blen;
+
+
+ if (std::get<0>(edgelist[i]).strand == 0) {
+ abpos = currentaln->read_A_match_start_;
+ aepos = currentaln->read_A_match_end_;
+
+ aes = currentaln->eff_read_A_start_;
+ aee = currentaln->eff_read_A_end_;
+
+ } else {
+ abpos = alen - currentaln->read_A_match_end_;
+ aepos = alen - currentaln->read_A_match_start_;
+
+ aes = alen - currentaln->eff_read_A_end_;
+ aee = alen - currentaln->eff_read_A_start_;
+ }
+
+ if (((std::get<1>(edgelist[i]).strand == 0))) {
+ bbpos = currentaln->read_B_match_start_;
+ bepos = currentaln->read_B_match_end_;
+
+ bes = currentaln->eff_read_B_start_;
+ bee = currentaln->eff_read_B_end_;
+
+ } else {
+ bbpos = blen - currentaln->read_B_match_end_;
+ bepos = blen - currentaln->read_B_match_start_;
+
+ bes = blen - currentaln->eff_read_B_end_;
+ bee = blen - currentaln->eff_read_B_start_;
+
+ }
+ aes = 0;
+ bes = 0;
+ aee = alen;
+ bee = blen;
+
+// printf("%d %d [[%d %d] << [%d %d]] x [[%d %d] << [%d %d]]\n", std::get<0>(edgelist[i]).id, std::get<1>(edgelist[i]).id, abpos, aepos, aes, aee, bbpos, bepos, bes, bee);
+
+ LOverlap *new_ovl = new LOverlap();
+ new_ovl->read_A_match_start_ = abpos;
+ new_ovl->read_A_match_end_ = aepos;
+ new_ovl->read_B_match_start_ = bbpos;
+ new_ovl->read_B_match_end_ = bepos;
+ new_ovl->eff_read_A_end_ = aee;
+ new_ovl->eff_read_A_start_ = aes;
+ new_ovl->eff_read_B_end_ = bee;
+ new_ovl->eff_read_B_start_ = bes;
+ new_ovl->alen = currentaln->alen;
+ new_ovl->blen = currentaln->blen;
+ new_ovl->read_A_id_ = std::get<0>(edgelist[i]).id;
+ new_ovl->read_B_id_ = std::get<1>(edgelist[i]).id;
+
+
+ bedges.push_back(new_ovl);
+ breads.push_back(current_seq);
+
+
+ }
+ //need to trim the end
+
+
+
+ std::vector<std::vector<int> > mappings;
+ for (int i = 0; i < range.size(); i++) {
+ mappings.push_back(get_mapping(aln_tags_list_true_strand[i].first, aln_tags_list_true_strand[i].second));
+ }
+
+ std::cout << bedges.size() << " " << breads.size() << " " << selected.size() << " "
+ << aln_tags_list.size() << " " << pitfalls.size() << " " << aln_tags_list_true_strand.size()
+ << " " << mappings.size() << " " << coverages.size() << std::endl;
+
+ /*for (int i = 0; i < bedges.size() - 1; i++) {
+ printf("%d %d %d %d %d\n", bedges[i]->read_B_match_start_, bedges[i]->read_B_match_end_, bedges[i+1]->read_A_match_start_, bedges[i+1]->read_A_match_end_, bedges[i]->read_B_match_end_ - bedges[i+1]->read_A_match_start_);
+ }*/
+
+
+        int tspace = TSPACE; // trace-point spacing between lanes, taken from the [draft] tspace config entry
+ int nlane = 0;
+
+
+ //printf("%d %d\n", mappings[0][800], mappings[0][1000]); // debug output
+ //printf("%s\n%s\n", breads[0].substr(bedges[0]->read_A_match_start_ + 800, 50).c_str(),
+ // breads[1].substr(bedges[0]->read_B_match_start_ + mappings[0][800], 50).c_str()); //debug output
+
+
+ std::vector<std::vector<std::pair<int, int>>> lanes;
+
+ std::string draft_assembly = "";
+
+
+ int currentlane = 0;
+ int current_starting_read = 0;
+ int current_starting_space = 1;
+ int current_starting_offset = 0;
+ int n_bb_reads = range.size();
+ std::vector<std::vector<int>> trace_pts(n_bb_reads);
+ bool revert = false;
+
+
+ int rmax = -1;
+ /**
+ * Move forward and put "trace points"
+ */
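+        // Starting from each backbone read, waypoints are dropped every tspace
+        // bases along read A of its edge and projected onto the following reads
+        // through the alignment-derived coordinate maps in `mappings`. The chain
+        // of projected positions across successive reads forms one "lane"; a
+        // lane is only recorded when it reaches at least as far along the
+        // backbone as any earlier lane (tracked by rmax).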
+ while (current_starting_read < n_bb_reads - 1) {
+ int currentread = current_starting_read;
+ int additional_offset = 0;
+ while (bedges[current_starting_read]->read_A_match_start_ + current_starting_space * tspace +
+ current_starting_offset + additional_offset <
+ bedges[current_starting_read]->read_A_match_end_ - EDGE_SAFE) {
+ int waypoint = bedges[current_starting_read]->read_A_match_start_ + tspace * current_starting_space +
+ current_starting_offset + additional_offset;
+ //if ((waypoint - bedges[current_starting_read]->read_A_match_start_) < EDGE_SAFE)
+ // waypoint += EDGE_SAFE;
+
+ //int next_waypoint = mappings[currentread][waypoint - bedges[current_starting_read]->read_A_match_start_] + bedges[current_starting_read]->read_B_match_start_;
+ std::vector<std::pair<int, int> > lane;
+
+ while ((waypoint > bedges[currentread]->read_A_match_start_) and
+ (waypoint < bedges[currentread]->read_A_match_end_)) {
+
+ printf("%d %d\n", currentread, waypoint);
+ trace_pts[currentread].push_back(waypoint);
+
+
+ /*if (waypoint > bedges[currentread]->read_A_match_end_ - EDGE_SAFE) {
+ printf("Reaching the end, neglect low coverage\n");
+ }
+
+ if ((coverages[currentread]->at(waypoint) < MIN_COV2) and (waypoint < bedges[currentread]->read_A_match_end_ - EDGE_SAFE)) {
+ revert = true;
+ printf("Low coverage, revert\n");
+ break;
+ }*/
+
+
+ lane.push_back(std::pair<int, int>(currentread, waypoint));
+ if (currentread > rmax) rmax = currentread;
+ //int previous_wp = waypoint;
+ waypoint = mappings[currentread][waypoint - bedges[currentread]->read_A_match_start_] +
+ bedges[currentread]->read_B_match_start_;
+ //printf("%s\n%s\n", breads[currentread].substr(previous_wp,50).c_str(), breads[currentread+1].substr(waypoint,50).c_str());
+ currentread++;
+ if (currentread >= n_bb_reads) break;
+ }
+ if (currentread < n_bb_reads) if (waypoint < bedges[currentread]->alen) {
+ lane.push_back(std::pair<int, int>(currentread, waypoint));
+ if (currentread > rmax) rmax = currentread;
+ }
+ /*if (revert) {
+ printf("revert\n");
+ revert = false;
+ while (currentread >= current_starting_read) {
+ trace_pts[currentread].pop_back();
+ currentread --;
+ additional_offset += STEP;
+ }
+ currentread = current_starting_read;
+ }
+ else*/
+ {
+ if (currentread >= rmax)
+ lanes.push_back(lane);
+ current_starting_space++;
+ currentread = current_starting_read;
+
+ }
+
+ }
+
+ current_starting_read++;
+ current_starting_space = 1;//get next space;
+ if (trace_pts[current_starting_read].size() == 0)
+ current_starting_offset = 0;
+ else
+ current_starting_offset =
+ trace_pts[current_starting_read].back() - bedges[current_starting_read]->read_A_match_start_;
+ }
+
+
+ /**
+ * Show trace points on reads
+ */
+ for (int i = 0; i < n_bb_reads; i++) {
+ printf("Read %d:", i);
+ for (int j = 0; j < trace_pts[i].size(); j++) {
+ printf("%d ", trace_pts[i][j]);
+ }
+ printf("\n");
+ }
+
+ /**
+ * Show lanes
+ */
+
+ for (int i = 0; i < lanes.size(); i++) {
+
+ printf("Lane %d\n", i);
+ for (int j = 0; j < lanes[i].size(); j++) {
+ printf("[%d %d] ", lanes[i][j].first, lanes[i][j].second);
+ }
+ printf("\n");
+ }
+
+
+        printf("In total %zu lanes\n", lanes.size());
+ if (lanes.size() == 0) {
+ draft_assembly = breads[0];
+ out_fa << ">DraftAssemblyContig" << num_contig << std::endl;
+ out_fa << draft_assembly << std::endl;
+ num_contig++;
+ continue;
+ }
+
+
+
+ /**
+         * Consecutive lanes form a column (ladder)
+ */
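+        // Each pair of consecutive lanes is turned into a "ladder": for every
+        // read shared by the two lanes it stores (read index, position in the
+        // earlier lane, position in the later lane), i.e. the segment of that
+        // read spanning the column between the two lanes.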
+
+ std::vector<std::vector<std::tuple<int, int, int> > > ladders;
+
+ for (int i = 0; i < lanes.size() - 1; i++) {
+ std::vector<std::pair<int, int> > lane1 = lanes[i];
+ std::vector<std::pair<int, int> > lane2 = lanes[i + 1];
+ std::vector<std::tuple<int, int, int> > ladder;
+ int pos = 0;
+ for (int j = 0; j < lane2.size(); j++) {
+ while ((lane1[pos].first != lane2[j].first) and (pos < lane1.size() - 1)) pos++;
+ if ((lane1[pos].first == lane2[j].first))
+ ladder.push_back(std::make_tuple(lane2[j].first, lane1[pos].second, lane2[j].second));
+ }
+ ladders.push_back(ladder);
+ }
+
+
+ /**
+ * show ladders
+ */
+ for (int i = 0; i < ladders.size(); i++) {
+// printf("Ladder %d\n", i);
+// for (int j = 0; j < ladders[i].size(); j++) {
+// //printf("[%d %d-%d] ", std::get<0>(ladders[i][j]), std::get<1>(ladders[i][j]), std::get<2>(ladders[i][j]) );
+// //printf("%s\n", breads[std::get<0>(ladders[i][j])].substr(std::get<1>(ladders[i][j]),std::get<2>(ladders[i][j])-std::get<1>(ladders[i][j])).c_str());
+//
+// }
+
+ if (ladders[i].size() == 0) {
+ printf("low coverage!\n");
+ continue;
+ }
+
+ if (ladders[i].size() > 1) {
+
+
+ int mx = 0;
+ int maxcoverage = 0;
+ for (int j = 0; j < ladders[i].size(); j++) {
+ int mincoverage = 10000;
+ int read = std::get<0>(ladders[i][j]);
+ int start = std::get<1>(ladders[i][j]);
+ int end = std::get<2>(ladders[i][j]);
+ for (int pos = start; pos < end; pos++) {
+ if (coverages[read]->at(pos) < mincoverage) mincoverage = coverages[read]->at(pos);
+ }
+ if (mincoverage > maxcoverage) {
+ maxcoverage = mincoverage;
+ mx = j;
+ }
+ }
+
+// std::cout << "ladder " << i << " num reads " << ladders[i].size() << " possibly error here " <<
+// maxcoverage << "\n!";
+
+
+ //if (ladders[i].size() == 2) {
+ // draft_assembly += breads[std::get<0>(ladders[i][mx])].substr(std::get<1>(ladders[i][mx]),
+ // std::get<2>(ladders[i][mx]) -
+ // std::get<1>(ladders[i][mx]));
+ // continue;
+ // }
+
+
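+                // Use the segment with the highest minimum coverage (index mx,
+                // found above) as the alignment template for this column.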
+ std::string base = breads[std::get<0>(ladders[i][mx])].substr(std::get<1>(ladders[i][mx]),
+ std::get<2>(ladders[i][mx]) -
+ std::get<1>(ladders[i][mx]));;
+ int seq_count = ladders[i].size();
+// printf("seq_count:%d, max %d\n", seq_count, mx);
+ align_tags_t **tags_list;
+ tags_list = (align_tags_t **) calloc(seq_count, sizeof(align_tags_t *));
+ consensus_data *consensus;
+
+ int alen = (std::get<2>(ladders[i][mx]) - std::get<1>(ladders[i][mx]));
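+                // Align every segment of the ladder (bseq) against the template
+                // (aseq) with _align, prepend a 'T' sentinel to both alignment
+                // strings, uppercase them, and turn each alignment into align
+                // tags; get_cns_from_align_tags below then produces the
+                // consensus for this column.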
+ for (int j = 0; j < ladders[i].size(); j++) {
+
+ int blen = (std::get<2>(ladders[i][j]) - std::get<1>(ladders[i][j]));
+ char *aseq = (char *) malloc(
+ (20 + (std::get<2>(ladders[i][mx]) - std::get<1>(ladders[i][mx]))) * sizeof(char));
+ char *bseq = (char *) malloc(
+ (20 + (std::get<2>(ladders[i][j]) - std::get<1>(ladders[i][j]))) * sizeof(char));
+ strcpy(aseq, breads[std::get<0>(ladders[i][mx])].substr(std::get<1>(ladders[i][mx]),
+ std::get<2>(ladders[i][mx]) -
+ std::get<1>(ladders[i][mx])).c_str());
+ strcpy(bseq, breads[std::get<0>(ladders[i][j])].substr(std::get<1>(ladders[i][j]),
+ std::get<2>(ladders[i][j]) -
+ std::get<1>(ladders[i][j])).c_str());
+
+
+ aln_range *arange = (aln_range *) calloc(1, sizeof(aln_range));
+ arange->s1 = 0;
+ arange->e1 = strlen(bseq);
+ arange->s2 = 0;
+ arange->e2 = strlen(aseq);
+ arange->score = 5;
+
+ //printf("blen %d alen%d\n",strlen(bseq), strlen(aseq));
+ //printf("before get tags\n");
+
+ alignment *alng = _align(bseq, blen, aseq, alen, 150, 1);
+
+ char *q_aln_str = (char *) malloc((5 + strlen(alng->q_aln_str)) * sizeof(char));
+ char *t_aln_str = (char *) malloc((5 + strlen(alng->t_aln_str)) * sizeof(char));
+
+
+ strcpy(q_aln_str + 1, alng->q_aln_str);
+ strcpy(t_aln_str + 1, alng->t_aln_str);
+ q_aln_str[0] = 'T';
+ t_aln_str[0] = 'T';
+
+
+ for (int pos = 0; pos < strlen(q_aln_str); pos++) q_aln_str[pos] = toupper(q_aln_str[pos]);
+ for (int pos = 0; pos < strlen(t_aln_str); pos++) t_aln_str[pos] = toupper(t_aln_str[pos]);
+
+ //printf("Q:%s\nT:%s\n", q_aln_str, t_aln_str);
+
+ tags_list[j] = get_align_tags(q_aln_str,
+ t_aln_str,
+ strlen(alng->q_aln_str) + 1,
+ arange, (unsigned int) j, 0);
+ //free(aseq);
+ //free(bseq);
+
+ /*for (int k = 0; k < tags_list[j]->len; k++) {
+ printf("%d %d %ld %d %c %c\n",j, k, tags_list[j]->align_tags[k].t_pos,
+ tags_list[j]->align_tags[k].delta,
+ //tags_list[j]->align_tags[k].p_q_base,
+ aseq[tags_list[j]->align_tags[k].t_pos],
+ tags_list[j]->align_tags[k].q_base);
+ }*/
+ free(q_aln_str);
+ free(t_aln_str);
+ free(aseq);
+ free(bseq);
+ free_alignment(alng);
+
+ }
+
+ //printf("%d %d\n%s\n",seq_count, strlen(seq), seq);
+
+ consensus = get_cns_from_align_tags(tags_list, seq_count, alen + 1, 1);
+// printf("Consensus len :%d\n",strlen(consensus->sequence));
+ draft_assembly += std::string(consensus->sequence);
+
+ free_consensus_data(consensus);
+ for (int j = 0; j < seq_count; j++)
+ free_align_tags(tags_list[j]);
+
+ } else {
+ draft_assembly += breads[std::get<0>(ladders[i][0])].substr(std::get<1>(ladders[i][0]),
+ std::get<2>(ladders[i][0]) -
+ std::get<1>(ladders[i][0]));
+ }
+
+// printf("\n");
+ }
+
+
+
+ /*for (int i = 0; i < mapping.size(); i++)
+ printf("%d %d\n", i, mapping[i]);
+ printf("[%d %d], [%d %d]\n", bedges[0]->read_A_match_start_, bedges[0]->read_A_match_end_, bedges[0]->read_B_match_start_, bedges[0]->read_B_match_end_);*/
+
+ std::cout << sequence.size() << std::endl;
+ std::cout << draft_assembly.size() << std::endl;
+
+
+ out_fa << ">Draft_assembly" << num_contig << std::endl;
+ out_fa << draft_assembly << std::endl;
+ num_contig++;
+
+ }
+
+ if (strlen(name_db) > 0)
+ la.closeDB(); //close database
+ return 0;
+}
diff --git a/src/consensus/draft_chopper.cpp b/src/consensus/draft_chopper.cpp
new file mode 100644
index 0000000..1ce4a30
--- /dev/null
+++ b/src/consensus/draft_chopper.cpp
@@ -0,0 +1,1005 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <unordered_map>
+#include <algorithm>
+#include <fstream>
+#include <sstream>
+#include <iostream>
+#include <set>
+#include <omp.h>
+#include <tuple>
+#include <iomanip>
+
+#include "spdlog/spdlog.h"
+#include "cmdline.h"
+#include "INIReader.h"
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include <utility>
+#include <boost/graph/adjacency_list.hpp>
+#include <boost/graph/connected_components.hpp>
+
+extern "C" {
+#include "common.h"
+}
+
+
+#define LAST_READ_SYMBOL '$'
+
+#define HINGED_EDGE 1
+#define UNHINGED_EDGE -1
+#define REVERSE_COMPLEMENT_MATCH 1
+#define SAME_DIRECTION_MATCH 0
+
+using namespace boost;
+
+typedef adjacency_list <vecS, vecS, undirectedS> Graph;
+typedef std::tuple<Node, Node, int> Edge_w;
+typedef std::pair<Node, Node> Edge_nw;
+
+
+static int ORDER(const void *l, const void *r) {
+ int x = *((int32 *) l);
+ int y = *((int32 *) r);
+ return (x - y);
+}
+
+
+
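+// Given the two gapped strings of a pairwise alignment (the first treated as
+// read A, the second as read B), return, for every ungapped position of read A
+// inside the aligned region, the corresponding ungapped offset within read B.
+// For example, get_mapping("AC-GT", "A-CGT") returns {0, 1, 2, 3}.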
+std::vector<int> get_mapping(std::string aln_tag1, std::string aln_tag2) {
+ int pos = 0;
+ int count = 0;
+ int count2 = 0;
+
+ std::vector<int> ret;
+ while (pos < aln_tag1.size()) {
+ if (aln_tag1[pos] != '-') {
+ ret.push_back(count2);
+ count ++;
+ }
+ if (aln_tag2[pos] != '-') {
+ count2 ++;
+ }
+ pos++;
+ }
+ return ret;
+}
+
+
+
+std::string reverse_complement(std::string seq) {
+ static std::map<char, char> m = {{'a','t'}, {'c','g'}, {'g','c'}, {'t','a'}, {'A','T'}, {'C','G'}, {'T','A'}, {'G','C'}, {'n','n'}, {'N', 'N'}, {'-', '-'}};
+ std::reverse(seq.begin(), seq.end());
+ for (int i = 0; i < seq.size(); i++) {
+ seq[i] = m[seq[i]];
+ }
+ return seq;
+}
+
+
+
+std::ostream& operator<<(std::ostream& out, const MatchType value){
+ static std::map<MatchType, std::string> strings;
+ if (strings.size() == 0){
+#define INSERT_ELEMENT(p) strings[p] = #p
+ INSERT_ELEMENT(FORWARD);
+ INSERT_ELEMENT(BACKWARD);
+ INSERT_ELEMENT(ACOVERB);
+ INSERT_ELEMENT(BCOVERA);
+ INSERT_ELEMENT(INTERNAL);
+ INSERT_ELEMENT(UNDEFINED);
+ INSERT_ELEMENT(NOT_ACTIVE);
+#undef INSERT_ELEMENT
+ }
+ return out << strings[value];
+}
+
+std::vector<std::string> &split(const std::string &s, char delim, std::vector<std::string> &elems) {
+ std::stringstream ss(s);
+ std::string item;
+ while (std::getline(ss, item, delim)) {
+ elems.push_back(item);
+ }
+ return elems;
+}
+
+
+std::vector<std::string> split(const std::string &s, char delim) {
+ std::vector<std::string> elems;
+ split(s, delim, elems);
+ return elems;
+}
+
+
+
+bool compare_overlap(LOverlap * ovl1, LOverlap * ovl2) {
+ return ((ovl1->read_A_match_end_ - ovl1->read_A_match_start_
+ + ovl1->read_B_match_end_ - ovl1->read_B_match_start_) >
+ (ovl2->read_A_match_end_ - ovl2->read_A_match_start_
+ + ovl2->read_B_match_end_ - ovl2->read_B_match_start_));
+}
+
+
+bool compare_overlap_weight(LOverlap * ovl1, LOverlap * ovl2) {
+ return (ovl1->weight > ovl2->weight);
+}
+
+
+bool compare_overlap_abpos(LOverlap * ovl1, LOverlap * ovl2) {
+ return ovl1->read_A_match_start_ < ovl2->read_A_match_start_;
+}
+
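+// Note: despite its name, this comparator also looks at read_A_match_start_,
+// ordering overlaps by match start in descending order.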
+bool compare_overlap_aepos(LOverlap * ovl1, LOverlap * ovl2) {
+ return ovl1->read_A_match_start_ > ovl2->read_A_match_start_;
+}
+
+
+int main(int argc, char *argv[]) {
+
+ cmdline::parser cmdp;
+ cmdp.add<std::string>("db", 'b', "db file name", false, "");
+ cmdp.add<std::string>("las", 'l', "las file name", false, "");
+ cmdp.add<std::string>("paf", 'p', "paf file name", false, "");
+ cmdp.add<std::string>("config", 'c', "configuration file name", false, "");
+ cmdp.add<std::string>("fasta", 'f', "fasta file name", false, "");
+ cmdp.add<std::string>("prefix", 'x', "(intermediate output) input file prefix", true, "");
+ cmdp.add<std::string>("out", 'o', "final output file name", true, "");
+ cmdp.add<std::string>("log", 'g', "log folder name", false, "log");
+ cmdp.add<std::string>("path", 0, "path file name", false, "path");
+ cmdp.add("debug", '\0', "debug mode");
+
+// cmdp.add<std::string>("restrictreads",'r',"restrict to reads in the file",false,"");
+
+
+ cmdp.parse_check(argc, argv);
+
+ LAInterface la;
+ const char *name_db = cmdp.get<std::string>("db").c_str(); //.db file of reads to load
+ const char *name_las = cmdp.get<std::string>("las").c_str();//.las file of alignments
+ const char *name_paf = cmdp.get<std::string>("paf").c_str();
+ const char *name_fasta = cmdp.get<std::string>("fasta").c_str();
+ const char *name_config = cmdp.get<std::string>("config").c_str();//name of the configuration file, in INI format
+ std::string out = cmdp.get<std::string>("prefix");
+ std::string out_name = cmdp.get<std::string>("out");
+ std::string path_name = cmdp.get<std::string>("path");
+// const char * name_restrict = cmdp.get<std::string>("restrictreads").c_str();
+
+
+ std::string name_mask = out + ".mas";
+ std::string name_max = out + ".max";
+ std::string name_homo = out + ".homologous.txt";
+ std::string name_rep = out + ".repeat.txt";
+ std::string name_hg = out + ".hinges.txt";
+ std::string name_cov = out + ".coverage.txt";
+ std::string name_garbage = out + ".garbage.txt";
+ std::string name_contained = out + ".contained.txt";
+ std::string name_deadend = out_name + ".deadends.txt";
+
+
+ std::ofstream deadend_out(name_deadend);
+ std::ofstream maximal_reads(name_max);
+ std::ofstream garbage_out(name_garbage);
+ std::ofstream contained_out(name_contained);
+ std::ifstream homo(name_homo);
+ std::vector<int> homo_reads;
+
+
+ bool delete_telomere = false; // TODO: command line option to set this true
+
+ int read_id;
+ while (homo >> read_id) homo_reads.push_back(read_id);
+
+
+ namespace spd = spdlog;
+
+ //auto console = spd::stdout_logger_mt("console");
+ std::vector<spdlog::sink_ptr> sinks;
+ sinks.push_back(std::make_shared<spdlog::sinks::stdout_sink_st>());
+ sinks.push_back(
+ std::make_shared<spdlog::sinks::daily_file_sink_st>(cmdp.get<std::string>("log") + "/log", "txt", 23, 59));
+ auto console = std::make_shared<spdlog::logger>("log", std::begin(sinks), std::end(sinks));
+ spdlog::register_logger(console);
+
+ console->info("draft consensus");
+
+ if (cmdp.exist("debug")) {
+ char *buff = (char *) malloc(sizeof(char) * 2000);
+ getwd(buff);
+ console->info("current user {}, current working directory {}", getlogin(), buff);
+ free(buff);
+ }
+
+ console->info("name of db: {}, name of .las file {}", name_db, name_las);
+ console->info("name of fasta: {}, name of .paf file {}", name_fasta, name_paf);
+ console->info("filter files prefix: {}", out);
+ console->info("output prefix: {}", out_name);
+
+
+ std::ifstream ini_file(name_config);
+ std::string str((std::istreambuf_iterator<char>(ini_file)),
+ std::istreambuf_iterator<char>());
+
+ console->info("Parameters passed in \n{}", str);
+
+ if (strlen(name_db) > 0)
+ la.openDB(name_db);
+
+
+ if (strlen(name_las) > 0)
+ la.openAlignmentFile(name_las);
+
+ int64 n_aln = 0;
+
+ if (strlen(name_las) > 0) {
+ n_aln = la.getAlignmentNumber();
+ console->info("Load alignments from {}", name_las);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ int n_read;
+ if (strlen(name_db) > 0)
+ n_read = la.getReadNumber();
+
+ std::vector<Read *> reads; //Vector of pointers to all reads
+
+ if (strlen(name_fasta) > 0) {
+ n_read = la.loadFASTA(name_fasta, reads);
+ }
+
+ console->info("# Reads: {}", n_read); // output some statistics
+
+ std::vector<LOverlap *> aln;//Vector of pointers to all alignments
+
+ if (strlen(name_las) > 0) {
+ la.resetAlignment();
+ la.getOverlap(aln, 0, n_aln);
+ }
+
+ if (strlen(name_paf) > 0) {
+ n_aln = la.loadPAF(std::string(name_paf), aln);
+ console->info("Load alignments from {}", name_paf);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ if (n_aln == 0) {
+ console->error("No alignments!");
+ return 1;
+ }
+
+
+ if (strlen(name_db) > 0) {
+ la.getRead(reads, 0, n_read);
+ }
+
+ console->info("Input data finished");
+
+ INIReader reader(name_config);
+
+ if (reader.ParseError() < 0) {
+ console->warn("Can't load {}", name_config);
+ return 1;
+ }
+
+ int LENGTH_THRESHOLD = int(reader.GetInteger("filter", "length_threshold", -1));
+ double QUALITY_THRESHOLD = reader.GetReal("filter", "quality_threshold", 0.0);
+ int N_ITER = (int) reader.GetInteger("filter", "n_iter", -1);
+ int ALN_THRESHOLD = (int) reader.GetInteger("filter", "aln_threshold", -1);
+ int MIN_COV = (int) reader.GetInteger("filter", "min_cov", -1);
+ int CUT_OFF = (int) reader.GetInteger("filter", "cut_off", -1);
+ int THETA = (int) reader.GetInteger("filter", "theta", -1);
+ int THETA2 = (int) reader.GetInteger("filter", "theta2", 0);
+ int N_PROC = (int) reader.GetInteger("running", "n_proc", 4);
+ int HINGE_SLACK = (int) reader.GetInteger("layout", "hinge_slack", 1000);
+ //This is the amount by which a forward overlap
+ //must be longer than a forward internal overlap to be preferred while
+ //building a graph.
+ int HINGE_TOLERANCE = (int) reader.GetInteger("layout", "hinge_tolerance", 150);
+ //This is how far an overlap must start from a hinge to be considered an internal
+ //overlap.
+ int KILL_HINGE_OVERLAP_ALLOWANCE = (int) reader.GetInteger("layout", "kill_hinge_overlap", 300);
+ int KILL_HINGE_INTERNAL_ALLOWANCE = (int) reader.GetInteger("layout", "kill_hinge_internal", 40);
+
+ int MATCHING_HINGE_SLACK = (int) reader.GetInteger("layout", "matching_hinge_slack", 200);
+
+ int NUM_EVENTS_TELOMERE = (int) reader.GetInteger("layout", "num_events_telomere", 7);
+
+ int MIN_CONNECTED_COMPONENT_SIZE = (int) reader.GetInteger("layout", "min_connected_component_size", 8);
+
+
+ int MIN_COV2 = reader.GetInteger("draft", "min_cov", -1);
+ int EDGE_TRIM = reader.GetInteger("draft", "trim", -1);
+ int EDGE_SAFE = reader.GetInteger("draft", "edge_safe", -1);
+ int TSPACE = reader.GetInteger("draft", "tspace", -1);
+ int STEP = reader.GetInteger("draft", "step", -1);
+
+ console->info("LENGTH_THRESHOLD = {}", LENGTH_THRESHOLD);
+ console->info("QUALITY_THRESHOLD = {}", QUALITY_THRESHOLD);
+ console->info("ALN_THRESHOLD = {}", ALN_THRESHOLD);
+ console->info("MIN_COV = {}", MIN_COV);
+ console->info("CUT_OFF = {}", CUT_OFF);
+ console->info("THETA = {}", THETA);
+ console->info("N_ITER = {}", N_ITER);
+ console->info("THETA2 = {}", THETA2);
+ console->info("N_PROC = {}", N_PROC);
+ console->info("HINGE_SLACK = {}", HINGE_SLACK);
+ console->info("HINGE_TOLERANCE = {}", HINGE_TOLERANCE);
+ console->info("KILL_HINGE_OVERLAP_ALLOWANCE = {}", KILL_HINGE_OVERLAP_ALLOWANCE);
+ console->info("KILL_HINGE_INTERNAL_ALLOWANCE = {}", KILL_HINGE_INTERNAL_ALLOWANCE);
+ console->info("MATCHING_HINGE_SLACK = {}", MATCHING_HINGE_SLACK);
+ console->info("MIN_CONNECTED_COMPONENT_SIZE = {}", MIN_CONNECTED_COMPONENT_SIZE);
+
+
+ omp_set_num_threads(N_PROC);
+ std::vector<Edge_w> edgelist, edgelist_ms; // save output to edgelist
+ std::vector<std::unordered_map<int, std::vector<LOverlap *> > > idx_ab;
+
+
+ for (int i = 0; i < n_read; i++) {
+ //An initialisation for loop
+ //TODO Preallocate memory. Much more efficient.
+ idx_ab.push_back(std::unordered_map<int, std::vector<LOverlap *> >());
+ }
+
+ for (int i = 0; i < aln.size(); i++) {
+ idx_ab[aln[i]->read_A_id_][aln[i]->read_B_id_] = std::vector<LOverlap *>();
+ }
+
+ for (int i = 0; i < aln.size(); i++) {
+ idx_ab[aln[i]->read_A_id_][aln[i]->read_B_id_].push_back(aln[i]);
+ }
+
+
+ std::unordered_map<int, std::vector<LOverlap *> > idx3; // this is the pileup
+ std::vector<std::set<int> > has_overlap(n_read);
+ std::unordered_map<int, std::unordered_map<int, std::vector<LOverlap *> > > idx;
+
+
+ for (int i = 0; i < n_read; i++) {
+ //has_overlap[i] = std::set<int>();
+ idx3[i] = std::vector<LOverlap *>();
+ }
+
+ //for (int i = 0; i < aln.size(); i++)
+ // if (aln[i]->active)
+ // idx[std::pair<int, int>(aln[i]->aid, aln[i]->bid)] = std::vector<LOverlap *>();
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ idx[aln[i]->read_A_id_][aln[i]->read_B_id_] = std::vector<LOverlap *>();
+ }
+ }
+
+
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ has_overlap[aln[i]->read_A_id_].insert(aln[i]->read_B_id_);
+ }
+ }
+
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ idx3[aln[i]->read_A_id_].push_back(aln[i]);
+ }
+ }
+
+
+ std::cout << "add data" << std::endl;
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ idx[aln[i]->read_A_id_][aln[i]->read_B_id_].push_back(aln[i]);
+ }
+ }
+ std::cout << "add data" << std::endl;
+
+ std::string name_input= out + ".edges.list";
+ std::ifstream edges_file(name_input);
+
+ std::string name_output = out_name + ".mega.fasta";
+ std::ofstream out_fa(name_output);
+
+ std::string name_output_orig = out_name + ".fasta";
+ std::ofstream out_fa_orig(name_output_orig);
+
+ int num_contig = 0;
+ int num_one_read_contig = 0;
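+    // Each block in <prefix>.edges.list describes one backbone path. Lines with
+    // six or more space-separated fields are edges: read A id, read A strand,
+    // read B id, read B strand, and the combined match length used as the edge
+    // weight (any further fields are ignored here). Strand 0 means the read is
+    // used as stored, anything else means reverse complement. A four-field line
+    // (read id, strand, start, end) is written out directly as a one-read
+    // contig, and a single-field line ends the current block.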
+ while (true) {
+ if (edges_file.eof()) break;
+ edgelist.clear();
+ std::string edge_line;
+ while (!edges_file.eof()) {
+ std::getline(edges_file, edge_line);
+ //std::cout << edge_line << std::endl;
+
+ std::vector<std::string> tokens = split(edge_line, ' ');
+
+ if (tokens.size() == 1) {
+ break;
+ }
+ //std::cout << tokens.size() << std::endl;
+
+ Node node0;
+ Node node1;
+ int w;
+ if (tokens.size() > 5 ) {
+ node0.id = std::stoi(tokens[0]);
+ node0.strand = std::stoi(tokens[1]);
+
+ node1.id = std::stoi(tokens[2]);
+                node1.strand = std::stoi(tokens[3]);
+
+ w = std::stoi(tokens[4]);
+ edgelist.push_back(std::make_tuple(node0, node1, w));
+ }
+
+
+ if (tokens.size() == 4) {
+ out_fa << ">OneReadContig" << num_one_read_contig << std::endl;
+ out_fa_orig << ">OneReadContig" << num_one_read_contig << std::endl;
+
+
+
+
+ int node_id = std::stoi(tokens[0]);
+ int node_strand = std::stoi(tokens[1]);
+ int from = std::stoi(tokens[2]);
+ int to = std::stoi(tokens[3]);
+
+
+ std::string current_seq;
+
+
+ if (node_strand == 0) current_seq = reads[node_id]->bases;
+ else current_seq = reverse_complement(reads[node_id]->bases);
+
+ out_fa << current_seq.substr(from, to-from) << std::endl;
+ out_fa_orig << current_seq.substr(from, to-from) << std::endl;
+
+ num_one_read_contig++;
+ }
+ }
+
+ std::cout << "list size:" << edgelist.size() << std::endl;
+ if (edgelist.size() == 0) continue;
+
+
+ std::vector<LAlignment *> full_alns;
+ std::vector<LAlignment *> selected;
+ std::unordered_map<int, std::vector<LAlignment *>> idx_aln;
+ la.resetAlignment();
+ std::vector<int> range;
+
+ for (int i = 0; i < edgelist.size(); i++) {
+ range.push_back(std::get<0>(edgelist[i]).id);
+ idx_aln[std::get<0>(edgelist[i]).id] = std::vector<LAlignment *>();
+ }
+
+ std::sort(range.begin(), range.end());
+
+ la.getAlignment(full_alns, range);
+
+ for (auto i:full_alns) {
+ idx_aln[i->read_A_id_].push_back(i);
+ }
+
+ for (int i = 0; i < edgelist.size(); i++) {
+ int aid = std::get<0>(edgelist[i]).id;
+ int bid = std::get<1>(edgelist[i]).id;
+ bool found = false;
+ for (int j = 0; j < idx_aln[std::get<0>(edgelist[i]).id].size(); j++) {
+ //printf("%d %d %d %d\n",bid, idx_aln[aid][j]->bid, idx_aln[aid][j]->read_A_match_end_ - idx_aln[aid][j]->read_A_match_start_, std::get<2>(edgelist[i]));
+ if ((idx_aln[aid][j]->read_B_id_ == bid) and \
+ (idx_aln[aid][j]->aepos - idx_aln[aid][j]->abpos + idx_aln[aid][j]->bepos - idx_aln[aid][j]->bbpos == std::get<2>(edgelist[i]))) {
+ selected.push_back(idx_aln[aid][j]);
+ found = true;
+ break;
+ }
+ }
+ }
+
+ std::cout << "selected:" << selected.size() << std::endl;
+
+ std::unordered_map<int, std::unordered_map<int, std::pair<std::string, std::string> > > aln_tags_map;
+ std::vector<std::pair<std::string, std::string> > aln_tags_list;
+ std::vector<std::pair<std::string, std::string> > aln_tags_list_true_strand;
+
+
+ for (int i = 0; i < selected.size(); i++) {
+ la.recoverAlignment(selected[i]);
+ //printf("%d %d\n",selected[i]->tlen, selected[i]->trace_pts_len);
+ std::pair<std::string, std::string> res = la.getAlignmentTags(selected[i]);
+ aln_tags_map[selected[i]->read_A_id_][selected[i]->read_B_id_] = res;
+ aln_tags_list.push_back(res);
+ }
+
+
+
+ std::string sequence = "";
+
+ std::vector<LOverlap *> bedges;
+ std::vector<std::string> breads;
+
+ std::vector<std::vector<std::pair<int, int> > > pitfalls;
+
+
+ range.clear();
+ for (int i = 0; i < edgelist.size(); i++) {
+ range.push_back(std::get<0>(edgelist[i]).id);
+ }
+
+ std::vector<std::vector<int> *> coverages;
+
+ for (int i = 0; i < range.size(); i++) {
+ int aread = range[i];
+ if (idx3[aread].size() > 0) {
+ std::vector<int> *res = la.getCoverage(idx3[aread]);
+ std::vector<std::pair<int, int> > *res2 = la.lowCoverageRegions(*res, MIN_COV2);
+ //delete res;
+ coverages.push_back(res);
+ //printf("%d %d: (%d %d) ", i, aread, 0, idx3[aread][0]->alen);
+ //for (int j = 0; j < res2->size(); j++) {
+ // printf("[%d %d] ", res2->at(j).first, res2->at(j).second);
+ //}
+ //printf("\n");
+ pitfalls.push_back(*res2);
+ delete res2;
+ }
+ }
+
+
+ /***
+ * Prepare the data
+ */
+
+ for (int i = 0; i < edgelist.size(); i++) {
+
+ std::vector<LOverlap *> currentalns = idx[std::get<0>(edgelist[i]).id][std::get<1>(edgelist[i]).id];
+
+ LOverlap *currentaln = NULL;
+
+ for (int j = 0; j < currentalns.size(); j++) {
+ //std::cout << std::get<0>(edgelist[i]).id << " " << std::get<1>(edgelist[i]).id << " " << currentalns[j]->match_type_ << std::endl;
+ if (currentalns[j]->read_A_match_end_ - currentalns[j]->read_A_match_start_ + currentalns[j]->read_B_match_end_ - currentalns[j]->read_B_match_start_ ==
+ std::get<2>(edgelist[i]))
+ currentaln = currentalns[j];
+ }
+
+ if (currentaln == NULL) exit(1);
+ //currentaln->show();
+
+ std::string current_seq;
+ std::string next_seq;
+
+ std::string aln_tags1;
+ std::string aln_tags2;
+
+
+ if (std::get<0>(edgelist[i]).strand == 0)
+ current_seq = reads[std::get<0>(edgelist[i]).id]->bases;
+ else
+ current_seq = reverse_complement(reads[std::get<0>(edgelist[i]).id]->bases);
+
+ if (std::get<0>(edgelist[i]).strand == 0) {
+ aln_tags1 = aln_tags_list[i].first;
+ aln_tags2 = aln_tags_list[i].second;
+ } else {
+ aln_tags1 = reverse_complement(aln_tags_list[i].first);
+ aln_tags2 = reverse_complement(aln_tags_list[i].second);
+ }
+
+ aln_tags_list_true_strand.push_back(std::pair<std::string, std::string>(aln_tags1, aln_tags2));
+
+ if (std::get<1>(edgelist[i]).strand == 0)
+ next_seq = reads[std::get<1>(edgelist[i]).id]->bases;
+ else
+ next_seq = reverse_complement(reads[std::get<1>(edgelist[i]).id]->bases);
+
+ int abpos, aepos, alen, bbpos, bepos, blen, aes, aee, bes, bee;
+
+ alen = currentaln->alen;
+ blen = currentaln->blen;
+
+
+ if (std::get<0>(edgelist[i]).strand == 0) {
+ abpos = currentaln->read_A_match_start_;
+ aepos = currentaln->read_A_match_end_;
+
+ aes = currentaln->eff_read_A_start_;
+ aee = currentaln->eff_read_A_end_;
+
+ } else {
+ abpos = alen - currentaln->read_A_match_end_;
+ aepos = alen - currentaln->read_A_match_start_;
+
+ aes = alen - currentaln->eff_read_A_end_;
+ aee = alen - currentaln->eff_read_A_start_;
+ }
+
+ if (((std::get<1>(edgelist[i]).strand == 0))) {
+ bbpos = currentaln->read_B_match_start_;
+ bepos = currentaln->read_B_match_end_;
+
+ bes = currentaln->eff_read_B_start_;
+ bee = currentaln->eff_read_B_end_;
+
+ } else {
+ bbpos = blen - currentaln->read_B_match_end_;
+ bepos = blen - currentaln->read_B_match_start_;
+
+ bes = blen - currentaln->eff_read_B_end_;
+ bee = blen - currentaln->eff_read_B_start_;
+
+ }
+ aes = 0;
+ bes = 0;
+ aee = alen;
+ bee = blen;
+
+ printf("%d %d [[%d %d] << [%d %d]] x [[%d %d] << [%d %d]]\n", std::get<0>(edgelist[i]).id, std::get<1>(edgelist[i]).id, abpos, aepos, aes, aee, bbpos, bepos, bes, bee);
+
+ LOverlap *new_ovl = new LOverlap();
+ new_ovl->read_A_match_start_ = abpos;
+ new_ovl->read_A_match_end_ = aepos;
+ new_ovl->read_B_match_start_ = bbpos;
+ new_ovl->read_B_match_end_ = bepos;
+ new_ovl->eff_read_A_end_ = aee;
+ new_ovl->eff_read_A_start_ = aes;
+ new_ovl->eff_read_B_end_ = bee;
+ new_ovl->eff_read_B_start_ = bes;
+ new_ovl->alen = currentaln->alen;
+ new_ovl->blen = currentaln->blen;
+ new_ovl->read_A_id_ = std::get<0>(edgelist[i]).id;
+ new_ovl->read_B_id_ = std::get<1>(edgelist[i]).id;
+
+
+ bedges.push_back(new_ovl);
+ breads.push_back(current_seq);
+
+
+ }
+ //need to trim the end
+
+
+
+ std::vector<std::vector<int> > mappings;
+ for (int i = 0; i < range.size(); i++) {
+ mappings.push_back(get_mapping(aln_tags_list_true_strand[i].first, aln_tags_list_true_strand[i].second));
+ }
+
+ std::cout << bedges.size() << " " << breads.size() << " " << selected.size() << " "
+ << aln_tags_list.size() << " " << pitfalls.size() << " " << aln_tags_list_true_strand.size()
+ << " " << mappings.size() << " " << coverages.size() << std::endl;
+
+ /*for (int i = 0; i < bedges.size() - 1; i++) {
+ printf("%d %d %d %d %d\n", bedges[i]->read_B_match_start_, bedges[i]->read_B_match_end_, bedges[i+1]->read_A_match_start_, bedges[i+1]->read_A_match_end_, bedges[i]->read_B_match_end_ - bedges[i+1]->read_A_match_start_);
+ }*/
+
+
+        int tspace = TSPACE; // trace-point spacing between lanes, taken from the [draft] tspace config entry
+ int nlane = 0;
+
+
+ //printf("%d %d\n", mappings[0][800], mappings[0][1000]); // debug output
+ //printf("%s\n%s\n", breads[0].substr(bedges[0]->read_A_match_start_ + 800, 50).c_str(),
+ // breads[1].substr(bedges[0]->read_B_match_start_ + mappings[0][800], 50).c_str()); //debug output
+
+
+ std::vector<std::vector<std::pair<int, int>>> lanes;
+
+ std::string draft_assembly = "";
+
+
+ int currentlane = 0;
+ int current_starting_read = 0;
+ int current_starting_space = 1;
+ int current_starting_offset = 0;
+ int n_bb_reads = range.size();
+ std::vector<std::vector<int>> trace_pts(n_bb_reads);
+ bool revert = false;
+
+
+ int rmax = -1;
+ /**
+ * Move forward and put "trace points"
+ */
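+        // Starting from each backbone read, waypoints are dropped every tspace
+        // bases along read A of its edge and projected onto the following reads
+        // through the alignment-derived coordinate maps in `mappings`. The chain
+        // of projected positions across successive reads forms one "lane"; a
+        // lane is only recorded when it reaches at least as far along the
+        // backbone as any earlier lane (tracked by rmax).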
+ while (current_starting_read < n_bb_reads - 1) {
+ int currentread = current_starting_read;
+ int additional_offset = 0;
+ while (bedges[current_starting_read]->read_A_match_start_ + current_starting_space * tspace +
+ current_starting_offset + additional_offset <
+ bedges[current_starting_read]->read_A_match_end_ - EDGE_SAFE) {
+ int waypoint = bedges[current_starting_read]->read_A_match_start_ + tspace * current_starting_space +
+ current_starting_offset + additional_offset;
+ //if ((waypoint - bedges[current_starting_read]->read_A_match_start_) < EDGE_SAFE)
+ // waypoint += EDGE_SAFE;
+
+ //int next_waypoint = mappings[currentread][waypoint - bedges[current_starting_read]->read_A_match_start_] + bedges[current_starting_read]->read_B_match_start_;
+ std::vector<std::pair<int, int> > lane;
+
+ while ((waypoint > bedges[currentread]->read_A_match_start_) and
+ (waypoint < bedges[currentread]->read_A_match_end_)) {
+
+ printf("%d %d\n", currentread, waypoint);
+ trace_pts[currentread].push_back(waypoint);
+
+
+ /*if (waypoint > bedges[currentread]->read_A_match_end_ - EDGE_SAFE) {
+ printf("Reaching the end, neglect low coverage\n");
+ }
+
+ if ((coverages[currentread]->at(waypoint) < MIN_COV2) and (waypoint < bedges[currentread]->read_A_match_end_ - EDGE_SAFE)) {
+ revert = true;
+ printf("Low coverage, revert\n");
+ break;
+ }*/
+
+
+ lane.push_back(std::pair<int, int>(currentread, waypoint));
+ if (currentread > rmax) rmax = currentread;
+ //int previous_wp = waypoint;
+ waypoint = mappings[currentread][waypoint - bedges[currentread]->read_A_match_start_] +
+ bedges[currentread]->read_B_match_start_;
+ //printf("%s\n%s\n", breads[currentread].substr(previous_wp,50).c_str(), breads[currentread+1].substr(waypoint,50).c_str());
+ currentread++;
+ if (currentread >= n_bb_reads) break;
+ }
+ if (currentread < n_bb_reads) if (waypoint < bedges[currentread]->alen) {
+ lane.push_back(std::pair<int, int>(currentread, waypoint));
+ if (currentread > rmax) rmax = currentread;
+ }
+ /*if (revert) {
+ printf("revert\n");
+ revert = false;
+ while (currentread >= current_starting_read) {
+ trace_pts[currentread].pop_back();
+ currentread --;
+ additional_offset += STEP;
+ }
+ currentread = current_starting_read;
+ }
+ else*/
+ {
+ if (currentread >= rmax)
+ lanes.push_back(lane);
+ current_starting_space++;
+ currentread = current_starting_read;
+
+ }
+
+ }
+
+ current_starting_read++;
+ current_starting_space = 1;//get next space;
+ if (trace_pts[current_starting_read].size() == 0)
+ current_starting_offset = 0;
+ else
+ current_starting_offset =
+ trace_pts[current_starting_read].back() - bedges[current_starting_read]->read_A_match_start_;
+ }
+
+
+ /**
+ * Show trace points on reads
+ */
+ for (int i = 0; i < n_bb_reads; i++) {
+ printf("Read %d:", i);
+ for (int j = 0; j < trace_pts[i].size(); j++) {
+ printf("%d ", trace_pts[i][j]);
+ }
+ printf("\n");
+ }
+
+ /**
+ * Show lanes
+ */
+
+ for (int i = 0; i < lanes.size(); i++) {
+
+ printf("Lane %d\n", i);
+ for (int j = 0; j < lanes[i].size(); j++) {
+ printf("[%d %d] ", lanes[i][j].first, lanes[i][j].second);
+ }
+ printf("\n");
+ }
+
+
+        printf("In total %zu lanes\n", lanes.size());
+ if (lanes.size() == 0) {
+ draft_assembly = breads[0];
+ out_fa << ">DraftAssemblyContig" << num_contig << std::endl;
+ out_fa << draft_assembly << std::endl;
+ num_contig++;
+ continue;
+ }
+
+
+
+ /**
+         * Consecutive lanes form a column (ladder)
+ */
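+        // Each pair of consecutive lanes is turned into a "ladder": for every
+        // read shared by the two lanes it stores (read index, position in the
+        // earlier lane, position in the later lane), i.e. the segment of that
+        // read spanning the column between the two lanes.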
+
+ std::vector<std::vector<std::tuple<int, int, int> > > ladders;
+
+ for (int i = 0; i < lanes.size() - 1; i++) {
+ std::vector<std::pair<int, int> > lane1 = lanes[i];
+ std::vector<std::pair<int, int> > lane2 = lanes[i + 1];
+ std::vector<std::tuple<int, int, int> > ladder;
+ int pos = 0;
+ for (int j = 0; j < lane2.size(); j++) {
+ while ((lane1[pos].first != lane2[j].first) and (pos < lane1.size() - 1)) pos++;
+ if ((lane1[pos].first == lane2[j].first))
+ ladder.push_back(std::make_tuple(lane2[j].first, lane1[pos].second, lane2[j].second));
+ }
+ ladders.push_back(ladder);
+ }
+
+
+ /**
+ * show ladders
+ */
+ for (int i = 0; i < ladders.size(); i++) {
+ printf("Ladder %d\n", i);
+ for (int j = 0; j < ladders[i].size(); j++) {
+ //printf("[%d %d-%d] ", std::get<0>(ladders[i][j]), std::get<1>(ladders[i][j]), std::get<2>(ladders[i][j]) );
+ //printf("%s\n", breads[std::get<0>(ladders[i][j])].substr(std::get<1>(ladders[i][j]),std::get<2>(ladders[i][j])-std::get<1>(ladders[i][j])).c_str());
+
+ }
+
+ if (ladders[i].size() == 0) {
+ printf("low coverage!\n");
+ continue;
+ }
+
+ if (ladders[i].size() > 1) {
+
+
+ int mx = 0;
+ int maxcoverage = 0;
+ for (int j = 0; j < ladders[i].size(); j++) {
+ int mincoverage = 10000;
+ int read = std::get<0>(ladders[i][j]);
+ int start = std::get<1>(ladders[i][j]);
+ int end = std::get<2>(ladders[i][j]);
+ for (int pos = start; pos < end; pos++) {
+ if (coverages[read]->at(pos) < mincoverage) mincoverage = coverages[read]->at(pos);
+ }
+ if (mincoverage > maxcoverage) {
+ maxcoverage = mincoverage;
+ mx = j;
+ }
+ }
+
+ std::cout << "ladder " << i << " num reads " << ladders[i].size() << " possibly error here " <<
+ maxcoverage << "\n!";
+
+
+ //if (ladders[i].size() == 2) {
+ // draft_assembly += breads[std::get<0>(ladders[i][mx])].substr(std::get<1>(ladders[i][mx]),
+ // std::get<2>(ladders[i][mx]) -
+ // std::get<1>(ladders[i][mx]));
+ // continue;
+ // }
+
+
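+                // Use the segment with the highest minimum coverage (index mx,
+                // found above) as the alignment template for this column.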
+ std::string base = breads[std::get<0>(ladders[i][mx])].substr(std::get<1>(ladders[i][mx]),
+ std::get<2>(ladders[i][mx]) -
+ std::get<1>(ladders[i][mx]));;
+ int seq_count = ladders[i].size();
+ printf("seq_count:%d, max %d\n", seq_count, mx);
+ align_tags_t **tags_list;
+ tags_list = (align_tags_t **) calloc(seq_count, sizeof(align_tags_t *));
+ consensus_data *consensus;
+
+ int alen = (std::get<2>(ladders[i][mx]) - std::get<1>(ladders[i][mx]));
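+                // Align every segment of the ladder (bseq) against the template
+                // (aseq) with _align, prepend a 'T' sentinel to both alignment
+                // strings, uppercase them, and turn each alignment into align
+                // tags; get_cns_from_align_tags below then produces the
+                // consensus for this column.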
+ for (int j = 0; j < ladders[i].size(); j++) {
+
+ int blen = (std::get<2>(ladders[i][j]) - std::get<1>(ladders[i][j]));
+ char *aseq = (char *) malloc(
+ (20 + (std::get<2>(ladders[i][mx]) - std::get<1>(ladders[i][mx]))) * sizeof(char));
+ char *bseq = (char *) malloc(
+ (20 + (std::get<2>(ladders[i][j]) - std::get<1>(ladders[i][j]))) * sizeof(char));
+ strcpy(aseq, breads[std::get<0>(ladders[i][mx])].substr(std::get<1>(ladders[i][mx]),
+ std::get<2>(ladders[i][mx]) -
+ std::get<1>(ladders[i][mx])).c_str());
+ strcpy(bseq, breads[std::get<0>(ladders[i][j])].substr(std::get<1>(ladders[i][j]),
+ std::get<2>(ladders[i][j]) -
+ std::get<1>(ladders[i][j])).c_str());
+
+
+ aln_range *arange = (aln_range *) calloc(1, sizeof(aln_range));
+ arange->s1 = 0;
+ arange->e1 = strlen(bseq);
+ arange->s2 = 0;
+ arange->e2 = strlen(aseq);
+ arange->score = 5;
+
+ //printf("blen %d alen%d\n",strlen(bseq), strlen(aseq));
+ //printf("before get tags\n");
+
+ alignment *alng = _align(bseq, blen, aseq, alen, 150, 1);
+
+ char *q_aln_str = (char *) malloc((5 + strlen(alng->q_aln_str)) * sizeof(char));
+ char *t_aln_str = (char *) malloc((5 + strlen(alng->t_aln_str)) * sizeof(char));
+
+
+ strcpy(q_aln_str + 1, alng->q_aln_str);
+ strcpy(t_aln_str + 1, alng->t_aln_str);
+ q_aln_str[0] = 'T';
+ t_aln_str[0] = 'T';
+
+
+ for (int pos = 0; pos < strlen(q_aln_str); pos++) q_aln_str[pos] = toupper(q_aln_str[pos]);
+ for (int pos = 0; pos < strlen(t_aln_str); pos++) t_aln_str[pos] = toupper(t_aln_str[pos]);
+
+ //printf("Q:%s\nT:%s\n", q_aln_str, t_aln_str);
+
+ tags_list[j] = get_align_tags(q_aln_str,
+ t_aln_str,
+ strlen(alng->q_aln_str) + 1,
+ arange, (unsigned int) j, 0);
+ //free(aseq);
+ //free(bseq);
+
+ /*for (int k = 0; k < tags_list[j]->len; k++) {
+ printf("%d %d %ld %d %c %c\n",j, k, tags_list[j]->align_tags[k].t_pos,
+ tags_list[j]->align_tags[k].delta,
+ //tags_list[j]->align_tags[k].p_q_base,
+ aseq[tags_list[j]->align_tags[k].t_pos],
+ tags_list[j]->align_tags[k].q_base);
+ }*/
+ free(q_aln_str);
+ free(t_aln_str);
+ free(aseq);
+ free(bseq);
+ free_alignment(alng);
+
+ }
+
+ //printf("%d %d\n%s\n",seq_count, strlen(seq), seq);
+
+ consensus = get_cns_from_align_tags(tags_list, seq_count, alen + 1, 1);
+                printf("Consensus len: %zu\n", strlen(consensus->sequence));
+ draft_assembly += std::string(consensus->sequence);
+
+ free_consensus_data(consensus);
+ for (int j = 0; j < seq_count; j++)
+ free_align_tags(tags_list[j]);
+
+ } else {
+ draft_assembly += breads[std::get<0>(ladders[i][0])].substr(std::get<1>(ladders[i][0]),
+ std::get<2>(ladders[i][0]) -
+ std::get<1>(ladders[i][0]));
+ }
+
+ printf("\n");
+ }
+
+
+
+ /*for (int i = 0; i < mapping.size(); i++)
+ printf("%d %d\n", i, mapping[i]);
+ printf("[%d %d], [%d %d]\n", bedges[0]->read_A_match_start_, bedges[0]->read_A_match_end_, bedges[0]->read_B_match_start_, bedges[0]->read_B_match_end_);*/
+
+ std::cout << sequence.size() << std::endl;
+ std::cout << draft_assembly.size() << std::endl;
+
+
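+        // Chop the draft contig into 50 kb pieces starting every 25 kb (so
+        // consecutive pieces overlap by 25 kb) for the .mega.fasta output; the
+        // unchopped contig goes to the plain .fasta output below.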
+ for (int i = 0; i < draft_assembly.size()/25000; i++) {
+ int len = 50000;
+ if (i*25000 + 50000 > draft_assembly.size()) len = draft_assembly.size()-25000*i;
+ out_fa << ">Draft_assembly_" << num_contig << "_" << i << std::endl;
+ out_fa << draft_assembly.substr(i*25000, len) << std::endl;
+ }
+
+ out_fa_orig << ">Draft_assembly_" << num_contig << std::endl;
+ out_fa_orig << draft_assembly << std::endl;
+
+
+ num_contig++;
+ }
+
+ if (strlen(name_db) > 0)
+ la.closeDB(); //close database
+ return 0;
+}
\ No newline at end of file
diff --git a/src/consensus/io_base.cpp b/src/consensus/io_base.cpp
new file mode 100644
index 0000000..dc4e18d
--- /dev/null
+++ b/src/consensus/io_base.cpp
@@ -0,0 +1,275 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <unordered_map>
+#include <algorithm>
+#include <fstream>
+#include <sstream>
+#include <iostream>
+#include <set>
+#include <omp.h>
+#include <tuple>
+#include <iomanip>
+
+#include "spdlog/spdlog.h"
+#include "cmdline.h"
+#include "INIReader.h"
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include <utility>
+#include <boost/graph/adjacency_list.hpp>
+#include <boost/graph/connected_components.hpp>
+
+#define LAST_READ_SYMBOL '$'
+
+#define HINGED_EDGE 1
+#define UNHINGED_EDGE -1
+#define REVERSE_COMPLEMENT_MATCH 1
+#define SAME_DIRECTION_MATCH 0
+
+using namespace boost;
+
+typedef adjacency_list <vecS, vecS, undirectedS> Graph;
+typedef std::tuple<Node, Node, int> Edge_w;
+typedef std::pair<Node, Node> Edge_nw;
+
+
+
+std::ostream& operator<<(std::ostream& out, const MatchType value){
+ static std::map<MatchType, std::string> strings;
+ if (strings.size() == 0){
+#define INSERT_ELEMENT(p) strings[p] = #p
+ INSERT_ELEMENT(FORWARD);
+ INSERT_ELEMENT(BACKWARD);
+ INSERT_ELEMENT(ACOVERB);
+ INSERT_ELEMENT(BCOVERA);
+ INSERT_ELEMENT(INTERNAL);
+ INSERT_ELEMENT(UNDEFINED);
+ INSERT_ELEMENT(NOT_ACTIVE);
+#undef INSERT_ELEMENT
+ }
+ return out << strings[value];
+}
+
+
+
+bool compare_overlap(LOverlap * ovl1, LOverlap * ovl2) {
+ return ((ovl1->read_A_match_end_ - ovl1->read_A_match_start_
+ + ovl1->read_B_match_end_ - ovl1->read_B_match_start_) >
+ (ovl2->read_A_match_end_ - ovl2->read_A_match_start_
+ + ovl2->read_B_match_end_ - ovl2->read_B_match_start_));
+}
+
+bool compare_overlap_weight(LOverlap * ovl1, LOverlap * ovl2) {
+ return (ovl1->weight > ovl2->weight);
+}
+
+
+
+bool compare_overlap_abpos(LOverlap * ovl1, LOverlap * ovl2) {
+ return ovl1->read_A_match_start_ < ovl2->read_A_match_start_;
+}
+
+bool compare_overlap_aepos(LOverlap * ovl1, LOverlap * ovl2) {
+ return ovl1->read_A_match_start_ > ovl2->read_A_match_start_;
+}
+
+
+
+
+
+
+int main(int argc, char *argv[]) {
+
+ cmdline::parser cmdp;
+ cmdp.add<std::string>("db", 'b', "db file name", false, "");
+ cmdp.add<std::string>("las", 'l', "las file name", false, "");
+ cmdp.add<std::string>("paf", 'p', "paf file name", false, "");
+ cmdp.add<std::string>("config", 'c', "configuration file name", false, "");
+ cmdp.add<std::string>("fasta", 'f', "fasta file name", false, "");
+ cmdp.add<std::string>("prefix", 'x', "(intermediate output) input file prefix", true, "");
+ cmdp.add<std::string>("out", 'o', "final output file name", true, "");
+ cmdp.add<std::string>("log", 'g', "log folder name", false, "log");
+
+
+
+// cmdp.add<std::string>("restrictreads",'r',"restrict to reads in the file",false,"");
+
+
+ cmdp.parse_check(argc, argv);
+
+ LAInterface la;
+ const char *name_db = cmdp.get<std::string>("db").c_str(); //.db file of reads to load
+ const char *name_las = cmdp.get<std::string>("las").c_str();//.las file of alignments
+ const char *name_paf = cmdp.get<std::string>("paf").c_str();
+ const char *name_fasta = cmdp.get<std::string>("fasta").c_str();
+ const char *name_config = cmdp.get<std::string>("config").c_str();//name of the configuration file, in INI format
+ std::string out = cmdp.get<std::string>("prefix");
+ std::string out_name = cmdp.get<std::string>("out");
+// const char * name_restrict = cmdp.get<std::string>("restrictreads").c_str();
+
+
+ std::string name_mask = out + ".mas";
+ std::string name_max = out + ".max";
+ std::string name_homo = out + ".homologous.txt";
+ std::string name_rep = out + ".repeat.txt";
+ std::string name_hg = out + ".hinges.txt";
+ std::string name_cov = out + ".coverage.txt";
+ std::string name_garbage = out + ".garbage.txt";
+ std::string name_contained = out + ".contained.txt";
+ std::string name_deadend = out_name + ".deadends.txt";
+
+
+ std::ofstream deadend_out(name_deadend);
+ std::ofstream maximal_reads(name_max);
+ std::ofstream garbage_out(name_garbage);
+ std::ofstream contained_out(name_contained);
+ std::ifstream homo(name_homo);
+ std::vector<int> homo_reads;
+
+
+ bool delete_telomere = false; // TODO: command line option to set this true
+
+ int read_id;
+ while (homo >> read_id) homo_reads.push_back(read_id);
+
+
+ namespace spd = spdlog;
+
+ //auto console = spd::stdout_logger_mt("console");
+ std::vector<spdlog::sink_ptr> sinks;
+ sinks.push_back(std::make_shared<spdlog::sinks::stdout_sink_st>());
+ sinks.push_back(
+ std::make_shared<spdlog::sinks::daily_file_sink_st>(cmdp.get<std::string>("log") + "/log", "txt", 23, 59));
+ auto console = std::make_shared<spdlog::logger>("log", std::begin(sinks), std::end(sinks));
+ spdlog::register_logger(console);
+
+ console->info("Hinging layout");
+ char *buff = (char *) malloc(sizeof(char) * 2000);
+ getcwd(buff, 2000); // getcwd() bounds the write to the buffer; getwd() is deprecated and unsafe
+ console->info("current user {}, current working directory {}", getlogin(), buff);
+ free(buff);
+ console->info("name of db: {}, name of .las file {}", name_db, name_las);
+ console->info("name of fasta: {}, name of .paf file {}", name_fasta, name_paf);
+ console->info("filter files prefix: {}", out);
+ console->info("output prefix: {}", out_name);
+
+
+ std::ifstream ini_file(name_config);
+ std::string str((std::istreambuf_iterator<char>(ini_file)),
+ std::istreambuf_iterator<char>());
+
+ console->info("Parameters passed in \n{}", str);
+
+ if (strlen(name_db) > 0)
+ la.openDB(name_db);
+
+
+ if (strlen(name_las) > 0)
+ la.openAlignmentFile(name_las);
+
+ int64 n_aln = 0;
+
+ if (strlen(name_las) > 0) {
+ n_aln = la.getAlignmentNumber();
+ console->info("Load alignments from {}", name_las);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ int n_read;
+ if (strlen(name_db) > 0)
+ n_read = la.getReadNumber();
+
+ std::vector<Read *> reads; //Vector of pointers to all reads
+
+ if (strlen(name_fasta) > 0) {
+ n_read = la.loadFASTA(name_fasta, reads);
+ }
+
+ console->info("# Reads: {}", n_read); // output some statistics
+
+ std::vector<LOverlap *> aln;//Vector of pointers to all alignments
+
+ if (strlen(name_las) > 0) {
+ la.resetAlignment();
+ la.getOverlap(aln, 0, n_aln);
+ }
+
+ if (strlen(name_paf) > 0) {
+ n_aln = la.loadPAF(std::string(name_paf), aln);
+ console->info("Load alignments from {}", name_paf);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ if (n_aln == 0) {
+ console->error("No alignments!");
+ return 1;
+ }
+
+
+ if (strlen(name_db) > 0) {
+ la.getRead(reads, 0, n_read);
+ }
+
+ console->info("Input data finished");
+
+ INIReader reader(name_config);
+
+ if (reader.ParseError() < 0) {
+ console->warn("Can't load {}", name_config);
+ return 1;
+ }
+
+ int LENGTH_THRESHOLD = int(reader.GetInteger("filter", "length_threshold", -1));
+ double QUALITY_THRESHOLD = reader.GetReal("filter", "quality_threshold", 0.0);
+ int N_ITER = (int) reader.GetInteger("filter", "n_iter", -1);
+ int ALN_THRESHOLD = (int) reader.GetInteger("filter", "aln_threshold", -1);
+ int MIN_COV = (int) reader.GetInteger("filter", "min_cov", -1);
+ int CUT_OFF = (int) reader.GetInteger("filter", "cut_off", -1);
+ int THETA = (int) reader.GetInteger("filter", "theta", -1);
+ int THETA2 = (int) reader.GetInteger("filter", "theta2", 0);
+ int N_PROC = (int) reader.GetInteger("running", "n_proc", 4);
+ int HINGE_SLACK = (int) reader.GetInteger("layout", "hinge_slack", 1000);
+ //This is the amount by which a forward overlap
+ //must be longer than a forward internal overlap to be preferred while
+ //building a graph.
+ int HINGE_TOLERANCE = (int) reader.GetInteger("layout", "hinge_tolerance", 150);
+ //This is how far an overlap must start from a hinge to be considered an internal
+ //overlap.
+ int KILL_HINGE_OVERLAP_ALLOWANCE = (int) reader.GetInteger("layout", "kill_hinge_overlap", 300);
+ int KILL_HINGE_INTERNAL_ALLOWANCE = (int) reader.GetInteger("layout", "kill_hinge_internal", 40);
+
+ int MATCHING_HINGE_SLACK = (int) reader.GetInteger("layout", "matching_hinge_slack", 200);
+
+ int NUM_EVENTS_TELOMERE = (int) reader.GetInteger("layout", "num_events_telomere", 7);
+
+ int MIN_CONNECTED_COMPONENT_SIZE = (int) reader.GetInteger("layout", "min_connected_component_size", 8);
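+ // Illustrative [layout] section of the INI file read above (a sketch; the values shown
+ // are simply the defaults passed to GetInteger, not recommendations):
+ //   [layout]
+ //   hinge_slack = 1000
+ //   hinge_tolerance = 150
+ //   kill_hinge_overlap = 300
+ //   kill_hinge_internal = 40
+ //   matching_hinge_slack = 200
+ //   num_events_telomere = 7
+ //   min_connected_component_size = 8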
+
+
+ console->info("LENGTH_THRESHOLD = {}", LENGTH_THRESHOLD);
+ console->info("QUALITY_THRESHOLD = {}", QUALITY_THRESHOLD);
+ console->info("ALN_THRESHOLD = {}", ALN_THRESHOLD);
+ console->info("MIN_COV = {}", MIN_COV);
+ console->info("CUT_OFF = {}", CUT_OFF);
+ console->info("THETA = {}", THETA);
+ console->info("N_ITER = {}", N_ITER);
+ console->info("THETA2 = {}", THETA2);
+ console->info("N_PROC = {}", N_PROC);
+ console->info("HINGE_SLACK = {}", HINGE_SLACK);
+ console->info("HINGE_TOLERANCE = {}", HINGE_TOLERANCE);
+ console->info("KILL_HINGE_OVERLAP_ALLOWANCE = {}", KILL_HINGE_OVERLAP_ALLOWANCE);
+ console->info("KILL_HINGE_INTERNAL_ALLOWANCE = {}", KILL_HINGE_INTERNAL_ALLOWANCE);
+ console->info("MATCHING_HINGE_SLACK = {}", MATCHING_HINGE_SLACK);
+ console->info("MIN_CONNECTED_COMPONENT_SIZE = {}", MIN_CONNECTED_COMPONENT_SIZE);
+
+
+ omp_set_num_threads(N_PROC);
+ std::vector<Edge_w> edgelist, edgelist_ms; // save output to edgelist
+ std::vector<std::unordered_map<int, std::vector<LOverlap *> > > idx_ab;
+
+
+ if (strlen(name_db) > 0)
+ la.closeDB(); //close database
+ return 0;
+}
diff --git a/src/filter/CMakeLists.txt b/src/filter/CMakeLists.txt
new file mode 100644
index 0000000..9829e6a
--- /dev/null
+++ b/src/filter/CMakeLists.txt
@@ -0,0 +1,4 @@
+cmake_minimum_required(VERSION 3.2)
+
+add_executable(Reads_filter filter)
+target_link_libraries(Reads_filter LAInterface ini spdlog)
diff --git a/src/filter/filter.cpp b/src/filter/filter.cpp
new file mode 100644
index 0000000..02bc4e8
--- /dev/null
+++ b/src/filter/filter.cpp
@@ -0,0 +1,1109 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unordered_map>
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <set>
+#include <tuple>
+#include <random>
+#include <omp.h>
+#include <time.h>
+#include <glob.h>
+
+
+#include "INIReader.h"
+#include "spdlog/spdlog.h"
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+#include "cmdline.h"
+
+
+#define LAST_READ_SYMBOL '$'
+
+
+typedef std::tuple<Node, Node, int> Edge_w; //Edge with weight
+typedef std::pair<Node, Node> Edge_nw; //Edge without weights
+
+
+inline std::vector<std::string> glob(const std::string& pat){
+ using namespace std;
+ glob_t glob_result;
+ glob(pat.c_str(),GLOB_TILDE,NULL,&glob_result);
+ vector<string> ret;
+ for(unsigned int i=0;i<glob_result.gl_pathc;++i){
+ ret.push_back(string(glob_result.gl_pathv[i]));
+ }
+ globfree(&glob_result);
+ return ret;
+}
+
+static int ORDER(const void *l, const void *r) {
+ //qsort-style comparator: the const void* parameters match the qsort() interface; returns x - y so int32 values sort ascending.
+ int x = *((int32 *) l);
+ int y = *((int32 *) r);
+ return (x - y);
+}
+
+std::ostream& operator<<(std::ostream& out, const MatchType value){
+ //Pretty-printer for MatchType: lazily builds a static map from each enum value to its name (via the stringizing macro below) and streams the name.
+ static std::map<MatchType, std::string> strings;
+ if (strings.size() == 0){
+#define INSERT_ELEMENT(p) strings[p] = #p
+ INSERT_ELEMENT(FORWARD);
+ INSERT_ELEMENT(BACKWARD);
+ INSERT_ELEMENT(MISMATCH_LEFT);
+ INSERT_ELEMENT(MISMATCH_RIGHT);
+ INSERT_ELEMENT(COVERED);
+ INSERT_ELEMENT(COVERING);
+ INSERT_ELEMENT(UNDEFINED);
+ INSERT_ELEMENT(MIDDLE);
+#undef INSERT_ELEMENT
+ }
+
+ return out << strings[value];
+}
+
+bool pairAscend(const std::pair<int, int>& firstElem, const std::pair<int, int>& secondElem) {
+ return firstElem.first < secondElem.first;
+}
+
+bool pairDescend(const std::pair<int, int>& firstElem, const std::pair<int, int>& secondElem) {
+ return firstElem.first > secondElem.first;
+}
+
+
+bool compare_overlap(LOverlap * ovl1, LOverlap * ovl2) {
+ //Returns true if the sum of the match lengths on the two reads of ovl1 exceeds
+ //the corresponding sum for ovl2; returns false otherwise.
+ return ((ovl1->read_A_match_end_ - ovl1->read_A_match_start_ + ovl1->read_B_match_end_ - ovl1->read_B_match_start_)
+ > (ovl2->read_A_match_end_ - ovl2->read_A_match_start_ + ovl2->read_B_match_end_ - ovl2->read_B_match_start_));
+}
+
+bool compare_sum_overlaps(const std::vector<LOverlap * > * ovl1, const std::vector<LOverlap *> * ovl2) {
+ //Returns True if the sum of matches over both reads for overlaps in ovl1 > sum of matches over both reads for overlaps in ovl2
+ //Returns False otherwise
+ int sum1 = 0;
+ int sum2 = 0;
+ for (int i = 0; i < ovl1->size(); i++)
+ sum1 += (*ovl1)[i]->read_A_match_end_ - (*ovl1)[i]->read_A_match_start_ +
+ (*ovl1)[i]->read_B_match_end_ - (*ovl1)[i]->read_B_match_start_;
+ for (int i = 0; i < ovl2->size(); i++)
+ sum2 += (*ovl2)[i]->read_A_match_end_ - (*ovl2)[i]->read_A_match_start_ +
+ (*ovl2)[i]->read_B_match_end_ - (*ovl2)[i]->read_B_match_start_;
+ return sum1 > sum2;
+}
+
+bool compare_pos(LOverlap * ovl1, LOverlap * ovl2) {
+ //True if ovl1 starts later than ovl2 on read A, i.e. sorts by start position in descending order.
+ return (ovl1->read_A_match_start_) > (ovl2->read_A_match_start_);
+}
+
+bool compare_overlap_abpos(LOverlap * ovl1, LOverlap * ovl2) {
+ //True if ovl1 starts earlier than ovl2 on read A (ascending start order);
+ //the reverse of the ordering used by compare_pos.
+ return ovl1->read_A_match_start_ < ovl2->read_A_match_start_;
+}
+
+bool compare_overlap_aepos(LOverlap * ovl1, LOverlap * ovl2) {
+ //Identical to compare_pos: despite the name ("aepos"), it compares read_A_match_start_, not read_A_match_end_.
+ return ovl1->read_A_match_start_ > ovl2->read_A_match_start_;
+}
+
+std::vector<std::pair<int,int>> Merge(std::vector<LOverlap *> & intervals, int cutoff)
+//Returns sections of read a which are covered by overlaps. Each overlap is considered as
+// <start_pos+cutoff,end_pos-cutoff>.
+{
+ //std::cout<<"Merge"<<std::endl;
+ std::vector<std::pair<int, int > > ret;
+ int n = intervals.size(); // Length of the vector intervals
+ if (n == 0) return ret;
+
+ if(n == 1) {
+ //Note: in the single-overlap case the cutoff trim is not applied; the raw match interval is returned.
+ ret.push_back(std::pair<int,int>(intervals[0]->read_A_match_start_, intervals[0]->read_A_match_end_));
+ return ret;
+ }
+
+ //The unqualified call resolves to std::sort (found via argument-dependent lookup on the std::vector iterators).
+ sort(intervals.begin(),intervals.end(),compare_overlap_abpos); //sort according to left (start position of
+ // overlap beginning on a)
+
+ int left= intervals[0]->read_A_match_start_ + cutoff, right = intervals[0]->read_A_match_end_ - cutoff;
+ //left, right means maximal possible interval now
+
+ for(int i = 1; i < n; i++) {
+ //Ovl1 ~ Ovl2 if their (trimmed) matches on read A have a nonzero intersection,
+ //i.e. both B reads map to overlapping positions on read A.
+ //This relation chains overlaps together; the loop appends to ret one
+ //<start of chain, end of chain> pair per maximal chain of connected overlaps.
+ if(intervals[i]->read_A_match_start_ + cutoff <= right)
+ {
+ right=std::max(right, intervals[i]->read_A_match_end_ - cutoff);
+ }
+ else
+ {
+ ret.push_back(std::pair<int, int>(left,right));
+ left = intervals[i]->read_A_match_start_ + cutoff;
+ right = intervals[i]->read_A_match_end_ - cutoff;
+ }
+ }
+ ret.push_back(std::pair<int, int>(left,right));
+ return ret;
+}
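+//Worked example (illustrative, assuming cutoff = 40): match intervals on read A
+//(100,1000), (900,2000), (5000,6000) are trimmed to (140,960), (940,1960), (5040,5960);
+//the first two chain together (940 <= 960), so Merge returns {(140,1960), (5040,5960)}.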
+
+//Interval = pair<int, int>. Defined in LAInterface.h
+Interval Effective_length(std::vector<LOverlap *> & intervals, int min_cov) {
+//Returns <start_pos, end_pos> for read A of the overlaps:
+//start_pos : the start of the (min_cov+1)-th overlap in ascending order of start position
+//            (intended as the first position covered by at least min_cov matches).
+//end_pos : the end of the overlap with the (min_cov+1)-th largest start position.
+//Note: because compare_overlap_aepos sorts by start rather than end, end_pos is not necessarily
+//the last position beyond which fewer than min_cov matches remain; comparing read_A_match_end_ would give that.
+ Interval ret;
+ sort(intervals.begin(),intervals.end(),compare_overlap_abpos); //sort according to left
+
+ if (intervals.size() > min_cov) {
+ ret.first = intervals[min_cov]->read_A_match_start_;
+ } else
+ ret.first = 0;
+ sort(intervals.begin(),intervals.end(),compare_overlap_aepos); //sort by start position, descending
+ if (intervals.size() > min_cov) {
+ ret.second = intervals[min_cov]->read_A_match_end_;
+ } else
+ ret.second = 0;
+ return ret;
+}
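+//Worked example (illustrative, min_cov = 2): for matches on read A
+//(0,5000), (100,4000), (300,6000), (2000,8000), (2500,9000), the third-smallest start is 300
+//and the overlap with the third-largest start is (300,6000), so Effective_length returns (300, 6000).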
+
+bool bridge(LOverlap* ovl, int s, int e){
+ //Returns true if the interval [s, e] on read A is bridged by ovl with a 500 bp margin on each side; false otherwise.
+ //The 500 bp margin is hard-coded; a named constant would be clearer.
+ return ((ovl->read_A_match_start_ < s - 500) and (ovl->read_A_match_end_ > e + 500));
+}
+
+float number_of_bridging_reads(std::vector<LOverlap *> ovl_reads, int hinge_location, int hinge_type,int threshold){
+ int num_bridging_reads=0;
+ //int threshold=100;
+ std::vector<int> read_ends;
+ if (hinge_type==1){
+ for (int i=0; i < ovl_reads.size(); i++){
+ if ((ovl_reads[i]->read_A_match_start_ > hinge_location-threshold ) and
+ (ovl_reads[i]->read_A_match_start_ < hinge_location+threshold ))
+ read_ends.push_back(ovl_reads[i]->read_A_match_end_);
+ }
+ }
+ else if (hinge_type==-1){
+ for (int i=0; i < ovl_reads.size(); i++){
+ if ((ovl_reads[i]->read_A_match_end_ > hinge_location-threshold ) and
+ (ovl_reads[i]->read_A_match_end_ < hinge_location+threshold ))
+ read_ends.push_back(ovl_reads[i]->read_A_match_start_);
+ }
+ }
+ std::sort(read_ends.begin(),read_ends.end(), std::greater<int>());
+ int start_point=0;
+ int num_bins=0;
+ for (int i=0; i<read_ends.size(); i++) {
+ std::cout << hinge_location <<"\t"<< read_ends[i]<< std::endl;
+ if (read_ends[start_point] - read_ends[i] > 2 * threshold) {
+ num_bins++;
+ start_point = i;
+ }
+ }
+ return num_bins/((float)1);
+}
+
+
+int main(int argc, char *argv[]) {
+
+ mkdir("log",S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+
+ cmdline::parser cmdp;
+ cmdp.add<std::string>("db", 'b', "db file name", false, "");
+ cmdp.add<std::string>("las", 'l', "las file name", false, "");
+ cmdp.add<std::string>("paf", 'p', "paf file name", false, "");
+ cmdp.add<std::string>("config", 'c', "configuration file name", false, "");
+ cmdp.add<std::string>("fasta", 'f', "fasta file name", false, "");
+ cmdp.add<std::string>("prefix", 'x', "prefix of (intermediate) output", false, "out");
+ cmdp.add<std::string>("restrictreads",'r',"restrict to reads in the file",false,"");
+ cmdp.add<std::string>("log", 'g', "log folder name", false, "log");
+ cmdp.add("debug", '\0', "debug mode");
+ cmdp.parse_check(argc, argv);
+
+ LAInterface la;
+ const char * name_db = cmdp.get<std::string>("db").c_str(); //.db file of reads to load
+ const char * name_las = cmdp.get<std::string>("las").c_str();//.las file of alignments
+ const char * name_paf = cmdp.get<std::string>("paf").c_str();
+ const char * name_fasta = cmdp.get<std::string>("fasta").c_str();
+ const char * name_config = cmdp.get<std::string>("config").c_str();//name of the configuration file, in INI format
+ std::string out = cmdp.get<std::string>("prefix");
+ bool has_qv = true;
+ const char * name_restrict = cmdp.get<std::string>("restrictreads").c_str();
+ /**
+ * There are two sets of input, the first is db+las, which corresponds to daligner as an overlapper,
+ * the other is fasta + paf, which corresponds to minimap as an overlapper.
+ */
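+ // Illustrative invocations (a sketch: file names are hypothetical; the executable name
+ // Reads_filter comes from src/filter/CMakeLists.txt in this patch):
+ //   Reads_filter --db reads.db --las reads.las --config nominal.ini --prefix out
+ //   Reads_filter --fasta reads.fasta --paf overlaps.paf --config nominal.ini --prefix out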
+
+ namespace spd = spdlog;
+
+ //auto console = spd::stdout_logger_mt("console",true);
+
+ std::vector<spdlog::sink_ptr> sinks;
+ sinks.push_back(std::make_shared<spdlog::sinks::stdout_sink_st>());
+ sinks.push_back(std::make_shared<spdlog::sinks::daily_file_sink_st>(cmdp.get<std::string>("log") + "/log", "txt", 23, 59));
+ auto console = std::make_shared<spdlog::logger>("log", begin(sinks), end(sinks));
+ spdlog::register_logger(console);
+ //auto console = std::make_shared<spdlog::logger>("name", begin(sinks), end(sinks));
+
+
+ console->info("Reads filtering");
+
+ if (cmdp.exist("debug")) {
+ char *buff = (char *) malloc(sizeof(char) * 2000);
+ getcwd(buff, 2000); // getcwd() bounds the write to the buffer; getwd() is deprecated and unsafe
+ console->info("current user {}, current working directory {}", getlogin(), buff);
+ free(buff);
+ }
+
+ console->info("name of db: {}, name of .las file {}", name_db, name_las);
+ console->info("name of fasta: {}, name of .paf file {}", name_fasta, name_paf);
+
+
+ std::ifstream ini_file(name_config);
+ std::string str((std::istreambuf_iterator<char>(ini_file)),
+ std::istreambuf_iterator<char>());
+
+ console->info("Parameters passed in \n{}", str);
+
+ if (strlen(name_db) > 0)
+ la.openDB(name_db);
+
+ std::vector<std::string> name_las_list;
+ std::string name_las_str(name_las);
+ if (name_las_str.find('*') != std::string::npos)
+ name_las_list = glob(name_las_str);
+ else
+ name_las_list.push_back(name_las_str);
+
+
+
+
+ int n_read;
+ if (strlen(name_db) > 0)
+ n_read = la.getReadNumber();
+
+ std::vector<Read *> reads; //Vector of pointers to all reads
+
+ if (strlen(name_fasta) > 0) {
+ n_read = la.loadFASTA(name_fasta,reads);
+ has_qv = false;
+ }
+
+
+ console->info("# Reads: {}", n_read); // output some statistics
+
+
+
+
+ std::vector<std::vector<int>> QV;
+
+ if (strlen(name_db) > 0) {
+ la.getRead(reads,0,n_read);
+ la.getQV(QV,0,n_read); // load QV track from .db file
+ }
+
+
+ if (has_qv)
+ for (int i = 0; i < n_read; i++) {
+ for (int j = 0; j < QV[i].size(); j++) QV[i][j] = int(QV[i][j] < 40);
+ }
+ //Binarize QV vector, 40 is the threshold
+ std::set<int> reads_to_keep, reads_to_keep_initial;
+ char * line = NULL;
+ size_t len = 0;
+ if (strlen(name_restrict) > 0){
+ FILE * restrict_reads;
+ restrict_reads = fopen(name_restrict, "r");
+ while (getline(&line, &len, restrict_reads) != -1){
+ std::stringstream ss;
+ ss.clear();
+ ss << line;
+ int num;
+ ss >> num;
+ reads_to_keep.insert(num);
+ }
+ fclose(restrict_reads);
+ console->info("Reads to debug loaded from: {}", name_restrict);
+ console->info("Number of reads to debug loaded: {}", reads_to_keep.size());
+ }
+ else
+ console->info("No debug restrictions.");
+
+
+
+ if (strlen(name_las_list[0].c_str()) > 0)
+ la.openAlignmentFile(name_las_list[0]); // get tspace
+
+ std::vector<std::pair<int, int> > QV_mask(n_read);
+ // QV_mask is the mask based on QV for reads, for each read, it has one pair [start, end]
+
+ if (has_qv) {
+ for (int i = 0; i < n_read; i++) {
+ int s = 0, e = 0;
+ int max = 0, maxs = s, maxe = e;
+
+ for (int j = 0; j < QV[i].size(); j++) {
+ if ((QV[i][j] == 1) and (j<QV[i].size() - 1)) {
+ e ++;
+ }
+ else {
+ if (e - s > max) {
+ maxe = e ; maxs = s;
+ max = e - s;
+ }
+
+ s = j+1;
+ e = j+1;
+ }
+ }
+ // get the longest consecutive region that has good QV
+ //printf("maxs %d maxe %d size%d\n",maxs, maxe,QV[i].size());
+
+ QV_mask[i] = (std::pair<int, int>(maxs*la.tspace, maxe*la.tspace));
+ // la.tspace is the spacing (in bases) between trace points; it converts trace-point indices to base coordinates
+ // create mask by QV
+ }
+ }
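+ // Worked example (illustrative): with la.tspace = 100 and a binarized QV track
+ // {0,1,1,1,1,0,1,1,0}, the longest run of good (value 1) segments covers indices 1-4,
+ // so QV_mask for that read becomes (100, 500) in base coordinates.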
+
+ INIReader reader(name_config);
+ if (reader.ParseError() < 0) {
+ console->warn("Can't load {}", name_config);
+ return 1;
+ }
+
+ int LENGTH_THRESHOLD = reader.GetInteger("filter", "length_threshold", -1);
+ double QUALITY_THRESHOLD = reader.GetReal("filter", "quality_threshold", 0.0);
+ int N_ITER = reader.GetInteger("filter", "n_iter", -1);
+ int ALN_THRESHOLD = reader.GetInteger("filter", "aln_threshold", -1);
+ int MIN_COV = reader.GetInteger("filter", "min_cov", -1);
+ int CUT_OFF = reader.GetInteger("filter", "cut_off", -1);
+ int THETA = reader.GetInteger("filter", "theta", -1);
+ int N_PROC = reader.GetInteger("running", "n_proc", 4);
+ int EST_COV = reader.GetInteger("filter", "ec", 0); // estimated coverage supplied via the ini file (e.g. by another program); if zero, it is estimated below
+ int reso = 40; // resolution of masks, repeat annotation, coverage, etc = 40 basepairs
+ bool use_qv_mask = reader.GetBoolean("filter", "use_qv", true);
+ bool use_coverage_mask = reader.GetBoolean("filter", "coverage", true);
+ int COVERAGE_FRACTION = (int) reader.GetInteger("filter", "coverage_frac_repeat_annotation", 3);
+ const int MIN_REPEAT_ANNOTATION_THRESHOLD = (int) reader.GetInteger("filter", "min_repeat_annotation_threshold", 10);
+ const int MAX_REPEAT_ANNOTATION_THRESHOLD = (int) reader.GetInteger("filter", "max_repeat_annotation_threshold", 20);
+ const int REPEAT_ANNOTATION_GAP_THRESHOLD = (int) reader.GetInteger("filter", "repeat_annotation_gap_threshold",300);
+ const int NO_HINGE_REGION = (int) reader.GetInteger("filter", "no_hinge_region",500);
+ //No hinge is called within this many bases of either end of a read's masked region
+ const int HINGE_MIN_SUPPORT = (int) reader.GetInteger("filter", "hinge_min_support", 7);
+ //Minimum number of reads that have to start in a reso length interval to be considered in hinge calling
+ const int HINGE_BIN_PILEUP_THRESHOLD = (int) reader.GetInteger("filter", "hinge_min_pileup", 7);
+ //Minimum number of reads to have in a pileup to consider a hinge bridged
+ const int HINGE_READ_UNBRIDGED_THRESHOLD = (int) reader.GetInteger("filter", "hinge_unbridged", 6);
+ //Number of reads that one has to see before a pileup to declare a potential hinge unbridged
+ int HINGE_BIN_LENGTH = (int) reader.GetInteger("filter", "hinge_bin", 100);
+ //Physical length of the bins considered
+ const int HINGE_TOLERANCE_LENGTH = (int) reader.GetInteger("filter", "hinge_tolerance_length", 100);
+ //Reads starting at +/- HINGE_TOLERANCE_LENGTH are considered reads starting at hinges
+ HINGE_BIN_LENGTH=2*HINGE_TOLERANCE_LENGTH;
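+ // Illustrative INI fragment for the keys read above (a sketch: the numeric values are
+ // hypothetical and are not recommendations):
+ //   [filter]
+ //   length_threshold = 1000
+ //   quality_threshold = 0.23
+ //   n_iter = 3
+ //   aln_threshold = 1000
+ //   min_cov = 5
+ //   cut_off = 200
+ //   theta = 300
+ //   use_qv = true
+ //   coverage = true
+ //   [running]
+ //   n_proc = 4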
+
+ console->info("use_qv_mask set to {}",use_qv_mask);
+ use_qv_mask = use_qv_mask and has_qv;
+
+ console->info("use_qv_mask set to {}",use_qv_mask);
+
+ omp_set_num_threads(N_PROC);
+ console->info("number of processes set to {}", N_PROC);
+
+ console->info("LENGTH_THRESHOLD = {}",LENGTH_THRESHOLD);
+ console->info("QUALITY_THRESHOLD = {}",QUALITY_THRESHOLD);
+ console->info("N_ITER = {}",N_ITER);
+ console->info("ALN_THRESHOLD = {}",ALN_THRESHOLD);
+ console->info("MIN_COV = {}",MIN_COV);
+ console->info("CUT_OFF = {}",CUT_OFF);
+ console->info("THETA = {}",THETA);
+ console->info("EST_COV = {}",EST_COV);
+ console->info("reso = {}",reso);
+ console->info("use_coverage_mask = {}",use_coverage_mask);
+ console->info("COVERAGE_FRACTION = {}",COVERAGE_FRACTION);
+ console->info("MIN_REPEAT_ANNOTATION_THRESHOLD = {}",MIN_REPEAT_ANNOTATION_THRESHOLD);
+ console->info("MAX_REPEAT_ANNOTATION_THRESHOLD = {}",MAX_REPEAT_ANNOTATION_THRESHOLD);
+ console->info("REPEAT_ANNOTATION_GAP_THRESHOLD = {}",REPEAT_ANNOTATION_GAP_THRESHOLD);
+ console->info("NO_HINGE_REGION = {}",NO_HINGE_REGION);
+ console->info("HINGE_MIN_SUPPORT = {}",HINGE_MIN_SUPPORT);
+ console->info("HINGE_BIN_PILEUP_THRESHOLD = {}",HINGE_BIN_PILEUP_THRESHOLD);
+ console->info("HINGE_READ_UNBRIDGED_THRESHOLD = {}",HINGE_READ_UNBRIDGED_THRESHOLD);
+ console->info("HINGE_BIN_LENGTH = {}",HINGE_BIN_LENGTH);
+ console->info("HINGE_TOLERANCE_LENGTH = {}",HINGE_TOLERANCE_LENGTH);
+
+
+
+
+ std::vector<LOverlap *> aln;//Vector of pointers to all alignments
+ std::vector< std::vector<std::pair<int, int> > > coverages(n_read);
+ std::vector< std::vector<std::pair<int, int> > > cutoff_coverages(n_read);
+ std::vector< std::vector<std::pair<int, int> > > cgs(n_read); //coverage gradient;
+ std::vector<std::pair<int, int>> maskvec;
+ std::vector<std::vector<std::pair<int, int> > > repeat_annotation;
+ std::unordered_map<int, std::vector<std::pair<int, int>> > hinges;
+
+
+ std::ofstream cov(out + ".coverage.txt");
+ std::ofstream homo(out + ".homologous.txt");
+ std::ofstream rep(out + ".repeat.txt");
+ std::ofstream filtered(out + ".filtered.fasta");
+ std::ofstream hg(out + ".hinges.txt");
+ std::ofstream mask(out + ".mas");
+
+ for (int part = 0; part < name_las_list.size(); part++) {
+
+
+ console->info("name of las: {}", name_las_list[part]);
+
+
+ if (strlen(name_las_list[part].c_str()) > 0)
+ la.openAlignmentFile(name_las_list[part]);
+
+ int64 n_aln = 0;
+
+ if (strlen(name_las) > 0) {
+ n_aln = la.getAlignmentNumber();
+ console->info("Load alignments from {}", name_las_list[part]);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+
+ if (strlen(name_las) > 0) {
+ la.resetAlignment();
+ la.getOverlap(aln, 0, n_aln);
+ }
+
+ if (strlen(name_paf) > 0) {
+ n_aln = la.loadPAF(std::string(name_paf), aln);
+ console->info("Load alignments from {}", name_paf);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ if (n_aln == 0) {
+ console->error("No alignments!");
+ return 1;
+ }
+
+ console->info("Input data finished, part {}/{}", part + 1, name_las_list.size());
+
+
+
+ int r_begin = aln.front()->read_A_id_;
+ int r_end = aln.back()->read_A_id_;
+
+
+ std::vector<std::vector <LOverlap * > > idx_pileup; // this is the pileup
+ std::vector<std::vector <LOverlap * > > idx_pileup_dedup; // this is the deduplicated pileup
+ std::vector<std::unordered_map<int, std::vector<LOverlap *> > > idx_ab; //unordered_map from (aid, bid) to alignments in a vector
+
+
+
+ for (int i = 0; i< n_read; i++) {
+ idx_pileup.push_back(std::vector<LOverlap *>());
+ idx_pileup_dedup.push_back(std::vector<LOverlap *>());
+ idx_ab.push_back(std::unordered_map<int, std::vector<LOverlap *>> ());
+ repeat_annotation.push_back(std::vector<std::pair<int, int> >());
+ maskvec.push_back(std::pair<int, int>());
+ }
+
+ for (int i = 0; i < aln.size(); i++) {
+ if (aln[i]->active) {
+ idx_pileup[aln[i]->read_A_id_].push_back(aln[i]);
+ }
+ }
+
+
+
+
+# pragma omp parallel for
+ for (int i = 0; i < n_read; i++) {// sort overlaps of a reads
+ std::sort(idx_pileup[i].begin(), idx_pileup[i].end(), compare_overlap);
+ }
+
+# pragma omp parallel for
+ for (int i = 0; i < aln.size(); i++) {
+ idx_ab[aln[i]->read_A_id_][aln[i]->read_B_id_] = std::vector<LOverlap *>();
+ }
+
+# pragma omp parallel for
+ for (int i = 0; i < aln.size(); i++) {
+ idx_ab[aln[i]->read_A_id_][aln[i]->read_B_id_].push_back(aln[i]);
+ }
+
+
+
+# pragma omp parallel for
+ for (int i = 0; i < n_read; i++) {
+ for (std::unordered_map<int, std::vector<LOverlap *> >::iterator it = idx_ab[i].begin(); it!= idx_ab[i].end(); it++) {
+ std::sort(it->second.begin(), it->second.end(), compare_overlap);
+ if (it->second.size() > 0)
+ idx_pileup_dedup[i].push_back(it->second[0]);
+ }
+ }
+
+ console->info("profile coverage (with and without CUT_OFF)");
+
+ //std::vector< std::vector<std::pair<int, int> > > his;
+ for (int i = r_begin; i <= r_end; i ++) {
+ std::vector<std::pair<int, int> > coverage;
+
+ std::vector<std::pair<int, int> > cutoff_coverage;
+
+
+ //TODO : Implement set based gradient
+ std::vector<std::pair<int, int> > cg;
+ //profileCoverage: get the coverage based on pile-o-gram
+ la.profileCoverage(idx_pileup[i], cutoff_coverage, reso, CUT_OFF);
+ la.profileCoverage(idx_pileup[i], coverage, reso, 0);
+ cov << "read " << i <<" ";
+ for (int j = 0; j < coverage.size(); j++)
+ cov << coverage[j].first << "," << coverage[j].second << " ";
+ cov << std::endl;
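+ // Each line of <prefix>.coverage.txt therefore reads "read <id> <pos>,<cov> <pos>,<cov> ...",
+ // e.g. (hypothetical values) "read 12 0,31 40,33 80,35 ..." at reso-bp resolution.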
+
+ //Computes coverage gradients.
+ if (coverage.size() >= 2)
+ for (int j = 0; j < coverage.size() - 1; j++) {
+ cg.push_back(std::pair<int,int>(coverage[j].first, coverage[j+1].second - coverage[j].second));
+ }
+ else cg.push_back(std::pair<int, int> (0,0));
+
+ coverages[i] = (coverage);
+ cutoff_coverages[i] = (cutoff_coverage);
+ cgs[i] = (cg);
+ }
+
+ console->info("profile coverage done part {}/{}", part + 1, name_las_list.size());
+
+
+ std::set<int> rand_reads;
+ srand(time(NULL));
+ rand_reads.insert(0);
+ while (rand_reads.size() < (r_end - r_begin)/500){
+ int rd_id=rand()%(r_end - r_begin) + r_begin;
+ if (reads[rd_id]->len > 5000)
+ rand_reads.insert(rd_id);
+ }
+
+ int num_slot = 0;
+ long int total_cov = 0;
+
+ std::vector<int> read_coverage;
+ long int read_cov=0;
+ int read_slot =0;
+ //Estimate the average coverage from every read in this range longer than 5000 bp (the random-subsampling variant below is commented out)
+
+// for (std::set<int>::iterator it=rand_reads.begin();it!=rand_reads.end(); ++it) {
+ for (int i =r_begin; i <= r_end; i++){
+ if (reads[i]->len < 5000)
+ continue;
+ read_cov=0;
+ read_slot=0;
+ for (int j = 0; j < coverages[i].size(); j++) {
+ //printf("%d\n", coverages[i][j].second);
+ read_cov+=coverages[i][j].second;
+ read_slot++;
+ }
+ total_cov += read_cov;
+ num_slot += read_slot;
+ int mean_read_cov=read_cov / std::max(1,read_slot);
+ read_coverage.push_back(mean_read_cov);
+ }
+
+
+
+ size_t median_id = read_coverage.size() / 2;
+ if (median_id > 0)
+ std::nth_element(read_coverage.begin(), read_coverage.begin()+median_id, read_coverage.end());
+
+ int cov_est= read_coverage[median_id];
+
+ int mean_cov_est = total_cov / num_slot;
+
+
+ //get estimated coverage
+
+ if (EST_COV != 0) cov_est = EST_COV; // a coverage specified in the ini file overrides the estimate
+ console->info("Estimated mean coverage: {}", mean_cov_est);
+ console->info("Estimated median coverage: {}", cov_est);
+
+
+ // maskvec (filled below) uses the same [start, end] format as QV_mask
+ if (MIN_COV < cov_est/3) // raise MIN_COV to at least a third of the estimated coverage
+ MIN_COV = cov_est/3;
+
+ if (reads_to_keep.size()>0) {
+ reads_to_keep_initial = reads_to_keep;
+ for (std::set<int>::iterator iter = reads_to_keep_initial.begin();
+ iter != reads_to_keep_initial.end(); ++iter) {
+ int i = *iter;
+ for (std::unordered_map<int, std::vector<LOverlap *> >::iterator it = idx_ab[i].begin();
+ it != idx_ab[i].end(); it++) {
+ if (it->second.size() > 0) {
+ LOverlap *ovl = it->second[0];
+ reads_to_keep.insert(ovl->read_B_id_);
+ }
+ }
+ }
+ console->info("After accounting for neighbours of reads selected, have {} reads", reads_to_keep.size());
+ }
+
+ for (int i = r_begin; i <= r_end; i++) {
+ for (int j = 0; j < cutoff_coverages[i].size(); j++) {
+ cutoff_coverages[i][j].second -= MIN_COV;
+ if (cutoff_coverages[i][j].second < 0) cutoff_coverages[i][j].second = 0;
+ }
+
+ //get the longest consecutive region that has decent coverage, decent coverage = estimated coverage / 3
+ int start = 0;
+ int end = start;
+ int maxlen = 0, maxstart = 0, maxend = 0;
+ for (int j = 0; j < cutoff_coverages[i].size(); j++) {
+ if (cutoff_coverages[i][j].second > 0) {
+ end = cutoff_coverages[i][j].first;
+ } else {
+ if (end > start) {
+ //std::cout<<"read" << i << " "<<start+reso << "->" << end << std::endl;
+ if (end - start - reso > maxlen) {
+ maxlen = end - start - reso;
+ maxstart = start + reso;
+ maxend = end;
+ }
+ }
+ start = cutoff_coverages[i][j].first;
+ end = start;
+ }
+ }
+ //std::cout << i << " " << maxstart << " " << maxend << std::endl;
+ //int s = std::max(maxstart, QV_mask[i].first);
+ //int l = std::min(maxend, QV_mask[i].second) - std::max(maxstart, QV_mask[i].first);
+ //if (l < 0) l = 0;
+ //filtered << ">read_" << i << std::endl;
+ //filtered << reads[i]->bases.substr(s,l) << std::endl;
+
+ if (reads_to_keep.size()>0) {
+ if (reads_to_keep.find(i) == reads_to_keep.end()){
+// std::cout<<"setting masks equal";
+ maxend=maxstart;
+ QV_mask[i].second=QV_mask[i].first;
+ }
+ }
+ if ((use_qv_mask) and (use_coverage_mask)) {
+ maskvec[i] = (
+ std::pair<int, int>(std::max(maxstart, QV_mask[i].first), std::min(maxend, QV_mask[i].second)));
+ //take the intersection of the two masks
+ mask << i << " " << std::max(maxstart, QV_mask[i].first) << " " << std::min(maxend, QV_mask[i].second) << std::endl;
+ } else if ((use_coverage_mask) and (not use_qv_mask)) {
+ maskvec[i] = (std::pair<int, int>(maxstart, maxend));
+ mask << i << " " << maxstart << " " << maxend << std::endl;
+ } else {
+ maskvec[i] = (std::pair<int, int>(QV_mask[i].first, QV_mask[i].second));
+ mask << i << " " << QV_mask[i].first << " " << QV_mask[i].second << std::endl;
+ }
+ }
+
+ /*FILE* temp_out1;
+ FILE* temp_out2;
+ temp_out1=fopen("coverage.debug.txt","w");
+ temp_out2=fopen("coverage_gradient.debug.txt","w");
+
+ for (int i=0; i< n_read ; i++) {
+ fprintf(temp_out1,"%d \t", i);
+ for (int j=0; j < coverages[i].size(); j++){
+ fprintf(temp_out1,"%d:%d \t", coverages[i][j].first,coverages[i][j].second);
+ }
+ fprintf(temp_out1,"\n");
+ }
+
+ for (int i=0; i< n_read ; i++) {
+ fprintf(temp_out2,"%d \t", i);
+ for (int j=0; j < cgs[i].size(); j++){
+ fprintf(temp_out2,"%d:%d \t", cgs[i][j].first,cgs[i][j].second);
+ }
+ fprintf(temp_out2,"\n");
+ }
+ fclose(temp_out1);
+ fclose(temp_out2);*/
+
+ /*for (int i = 0; i < maskvec.size(); i++) {
+ printf("read %d %d %d\n", i, maskvec[i].first, maskvec[i].second);
+ printf("QV: read %d %d %d\n", i, QV_mask[i].first, QV_mask[i].second);
+ }*/
+
+
+ //binarize coverage gradient;
+
+
+
+
+
+
+ //detect repeats based on the coverage gradient; mark each event as rising (1) or falling (-1)
+ for (int i = r_begin; i <= r_end; i++) {
+ std::vector<std::pair<int, int> > anno;
+ for (int j = 0; j < cgs[i].size()-1; j++) { // the last gradient entry is skipped
+ //std::cout<< i << " " << cgs[i][j].first << " " << cgs[i][j].second << std::endl;
+
+ if ((cgs[i][j].first >= maskvec[i].first + NO_HINGE_REGION) and (cgs[i][j].first <= maskvec[i].second - NO_HINGE_REGION)) {
+ if (cgs[i][j].second > std::min(
+ std::max((coverages[i][j].second+MIN_COV)/COVERAGE_FRACTION, MIN_REPEAT_ANNOTATION_THRESHOLD),
+ MAX_REPEAT_ANNOTATION_THRESHOLD))
+ anno.push_back(std::pair<int, int>(cgs[i][j].first, 1));
+ else if (cgs[i][j].second < - std::min(
+ std::max((coverages[i][j].second+MIN_COV)/COVERAGE_FRACTION, MIN_REPEAT_ANNOTATION_THRESHOLD),
+ MAX_REPEAT_ANNOTATION_THRESHOLD))
+ anno.push_back(std::pair<int, int>(cgs[i][j].first, -1));
+ }
+ }
+ repeat_annotation[i] = (anno);
+ }
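+ // Worked example (illustrative, using the defaults above: COVERAGE_FRACTION = 3,
+ // MIN/MAX_REPEAT_ANNOTATION_THRESHOLD = 10/20): at a point with coverage 40 and MIN_COV = 20,
+ // the threshold is min(max((40+20)/3, 10), 20) = 20, so only coverage-gradient jumps above +20
+ // (rising, 1) or below -20 (falling, -1) are annotated.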
+
+
+ // clean up: merge consecutive 1s, or consecutive -1s, whose positions are within REPEAT_ANNOTATION_GAP_THRESHOLD of each other (possibly bursty errors); merging adjacent 1/-1 pairs is handled only by the disabled block below
+ for (int i = r_begin; i <= r_end; i++) {
+ for (std::vector<std::pair<int, int> >::iterator iter = repeat_annotation[i].begin(); iter < repeat_annotation[i].end(); ) {
+ if (iter+1 < repeat_annotation[i].end()){
+ if (((iter->second == 1) and ((iter + 1)->second == 1)) and
+ ((iter+1)->first - iter->first < REPEAT_ANNOTATION_GAP_THRESHOLD)) {
+ repeat_annotation[i].erase((iter + 1));
+ } else if (((iter->second == -1) and ((iter + 1)->second == -1)) and
+ ((iter+1)->first - iter->first < REPEAT_ANNOTATION_GAP_THRESHOLD)) {
+ iter = repeat_annotation[i].erase(iter);
+ } else iter++;
+ } else iter ++;
+ }
+ }
+
+
+
+
+ //remove gaps
+// for (int i = 0; i < n_read; i++) {
+// for (std::vector<std::pair<int, int> >::iterator iter = repeat_annotation[i].begin(); iter < repeat_annotation[i].end(); ) {
+// if (iter+1 < repeat_annotation[i].end()){
+// if ((iter->second == -1) and ((iter+1)->second == 1) and
+// ((iter+1)->first - iter->first < REPEAT_ANNOTATION_GAP_THRESHOLD)){
+// iter = repeat_annotation[i].erase(iter);
+// iter = repeat_annotation[i].erase(iter); // fill gaps
+// } else if ((iter->second == 1) and ((iter+1)->second == -1) and
+// ((iter+1)->first - iter->first < REPEAT_ANNOTATION_GAP_THRESHOLD)) {
+// iter = repeat_annotation[i].erase(iter);
+// iter = repeat_annotation[i].erase(iter);
+// } else iter++;
+// } else iter ++;
+// }
+// }
+
+
+ /*temp_out1=fopen("repeat_annotation.debug.txt","w");
+ for (int i = 0; i < n_read; i++) {
+ fprintf(temp_out1,"%d \t%d\t",i,repeat_annotation[i].size());
+ for (std::vector<std::pair<int, int> >::iterator iter = repeat_annotation[i].begin(); iter < repeat_annotation[i].end();iter++) {
+ fprintf(temp_out1,"%d:%d\t",iter->first,iter->second);
+ }
+ fprintf(temp_out1,"\n");
+ }
+ fclose(temp_out1);*/
+ // need a better hinge detection
+
+ // get hinges from repeat annotation information
+
+ // n_read pos -1 = in_hinge 1 = out_hinge
+ std::ofstream debug_file("debug.txt");
+ for (int i = r_begin; i <= r_end; i++) {
+ //std::cout << i <<std::endl;
+ hinges[i] = std::vector<std::pair<int, int>>();
+
+ int coverage_at_start(0);
+ int num_at_start(0);
+ int num_at_end(0);
+ int coverage_at_end(0);
+ float avg_coverage_at_start;
+ float avg_coverage_at_end;
+ for (int j = 0; j < coverages[i].size(); j++){
+ if ((coverages[i][j].first <= maskvec[i].first + NO_HINGE_REGION) and
+ (coverages[i][j].first >= maskvec[i].first )){
+ coverage_at_start += coverages[i][j].second;
+ num_at_start++;
+ }
+ if ((coverages[i][j].first <= maskvec[i].second ) and
+ (coverages[i][j].first >= maskvec[i].second - NO_HINGE_REGION )){
+ coverage_at_end += coverages[i][j].second;
+ num_at_end++;
+ }
+ }
+
+ avg_coverage_at_end = (float)coverage_at_end/num_at_end;
+ avg_coverage_at_start = (float)coverage_at_start/num_at_start;
+ if (std::abs(avg_coverage_at_end-avg_coverage_at_start) < 10){
+ continue;
+ }
+
+ for (int j = 0; j < repeat_annotation[i].size(); j++) {
+ if (repeat_annotation[i][j].second == -1) { // look for out hinges, negative gradient
+
+ bool bridged = true;
+ int support = 0;
+ int num_reads_at_end=1;
+
+ std::vector<std::pair<int,int> > read_other_ends;
+
+
+ for (int k = 0; k < idx_pileup[i].size(); k++) {
+
+ int left_overhang, right_overhang;
+ int temp_id;
+ temp_id=idx_pileup[i][k]->read_B_id_;
+
+ if (idx_pileup[i][k]->reverse_complement_match_==0){
+ right_overhang= std::max(maskvec[temp_id].second-idx_pileup[i][k]->read_B_match_end_,0);
+ left_overhang= std::max(idx_pileup[i][k]->read_B_match_start_- maskvec[temp_id].first,0);
+ }
+ else if (idx_pileup[i][k]->reverse_complement_match_==1) {
+ right_overhang= std::max(idx_pileup[i][k]->read_B_match_start_- maskvec[temp_id].first,0);
+ left_overhang= std::max(maskvec[temp_id].second-idx_pileup[i][k]->read_B_match_end_,0);
+ }
+
+
+
+ if (right_overhang > THETA) {
+ if ((idx_pileup[i][k]->read_A_match_end_ >
+ repeat_annotation[i][j].first - HINGE_TOLERANCE_LENGTH)
+ and (idx_pileup[i][k]->read_A_match_end_ <
+ repeat_annotation[i][j].first + HINGE_TOLERANCE_LENGTH)) {
+
+
+ std::pair <int,int> other_end;
+ other_end.first=idx_pileup[i][k]->read_A_match_start_;
+ other_end.second=left_overhang;
+ read_other_ends.push_back(other_end);
+ support++;
+ }
+ }
+ }
+
+ if (support < HINGE_MIN_SUPPORT){
+ continue;
+ }
+
+ std::sort(read_other_ends.begin(),read_other_ends.end(), pairAscend);
+
+ int num_reads_considered=0;
+ int num_reads_extending_to_end=0;
+ int num_reads_with_internal_overlaps=0;
+
+ for (int id = 0; id < read_other_ends.size() ; ++id) {
+ if (read_other_ends[id].first -maskvec[i].first < HINGE_BIN_LENGTH){
+ num_reads_considered++;
+ num_reads_extending_to_end++;
+
+ if ((num_reads_extending_to_end > HINGE_READ_UNBRIDGED_THRESHOLD) or
+ ((num_reads_considered > HINGE_READ_UNBRIDGED_THRESHOLD) and
+ (read_other_ends[id].first - read_other_ends[0].first > HINGE_BIN_LENGTH))) {
+ bridged=false;
+ break;
+ }
+ }
+ else if (read_other_ends[id].second < THETA){
+ num_reads_considered++;
+
+ if ((num_reads_extending_to_end > HINGE_READ_UNBRIDGED_THRESHOLD) or
+ ((num_reads_considered > HINGE_READ_UNBRIDGED_THRESHOLD) and
+ (read_other_ends[id].first - read_other_ends[0].first > HINGE_BIN_LENGTH))) {
+ bridged=false;
+ break;
+ }
+ }
+ else if (read_other_ends[id].second > THETA) {
+ num_reads_with_internal_overlaps++;
+ num_reads_considered++;
+ int id1=id+1;
+ int pileup_length=1;
+
+ while (id1 < read_other_ends.size()){
+ if (read_other_ends[id1].first - read_other_ends[id].first < HINGE_BIN_LENGTH){
+ pileup_length++;
+ id1++;
+ }
+ else{
+ break;
+ }
+ }
+
+ if (pileup_length > HINGE_BIN_PILEUP_THRESHOLD){
+ bridged=true;
+ break;
+ }
+ }
+ }
+ if ((not bridged) and (support > HINGE_MIN_SUPPORT))
+ hinges[i].push_back(std::pair<int, int>(repeat_annotation[i][j].first,-1));
+
+
+ } else { // look for in_hinges, positive gradient
+ bool bridged = true;
+ int support = 0;
+ int num_reads_at_end=1;
+
+ std::vector<std::pair<int,int> > read_other_ends;
+
+
+ for (int k = 0; k < idx_pileup[i].size(); k++) {
+ int left_overhang, right_overhang;
+ int temp_id;
+ temp_id=idx_pileup[i][k]->read_B_id_;
+
+ if (idx_pileup[i][k]->reverse_complement_match_==0){
+ right_overhang= std::max(maskvec[temp_id].second-idx_pileup[i][k]->read_B_match_end_,0);
+ left_overhang= std::max(idx_pileup[i][k]->read_B_match_start_- maskvec[temp_id].first,0);
+ }
+ else if (idx_pileup[i][k]->reverse_complement_match_==1) {
+ right_overhang= std::max(idx_pileup[i][k]->read_B_match_start_- maskvec[temp_id].first,0);
+ left_overhang= std::max(maskvec[temp_id].second-idx_pileup[i][k]->read_B_match_end_,0);
+ }
+
+
+ if (left_overhang > THETA) {
+ if ((idx_pileup[i][k]->read_A_match_start_ >
+ repeat_annotation[i][j].first - HINGE_TOLERANCE_LENGTH)
+ and (idx_pileup[i][k]->read_A_match_start_ <
+ repeat_annotation[i][j].first + HINGE_TOLERANCE_LENGTH)) {
+
+ std::pair <int,int> other_end;
+ other_end.first=idx_pileup[i][k]->read_A_match_end_;
+ other_end.second=right_overhang;
+ read_other_ends.push_back(other_end);
+ support++;
+ }
+ }
+ }
+ if (support < HINGE_MIN_SUPPORT){
+ continue;
+ }
+
+
+ std::sort(read_other_ends.begin(),read_other_ends.end(),pairDescend);//Sort in descending order
+
+
+ int num_reads_considered=0;
+ int num_reads_extending_to_end=0;
+ int num_reads_with_internal_overlaps=0;
+
+
+
+ for (int id = 0; id < read_other_ends.size() ; ++id) {
+ if (maskvec[i].second-read_other_ends[id].first < HINGE_BIN_LENGTH){
+ num_reads_considered++;
+ num_reads_extending_to_end++;
+
+ if ((num_reads_extending_to_end > HINGE_READ_UNBRIDGED_THRESHOLD) or
+ ((num_reads_considered > HINGE_READ_UNBRIDGED_THRESHOLD) and
+ (read_other_ends[0].first - read_other_ends[id].first > HINGE_BIN_LENGTH))) {
+ bridged=false;
+ break;
+ }
+ }
+ else if (read_other_ends[id].second < THETA){
+ num_reads_considered++;
+
+ if ((num_reads_extending_to_end > HINGE_READ_UNBRIDGED_THRESHOLD) or
+ ((num_reads_considered > HINGE_READ_UNBRIDGED_THRESHOLD) and
+ (read_other_ends[0].first - read_other_ends[id].first > HINGE_BIN_LENGTH))) {
+ bridged=false;
+ break;
+ }
+ }
+ else if (read_other_ends[id].second > THETA) {
+ num_reads_with_internal_overlaps++;
+ num_reads_considered++;
+ int id1=id+1;
+ int pileup_length=1;
+
+ while (id1 < read_other_ends.size()){
+ if (read_other_ends[id].first - read_other_ends[id1].first < HINGE_BIN_LENGTH){
+ pileup_length++;
+ id1++;
+ }
+ else{
+ break;
+ }
+ }
+
+ if (pileup_length > HINGE_BIN_PILEUP_THRESHOLD){
+ bridged=true;
+ break;
+ }
+ }
+ }
+
+ if ((not bridged) and (support > HINGE_MIN_SUPPORT))
+ hinges[i].push_back(std::pair<int, int>(repeat_annotation[i][j].first, 1));
+
+
+ }
+ }
+ }
+
+ //output hinges
+
+ int ra_cnt = 0;
+
+ for (int i = r_begin; i <= r_end; i++) {
+ rep << i << " ";
+ for (int j = 0; j < repeat_annotation[i].size(); j++) {
+ rep << repeat_annotation[i][j].first << " " << repeat_annotation[i][j].second << " ";
+ }
+ ra_cnt += repeat_annotation[i].size();
+ rep << std::endl;
+ }
+ rep.close();
+ console->info("Number of hinges before filtering: {}", ra_cnt);
+
+ int hg_cnt = 0;
+
+ for (int i = r_begin; i <= r_end; i++) { // <= so the last read's hinges are also written, matching the repeat-annotation loop above
+ hg << i << " ";
+ for (int j = 0; j < hinges[i].size(); j++) {
+ hg << hinges[i][j].first << " " << hinges[i][j].second << " ";
+ }
+ hg_cnt += hinges[i].size();
+ hg << std::endl;
+ }
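+ // Each line of <prefix>.hinges.txt therefore reads "<read_id> <pos> <type> <pos> <type> ...",
+ // e.g. (hypothetical) "1723 20441 -1"; a read with no hinges produces a line containing only its id.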
+
+
+ console->info("Number of hinges: {}", hg_cnt);
+
+
+
+ for (int i = 0; i < aln.size(); i++) {
+ free(aln[i]);
+ }
+ aln.clear();
+ }
+
+
+
+ hg.close();
+
+
+ if (strlen(name_db)>0)
+ la.closeDB(); //close database
+ return 0;
+
+
+
+
+}
diff --git a/src/include/DB.h b/src/include/DB.h
new file mode 100755
index 0000000..92244c7
--- /dev/null
+++ b/src/include/DB.h
@@ -0,0 +1,449 @@
+/************************************************************************************\
+* *
+* Copyright (c) 2014, Dr. Eugene W. Myers (EWM). All rights reserved. *
+* *
+* Redistribution and use in source and binary forms, with or without modification, *
+* are permitted provided that the following conditions are met: *
+* *
+* · Redistributions of source code must retain the above copyright notice, this *
+* list of conditions and the following disclaimer. *
+* *
+* · Redistributions in binary form must reproduce the above copyright notice, this *
+* list of conditions and the following disclaimer in the documentation and/or *
+* other materials provided with the distribution. *
+* *
+* · The name of EWM may not be used to endorse or promote products derived from *
+* this software without specific prior written permission. *
+* *
+* THIS SOFTWARE IS PROVIDED BY EWM ”AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, *
+* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND *
+* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL EWM BE LIABLE *
+* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS *
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
+* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN *
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
+* *
+* For any issues regarding this software and its use, contact EWM at: *
+* *
+* Eugene W. Myers Jr. *
+* Bautzner Str. 122e *
+* 01099 Dresden *
+* GERMANY *
+* Email: gene.myers at gmail.com *
+* *
+\************************************************************************************/
+
+/*******************************************************************************************
+ *
+ * Compressed data base module. Auxiliary routines to open and manipulate a data base for
+ * which the sequence and read information are separated into two separate files, and the
+ * sequence is compressed into 2-bits for each base. Support for tracks of additional
+ * information, and trimming according to the current partition. Eventually will also
+ * support compressed quality information.
+ *
+ * Author : Gene Myers
+ * Date : July 2013
+ * Revised: April 2014
+ *
+ ********************************************************************************************/
+
+#ifndef _HITS_DB
+
+#define _HITS_DB
+
+#include <stdio.h>
+
+#include "QV.h"
+
+#define HIDE_FILES // Auxiliary DB files start with a . so they are "hidden"
+ // Undefine if you don't want this
+
+// For interactive applications where it is inappropriate to simply exit with an error
+// message to standard error, define the constant INTERACTIVE. If set, then error
+// messages are put in the global variable Ebuffer and the caller of a DB routine
+// can decide how to deal with the error.
+//
+// DB, QV, or alignment routines that can encounter errors function as before in
+// non-INTERACTIVE mode by exiting after printing an error message to stderr. In
+// INTERACTIVE mode the routines place a message at EPLACE and return an error
+// value. For such routines that were previously void, they are now int, and
+// return 1 if an error occurred, 0 otherwise.
+
+#undef INTERACTIVE
+
+#ifdef INTERACTIVE
+
+#define EPRINTF sprintf
+#define EPLACE Ebuffer
+#define EXIT(x) return (x)
+
+#else // BATCH
+
+#define EPRINTF fprintf
+#define EPLACE stderr
+#define EXIT(x) exit (1)
+
+#endif
+
+typedef unsigned char uint8;
+typedef unsigned short uint16;
+typedef unsigned int uint32;
+typedef unsigned long long uint64;
+typedef signed char int8;
+typedef signed short int16;
+typedef signed int int32;
+typedef signed long long int64;
+typedef float float32;
+typedef double float64;
+
+
+/*******************************************************************************************
+ *
+ * COMMAND LINE INTERPRETATION MACROS
+ *
+ ********************************************************************************************/
+
+extern char *Prog_Name; // Name of program
+
+#ifdef INTERACTIVE
+
+extern char Ebuffer[];
+
+#endif
+
+#define SYSTEM_ERROR \
+ { EPRINTF(EPLACE,"%s: System error, read failed!\n",Prog_Name); \
+ exit (2); \
+ }
+
+#define ARG_INIT(name) \
+ Prog_Name = Strdup(name,""); \
+ for (i = 0; i < 128; i++) \
+ flags[i] = 0;
+
+#define ARG_FLAGS(set) \
+ for (k = 1; argv[i][k] != '\0'; k++) \
+ { if (index(set,argv[i][k]) == NULL) \
+ { fprintf(stderr,"%s: -%c is an illegal option\n",Prog_Name,argv[i][k]); \
+ exit (1); \
+ } \
+ flags[(int) argv[i][k]] = 1; \
+ }
+
+#define ARG_POSITIVE(var,name) \
+ var = strtol(argv[i]+2,&eptr,10); \
+ if (*eptr != '\0' || argv[i][2] == '\0') \
+ { fprintf(stderr,"%s: -%c argument is not an integer\n",Prog_Name,argv[i][1]); \
+ exit (1); \
+ } \
+ if (var <= 0) \
+ { fprintf(stderr,"%s: %s must be positive (%d)\n",Prog_Name,name,var); \
+ exit (1); \
+ }
+
+#define ARG_NON_NEGATIVE(var,name) \
+ var = strtol(argv[i]+2,&eptr,10); \
+ if (*eptr != '\0' || argv[i][2] == '\0') \
+ { fprintf(stderr,"%s: -%c argument is not an integer\n",Prog_Name,argv[i][1]); \
+ exit (1); \
+ } \
+ if (var < 0) \
+ { fprintf(stderr,"%s: %s must be non-negative (%d)\n",Prog_Name,name,var); \
+ exit (1); \
+ }
+
+#define ARG_REAL(var) \
+ var = strtod(argv[i]+2,&eptr); \
+ if (*eptr != '\0' || argv[i][2] == '\0') \
+ { fprintf(stderr,"%s: -%c argument is not a real number\n",Prog_Name,argv[i][1]); \
+ exit (1); \
+ }
+
+/*******************************************************************************************
+ *
+ * UTILITIES
+ *
+ ********************************************************************************************/
+
+// The following general utilities return NULL if any of their input pointers are NULL, or if they
+// could not perform their function (in which case they also print an error to stderr).
+
+void *Malloc(int64 size, char *mesg); // Guarded versions of malloc, realloc
+void *Realloc(void *object, int64 size, char *mesg); // and strdup, that output "mesg" to
+char *Strdup(char *string, char *mesg); // stderr if out of memory
+
+FILE *Fopen(char *path, char *mode); // Open file path for "mode"
+char *PathTo(char *path); // Return path portion of file name "path"
+char *Root(char *path, char *suffix); // Return the root name, excluding suffix, of "path"
+
+// Catenate returns concatenation of path.sep.root.suffix in a *temporary* buffer
+// Numbered_Suffix returns concatenation of left.<num>.right in a *temporary* buffer
+
+char *Catenate(char *path, char *sep, char *root, char *suffix);
+char *Numbered_Suffix(char *left, int num, char *right);
+
+
+// DB-related utilities
+
+void Print_Number(int64 num, int width, FILE *out); // Print readable big integer
+int Number_Digits(int64 num); // Return # of digits in printed number
+
+#define COMPRESSED_LEN(len) (((len)+3) >> 2)
+
+void Compress_Read(int len, char *s); // Compress read in-place into 2-bit form
+void Uncompress_Read(int len, char *s); // Uncompress read in-place into numeric form
+void Print_Read(char *s, int width);
+
+void Lower_Read(char *s); // Convert read from numbers to lowercase letters (0-3 to acgt)
+void Upper_Read(char *s); // Convert read from numbers to uppercase letters (0-3 to ACGT)
+void Number_Read(char *s); // Convert read from letters to numbers
+
+
+/*******************************************************************************************
+ *
+ * DB IN-CORE DATA STRUCTURES
+ *
+ ********************************************************************************************/
+
+#define DB_QV 0x03ff // Mask for 3-digit quality value
+#define DB_CSS 0x0400 // This is the second or later of a group of reads from a given insert
+#define DB_BEST 0x0800 // This is the longest read of a given insert (may be the only 1)
+
+typedef struct
+ { int origin; // Well #
+ int rlen; // Length of the sequence (Last pulse = fpulse + rlen)
+ int fpulse; // First pulse
+ int64 boff; // Offset (in bytes) of compressed read in 'bases' file, or offset of
+ // uncompressed bases in memory block
+ int64 coff; // Offset (in bytes) of compressed quiva streams in 'quiva' file
+ int flags; // QV of read + the flag bits (DB_QV, DB_CSS, DB_BEST) defined above
+ } HITS_READ;
+
+// A track can be of 3 types:
+// data == NULL: there are nreads 'anno' records of size 'size'.
+// data != NULL && size == 4: anno is an array of nreads+1 int's and data[anno[i]..anno[i+1])
+// contains the variable length data
+// data != NULL && size == 8: anno is an array of nreads+1 int64's and data[anno[i]..anno[i+1])
+// contains the variable length data
+
+typedef struct _track
+ { struct _track *next; // Link to next track
+ char *name; // Symbolic name of track
+ int size; // Size in bytes of anno records
+ void *anno; // over [0,nreads]: read i annotation: int, int64, or 'size' records
+ void *data; // data[anno[i] .. anno[i+1]-1] is data if data != NULL
+ } HITS_TRACK;
+
+// The information for accessing QV streams is in a HITS_QV record that is a "pseudo-track"
+// named ".@qvs" and is always the first track record in the list (if present). Since normal
+// track names cannot begin with a . (this is enforced), this pseudo-track is never confused
+// with a normal track.
+
+typedef struct
+ { struct _track *next;
+ char *name;
+ int ncodes; // # of coding tables
+ QVcoding *coding; // array [0..ncodes-1] of coding schemes (see QV.h)
+ uint16 *table; // for i in [0,db->nreads-1]: read i should be decompressed with
+ // scheme coding[table[i]]
+ FILE *quiva; // the open file pointer to the .qvs file
+ } HITS_QV;
+
+// The DB record holds all information about the current state of an active DB including an
+// array of HITS_READS, one per read, and a linked list of HITS_TRACKs the first of which
+// is always a HITS_QV pseudo-track (if the QVs have been loaded).
+
+typedef struct
+ { int ureads; // Total number of reads in untrimmed DB
+ int treads; // Total number of reads in trimmed DB
+ int cutoff; // Minimum read length in block (-1 if not yet set)
+ int all; // Consider multiple reads from a given well
+ float freq[4]; // frequency of A, C, G, T, respectively
+
+ // Set with respect to "active" part of DB (all vs block, untrimmed vs trimmed)
+
+ int maxlen; // length of maximum read (initially over all DB)
+ int64 totlen; // total # of bases (initially over all DB)
+
+ int nreads; // # of reads in actively loaded portion of DB
+ int trimmed; // DB has been trimmed by cutoff/all
+ int part; // DB block (if > 0), total DB (if == 0)
+ int ufirst; // Index of first read in block (without trimming)
+ int tfirst; // Index of first read in block (with trimming)
+
+ // In order to avoid forcing users to have to rebuild all their DBs to accommodate
+ // the addition of fields for the size of the actively loaded trimmed and untrimmed
+ // blocks, an additional read record is allocated in "reads" when a DB is loaded into
+ // memory (reads[-1]) and the two desired fields are crammed into the first two
+ // integer spaces of the record.
+
+ char *path; // Root name of DB for .bps, .qvs, and tracks
+ int loaded; // Are reads loaded in memory?
+ void *bases; // file pointer for bases file (to fetch reads from),
+ // or memory pointer to uncompressed block of all sequences.
+ HITS_READ *reads; // Array [-1..nreads] of HITS_READ
+ HITS_TRACK *tracks; // Linked list of loaded tracks
+ } HITS_DB;
+
+
+/*******************************************************************************************
+ *
+ * DB STUB FILE FORMAT = NFILE FDATA^nfile NBLOCK PARAMS BDATA^nblock
+ *
+ ********************************************************************************************/
+
+#define MAX_NAME 10000 // Longest file name or fasta header line
+
+#define DB_NFILE "files = %9d\n" // number of files
+#define DB_FDATA " %9d %s %s\n" // last read index + 1, fasta prolog, file name
+#define DB_NBLOCK "blocks = %9d\n" // number of blocks
+#define DB_PARAMS "size = %9lld cutoff = %9d all = %1d\n" // block size, len cutoff, all in well
+#define DB_BDATA " %9d %9d\n" // First read index (untrimmed), first read index (trimmed)
+
+
+/*******************************************************************************************
+ *
+ * DB ROUTINES
+ *
+ ********************************************************************************************/
+
+ // Suppose DB is the name of an original database. Then there will be files .DB.idx, .DB.bps,
+ // .DB.qvs, and files .DB.<track>.anno and DB.<track>.data where <track> is a track name
+ // (not containing a . !).
+
+ // A DAM is basically a DB except that:
+ //   1. there are no QV's; instead .coff points at the '\0'-terminated fasta header of the read
+ //      in the .<dam>.hdr file
+ // 2. .origin contains the contig # of the read within a fasta entry (assembly sequences
+ // contain N-separated contigs), and .fpulse the first base of the contig in the
+ // fasta entry
+
+ // Open the given database or dam, "path" into the supplied HITS_DB record "db". If the name has
+ // a part # in it then just the part is opened. The index array is allocated (for all or
+ // just the part) and read in.
+ // Return status of routine:
+ // -1: The DB could not be opened for a reason reported by the routine to EPLACE
+ // 0: Open of DB proceeded without mishap
+ // 1: Open of DAM proceeded without mishap
+
+int Open_DB(char *path, HITS_DB *db);
+
+ // Trim the DB or part thereof and all loaded tracks according to the cutoff and all settings
+ // of the current DB partition. Reallocate smaller memory blocks for the information kept
+ // for the retained reads.
+
+void Trim_DB(HITS_DB *db);
+
+ // Shut down an open 'db' by freeing all associated space, including tracks and QV structures,
+ // and any open file pointers. The record pointed at by db however remains (the user
+ // supplied it and so should free it).
+
+void Close_DB(HITS_DB *db);
+
+ // If QV pseudo track is not already in db's track list, then load it and set it up.
+ // The database must not have been trimmed yet. -1 is returned if a .qvs file is not
+ // present, and 1 is returned if an error (reported to EPLACE) occurred and INTERACTIVE
+ // is defined. Otherwise a 0 is returned.
+
+int Load_QVs(HITS_DB *db);
+
+ // Remove the QV pseudo track, all space associated with it, and close the .qvs file.
+
+void Close_QVs(HITS_DB *db);
+
+ // Look up the file and header in the file of the indicated track. Return:
+ // 1: Track is for trimmed DB
+ // 0: Track is for untrimmed DB
+ // -1: Track is not the right size for the DB, either trimmed or untrimmed
+ // -2: Could not find the track
+ // In addition, if opened (0 or 1 returned), then kind points at an integer indicating
+ // the type of track as follows:
+ // CUSTOM 0 => a custom track
+ // MASK 1 => a mask track
+
+#define CUSTOM_TRACK 0
+#define MASK_TRACK 1
+
+int Check_Track(HITS_DB *db, char *track, int *kind);
+
+ // If track is not already in the db's track list, then allocate all the storage for it,
+ // read it in from the appropriate file, add it to the track list, and return a pointer
+ // to the newly created HITS_TRACK record. If the track does not exist or cannot be
+ // opened for some reason, then NULL is returned if INTERACTIVE is defined. Otherwise
+ // the routine prints an error message to stderr and exits if an error occurs, and returns
+ // with NULL only if the track does not exist.
+
+HITS_TRACK *Load_Track(HITS_DB *db, char *track);
+
+ // If track is on the db's track list, then it is removed and all storage associated with it
+ // is freed.
+
+void Close_Track(HITS_DB *db, char *track);
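+
+ // Illustrative sketch (editor's addition): check for and load a mask track on an
+ // already opened database 'db'. The track name "dust" is hypothetical and the
+ // anno/data fields are those of the HITS_TRACK record declared earlier in this header.
+ //
+ //   int kind;
+ //   if (Check_Track(db, "dust", &kind) >= 0 && kind == MASK_TRACK)
+ //     { HITS_TRACK *dust = Load_Track(db, "dust");
+ //       if (dust != NULL)
+ //         { ... consult dust->anno and dust->data ...
+ //           Close_Track(db, "dust");
+ //         }
+ //     }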
+
+ // Allocate and return a buffer big enough for the largest read in 'db'.
+ // **NB** free(x-1) if x is the value returned, as prefix and suffix '\0' (or 4) bytes
+ // are needed by the alignment algorithms. If memory cannot be allocated then NULL is
+ // returned if INTERACTIVE is defined; otherwise an error is printed to stderr and the program exits.
+
+char *New_Read_Buffer(HITS_DB *db);
+
+ // Load into 'read' the i'th read in 'db'. As a lower case ascii string if ascii is 1, an
+ // upper case ascii string if ascii is 2, and a numeric string over 0(A), 1(C), 2(G), and 3(T)
+ // otherwise. A '\0' (or 4) is prepended and appended to the string so it has a delimiter
+ // for traversals in either direction. A non-zero value is returned if an error occurred
+ // and INTERACTIVE is defined.
+
+int Load_Read(HITS_DB *db, int i, char *read, int ascii);
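+
+ // Illustrative usage sketch (editor's addition): open, trim, and scan a database.
+ // The name "reads.db" is hypothetical; because Load_Read null-terminates the ascii
+ // string, strlen() gives each read's length. Error returns other than Open_DB's are
+ // ignored in this sketch.
+ //
+ //   HITS_DB _db, *db = &_db;
+ //   if (Open_DB("reads.db", db) < 0)
+ //     exit (1);
+ //   Trim_DB(db);
+ //   char *buffer = New_Read_Buffer(db);
+ //   for (int i = 0; i < db->nreads; i++)
+ //     { Load_Read(db, i, buffer, 2);              // upper-case ascii
+ //       printf("read %d: %zu bases\n", i, strlen(buffer));
+ //     }
+ //   free(buffer-1);                               // see New_Read_Buffer's note above
+ //   Close_DB(db);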
+
+ // Load into 'read' the subread [beg,end] of the i'th read in 'db' and return a pointer to the
+ // start of the subinterval (not necessarily = to read !!! ). As a lower case ascii
+ // string if ascii is 1, an upper case ascii string if ascii is 2, and a numeric string
+ // over 0(A), 1(C), 2(G), and 3(T) otherwise. A '\0' (or 4) is prepended and appended to
+ // the string holding the substring so it has a delimiter for traversals in either direction.
+ // A NULL pointer is returned if an error occurred and INTERACTIVE is defined.
+
+char *Load_Subread(HITS_DB *db, int i, int beg, int end, char *read, int ascii);
+
+ // Allocate a set of 5 vectors large enough to hold the longest QV stream that will occur
+ // in the database. If memory cannot be allocated then NULL is returned if INTERACTIVE
+ // is defined; otherwise an error is printed to stderr and the program exits.
+
+#define DEL_QV 0 // The deletion QVs are x[DEL_QV] if x is the buffer returned by New_QV_Buffer
+#define DEL_TAG 1 // The deleted characters
+#define INS_QV 2 // The insertion QVs
+#define SUB_QV 3 // The substitution QVs
+#define MRG_QV 4 // The merge QVs
+
+char **New_QV_Buffer(HITS_DB *db);
+
+ // Load into 'entry' the 5 QV vectors for i'th read in 'db'. The deletion tag or characters
+ // are converted to a numeric or upper/lower case ascii string as per ascii. Return with
+ // a zero, except when an error occurs and INTERACTIVE is defined, in which case return with 1.
+
+int Load_QVentry(HITS_DB *db, int i, char **entry, int ascii);
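+
+ // Illustrative sketch (editor's addition): load the QV streams of an untrimmed, open
+ // database 'db' and look at the deletion QVs of each read. Error returns are ignored
+ // in this sketch.
+ //
+ //   if (Load_QVs(db) == 0)
+ //     { char **entry = New_QV_Buffer(db);
+ //       for (int i = 0; i < db->nreads; i++)
+ //         { Load_QVentry(db, i, entry, 1);
+ //           char *delQV = entry[DEL_QV];          // one QV per base of read i
+ //         }
+ //       Close_QVs(db);
+ //     }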
+
+ // Allocate a block big enough for all the uncompressed sequences, read them into it,
+ // reset the 'off' in each read record to be its in-memory offset, and set the
+ // bases pointer to point at the block after closing the bases file. If ascii is
+ // 1 then the reads are converted to lowercase ascii, if 2 then uppercase ascii, and
+ // otherwise the reads are left as numeric strings over 0(A), 1(C), 2(G), and 3(T).
+ // Return with a zero, except when an error occurs and INTERACTIVE is defined in which
+ // case return with 1.
+
+int Read_All_Sequences(HITS_DB *db, int ascii);
+
+ // For the DB or DAM "path" = "prefix/root.[db|dam]", find all the files for that DB, i.e. all
+ // those of the form "prefix/[.]root.part" and call actor with the complete path to each file
+ // pointed at by path, and the suffix of the path by extension. The . precedes the root
+ // name if the defined constant HIDE_FILES is set. Always the first call is with the
+ // path "prefix/root.[db|dam]" and extension "db" or "dam". There will always be calls for
+ // "prefix/[.]root.idx" and "prefix/[.]root.bps". All other calls are for *tracks* and
+ // so this routine gives one a way to know all the tracks associated with a given DB.
+ // -1 is returned if the path could not be found, and 1 is returned if an error (reported
+ // to EPLACE) occurred and INTERACTIVE is defined. Otherwise a 0 is returned.
+
+int List_DB_Files(char *path, void actor(char *path, char *extension));
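+
+ // Illustrative sketch (editor's addition): an 'actor' that simply lists every file
+ // belonging to the hypothetical database "reads.db", tracks included.
+ //
+ //   void list_one(char *path, char *extension)
+ //   { printf("%s (.%s)\n", path, extension); }
+ //
+ //   ...
+ //   List_DB_Files("reads.db", list_one);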
+
+#endif // _HITS_DB
diff --git a/src/include/INIReader.h b/src/include/INIReader.h
new file mode 100644
index 0000000..e2dec2d
--- /dev/null
+++ b/src/include/INIReader.h
@@ -0,0 +1,53 @@
+// Read an INI file into easy-to-access name/value pairs.
+
+// inih and INIReader are released under the New BSD license (see LICENSE.txt).
+// Go to the project home page for more info:
+//
+// https://github.com/benhoyt/inih
+
+#ifndef __INIREADER_H__
+#define __INIREADER_H__
+
+#include <map>
+#include <string>
+
+// Read an INI file into easy-to-access name/value pairs. (Note that I've gone
+// for simplicity here rather than speed, but it should be pretty decent.)
+class INIReader
+{
+public:
+ // Construct INIReader and parse given filename. See ini.h for more info
+ // about the parsing.
+ INIReader(std::string filename);
+
+ // Return the result of ini_parse(), i.e., 0 on success, line number of
+ // first error on parse error, or -1 on file open error.
+ int ParseError();
+
+ // Get a string value from INI file, returning default_value if not found.
+ std::string Get(std::string section, std::string name,
+ std::string default_value);
+
+ // Get an integer (long) value from INI file, returning default_value if
+ // not found or not a valid integer (decimal "1234", "-1234", or hex "0x4d2").
+ long GetInteger(std::string section, std::string name, long default_value);
+
+ // Get a real (floating point double) value from INI file, returning
+ // default_value if not found or not a valid floating point value
+ // according to strtod().
+ double GetReal(std::string section, std::string name, double default_value);
+
+ // Get a boolean value from INI file, returning default_value if not found or if
+ // not a valid true/false value. Valid true values are "true", "yes", "on", "1",
+ // and valid false values are "false", "no", "off", "0" (not case sensitive).
+ bool GetBoolean(std::string section, std::string name, bool default_value);
+
+private:
+ int _error;
+ std::map<std::string, std::string> _values;
+ static std::string MakeKey(std::string section, std::string name);
+ static int ValueHandler(void* user, const char* section, const char* name,
+ const char* value);
+};
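+
+// Illustrative usage sketch (editor's addition), in the style of the inih/INIReader
+// documentation; the file name and keys are hypothetical:
+//
+//   INIReader reader("test.ini");
+//   if (reader.ParseError() < 0) {
+//       std::cout << "Can't load 'test.ini'\n";
+//       return 1;
+//   }
+//   std::cout << "version="
+//             << reader.GetInteger("protocol", "version", -1)
+//             << ", name="
+//             << reader.Get("user", "name", "UNKNOWN") << "\n";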
+
+#endif // __INIREADER_H__
diff --git a/src/include/LAInterface.h b/src/include/LAInterface.h
new file mode 100644
index 0000000..eb87b0c
--- /dev/null
+++ b/src/include/LAInterface.h
@@ -0,0 +1,234 @@
+#ifndef LAINTERFACE
+#define LAINTERFACE
+
+#include <vector>
+#include <iostream>
+#include <string>
+
+extern "C" {
+#include "DB.h"
+#include "align.h"
+}
+typedef std::pair<int,int> Interval;
+
+class Read { // read class
+public:
+ int id; // id, start from 0
+ std::string name; // read name
+ std::string bases; // read bases
+ std::string qv; // qv currently not available
+ std::vector<Interval> intervals;
+ int effective_start,effective_end;
+ int len;
+ Read(int id, int length, std::string name, std::string bases) : id(id), bases(bases), name(name), len(length) { };
+ Read(int id, std::string name, std::string bases) : id(id), bases(bases), name(name) { };
+
+ bool active = true;
+ void showRead();
+};
+
+enum MatchType {
+ FORWARD, BACKWARD, ACOVERB, BCOVERA, UNDEFINED, INTERNAL, NOT_ACTIVE, COVERING,
+ COVERED, MIDDLE, MISMATCH_LEFT, MISMATCH_RIGHT, FORWARD_INTERNAL, BACKWARD_INTERNAL // different type of alignment
+/**
+ * FORWARD: alignment extends to the right
+ * BACKWARD: alignment extends to the left
+ * COVERING: read a covers read b
+ * COVERED: read a is covered by read b
+ * MISMATCH_LEFT: read a has a chimeric section on the left; read b aligns with the rest of read a and extends it to the left
+ * MISMATCH_RIGHT: read a has a chimeric section on the right; read b aligns with the rest of read a and extends it to the right
+ * UNDEFINED: any other case
+ * FORWARD_INTERNAL: forward on read A, internal on read B
+ * BACKWARD_INTERNAL: reverse on read A, internal on read B
+**/
+
+} ;
+
+class LAlignment { // because class Alignment is taken
+
+public:
+ LAlignment() { };
+ //std::string aseq;
+ //std::string bseq;
+ char * aseq;
+ char * bseq;
+
+ bool recovered = false;
+
+ void show() {printf("%d %d %d [%d...%d] x [%d...%d] %d diffs\n", read_A_id_, read_B_id_,flags,abpos,aepos,bbpos,bepos,diffs); };
+ int read_A_id_; // id of read a
+ int read_B_id_; // id of read b
+ int alen; // length of read a
+ int blen; // length of read b
+ int *trace; // trace
+ uint16 *trace_pts;
+ int trace_pts_len;
+ int tlen;
+ int diffs;
+ int abpos, bbpos; // begin position of read a and b
+ int aepos, bepos; // end position of read a and b
+ int flags; // flag = 1 : 'c', flag = 0 : 'n'
+ int tps;
+ MatchType aln_type;
+ bool active = true;
+};
+
+class LOverlap { // LOverlap is a simplified version of LAlignment, no trace
+public:
+ LOverlap() { };
+ void show() {printf("%d %d %d [%d...%d]/%d x [%d...%d]/%d %d diffs, %d type\n", read_A_id_, read_B_id_,
+ reverse_complement_match_,
+ read_A_match_start_, read_A_match_end_, alen, read_B_match_start_, read_B_match_end_, blen, diffs,
+ match_type_); };
+ int read_A_id_, read_B_id_;
+ int alen; // length of read a
+ int blen; // length of read b
+ int tlen;
+ int diffs; //differences
+ int read_A_match_start_, read_B_match_start_; // start of the alignment in read a and in read b
+ int read_A_match_end_, read_B_match_end_; // end of the alignment in read a and in read b
+ int eff_read_A_match_start_, eff_read_B_match_start_, eff_read_A_match_end_, eff_read_B_match_end_;
+ int tps;
+ int reverse_complement_match_; // 1 if the match is to the reverse complement of read b, 0 if same direction
+ int eff_read_A_start_, eff_read_A_end_, eff_read_B_start_, eff_read_B_end_;
+ MatchType match_type_ = UNDEFINED;
+ void addtype(int max_overhang); //classify overlaps
+ void AddTypesAsymmetric(int max_overhang, int min_overhang);
+ int GetMatchingPosition(int pos_A);
+ static const int CHI_THRESHOLD = 500; // threshold for chimeric/adaptor at the beginning
+ bool active = true;
+ uint16 *trace_pts;
+ int trace_pts_len;
+ void trim_overlap();
+ void TrimOverlapNaive();
+ int eff_start_trace_point_index_, eff_end_trace_point_index_;
+ int weight;
+};
+
+
+class LAInterface {
+public:
+
+ HITS_DB _db1, *db1 = &_db1; // data base 1
+ HITS_DB _db2, *db2 = &_db2; // data base 2
+ Overlap _ovl, *ovl = &_ovl; // overlaps
+ Alignment _aln, *aln = &_aln; // alignments; these are the data structures required to read the database
+
+ char **flist = NULL;
+ int *findx = NULL;
+ int nfiles = 0; // n blocks of the read database
+
+ char ** flist2 = NULL;
+ int *findx2 = NULL;
+ int nfiles2 = 0; // n blocks of read database 2
+
+
+ FILE *input;
+ int64 novl;
+ int tspace, tbytes, small;
+ int reps, *pts;
+ int input_pts;
+
+
+ LAInterface() { };
+
+ int openDB2(std::string filename, std::string filename2); // open 2 databases
+
+ int openDB(std::string filename); // open database
+
+ int openAlignmentFile(std::string filename); // open .las Alignment file
+
+ void showRead(int from, int to); // show reads in a range
+
+ void showRead2(int from, int to); // show reads in a range
+
+ void showAlignment(int from, int to); // show alignment with 'A read' in a range
+
+ void showOverlap(int from, int to); // show alignment with 'A read' in a range
+
+ void resetAlignment(); // rewind the file; needs to be called every time before obtaining alignments
+
+ Read *getRead(int number); //get one read
+
+ Read *getRead2(int number); //get one read
+
+ void getRead(std::vector<Read *> &reads, int from, int to); // get reads within a range
+
+ void getQV(std::vector<std::vector<int> > & QV, int from, int to);
+
+ void getRead2(std::vector<Read *> &reads, int from, int to); // get reads within a range
+
+
+ void getAlignmentB(std::vector<int> &, int n); // get all b reads aligned with a read
+
+ void getOverlap(std::vector<LOverlap *> &, int from, int64 to); // get overlap(simplified version of alignment) with a read in a range
+
+ void getOverlapw(std::vector<LOverlap *> &, int from, int to); // get overlap(simplified version of alignment) with a read in a range
+
+ void getOverlap(std::vector<LOverlap *> &, int n);
+
+ void getAlignment(std::vector<LAlignment *> &, int from, int to); // get alignment with 'A read' in a range
+
+ void getAlignment(std::vector<LAlignment *> &result_vec, std::vector<int> &range);
+
+ void getAlignment(std::vector<LAlignment *> &, int n);
+
+ int closeDB(); // close database
+
+ int getReadNumber(); // get total number of reads
+
+ int getReadNumber2(); // get total number of reads from database 2
+
+ int64 getAlignmentNumber(); // get total number of alignments
+
+ int closeDB2();
+
+ int printAlignment(FILE *file, Alignment *align, Work_Data *ework,
+ int indent, int width, int border, int upper, int coord);
+
+ int printAlignment_exp(FILE *file, LAlignment *align, Work_Data *ework,
+ int indent, int width, int border, int upper, int coord);
+
+
+ int computeTracePTS(Alignment *align, Work_Data *ework, int trace_spacing);
+
+
+ int showAlignmentTags(LAlignment *);
+
+ int generateConsensus(std::vector<LAlignment *> &);
+
+ int recoverAlignment(LAlignment *);
+
+ std::vector<int> * getCoverage(std::vector<LOverlap *> alns);
+
+ std::vector<int> * getCoverage(std::vector<LAlignment *> alns);
+
+ std::pair<std::string, std::string> getAlignmentTags(LAlignment *alignment);
+
+ std::vector<std::pair<int, int> > * lowCoverageRegions(std::vector<int> & cov, int min_cov);
+
+ void profileCoverage(std::vector<LOverlap *> &alignments, std::vector<std::pair<int, int> > & coverage,int reso, int cutoff);
+
+ void profileCoveragefine(std::vector<LOverlap *> &alignments, std::vector<std::pair<int, int> > & coverage,int reso, int cutoff, int est_coverage);
+
+ void repeatDetect(std::vector<std::pair<int, int> > & coverage, std::vector<std::pair<int, int> > & repeat);
+
+ int loadPAF(std::string filename, std::vector<LOverlap *> &);
+
+ int loadFASTA(std::string filename, std::vector<Read *> & reads);
+
+};
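+
+// Illustrative usage sketch (editor's addition): open a DAZZLER database and a .las
+// file and fetch the overlaps whose A-read index lies in [0,1), i.e. read 0, per the
+// getOverlap comment above. File names are hypothetical; return codes are not checked.
+//
+//   LAInterface la;
+//   la.openDB("reads.db");
+//   la.openAlignmentFile("reads.las");
+//   la.resetAlignment();                 // rewind before fetching alignments
+//   std::vector<LOverlap *> overlaps;
+//   la.getOverlap(overlaps, 0, 1);
+//   for (LOverlap *o : overlaps)
+//       o->show();
+//   la.closeDB();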
+
+class Node {
+public:
+ int id;
+ int strand;
+ bool pseudo = false;
+ Node(int id, int strand): id(id), strand(strand) {};
+ Node() {};
+ void show() { std::cout<<id; if (strand == 1) std::cout<<"\'";}
+};
+
+
+
+#endif
\ No newline at end of file
diff --git a/src/include/QV.h b/src/include/QV.h
new file mode 100755
index 0000000..35fbadc
--- /dev/null
+++ b/src/include/QV.h
@@ -0,0 +1,125 @@
+/************************************************************************************\
+* *
+* Copyright (c) 2014, Dr. Eugene W. Myers (EWM). All rights reserved. *
+* *
+* Redistribution and use in source and binary forms, with or without modification, *
+* are permitted provided that the following conditions are met: *
+* *
+* · Redistributions of source code must retain the above copyright notice, this *
+* list of conditions and the following disclaimer. *
+* *
+* · Redistributions in binary form must reproduce the above copyright notice, this *
+* list of conditions and the following disclaimer in the documentation and/or *
+* other materials provided with the distribution. *
+* *
+* · The name of EWM may not be used to endorse or promote products derived from *
+* this software without specific prior written permission. *
+* *
+* THIS SOFTWARE IS PROVIDED BY EWM ”AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, *
+* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND *
+* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL EWM BE LIABLE *
+* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS *
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
+* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN *
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
+* *
+* For any issues regarding this software and its use, contact EWM at: *
+* *
+* Eugene W. Myers Jr. *
+* Bautzner Str. 122e *
+* 01099 Dresden *
+* GERMANY *
+* Email: gene.myers at gmail.com *
+* *
+\************************************************************************************/
+
+/*******************************************************************************************
+ *
+ * Compressor/decompressor for .quiv files: customized Huffman codes for each stream based on
+ * the histogram of values occurring in a given file. The two low complexity streams
+ * (deletionQV and substitutionQV) use a Huffman coding of the run length of the prevalent
+ * character.
+ *
+ * Author: Gene Myers
+ * Date: Jan 18, 2014
+ * Modified: July 25, 2014
+ *
+ ********************************************************************************************/
+
+#ifndef _QV_COMPRESSOR
+
+#define _QV_COMPRESSOR
+
+ // The defined constant INTERACTIVE (set in DB.h) determines whether an interactive or
+ // batch version of the routines in this library are compiled. In batch mode, routines
+ // print an error message and exit. In interactive mode, the routines place the error
+ // message in EPLACE (also defined in DB.h) and return an error value, typically NULL
+ // if the routine returns a pointer, and an unusual integer value if the routine returns
+ // an integer.
+ // Below when an error return is described, one should understand that this value is returned
+ // only if the routine was compiled in INTERACTIVE mode.
+
+ // A PacBio compression scheme
+
+typedef struct
+ { void *delScheme; // Huffman scheme for deletion QVs
+ void *insScheme; // Huffman scheme for insertion QVs
+ void *mrgScheme; // Huffman scheme for merge QVs
+ void *subScheme; // Huffman scheme for substitution QVs
+ void *dRunScheme; // Huffman scheme for deletion run lengths (if delChar > 0)
+ void *sRunScheme; // Huffman scheme for substitution run lengths (if subChar > 0)
+ int delChar; // If > 0, run-encoded deletion value
+ int subChar; // If > 0, run-encoded substitution value
+ int flip; // Need to flip multi-byte integers
+ char *prefix; // Header line prefix
+ } QVcoding;
+
+ // Read the next nlines of input, and QVentry returns a pointer to the first line if needed.
+ // If end-of-input is encountered before any further input, -1 is returned. If there is
+ // an error then -2 is returned. Otherwise the length of the line(s) read is returned.
+
+int Read_Lines(FILE *input, int nlines);
+char *QVentry();
+
+ // Read the .quiva file on input and record frequency statistics. If there is an error
+ // then 1 is returned, otherwise 0.
+
+int QVcoding_Scan(FILE *input);
+
+ // Given QVcoding_Scan has been called at least once, create an encoding scheme based on
+ // the accumulated statistics and return a pointer to it. The returned encoding object
+ // is *statically* allocated within the routine. If lossy is set then use a lossy scaling
+ // for the insertion and merge streams. If there is an error, then NULL is returned.
+
+QVcoding *Create_QVcoding(int lossy);
+
+ // Read/write a coding scheme to input/output. The encoding object returned by the reader
+ // is *statically* allocated within the routine. If an error occurs while reading then
+ // NULL is returned.
+
+QVcoding *Read_QVcoding(FILE *input);
+void Write_QVcoding(FILE *output, QVcoding *coding);
+
+ // Free all the auxiliary storage associated with coding (but not the object itself!)
+
+void Free_QVcoding(QVcoding *coding);
+
+ // Assuming the file pointer is positioned just beyond an entry header line, read the
+ // next set of 5 QV lines, compress them according to 'coding', and output. If lossy
+ // is set then the scheme is a lossy one. A non-zero value is returned only if an
+ // error occurred.
+
+int Compress_Next_QVentry(FILE *input, FILE *output, QVcoding *coding, int lossy);
+
+ // Assuming the input is positioned just beyond the compressed encoding of an entry header,
+ // read the set of compressed encodings for the ensuing 5 QV vectors, decompress them,
+ // and place their decompressed values into entry which is a 5 element array of character
+ // pointers. The parameter rlen, computed from the preceding header line, critically
+ // provides the length of each of the 5 vectors. A non-zero value is returned only if an
+ // error occurred.
+
+int Uncompress_Next_QVentry(FILE *input, char **entry, QVcoding *coding, int rlen);
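+
+ // Illustrative compression sketch (editor's addition): two passes over a .quiva
+ // stream, one to gather statistics and one to emit the coding followed by the
+ // compressed entries. How the per-entry header lines themselves are recorded is
+ // simplified away here; see the dextract-style tools for the full protocol.
+ //
+ //   QVcoding_Scan(input);                       // pass 1: histogram the streams
+ //   rewind(input);
+ //   QVcoding *coding = Create_QVcoding(0);      // 0 = lossless
+ //   Write_QVcoding(output, coding);
+ //   while (Read_Lines(input, 1) > 0)            // consume the next entry header
+ //     Compress_Next_QVentry(input, output, coding, 0);
+ //   Free_QVcoding(coding);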
+
+#endif // _QV_COMPRESSOR
diff --git a/src/include/align.h b/src/include/align.h
new file mode 100755
index 0000000..4815182
--- /dev/null
+++ b/src/include/align.h
@@ -0,0 +1,372 @@
+/************************************************************************************\
+* *
+* Copyright (c) 2014, Dr. Eugene W. Myers (EWM). All rights reserved. *
+* *
+* Redistribution and use in source and binary forms, with or without modification, *
+* are permitted provided that the following conditions are met: *
+* *
+* · Redistributions of source code must retain the above copyright notice, this *
+* list of conditions and the following disclaimer. *
+* *
+* · Redistributions in binary form must reproduce the above copyright notice, this *
+* list of conditions and the following disclaimer in the documentation and/or *
+* other materials provided with the distribution. *
+* *
+* · The name of EWM may not be used to endorse or promote products derived from *
+* this software without specific prior written permission. *
+* *
+* THIS SOFTWARE IS PROVIDED BY EWM ”AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, *
+* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND *
+* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL EWM BE LIABLE *
+* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS *
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
+* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN *
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
+* *
+* For any issues regarding this software and its use, contact EWM at: *
+* *
+* Eugene W. Myers Jr. *
+* Bautzner Str. 122e *
+* 01099 Dresden *
+* GERMANY *
+* Email: gene.myers at gmail.com *
+* *
+\************************************************************************************/
+
+/*******************************************************************************************
+ *
+ * Local alignment module. Routines for finding local alignments given a seed position,
+ * representing such an l.a. with its interval and a set of pass-thru points, so that
+ * a detailed alignment can be efficiently computed on demand.
+ *
+ * All routines work on a numeric representation of DNA sequences, i.e. 0 for A, 1 for C,
+ * 2 for G, and 3 for T.
+ *
+ * Author: Gene Myers
+ * Date : July 2013
+ *
+ ********************************************************************************************/
+
+#ifndef _A_MODULE
+
+#define _A_MODULE
+
+#include "DB.h"
+
+#define TRACE_XOVR 125 // If the trace spacing is not more than this value, then can
+ // and do compress traces pts to 8-bit unsigned ints
+
+/*** INTERACTIVE vs BATCH version
+
+ The defined constant INTERACTIVE (set in DB.h) determines whether an interactive or
+ batch version of the routines in this library are compiled. In batch mode, routines
+ print an error message and exit. In interactive mode, the routines place the error
+ message in EPLACE (also defined in DB.h) and return an error value, typically NULL
+ if the routine returns a pointer, and an unusual integer value if the routine returns
+ an integer.
+ Below when an error return is described, one should understand that this value is returned
+ only if the routine was compiled in INTERACTIVE mode.
+
+***/
+
+
+/*** PATH ABSTRACTION:
+
+ Coordinates are *between* characters where 0 is the tick just before the first char,
+ 1 is the tick between the first and second character, and so on. Our data structure
+ is called a Path, referring to its conceptualization in an edit graph.
+
+ A local alignment is specified by the point '(read_A_match_start_,read_B_match_start_)' at which its path in
+ the underlying edit graph starts, and the point '(read_A_match_end_,read_B_match_end_)' at which it ends.
+ In other words A[read_A_match_start_+1..read_A_match_end_] is aligned to B[read_B_match_start_+1..read_B_match_end_] (assuming X[1] is
+ the *first* character of X).
+
+ There are 'diffs' differences in an optimal local alignment between the beginning and
+ end points of the alignment (if computed by Compute_Trace), or nearly so (if computed
+ by Local_Alignment).
+
+ Optionally, a Path can have additional information about the exact nature of the
+ aligned substrings if the field 'trace' is not NULL. Trace points to either an
+ array of integers (if computed by a Compute_Trace routine), or an array of unsigned
+ short integers (if computed by Local_Alignment).
+
+ If computed by Local_Alignment 'trace' points at a list of 'tlen' (always even) short
+ values:
+
+ d_0, b_0, d_1, b_1, ... d_n-1, b_n-1, d_n, b_n
+
+ to be interpreted as follows. The alignment from (read_A_match_start_,read_B_match_start_) to (read_A_match_end_,read_B_match_end_)
+ passes through the n trace points for i in [1,n]:
+
+ (a_i,b_i) where a_i = floor(read_A_match_start_/TS)*TS + i*TS
+ and b_i = read_B_match_start_ + (b_0 + b_1 + ... + b_(i-1))
+
+ where also let a_0,b_0 = read_A_match_start_,read_B_match_start_ and a_(n+1),b_(n+1) = read_A_match_end_,read_B_match_end_. That is, the
+ interior (i.e. i != 0 and i != n+1) trace points pass through every TS'th position of
+ the aread where TS is the "trace spacing" employed when finding the alignment (see
+ New_Align_Spec). Typically TS is 100. Then d_i is the number of differences in the
+ portion of the alignment between (a_i,b_i) and (a_i+1,b_i+1). These trace points allow
+ the Compute_Trace routines to efficiently compute the exact alignment between the two
+ reads by efficiently computing exact alignments between consecutive pairs of trace points.
+ Moreover, the diff values give one an idea of the quality of the alignment along every
+ segment of TS symbols of the aread.
+
+ If computed by a Compute_Trace routine, 'trace' points at a list of 'tlen' integers
+ < i1, i2, ... in > that encodes an exact alignment as follows. A negative number j
+ indicates that a dash should be placed before A[-j] and a positive number k indicates
+ that a dash should be placed before B[k], where A and B are the two sequences of the
+ overlap. The indels occur in the trace in the order in which they occur along the
+ alignment. For a good example of how to "decode" a trace into an alignment, see the
+ code for the routine Print_Alignment.
+
+***/
+
+typedef struct
+ { void *trace;
+ int tlen;
+ int diffs;
+ int abpos, bbpos;
+ int aepos, bepos;
+ } Path;
+
+
+/*** ALIGNMENT ABSTRACTION:
+
+ An alignment is modeled by an Alignment record, which in addition to a *pointer* to a
+ 'path', gives pointers to the A and B sequences, their lengths, and indicates whether
+ the B-sequence needs to be complemented ('comp' non-zero if so). The 'trace' pointer
+ of the 'path' subrecord can be either NULL, a list of pass-through points, or an exact
+ trace depending on what routines have been called on the record.
+
+ One can (1) compute a trace, with Compute_Trace, either from scratch if 'path.trace' = NULL,
+ or using the sequence of pass-through points in trace, (2) print an ASCII representation
+ of an alignment, or (3) reverse the roles of A and B, and (4) complement a sequence
+ (which is a reversible process).
+
+ If the alignment record shows the B sequence as complemented, *** THEN IT IS THE
+ RESPONSIBILITY OF THE CALLER *** to make sure that bseq points at a complement of
+ the sequence before calling Compute_Trace or Print_Alignment. Complement_Seq complements
+ the sequence a of length n. The operation does the complementation/reversal in place.
+ Calling it a second time on a given fragment restores it to its original state.
+***/
+
+#define COMP(x) ((x) & 0x1)
+
+#define COMP_FLAG 0x1
+
+typedef struct
+ { Path *path;
+ uint32 flags; /* Pipeline status and complementation flags */
+ char *aseq; /* Pointer to A sequence */
+ char *bseq; /* Pointer to B sequence */
+ int alen; /* Length of A sequence */
+ int blen; /* Length of B sequence */
+ } Alignment;
+
+void Complement_Seq(char *a, int n);
+
+ /* Many routines like Local_Alignment, Compute_Trace, and Print_Alignment need working
+ storage that is more efficiently reused with each call, rather than being allocated anew
+ with each call. Each *thread* can create a Work_Data object with New_Work_Data and this
+ object holds and retains the working storage for routines of this module between calls
+ to the routines. If enough memory for a Work_Data is not available then NULL is returned.
+ Free_Work_Data frees a Work_Data object and all working storage held by it.
+ */
+
+ typedef void Work_Data;
+
+ Work_Data *New_Work_Data();
+
+ void Free_Work_Data(Work_Data *work);
+
+ /* Local_Alignment seeks local alignments of a quality determined by a number of parameters.
+ These are coded in an Align_Spec object that can be created with New_Align_Spec and
+ freed with Free_Align_Spec when no longer needed. There are three essential parameters:
+
+ ave_corr: the average correlation (1 - 2*error_rate) for the sought alignments. For Pacbio
+ data we set this to .70 assuming an average of 15% error in each read.
+ trace_space: the spacing interval for keeping trace points and segment differences (see
+ description of 'trace' for Paths above)
+ freq[4]: a 4-element vector where freq[0] = frequency of A, f(A), freq[1] = f(C),
+ freq[2] = f(G), and freq[3] = f(T). This vector is part of the header
+ of every HITS database (see db.h).
+
+ If an alignment cannot reach the boundary of the d.p. matrix with this condition (i.e.
+ overlap), then the last/first 30 columns of the alignment are guaranteed to be
+ suffix/prefix positive at correlation ave_corr * g(freq) where g is an empirically
+ measured function that increases from 1 as the entropy of freq decreases. If memory is
+ unavailable or the freq distribution is too skewed then NULL is returned.
+
+ You can get back the original parameters used to create an Align_Spec with the simple
+ utility functions below.
+ */
+
+ typedef void Align_Spec;
+
+ Align_Spec *New_Align_Spec(double ave_corr, int trace_space, float *freq);
+
+ void Free_Align_Spec(Align_Spec *spec);
+
+ int Trace_Spacing (Align_Spec *spec);
+ double Average_Correlation(Align_Spec *spec);
+ float *Base_Frequencies (Align_Spec *spec);
+
+ /* Local_Alignment finds the longest significant local alignment between the sequences in
+ 'align' subject to:
+
+ (a) the alignment criterion given by the Align_Spec 'spec',
+ (b) it passes through one of the points (anti+k)/2,(anti-k)/2 for k in [low,hgh] within
+ the underlying dynamic programming matrix (i.e. the points on diagonals low to hgh
+ on anti-diagonal anti or anti-1 (depending on whether the diagonal is odd or even)),
+ (c) if lbord >= 0, then the alignment is always above diagonal low-lbord, and
+ (d) if hbord >= 0, then the alignment is always below diagonal hgh+hbord.
+
+ The path record of 'align' has its 'trace' filled from the point of view of an overlap
+ between the aread and the bread. In addition a Path record from the point of view of the
+ bread versus the aread is returned by the function, with this Path's 'trace' filled in
+ appropriately. The space for the returned path and the two 'trace's are in the working
+ storage supplied by the Work_Data packet and this space is reused with each call, so if
+ one wants to retain the bread-path and the two trace point sequences, then they must be
+ copied to user-allocated storage before calling the routine again. NULL is returned in
+ the event of an error.
+
+ Find_Extension is a variant of Local_Alignment that simply finds a local alignment that
+ either ends (if prefix is non-zero) or begins (if prefix is zero) at the point
+ (anti+diag)/2,(anti-diag)/2). All other parameters are as before. It returns a non-zero
+ value only when INTERACTIVE is on and it cannot allocate the memory it needs.
+ Only the path and trace with respect to the aread is returned. This routine is experimental
+ and may not persist in later versions of the code.
+ */
+
+ Path *Local_Alignment(Alignment *align, Work_Data *work, Align_Spec *spec,
+ int low, int hgh, int anti, int lbord, int hbord);
+
+ int Find_Extension(Alignment *align, Work_Data *work, Align_Spec *spec, // experimental !!
+ int diag, int anti, int lbord, int hbord, int prefix);
+
+ /* Given a legitimate Alignment object, Compute_Trace_X computes an exact trace for the alignment.
+ If 'path.trace' is non-NULL, then it is assumed to be a sequence of pass-through points
+ and diff levels computed by Local_Alignment. In either case 'path.trace' is set
+ to point at an integer array within the storage of the Work_Data packet encoding an
+ exact optimal trace from the start to end points. If the trace is needed beyond the
+ next call to a routine that sets it, then it should be copied to an array allocated
+ and managed by the caller.
+
+ Compute_Trace_ALL does not require a sequence of pass-through points, as it computes the
+ best alignment between (path->read_A_match_start_,path->read_B_match_start_) and (path->read_A_match_end_,path->read_B_match_end_) in the
+ edit graph between the sequences. Compute_Trace_PTS computes a trace by computing the
+ trace between successive pass through points. It is much, much faster than Compute_Trace_ALL
+ but at the tradeoff of not necessarily being optimal as pass-through points are not all
+ perfect. Compute_Trace_MID computes a trace by computing the trace between the mid-points
+ of alignments between two adjacent pairs of pass through points. It is generally twice as
+ slow as Compute_Trace_PTS, but it produces nearer optimal alignments. All these routines
+ return 1 if an error occurred and 0 otherwise.
+ */
+
+#define LOWERMOST -1 // Possible modes for the "mode" parameter below
+#define GREEDIEST 0
+#define UPPERMOST 1
+
+ int Compute_Trace_ALL(Alignment *align, Work_Data *work);
+ int Compute_Trace_PTS(Alignment *align, Work_Data *work, int trace_spacing, int mode);
+ int Compute_Trace_MID(Alignment *align, Work_Data *work, int trace_spacing, int mode);
+
+ /* Compute_Trace_IRR (IRR for IRRegular) computes a trace for the given alignment where
+ it assumes the spacing between trace points between both the A and B read varies, and
+ further assumes that the A-spacing is given in the short integers normally occupied by
+ the differences in the alignment between the trace points. This routine is experimental
+ and may not persist in later versions of the code.
+ */
+
+ int Compute_Trace_IRR(Alignment *align, Work_Data *work, int mode); // experimental !!
+
+ /* Alignment_Cartoon prints an ASCII representation of the overlap relationship between the
+ two reads of 'align' to the given 'file', indented by 'indent' spaces. Coord controls
+ the display width of numbers; it must be not less than the width of any number to be
+ displayed.
+
+ If the alignment trace is an exact trace, then one can ask Print_Alignment to print an
+ ASCII representation of the alignment 'align' to the file 'file'. Indent the display
+ by "indent" spaces and put "width" columns per line in the display. Show "border"
+ characters of sequence on each side of the aligned region. If upper is non-zero then
+ display bases in upper case. If coord is greater than 0, then the positions of the
+ first character in A and B in the given row is displayed with a field width given by
+ coord's value.
+
+ Print_Reference is like Print_Alignment but rather than printing exactly "width" columns
+ per segment, it prints "block" characters of the A sequence in each segment. This results
+ in segments of different lengths, but is convenient when looking at two alignments involving
+ A as segments are guaranteed to cover the same interval of A in a segment.
+
+ Both Print routines return 1 if an error occurred (not enough memory), and 0 otherwise.
+
+ Flip_Alignment modifies align so the roles of A and B are reversed. If full is off then
+ the trace is ignored, otherwise the trace must be to a full alignment trace and this trace
+ is also appropriately inverted.
+ */
+
+ void Alignment_Cartoon(FILE *file, Alignment *align, int indent, int coord);
+
+ int Print_Alignment(FILE *file, Alignment *align, Work_Data *work,
+ int indent, int width, int border, int upper, int coord);
+
+ int Print_Reference(FILE *file, Alignment *align, Work_Data *work,
+ int indent, int block, int border, int upper, int coord);
+
+ void Flip_Alignment(Alignment *align, int full);
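+
+ /* Illustrative sketch (editor's addition): find a local alignment seeded on diagonal
+ 'diag' and anti-diagonal 'anti', refine it to an exact trace, and print it. The
+ variables diag, anti, aseq, bseq, alen, blen, and the open database 'db' (whose freq
+ vector seeds the Align_Spec) are assumed; the .70 setting follows the 15% error-rate
+ note above.
+
+ Work_Data *work = New_Work_Data();
+ Align_Spec *spec = New_Align_Spec(.70, 100, db->freq);
+
+ Alignment aln;
+ Path apath;
+ aln.path = &apath;
+ aln.aseq = aseq; aln.alen = alen;
+ aln.bseq = bseq; aln.blen = blen;
+ aln.flags = 0; // B not complemented
+
+ Local_Alignment(&aln, work, spec, diag, diag, anti, -1, -1);
+ Compute_Trace_PTS(&aln, work, Trace_Spacing(spec), GREEDIEST);
+ Print_Alignment(stdout, &aln, work, 4, 80, 10, 1, 8);
+
+ Free_Align_Spec(spec);
+ Free_Work_Data(work);
+ */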
+
+
+/*** OVERLAP ABSTRACTION:
+
+ Externally, between modules an Alignment is modeled by an "Overlap" record, which
+ (a) replaces the pointers to the two sequences with their ID's in the HITS data bases,
+ (b) does not contain the length of the 2 sequences (must fetch from DB), and
+ (c) contains its path as a subrecord rather than as a pointer (indeed, typically the
+ corresponding Alignment record points at the Overlap's path sub-record). The trace pointer
+ is always to a sequence of trace points and can be either compressed (uint8) or
+ uncompressed (uint16). One can read and write binary records of an "Overlap".
+***/
+
+typedef struct {
+ Path path; /* Path: begin- and end-point of alignment + diffs */
+ uint32 flags; /* Pipeline status and complementation flags */
+ int aread; /* Id # of A sequence */
+ int bread; /* Id # of B sequence */
+} Overlap;
+
+
+ /* Read_Overlap reads the next Overlap record from stream 'input', not including the trace
+ (if any), and without modifying 'ovl's trace pointer. Read_Trace reads the ensuing trace
+ into the memory pointed at by the trace field of 'ovl'. It is assumed to be big enough to
+ accommodate the trace where each value takes 'tbytes' bytes (1 if uint8 or 2 if uint16).
+
+ Write_Overlap writes 'ovl' to stream 'output' followed by its trace vector (if any) that
+ occupies 'tbytes' bytes per value.
+
+ Print_Overlap prints an ASCII version of the contents of 'ovl' to stream 'output'
+ where the trace occupies 'tbytes' per value and the printout is indented from the left
+ margin by 'indent' spaces.
+
+ Compress_TraceTo8 converts a trace of 16-bit values to 8-bit values in place, and
+ Decompress_TraceTo16 does the reverse conversion.
+
+ Check_Trace_Points checks that the number of trace points is correct and that the sum
+ of the b-read displacements equals the b-read alignment interval, assuming the trace
+ spacing is 'tspace'. It reports an error message if there is a problem and 'verbose'
+ is non-zero. The 'ovl' came from the file named 'fname'.
+ */
+
+ int Read_Overlap(FILE *input, Overlap *ovl);
+ int Read_Trace(FILE *input, Overlap *ovl, int tbytes);
+
+ void Write_Overlap(FILE *output, Overlap *ovl, int tbytes);
+ void Print_Overlap(FILE *output, Overlap *ovl, int tbytes, int indent);
+
+ void Compress_TraceTo8(Overlap *ovl);
+ void Decompress_TraceTo16(Overlap *ovl);
+
+ int Check_Trace_Points(Overlap *ovl, int tspace, int verbose, char *fname);
+
+#endif // _A_MODULE
diff --git a/src/include/cmdline.h b/src/include/cmdline.h
new file mode 100644
index 0000000..de9eaf7
--- /dev/null
+++ b/src/include/cmdline.h
@@ -0,0 +1,809 @@
+/*
+ Copyright (c) 2009, Hideyuki Tanaka
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the <organization> nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY <copyright holder> ''AS IS'' AND ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#pragma once
+
+#include <iostream>
+#include <sstream>
+#include <vector>
+#include <map>
+#include <string>
+#include <stdexcept>
+#include <typeinfo>
+#include <cstring>
+#include <algorithm>
+#include <cxxabi.h>
+#include <cstdlib>
+
+namespace cmdline{
+
+namespace detail{
+
+template <typename Target, typename Source, bool Same>
+class lexical_cast_t{
+public:
+ static Target cast(const Source &arg){
+ Target ret;
+ std::stringstream ss;
+ if (!(ss<<arg && ss>>ret && ss.eof()))
+ throw std::bad_cast();
+
+ return ret;
+ }
+};
+
+template <typename Target, typename Source>
+class lexical_cast_t<Target, Source, true>{
+public:
+ static Target cast(const Source &arg){
+ return arg;
+ }
+};
+
+template <typename Source>
+class lexical_cast_t<std::string, Source, false>{
+public:
+ static std::string cast(const Source &arg){
+ std::ostringstream ss;
+ ss<<arg;
+ return ss.str();
+ }
+};
+
+template <typename Target>
+class lexical_cast_t<Target, std::string, false>{
+public:
+ static Target cast(const std::string &arg){
+ Target ret;
+ std::istringstream ss(arg);
+ if (!(ss>>ret && ss.eof()))
+ throw std::bad_cast();
+ return ret;
+ }
+};
+
+template <typename T1, typename T2>
+struct is_same {
+ static const bool value = false;
+};
+
+template <typename T>
+struct is_same<T, T>{
+ static const bool value = true;
+};
+
+template<typename Target, typename Source>
+Target lexical_cast(const Source &arg)
+{
+ return lexical_cast_t<Target, Source, detail::is_same<Target, Source>::value>::cast(arg);
+}
+
+static inline std::string demangle(const std::string &name)
+{
+ int status=0;
+ char *p=abi::__cxa_demangle(name.c_str(), 0, 0, &status);
+ std::string ret(p);
+ free(p);
+ return ret;
+}
+
+template <class T>
+std::string readable_typename()
+{
+ return demangle(typeid(T).name());
+}
+
+template <class T>
+std::string default_value(T def)
+{
+ return detail::lexical_cast<std::string>(def);
+}
+
+template <>
+inline std::string readable_typename<std::string>()
+{
+ return "string";
+}
+
+} // detail
+
+//-----
+
+class cmdline_error : public std::exception {
+public:
+ cmdline_error(const std::string &msg): msg(msg){}
+ ~cmdline_error() throw() {}
+ const char *what() const throw() { return msg.c_str(); }
+private:
+ std::string msg;
+};
+
+template <class T>
+struct default_reader{
+ T operator()(const std::string &str){
+ return detail::lexical_cast<T>(str);
+ }
+};
+
+template <class T>
+struct range_reader{
+ range_reader(const T &low, const T &high): low(low), high(high) {}
+ T operator()(const std::string &s) const {
+ T ret=default_reader<T>()(s);
+ if (!(ret>=low && ret<=high)) throw cmdline::cmdline_error("range_error");
+ return ret;
+ }
+private:
+ T low, high;
+};
+
+template <class T>
+range_reader<T> range(const T &low, const T &high)
+{
+ return range_reader<T>(low, high);
+}
+
+template <class T>
+struct oneof_reader{
+ T operator()(const std::string &s){
+ T ret=default_reader<T>()(s);
+ if (std::find(alt.begin(), alt.end(), ret)==alt.end())
+ throw cmdline_error("");
+ return ret;
+ }
+ void add(const T &v){ alt.push_back(v); }
+private:
+ std::vector<T> alt;
+};
+
+template <class T>
+oneof_reader<T> oneof(T a1)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2, T a3)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ ret.add(a3);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2, T a3, T a4)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ ret.add(a3);
+ ret.add(a4);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2, T a3, T a4, T a5)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ ret.add(a3);
+ ret.add(a4);
+ ret.add(a5);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2, T a3, T a4, T a5, T a6)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ ret.add(a3);
+ ret.add(a4);
+ ret.add(a5);
+ ret.add(a6);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2, T a3, T a4, T a5, T a6, T a7)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ ret.add(a3);
+ ret.add(a4);
+ ret.add(a5);
+ ret.add(a6);
+ ret.add(a7);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2, T a3, T a4, T a5, T a6, T a7, T a8)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ ret.add(a3);
+ ret.add(a4);
+ ret.add(a5);
+ ret.add(a6);
+ ret.add(a7);
+ ret.add(a8);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2, T a3, T a4, T a5, T a6, T a7, T a8, T a9)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ ret.add(a3);
+ ret.add(a4);
+ ret.add(a5);
+ ret.add(a6);
+ ret.add(a7);
+ ret.add(a8);
+ ret.add(a9);
+ return ret;
+}
+
+template <class T>
+oneof_reader<T> oneof(T a1, T a2, T a3, T a4, T a5, T a6, T a7, T a8, T a9, T a10)
+{
+ oneof_reader<T> ret;
+ ret.add(a1);
+ ret.add(a2);
+ ret.add(a3);
+ ret.add(a4);
+ ret.add(a5);
+ ret.add(a6);
+ ret.add(a7);
+ ret.add(a8);
+ ret.add(a9);
+ ret.add(a10);
+ return ret;
+}
+
+//-----
+
+class parser{
+public:
+ parser(){
+ }
+ ~parser(){
+ for (std::map<std::string, option_base*>::iterator p=options.begin();
+ p!=options.end(); p++)
+ delete p->second;
+ }
+
+ void add(const std::string &name,
+ char short_name=0,
+ const std::string &desc=""){
+ if (options.count(name)) throw cmdline_error("multiple definition: "+name);
+ options[name]=new option_without_value(name, short_name, desc);
+ ordered.push_back(options[name]);
+ }
+
+ template <class T>
+ void add(const std::string &name,
+ char short_name=0,
+ const std::string &desc="",
+ bool need=true,
+ const T def=T()){
+ add(name, short_name, desc, need, def, default_reader<T>());
+ }
+
+ template <class T, class F>
+ void add(const std::string &name,
+ char short_name=0,
+ const std::string &desc="",
+ bool need=true,
+ const T def=T(),
+ F reader=F()){
+ if (options.count(name)) throw cmdline_error("multiple definition: "+name);
+ options[name]=new option_with_value_with_reader<T, F>(name, short_name, need, def, desc, reader);
+ ordered.push_back(options[name]);
+ }
+
+ void footer(const std::string &f){
+ ftr=f;
+ }
+
+ void set_program_name(const std::string &name){
+ prog_name=name;
+ }
+
+ bool exist(const std::string &name) const {
+ if (options.count(name)==0) throw cmdline_error("there is no flag: --"+name);
+ return options.find(name)->second->has_set();
+ }
+
+ template <class T>
+ const T &get(const std::string &name) const {
+ if (options.count(name)==0) throw cmdline_error("there is no flag: --"+name);
+ const option_with_value<T> *p=dynamic_cast<const option_with_value<T>*>(options.find(name)->second);
+ if (p==NULL) throw cmdline_error("type mismatch flag '"+name+"'");
+ return p->get();
+ }
+
+ const std::vector<std::string> &rest() const {
+ return others;
+ }
+
+ bool parse(const std::string &arg){
+ std::vector<std::string> args;
+
+ std::string buf;
+ bool in_quote=false;
+ for (std::string::size_type i=0; i<arg.length(); i++){
+ if (arg[i]=='\"'){
+ in_quote=!in_quote;
+ continue;
+ }
+
+ if (arg[i]==' ' && !in_quote){
+ args.push_back(buf);
+ buf="";
+ continue;
+ }
+
+ if (arg[i]=='\\'){
+ i++;
+ if (i>=arg.length()){
+ errors.push_back("unexpected occurrence of '\\' at end of string");
+ return false;
+ }
+ }
+
+ buf+=arg[i];
+ }
+
+ if (in_quote){
+ errors.push_back("quote is not closed");
+ return false;
+ }
+
+ if (buf.length()>0)
+ args.push_back(buf);
+
+ for (size_t i=0; i<args.size(); i++)
+ std::cout<<"\""<<args[i]<<"\""<<std::endl;
+
+ return parse(args);
+ }
+
+ bool parse(const std::vector<std::string> &args){
+ int argc=static_cast<int>(args.size());
+ std::vector<const char*> argv(argc);
+
+ for (int i=0; i<argc; i++)
+ argv[i]=args[i].c_str();
+
+ return parse(argc, &argv[0]);
+ }
+
+ bool parse(int argc, const char * const argv[]){
+ errors.clear();
+ others.clear();
+
+ if (argc<1){
+ errors.push_back("argument number must be longer than 0");
+ return false;
+ }
+ if (prog_name=="")
+ prog_name=argv[0];
+
+ std::map<char, std::string> lookup;
+ for (std::map<std::string, option_base*>::iterator p=options.begin();
+ p!=options.end(); p++){
+ if (p->first.length()==0) continue;
+ char initial=p->second->short_name();
+ if (initial){
+ if (lookup.count(initial)>0){
+ lookup[initial]="";
+ errors.push_back(std::string("short option '")+initial+"' is ambiguous");
+ return false;
+ }
+ else lookup[initial]=p->first;
+ }
+ }
+
+ for (int i=1; i<argc; i++){
+ if (strncmp(argv[i], "--", 2)==0){
+ const char *p=strchr(argv[i]+2, '=');
+ if (p){
+ std::string name(argv[i]+2, p);
+ std::string val(p+1);
+ set_option(name, val);
+ }
+ else{
+ std::string name(argv[i]+2);
+ if (options.count(name)==0){
+ errors.push_back("undefined option: --"+name);
+ continue;
+ }
+ if (options[name]->has_value()){
+ if (i+1>=argc){
+ errors.push_back("option needs value: --"+name);
+ continue;
+ }
+ else{
+ i++;
+ set_option(name, argv[i]);
+ }
+ }
+ else{
+ set_option(name);
+ }
+ }
+ }
+ else if (strncmp(argv[i], "-", 1)==0){
+ if (!argv[i][1]) continue;
+ char last=argv[i][1];
+ for (int j=2; argv[i][j]; j++){
+ last=argv[i][j];
+ if (lookup.count(argv[i][j-1])==0){
+ errors.push_back(std::string("undefined short option: -")+argv[i][j-1]);
+ continue;
+ }
+ if (lookup[argv[i][j-1]]==""){
+ errors.push_back(std::string("ambiguous short option: -")+argv[i][j-1]);
+ continue;
+ }
+ set_option(lookup[argv[i][j-1]]);
+ }
+
+ if (lookup.count(last)==0){
+ errors.push_back(std::string("undefined short option: -")+last);
+ continue;
+ }
+ if (lookup[last]==""){
+ errors.push_back(std::string("ambiguous short option: -")+last);
+ continue;
+ }
+
+ if (i+1<argc && options[lookup[last]]->has_value()){
+ set_option(lookup[last], argv[i+1]);
+ i++;
+ }
+ else{
+ set_option(lookup[last]);
+ }
+ }
+ else{
+ others.push_back(argv[i]);
+ }
+ }
+
+ for (std::map<std::string, option_base*>::iterator p=options.begin();
+ p!=options.end(); p++)
+ if (!p->second->valid())
+ errors.push_back("need option: --"+std::string(p->first));
+
+ return errors.size()==0;
+ }
+
+ void parse_check(const std::string &arg){
+ if (!options.count("help"))
+ add("help", '?', "print this message");
+ check(0, parse(arg));
+ }
+
+ void parse_check(const std::vector<std::string> &args){
+ if (!options.count("help"))
+ add("help", '?', "print this message");
+ check(args.size(), parse(args));
+ }
+
+ void parse_check(int argc, char *argv[]){
+ if (!options.count("help"))
+ add("help", '?', "print this message");
+ check(argc, parse(argc, argv));
+ }
+
+ std::string error() const{
+ return errors.size()>0?errors[0]:"";
+ }
+
+ std::string error_full() const{
+ std::ostringstream oss;
+ for (size_t i=0; i<errors.size(); i++)
+ oss<<errors[i]<<std::endl;
+ return oss.str();
+ }
+
+ std::string usage() const {
+ std::ostringstream oss;
+ oss<<"usage: "<<prog_name<<" ";
+ for (size_t i=0; i<ordered.size(); i++){
+ if (ordered[i]->must())
+ oss<<ordered[i]->short_description()<<" ";
+ }
+
+ oss<<"[options] ... "<<ftr<<std::endl;
+ oss<<"options:"<<std::endl;
+
+ size_t max_width=0;
+ for (size_t i=0; i<ordered.size(); i++){
+ max_width=std::max(max_width, ordered[i]->name().length());
+ }
+ for (size_t i=0; i<ordered.size(); i++){
+ if (ordered[i]->short_name()){
+ oss<<" -"<<ordered[i]->short_name()<<", ";
+ }
+ else{
+ oss<<" ";
+ }
+
+ oss<<"--"<<ordered[i]->name();
+ for (size_t j=ordered[i]->name().length(); j<max_width+4; j++)
+ oss<<' ';
+ oss<<ordered[i]->description()<<std::endl;
+ }
+ return oss.str();
+ }
+
+private:
+
+ void check(int argc, bool ok){
+ if ((argc==1 && !ok) || exist("help")){
+ std::cerr<<usage();
+ exit(0);
+ }
+
+ if (!ok){
+ std::cerr<<error()<<std::endl<<usage();
+ exit(1);
+ }
+ }
+
+ void set_option(const std::string &name){
+ if (options.count(name)==0){
+ errors.push_back("undefined option: --"+name);
+ return;
+ }
+ if (!options[name]->set()){
+ errors.push_back("option needs value: --"+name);
+ return;
+ }
+ }
+
+ void set_option(const std::string &name, const std::string &value){
+ if (options.count(name)==0){
+ errors.push_back("undefined option: --"+name);
+ return;
+ }
+ if (!options[name]->set(value)){
+ errors.push_back("option value is invalid: --"+name+"="+value);
+ return;
+ }
+ }
+
+ class option_base{
+ public:
+ virtual ~option_base(){}
+
+ virtual bool has_value() const=0;
+ virtual bool set()=0;
+ virtual bool set(const std::string &value)=0;
+ virtual bool has_set() const=0;
+ virtual bool valid() const=0;
+ virtual bool must() const=0;
+
+ virtual const std::string &name() const=0;
+ virtual char short_name() const=0;
+ virtual const std::string &description() const=0;
+ virtual std::string short_description() const=0;
+ };
+
+ class option_without_value : public option_base {
+ public:
+ option_without_value(const std::string &name,
+ char short_name,
+ const std::string &desc)
+ :nam(name), snam(short_name), desc(desc), has(false){
+ }
+ ~option_without_value(){}
+
+ bool has_value() const { return false; }
+
+ bool set(){
+ has=true;
+ return true;
+ }
+
+ bool set(const std::string &){
+ return false;
+ }
+
+ bool has_set() const {
+ return has;
+ }
+
+ bool valid() const{
+ return true;
+ }
+
+ bool must() const{
+ return false;
+ }
+
+ const std::string &name() const{
+ return nam;
+ }
+
+ char short_name() const{
+ return snam;
+ }
+
+ const std::string &description() const {
+ return desc;
+ }
+
+ std::string short_description() const{
+ return "--"+nam;
+ }
+
+ private:
+ std::string nam;
+ char snam;
+ std::string desc;
+ bool has;
+ };
+
+ template <class T>
+ class option_with_value : public option_base {
+ public:
+ option_with_value(const std::string &name,
+ char short_name,
+ bool need,
+ const T &def,
+ const std::string &desc)
+ : nam(name), snam(short_name), need(need), has(false)
+ , def(def), actual(def) {
+ this->desc=full_description(desc);
+ }
+ ~option_with_value(){}
+
+ const T &get() const {
+ return actual;
+ }
+
+ bool has_value() const { return true; }
+
+ bool set(){
+ return false;
+ }
+
+ bool set(const std::string &value){
+ try{
+ actual=read(value);
+ has=true;
+ }
+ catch(const std::exception &e){
+ return false;
+ }
+ return true;
+ }
+
+ bool has_set() const{
+ return has;
+ }
+
+ bool valid() const{
+ if (need && !has) return false;
+ return true;
+ }
+
+ bool must() const{
+ return need;
+ }
+
+ const std::string &name() const{
+ return nam;
+ }
+
+ char short_name() const{
+ return snam;
+ }
+
+ const std::string &description() const {
+ return desc;
+ }
+
+ std::string short_description() const{
+ return "--"+nam+"="+detail::readable_typename<T>();
+ }
+
+ protected:
+ std::string full_description(const std::string &desc){
+ return
+ desc+" ("+detail::readable_typename<T>()+
+ (need?"":" [="+detail::default_value<T>(def)+"]")
+ +")";
+ }
+
+ virtual T read(const std::string &s)=0;
+
+ std::string nam;
+ char snam;
+ bool need;
+ std::string desc;
+
+ bool has;
+ T def;
+ T actual;
+ };
+
+ template <class T, class F>
+ class option_with_value_with_reader : public option_with_value<T> {
+ public:
+ option_with_value_with_reader(const std::string &name,
+ char short_name,
+ bool need,
+ const T def,
+ const std::string &desc,
+ F reader)
+ : option_with_value<T>(name, short_name, need, def, desc), reader(reader){
+ }
+
+ private:
+ T read(const std::string &s){
+ return reader(s);
+ }
+
+ F reader;
+ };
+
+ std::map<std::string, option_base*> options;
+ std::vector<option_base*> ordered;
+ std::string ftr;
+
+ std::string prog_name;
+ std::vector<std::string> others;
+
+ std::vector<std::string> errors;
+};
+
+} // cmdline
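+
+// A minimal usage sketch (illustrative only, not part of the upstream header); hinging.cpp in
+// this repository drives the parser in essentially this way:
+//
+//   cmdline::parser cmdp;
+//   cmdp.add<std::string>("out", 'o', "final output file name", true, "");
+//   cmdp.parse_check(argc, argv);   // prints usage() and exits on a parse error or on --help
+//   std::string out_name = cmdp.get<std::string>("out");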
diff --git a/src/include/common.h b/src/include/common.h
new file mode 100755
index 0000000..32fabef
--- /dev/null
+++ b/src/include/common.h
@@ -0,0 +1,307 @@
+
+/*
+ * =====================================================================================
+ *
+ * Filename: common.h
+ *
+ *    Description:  Common declarations for the code base
+ *
+ * Version: 0.1
+ * Created: 07/16/2013 07:46:23 AM
+ * Revision: none
+ * Compiler: gcc
+ *
+ * Author: Jason Chin,
+ * Company:
+ *
+ * =====================================================================================
+
+ #################################################################################$$
+ # Copyright (c) 2011-2014, Pacific Biosciences of California, Inc.
+ #
+ # All rights reserved.
+ #
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted (subject to the limitations in the
+ # disclaimer below) provided that the following conditions are met:
+ #
+ # * Redistributions of source code must retain the above copyright
+ # notice, this list of conditions and the following disclaimer.
+ #
+ # * Redistributions in binary form must reproduce the above
+ # copyright notice, this list of conditions and the following
+ # disclaimer in the documentation and/or other materials provided
+ # with the distribution.
+ #
+ # * Neither the name of Pacific Biosciences nor the names of its
+ # contributors may be used to endorse or promote products derived
+ # from this software without specific prior written permission.
+ #
+ # NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+ # GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC
+ # BIOSCIENCES AND ITS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+ # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ # DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR ITS
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ # SUCH DAMAGE.
+ #################################################################################$$
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+#include <stdint.h>
+
+
+typedef int seq_coor_t;
+
+typedef struct {
+ seq_coor_t aln_str_size ;
+ seq_coor_t dist ;
+ seq_coor_t aln_q_s;
+ seq_coor_t aln_q_e;
+ seq_coor_t aln_t_s;
+ seq_coor_t aln_t_e;
+ char * q_aln_str;
+ char * t_aln_str;
+
+} alignment;
+
+
+typedef struct {
+ seq_coor_t pre_k;
+ seq_coor_t x1;
+ seq_coor_t y1;
+ seq_coor_t x2;
+ seq_coor_t y2;
+} d_path_data;
+
+typedef struct {
+ seq_coor_t d;
+ seq_coor_t k;
+ seq_coor_t pre_k;
+ seq_coor_t x1;
+ seq_coor_t y1;
+ seq_coor_t x2;
+ seq_coor_t y2;
+} d_path_data2;
+
+typedef struct {
+ seq_coor_t x;
+ seq_coor_t y;
+} path_point;
+
+typedef struct {
+ seq_coor_t start;
+ seq_coor_t last;
+ seq_coor_t count;
+} kmer_lookup;
+
+typedef unsigned char base;
+typedef base * seq_array;
+typedef seq_coor_t seq_addr;
+typedef seq_addr * seq_addr_array;
+
+
+typedef struct {
+ seq_coor_t count;
+ seq_coor_t * query_pos;
+ seq_coor_t * target_pos;
+} kmer_match;
+
+
+typedef struct {
+ seq_coor_t s1;
+ seq_coor_t e1;
+ seq_coor_t s2;
+ seq_coor_t e2;
+ long int score;
+} aln_range;
+
+
+typedef struct {
+ char * sequence;
+ int * eqv;
+} consensus_data;
+
+kmer_lookup * allocate_kmer_lookup (seq_coor_t);
+void init_kmer_lookup ( kmer_lookup *, seq_coor_t );
+void free_kmer_lookup(kmer_lookup *);
+
+seq_array allocate_seq(seq_coor_t);
+void init_seq_array( seq_array, seq_coor_t);
+void free_seq_array(seq_array);
+
+seq_addr_array allocate_seq_addr(seq_coor_t size);
+
+void free_seq_addr_array(seq_addr_array);
+
+
+aln_range * find_best_aln_range(kmer_match *,
+ seq_coor_t,
+ seq_coor_t,
+ seq_coor_t);
+
+void free_aln_range( aln_range *);
+
+kmer_match * find_kmer_pos_for_seq( char *,
+ seq_coor_t,
+ unsigned int K,
+ seq_addr_array,
+ kmer_lookup * );
+
+void free_kmer_match( kmer_match * ptr);
+void free_kmer_lookup(kmer_lookup * );
+
+
+
+void add_sequence ( seq_coor_t,
+ unsigned int,
+ char *,
+ seq_coor_t,
+ seq_addr_array,
+ seq_array,
+ kmer_lookup *);
+
+void mask_k_mer(seq_coor_t, kmer_lookup *, seq_coor_t);
+
+
+alignment *_align(char *aseq, seq_coor_t aseq_pos,
+ char *bseq, seq_coor_t bseq_pos,
+ seq_coor_t t,
+ int t2);
+
+
+
+void free_alignment(alignment *);
+
+
+void free_consensus_data(consensus_data *);
+
+
+void print_d_path( d_path_data2 * base, unsigned long max_idx);
+
+void d_path_sort( d_path_data2 * base, unsigned long max_idx);
+
+int compare_d_path(const void * a, const void * b);
+
+
+typedef struct {
+ seq_coor_t t_pos;
+ uint8_t delta;
+ char q_base;
+ seq_coor_t p_t_pos; // the tag position of the previous base
+ uint8_t p_delta; // the tag delta of the previous base
+ char p_q_base; // the previous base
+ unsigned q_id;
+} align_tag_t;
+
+
+typedef struct {
+ seq_coor_t len;
+ align_tag_t * align_tags;
+} align_tags_t;
+
+
+typedef struct {
+ uint16_t size;
+ uint16_t n_link;
+ seq_coor_t * p_t_pos; // the tag position of the previous base
+ uint8_t * p_delta; // the tag delta of the previous base
+ char * p_q_base; // the previous base
+ uint16_t * link_count;
+ uint16_t count;
+ seq_coor_t best_p_t_pos;
+ uint8_t best_p_delta;
+ uint8_t best_p_q_base; // encoded base
+ double score;
+} align_tag_col_t;
+
+
+typedef struct {
+ align_tag_col_t * base;
+} msa_base_group_t;
+
+
+typedef struct {
+ uint8_t size;
+ uint8_t max_delta;
+ msa_base_group_t * delta;
+} msa_delta_group_t;
+
+typedef msa_delta_group_t * msa_pos_t;
+
+
+align_tags_t * get_align_tags( char * aln_q_seq,
+ char * aln_t_seq,
+ seq_coor_t aln_seq_len,
+ aln_range * range,
+ unsigned q_id,
+ seq_coor_t t_offset);
+
+align_tags_t * get_align_tags2( char * aln_q_seq,
+ char * aln_t_seq,
+ seq_coor_t aln_seq_len,
+ aln_range * range,
+ unsigned q_id,
+ seq_coor_t t_offset);
+
+
+void free_align_tags( align_tags_t * tags);
+
+
+void allocate_aln_col( align_tag_col_t * col);
+
+void realloc_aln_col( align_tag_col_t * col );
+
+void free_aln_col( align_tag_col_t * col);
+
+void allocate_delta_group( msa_delta_group_t * g);
+
+void realloc_delta_group( msa_delta_group_t * g, uint16_t new_size );
+
+void free_delta_group( msa_delta_group_t * g);
+
+void update_col( align_tag_col_t * col, seq_coor_t p_t_pos, uint8_t p_delta, char p_q_base);
+
+msa_pos_t * get_msa_working_sapce(unsigned int max_t_len);
+
+void clean_msa_working_space( msa_pos_t * msa_array, unsigned int max_t_len);
+
+consensus_data * get_cns_from_align_tags( align_tags_t ** tag_seqs,
+ unsigned n_tag_seqs,
+ unsigned t_len,
+ unsigned min_cov );
+
+consensus_data * get_cns_from_align_tags_large( align_tags_t ** tag_seqs,
+ unsigned n_tag_seqs,
+ unsigned t_len,
+ unsigned min_cov );
+
+consensus_data * generate_consensus( char ** input_seq,
+ unsigned int n_seq,
+ unsigned min_cov,
+ unsigned K,
+ double min_idt);
+
+
+consensus_data * generate_utg_consensus( char ** input_seq,
+ seq_coor_t *offset,
+ unsigned int n_seq,
+ unsigned min_cov,
+ unsigned K,
+ double min_idt);
+
+
+void free_consensus_data( consensus_data * consensus );
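+
+/* A minimal usage sketch of the consensus interface declared above (illustrative only;
+   `seqs`, `n_seq` and the numeric arguments are placeholders, not values taken from this
+   code base):
+
+       consensus_data *cns = generate_consensus(seqs, n_seq, 8, 8, 0.70);
+       // cns->sequence holds the consensus string
+       free_consensus_data(cns);
+*/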
+
+#endif
+
+
+
\ No newline at end of file
diff --git a/src/include/ini.h b/src/include/ini.h
new file mode 100644
index 0000000..d75196f
--- /dev/null
+++ b/src/include/ini.h
@@ -0,0 +1,83 @@
+/* inih -- simple .INI file parser
+
+inih is released under the New BSD license (see LICENSE.txt). Go to the project
+home page for more info:
+
+https://github.com/benhoyt/inih
+
+*/
+
+#ifndef __INI_H__
+#define __INI_H__
+
+/* Make this header file easier to include in C++ code */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+/* Typedef for prototype of handler function. */
+typedef int (*ini_handler)(void* user, const char* section,
+ const char* name, const char* value);
+
+/* Typedef for prototype of fgets-style reader function. */
+typedef char* (*ini_reader)(char* str, int num, void* stream);
+
+/* Parse given INI-style file. May have [section]s, name=value pairs
+ (whitespace stripped), and comments starting with ';' (semicolon). Section
+ is "" if name=value pair parsed before any section heading. name:value
+ pairs are also supported as a concession to Python's ConfigParser.
+
+ For each name=value pair parsed, call handler function with given user
+ pointer as well as section, name, and value (data only valid for duration
+ of handler call). Handler should return nonzero on success, zero on error.
+
+ Returns 0 on success, line number of first error on parse error (doesn't
+ stop on first error), -1 on file open error, or -2 on memory allocation
+ error (only when INI_USE_STACK is zero).
+*/
+int ini_parse(const char* filename, ini_handler handler, void* user);
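+
+/* A minimal usage sketch (illustrative only; `my_handler` and `cfg` are placeholders,
+   not part of this header):
+
+       static int my_handler(void* user, const char* section,
+                             const char* name, const char* value)
+       {
+           // inspect section/name/value and store into *user; return 1 on success
+           return 1;
+       }
+       ...
+       if (ini_parse("settings.ini", my_handler, &cfg) < 0)
+           printf("could not open settings.ini\n");
+*/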
+
+/* Same as ini_parse(), but takes a FILE* instead of filename. This doesn't
+ close the file when it's finished -- the caller must do that. */
+int ini_parse_file(FILE* file, ini_handler handler, void* user);
+
+/* Same as ini_parse(), but takes an ini_reader function pointer instead of
+ filename. Used for implementing custom or string-based I/O. */
+int ini_parse_stream(ini_reader reader, void* stream, ini_handler handler,
+ void* user);
+
+/* Nonzero to allow multi-line value parsing, in the style of Python's
+ ConfigParser. If allowed, ini_parse() will call the handler with the same
+ name for each subsequent line parsed. */
+#ifndef INI_ALLOW_MULTILINE
+#define INI_ALLOW_MULTILINE 1
+#endif
+
+/* Nonzero to allow a UTF-8 BOM sequence (0xEF 0xBB 0xBF) at the start of
+ the file. See http://code.google.com/p/inih/issues/detail?id=21 */
+#ifndef INI_ALLOW_BOM
+#define INI_ALLOW_BOM 1
+#endif
+
+/* Nonzero to use stack, zero to use heap (malloc/free). */
+#ifndef INI_USE_STACK
+#define INI_USE_STACK 1
+#endif
+
+/* Stop parsing on first error (default is to keep parsing). */
+#ifndef INI_STOP_ON_FIRST_ERROR
+#define INI_STOP_ON_FIRST_ERROR 0
+#endif
+
+/* Maximum line length for any line in INI file. */
+#ifndef INI_MAX_LINE
+#define INI_MAX_LINE 200
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INI_H__ */
diff --git a/src/include/kseq.h b/src/include/kseq.h
new file mode 100644
index 0000000..d9dc686
--- /dev/null
+++ b/src/include/kseq.h
@@ -0,0 +1,256 @@
+/* The MIT License
+
+ Copyright (c) 2008, 2009, 2011 Attractive Chaos <attractor at live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/* Last Modified: 05MAR2012 */
+
+#ifndef AC_KSEQ_H
+#define AC_KSEQ_H
+
+#include <ctype.h>
+#include <string.h>
+#include <stdlib.h>
+
+#ifndef klib_unused
+#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+#define klib_unused __attribute__ ((__unused__))
+#else
+#define klib_unused
+#endif
+#endif /* klib_unused */
+
+#define KS_SEP_SPACE 0 // isspace(): \t, \n, \v, \f, \r
+#define KS_SEP_TAB 1 // isspace() && !' '
+#define KS_SEP_LINE 2 // line separator: "\n" (Unix) or "\r\n" (Windows)
+#define KS_SEP_MAX 2
+
+#define __KS_TYPE(type_t) \
+ typedef struct __kstream_t { \
+ int begin, end; \
+ int is_eof:2, bufsize:30; \
+ type_t f; \
+ unsigned char *buf; \
+ } kstream_t;
+
+#define ks_eof(ks) ((ks)->is_eof && (ks)->begin >= (ks)->end)
+#define ks_rewind(ks) ((ks)->is_eof = (ks)->begin = (ks)->end = 0)
+
+#define __KS_BASIC(SCOPE, type_t, __bufsize) \
+ SCOPE kstream_t *ks_init(type_t f) \
+ { \
+ kstream_t *ks = (kstream_t*)calloc(1, sizeof(kstream_t)); \
+ ks->f = f; ks->bufsize = __bufsize; \
+ ks->buf = (unsigned char*)malloc(__bufsize); \
+ return ks; \
+ } \
+ SCOPE void ks_destroy(kstream_t *ks) \
+ { \
+ if (!ks) return; \
+ free(ks->buf); \
+ free(ks); \
+ }
+
+#define __KS_INLINED(__read) \
+ static inline klib_unused int ks_getc(kstream_t *ks) \
+ { \
+ if (ks->is_eof && ks->begin >= ks->end) return -1; \
+ if (ks->begin >= ks->end) { \
+ ks->begin = 0; \
+ ks->end = __read(ks->f, ks->buf, ks->bufsize); \
+ if (ks->end < ks->bufsize) ks->is_eof = 1; \
+ if (ks->end == 0) return -1; \
+ } \
+ return (int)ks->buf[ks->begin++]; \
+ } \
+ static inline klib_unused int ks_getuntil(kstream_t *ks, int delimiter, kstring_t *str, int *dret) \
+ { return ks_getuntil2(ks, delimiter, str, dret, 0); }
+
+#ifndef KSTRING_T
+#define KSTRING_T kstring_t
+typedef struct __kstring_t {
+ size_t l, m;
+ char *s;
+} kstring_t;
+#endif
+
+#ifndef kroundup32
+#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+#endif
+
+#define __KS_GETUNTIL(SCOPE, __read) \
+ SCOPE int ks_getuntil2(kstream_t *ks, int delimiter, kstring_t *str, int *dret, int append) \
+ { \
+ if (dret) *dret = 0; \
+ str->l = append? str->l : 0; \
+ if (ks->begin >= ks->end && ks->is_eof) return -1; \
+ for (;;) { \
+ int i; \
+ if (ks->begin >= ks->end) { \
+ if (!ks->is_eof) { \
+ ks->begin = 0; \
+ ks->end = __read(ks->f, ks->buf, ks->bufsize); \
+ if (ks->end < ks->bufsize) ks->is_eof = 1; \
+ if (ks->end == 0) break; \
+ } else break; \
+ } \
+ if (delimiter == KS_SEP_LINE) { \
+ for (i = ks->begin; i < ks->end; ++i) \
+ if (ks->buf[i] == '\n') break; \
+ } else if (delimiter > KS_SEP_MAX) { \
+ for (i = ks->begin; i < ks->end; ++i) \
+ if (ks->buf[i] == delimiter) break; \
+ } else if (delimiter == KS_SEP_SPACE) { \
+ for (i = ks->begin; i < ks->end; ++i) \
+ if (isspace(ks->buf[i])) break; \
+ } else if (delimiter == KS_SEP_TAB) { \
+ for (i = ks->begin; i < ks->end; ++i) \
+ if (isspace(ks->buf[i]) && ks->buf[i] != ' ') break; \
+ } else i = 0; /* never come to here! */ \
+ if (str->m - str->l < (size_t)(i - ks->begin + 1)) { \
+ str->m = str->l + (i - ks->begin) + 1; \
+ kroundup32(str->m); \
+ str->s = (char*)realloc(str->s, str->m); \
+ } \
+ memcpy(str->s + str->l, ks->buf + ks->begin, i - ks->begin); \
+ str->l = str->l + (i - ks->begin); \
+ ks->begin = i + 1; \
+ if (i < ks->end) { \
+ if (dret) *dret = ks->buf[i]; \
+ break; \
+ } \
+ } \
+ if (str->s == 0) { \
+ str->m = 1; \
+ str->s = (char*)calloc(1, 1); \
+ } else if (delimiter == KS_SEP_LINE && str->l > 1 && str->s[str->l-1] == '\r') --str->l; \
+ str->s[str->l] = '\0'; \
+ return str->l; \
+ }
+
+#define KSTREAM_INIT2(SCOPE, type_t, __read, __bufsize) \
+ __KS_TYPE(type_t) \
+ __KS_BASIC(SCOPE, type_t, __bufsize) \
+ __KS_GETUNTIL(SCOPE, __read) \
+ __KS_INLINED(__read)
+
+#define KSTREAM_INIT(type_t, __read, __bufsize) KSTREAM_INIT2(static, type_t, __read, __bufsize)
+
+#define KSTREAM_DECLARE(type_t, __read) \
+ __KS_TYPE(type_t) \
+ extern int ks_getuntil2(kstream_t *ks, int delimiter, kstring_t *str, int *dret, int append); \
+ extern kstream_t *ks_init(type_t f); \
+ extern void ks_destroy(kstream_t *ks); \
+ __KS_INLINED(__read)
+
+/******************
+ * FASTA/Q parser *
+ ******************/
+
+#define kseq_rewind(ks) ((ks)->last_char = (ks)->f->is_eof = (ks)->f->begin = (ks)->f->end = 0)
+
+#define __KSEQ_BASIC(SCOPE, type_t) \
+ SCOPE kseq_t *kseq_init(type_t fd) \
+ { \
+ kseq_t *s = (kseq_t*)calloc(1, sizeof(kseq_t)); \
+ s->f = ks_init(fd); \
+ return s; \
+ } \
+ SCOPE void kseq_destroy(kseq_t *ks) \
+ { \
+ if (!ks) return; \
+ free(ks->name.s); free(ks->comment.s); free(ks->seq.s); free(ks->qual.s); \
+ ks_destroy(ks->f); \
+ free(ks); \
+ }
+
+/* Return value:
+ >=0 length of the sequence (normal)
+ -1 end-of-file
+ -2 truncated quality string
+ */
+#define __KSEQ_READ(SCOPE) \
+ SCOPE int kseq_read(kseq_t *seq) \
+ { \
+ int c; \
+ kstream_t *ks = seq->f; \
+ if (seq->last_char == 0) { /* then jump to the next header line */ \
+ while ((c = ks_getc(ks)) != -1 && c != '>' && c != '@'); \
+ if (c == -1) return -1; /* end of file */ \
+ seq->last_char = c; \
+ } /* else: the first header char has been read in the previous call */ \
+ seq->comment.l = seq->seq.l = seq->qual.l = 0; /* reset all members */ \
+ if (ks_getuntil(ks, 0, &seq->name, &c) < 0) return -1; /* normal exit: EOF */ \
+ if (c != '\n') ks_getuntil(ks, KS_SEP_LINE, &seq->comment, 0); /* read FASTA/Q comment */ \
+ if (seq->seq.s == 0) { /* we can do this in the loop below, but that is slower */ \
+ seq->seq.m = 256; \
+ seq->seq.s = (char*)malloc(seq->seq.m); \
+ } \
+ while ((c = ks_getc(ks)) != -1 && c != '>' && c != '+' && c != '@') { \
+ if (c == '\n') continue; /* skip empty lines */ \
+ seq->seq.s[seq->seq.l++] = c; /* this is safe: we always have enough space for 1 char */ \
+ ks_getuntil2(ks, KS_SEP_LINE, &seq->seq, 0, 1); /* read the rest of the line */ \
+ } \
+ if (c == '>' || c == '@') seq->last_char = c; /* the first header char has been read */ \
+ if (seq->seq.l + 1 >= seq->seq.m) { /* seq->seq.s[seq->seq.l] below may be out of boundary */ \
+ seq->seq.m = seq->seq.l + 2; \
+ kroundup32(seq->seq.m); /* rounded to the next closest 2^k */ \
+ seq->seq.s = (char*)realloc(seq->seq.s, seq->seq.m); \
+ } \
+ seq->seq.s[seq->seq.l] = 0; /* null terminated string */ \
+ if (c != '+') return seq->seq.l; /* FASTA */ \
+ if (seq->qual.m < seq->seq.m) { /* allocate memory for qual in case insufficient */ \
+ seq->qual.m = seq->seq.m; \
+ seq->qual.s = (char*)realloc(seq->qual.s, seq->qual.m); \
+ } \
+ while ((c = ks_getc(ks)) != -1 && c != '\n'); /* skip the rest of '+' line */ \
+ if (c == -1) return -2; /* error: no quality string */ \
+ while (ks_getuntil2(ks, KS_SEP_LINE, &seq->qual, 0, 1) >= 0 && seq->qual.l < seq->seq.l); \
+ seq->last_char = 0; /* we have not come to the next header line */ \
+ if (seq->seq.l != seq->qual.l) return -2; /* error: qual string is of a different length */ \
+ return seq->seq.l; \
+ }
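+
+/* A minimal usage sketch (illustrative only; the file name and the zlib-based reader
+   are assumptions, not part of this header):
+
+       #include <zlib.h>
+       #include <stdio.h>
+       #include "kseq.h"
+       KSEQ_INIT(gzFile, gzread)
+
+       gzFile fp = gzopen("reads.fasta", "r");
+       kseq_t *seq = kseq_init(fp);
+       while (kseq_read(seq) >= 0)   // >= 0: sequence length; -1: EOF; -2: truncated quality
+           printf("%s\t%ld\n", seq->name.s, (long)seq->seq.l);
+       kseq_destroy(seq);
+       gzclose(fp);
+*/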
+
+#define __KSEQ_TYPE(type_t) \
+ typedef struct { \
+ kstring_t name, comment, seq, qual; \
+ int last_char; \
+ kstream_t *f; \
+ } kseq_t;
+
+#define KSEQ_INIT2(SCOPE, type_t, __read) \
+ KSTREAM_INIT2(SCOPE, type_t, __read, 16384) \
+ __KSEQ_TYPE(type_t) \
+ __KSEQ_BASIC(SCOPE, type_t) \
+ __KSEQ_READ(SCOPE)
+
+#define KSEQ_INIT(type_t, __read) KSEQ_INIT2(static, type_t, __read)
+
+#define KSEQ_DECLARE(type_t) \
+ __KS_TYPE(type_t) \
+ __KSEQ_TYPE(type_t) \
+ extern kseq_t *kseq_init(type_t fd); \
+ void kseq_destroy(kseq_t *ks); \
+ int kseq_read(kseq_t *seq);
+
+#endif
diff --git a/src/include/paf.h b/src/include/paf.h
new file mode 100644
index 0000000..5c8ecf9
--- /dev/null
+++ b/src/include/paf.h
@@ -0,0 +1,63 @@
+/* The MIT License
+
+ Copyright (c) 2008, 2009, 2011 Attractive Chaos <attractor at live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+#ifndef PAF_PAF_H
+#define PAF_PAF_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#ifndef KSTRING_T
+#define KSTRING_T kstring_t
+typedef struct __kstring_t {
+ size_t l, m;
+ char *s;
+} kstring_t;
+#endif
+
+typedef struct {
+ void *fp;
+ kstring_t buf;
+} paf_file_t;
+
+typedef struct {
+ const char *qn, *tn; // these point to the input string; NOT allocated
+ uint32_t ql, qs, qe, tl, ts, te;
+ uint32_t ml:31, rev:1, bl;
+} paf_rec_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+paf_file_t *paf_open(const char *fn);
+int paf_close(paf_file_t *pf);
+int paf_read(paf_file_t *pf, paf_rec_t *r);
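+
+/* A minimal usage sketch (illustrative only; it assumes, as in miniasm's paf.c, that
+   paf_read() returns a negative value once no records are left):
+
+       paf_file_t *fp = paf_open("overlaps.paf");
+       paf_rec_t r;
+       while (paf_read(fp, &r) >= 0)
+           printf("%s -> %s (%c)\n", r.qn, r.tn, r.rev ? '-' : '+');
+       paf_close(fp);
+*/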
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/layout/CMakeLists.txt b/src/layout/CMakeLists.txt
new file mode 100644
index 0000000..912bb8e
--- /dev/null
+++ b/src/layout/CMakeLists.txt
@@ -0,0 +1,9 @@
+cmake_minimum_required(VERSION 3.2)
+
+set(Boost_USE_STATIC_LIBS ON)
+
+FIND_PACKAGE( Boost COMPONENTS graph REQUIRED )
+INCLUDE_DIRECTORIES( ${Boost_INCLUDE_DIR} )
+
+add_executable(hinging hinging)
+target_link_libraries(hinging LAInterface ini spdlog ${Boost_LIBRARIES})
diff --git a/src/layout/hinging.cpp b/src/layout/hinging.cpp
new file mode 100644
index 0000000..73bbf83
--- /dev/null
+++ b/src/layout/hinging.cpp
@@ -0,0 +1,2056 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <unordered_map>
+#include <algorithm>
+#include <fstream>
+#include <sstream>
+#include <iostream>
+#include <set>
+#include <omp.h>
+#include <tuple>
+#include <iomanip>
+
+#include "spdlog/spdlog.h"
+#include "cmdline.h"
+#include "INIReader.h"
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include <utility>
+#include <boost/graph/adjacency_list.hpp>
+#include <boost/graph/connected_components.hpp>
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#define LAST_READ_SYMBOL '$'
+
+#define HINGED_EDGE 1
+#define UNHINGED_EDGE -1
+#define REVERSE_COMPLEMENT_MATCH 1
+#define SAME_DIRECTION_MATCH 0
+
+using namespace boost;
+
+typedef adjacency_list <vecS, vecS, undirectedS> Graph;
+typedef std::tuple<Node, Node, int> Edge_w;
+typedef std::pair<Node, Node> Edge_nw;
+
+
+static int ORDER(const void *l, const void *r) {
+ int x = *((int32 *) l);
+ int y = *((int32 *) r);
+ return (x - y);
+}
+
+
+std::ostream& operator<<(std::ostream& out, const MatchType value){
+ static std::map<MatchType, std::string> strings;
+ if (strings.size() == 0){
+#define INSERT_ELEMENT(p) strings[p] = #p
+ INSERT_ELEMENT(FORWARD);
+ INSERT_ELEMENT(BACKWARD);
+ INSERT_ELEMENT(ACOVERB);
+ INSERT_ELEMENT(BCOVERA);
+ INSERT_ELEMENT(INTERNAL);
+ INSERT_ELEMENT(UNDEFINED);
+ INSERT_ELEMENT(NOT_ACTIVE);
+#undef INSERT_ELEMENT
+ }
+ return out << strings[value];
+}
+
+
+
+bool compare_overlap(LOverlap * ovl1, LOverlap * ovl2) {
+ return ((ovl1->read_A_match_end_ - ovl1->read_A_match_start_
+ + ovl1->read_B_match_end_ - ovl1->read_B_match_start_) >
+ (ovl2->read_A_match_end_ - ovl2->read_A_match_start_
+ + ovl2->read_B_match_end_ - ovl2->read_B_match_start_));
+}
+
+bool compare_overlap_effective(LOverlap * ovl1, LOverlap * ovl2) {
+ return ((ovl1->eff_read_A_match_end_ - ovl1->eff_read_A_match_start_
+ + ovl1->eff_read_B_match_end_ - ovl1->eff_read_B_match_start_) >
+ (ovl2->eff_read_A_match_end_ - ovl2->eff_read_A_match_start_
+ + ovl2->eff_read_B_match_end_ - ovl2->eff_read_B_match_start_));
+}
+
+bool compare_overlap_weight(LOverlap * ovl1, LOverlap * ovl2) {
+ return (ovl1->weight > ovl2->weight);
+}
+
+bool compare_sum_overlaps(const std::vector<LOverlap * > * ovl1, const std::vector<LOverlap *> * ovl2) {
+ int sum1 = 0;
+ int sum2 = 0;
+ for (int i = 0; i < ovl1->size(); i++)
+ sum1 += (*ovl1)[i]->read_A_match_end_ - (*ovl1)[i]->read_A_match_start_
+ + (*ovl1)[i]->read_B_match_end_ - (*ovl1)[i]->read_B_match_start_;
+ for (int i = 0; i < ovl2->size(); i++)
+ sum2 += (*ovl2)[i]->read_A_match_end_ - (*ovl2)[i]->read_A_match_start_
+ + (*ovl2)[i]->read_B_match_end_ - (*ovl2)[i]->read_B_match_start_;
+ return sum1 > sum2;
+}
+
+bool compare_pos(LOverlap * ovl1, LOverlap * ovl2) {
+ return (ovl1->read_A_match_start_) > (ovl2->read_A_match_start_);
+}
+
+bool compare_overlap_abpos(LOverlap * ovl1, LOverlap * ovl2) {
+ return ovl1->read_A_match_start_ < ovl2->read_A_match_start_;
+}
+
+bool compare_overlap_aepos(LOverlap * ovl1, LOverlap * ovl2) {
+ return ovl1->read_A_match_start_ > ovl2->read_A_match_start_;
+}
+
+std::vector<std::pair<int,int>> Merge(std::vector<LOverlap *> & intervals, int cutoff)
+{
+ //std::cout<<"Merge"<<std::endl;
+ std::vector<std::pair<int, int > > ret;
+ int n = intervals.size();
+ if (n == 0) return ret;
+
+ if(n == 1) {
+ ret.push_back(std::pair<int,int>(intervals[0]->read_A_match_start_, intervals[0]->read_A_match_end_));
+ return ret;
+ }
+
+ sort(intervals.begin(),intervals.end(),compare_overlap_abpos); //sort according to left
+
+ int left= intervals[0]->read_A_match_start_ + cutoff, right = intervals[0]->read_A_match_end_ - cutoff;
+    //left, right hold the boundaries of the current merged interval
+    //(each overlap interval is shrunk by cutoff on each side before merging)
+
+ for(int i = 1; i < n; i++)
+ {
+ if(intervals[i]->read_A_match_start_ + cutoff <= right)
+ {
+ right=std::max(right, intervals[i]->read_A_match_end_ - cutoff);
+ }
+ else
+ {
+ ret.push_back(std::pair<int, int>(left,right));
+ left = intervals[i]->read_A_match_start_ + cutoff;
+ right = intervals[i]->read_A_match_end_ - cutoff;
+ }
+ }
+ ret.push_back(std::pair<int, int>(left,right));
+ return ret;
+}
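+// Worked example (illustrative): with cutoff = 0 and overlaps whose read_A match intervals
+// are [0,100], [50,150] and [200,300], Merge() returns {(0,150), (200,300)}; a nonzero cutoff
+// first shrinks every interval by cutoff on each side, so only overlaps that still intersect
+// after shrinking are merged.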
+
+Interval Effective_length(std::vector<LOverlap *> & intervals, int min_cov) {
+ Interval ret;
+ sort(intervals.begin(),intervals.end(),compare_overlap_abpos); //sort according to left
+
+ if (intervals.size() > min_cov) {
+ ret.first = intervals[min_cov]->read_A_match_start_;
+ } else
+ ret.first = 0;
+    sort(intervals.begin(),intervals.end(),compare_overlap_aepos); //sort in descending order of read_A match start
+ if (intervals.size() > min_cov) {
+ ret.second = intervals[min_cov]->read_A_match_end_;
+ } else
+ ret.second = 0;
+ return ret;
+}
+
+bool ProcessAlignment(LOverlap * match, Read * read_A, Read * read_B, int ALN_THRESHOLD,
+ int THETA, int THETA2, bool trim){
+    //Function takes as input a pointer to a match and pointers to the read_A and read_B of that match,
+    //together with the constants ALN_THRESHOLD, THETA and THETA2
+    //It copies the effective read start and end into the match object
+    //Next it trims the match (when trim is set)
+    //Then it determines the type of match by calling AddTypesAsymmetric() on the object,
+    //or marks it NOT_ACTIVE if the trimmed match is shorter than ALN_THRESHOLD
+    //Returns true if read_A is contained in read_B (i.e. the match type is BCOVERA)
+ //std::cout<<" In ProcessAlignment"<<std::endl;
+ bool contained=false;
+ match->eff_read_A_start_ = read_A->effective_start;
+ match->eff_read_A_end_ = read_A->effective_end;
+
+ // removed the following if, so that things agree with the convention for reverse complement matches
+
+ match->eff_read_B_start_ = read_B->effective_start;
+ match->eff_read_B_end_ = read_B->effective_end;
+
+// if (match->reverse_complement_match_ == 0) {
+// match->eff_read_B_start_ = read_B->effective_start;
+// match->eff_read_B_end_ = read_B->effective_end;
+// } else {
+// match->eff_read_B_start_ = read_B->len - read_B->effective_end;
+// match->eff_read_B_end_ = read_B->len - read_B->effective_start;
+// }
+
+ /*printf("bef %d %d %d [%d %d] [%d %d] [%d %d] [%d %d]\n", match->read_A_id_, match->read_B_id_,
+ * match->reverse_complement_match_,
+ match->read_A_match_start_, match->read_A_match_end_, match->read_B_match_start_, match->read_B_match_end_,
+ match->eff_read_A_start_, match->eff_read_A_end_, match->eff_read_B_start_, match->eff_read_B_end_
+ );*/
+
+ if (trim)
+ match->trim_overlap();
+ else {
+ match->eff_read_B_match_start_ = match->read_B_match_start_;
+ match->eff_read_B_match_end_ = match->read_B_match_end_;
+ match->eff_read_A_match_start_ = match->read_A_match_start_;
+ match->eff_read_A_match_end_ = match->read_A_match_end_;
+ }
+ /*printf("aft %d %d %d [%d %d] [%d %d] [%d %d] [%d %d]\n", match->read_A_id_, match->read_B_id_,
+ * match->reverse_complement_match_,
+ match->eff_read_A_match_start_, match->eff_read_A_match_end_, match->eff_read_B_match_start_,
+ match->eff_read_B_match_end_,
+ match->eff_read_A_start_, match->eff_read_A_end_, match->eff_read_B_start_, match->eff_read_B_end_
+ );*/
+ //std::cout<< contained<<std::endl;
+ if (((match->eff_read_B_match_end_ - match->eff_read_B_match_start_) < ALN_THRESHOLD)
+ or ((match->eff_read_A_match_end_ - match->eff_read_A_match_start_) < ALN_THRESHOLD) or (!match->active))
+
+ {
+ match->active = false;
+ match->match_type_ = NOT_ACTIVE;
+ } else {
+ match->AddTypesAsymmetric(THETA,THETA2);
+ if (match->match_type_ == BCOVERA) {
+ contained = true;
+ }
+ //std::cout<< contained<< std::endl;
+ }
+
+ match->weight =
+ match->eff_read_A_match_end_ - match->eff_read_A_match_start_
+ + match->eff_read_B_match_end_ - match->eff_read_B_match_start_;
+
+ return contained;
+}
+
+class Hinge {
+public:
+ int pos;
+ int type; // 1, -1
+ bool active;
+ Hinge(int pos, int t, bool active):pos(pos),type(t), active(active) {};
+ Hinge():pos(0),type(1), active(true) {};
+};
+
+// if we uncomment this, we need to make sure it works with the new convention of B_match_start and
+// B_match_end for reverse complement matches
+
+//bool isValidHinge(LOverlap *match, std::vector<Hinge> &read_hinges){
+// //Returns true if read_hinges (a vector of all hinges corresponding to a read )
+// // has a hinge of appropriate type within tolerance from positions of start of the
+// // overlap on read_B of the overlap given.
+// int tolerance=100;//TODO put as #define
+//    int position=match->eff_read_B_match_start_; // stopped here
+// int type; //TODO : Make enum
+// if (match->match_type_==FORWARD_INTERNAL)
+// type=1;
+// else if (match->match_type_==BACKWARD_INTERNAL)
+// type=-1;
+//
+// if (match->reverse_complement_match_==1){
+// type=-type;
+// position=match->eff_read_B_match_end_;
+// }
+//
+// bool valid=false;
+// for (int index=0; index < read_hinges.size(); index++) {
+// if ((abs(position - read_hinges[index].pos) < tolerance) and (type == read_hinges[index].type))
+// valid = true;
+// return valid;
+// }
+//}
+
+
+
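+// Writes one graph edge per line: source_read target_read weight <orientation fields> hinged_flag,
+// followed by the bracketed coordinate pairs laid out in the fprintf calls below; FORWARD(_INTERNAL)
+// matches are written as A->B edges and BACKWARD(_INTERNAL) matches as B->A edges.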
+void PrintOverlapToFile(FILE * file_pointer, LOverlap * match) {
+
+ int direction = match->reverse_complement_match_;
+ int hinged;
+
+ if ((match->match_type_ == FORWARD) or (match->match_type_ == BACKWARD))
+ hinged = UNHINGED_EDGE;
+
+ else if ((match->match_type_ == FORWARD_INTERNAL) or (match->match_type_ == BACKWARD_INTERNAL))
+ hinged = HINGED_EDGE;
+
+ if ((match->match_type_ == FORWARD_INTERNAL) or (match->match_type_ == FORWARD)) {
+ fprintf(file_pointer, "%d %d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ match->read_A_id_,
+ match->read_B_id_,
+ match->weight,
+ 0,
+ direction,
+ hinged,
+ match->eff_read_A_match_start_,
+ match->eff_read_A_match_end_,
+ match->eff_read_B_match_start_,
+ match->eff_read_B_match_end_,
+ match->eff_read_A_start_,
+ match->eff_read_A_end_,
+ match->eff_read_B_start_,
+ match->eff_read_B_end_,
+
+ match->read_A_match_start_,
+ match->read_A_match_end_,
+ match->read_B_match_start_,
+ match->read_B_match_end_
+
+
+ );
+ }
+ else if ((match->match_type_ == BACKWARD_INTERNAL) or (match->match_type_ == BACKWARD)){
+ fprintf(file_pointer, "%d %d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ match->read_B_id_,
+ match->read_A_id_,
+ match->weight,
+ direction,
+ 0,
+ hinged,
+ match->eff_read_B_match_start_,
+ match->eff_read_B_match_end_,
+ match->eff_read_A_match_start_,
+ match->eff_read_A_match_end_,
+ match->eff_read_B_start_,
+ match->eff_read_B_end_,
+ match->eff_read_A_start_,
+ match->eff_read_A_end_,
+
+ match->read_A_match_start_,
+ match->read_A_match_end_,
+ match->read_B_match_start_,
+ match->read_B_match_end_
+
+ );
+ }
+}
+
+
+
+
+void PrintOverlapToFile2(FILE * file_pointer, LOverlap * match, int hinge_pos) {
+
+ int direction = match->reverse_complement_match_;
+ int hinged;
+
+// if ((match->match_type_ == FORWARD) or (match->match_type_ == BACKWARD))
+// hinged = UNHINGED_EDGE;
+//
+// else if ((match->match_type_ == FORWARD_INTERNAL) or (match->match_type_ == BACKWARD_INTERNAL))
+// hinged = HINGED_EDGE;
+
+// if ((match->match_type_ == FORWARD) or (match->match_type_ == BACKWARD))
+// hinged = 0;
+// else if (match->match_type_ == FORWARD_INTERNAL)
+// hinged = 1;
+// else if (match->match_type_ == BACKWARD_INTERNAL)
+// hinged = -1;
+
+ if (match->match_type_ == FORWARD) {
+ fprintf(file_pointer, "%d %d %d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ match->read_A_id_,
+ match->read_B_id_,
+ match->weight,
+ 0,
+ direction,
+ 0,
+ -1, // hinge pos
+ match->eff_read_A_match_start_,
+ match->eff_read_A_match_end_,
+ match->eff_read_B_match_start_,
+ match->eff_read_B_match_end_,
+ match->eff_read_A_start_,
+ match->eff_read_A_end_,
+ match->eff_read_B_start_,
+ match->eff_read_B_end_);
+ }
+ else if (match->match_type_ == BACKWARD) {
+ fprintf(file_pointer, "%d %d %d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ match->read_B_id_,
+ match->read_A_id_,
+ match->weight,
+ direction,
+ 0,
+ 0,
+ -1, // hinge pos
+ match->eff_read_B_match_start_,
+ match->eff_read_B_match_end_,
+ match->eff_read_A_match_start_,
+ match->eff_read_A_match_end_,
+ match->eff_read_B_start_,
+ match->eff_read_B_end_,
+ match->eff_read_A_start_,
+ match->eff_read_A_end_);
+ }
+ else if (match->match_type_ == FORWARD_INTERNAL) {
+
+ fprintf(file_pointer, "%d %d %d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ match->read_A_id_,
+ match->read_B_id_,
+ match->weight,
+ 0,
+ direction,
+ 1, // hinged forward
+ hinge_pos,
+ match->eff_read_A_match_start_,
+ match->eff_read_A_match_end_,
+ match->eff_read_B_match_start_,
+ match->eff_read_B_match_end_,
+ match->eff_read_A_start_,
+ match->eff_read_A_end_,
+ match->eff_read_B_start_,
+ match->eff_read_B_end_);
+ }
+ else if (match->match_type_ == BACKWARD_INTERNAL) {
+ fprintf(file_pointer, "%d %d %d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ match->read_B_id_,
+ match->read_A_id_,
+ match->weight,
+ direction,
+ 0,
+ -1, // hinged backward
+ hinge_pos,
+ match->eff_read_B_match_start_,
+ match->eff_read_B_match_end_,
+ match->eff_read_A_match_start_,
+ match->eff_read_A_match_end_,
+ match->eff_read_B_start_,
+ match->eff_read_B_end_,
+ match->eff_read_A_start_,
+ match->eff_read_A_end_);
+ }
+}
+
+
+
+
+
+
+int main(int argc, char *argv[]) {
+
+ mkdir("log",S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+
+
+ cmdline::parser cmdp;
+ cmdp.add<std::string>("db", 'b', "db file name", false, "");
+ cmdp.add<std::string>("las", 'l', "las file name", false, "");
+ cmdp.add<std::string>("paf", 'p', "paf file name", false, "");
+ cmdp.add<std::string>("config", 'c', "configuration file name", false, "");
+ cmdp.add<std::string>("fasta", 'f', "fasta file name", false, "");
+ cmdp.add<std::string>("prefix", 'x', "(intermediate output) input file prefix", true, "");
+ cmdp.add<std::string>("out", 'o', "final output file name", true, "");
+ cmdp.add<std::string>("log", 'g', "log folder name", false, "log");
+ cmdp.add("debug", '\0', "debug mode");
+
+
+
+
+// cmdp.add<std::string>("restrictreads",'r',"restrict to reads in the file",false,"");
+
+
+ cmdp.parse_check(argc, argv);
+
+ LAInterface la;
+ const char *name_db = cmdp.get<std::string>("db").c_str(); //.db file of reads to load
+ const char *name_las = cmdp.get<std::string>("las").c_str();//.las file of alignments
+ const char *name_paf = cmdp.get<std::string>("paf").c_str();
+ const char *name_fasta = cmdp.get<std::string>("fasta").c_str();
+ const char *name_config = cmdp.get<std::string>("config").c_str();//name of the configuration file, in INI format
+ std::string out = cmdp.get<std::string>("prefix");
+ std::string out_name = cmdp.get<std::string>("out");
+// const char * name_restrict = cmdp.get<std::string>("restrictreads").c_str();
+
+
+ std::string name_mask = out + ".mas";
+ std::string name_max = out + ".max";
+ std::string name_homo = out + ".homologous.txt";
+ std::string name_rep = out + ".repeat.txt";
+ std::string name_hg = out + ".hinges.txt";
+ std::string name_cov = out + ".coverage.txt";
+ std::string name_garbage = out + ".garbage.txt";
+ std::string name_contained = out + ".contained.txt";
+ std::string name_deadend = out_name + ".deadends.txt";
+
+
+ std::ofstream deadend_out(name_deadend);
+ std::ofstream maximal_reads(name_max);
+ std::ofstream garbage_out(name_garbage);
+ std::ofstream contained_out(name_contained);
+ std::ifstream homo(name_homo);
+ std::vector<int> homo_reads;
+
+
+// bool delete_telomere = false; // TODO: command line option to set this true
+
+ int read_id;
+ while (homo >> read_id) homo_reads.push_back(read_id);
+
+
+ namespace spd = spdlog;
+
+ //auto console = spd::stdout_logger_mt("console");
+ std::vector<spdlog::sink_ptr> sinks;
+ sinks.push_back(std::make_shared<spdlog::sinks::stdout_sink_st>());
+ sinks.push_back(
+ std::make_shared<spdlog::sinks::daily_file_sink_st>(cmdp.get<std::string>("log") + "/log", "txt", 23, 59));
+ auto console = std::make_shared<spdlog::logger>("log", std::begin(sinks), std::end(sinks));
+ spdlog::register_logger(console);
+
+ console->info("Hinging layout");
+
+
+ if (cmdp.exist("debug")) {
+ char *buff = (char *) malloc(sizeof(char) * 2000);
+ getwd(buff);
+ console->info("current user {}, current working directory {}", getlogin(), buff);
+ free(buff);
+ }
+
+ console->info("name of db: {}, name of .las file {}", name_db, name_las);
+ console->info("name of fasta: {}, name of .paf file {}", name_fasta, name_paf);
+ console->info("filter files prefix: {}", out);
+ console->info("output prefix: {}", out_name);
+
+
+ std::ifstream ini_file(name_config);
+ std::string str((std::istreambuf_iterator<char>(ini_file)),
+ std::istreambuf_iterator<char>());
+
+ console->info("Parameters passed in \n{}", str);
+
+ if (strlen(name_db) > 0)
+ la.openDB(name_db);
+
+
+ if (strlen(name_las) > 0)
+ la.openAlignmentFile(name_las);
+
+ int64 n_aln = 0;
+
+ if (strlen(name_las) > 0) {
+ n_aln = la.getAlignmentNumber();
+ console->info("Load alignments from {}", name_las);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ int n_read;
+ if (strlen(name_db) > 0)
+ n_read = la.getReadNumber();
+
+ std::vector<Read *> reads; //Vector of pointers to all reads
+
+ if (strlen(name_fasta) > 0) {
+ n_read = la.loadFASTA(name_fasta, reads);
+ }
+
+ console->info("# Reads: {}", n_read); // output some statistics
+
+ std::vector<LOverlap *> aln;//Vector of pointers to all alignments
+
+ if (strlen(name_las) > 0) {
+ la.resetAlignment();
+ la.getOverlap(aln, 0, n_aln);
+ }
+
+ if (strlen(name_paf) > 0) {
+ n_aln = la.loadPAF(std::string(name_paf), aln);
+ console->info("Load alignments from {}", name_paf);
+ console->info("# Alignments: {}", n_aln);
+ }
+
+ if (n_aln == 0) {
+ console->error("No alignments!");
+ return 1;
+ }
+
+
+ if (strlen(name_db) > 0) {
+ la.getRead(reads, 0, n_read);
+ }
+
+ console->info("Input data finished");
+
+ INIReader reader(name_config);
+
+ if (reader.ParseError() < 0) {
+ console->warn("Can't load {}", name_config);
+ return 1;
+ }
+
+ int LENGTH_THRESHOLD = int(reader.GetInteger("filter", "length_threshold", -1));
+ double QUALITY_THRESHOLD = reader.GetReal("filter", "quality_threshold", 0.0);
+ int N_ITER = (int) reader.GetInteger("filter", "n_iter", -1);
+ int ALN_THRESHOLD = (int) reader.GetInteger("filter", "aln_threshold", -1);
+ int MIN_COV = (int) reader.GetInteger("filter", "min_cov", -1);
+ int CUT_OFF = (int) reader.GetInteger("filter", "cut_off", -1);
+ int THETA = (int) reader.GetInteger("filter", "theta", -1);
+ int THETA2 = (int) reader.GetInteger("filter", "theta2", 0);
+ int N_PROC = (int) reader.GetInteger("running", "n_proc", 4);
+ int HINGE_SLACK = (int) reader.GetInteger("layout", "hinge_slack", 1000);
+ //This is the amount by which a forward overlap
+ //must be longer than a forward internal overlap to be preferred while
+ //building a graph.
+ int HINGE_TOLERANCE = (int) reader.GetInteger("layout", "hinge_tolerance", 150);
+ //This is how far an overlap must start from a hinge to be considered an internal
+ //overlap.
+ int KILL_HINGE_OVERLAP_ALLOWANCE = (int) reader.GetInteger("layout", "kill_hinge_overlap", 300);
+ int KILL_HINGE_INTERNAL_ALLOWANCE = (int) reader.GetInteger("layout", "kill_hinge_internal", 40);
+
+ int MATCHING_HINGE_SLACK = (int) reader.GetInteger("layout", "matching_hinge_slack", 200);
+
+ int NUM_EVENTS_TELOMERE = (int) reader.GetInteger("layout", "num_events_telomere", 7);
+
+ int MIN_CONNECTED_COMPONENT_SIZE = (int) reader.GetInteger("layout", "min_connected_component_size", 8);
+
+ bool USE_TWO_MATCHES = (int) reader.GetInteger("layout", "use_two_matches", 1);
+ bool delete_telomere = (int) reader.GetInteger("layout", "del_telomere", 0);
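+
+    // Illustrative sketch of the expected INI layout (section and key names are taken from the
+    // reader.GetInteger/GetReal calls above; the values shown are just the in-code defaults used
+    // when a key is absent, not recommended settings):
+    //
+    //   [filter]
+    //   length_threshold = -1
+    //   quality_threshold = 0.0
+    //   n_iter = -1
+    //   aln_threshold = -1
+    //   min_cov = -1
+    //   cut_off = -1
+    //   theta = -1
+    //   theta2 = 0
+    //
+    //   [running]
+    //   n_proc = 4
+    //
+    //   [layout]
+    //   hinge_slack = 1000
+    //   hinge_tolerance = 150
+    //   kill_hinge_overlap = 300
+    //   kill_hinge_internal = 40
+    //   matching_hinge_slack = 200
+    //   num_events_telomere = 7
+    //   min_connected_component_size = 8
+    //   use_two_matches = 1
+    //   del_telomere = 0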
+
+
+
+
+ console->info("LENGTH_THRESHOLD = {}", LENGTH_THRESHOLD);
+ console->info("QUALITY_THRESHOLD = {}", QUALITY_THRESHOLD);
+ console->info("ALN_THRESHOLD = {}", ALN_THRESHOLD);
+ console->info("MIN_COV = {}", MIN_COV);
+ console->info("CUT_OFF = {}", CUT_OFF);
+ console->info("THETA = {}", THETA);
+ console->info("N_ITER = {}", N_ITER);
+ console->info("THETA2 = {}", THETA2);
+ console->info("N_PROC = {}", N_PROC);
+ console->info("HINGE_SLACK = {}", HINGE_SLACK);
+ console->info("HINGE_TOLERANCE = {}", HINGE_TOLERANCE);
+ console->info("KILL_HINGE_OVERLAP_ALLOWANCE = {}", KILL_HINGE_OVERLAP_ALLOWANCE);
+ console->info("KILL_HINGE_INTERNAL_ALLOWANCE = {}", KILL_HINGE_INTERNAL_ALLOWANCE);
+ console->info("MATCHING_HINGE_SLACK = {}", MATCHING_HINGE_SLACK);
+ console->info("MIN_CONNECTED_COMPONENT_SIZE = {}", MIN_CONNECTED_COMPONENT_SIZE);
+ console->info("USE_TWO_MATCHES = {}", USE_TWO_MATCHES);
+ console->info("del_telomeres = {}", delete_telomere);
+
+
+
+
+
+ omp_set_num_threads(N_PROC);
+ //std::vector< std::vector<std::vector<LOverlap*>* > > idx2(n_read);
+ // unordered_map from (aid) to alignments in a vector
+ std::vector<Edge_w> edgelist, edgelist_ms; // save output to edgelist
+ //std::unordered_map<int, std::vector <LOverlap * > >idx3,idx4;
+ // this is the pileup
+ std::vector<std::unordered_map<int, std::vector<LOverlap *> > > idx_ab;
+ /*
+      idx_ab is a vector of length n_read; each element idx_ab[read A id] is a map
+      from read B id to a vector of overlaps
+ */
+ //std::vector<std::vector<LOverlap *>> idx2;
+ /*
+ idx2 is a vector of length n_read, each element idx2[read A id] is a vector,
+ for each read B, we put the best overlap into that vector
+ */
+ //std::vector<std::unordered_map<int, LOverlap *>> idx3;
+ /*
+ idx3 is a vector of length n_read, each element idx3[read A id] is a map,
+      from read B id to the best overlap of read A and read B
+ */
+ std::vector<std::vector<LOverlap *>> matches_forward, matches_backward;
+ //matches_forward is the vector of vectors where matches_forward[read_id] is a vector of matches of read_id
+ //of type FORWARD, and FORWARD_INTERNAL
+ //matches_backward is the vector of vectors where matches_backward[read_id] is a vector of matches of read_id
+ //of type BACKWARD, and BACKWARD_INTERNAL
+
+
+ std::vector<std::vector<LOverlap *>> edges_forward, edges_backward;
+    // edges_forward is a "filtered" version of matches_forward, where every (active) read has exactly
+    // one outgoing match
+    // edges_backward is a "filtered" version of matches_backward, where every (active) read has exactly
+    // one incoming match
+
+
+ std::vector<std::vector<LOverlap *>> intersection_edges_forward, intersection_edges_backward;
+    //Stores the intersection of the forward and backward edge lists, used to construct the intersection list of edges
+
+
+ FILE *mask_file;
+ mask_file = fopen(name_mask.c_str(), "r");
+ int read, rs, re;
+
+ while (fscanf(mask_file, "%d %d %d", &read, &rs, &re) != EOF) {
+ reads[read]->effective_start = rs;
+ reads[read]->effective_end = re;
+ }
+ console->info("read mask finished");
+
+ FILE *repeat_file;
+ repeat_file = fopen(name_rep.c_str(), "r");
+ FILE *hinge_file;
+ hinge_file = fopen(name_hg.c_str(), "r");
+ char *line = NULL;
+ size_t len = 0;
+ std::unordered_map<int, std::vector<std::pair<int, int>>> marked_repeats;
+
+ int telomere_cnt = 0;
+
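+    // Each line of the repeat file (and, below, the hinge file) is parsed as
+    //   <read id> <start_1> <end_1> <start_2> <end_2> ...
+    // i.e. a read id followed by zero or more (start, end) position pairs.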
+ while (getline(&line, &len, repeat_file) != -1) {
+ std::stringstream ss;
+ ss.clear();
+ ss << line;
+ int num;
+ ss >> num;
+ //printf("%d\n",num);
+ marked_repeats[num] = std::vector<std::pair<int, int>>();
+ int r1 = 0, r2 = 0;
+ while (!ss.eof()) {
+ r1 = 0;
+ r2 = 0;
+ ss >> r1 >> r2;
+ if ((r1 != 0) and (r2 != 0)) {
+ //printf("[%d %d]\n", r1, r2);
+ marked_repeats[num].push_back(std::pair<int, int>(r1, r2));
+ }
+ }
+ ss.clear();
+
+ if ((delete_telomere) and (marked_repeats[num].size() > NUM_EVENTS_TELOMERE)) {
+ reads[num]->active = false;
+ telomere_cnt++;
+ }
+
+ }
+ fclose(repeat_file);
+ console->info("read marked repeats");
+ console->info("killed {} reads with many repeats",telomere_cnt);
+
+ std::unordered_map<int, std::vector<std::pair<int, int>>> marked_hinges;
+ while (getline(&line, &len, hinge_file) != -1) {
+ std::stringstream ss;
+ ss << line;
+ int num;
+ ss >> num;
+ //printf("%d\n",num);
+ marked_hinges[num] = std::vector<std::pair<int, int>>();
+ int r1 = 0, r2 = 0;
+ while (!ss.eof()) {
+ r1 = 0;
+ r2 = 0;
+ ss >> r1 >> r2;
+ if ((r1 != 0) and (r2 != 0)) {
+ //printf("[%d %d]\n", r1, r2);
+ marked_hinges[num].push_back(std::pair<int, int>(r1, r2));
+ }
+ }
+ ss.clear();
+ }
+ fclose(hinge_file);
+
+ console->info("read marked hinges");
+
+ if (line)
+ free(line);
+
+ int num_active_read = 0;
+
+ //This seems to be an unnecessary stub
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) num_active_read++;
+ }
+ console->info("active reads: {}", num_active_read);
+
+
+ num_active_read = 0;
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->effective_end - reads[i]->effective_start < LENGTH_THRESHOLD) {
+ reads[i]->active = false;
+ garbage_out << i << std::endl;
+ }
+ else num_active_read++;
+ }
+ console->info("active reads: {}", num_active_read);
+
+ for (int i = 0; i < n_read; i++) {
+        //Initialisation loop
+ //TODO Preallocate memory. Much more efficient.
+ idx_ab.push_back(std::unordered_map<int, std::vector<LOverlap *> >());
+ //idx2.push_back(std::vector<LOverlap *>());
+ matches_forward.push_back(std::vector<LOverlap *>());
+ matches_backward.push_back(std::vector<LOverlap *>());
+ edges_forward.push_back(std::vector<LOverlap *>());
+ edges_backward.push_back(std::vector<LOverlap *>());
+ intersection_edges_forward.push_back(std::vector<LOverlap *>());
+ intersection_edges_backward.push_back(std::vector<LOverlap *>());
+ }
+
+//int num_finished = 0;
+ int num_overlaps = 0;
+ int num_forward_overlaps(0), num_forward_internal_overlaps(0), num_reverse_overlaps(0),
+ num_reverse_internal_overlaps(0), rev_complemented_matches(0);
+//# pragma omp parallel for
+ for (int i = 0; i < aln.size(); i++) {
+ idx_ab[aln[i]->read_A_id_][aln[i]->read_B_id_] = std::vector<LOverlap *>();
+ }
+
+ for (int i = 0; i < aln.size(); i++) {
+ idx_ab[aln[i]->read_A_id_][aln[i]->read_B_id_].push_back(aln[i]);
+ }
+
+ int n_overlaps = 0;
+ int n_rev_overlaps = 0;
+ for (int i = 0; i < aln.size(); i++) {
+ n_overlaps++;
+ n_rev_overlaps += aln[i]->reverse_complement_match_;
+ }
+
+ console->info("overlaps {} rev_overlaps {}", n_overlaps, n_rev_overlaps);
+
+ console->info("index finished");
+ console->info("Number reads {}", n_read);
+
+
+ for (int i = 0; i < n_read; i++) {
+ bool contained = false;
+ //std::cout<< "Testing opt " << i << std::endl;
+ if (reads[i]->active == false) {
+ continue;
+ }
+
+ int containing_read;
+
+ for (std::unordered_map<int, std::vector<LOverlap *> >::iterator it = idx_ab[i].begin();
+ it != idx_ab[i].end(); it++) {
+ std::sort(it->second.begin(), it->second.end(), compare_overlap);//Sort overlaps by lengths
+ //std::cout<<"Giving input to ProcessAlignment "<<it->second.size() <<std::endl;
+
+ if (it->second.size() > 0) {
+ //Figure out if read is contained
+ LOverlap *ovl = it->second[0];
+ bool contained_alignment;
+
+ if (strlen(name_db) > 0)
+ contained_alignment = ProcessAlignment(ovl, reads[ovl->read_A_id_],
+ reads[ovl->read_B_id_], ALN_THRESHOLD, THETA, THETA2, true);
+ else
+ contained_alignment = ProcessAlignment(ovl, reads[ovl->read_A_id_],
+ reads[ovl->read_B_id_], ALN_THRESHOLD, THETA, THETA2, false);
+ if (contained_alignment == true) {
+ containing_read = ovl->read_B_id_;
+ }
+
+ if (reads[ovl->read_B_id_]->active == true)
+ contained = contained or contained_alignment;
+
+ //Filter matches that matter.
+ //TODO Figure out a way to do this more efficiently
+ if ((ovl->match_type_ == FORWARD) or (ovl->match_type_ == FORWARD_INTERNAL))
+ matches_forward[i].push_back(it->second[0]);
+ else if ((ovl->match_type_ == BACKWARD) or (ovl->match_type_ == BACKWARD_INTERNAL))
+ matches_backward[i].push_back(it->second[0]);
+
+ }
+
+
+ if ((it->second.size() > 1) and (USE_TWO_MATCHES)) {
+ //Figure out if read is contained
+ LOverlap *ovl = it->second[1];
+ bool contained_alignment;
+
+ if (strlen(name_db) > 0)
+ contained_alignment = ProcessAlignment(ovl, reads[ovl->read_A_id_],
+ reads[ovl->read_B_id_], ALN_THRESHOLD, THETA, THETA2, true);
+ else
+ contained_alignment = ProcessAlignment(ovl, reads[ovl->read_A_id_],
+ reads[ovl->read_B_id_], ALN_THRESHOLD, THETA, THETA2, false);
+ if (contained_alignment == true) {
+ containing_read = ovl->read_B_id_;
+ }
+
+ if (reads[ovl->read_B_id_]->active == true)
+ contained = contained or contained_alignment;
+
+ //Filter matches that matter.
+ //TODO Figure out a way to do this more efficiently
+ if ((ovl->match_type_ == FORWARD) or (ovl->match_type_ == FORWARD_INTERNAL))
+ matches_forward[i].push_back(it->second[1]);
+ else if ((ovl->match_type_ == BACKWARD) or (ovl->match_type_ == BACKWARD_INTERNAL))
+ matches_backward[i].push_back(it->second[1]);
+
+ }
+
+
+
+
+ }
+ if (contained) {
+ reads[i]->active = false;
+ contained_out << i << "\t" << containing_read << std::endl;
+
+ }
+ }
+
+
+ for (int i = 0; i < n_read; i++) {//Isn't this just 0 or 1?
+ num_overlaps += matches_forward[i].size() + matches_backward[i].size();
+ for (int j = 0; j < matches_forward[i].size(); j++)
+ rev_complemented_matches += matches_forward[i][j]->reverse_complement_match_;
+ for (int j = 0; j < matches_backward[i].size(); j++)
+ rev_complemented_matches += matches_backward[i][j]->reverse_complement_match_;
+ }
+ console->info("{} overlaps", num_overlaps);
+ console->info("{} rev overlaps", rev_complemented_matches);
+
+ num_active_read = 0;
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) {
+ num_active_read++;
+ maximal_reads << i << std::endl;
+ }
+ }
+ console->info("removed contained reads, active reads: {}", num_active_read);
+
+ num_active_read = 0;
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) num_active_read++;
+ }
+ console->info("active reads: {}", num_active_read);
+
+ num_overlaps = 0;
+ num_forward_overlaps = 0;
+ num_forward_internal_overlaps = 0;
+ num_reverse_overlaps = 0;
+ num_reverse_internal_overlaps = 0;
+ rev_complemented_matches = 0;
+ int rev_complemented_fwd_matches(0), rev_complemented_bck_matches(0), rev_complemented_fwd_int_matches(0),
+ rev_complemented_bck_int_matches(0);
+
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) {
+ for (int j = 0; j < matches_forward[i].size(); j++) {
+ if (reads[matches_forward[i][j]->read_B_id_]->active) {
+ num_overlaps++;
+ if (matches_forward[i][j]->match_type_ == FORWARD) {
+ num_forward_overlaps++;
+ rev_complemented_fwd_matches += matches_forward[i][j]->reverse_complement_match_;
+ }
+ else if (matches_forward[i][j]->match_type_ == FORWARD_INTERNAL) {
+ num_forward_internal_overlaps++;
+ rev_complemented_fwd_int_matches += matches_forward[i][j]->reverse_complement_match_;
+ }
+ if (matches_forward[i][j]->reverse_complement_match_ == 1)
+ rev_complemented_matches++;
+ }
+ }
+ //std::cout <<"First for done "<<std::endl;
+ for (int j = 0; j < matches_backward[i].size(); j++) {
+ if (reads[matches_backward[i][j]->read_B_id_]->active) {
+ num_overlaps++;
+ if (matches_backward[i][j]->match_type_ == BACKWARD) {
+ num_reverse_overlaps++;
+ rev_complemented_bck_matches += matches_backward[i][j]->reverse_complement_match_;
+ }
+ else if (matches_backward[i][j]->match_type_ == BACKWARD_INTERNAL) {
+ num_reverse_internal_overlaps++;
+ rev_complemented_bck_int_matches += matches_backward[i][j]->reverse_complement_match_;
+ }
+ if (matches_backward[i][j]->reverse_complement_match_ == 1)
+ rev_complemented_matches++;
+ }
+ }
+ }
+ }
+ /*std::cout<<num_overlaps << " overlaps " << num_forward_overlaps << " fwd overlaps "
+ << num_forward_internal_overlaps << " fwd internal overlaps "<< num_reverse_overlaps
+ << " backward overlaps " << num_reverse_internal_overlaps
+ << " backward internal overlaps "<< rev_complemented_matches << " reverse complement overlaps\n"
+ << rev_complemented_fwd_matches <<" rev cmplment fwd matches "
+ << rev_complemented_fwd_int_matches << " rev cmplement fwd int matches "
+ << rev_complemented_bck_matches << " rev cmplment bck matches "
+ << rev_complemented_bck_int_matches << " rev cmplement bck int matches " << std::endl;*/
+
+# pragma omp parallel for
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) {
+ std::sort(matches_forward[i].begin(), matches_forward[i].end(), compare_overlap_weight);
+ std::sort(matches_backward[i].begin(), matches_backward[i].end(), compare_overlap_weight);
+ }
+ }
+
+ // temporary
+ FILE *G_out;
+ G_out = fopen("edges.g_out.txt", "w");
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) {
+ for (int j = 0; j < matches_forward[i].size(); j++) {
+ if (reads[matches_forward[i][j]->read_B_id_]->active) {
+ fprintf(G_out, "%d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d] \n",
+ matches_forward[i][j]->read_A_id_, matches_forward[i][j]->read_B_id_,
+ matches_forward[i][j]->weight, matches_forward[i][j]->reverse_complement_match_,
+ matches_forward[i][j]->match_type_, matches_forward[i][j]->eff_read_A_match_start_,
+ matches_forward[i][j]->eff_read_A_match_end_,
+ matches_forward[i][j]->eff_read_B_match_start_,
+ matches_forward[i][j]->eff_read_B_match_end_,
+ matches_forward[i][j]->eff_read_A_start_, matches_forward[i][j]->eff_read_A_end_,
+ matches_forward[i][j]->eff_read_B_start_, matches_forward[i][j]->eff_read_B_end_);
+ break;
+ }
+ }
+ }
+ }
+
+ fprintf(G_out, "bkw\n");
+
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) {
+ for (int j = 0; j < matches_backward[i].size(); j++) {
+ if (reads[matches_backward[i][j]->read_B_id_]->active) {
+ fprintf(G_out, "%d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d] \n",
+ matches_backward[i][j]->read_A_id_, matches_backward[i][j]->read_B_id_,
+ matches_backward[i][j]->weight, matches_backward[i][j]->reverse_complement_match_,
+ matches_backward[i][j]->match_type_, matches_backward[i][j]->eff_read_A_match_start_,
+ matches_backward[i][j]->eff_read_A_match_end_,
+ matches_backward[i][j]->eff_read_B_match_start_,
+ matches_backward[i][j]->eff_read_B_match_end_,
+ matches_backward[i][j]->eff_read_A_start_, matches_backward[i][j]->eff_read_A_end_,
+ matches_backward[i][j]->eff_read_B_start_, matches_backward[i][j]->eff_read_B_end_);
+ break;
+ }
+ }
+ }
+ }
+
+ FILE *out_backup;
+ out_backup = fopen("edges.fwd.backup.txt", "w");
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active)
+ for (int j = 0; j < matches_forward[i].size(); j++) {
+ if (reads[matches_forward[i][j]->read_B_id_]->active)
+ fprintf(out_backup, "%d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d] \n",
+ matches_forward[i][j]->read_A_id_, matches_forward[i][j]->read_B_id_,
+ matches_forward[i][j]->weight, matches_forward[i][j]->reverse_complement_match_,
+ matches_forward[i][j]->match_type_, matches_forward[i][j]->eff_read_A_match_start_,
+ matches_forward[i][j]->eff_read_A_match_end_,
+ matches_forward[i][j]->eff_read_B_match_start_,
+ matches_forward[i][j]->eff_read_B_match_end_,
+ matches_forward[i][j]->eff_read_A_start_, matches_forward[i][j]->eff_read_A_end_,
+ matches_forward[i][j]->eff_read_B_start_, matches_forward[i][j]->eff_read_B_end_);
+ }
+ }
+ fclose(out_backup);
+ out_backup = fopen("edges.bkw.backup.txt", "w");
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active)
+ for (int j = 0; j < matches_backward[i].size(); j++) {
+ if (reads[matches_backward[i][j]->read_B_id_]->active)
+ fprintf(out_backup, "%d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d] \n",
+ matches_backward[i][j]->read_A_id_, matches_backward[i][j]->read_B_id_,
+ matches_backward[i][j]->weight, matches_backward[i][j]->reverse_complement_match_,
+ matches_backward[i][j]->match_type_, matches_backward[i][j]->eff_read_A_match_start_,
+ matches_backward[i][j]->eff_read_A_match_end_,
+ matches_backward[i][j]->eff_read_B_match_start_,
+ matches_backward[i][j]->eff_read_B_match_end_,
+ matches_backward[i][j]->eff_read_A_start_, matches_backward[i][j]->eff_read_A_end_,
+ matches_backward[i][j]->eff_read_B_start_, matches_backward[i][j]->eff_read_B_end_);
+ }
+ }
+ fclose(out_backup);
+
+
+ FILE *out_g1;
+ FILE *out_g2;
+ FILE *out_hg;
+ FILE *out_hg2;
+ FILE *out_greedy;
+ FILE *out_skipped;
+
+ out_g1 = fopen((std::string(out_name) + ".edges.1").c_str(), "w");
+ out_g2 = fopen((std::string(out_name) + ".edges.2").c_str(), "w");
+
+ // Output files for edges
+ out_hg = fopen((std::string(out_name) + ".edges.hinges").c_str(), "w");
+ out_hg2 = fopen((std::string(out_name) + ".edges.hinges2").c_str(), "w");
+ out_greedy = fopen((std::string(out_name) + ".edges.greedy").c_str(), "w");
+ out_skipped = fopen((std::string(out_name) + ".edges.skipped").c_str(), "w");
+
+ // All hinges imported from the hinges.txt file
+ std::unordered_map<int, std::vector<Hinge> > hinges_vec;
+
+ // Hinges that were previously killed in filter.cpp due to bridging
+ std::unordered_map<int, std::vector<Hinge> > killed_hinges_vec;
+
+ // Hinges that will be killed because they match a hinge in killed_hinges_vec
+ std::unordered_map<int, std::vector<Hinge> > new_killed_hinges_vec;
+
+ int n = 0;
+ int kh = 0;
+ for (int i = 0; i < n_read; i++) {
+ hinges_vec[i] = std::vector<Hinge>();
+ std::set<std::pair<int, int> > surviving_hinges(marked_hinges[i].begin(), marked_hinges[i].end());
+ for (int j = 0; j < marked_hinges[i].size(); j++) {
+ hinges_vec[i].push_back(Hinge(marked_hinges[i][j].first, marked_hinges[i][j].second, true));
+ if (reads[i]->active) {
+ n++;
+ }
+ }
+ for (int j = 0; j < marked_repeats[i].size(); j++) {
+ if (surviving_hinges.find(marked_repeats[i][j]) == surviving_hinges.end()) {
+ killed_hinges_vec[i].push_back(Hinge(marked_repeats[i][j].first, marked_repeats[i][j].second, false));
+ if (reads[i]->active) {
+ kh++;
+ }
+ }
+ }
+ }
+ console->info("{} killed hinges", kh);
+ console->info("{} hinges", n);
+
+ std::ofstream killed_out(out + ".killed.hinges");
+ for (int i = 0; i < n_read; i++) {
+ killed_out << i << " ";
+ for (int j = 0; j < killed_hinges_vec[i].size(); j++) {
+ killed_out << killed_hinges_vec[i][j].type << " " << killed_hinges_vec[i][j].pos << " ";
+ }
+ killed_out << std::endl;
+ }
+
+ n = 0;
+ for (int i = 0; i < n_read; i++) {
+ for (int j = 0; j < hinges_vec[i].size(); j++) {
+ if ((reads[i]->active) and (hinges_vec[i][j].active)) n++;
+ }
+ }
+ console->info("{} active hinges", n);
+
+ /**
+ * Switch to naive hinge filtering
+ * Keep the hinge only if there are HINGE_READS reads that start near the hinge and continue to the end of the read
+ */
+
+ /*int HINGE_READS = 1;
+
+ for (int i = 0; i < n_read; i++) {
+ for (int j = 0; j < hinges_vec[i].size(); j++) {
+ int num_near_hinge_reads = 0;
+ if ((reads[i]->active) and (hinges_vec[i][j].active) and (hinges_vec[i][j].type == 1)) {
+ // count reads that start near the hinge and continue to the end of the read
+ printf("read %d hinge %d type %d pos %d ", i, j, 1, hinges_vec[i][j].pos);
+ num_near_hinge_reads = 0;
+ for (int k = 0; k < matches_forward[i].size(); k ++ ) {
+ if ((matches_forward[i][k]->match_type_ == FORWARD) and
+ (reads[matches_forward[i][k]->read_B_id_]->active)
+ and abs((matches_forward[i][k]->eff_read_A_match_start_ - hinges_vec[i][j].pos ) < 300))
+ num_near_hinge_reads ++;
+ }
+ printf("num %d\n", num_near_hinge_reads);
+ } else if ((reads[i]->active) and (hinges_vec[i][j].active) and (hinges_vec[i][j].type == -1)) {
+ printf("read %d hinge %d type %d pos %d ", i, j, -1, hinges_vec[i][j].pos);
+ num_near_hinge_reads = 0;
+ for (int k = 0; k < matches_backward[i].size(); k ++ ) {
+ if ((matches_backward[i][k]->match_type_ == BACKWARD) and
+ (reads[matches_backward[i][k]->read_B_id_]->active)
+ and (abs(matches_backward[i][k]->eff_read_A_match_end_ - hinges_vec[i][j].pos ) < 300))
+ num_near_hinge_reads ++;
+ }
+ printf("num %d\n", num_near_hinge_reads);
+ }
+ //if (num_near_hinge_reads != HINGE_READS) hinges_vec[i][j].active = false;
+ }
+ }*/
+
+
+
+
+ // TODO: Technically we don't need this filtering, as we can use the hinge graph
+ // construction to do the filtering as well
+
+
+
+ for (int i = 0; i < n_read; i++) {
+ //This is in essence the filtering step.
+ //For each read, find the best forward match and remove all in-hinges starting after the start
+ //of the corresponding match.
+ //Update 2/19: we now remove any in-hinge (out-hinge) if there is a FORWARD or FORWARD_INTERNAL
+ //(BACKWARD or BACKWARD_INTERNAL) match that starts on or before (ends on or after) the hinge,
+ //within the KILL_HINGE_INTERNAL_ALLOWANCE / KILL_HINGE_OVERLAP_ALLOWANCE error margins.
+
+ if (reads[i]->active) {
+ int forward = 0;
+ int backward = 0;
+ for (int j = 0; j < matches_forward[i].size(); j++) {
+ if (matches_forward[i][j]->active) {
+ if (((matches_forward[i][j]->match_type_ == FORWARD) or
+ (matches_forward[i][j]->match_type_ == FORWARD_INTERNAL)) and
+ (reads[matches_forward[i][j]->read_B_id_]->active)) {
+
+ for (int k = 0; k < hinges_vec[i].size(); k++) {
+ if ((((matches_forward[i][j]->eff_read_A_match_start_ <
+ hinges_vec[i][k].pos + KILL_HINGE_INTERNAL_ALLOWANCE) and
+ (matches_forward[i][j]->match_type_ == FORWARD_INTERNAL))
+ or ((matches_forward[i][j]->eff_read_A_match_start_ <
+ hinges_vec[i][k].pos - KILL_HINGE_OVERLAP_ALLOWANCE) and
+ (matches_forward[i][j]->match_type_ == FORWARD)))
+ and (hinges_vec[i][k].type == 1)) {
+ hinges_vec[i][k].active = false;
+
+ }
+ }
+ //}
+ //forward++;
+ }
+ }
+ }
+
+ for (int j = 0; j < matches_backward[i].size(); j++) {
+ if (matches_backward[i][j]->active) {
+ if (((matches_backward[i][j]->match_type_ == BACKWARD) or
+ (matches_backward[i][j]->match_type_ == BACKWARD_INTERNAL)) and
+ (reads[matches_backward[i][j]->read_B_id_]->active)) {
+ // if (backward < 1) {
+ //remove certain hinges
+ for (int k = 0; k < hinges_vec[i].size(); k++) {
+ if ((((matches_backward[i][j]->eff_read_A_match_end_ >
+ hinges_vec[i][k].pos - KILL_HINGE_INTERNAL_ALLOWANCE) and
+ (matches_backward[i][j]->match_type_ == BACKWARD_INTERNAL)) or
+ ((matches_backward[i][j]->eff_read_A_match_end_ >
+ hinges_vec[i][k].pos + KILL_HINGE_OVERLAP_ALLOWANCE) and
+ (matches_backward[i][j]->match_type_ == BACKWARD)))
+ and (hinges_vec[i][k].type == -1)) {
+ hinges_vec[i][k].active = false;
+
+ }
+ }
+ //}
+ //backward++;
+ }
+ }
+ }
+ }
+ }
+
+
+
+ console->info("Building hinge graph");
+
+ //ogdf::Graph hinge_graph;
+ //ogdf::HashArray<int, ogdf::node> hinge_graph_node_list;
+
+ int num_hinges(0);
+ for (int i = 0; i < n_read; i++) {
+ num_hinges+=hinges_vec[i].size();
+ }
+
+ //ogdf::Graph hinge_graph;
+ //ogdf::HashArray<int, ogdf::node> hinge_graph_node_list;
+ console->info("num hinges {}", num_hinges);
+ Graph hinge_graph (num_hinges);
+ int hg(0);
+ std::map< std::pair <int, int>, int> hinge_graph_node_map;
+ std::map<int, std:: pair<int,int> > hinge_graph_node_map_rev;
+
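+ // Assign each (read id, hinge index) pair a unique node id in the hinge graph,
+ // keeping the reverse map so node ids can be traced back to their hinges later.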
+ for (int i=0; i< hinges_vec.size(); i++){
+ for(int j=0; j < hinges_vec[i].size(); j++){
+ hinge_graph_node_map[std::make_pair(i,j)]=hg;
+ hinge_graph_node_map_rev[hg]= std::make_pair(i,j);
+ hg++;
+ }
+ }
+
+ // Hinge graph construction
+ // En passant, we identify the new_killed_hinges
+
+ FILE *out_hgraph;
+ out_hgraph = fopen((std::string(out_name) + ".hgraph").c_str(), "w");
+
+ FILE *out_debug;
+ out_debug = fopen((std::string(out_name) + ".debug").c_str(), "w");
+
+ FILE * OverlapDebugFile;
+ OverlapDebugFile = fopen("overlap_debug.txt", "w");
+
+ int pos_B;
+
+ for (int i = 0; i < n_read; i++) {
+
+ if (reads[i]->active) {
+
+ for (int k = 0; k < hinges_vec[i].size(); k++) {
+
+ for (int j = 0; j < matches_forward[i].size(); j++) {
+ if (matches_forward[i][j]->active) {
+ if (((matches_forward[i][j]->match_type_ == FORWARD) or
+ (matches_forward[i][j]->match_type_ == FORWARD_INTERNAL)) and
+ (reads[matches_forward[i][j]->read_B_id_]->active)) {
+
+
+ // Here we check whether read B has a hinge matching hinges_vec[i][k]
+
+ // Should we also check whether hinges are active?
+
+ pos_B = matches_forward[i][j]->GetMatchingPosition(hinges_vec[i][k].pos);
+
+// console->info("Matching position is {}", pos_B); // for debugging
+ int req_hinge_type;
+
+ int rev_int = 0;
+
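+ // For a reverse-complement match, the corresponding hinge on read B has the
+ // opposite orientation, so the required hinge type is flipped.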
+ if (matches_forward[i][j]->reverse_complement_match_ == true) {
+ req_hinge_type = -1 * hinges_vec[i][k].type;
+ rev_int = 1;
+ }
+ else {
+ req_hinge_type = hinges_vec[i][k].type;
+ }
+// std::cout << req_hinge_type << std::endl;
+
+
+ int b_id = matches_forward[i][j]->read_B_id_;
+
+
+ for (int l = 0; l < hinges_vec[b_id].size(); l++) {
+
+ if ((hinges_vec[b_id][l].pos < pos_B + MATCHING_HINGE_SLACK) and
+ (hinges_vec[b_id][l].pos > pos_B - MATCHING_HINGE_SLACK)) {
+
+ // found a matching hinge
+
+ if (req_hinge_type == hinges_vec[b_id][l].type) {
+
+
+ std::pair <int,int> first_coord, second_coord;
+
+ first_coord=std::make_pair(i,k);
+ second_coord=std::make_pair(b_id,l);
+
+
+ if (hinges_vec[i][k].type == 1) {
+
+ add_edge(hinge_graph_node_map[first_coord], hinge_graph_node_map[second_coord], hinge_graph);
+ fprintf(out_hgraph, "%d %d %d %d %d %d\n",
+ i,
+ b_id,
+ hinges_vec[i][k].pos,
+ hinges_vec[b_id][l].pos, 1,
+ rev_int);
+
+ }
+ else {
+
+ add_edge(hinge_graph_node_map[second_coord], hinge_graph_node_map[first_coord], hinge_graph);
+ fprintf(out_hgraph, "%d %d %d %d %d %d\n",
+ b_id,
+ i,
+ hinges_vec[b_id][l].pos,
+ hinges_vec[i][k].pos, 1,
+ rev_int);
+ }
+ }
+ }
+
+ }
+
+ for (int l = 0; l < killed_hinges_vec[b_id].size(); l++) {
+// std::cout << i <<"\t" << b_id <<"\t" << k << "\t" << l <<std::endl;
+
+ if ((killed_hinges_vec[b_id][l].pos < pos_B + MATCHING_HINGE_SLACK) and
+ (killed_hinges_vec[b_id][l].pos > pos_B - MATCHING_HINGE_SLACK)) {
+
+ // found a matching hinge
+ if (req_hinge_type == killed_hinges_vec[b_id][l].type) {
+
+ if (hinges_vec[i][k].type == 1) {
+
+ fprintf(out_hgraph, "%d %d %d %d %d %d\n",
+ i,
+ b_id,
+ hinges_vec[i][k].pos,
+ killed_hinges_vec[b_id][l].pos, 0,
+ rev_int);
+ }
+ else {
+
+ fprintf(out_hgraph, "%d %d %d %d %d %d\n",
+ b_id,
+ i,
+ killed_hinges_vec[b_id][l].pos,
+ hinges_vec[i][k].pos, 0,
+ rev_int);
+ }
+
+ if (matches_forward[i][j]->match_type_ == FORWARD) {
+
+ new_killed_hinges_vec[i].push_back(Hinge(hinges_vec[i][k].pos,hinges_vec[i][k].type,false));
+
+ if (hinges_vec[i][k].type == -1) {
+ //console->info("This should not have happened.");
+ // If this is a -1 hinge, read i should also bridge the repeat,
+ // and hinges_vec[i][k] would have been killed in filter
+
+ fprintf(out_debug,"%d %d %d %d %d [%d %d] [%d %d] [%d %d] [%d %d] \n",
+ matches_forward[i][j]->read_A_id_, matches_forward[i][j]->read_B_id_,
+ matches_forward[i][j]->weight, matches_forward[i][j]->reverse_complement_match_,
+ matches_forward[i][j]->match_type_, matches_forward[i][j]->eff_read_A_match_start_,
+ matches_forward[i][j]->eff_read_A_match_end_,
+ matches_forward[i][j]->eff_read_B_match_start_,
+ matches_forward[i][j]->eff_read_B_match_end_,
+ matches_forward[i][j]->eff_read_A_start_, matches_forward[i][j]->eff_read_A_end_,
+ matches_forward[i][j]->eff_read_B_start_, matches_forward[i][j]->eff_read_B_end_);
+
+ fprintf(out_debug, "%d %d %d %d\n", hinges_vec[i][k].pos,
+ hinges_vec[i][k].type,
+ killed_hinges_vec[b_id][l].pos,
+ killed_hinges_vec[b_id][l].type);
+
+ }
+
+ }
+
+
+ }
+
+
+ }
+
+
+ }
+
+
+ }
+
+ }
+ }
+
+
+ for (int j = 0; j < matches_backward[i].size(); j++) {
+ if (matches_backward[i][j]->active) {
+ if (((matches_backward[i][j]->match_type_ == BACKWARD) or
+ (matches_backward[i][j]->match_type_ == BACKWARD_INTERNAL)) and
+ (reads[matches_backward[i][j]->read_B_id_]->active)) {
+
+ // Need to check whether read B has a hinge matching hinges_vec[i][k]
+
+
+ pos_B = matches_backward[i][j]->GetMatchingPosition(hinges_vec[i][k].pos);
+
+// console->info("Matching position is {}", pos_B); // for debugging
+
+ int req_hinge_type;
+
+ int rev_int = 0;
+
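+ // As in the forward case: flip the required hinge type for reverse-complement matches.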
+ if (matches_backward[i][j]->reverse_complement_match_ == true) {
+ req_hinge_type = -1 * hinges_vec[i][k].type;
+ rev_int = 1;
+ }
+ else {
+ req_hinge_type = hinges_vec[i][k].type;
+ }
+// std::cout << req_hinge_type << std::endl;
+
+ int b_id = matches_backward[i][j]->read_B_id_;
+ for (int l = 0; l < hinges_vec[b_id].size(); l++) {
+
+ if ((hinges_vec[b_id][l].pos < pos_B + MATCHING_HINGE_SLACK) and
+ (hinges_vec[b_id][l].pos > pos_B - MATCHING_HINGE_SLACK)) {
+
+
+ // found a matching hinge
+ std::pair <int,int> first_coord, second_coord;
+ first_coord=std::make_pair(i,k);
+ second_coord=std::make_pair(b_id,l);
+
+
+ if (req_hinge_type == hinges_vec[b_id][l].type) {
+
+
+ if (hinges_vec[i][k].type == -1) {
+
+ add_edge(hinge_graph_node_map[first_coord], hinge_graph_node_map[second_coord], hinge_graph);
+ fprintf(out_hgraph, "%d %d %d %d %d %d\n",
+ i,
+ b_id,
+ hinges_vec[i][k].pos,
+ hinges_vec[b_id][l].pos, 1,
+ rev_int);
+
+ }
+ else {
+
+ add_edge(hinge_graph_node_map[second_coord], hinge_graph_node_map[first_coord], hinge_graph);
+ fprintf(out_hgraph, "%d %d %d %d %d %d\n",
+ b_id,
+ i,
+ hinges_vec[b_id][l].pos,
+ hinges_vec[i][k].pos, 1,
+ rev_int);
+ }
+
+
+ }
+ }
+
+ }
+ for (int l = 0; l < killed_hinges_vec[b_id].size(); l++) {
+
+ if ((killed_hinges_vec[b_id][l].pos < pos_B + MATCHING_HINGE_SLACK) and
+ (killed_hinges_vec[b_id][l].pos > pos_B - MATCHING_HINGE_SLACK)) {
+
+ // found a matching hinge
+
+ if (req_hinge_type == killed_hinges_vec[b_id][l].type) {
+
+ if (hinges_vec[i][k].type == -1) {
+
+ fprintf(out_hgraph, "%d %d %d %d %d %d\n",
+ i,
+ b_id,
+ hinges_vec[i][k].pos,
+ killed_hinges_vec[b_id][l].pos, 0,
+ rev_int);
+
+ }
+ else {
+
+ fprintf(out_hgraph, "%d %d %d %d %d %d\n",
+ b_id,
+ i,
+ killed_hinges_vec[b_id][l].pos,
+ hinges_vec[i][k].pos, 0,
+ rev_int);
+ }
+
+ }
+
+ if (matches_backward[i][j]->match_type_ == BACKWARD) {
+
+ new_killed_hinges_vec[i].push_back(Hinge(hinges_vec[i][k].pos,hinges_vec[i][k].type,false));
+
+ if (hinges_vec[i][k].type != -1) {
+ //console->info("This should not have happened 2.");
+ // If this is a +1 hinge, read i should also bridge the repeat,
+ // and hinges_vec[i][k] would have been killed in filter
+ }
+
+ }
+
+
+ }
+
+ }
+
+
+ }
+ }
+ }
+
+ }
+ }
+ }
+
+
+ console->info("Hinge graph built");
+ std::vector<int> component(num_vertices(hinge_graph));
+ int num = connected_components(hinge_graph, &component[0]);
+
+ std::vector<int>::size_type i;
+ std::cout << "Total number of components: " << num << std::endl;
+
+ std::map<int,int> component_size;
+ for (i = 0; i != component.size(); ++i){ // iterates over every hinge-graph node, including i = 0
+ if ( component_size.find(component[i]) == component_size.end() ){
+ component_size[component[i]]=1;
+ }
+ else
+ component_size[component[i]]+=1;
+ }
+// std::unordered_map<int, std::vector<Hinge> > filtered_hinges_vec;
+// for (int i = 0; i < n_read; i++) {
+// filtered_hinges_vec[i] = std::vector<Hinge>();
+// }
+
+
+
+
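+ // Deactivate hinges whose hinge-graph connected component is smaller than
+ // MIN_CONNECTED_COMPONENT_SIZE, i.e. hinges supported by too few matching hinges on other reads.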
+ for (int i = 0; i != component.size(); ++i) {
+ if (component_size[component[i]] < MIN_CONNECTED_COMPONENT_SIZE) {
+ int ind1, ind2;
+ ind1 = hinge_graph_node_map_rev[i].first;
+ ind2 = hinge_graph_node_map_rev[i].second;
+ hinges_vec[ind1][ind2].active=false;
+// filtered_hinges_vec[ind1].push_back(hinges_vec[ind1][ind2]);
+ }
+
+ }
+
+
+// std::map< std::pair <int, int>, int> hinge_graph_node_map;
+
+
+ std::map<int, std::pair <int, int>> component_sink;
+ for (i = 0; i != component.size(); ++i){
+ int ind1, ind2;
+ ind1 = hinge_graph_node_map_rev[i].first;
+ ind2 = hinge_graph_node_map_rev[i].second;
+
+ // for now let us just pick an arbitrary active hinge as the component main sink
+ if ( hinges_vec[ind1][ind2].active == true )
+ component_sink[component[i]]= std::make_pair(ind1,ind2);
+
+ }
+
+ n = 0;
+ FILE *out_hglist;
+ out_hglist = fopen((std::string(out_name) + ".hinge.list").c_str(), "w");
+ for (int i = 0; i < n_read; i++) {
+ for (int j = 0; j < hinges_vec[i].size(); j++) {
+ if ((reads[i]->active) and ((hinges_vec[i][j].active))) {
+ fprintf(out_hglist, "%d %d %d\n", i, marked_hinges[i][j].first, marked_hinges[i][j].second);
+ n++;
+ }
+ }
+ }
+ fclose(out_hglist);
+ console->info("after filter {} active hinges", n);
+
+
+ // filter hinges
+ std::vector<bool> repeat_status_front;
+ std::vector<bool> repeat_status_back;
+
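+ // repeat_status_front[i]: read i has an active out-hinge (type -1);
+ // repeat_status_back[i]: read i has an active in-hinge (type 1).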
+ for (int i = 0; i < n_read; i++) {
+ bool in = false;
+ bool out = false;
+ for (int j = 0; j < hinges_vec[i].size(); j++) {
+ if ((hinges_vec[i][j].active) and (hinges_vec[i][j].type == 1)) in = true;
+ if ((hinges_vec[i][j].active) and (hinges_vec[i][j].type == -1)) out = true;
+ }
+ repeat_status_front.push_back(out);
+ repeat_status_back.push_back(in);
+ }
+
+ //Perform greedy graph construction and write outputs out and out2
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) {
+ int forward = 0;
+ int backward = 0;
+ for (int j = 0; j < matches_forward[i].size(); j++) {
+ if (matches_forward[i][j]->active) {
+ if ((matches_forward[i][j]->match_type_ == FORWARD) and
+ (reads[matches_forward[i][j]->read_B_id_]->active)) {
+ /*if (not repeat_status_back[i])*/
+ {
+ if (forward < 1) {
+
+ PrintOverlapToFile(out_greedy, matches_forward[i][j]);
+
+ if (matches_forward[i][j]->reverse_complement_match_ == 0)
+ fprintf(out_g1, "%d %d %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ matches_forward[i][j]->read_A_id_,
+ matches_forward[i][j]->read_B_id_, matches_forward[i][j]->weight,
+ matches_forward[i][j]->eff_read_A_match_start_,
+ matches_forward[i][j]->eff_read_A_match_end_,
+ matches_forward[i][j]->eff_read_B_match_start_,
+ matches_forward[i][j]->eff_read_B_match_end_,
+ matches_forward[i][j]->eff_read_A_start_,
+ matches_forward[i][j]->eff_read_A_end_,
+ matches_forward[i][j]->eff_read_B_start_,
+ matches_forward[i][j]->eff_read_B_end_);
+ else
+ fprintf(out_g1, "%d %d' %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ matches_forward[i][j]->read_A_id_,
+ matches_forward[i][j]->read_B_id_, matches_forward[i][j]->weight,
+ matches_forward[i][j]->eff_read_A_match_start_,
+ matches_forward[i][j]->eff_read_A_match_end_,
+ matches_forward[i][j]->eff_read_B_match_start_,
+ matches_forward[i][j]->eff_read_B_match_end_,
+ matches_forward[i][j]->eff_read_A_start_,
+ matches_forward[i][j]->eff_read_A_end_,
+ matches_forward[i][j]->eff_read_B_start_,
+ matches_forward[i][j]->eff_read_B_end_);
+
+ if (matches_forward[i][j]->reverse_complement_match_ == 0)
+ fprintf(out_g2, "%d' %d' %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ matches_forward[i][j]->read_B_id_,
+ matches_forward[i][j]->read_A_id_, matches_forward[i][j]->weight,
+ matches_forward[i][j]->eff_read_A_match_start_,
+ matches_forward[i][j]->eff_read_A_match_end_,
+ matches_forward[i][j]->eff_read_B_match_start_,
+ matches_forward[i][j]->eff_read_B_match_end_,
+ matches_forward[i][j]->eff_read_A_start_,
+ matches_forward[i][j]->eff_read_A_end_,
+ matches_forward[i][j]->eff_read_B_start_,
+ matches_forward[i][j]->eff_read_B_end_);
+ else
+ fprintf(out_g2, "%d %d' %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ matches_forward[i][j]->read_B_id_,
+ matches_forward[i][j]->read_A_id_, matches_forward[i][j]->weight,
+ matches_forward[i][j]->eff_read_A_match_start_,
+ matches_forward[i][j]->eff_read_A_match_end_,
+ matches_forward[i][j]->eff_read_B_match_start_,
+ matches_forward[i][j]->eff_read_B_match_end_,
+ matches_forward[i][j]->eff_read_A_start_,
+ matches_forward[i][j]->eff_read_A_end_,
+ matches_forward[i][j]->eff_read_B_start_,
+ matches_forward[i][j]->eff_read_B_end_);
+
+ }
+ }
+ forward++;
+ }
+ }
+ }
+ for (int j = 0; j < matches_backward[i].size(); j++) {
+ if (matches_backward[i][j]->active) {
+ if ((matches_backward[i][j]->match_type_ == BACKWARD) and
+ (reads[matches_backward[i][j]->read_B_id_]->active)) {
+ /*if (not repeat_status_back[i])*/
+ {
+ if (backward < 1) {
+
+ PrintOverlapToFile(out_greedy, matches_backward[i][j]);
+
+ if (matches_backward[i][j]->reverse_complement_match_ == 0)
+ fprintf(out_g1, "%d %d %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ matches_backward[i][j]->read_A_id_,
+ matches_backward[i][j]->read_B_id_, matches_backward[i][j]->weight,
+ matches_backward[i][j]->eff_read_A_match_start_,
+ matches_backward[i][j]->eff_read_A_match_end_,
+ matches_backward[i][j]->eff_read_B_match_start_,
+ matches_backward[i][j]->eff_read_B_match_end_,
+ matches_backward[i][j]->eff_read_A_start_,
+ matches_backward[i][j]->eff_read_A_end_,
+ matches_backward[i][j]->eff_read_B_start_,
+ matches_backward[i][j]->eff_read_B_end_);
+ else
+ fprintf(out_g1, "%d %d' %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ matches_backward[i][j]->read_A_id_,
+ matches_backward[i][j]->read_B_id_, matches_backward[i][j]->weight,
+ matches_backward[i][j]->eff_read_A_match_start_,
+ matches_backward[i][j]->eff_read_A_match_end_,
+ matches_backward[i][j]->eff_read_B_match_start_,
+ matches_backward[i][j]->eff_read_B_match_end_,
+ matches_backward[i][j]->eff_read_A_start_,
+ matches_backward[i][j]->eff_read_A_end_,
+ matches_backward[i][j]->eff_read_B_start_,
+ matches_backward[i][j]->eff_read_B_end_);
+
+ if (matches_backward[i][j]->reverse_complement_match_ == 0)
+ fprintf(out_g2, "%d' %d' %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ matches_backward[i][j]->read_B_id_,
+ matches_backward[i][j]->read_A_id_, matches_backward[i][j]->weight,
+ matches_backward[i][j]->eff_read_A_match_start_,
+ matches_backward[i][j]->eff_read_A_match_end_,
+ matches_backward[i][j]->eff_read_B_match_start_,
+ matches_backward[i][j]->eff_read_B_match_end_,
+ matches_backward[i][j]->eff_read_A_start_,
+ matches_backward[i][j]->eff_read_A_end_,
+ matches_backward[i][j]->eff_read_B_start_,
+ matches_backward[i][j]->eff_read_B_end_);
+ else
+ fprintf(out_g2, "%d %d' %d [%d %d] [%d %d] [%d %d] [%d %d]\n",
+ matches_backward[i][j]->read_B_id_,
+ matches_backward[i][j]->read_A_id_, matches_backward[i][j]->weight,
+ matches_backward[i][j]->eff_read_A_match_start_,
+ matches_backward[i][j]->eff_read_A_match_end_,
+ matches_backward[i][j]->eff_read_B_match_start_,
+ matches_backward[i][j]->eff_read_B_match_end_,
+ matches_backward[i][j]->eff_read_A_start_,
+ matches_backward[i][j]->eff_read_A_end_,
+ matches_backward[i][j]->eff_read_B_start_,
+ matches_backward[i][j]->eff_read_B_end_);
+ }
+ }
+ backward++;
+ }
+ }
+ }
+ }
+ }
+
+ num_overlaps = 0;
+ num_forward_overlaps=0;
+ num_forward_internal_overlaps=0;
+ num_reverse_overlaps=0;
+ num_reverse_internal_overlaps=0;
+ rev_complemented_matches=0;
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) {
+ for (int j = 0; j < matches_forward[i].size(); j++) {
+ if (reads[matches_forward[i][j]->read_B_id_]->active) {
+ num_overlaps++;
+ if (matches_forward[i][j]->match_type_==FORWARD)
+ num_forward_overlaps++;
+ else if (matches_forward[i][j]->match_type_==FORWARD_INTERNAL)
+ num_forward_internal_overlaps++;
+ if (matches_forward[i][j]->reverse_complement_match_==1)
+ rev_complemented_matches++;
+ }
+ }
+
+ for (int j = 0; j < matches_backward[i].size(); j++) {
+ if (reads[matches_backward[i][j]->read_B_id_]->active) {
+ num_overlaps++;
+ if (matches_backward[i][j]->match_type_==BACKWARD)
+ num_reverse_overlaps++;
+ else if (matches_backward[i][j]->match_type_==BACKWARD_INTERNAL)
+ num_reverse_internal_overlaps++;
+ if (matches_backward[i][j]->reverse_complement_match_==1)
+ rev_complemented_matches++;
+ }
+ }
+ }
+ }
+
+ /*std::cout<<num_overlaps << " overlaps " << num_forward_overlaps << " fwd overlaps "
+ << num_forward_internal_overlaps << " fwd internal overlaps "<< num_reverse_overlaps
+ << " backward overlaps " << num_reverse_internal_overlaps
+ << " backward internal overlaps "<< rev_complemented_matches << " reverse complement overlaps" << std::endl;
+ */
+
+
+ std::ofstream debug_fle("hinge_debug.txt");
+
+ console->info("Starting to build assembly graph.");
+
+
+// int eff_b_id;
+ int hinge_pos = -1;
+
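+ // For each active read, choose at most one forward and one backward edge:
+ // either the first non-poisoned FORWARD (BACKWARD) overlap, or a FORWARD_INTERNAL
+ // (BACKWARD_INTERNAL) overlap whose match start (end) on read B coincides with an
+ // active hinge of the matching orientation; such an internal match may replace the
+ // plain overlap when its weight is within 2*HINGE_SLACK of the current choice.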
+ for (int i = 0; i < n_read; i++) {
+ if (reads[i]->active) {
+
+ int forward = 0;
+ int forward_internal = 0;
+ int backward = 0;
+ int backward_internal = 0;
+
+
+ LOverlap * chosen_match = NULL;
+
+
+ for (int j = 0; j < matches_forward[i].size(); j++){
+
+
+ if (matches_forward[i][j]->active) {
+
+
+ if ((reads[matches_forward[i][j]->read_B_id_]->active)) { // and (forward == 0)) {
+ //printf("hinge size %d\n", hinges_vec[matches_forward[i][j]->read_B_id_].size());
+
+
+ if ((matches_forward[i][j]->match_type_ == FORWARD) and (forward == 0)) {
+
+ // check if read j has new_killed_hinge
+ //TODO: should this be checked for FORWARD_INTERNAL as well?
+
+ bool poisoned = false;
+
+ for (int k = 0; k < new_killed_hinges_vec[i].size(); k++) {
+
+ if ( (matches_forward[i][j]->reverse_complement_match_ != 1) and
+ (new_killed_hinges_vec[i][k].type == -1) and
+ (new_killed_hinges_vec[i][k].pos > matches_forward[i][j]->eff_read_B_match_end_) ) {
+
+ //TODO: do we need a tolerance in the comparison above?
+
+ PrintOverlapToFile(out_skipped, matches_forward[i][j]);
+ poisoned = true;
+
+ }
+ else if ( (matches_forward[i][j]->reverse_complement_match_ == 1) and
+ (new_killed_hinges_vec[i][k].type == 1) and
+ (new_killed_hinges_vec[i][k].pos < matches_forward[i][j]->eff_read_B_match_start_) ) {
+
+
+ PrintOverlapToFile(out_skipped, matches_forward[i][j]);
+ poisoned = true;
+
+ }
+
+ }
+
+ if (not poisoned) {
+ chosen_match = matches_forward[i][j];
+ hinge_pos = -1;
+ forward = 1;
+ //break;
+ }
+
+ }
+ else if ((matches_forward[i][j]->match_type_ == FORWARD_INTERNAL)
+ //and isValidHinge(matches_forward[i][j], hinges_vec[matches_forward[i][j]->read_B_id_])
+ and (hinges_vec[matches_forward[i][j]->read_B_id_].size() > 0)
+ and (forward_internal == 0)){
+
+ // In the case of a forward_internal match we check whether
+ // the hinge on read B is an in-hinge
+ // (or an out-hinge if it's a reverse complement match)
+
+// int hinge_index = 0;
+
+ int read_B_match_start = matches_forward[i][j]->read_B_match_start_;
+ if (matches_forward[i][j]->reverse_complement_match_ == 1) {
+ read_B_match_start = matches_forward[i][j]->read_B_match_end_;
+ }
+
+ for (int k = 0; k < hinges_vec[matches_forward[i][j]->read_B_id_].size(); k++) {
+ if ( (read_B_match_start >
+ hinges_vec[matches_forward[i][j]->read_B_id_][k].pos - HINGE_TOLERANCE)
+ and (read_B_match_start <
+ hinges_vec[matches_forward[i][j]->read_B_id_][k].pos + HINGE_TOLERANCE)
+ and (hinges_vec[matches_forward[i][j]->read_B_id_][k].type ==
+ (1-2*matches_forward[i][j]->reverse_complement_match_))
+ and (hinges_vec[matches_forward[i][j]->read_B_id_][k].active) ) {
+
+ if ((forward == 0) or
+ (matches_forward[i][j]->weight > chosen_match->weight - 2*HINGE_SLACK)) {
+
+ chosen_match = matches_forward[i][j];
+ forward = 1;
+ forward_internal = 1;
+
+ hinge_pos = hinges_vec[matches_forward[i][j]->read_B_id_][k].pos;
+
+
+
+
+ }
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (chosen_match != NULL) {
+ PrintOverlapToFile(out_hg,chosen_match);
+
+ edges_forward[i].push_back(chosen_match);
+
+ PrintOverlapToFile2(out_hg2,chosen_match,hinge_pos);
+
+ chosen_match = NULL;
+ }
+ else {
+
+ // Deadend debugging
+
+ // Forward dead-end
+ deadend_out << i;
+// deadend_out << "\t Active: " << reads[i]->active << std::endl;
+ deadend_out << "\t matches_forward size: " << matches_forward[i].size() << std::endl;
+
+ }
+
+ for (int j = 0; j < matches_backward[i].size(); j++){
+
+
+ if (matches_backward[i][j]->active) {
+
+ if ((reads[matches_backward[i][j]->read_B_id_]->active)) {
+
+
+ if ((matches_backward[i][j]->match_type_ == BACKWARD) and (backward == 0)){
+
+ // check if read j has new_killed_hinge
+
+ bool poisoned = false;
+
+ for (int k = 0; k < new_killed_hinges_vec[i].size(); k++) {
+
+ if ( (matches_backward[i][j]->reverse_complement_match_ != 1) and
+ (new_killed_hinges_vec[i][k].type == 1) and
+ (new_killed_hinges_vec[i][k].pos < matches_backward[i][j]->eff_read_B_match_start_) ) {
+
+ //TODO: do we need a tolerance in the comparison above?
+
+ PrintOverlapToFile(out_skipped, matches_backward[i][j]);
+ poisoned = true;
+
+ }
+ else if ( (matches_backward[i][j]->reverse_complement_match_ == 1) and
+ (new_killed_hinges_vec[i][k].type == -1) and
+ (new_killed_hinges_vec[i][k].pos > matches_backward[i][j]->eff_read_B_match_end_) ) {
+
+ PrintOverlapToFile(out_skipped, matches_backward[i][j]);
+ poisoned = true;
+
+ }
+
+ }
+
+ if (not poisoned) {
+ chosen_match = matches_backward[i][j];
+ backward = 1;
+ hinge_pos = -1;
+ }
+
+ }
+ else if ((matches_backward[i][j]->match_type_ == BACKWARD_INTERNAL)
+ and (hinges_vec[matches_backward[i][j]->read_B_id_].size() > 0)
+ and (backward_internal == 0)) {
+
+ // In the case of a backward_internal match
+ // we check whether the hinge on read B is an out-hinge
+ // (or an in-hinge if it's a reverse complement match)
+
+
+ int read_B_match_end = matches_backward[i][j]->read_B_match_end_;
+ if (matches_backward[i][j]->reverse_complement_match_ == 1) {
+ read_B_match_end = matches_backward[i][j]->read_B_match_start_;
+ }
+
+ for (int k = 0; k < hinges_vec[matches_backward[i][j]->read_B_id_].size(); k++) {
+
+
+ if ( (read_B_match_end >
+ hinges_vec[matches_backward[i][j]->read_B_id_][k].pos - HINGE_TOLERANCE)
+ and (read_B_match_end <
+ hinges_vec[matches_backward[i][j]->read_B_id_][k].pos + HINGE_TOLERANCE)
+ and (hinges_vec[matches_backward[i][j]->read_B_id_][k].type ==
+ (-1+2*matches_backward[i][j]->reverse_complement_match_))
+ and (hinges_vec[matches_backward[i][j]->read_B_id_][k].active) ) {
+
+ if ((backward == 0) or
+ (matches_backward[i][j]->weight > chosen_match->weight - 2*HINGE_SLACK)) {
+ chosen_match = matches_backward[i][j];
+ backward = 1;
+ backward_internal = 1;
+
+
+ hinge_pos = hinges_vec[matches_backward[i][j]->read_B_id_][k].pos;
+
+// int hinge_graph_id = hinge_graph_node_map[std::make_pair(matches_backward[i][j]->read_B_id_,k)];
+
+// eff_b_id = component_sink[component[hinge_graph_id]].first;
+
+ }
+
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (chosen_match != NULL) {
+ PrintOverlapToFile(out_hg,chosen_match);
+
+ edges_backward[i].push_back(chosen_match);
+
+ PrintOverlapToFile2(out_hg2,chosen_match,hinge_pos);
+
+ }
+ else {
+ // Deadend debugging
+
+ // Backward dead-end
+ deadend_out << i;
+// deadend_out << "\t Active: " << reads[i]->active << std::endl;
+ deadend_out << "\t matches_backward size: " << matches_backward[i].size() << std::endl;
+
+ }
+ }
+ }
+
+ console->info("sort and output finished");
+ console->info("version 0.0.3");
+
+ if (strlen(name_db) > 0)
+ la.closeDB(); //close database
+ return 0;
+}
diff --git a/src/lib/CMakeLists.txt b/src/lib/CMakeLists.txt
new file mode 100644
index 0000000..6d0298f
--- /dev/null
+++ b/src/lib/CMakeLists.txt
@@ -0,0 +1,23 @@
+cmake_minimum_required(VERSION 3.2)
+
+add_library(ini ini.c INIReader.cpp)
+add_library(DB DB.c QV.c)
+add_library(LA align.c)
+
+add_library(PAF paf.c)
+find_package( ZLIB REQUIRED )
+if ( ZLIB_FOUND )
+ include_directories( ${ZLIB_INCLUDE_DIRS} )
+ target_link_libraries( PAF ${ZLIB_LIBRARIES} )
+endif( ZLIB_FOUND )
+
+add_library(LAInterface LAInterface.cpp)
+target_link_libraries(LAInterface LA DB PAF)
+
+
+add_library(kmer_lookup kmer_lookup.c)
+add_library(DW_banded DW_banded.c)
+add_library(falcon falcon.c)
+target_link_libraries(falcon kmer_lookup DW_banded)
+
+
diff --git a/src/lib/DB.c b/src/lib/DB.c
new file mode 100755
index 0000000..616239a
--- /dev/null
+++ b/src/lib/DB.c
@@ -0,0 +1,1712 @@
+/************************************************************************************\
+* *
+* Copyright (c) 2014, Dr. Eugene W. Myers (EWM). All rights reserved. *
+* *
+* Redistribution and use in source and binary forms, with or without modification, *
+* are permitted provided that the following conditions are met: *
+* *
+* · Redistributions of source code must retain the above copyright notice, this *
+* list of conditions and the following disclaimer. *
+* *
+* · Redistributions in binary form must reproduce the above copyright notice, this *
+* list of conditions and the following disclaimer in the documentation and/or *
+* other materials provided with the distribution. *
+* *
+* · The name of EWM may not be used to endorse or promote products derived from *
+* this software without specific prior written permission. *
+* *
+* THIS SOFTWARE IS PROVIDED BY EWM ”AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, *
+* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND *
+* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL EWM BE LIABLE *
+* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS *
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
+* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN *
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
+* *
+* For any issues regarding this software and its use, contact EWM at: *
+* *
+* Eugene W. Myers Jr. *
+* Bautzner Str. 122e *
+* 01099 Dresden *
+* GERMANY *
+* Email: gene.myers at gmail.com *
+* *
+\************************************************************************************/
+
+/*******************************************************************************************
+ *
+ * Compressed data base module. Auxiliary routines to open and manipulate a data base for
+ * which the sequence and read information are separated into two separate files, and the
+ * sequence is compressed into 2-bits for each base. Support for tracks of additional
+ * information, and trimming according to the current partition.
+ *
+ * Author : Gene Myers
+ * Date : July 2013
+ * Revised: April 2014
+ *
+ ********************************************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <dirent.h>
+
+#include "DB.h"
+
+#ifdef HIDE_FILES
+#define PATHSEP "/."
+#else
+#define PATHSEP "/"
+#endif
+
+
+/*******************************************************************************************
+ *
+ * GENERAL UTILITIES
+ *
+ ********************************************************************************************/
+
+char *Prog_Name;
+
+#ifdef INTERACTIVE
+
+char Ebuffer[1000];
+
+#endif
+
+void *Malloc(int64 size, char *mesg)
+{ void *p;
+
+ if ((p = malloc(size)) == NULL)
+ { if (mesg == NULL)
+ EPRINTF(EPLACE,"%s: Out of memory\n",Prog_Name);
+ else
+ EPRINTF(EPLACE,"%s: Out of memory (%s)\n",Prog_Name,mesg);
+ }
+ return (p);
+}
+
+void *Realloc(void *p, int64 size, char *mesg)
+{ if ((p = realloc(p,size)) == NULL)
+ { if (mesg == NULL)
+ EPRINTF(EPLACE,"%s: Out of memory\n",Prog_Name);
+ else
+ EPRINTF(EPLACE,"%s: Out of memory (%s)\n",Prog_Name,mesg);
+ }
+ return (p);
+}
+
+char *Strdup(char *name, char *mesg)
+{ char *s;
+
+ if (name == NULL)
+ return (NULL);
+ if ((s = strdup(name)) == NULL)
+ { if (mesg == NULL)
+ EPRINTF(EPLACE,"%s: Out of memory\n",Prog_Name);
+ else
+ EPRINTF(EPLACE,"%s: Out of memory (%s)\n",Prog_Name,mesg);
+ }
+ return (s);
+}
+
+FILE *Fopen(char *name, char *mode)
+{ FILE *f;
+
+ if (name == NULL || mode == NULL)
+ return (NULL);
+ if ((f = fopen(name,mode)) == NULL)
+ EPRINTF(EPLACE,"%s: Cannot open %s for '%s'\n",Prog_Name,name,mode);
+ return (f);
+}
+
+char *PathTo(char *name)
+{ char *path, *find;
+
+ if (name == NULL)
+ return (NULL);
+ if ((find = rindex(name,'/')) != NULL)
+ { *find = '\0';
+ path = Strdup(name,"Extracting path from");
+ *find = '/';
+ }
+ else
+ path = Strdup(".","Allocating default path");
+ return (path);
+}
+
+char *Root(char *name, char *suffix)
+{ char *path, *find, *dot;
+ int epos;
+
+ if (name == NULL)
+ return (NULL);
+ find = rindex(name,'/');
+ if (find == NULL)
+ find = name;
+ else
+ find += 1;
+ if (suffix == NULL)
+ { dot = strchr(find,'.');
+ if (dot != NULL)
+ *dot = '\0';
+ path = Strdup(find,"Extracting root from");
+ if (dot != NULL)
+ *dot = '.';
+ }
+ else
+ { epos = strlen(find);
+ epos -= strlen(suffix);
+ if (epos > 0 && strcasecmp(find+epos,suffix) == 0)
+ { find[epos] = '\0';
+ path = Strdup(find,"Extracting root from");
+ find[epos] = suffix[0];
+ }
+ else
+ path = Strdup(find,"Allocating root");
+ }
+ return (path);
+}
+
+char *Catenate(char *path, char *sep, char *root, char *suffix)
+{ static char *cat = NULL;
+ static int max = -1;
+ int len;
+
+ if (path == NULL || root == NULL || sep == NULL || suffix == NULL)
+ return (NULL);
+ len = strlen(path);
+ len += strlen(sep);
+ len += strlen(root);
+ len += strlen(suffix);
+ if (len > max)
+ { max = ((int) (1.2*len)) + 100;
+ if ((cat = (char *) realloc(cat,max+1)) == NULL)
+ { EPRINTF(EPLACE,"%s: Out of memory (Making path name for %s)\n",Prog_Name,root);
+ return (NULL);
+ }
+ }
+ sprintf(cat,"%s%s%s%s",path,sep,root,suffix);
+ return (cat);
+}
+
+char *Numbered_Suffix(char *left, int num, char *right)
+{ static char *suffix = NULL;
+ static int max = -1;
+ int len;
+
+ if (left == NULL || right == NULL)
+ return (NULL);
+ len = strlen(left);
+ len += strlen(right) + 40;
+ if (len > max)
+ { max = ((int) (1.2*len)) + 100;
+ if ((suffix = (char *) realloc(suffix,max+1)) == NULL)
+ { EPRINTF(EPLACE,"%s: Out of memory (Making number suffix for %d)\n",Prog_Name,num);
+ return (NULL);
+ }
+ }
+ sprintf(suffix,"%s%d%s",left,num,right);
+ return (suffix);
+}
+
+
+#define COMMA ','
+
+// Print big integers with commas/periods for better readability
+
+void Print_Number(int64 num, int width, FILE *out)
+{ if (width == 0)
+ { if (num < 1000ll)
+ fprintf(out,"%lld",num);
+ else if (num < 1000000ll)
+ fprintf(out,"%lld%c%03lld",num/1000ll,COMMA,num%1000ll);
+ else if (num < 1000000000ll)
+ fprintf(out,"%lld%c%03lld%c%03lld",num/1000000ll,
+ COMMA,(num%1000000ll)/1000ll,COMMA,num%1000ll);
+ else
+ fprintf(out,"%lld%c%03lld%c%03lld%c%03lld",num/1000000000ll,
+ COMMA,(num%1000000000ll)/1000000ll,
+ COMMA,(num%1000000ll)/1000ll,COMMA,num%1000ll);
+ }
+ else
+ { if (num < 1000ll)
+ fprintf(out,"%*lld",width,num);
+ else if (num < 1000000ll)
+ { if (width <= 4)
+ fprintf(out,"%lld%c%03lld",num/1000ll,COMMA,num%1000ll);
+ else
+ fprintf(out,"%*lld%c%03lld",width-4,num/1000ll,COMMA,num%1000ll);
+ }
+ else if (num < 1000000000ll)
+ { if (width <= 8)
+ fprintf(out,"%lld%c%03lld%c%03lld",num/1000000ll,COMMA,(num%1000000ll)/1000ll,
+ COMMA,num%1000ll);
+ else
+ fprintf(out,"%*lld%c%03lld%c%03lld",width-8,num/1000000ll,COMMA,(num%1000000ll)/1000ll,
+ COMMA,num%1000ll);
+ }
+ else
+ { if (width <= 12)
+ fprintf(out,"%lld%c%03lld%c%03lld%c%03lld",num/1000000000ll,COMMA,
+ (num%1000000000ll)/1000000ll,COMMA,
+ (num%1000000ll)/1000ll,COMMA,num%1000ll);
+ else
+ fprintf(out,"%*lld%c%03lld%c%03lld%c%03lld",width-12,num/1000000000ll,COMMA,
+ (num%1000000000ll)/1000000ll,COMMA,
+ (num%1000000ll)/1000ll,COMMA,num%1000ll);
+ }
+ }
+}
+
+// Return the number of digits, base 10, of num
+
+int Number_Digits(int64 num)
+{ int digit;
+
+ digit = 0;
+ while (num >= 1)
+ { num /= 10;
+ digit += 1;
+ }
+ return (digit);
+}
+
+
+/*******************************************************************************************
+ *
+ * READ COMPRESSION/DECOMPRESSION UTILITIES
+ *
+ ********************************************************************************************/
+
+// Compress read into 2-bits per base (from [0-3] per byte representation)
+
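+// Each output byte packs four consecutive bases: (s[i]<<6) | (s[i+1]<<4) | (s[i+2]<<2) | s[i+3].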
+void Compress_Read(int len, char *s)
+{ int i;
+ char c, d;
+ char *s0, *s1, *s2, *s3;
+
+ s0 = s;
+ s1 = s0+1;
+ s2 = s1+1;
+ s3 = s2+1;
+
+ c = s1[len];
+ d = s2[len];
+ s0[len] = s1[len] = s2[len] = 0;
+
+ for (i = 0; i < len; i += 4)
+ *s++ = (char ) ((s0[i] << 6) | (s1[i] << 4) | (s2[i] << 2) | s3[i]);
+
+ s1[len] = c;
+ s2[len] = d;
+}
+
+// Uncompress read from 2-bits per base into [0-3] per byte representation
+
+void Uncompress_Read(int len, char *s)
+{ int i, tlen, byte;
+ char *s0, *s1, *s2, *s3;
+ char *t;
+
+ s0 = s;
+ s1 = s0+1;
+ s2 = s1+1;
+ s3 = s2+1;
+
+ tlen = (len-1)/4;
+
+ t = s+tlen;
+ for (i = tlen*4; i >= 0; i -= 4)
+ { byte = *t--;
+ s0[i] = (char) ((byte >> 6) & 0x3);
+ s1[i] = (char) ((byte >> 4) & 0x3);
+ s2[i] = (char) ((byte >> 2) & 0x3);
+ s3[i] = (char) (byte & 0x3);
+ }
+ s[len] = 4;
+}
+
+// Convert read in [0-3] representation to ascii representation (end with '\0')
+
+void Lower_Read(char *s)
+{ static char letter[4] = { 'a', 'c', 'g', 't' };
+
+ for ( ; *s != 4; s++)
+ *s = letter[(int) *s];
+ *s = '\0';
+}
+
+void Upper_Read(char *s)
+{ static char letter[4] = { 'A', 'C', 'G', 'T' };
+
+ for ( ; *s != 4; s++)
+ *s = letter[(int) *s];
+ *s = '\0';
+}
+
+// Convert read in ascii representation to [0-3] representation (end with 4)
+
+void Number_Read(char *s)
+{ static char number[128] =
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 0, 0, 0, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 3, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 0, 0, 0, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 3, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+
+ for ( ; *s != '\0'; s++)
+ *s = number[(int) *s];
+ *s = 4;
+}
+
+
+/*******************************************************************************************
+ *
+ * DB OPEN, TRIM & CLOSE ROUTINES
+ *
+ ********************************************************************************************/
+
+
+// Open the given database or dam, "path" into the supplied HITS_DB record "db". If the name has
+// a part # in it then just the part is opened. The index array is allocated (for all or
+// just the part) and read in.
+// Return status of routine:
+// -1: The DB could not be opened for a reason reported by the routine to EPLACE
+// 0: Open of DB proceeded without mishap
+// 1: Open of DAM proceeded without mishap
+
+int Open_DB(char* path, HITS_DB *db)
+{ HITS_DB dbcopy;
+ char *root, *pwd, *bptr, *fptr, *cat;
+ int nreads;
+ FILE *index, *dbvis;
+ int status, plen, isdam;
+ int part, cutoff, all;
+ int ufirst, tfirst, ulast, tlast;
+
+ status = -1;
+ dbcopy = *db;
+
+ plen = strlen(path);
+ if (strcmp(path+(plen-4),".dam") == 0)
+ root = Root(path,".dam");
+ else
+ root = Root(path,".db");
+ pwd = PathTo(path);
+
+ bptr = rindex(root,'.');
+ if (bptr != NULL && bptr[1] != '\0' && bptr[1] != '-')
+ { part = strtol(bptr+1,&fptr,10);
+ if (*fptr != '\0' || part == 0)
+ part = 0;
+ else
+ *bptr = '\0';
+ }
+ else
+ part = 0;
+
+ isdam = 0;
+ cat = Catenate(pwd,"/",root,".db");
+ if (cat == NULL)
+ return (-1);
+ if ((dbvis = fopen(cat,"r")) == NULL)
+ { cat = Catenate(pwd,"/",root,".dam");
+ if (cat == NULL)
+ return (-1);
+ if ((dbvis = fopen(cat,"r")) == NULL)
+ { EPRINTF(EPLACE,"%s: Could not open database %s\n",Prog_Name,path);
+ goto error;
+ }
+ isdam = 1;
+ }
+
+ if ((index = Fopen(Catenate(pwd,PATHSEP,root,".idx"),"r")) == NULL)
+ goto error1;
+ if (fread(db,sizeof(HITS_DB),1,index) != 1)
+ { EPRINTF(EPLACE,"%s: Index file (.idx) of %s is junk\n",Prog_Name,root);
+ goto error2;
+ }
+
+ { int p, nblocks, nfiles;
+ int64 size;
+ char fname[MAX_NAME], prolog[MAX_NAME];
+
+ nblocks = 0;
+ if (fscanf(dbvis,DB_NFILE,&nfiles) != 1)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error2;
+ }
+ for (p = 0; p < nfiles; p++)
+ if (fscanf(dbvis,DB_FDATA,&tlast,fname,prolog) != 3)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error2;
+ }
+ if (fscanf(dbvis,DB_NBLOCK,&nblocks) != 1)
+ if (part == 0)
+ { cutoff = 0;
+ all = 1;
+ }
+ else
+ { EPRINTF(EPLACE,"%s: DB %s has not yet been partitioned, cannot request a block !\n",
+ Prog_Name,root);
+ goto error2;
+ }
+ else
+ { if (fscanf(dbvis,DB_PARAMS,&size,&cutoff,&all) != 3)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error2;
+ }
+ if (part > nblocks)
+ { EPRINTF(EPLACE,"%s: DB %s has only %d blocks\n",Prog_Name,root,nblocks);
+ goto error2;
+ }
+ }
+
+ if (part > 0)
+ { for (p = 1; p <= part; p++)
+ if (fscanf(dbvis,DB_BDATA,&ufirst,&tfirst) != 2)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error2;
+ }
+ if (fscanf(dbvis,DB_BDATA,&ulast,&tlast) != 2)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error2;
+ }
+ }
+ else
+ { ufirst = tfirst = 0;
+ ulast = db->ureads;
+ tlast = db->treads;
+ }
+ }
+
+ db->trimmed = 0;
+ db->tracks = NULL;
+ db->part = part;
+ db->cutoff = cutoff;
+ db->all = all;
+ db->ufirst = ufirst;
+ db->tfirst = tfirst;
+
+ nreads = ulast-ufirst;
+ if (part <= 0)
+ { db->reads = (HITS_READ *) Malloc(sizeof(HITS_READ)*(nreads+2),"Allocating Open_DB index");
+ if (db->reads == NULL)
+ goto error2;
+ db->reads += 1;
+ if (fread(db->reads,sizeof(HITS_READ),nreads,index) != (size_t) nreads)
+ { EPRINTF(EPLACE,"%s: Index file (.idx) of %s is junk\n",Prog_Name,root);
+ free(db->reads);
+ goto error2;
+ }
+ }
+ else
+ { HITS_READ *reads;
+ int i, r, maxlen;
+ int64 totlen;
+
+ reads = (HITS_READ *) Malloc(sizeof(HITS_READ)*(nreads+2),"Allocating Open_DB index");
+ if (reads == NULL)
+ goto error2;
+ reads += 1;
+
+ fseeko(index,sizeof(HITS_READ)*ufirst,SEEK_CUR);
+ if (fread(reads,sizeof(HITS_READ),nreads,index) != (size_t) nreads)
+ { EPRINTF(EPLACE,"%s: Index file (.idx) of %s is junk\n",Prog_Name,root);
+ free(reads);
+ goto error2;
+ }
+
+ totlen = 0;
+ maxlen = 0;
+ for (i = 0; i < nreads; i++)
+ { r = reads[i].rlen;
+ totlen += r;
+ if (r > maxlen)
+ maxlen = r;
+ }
+
+ db->maxlen = maxlen;
+ db->totlen = totlen;
+ db->reads = reads;
+ }
+
+ ((int *) (db->reads))[-1] = ulast - ufirst; // Kludge, need these for DB part
+ ((int *) (db->reads))[-2] = tlast - tfirst;
+
+ db->nreads = nreads;
+ db->path = Strdup(Catenate(pwd,PATHSEP,root,""),"Allocating Open_DB path");
+ if (db->path == NULL)
+ goto error2;
+ db->bases = NULL;
+ db->loaded = 0;
+
+ status = isdam;
+
+error2:
+ fclose(index);
+error1:
+ fclose(dbvis);
+error:
+ if (bptr != NULL)
+ *bptr = '.';
+
+ free(pwd);
+ free(root);
+
+ if (status < 0)
+ *db = dbcopy;
+
+ return (status);
+}
+
+
+// Trim the DB or part thereof and all loaded tracks according to the cutoff and all settings
+// of the current DB partition. Reallocate smaller memory blocks for the information kept
+// for the retained reads.
+
+void Trim_DB(HITS_DB *db)
+{ int i, j, r;
+ int allflag, cutoff;
+ int64 totlen;
+ int maxlen, nreads;
+ HITS_TRACK *record;
+ HITS_READ *reads;
+
+ if (db->trimmed) return;
+
+ if (db->cutoff <= 0 && db->all) return;
+
+ cutoff = db->cutoff;
+ if (db->all)
+ allflag = 0;
+ else
+ allflag = DB_BEST;
+
+ reads = db->reads;
+ nreads = db->nreads;
+
+ for (record = db->tracks; record != NULL; record = record->next)
+ if (strcmp(record->name,".@qvs") == 0)
+ { uint16 *table = ((HITS_QV *) record)->table;
+
+ j = 0;
+ for (i = 0; i < db->nreads; i++)
+ if ((reads[i].flags & DB_BEST) >= allflag && reads[i].rlen >= cutoff)
+ table[j++] = table[i];
+ }
+ else
+ { int *anno4, size;
+ int64 *anno8;
+ char *anno, *data;
+
+ size = record->size;
+ data = (char *) record->data;
+ if (data == NULL)
+ { anno = (char *) record->anno;
+ j = 0;
+ for (i = r = 0; i < db->nreads; i++, r += size)
+ if ((reads[i].flags & DB_BEST) >= allflag && reads[i].rlen >= cutoff)
+ { memmove(anno+j,anno+r,size);
+ j += size;
+ }
+ memmove(anno+j,anno+r,size);
+ }
+ else if (size == 4)
+ { int ai;
+
+ anno4 = (int *) (record->anno);
+ j = anno4[0] = 0;
+ for (i = 0; i < db->nreads; i++)
+ if ((reads[i].flags & DB_BEST) >= allflag && reads[i].rlen >= cutoff)
+ { ai = anno4[i];
+ anno4[j+1] = anno4[j] + (anno4[i+1]-ai);
+ memmove(data+anno4[j],data+ai,anno4[i+1]-ai);
+ j += 1;
+ }
+ record->data = Realloc(record->data,anno4[j],NULL);
+ }
+ else // size == 8
+ { int64 ai;
+
+ anno8 = (int64 *) (record->anno);
+ j = anno8[0] = 0;
+ for (i = 0; i < db->nreads; i++)
+ if ((reads[i].flags & DB_BEST) >= allflag && reads[i].rlen >= cutoff)
+ { ai = anno8[i];
+ anno8[j+1] = anno8[j] + (anno8[i+1]-ai);
+ memmove(data+anno8[j],data+ai,anno8[i+1]-ai);
+ j += 1;
+ }
+ record->data = Realloc(record->data,anno8[j],NULL);
+ }
+ record->anno = Realloc(record->anno,record->size*(j+1),NULL);
+ }
+
+ totlen = maxlen = 0;
+ for (j = i = 0; i < nreads; i++)
+ { r = reads[i].rlen;
+ if ((reads[i].flags & DB_BEST) >= allflag && r >= cutoff)
+ { totlen += r;
+ if (r > maxlen)
+ maxlen = r;
+ reads[j++] = reads[i];
+ }
+ }
+
+ db->totlen = totlen;
+ db->maxlen = maxlen;
+ db->nreads = j;
+ db->trimmed = 1;
+
+ if (j < nreads)
+ { db->reads = Realloc(reads-1,sizeof(HITS_READ)*(j+2),NULL);
+ db->reads += 1;
+ }
+}
+
+// The DB has already been trimmed, but a track over the untrimmed DB needs to be loaded.
+// Trim the track by rereading the untrimmed DB index from the file system.
+
+static int Late_Track_Trim(HITS_DB *db, HITS_TRACK *track, int ispart)
+{ int i, j, r;
+ int allflag, cutoff;
+ int ureads;
+ char *root;
+ HITS_READ read;
+ FILE *indx;
+
+ if (!db->trimmed) return (0);
+
+ if (db->cutoff <= 0 && db->all) return (0);
+
+ cutoff = db->cutoff;
+ if (db->all)
+ allflag = 0;
+ else
+ allflag = DB_BEST;
+
+ root = rindex(db->path,'/') + 2;
+ indx = Fopen(Catenate(db->path,"","",".idx"),"r");
+ fseeko(indx,sizeof(HITS_DB) + sizeof(HITS_READ)*db->ufirst,SEEK_SET);
+ if (ispart)
+ ureads = ((int *) (db->reads))[-1];
+ else
+ ureads = db->ureads;
+
+ if (strcmp(track->name,".@qvs") == 0)
+ { EPRINTF(EPLACE,"%s: Cannot load QV track after trimming\n",Prog_Name);
+ fclose(indx);
+ EXIT(1);
+ }
+
+ { int *anno4, size;
+ int64 *anno8;
+ char *anno, *data;
+
+ size = track->size;
+ data = (char *) track->data;
+ if (data == NULL)
+ { anno = (char *) track->anno;
+ j = r = 0;
+ for (i = r = 0; i < ureads; i++, r += size)
+ { if (fread(&read,sizeof(HITS_READ),1,indx) != 1)
+ { EPRINTF(EPLACE,"%s: Index file (.idx) of %s is junk\n",Prog_Name,root);
+ fclose(indx);
+ EXIT(1);
+ }
+ if ((read.flags & DB_BEST) >= allflag && read.rlen >= cutoff)
+ { memmove(anno+j,anno+r,size);
+ j += size;
+ }
+ r += size;
+ }
+ memmove(anno+j,anno+r,size);
+ }
+ else if (size == 4)
+ { int ai;
+
+ anno4 = (int *) (track->anno);
+ j = anno4[0] = 0;
+ for (i = 0; i < ureads; i++)
+ { if (fread(&read,sizeof(HITS_READ),1,indx) != 1)
+ { EPRINTF(EPLACE,"%s: Index file (.idx) of %s is junk\n",Prog_Name,root);
+ fclose(indx);
+ EXIT(1);
+ }
+ if ((read.flags & DB_BEST) >= allflag && read.rlen >= cutoff)
+ { ai = anno4[i];
+ anno4[j+1] = anno4[j] + (anno4[i+1]-ai);
+ memmove(data+anno4[j],data+ai,anno4[i+1]-ai);
+ j += 1;
+ }
+ }
+ track->data = Realloc(track->data,anno4[j],NULL);
+ }
+ else // size == 8
+ { int64 ai;
+
+ anno8 = (int64 *) (track->anno);
+ j = anno8[0] = 0;
+ for (i = 0; i < ureads; i++)
+ { if (fread(&read,sizeof(HITS_READ),1,indx) != 1)
+ { EPRINTF(EPLACE,"%s: Index file (.idx) of %s is junk\n",Prog_Name,root);
+ fclose(indx);
+ EXIT(1);
+ }
+ if ((read.flags & DB_BEST) >= allflag && read.rlen >= cutoff)
+ { ai = anno8[i];
+ anno8[j+1] = anno8[j] + (anno8[i+1]-ai);
+ memmove(data+anno8[j],data+ai,anno8[i+1]-ai);
+ j += 1;
+ }
+ }
+ track->data = Realloc(track->data,anno8[j],NULL);
+ }
+ track->anno = Realloc(track->anno,track->size*(j+1),NULL);
+ }
+
+ fclose(indx);
+ return (0);
+}
+
+// Shut down an open 'db' by freeing all associated space, including tracks and QV structures,
+// and any open file pointers. The record pointed at by db however remains (the user
+// supplied it and so should free it).
+
+void Close_DB(HITS_DB *db)
+{ HITS_TRACK *t, *p;
+
+ if (db->loaded)
+ free(((char *) (db->bases)) - 1);
+ else if (db->bases != NULL)
+ fclose((FILE *) db->bases);
+ free(db->reads-1);
+ free(db->path);
+
+ Close_QVs(db);
+
+ for (t = db->tracks; t != NULL; t = p)
+ { p = t->next;
+ free(t->anno);
+ free(t->data);
+ free(t);
+ }
+}
+
+
+/*******************************************************************************************
+ *
+ * QV LOAD & CLOSE ROUTINES
+ *
+ ********************************************************************************************/
+
+HITS_DB *Active_DB = NULL; // Last db/qv used by "Load_QVentry"
+HITS_QV *Active_QV; // Becomes invalid after closing
+
+int Load_QVs(HITS_DB *db)
+{ FILE *quiva, *istub, *indx;
+ char *root;
+ uint16 *table;
+ HITS_QV *qvtrk;
+ QVcoding *coding, *nx;
+ int ncodes;
+
+ if (db->tracks != NULL && strcmp(db->tracks->name,".@qvs") == 0)
+ return (0);
+
+ if (db->trimmed)
+ { EPRINTF(EPLACE,"%s: Cannot load QVs after trimming the DB\n",Prog_Name);
+ EXIT(1);
+ }
+
+ if (db->reads[db->nreads-1].coff < 0)
+ { EPRINTF(EPLACE,"%s: The requested QVs have not been added to the DB!\n",Prog_Name);
+ EXIT(1);
+ }
+
+ // Open .qvs, .idx, and .db files
+
+ quiva = Fopen(Catenate(db->path,"","",".qvs"),"r");
+ if (quiva == NULL)
+ return (-1);
+
+ istub = NULL;
+ indx = NULL;
+ table = NULL;
+ coding = NULL;
+ qvtrk = NULL;
+
+ root = rindex(db->path,'/') + 2;
+ istub = Fopen(Catenate(db->path,"/",root,".db"),"r");
+ if (istub == NULL)
+ goto error;
+
+ { int first, last, nfiles;
+ char prolog[MAX_NAME], fname[MAX_NAME];
+ int i, j;
+
+ if (fscanf(istub,DB_NFILE,&nfiles) != 1)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error;
+ }
+
+ if (db->part > 0)
+ { int pfirst, plast;
+ int fbeg, fend;
+ int n, k;
+ FILE *indx;
+
+ // Determine first how many and which files span the block (fbeg to fend)
+
+ pfirst = db->ufirst;
+ plast = pfirst + db->nreads;
+
+ first = 0;
+ for (fbeg = 0; fbeg < nfiles; fbeg++)
+ { if (fscanf(istub,DB_FDATA,&last,fname,prolog) != 3)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error;
+ }
+ if (last > pfirst)
+ break;
+ first = last;
+ }
+ for (fend = fbeg+1; fend <= nfiles; fend++)
+ { if (last >= plast)
+ break;
+ if (fscanf(istub,DB_FDATA,&last,fname,prolog) != 3)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error;
+ }
+ first = last;
+ }
+
+ indx = Fopen(Catenate(db->path,"","",".idx"),"r");
+ ncodes = fend-fbeg;
+ coding = (QVcoding *) Malloc(sizeof(QVcoding)*ncodes,"Allocating coding schemes");
+ table = (uint16 *) Malloc(sizeof(uint16)*db->nreads,"Allocating QV table indices");
+ if (indx == NULL || coding == NULL || table == NULL)
+ { ncodes = 0;
+ goto error;
+ }
+
+ // Carefully get the first coding scheme (its offset is most likely in a HITS_READ record
+ // in .idx that is *not* in memory). Get all the other coding schemes normally and
+ // assign the table # for each read in the block in "table".
+
+ rewind(istub);
+ fscanf(istub,DB_NFILE,&nfiles);
+
+ first = 0;
+ for (n = 0; n < fbeg; n++)
+ { fscanf(istub,DB_FDATA,&last,fname,prolog);
+ first = last;
+ }
+
+ for (n = fbeg; n < fend; n++)
+ { fscanf(istub,DB_FDATA,&last,fname,prolog);
+
+ i = n-fbeg;
+ if (first < pfirst)
+ { HITS_READ read;
+
+ fseeko(indx,sizeof(HITS_DB) + sizeof(HITS_READ)*first,SEEK_SET);
+ if (fread(&read,sizeof(HITS_READ),1,indx) != 1)
+ { EPRINTF(EPLACE,"%s: Index file (.idx) of %s is junk\n",Prog_Name,root);
+ ncodes = i;
+ goto error;
+ }
+ fseeko(quiva,read.coff,SEEK_SET);
+ nx = Read_QVcoding(quiva);
+ if (nx == NULL)
+ { ncodes = i;
+ goto error;
+ }
+ coding[i] = *nx;
+ }
+ else
+ { fseeko(quiva,db->reads[first-pfirst].coff,SEEK_SET);
+ nx = Read_QVcoding(quiva);
+ if (nx == NULL)
+ { ncodes = i;
+ goto error;
+ }
+ coding[i] = *nx;
+ db->reads[first-pfirst].coff = ftello(quiva);
+ }
+
+ j = first-pfirst;
+ if (j < 0)
+ j = 0;
+ k = last-pfirst;
+ if (k > db->nreads)
+ k = db->nreads;
+ while (j < k)
+ table[j++] = (uint16) i;
+
+ first = last;
+ }
+
+ fclose(indx);
+ indx = NULL;
+ }
+
+ else
+ { // Load in coding scheme for each file, adjust .coff of first read in the file, and
+ // record which table each read uses
+
+ ncodes = nfiles;
+ coding = (QVcoding *) Malloc(sizeof(QVcoding)*nfiles,"Allocating coding schemes");
+ table = (uint16 *) Malloc(sizeof(uint16)*db->nreads,"Allocating QV table indices");
+ if (coding == NULL || table == NULL)
+ goto error;
+
+ first = 0;
+ for (i = 0; i < nfiles; i++)
+ { if (fscanf(istub,DB_FDATA,&last,fname,prolog) != 3)
+ { EPRINTF(EPLACE,"%s: Stub file (.db) of %s is junk\n",Prog_Name,root);
+ goto error;
+ }
+
+ fseeko(quiva,db->reads[first].coff,SEEK_SET);
+ nx = Read_QVcoding(quiva);
+ if (nx == NULL)
+ { ncodes = i;
+ goto error;
+ }
+ coding[i] = *nx;
+ db->reads[first].coff = ftello(quiva);
+
+ for (j = first; j < last; j++)
+ table[j] = (uint16) i;
+
+ first = last;
+ }
+ }
+
+ // Allocate and fill in the HITS_QV record and add it to the front of the
+ // track list
+
+ qvtrk = (HITS_QV *) Malloc(sizeof(HITS_QV),"Allocating QV pseudo-track");
+ if (qvtrk == NULL)
+ goto error;
+ qvtrk->name = Strdup(".@qvs","Allocating QV pseudo-track name");
+ if (qvtrk->name == NULL)
+ goto error;
+ qvtrk->next = db->tracks;
+ db->tracks = (HITS_TRACK *) qvtrk;
+ qvtrk->ncodes = ncodes;
+ qvtrk->table = table;
+ qvtrk->coding = coding;
+ qvtrk->quiva = quiva;
+ }
+
+ fclose(istub);
+ return (0);
+
+error:
+ if (qvtrk != NULL)
+ free(qvtrk);
+ if (table != NULL)
+ free(table);
+ if (coding != NULL)
+ { int i;
+ for (i = 0; i < ncodes; i++)
+ Free_QVcoding(coding+i);
+ free(coding);
+ }
+ if (indx != NULL)
+ fclose(indx);
+ if (istub != NULL)
+ fclose(istub);
+ fclose(quiva);
+ EXIT(1);
+}
+
+// Close the QV stream, free the QV pseudo track and all associated memory
+
+void Close_QVs(HITS_DB *db)
+{ HITS_TRACK *track;
+ HITS_QV *qvtrk;
+ int i;
+
+ Active_DB = NULL;
+
+ track = db->tracks;
+ if (track != NULL && strcmp(track->name,".@qvs") == 0)
+ { qvtrk = (HITS_QV *) track;
+ for (i = 0; i < qvtrk->ncodes; i++)
+ Free_QVcoding(qvtrk->coding+i);
+ free(qvtrk->coding);
+ free(qvtrk->table);
+ fclose(qvtrk->quiva);
+ db->tracks = track->next;
+ free(track);
+ }
+ return;
+}
+
+
+/*******************************************************************************************
+ *
+ * TRACK LOAD & CLOSE ROUTINES
+ *
+ ********************************************************************************************/
+
+// Return status of track:
+// 1: Track is for trimmed DB
+// 0: Track is for untrimmed DB
+// -1: Track is not the right size for the DB, either trimmed or untrimmed
+// -2: Could not find the track
+
+int Check_Track(HITS_DB *db, char *track, int *kind)
+{ FILE *afile;
+ int tracklen, size, ispart;
+ int ureads, treads;
+
+ afile = NULL;
+ if (db->part > 0)
+ { afile = fopen(Catenate(db->path,Numbered_Suffix(".",db->part,"."),track,".anno"),"r");
+ ispart = 1;
+ }
+ if (afile == NULL)
+ { afile = fopen(Catenate(db->path,".",track,".anno"),"r");
+ ispart = 0;
+ }
+ if (afile == NULL)
+ return (-2);
+
+ if (fread(&tracklen,sizeof(int),1,afile) != 1)
+ return (-1);
+ if (fread(&size,sizeof(int),1,afile) != 1)
+ return (-1);
+
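+ // A recorded size of 0 denotes a mask track; any positive size denotes a custom track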
+ if (size == 0)
+ *kind = MASK_TRACK;
+ else if (size > 0)
+ *kind = CUSTOM_TRACK;
+ else
+ return (-1);
+
+ fclose(afile);
+
+ if (ispart)
+ { ureads = ((int *) (db->reads))[-1];
+ treads = ((int *) (db->reads))[-2];
+ }
+ else
+ { ureads = db->ureads;
+ treads = db->treads;
+ }
+
+ if (tracklen == ureads)
+ return (0);
+ else if (tracklen == treads)
+ return (1);
+ else
+ return (-1);
+}
+
+// If track is not already in the db's track list, then allocate all the storage for it,
+// read it in from the appropriate file, add it to the track list, and return a pointer
+// to the newly created HITS_TRACK record. If the track does not exist or cannot be
+// opened for some reason, then NULL is returned.
+
+HITS_TRACK *Load_Track(HITS_DB *db, char *track)
+{ FILE *afile, *dfile;
+ int tracklen, size;
+ int nreads, ispart;
+ int treads, ureads;
+ void *anno;
+ void *data;
+ char *name;
+ HITS_TRACK *record;
+
+ if (track[0] == '.')
+ { EPRINTF(EPLACE,"%s: Track name, '%s', cannot begin with a .\n",Prog_Name,track);
+ EXIT(NULL);
+ }
+
+ for (record = db->tracks; record != NULL; record = record->next)
+ if (strcmp(record->name,track) == 0)
+ return (record);
+
+ afile = NULL;
+ if (db->part)
+ { afile = fopen(Catenate(db->path,Numbered_Suffix(".",db->part,"."),track,".anno"),"r");
+ ispart = 1;
+ }
+ if (afile == NULL)
+ { afile = fopen(Catenate(db->path,".",track,".anno"),"r");
+ ispart = 0;
+ }
+ if (afile == NULL)
+ { EPRINTF(EPLACE,"%s: Track '%s' does not exist\n",Prog_Name,track);
+ return (NULL);
+ }
+
+ dfile = NULL;
+ anno = NULL;
+ data = NULL;
+ record = NULL;
+
+ if (ispart)
+ name = Catenate(db->path,Numbered_Suffix(".",db->part,"."),track,".data");
+ else
+ name = Catenate(db->path,".",track,".data");
+ if (name == NULL)
+ goto error;
+ dfile = fopen(name,"r");
+
+ if (fread(&tracklen,sizeof(int),1,afile) != 1)
+ { EPRINTF(EPLACE,"%s: Track '%s' annotation file is junk\n",Prog_Name,track);
+ goto error;
+ }
+ if (fread(&size,sizeof(int),1,afile) != 1)
+ { EPRINTF(EPLACE,"%s: Track '%s' annotation file is junk\n",Prog_Name,track);
+ goto error;
+ }
+
+ if (size < 0)
+ { EPRINTF(EPLACE,"%s: Track '%s' annotation file is junk\n",Prog_Name,track);
+ goto error;
+ }
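+ // Mask tracks record a size of 0, but their annotation entries are 8-byte (int64) offsets into the .data file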
+ if (size == 0)
+ size = 8;
+
+ if (ispart)
+ { ureads = ((int *) (db->reads))[-1];
+ treads = ((int *) (db->reads))[-2];
+ }
+ else
+ { ureads = db->ureads;
+ treads = db->treads;
+ }
+
+ if (db->trimmed)
+ { if (tracklen != treads && tracklen != ureads)
+ { EPRINTF(EPLACE,"%s: Track '%s' not same size as database !\n",Prog_Name,track);
+ goto error;
+ }
+ if ( ! ispart && db->part > 0)
+ { if (tracklen == treads)
+ fseeko(afile,size*db->tfirst,SEEK_CUR);
+ else
+ fseeko(afile,size*db->ufirst,SEEK_CUR);
+ }
+ }
+ else
+ { if (tracklen != ureads)
+ { if (tracklen == treads)
+ EPRINTF(EPLACE,"%s: Track '%s' is for a trimmed DB !\n",Prog_Name,track);
+ else
+ EPRINTF(EPLACE,"%s: Track '%s' not same size as database !\n",Prog_Name,track);
+ goto error;
+ }
+ if ( ! ispart && db->part > 0)
+ fseeko(afile,size*db->ufirst,SEEK_CUR);
+ }
+ nreads = db->nreads;
+
+ anno = (void *) Malloc(size*(nreads+1),"Allocating Track Anno Vector");
+ if (anno == NULL)
+ goto error;
+
+ if (dfile != NULL)
+ { int64 *anno8, off8, dlen;
+ int *anno4, off4;
+ int i;
+
+ if (fread(anno,size,nreads+1,afile) != (size_t) (nreads+1))
+ { EPRINTF(EPLACE,"%s: Track '%s' annotation file is junk\n",Prog_Name,track);
+ goto error;
+ }
+
+ if (size == 4)
+ { anno4 = (int *) anno;
+ off4 = anno4[0];
+ if (off4 != 0)
+ { for (i = 0; i <= nreads; i++)
+ anno4[i] -= off4;
+ fseeko(dfile,off4,SEEK_SET);
+ }
+ dlen = anno4[nreads];
+ data = (void *) Malloc(dlen,"Allocating Track Data Vector");
+ }
+ else
+ { anno8 = (int64 *) anno;
+ off8 = anno8[0];
+ if (off8 != 0)
+ { for (i = 0; i <= nreads; i++)
+ anno8[i] -= off8;
+ fseeko(dfile,off8,SEEK_SET);
+ }
+ dlen = anno8[nreads];
+ data = (void *) Malloc(dlen,"Allocating Track Data Vector");
+ }
+ if (data == NULL)
+ goto error;
+ if (dlen > 0)
+ { if (fread(data,dlen,1,dfile) != 1)
+ { EPRINTF(EPLACE,"%s: Track '%s' data file is junk\n",Prog_Name,track);
+ goto error;
+ }
+ }
+ fclose(dfile);
+ dfile = NULL;
+ }
+ else
+ { if (fread(anno,size,nreads,afile) != (size_t) nreads)
+ { EPRINTF(EPLACE,"%s: Track '%s' annotation file is junk\n",Prog_Name,track);
+ goto error;
+ }
+ data = NULL;
+ }
+
+ fclose(afile);
+
+ record = (HITS_TRACK *) Malloc(sizeof(HITS_TRACK),"Allocating Track Record");
+ if (record == NULL)
+ goto error;
+ record->name = Strdup(track,"Allocating Track Name");
+ if (record->name == NULL)
+ goto error;
+ record->data = data;
+ record->anno = anno;
+ record->size = size;
+
+ if (db->trimmed && tracklen != treads)
+ { if (Late_Track_Trim(db,record,ispart))
+ goto error;
+ }
+
+ if (db->tracks != NULL && strcmp(db->tracks->name,".@qvs") == 0)
+ { record->next = db->tracks->next;
+ db->tracks->next = record;
+ }
+ else
+ { record->next = db->tracks;
+ db->tracks = record;
+ }
+
+ return (record);
+
+error:
+ if (record != NULL)
+ free(record);
+ if (data != NULL)
+ free(data);
+ if (anno != NULL)
+ free(anno);
+ if (dfile != NULL)
+ fclose(dfile);
+ fclose(afile);
+ EXIT (NULL);
+}
+
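+// Remove the named track from db's track list (if present) and free all memory associated with it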
+void Close_Track(HITS_DB *db, char *track)
+{ HITS_TRACK *record, *prev;
+
+ prev = NULL;
+ for (record = db->tracks; record != NULL; record = record->next)
+ { if (strcmp(record->name,track) == 0)
+ { free(record->anno);
+ free(record->data);
+ free(record->name);
+ if (prev == NULL)
+ db->tracks = record->next;
+ else
+ prev->next = record->next;
+ free(record);
+ return;
+ }
+ prev = record;
+ }
+ return;
+}
+
+
+/*******************************************************************************************
+ *
+ * READ BUFFER ALLOCATION AND READ ACCESS
+ *
+ ********************************************************************************************/
+
+// Allocate and return a buffer big enough for the largest read in 'db', leaving room
+// for an initial delimiter character
+
+char *New_Read_Buffer(HITS_DB *db)
+{ char *read;
+
+ read = (char *) Malloc(db->maxlen+4,"Allocating New Read Buffer");
+ if (read == NULL)
+ EXIT(NULL);
+ return (read+1);
+}
+
+// Load into 'read' the i'th read in 'db'. As an upper-case ASCII string if ascii is 2, as a
+// lower-case ASCII string if ascii is 1, and as a numeric string over 0(A), 1(C), 2(G), and
+// 3(T) otherwise.
+//
+// **NB**, the byte before read will be set to a delimiter character!
+
+int Load_Read(HITS_DB *db, int i, char *read, int ascii)
+{ FILE *bases = (FILE *) db->bases;
+ int64 off;
+ int len, clen;
+ HITS_READ *r = db->reads;
+
+ if (i >= db->nreads)
+ { EPRINTF(EPLACE,"%s: Index out of bounds (Load_Read)\n",Prog_Name);
+ EXIT(1);
+ }
+ if (bases == NULL)
+ { bases = Fopen(Catenate(db->path,"","",".bps"),"r");
+ if (bases == NULL)
+ EXIT(1);
+ db->bases = (void *) bases;
+ }
+
+ off = r[i].boff;
+ len = r[i].rlen;
+
+ if (ftello(bases) != off)
+ fseeko(bases,off,SEEK_SET);
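+ // Reads are stored 2 bits per base (4 bases per byte) in the .bps file, so compute the compressed byte length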
+ clen = COMPRESSED_LEN(len);
+ if (clen > 0)
+ { if (fread(read,clen,1,bases) != 1)
+ { EPRINTF(EPLACE,"%s: Failed read of .bps file (Load_Read)\n",Prog_Name);
+ EXIT(1);
+ }
+ }
+ Uncompress_Read(len,read);
+ if (ascii == 1)
+ { Lower_Read(read);
+ read[-1] = '\0';
+ }
+ else if (ascii == 2)
+ { Upper_Read(read);
+ read[-1] = '\0';
+ }
+ else
+ read[-1] = 4;
+ return (0);
+}
+
+char *Load_Subread(HITS_DB *db, int i, int beg, int end, char *read, int ascii)
+{ FILE *bases = (FILE *) db->bases;
+ int64 off;
+ int len, clen;
+ int bbeg, bend;
+ HITS_READ *r = db->reads;
+
+ if (i >= db->nreads)
+ { EPRINTF(EPLACE,"%s: Index out of bounds (Load_Read)\n",Prog_Name);
+ EXIT(NULL);
+ }
+ if (bases == NULL)
+ { bases = Fopen(Catenate(db->path,"","",".bps"),"r");
+ if (bases == NULL)
+ EXIT(NULL);
+ db->bases = (void *) bases;
+ }
+
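+ // Convert base coordinates into byte coordinates of the 2-bit packed .bps encoding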
+ bbeg = beg/4;
+ bend = (end-1)/4+1;
+
+ off = r[i].boff + bbeg;
+ len = end - beg;
+
+ if (ftello(bases) != off)
+ fseeko(bases,off,SEEK_SET);
+ clen = bend-bbeg;
+ if (clen > 0)
+ { if (fread(read,clen,1,bases) != 1)
+ { EPRINTF(EPLACE,"%s: Failed read of .bps file (Load_Read)\n",Prog_Name);
+ EXIT(NULL);
+ }
+ }
+ Uncompress_Read(4*clen,read);
+ read += beg%4;
+ read[len] = 4;
+ if (ascii == 1)
+ { Lower_Read(read);
+ read[-1] = '\0';
+ }
+ else if (ascii == 2)
+ { Upper_Read(read);
+ read[-1] = '\0';
+ }
+ else
+ read[-1] = 4;
+
+ return (read);
+}
+
+
+/*******************************************************************************************
+ *
+ * QV BUFFER ALLOCATION QV READ ACCESS
+ *
+ ********************************************************************************************/
+
+// Allocate and return a buffer of 5 vectors big enough for the largest read in 'db'
+
+char **New_QV_Buffer(HITS_DB *db)
+{ char **entry;
+ char *qvs;
+ int i;
+
+ qvs = (char *) Malloc(db->maxlen*5,"Allocating New QV Buffer");
+ entry = (char **) Malloc(sizeof(char *)*5,"Allocating New QV Buffer");
+ if (qvs == NULL || entry == NULL)
+ EXIT(NULL);
+ for (i = 0; i < 5; i++)
+ entry[i] = qvs + i*db->maxlen;
+ return (entry);
+}
+
+// Load into entry the QV streams for the i'th read from db. The parameter ascii applies to
+// the DELTAG stream as described for Load_Read.
+
+int Load_QVentry(HITS_DB *db, int i, char **entry, int ascii)
+{ HITS_READ *reads;
+ FILE *quiva;
+ int rlen;
+
+ if (db != Active_DB)
+ { if (db->tracks == NULL || strcmp(db->tracks->name,".@qvs") != 0)
+ { EPRINTF(EPLACE,"%s: QV's are not loaded (Load_QVentry)\n",Prog_Name);
+ EXIT(1);
+ }
+ Active_QV = (HITS_QV *) db->tracks;
+ Active_DB = db;
+ }
+ if (i >= db->nreads)
+ { EPRINTF(EPLACE,"%s: Index out of bounds (Load_QVentry)\n",Prog_Name);
+ EXIT(1);
+ }
+
+ reads = db->reads;
+ quiva = Active_QV->quiva;
+ rlen = reads[i].rlen;
+
+ fseeko(quiva,reads[i].coff,SEEK_SET);
+ if (Uncompress_Next_QVentry(quiva,entry,Active_QV->coding+Active_QV->table[i],rlen))
+ EXIT(1);
+
+ if (ascii != 1)
+ { char *deltag = entry[1];
+
+ if (ascii != 2)
+ { char x = deltag[rlen];
+ deltag[rlen] = '\0';
+ Number_Read(deltag);
+ deltag[rlen] = x;
+ }
+ else
+ { int j;
+ int u = 'A'-'a';
+
+ for (j = 0; j < rlen; j++)
+ deltag[j] = (char) (deltag[j]+u);
+ }
+ }
+
+ return (0);
+}
+
+
+/*******************************************************************************************
+ *
+ * BLOCK LOAD OF ALL READS (PRIMARILY FOR DALIGNER)
+ *
+ ********************************************************************************************/
+
+// Allocate a block big enough for all the uncompressed sequences, read them into it,
+// reset the 'boff' in each read record to be its in-memory offset, and set the
+// bases pointer to point at the block after closing the bases file. If ascii is
+// non-zero then the reads are converted to ACGT ascii, otherwise the reads are left
+// as numeric strings over 0(A), 1(C), 2(G), and 3(T).
+
+int Read_All_Sequences(HITS_DB *db, int ascii)
+{ FILE *bases;
+ int nreads = db->nreads;
+ HITS_READ *reads = db->reads;
+ void (*translate)(char *s);
+
+ char *seq;
+ int64 o, off;
+ int i, len, clen;
+
+ bases = Fopen(Catenate(db->path,"","",".bps"),"r");
+ if (bases == NULL)
+ EXIT(1);
+
+ seq = (char *) Malloc(db->totlen+nreads+4,"Allocating All Sequence Reads");
+ if (seq == NULL)
+ { fclose(bases);
+ EXIT(1);
+ }
+
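+ // Bases are coded 0..3, so 4 acts as a sentinel; one is placed in front of the first read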
+ *seq++ = 4;
+
+ if (ascii == 1)
+ translate = Lower_Read;
+ else
+ translate = Upper_Read;
+
+ o = 0;
+ for (i = 0; i < nreads; i++)
+ { len = reads[i].rlen;
+ off = reads[i].boff;
+ if (ftello(bases) != off)
+ fseeko(bases,off,SEEK_SET);
+ clen = COMPRESSED_LEN(len);
+ if (clen > 0)
+ { if (fread(seq+o,clen,1,bases) != 1)
+ { EPRINTF(EPLACE,"%s: Read of .bps file failed (Read_All_Sequences)\n",Prog_Name);
+ free(seq);
+ fclose(bases);
+ EXIT(1);
+ }
+ }
+ Uncompress_Read(len,seq+o);
+ if (ascii)
+ translate(seq+o);
+ reads[i].boff = o;
+ o += (len+1);
+ }
+ reads[nreads].boff = o;
+
+ fclose(bases);
+
+ db->bases = (void *) seq;
+ db->loaded = 1;
+
+ return (0);
+}
+
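+// Apply 'actor' to the database stub file (.db or .dam) and to every auxiliary file rooted at the database name (e.g. .idx, .bps, and track files)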
+int List_DB_Files(char *path, void actor(char *path, char *extension))
+{ int status, plen, rlen, dlen;
+ char *root, *pwd, *name;
+ int isdam;
+ DIR *dirp;
+ struct dirent *dp;
+
+ status = 0;
+ pwd = PathTo(path);
+ plen = strlen(path);
+ if (strcmp(path+(plen-4),".dam") == 0)
+ root = Root(path,".dam");
+ else
+ root = Root(path,".db");
+ if (root == NULL || pwd == NULL)
+ { free(pwd);
+ free(root);
+ EXIT(1);
+ }
+
+ rlen = strlen(root); // take the root length only after the NULL check above
+
+ if ((dirp = opendir(pwd)) == NULL)
+ { EPRINTF(EPLACE,"%s: Cannot open directory %s (List_DB_Files)\n",Prog_Name,pwd);
+ status = -1;
+ goto error;
+ }
+
+ isdam = 0;
+ while ((dp = readdir(dirp)) != NULL) // Get case dependent root name (if necessary)
+ { name = dp->d_name;
+ if (strcmp(name,Catenate("","",root,".db")) == 0)
+ break;
+ if (strcmp(name,Catenate("","",root,".dam")) == 0)
+ { isdam = 1;
+ break;
+ }
+ if (strcasecmp(name,Catenate("","",root,".db")) == 0)
+ { strncpy(root,name,rlen);
+ break;
+ }
+ if (strcasecmp(name,Catenate("","",root,".dam")) == 0)
+ { strncpy(root,name,rlen);
+ isdam = 1;
+ break;
+ }
+ }
+ if (dp == NULL)
+ { EPRINTF(EPLACE,"%s: Cannot find %s (List_DB_Files)\n",Prog_Name,pwd);
+ status = -1;
+ closedir(dirp);
+ goto error;
+ }
+
+ if (isdam)
+ actor(Catenate(pwd,"/",root,".dam"),"dam");
+ else
+ actor(Catenate(pwd,"/",root,".db"),"db");
+
+ rewinddir(dirp); // Report each auxiliary file
+ while ((dp = readdir(dirp)) != NULL)
+ { name = dp->d_name;
+ dlen = strlen(name);
+#ifdef HIDE_FILES
+ if (name[0] != '.')
+ continue;
+ dlen -= 1;
+ name += 1;
+#endif
+ if (dlen < rlen+1)
+ continue;
+ if (name[rlen] != '.')
+ continue;
+ if (strncmp(name,root,rlen) != 0)
+ continue;
+ actor(Catenate(pwd,PATHSEP,name,""),name+(rlen+1));
+ }
+ closedir(dirp);
+
+error:
+ free(pwd);
+ free(root);
+ return (status);
+}
+
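+// Print a read 'width' symbols per line, handling both the numeric encoding (0..3, terminated by 4) and the ASCII encoding (NUL-terminated)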
+void Print_Read(char *s, int width)
+{ int i;
+
+ if (s[0] < 4)
+ { for (i = 0; s[i] != 4; i++)
+ { if (i%width == 0 && i != 0)
+ printf("\n");
+ printf("%d",s[i]);
+ }
+ printf("\n");
+ }
+ else
+ { for (i = 0; s[i] != '\0'; i++)
+ { if (i%width == 0 && i != 0)
+ printf("\n");
+ printf("%c",s[i]);
+ }
+ printf("\n");
+ }
+}
diff --git a/src/lib/DW_banded.c b/src/lib/DW_banded.c
new file mode 100755
index 0000000..f3f9ecc
--- /dev/null
+++ b/src/lib/DW_banded.c
@@ -0,0 +1,319 @@
+
+/*
+ * =====================================================================================
+ *
+ * Filename: DW_banded.c
+ *
+ * Description: A banded version of the O(ND) greedy sequence alignment algorithm
+ *
+ * Version: 0.1
+ * Created: 07/20/2013 17:00:00
+ * Revision: none
+ * Compiler: gcc
+ *
+ * Author: Jason Chin,
+ * Company:
+ *
+ * =====================================================================================
+
+ #################################################################################$$
+ # Copyright (c) 2011-2014, Pacific Biosciences of California, Inc.
+ #
+ # All rights reserved.
+ #
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted (subject to the limitations in the
+ # disclaimer below) provided that the following conditions are met:
+ #
+ # * Redistributions of source code must retain the above copyright
+ # notice, this list of conditions and the following disclaimer.
+ #
+ # * Redistributions in binary form must reproduce the above
+ # copyright notice, this list of conditions and the following
+ # disclaimer in the documentation and/or other materials provided
+ # with the distribution.
+ #
+ # * Neither the name of Pacific Biosciences nor the names of its
+ # contributors may be used to endorse or promote products derived
+ # from this software without specific prior written permission.
+ #
+ # NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+ # GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC
+ # BIOSCIENCES AND ITS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+ # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ # DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR ITS
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ # SUCH DAMAGE.
+ #################################################################################$$
+
+
+*/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <limits.h>
+#include <stdbool.h>
+#include "common.h"
+
+int compare_d_path(const void * a, const void * b)
+{
+ const d_path_data2 * arg1 = a;
+ const d_path_data2 * arg2 = b;
+ if (arg1->d - arg2->d == 0) {
+ return arg1->k - arg2->k;
+ } else {
+ return arg1->d - arg2->d;
+ }
+}
+
+
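+// Sort the collected wavefront records by (d, k) so individual records can later be located with bsearch during backtracking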
+void d_path_sort( d_path_data2 * base, unsigned long max_idx) {
+ qsort(base, max_idx, sizeof(d_path_data2), compare_d_path);
+}
+
+d_path_data2 * get_dpath_idx( seq_coor_t d, seq_coor_t k, unsigned long max_idx, d_path_data2 * base) {
+ d_path_data2 d_tmp;
+ d_path_data2 *rtn;
+ d_tmp.d = d;
+ d_tmp.k = k;
+ rtn = (d_path_data2 *) bsearch( &d_tmp, base, max_idx, sizeof(d_path_data2), compare_d_path);
+ //printf("dp %ld %ld %ld %ld %ld %ld %ld\n", (rtn)->d, (rtn)->k, (rtn)->x1, (rtn)->y1, (rtn)->x2, (rtn)->y2, (rtn)->pre_k);
+
+ return rtn;
+
+}
+
+void print_d_path( d_path_data2 * base, unsigned long max_idx) {
+ unsigned long idx;
+ for (idx = 0; idx < max_idx; idx++){
+ printf("dp %ld %d %d %d %d %d %d %d\n",idx, (base+idx)->d, (base+idx)->k, (base+idx)->x1, (base+idx)->y1, (base+idx)->x2, (base+idx)->y2, (base+idx)->pre_k);
+ }
+}
+
+
+alignment *_align(char *query_seq, seq_coor_t q_len,
+ char *target_seq, seq_coor_t t_len,
+ seq_coor_t band_tolerance,
+ int get_aln_str) {
+ seq_coor_t * V;
+ seq_coor_t * U; // array of matched bases for each "k"
+ seq_coor_t k_offset;
+ seq_coor_t d;
+ seq_coor_t k, k2;
+ seq_coor_t best_m; // the best "matches" for each d
+ seq_coor_t min_k, new_min_k;
+ seq_coor_t max_k, new_max_k;
+ seq_coor_t pre_k;
+ seq_coor_t x, y;
+ seq_coor_t cd;
+ seq_coor_t ck;
+ seq_coor_t cx, cy, nx, ny;
+ seq_coor_t max_d;
+ seq_coor_t band_size;
+ unsigned long d_path_idx = 0;
+ unsigned long max_idx = 0;
+
+ d_path_data2 * d_path;
+ d_path_data2 * d_path_aux;
+ path_point * aln_path;
+ seq_coor_t aln_path_idx;
+ alignment * align_rtn;
+ seq_coor_t aln_pos;
+ seq_coor_t i;
+ bool aligned = false;
+
+ //printf("debug: %ld %ld\n", q_len, t_len);
+ //printf("%s\n", query_seq);
+
+ max_d = (int) (0.3*(q_len + t_len));
+
+ band_size = band_tolerance * 2;
+
+ V = calloc( max_d * 2 + 1, sizeof(seq_coor_t) );
+ U = calloc( max_d * 2 + 1, sizeof(seq_coor_t) );
+
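+ // Diagonals k range over [-max_d, max_d]; k_offset shifts them into the non-negative index range of V and U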
+ k_offset = max_d;
+
+ // We should probably use a hashmap to store the backtracking information to save memory allocation time.
+ // This O(MN) block allocation scheme is convenient for now, but it is slower for very long sequences.
+ d_path = calloc( max_d * (band_size + 1 ) * 2 + 1, sizeof(d_path_data2) );
+
+ aln_path = calloc( q_len + t_len + 1, sizeof(path_point) );
+
+ align_rtn = calloc( 1, sizeof(alignment));
+ align_rtn->t_aln_str = calloc( q_len + t_len + 1, sizeof(char));
+ align_rtn->q_aln_str = calloc( q_len + t_len + 1, sizeof(char));
+ align_rtn->aln_str_size = 0;
+ align_rtn->aln_q_s = 0;
+ align_rtn->aln_q_e = 0;
+ align_rtn->aln_t_s = 0;
+ align_rtn->aln_t_e = 0;
+
+ //printf("max_d: %lu, band_size: %lu\n", max_d, band_size);
+ best_m = -1;
+ min_k = 0;
+ max_k = 0;
+ d_path_idx = 0;
+ max_idx = 0;
+ for (d = 0; d < max_d; d ++ ) {
+ if (max_k - min_k > band_size) {
+ break;
+ }
+
+ for (k = min_k; k <= max_k; k += 2) {
+
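+ // Greedy step: extend from whichever predecessor diagonal (k-1 or k+1) has the furthest-reaching point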
+ if ( (k == min_k) || ((k != max_k) && (V[ k - 1 + k_offset ] < V[ k + 1 + k_offset])) ) {
+ pre_k = k + 1;
+ x = V[ k + 1 + k_offset];
+ } else {
+ pre_k = k - 1;
+ x = V[ k - 1 + k_offset] + 1;
+ }
+ y = x - k;
+ d_path[d_path_idx].d = d;
+ d_path[d_path_idx].k = k;
+ d_path[d_path_idx].x1 = x;
+ d_path[d_path_idx].y1 = y;
+
+ while ( x < q_len && y < t_len && query_seq[x] == target_seq[y] ){
+ x++;
+ y++;
+ }
+
+ d_path[d_path_idx].x2 = x;
+ d_path[d_path_idx].y2 = y;
+ d_path[d_path_idx].pre_k = pre_k;
+ d_path_idx ++;
+
+ V[ k + k_offset ] = x;
+ U[ k + k_offset ] = x + y;
+
+ if ( x + y > best_m) {
+ best_m = x + y;
+ }
+
+ if ( x >= q_len || y >= t_len) {
+ aligned = true;
+ max_idx = d_path_idx;
+ break;
+ }
+ }
+
+ // For banding
+ new_min_k = max_k;
+ new_max_k = min_k;
+
+ for (k2 = min_k; k2 <= max_k; k2 += 2) {
+ if (U[ k2 + k_offset] >= best_m - band_tolerance ) {
+ if ( k2 < new_min_k ) {
+ new_min_k = k2;
+ }
+ if ( k2 > new_max_k ) {
+ new_max_k = k2;
+ }
+ }
+ }
+
+ max_k = new_max_k + 1;
+ min_k = new_min_k - 1;
+
+ // For no banding
+ // max_k ++;
+ // min_k --;
+
+ // For debugging
+ // printf("min_max_k,d, %ld %ld %ld\n", min_k, max_k, d);
+
+ if (aligned == true) {
+ align_rtn->aln_q_e = x;
+ align_rtn->aln_t_e = y;
+ align_rtn->dist = d;
+ align_rtn->aln_str_size = (x + y + d) / 2;
+ align_rtn->aln_q_s = 0;
+ align_rtn->aln_t_s = 0;
+
+ d_path_sort(d_path, max_idx);
+ //print_d_path(d_path, max_idx);
+
+ if (get_aln_str > 0) {
+ cd = d;
+ ck = k;
+ aln_path_idx = 0;
+ while (cd >= 0 && aln_path_idx < q_len + t_len + 1) {
+ d_path_aux = (d_path_data2 *) get_dpath_idx( cd, ck, max_idx, d_path);
+ aln_path[aln_path_idx].x = d_path_aux -> x2;
+ aln_path[aln_path_idx].y = d_path_aux -> y2;
+ aln_path_idx ++;
+ aln_path[aln_path_idx].x = d_path_aux -> x1;
+ aln_path[aln_path_idx].y = d_path_aux -> y1;
+ aln_path_idx ++;
+ ck = d_path_aux -> pre_k;
+ cd -= 1;
+ }
+ aln_path_idx --;
+ cx = aln_path[aln_path_idx].x;
+ cy = aln_path[aln_path_idx].y;
+ align_rtn->aln_q_s = cx;
+ align_rtn->aln_t_s = cy;
+ aln_pos = 0;
+ while ( aln_path_idx > 0 ) {
+ aln_path_idx --;
+ nx = aln_path[aln_path_idx].x;
+ ny = aln_path[aln_path_idx].y;
+ if (cx == nx && cy == ny){
+ continue;
+ }
+ if (nx == cx && ny != cy){ //advance in y
+ for (i = 0; i < ny - cy; i++) {
+ align_rtn->q_aln_str[aln_pos + i] = '-';
+ }
+ for (i = 0; i < ny - cy; i++) {
+ align_rtn->t_aln_str[aln_pos + i] = target_seq[cy + i];
+ }
+ aln_pos += ny - cy;
+ } else if (nx != cx && ny == cy){ //advance in x
+ for (i = 0; i < nx - cx; i++) {
+ align_rtn->q_aln_str[aln_pos + i] = query_seq[cx + i];
+ }
+ for (i = 0; i < nx - cx; i++) {
+ align_rtn->t_aln_str[aln_pos + i] = '-';
+ }
+ aln_pos += nx - cx;
+ } else {
+ for (i = 0; i < nx - cx; i++) {
+ align_rtn->q_aln_str[aln_pos + i] = query_seq[cx + i];
+ }
+ for (i = 0; i < ny - cy; i++) {
+ align_rtn->t_aln_str[aln_pos + i] = target_seq[cy + i];
+ }
+ aln_pos += ny - cy;
+ }
+ cx = nx;
+ cy = ny;
+ }
+ align_rtn->aln_str_size = aln_pos;
+ }
+ break;
+ }
+ }
+
+ free(V);
+ free(U);
+ free(d_path);
+ free(aln_path);
+ return align_rtn;
+}
+
+
+void free_alignment(alignment * aln) {
+ free(aln->q_aln_str);
+ free(aln->t_aln_str);
+ free(aln);
+}
diff --git a/src/lib/INIReader.cpp b/src/lib/INIReader.cpp
new file mode 100644
index 0000000..f243772
--- /dev/null
+++ b/src/lib/INIReader.cpp
@@ -0,0 +1,81 @@
+// Read an INI file into easy-to-access name/value pairs.
+
+// inih and INIReader are released under the New BSD license (see LICENSE.txt).
+// Go to the project home page for more info:
+//
+// https://github.com/benhoyt/inih
+
+#include <algorithm>
+#include <cctype>
+#include <cstdlib>
+#include "ini.h"
+#include "INIReader.h"
+
+using std::string;
+
+INIReader::INIReader(string filename)
+{
+ _error = ini_parse(filename.c_str(), ValueHandler, this);
+}
+
+int INIReader::ParseError()
+{
+ return _error;
+}
+
+string INIReader::Get(string section, string name, string default_value)
+{
+ string key = MakeKey(section, name);
+ return _values.count(key) ? _values[key] : default_value;
+}
+
+long INIReader::GetInteger(string section, string name, long default_value)
+{
+ string valstr = Get(section, name, "");
+ const char* value = valstr.c_str();
+ char* end;
+ // This parses "1234" (decimal) and also "0x4D2" (hex)
+ long n = strtol(value, &end, 0);
+ return end > value ? n : default_value;
+}
+
+double INIReader::GetReal(string section, string name, double default_value)
+{
+ string valstr = Get(section, name, "");
+ const char* value = valstr.c_str();
+ char* end;
+ double n = strtod(value, &end);
+ return end > value ? n : default_value;
+}
+
+bool INIReader::GetBoolean(string section, string name, bool default_value)
+{
+ string valstr = Get(section, name, "");
+ // Convert to lower case to make string comparisons case-insensitive
+ std::transform(valstr.begin(), valstr.end(), valstr.begin(), ::tolower);
+ if (valstr == "true" || valstr == "yes" || valstr == "on" || valstr == "1")
+ return true;
+ else if (valstr == "false" || valstr == "no" || valstr == "off" || valstr == "0")
+ return false;
+ else
+ return default_value;
+}
+
+string INIReader::MakeKey(string section, string name)
+{
+ string key = section + "=" + name;
+ // Convert to lower case to make section/name lookups case-insensitive
+ std::transform(key.begin(), key.end(), key.begin(), ::tolower);
+ return key;
+}
+
+int INIReader::ValueHandler(void* user, const char* section, const char* name,
+ const char* value)
+{
+ INIReader* reader = (INIReader*)user;
+ string key = MakeKey(section, name);
+ if (reader->_values[key].size() > 0)
+ reader->_values[key] += "\n";
+ reader->_values[key] += value;
+ return 1;
+}
diff --git a/src/lib/LAInterface.cpp b/src/lib/LAInterface.cpp
new file mode 100644
index 0000000..57e1d1f
--- /dev/null
+++ b/src/lib/LAInterface.cpp
@@ -0,0 +1,4794 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sstream>
+#include <iostream>
+#include <algorithm>
+#include <unordered_map>
+#include <tgmath.h>
+#include <iomanip>
+#include <fstream>
+#include <zlib.h>
+
+#include "LAInterface.h"
+#include "align.h"
+#include "DB.h"
+#include "paf.h"
+#include "kseq.h"
+
+void Read::showRead() {
+ std::cout << "read #" << id << std::endl;
+ std::cout << ">" << name << std::endl;
+ std::cout << bases << std::endl;
+}
+
+
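+// Open two Dazzler databases, trim them, and read each stub (.db) file to record the per-file read counts (findx/findx2) and prologs (flist/flist2) used to reconstruct read names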
+int LAInterface::openDB2(std::string filename, std::string filename2) {
+ char *fn = new char[filename.length() + 1];
+ strcpy(fn, filename.c_str());
+
+ char *fn2 = new char[filename2.length() + 1];
+ strcpy(fn2, filename2.c_str());
+
+ int status = Open_DB(fn, this->db1);
+ if (status < 0)
+ exit(1);
+ if (this->db1->part > 0) {
+ fprintf(stderr, "%s: Cannot be called on a block: %s\n", "test", fn);
+ exit(1);
+ }
+
+
+ status = Open_DB(fn2, this->db2);
+ if (status < 0)
+ exit(1);
+ if (this->db2->part > 0) {
+ fprintf(stderr, "%s: Cannot be called on a block: %s\n", "test", fn);
+ exit(1);
+ }
+
+ Trim_DB(db1);
+ Trim_DB(db2);
+
+
+
+ char *fn_1 = new char[filename.length() + 1 + 3];
+ strcpy(fn_1, fn);
+ strcat(fn_1, ".db");
+
+ FILE * dstub = Fopen(fn_1, "r");
+ if (dstub == NULL)
+ exit(1);
+
+ if (fscanf(dstub, DB_NFILE, &nfiles) != 1) SYSTEM_ERROR
+
+ printf("%d files\n", nfiles);
+
+ flist = (char **) Malloc(sizeof(char *) * nfiles, "Allocating file list");
+ findx = (int *) Malloc(sizeof(int *) * (nfiles + 1), "Allocating file index");
+
+ if (flist == NULL || findx == NULL)
+ exit(1);
+
+ findx += 1;
+ findx[-1] = 0;
+
+ for (int i = 0; i < nfiles; i++) {
+ char prolog[MAX_NAME], fname[MAX_NAME];
+
+ if (fscanf(dstub, DB_FDATA, findx + i, fname, prolog) != 3) SYSTEM_ERROR
+ if ((flist[i] = Strdup(prolog, "Adding to file list")) == NULL)
+ exit(1);
+ }
+
+ fclose(dstub);
+
+
+
+ char *fn_2 = new char[filename2.length() + 1 + 3];
+ strcpy(fn_2, fn2);
+ strcat(fn_2, ".db");
+
+ dstub = Fopen(fn_2, "r");
+ if (dstub == NULL)
+ exit(1);
+
+ if (fscanf(dstub, DB_NFILE, &nfiles2) != 1) SYSTEM_ERROR
+
+ printf("%d files\n", nfiles2);
+
+ flist2 = (char **) Malloc(sizeof(char *) * nfiles2, "Allocating file list");
+ findx2 = (int *) Malloc(sizeof(int *) * (nfiles2 + 1), "Allocating file index");
+
+ if (flist2 == NULL || findx2 == NULL)
+ exit(1);
+
+ findx2 += 1;
+ findx2[-1] = 0;
+
+ for (int i = 0; i < nfiles2; i++) {
+ char prolog[MAX_NAME], fname[MAX_NAME];
+
+ if (fscanf(dstub, DB_FDATA, findx2 + i, fname, prolog) != 3) SYSTEM_ERROR
+ if ((flist2[i] = Strdup(prolog, "Adding to file list")) == NULL)
+ exit(1);
+ }
+
+ fclose(dstub);
+
+
+ delete [] fn;
+ delete [] fn2;
+ delete [] fn_1;
+ delete [] fn_2;
+ return 0;
+}
+
+
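+// Single-database variant of openDB2: db2 is aliased to db1 so A- and B-read lookups refer to the same database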
+int LAInterface::openDB(std::string filename) {
+ char *fn = new char[filename.length() + 1];
+ strcpy(fn, filename.c_str());
+
+ int status = Open_DB(fn, this->db1);
+ if (status < 0)
+ exit(1);
+ if (this->db1->part > 0) {
+ fprintf(stderr, "%s: Cannot be called on a block: %s\n", "test", fn);
+ exit(1);
+ }
+
+ this->db2 = this->db1;
+ Trim_DB(db1);
+
+ FILE *dstub;
+
+ char *fn2 = new char[filename.length() + 1 + 3];
+ strcpy(fn2, fn);
+ strcat(fn2, ".db");
+
+ dstub = Fopen(fn2, "r");
+ if (dstub == NULL)
+ exit(1);
+
+ if (fscanf(dstub, DB_NFILE, &nfiles) != 1) SYSTEM_ERROR
+
+ //printf("%d files\n", nfiles);
+
+ flist = (char **) Malloc(sizeof(char *) * nfiles, "Allocating file list");
+ findx = (int *) Malloc(sizeof(int *) * (nfiles + 1), "Allocating file index");
+
+ if (flist == NULL || findx == NULL)
+ exit(1);
+
+ findx += 1;
+ findx[-1] = 0;
+
+ for (int i = 0; i < nfiles; i++) {
+ char prolog[MAX_NAME], fname[MAX_NAME];
+
+ if (fscanf(dstub, DB_FDATA, findx + i, fname, prolog) != 3) SYSTEM_ERROR
+ if ((flist[i] = Strdup(prolog, "Adding to file list")) == NULL)
+ exit(1);
+ }
+
+ fclose(dstub);
+
+ delete[] fn;
+
+
+ return 0;
+}
+
+int LAInterface::closeDB() {
+ Close_DB(db1);
+ return 0;
+}
+
+
+int LAInterface::closeDB2() {
+ Close_DB(db1);
+ Close_DB(db2);
+ return 0;
+}
+
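+// Print reads [from, to) of db1 in FASTA-like form; bases covered by a track interval are uppercased for emphasis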
+void LAInterface::showRead(int from, int to) {
+ if (flist == NULL || findx == NULL)
+ exit(1);
+ HITS_READ *reads;
+ HITS_TRACK *first;
+ char *read, **entry;
+ int c, b, e, i;
+ int hilight, substr;
+ int map;
+ int (*iscase)(int);
+
+ read = New_Read_Buffer(db1);
+ int UPPER = 1;
+ int WIDTH = 80;
+ //printf("2");
+ {
+ entry = NULL;
+ first = db1->tracks;
+ }
+
+
+ hilight = 'A' - 'a';
+ iscase = islower;
+
+ map = 0;
+ reads = db1->reads;
+ substr = 0;
+
+ c = 0;
+
+ b = from;
+ e = to;
+
+ for (i = b; i < e; i++) {
+ int len;
+ int fst, lst;
+ int flags, qv;
+ HITS_READ *r;
+ HITS_TRACK *track;
+
+ r = reads + i;
+ len = r->rlen;
+
+ flags = r->flags;
+ qv = (flags & DB_QV);
+
+ {
+ while (i < findx[map - 1])
+ map -= 1;
+ while (i >= findx[map])
+ map += 1;
+ printf(">%s/%d/%d_%d", flist[map], r->origin, r->fpulse, r->fpulse + len);
+ if (qv > 0)
+ printf(" RQ=0.%3d", qv);
+ }
+ printf("\n");
+
+
+ Load_Read(db1, i, read, UPPER);
+
+ for (track = first; track != NULL; track = track->next) {
+ int64 *anno;
+ int *data;
+ int64 s, f, j;
+ int bd, ed, m;
+
+ anno = (int64 *) track->anno;
+ data = (int *) track->data;
+
+ s = (anno[i] >> 2);
+ f = (anno[i + 1] >> 2);
+ if (s < f) {
+ for (j = s; j < f; j += 2) {
+ bd = data[j];
+ ed = data[j + 1];
+ for (m = bd; m < ed; m++)
+ if (iscase(read[m]))
+ read[m] = (char) (read[m] + hilight);
+ if (j == s)
+ printf("> %s:", track->name);
+ printf(" [%d,%d]", bd, ed);
+ }
+ printf("\n");
+ }
+ }
+
+
+ fst = 0;
+ lst = len;
+
+ {
+ int j;
+
+ for (j = fst; j + WIDTH < lst; j += WIDTH)
+ printf("%.*s\n", WIDTH, read + j);
+ if (j < lst)
+ printf("%.*s\n", lst - j, read + j);
+ }
+
+ }
+}
+
+
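+// Same as showRead, but reads come from the second database (db2)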
+void LAInterface::showRead2(int from, int to) {
+ if (flist2 == NULL || findx2 == NULL)
+ exit(1);
+ HITS_READ *reads;
+ HITS_TRACK *first;
+ char *read, **entry;
+ int c, b, e, i;
+ int hilight, substr;
+ int map;
+ int (*iscase)(int);
+
+ read = New_Read_Buffer(db2);
+ int UPPER = 1;
+ int WIDTH = 80;
+ //printf("2");
+ {
+ entry = NULL;
+ first = db2->tracks;
+ }
+
+
+ hilight = 'A' - 'a';
+ iscase = islower;
+
+ map = 0;
+ reads = db2->reads;
+ substr = 0;
+
+ c = 0;
+
+ b = from;
+ e = to;
+
+ for (i = b; i < e; i++) {
+ int len;
+ int fst, lst;
+ int flags, qv;
+ HITS_READ *r;
+ HITS_TRACK *track;
+
+ r = reads + i;
+ len = r->rlen;
+
+ flags = r->flags;
+ qv = (flags & DB_QV);
+
+ {
+ while (i < findx2[map - 1]) // use the second DB's file index and prolog tables here
+ map -= 1;
+ while (i >= findx2[map])
+ map += 1;
+ printf(">%s/%d/%d_%d", flist2[map], r->origin, r->fpulse, r->fpulse + len);
+ if (qv > 0)
+ printf(" RQ=0.%3d", qv);
+ }
+ printf("\n");
+
+
+ Load_Read(db2, i, read, UPPER);
+
+ for (track = first; track != NULL; track = track->next) {
+ int64 *anno;
+ int *data;
+ int64 s, f, j;
+ int bd, ed, m;
+
+ anno = (int64 *) track->anno;
+ data = (int *) track->data;
+
+ s = (anno[i] >> 2);
+ f = (anno[i + 1] >> 2);
+ if (s < f) {
+ for (j = s; j < f; j += 2) {
+ bd = data[j];
+ ed = data[j + 1];
+ for (m = bd; m < ed; m++)
+ if (iscase(read[m]))
+ read[m] = (char) (read[m] + hilight);
+ if (j == s)
+ printf("> %s:", track->name);
+ printf(" [%d,%d]", bd, ed);
+ }
+ printf("\n");
+ }
+ }
+
+
+ fst = 0;
+ lst = len;
+
+ {
+ int j;
+
+ for (j = fst; j + WIDTH < lst; j += WIDTH)
+ printf("%.*s\n", WIDTH, read + j);
+ if (j < lst)
+ printf("%.*s\n", lst - j, read + j);
+ }
+
+ }
+}
+
+
+
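+// Construct a Read object (id, name, bases) for read 'number' of db1; any track intervals found are printed as a side effect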
+Read *LAInterface::getRead(int number) {
+
+ std::stringstream ss;
+ std::string read_name;
+ std::string read_bases;
+ if (flist == NULL || findx == NULL)
+ exit(1);
+ HITS_READ *reads;
+ HITS_TRACK *first;
+ char *read, **entry;
+ int c, b, e, i;
+ int hilight, substr;
+ int map;
+ int (*iscase)(int);
+ read = New_Read_Buffer(db1);
+ int UPPER = 1;
+ int WIDTH = 80;
+ //printf("2");
+ entry = NULL;
+ first = db1->tracks;
+ hilight = 'A' - 'a';
+ iscase = islower; // was left uninitialized; the track-highlighting loop below calls iscase()
+
+ map = 0;
+ reads = db1->reads;
+ substr = 0;
+
+ c = 0;
+
+ b = number;
+ e = number + 1;
+
+ for (i = b; i < e; i++) {
+ int len;
+ int fst, lst;
+ int flags, qv;
+ HITS_READ *r;
+ HITS_TRACK *track;
+
+ r = reads + i;
+ len = r->rlen;
+
+ flags = r->flags;
+ qv = (flags & DB_QV);
+
+ {
+ while (i < findx[map - 1])
+ map -= 1;
+ while (i >= findx[map])
+ map += 1;
+ ss << flist[map] << '/' << r->origin << '/' << r->fpulse << '_' << r->fpulse + len;
+ if (qv > 0)
+ ss << "RQ=" << qv;
+ }
+
+ ss >> read_name;
+
+ Load_Read(db1, i, read, UPPER);
+
+ for (track = first; track != NULL; track = track->next) {
+ int64 *anno;
+ int *data;
+ int64 s, f, j;
+ int bd, ed, m;
+
+ anno = (int64 *) track->anno;
+ data = (int *) track->data;
+
+ s = (anno[i] >> 2);
+ f = (anno[i + 1] >> 2);
+ if (s < f) {
+ for (j = s; j < f; j += 2) {
+ bd = data[j];
+ ed = data[j + 1];
+ for (m = bd; m < ed; m++)
+ if (iscase(read[m]))
+ read[m] = (char) (read[m] + hilight);
+ if (j == s)
+ printf("> %s:", track->name);
+ printf(" [%d,%d]", bd, ed);
+ }
+ printf("\n");
+ }
+ }
+
+ read_bases = std::string(read);
+ fst = 0;
+ lst = len;
+
+
+ }
+ Read *new_r = new Read(number, read_name, read_bases);
+ return new_r;
+}
+
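+// Same as getRead, but fetches the read from the second database (db2)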
+Read *LAInterface::getRead2(int number) {
+
+ std::stringstream ss;
+ std::string read_name;
+ std::string read_bases;
+ if (flist2 == NULL || findx2 == NULL)
+ exit(1);
+ HITS_READ *reads;
+ HITS_TRACK *first;
+ char *read, **entry;
+ int c, b, e, i;
+ int hilight, substr;
+ int map;
+ int (*iscase)(int);
+ read = New_Read_Buffer(db2);
+ int UPPER = 1;
+ int WIDTH = 80;
+ //printf("2");
+ entry = NULL;
+ first = db2->tracks;
+ hilight = 'A' - 'a';
+ iscase = islower; // was left uninitialized; the track-highlighting loop below calls iscase()
+
+ map = 0;
+ reads = db2->reads;
+ substr = 0;
+
+ c = 0;
+
+ b = number;
+ e = number + 1;
+
+ for (i = b; i < e; i++) {
+ int len;
+ int fst, lst;
+ int flags, qv;
+ HITS_READ *r;
+ HITS_TRACK *track;
+
+ r = reads + i;
+ len = r->rlen;
+
+ flags = r->flags;
+ qv = (flags & DB_QV);
+
+ {
+ while (i < findx2[map - 1])
+ map -= 1;
+ while (i >= findx2[map])
+ map += 1;
+ ss << flist2[map] << '/' << r->origin << '/' << r->fpulse << '_' << r->fpulse + len;
+ if (qv > 0)
+ ss << "RQ=" << qv;
+ }
+
+ ss >> read_name;
+
+ Load_Read(db2, i, read, UPPER);
+
+ for (track = first; track != NULL; track = track->next) {
+ int64 *anno;
+ int *data;
+ int64 s, f, j;
+ int bd, ed, m;
+
+ anno = (int64 *) track->anno;
+ data = (int *) track->data;
+
+ s = (anno[i] >> 2);
+ f = (anno[i + 1] >> 2);
+ if (s < f) {
+ for (j = s; j < f; j += 2) {
+ bd = data[j];
+ ed = data[j + 1];
+ for (m = bd; m < ed; m++)
+ if (iscase(read[m]))
+ read[m] = (char) (read[m] + hilight);
+ if (j == s)
+ printf("> %s:", track->name);
+ printf(" [%d,%d]", bd, ed);
+ }
+ printf("\n");
+ }
+ }
+
+ read_bases = std::string(read);
+ fst = 0;
+ lst = len;
+
+
+ }
+ Read *new_r = new Read(number, read_name, read_bases);
+ return new_r;
+}
+
+
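+// Open a .las alignment file and read its header: the overlap count (novl) and trace-point spacing (tspace), which decides whether trace points are stored as 8-bit or 16-bit values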
+int LAInterface::openAlignmentFile(std::string filename) {
+
+ char *fn = new char[filename.size() + 1];
+ strcpy(fn, filename.c_str());
+
+ input = Fopen(fn, "r");
+ if (input == NULL)
+ exit(1);
+
+ if (fread(&novl, sizeof(int64), 1, input) != 1) SYSTEM_ERROR
+ if (fread(&tspace, sizeof(int), 1, input) != 1) SYSTEM_ERROR
+
+ if (tspace <= TRACE_XOVR) {
+ small = 1;
+ tbytes = sizeof(uint8);
+ }
+ else {
+ small = 0;
+ tbytes = sizeof(uint16);
+ }
+
+ //printf("\n%s: ", fn);
+ //Print_Number(novl, 0, stdout);
+ //printf(" records\n");
+
+ return 0;
+}
+
+
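+// Print the overlaps whose A-read id lies in [from, to): read ids, coordinates, diff counts, and (since ALIGN is set) the base-level alignment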
+void LAInterface::showAlignment(int from, int to) {
+ int j;
+ uint16 *trace;
+ Work_Data *work;
+ int tmax;
+ int in, npt, idx, ar;
+ int64 tps;
+ char *abuffer, *bbuffer;
+ int ar_wide, br_wide;
+ int ai_wide, bi_wide;
+ int mn_wide, mx_wide;
+ int tp_wide;
+ int blast, match, seen, lhalf, rhalf;
+ bool ALIGN = true;
+ bool REFERENCE = false;
+ bool CARTOON = false;
+ bool OVERLAP = true;
+ bool FLIP = false;
+ bool UPPERCASE = false;
+ bool MAP = false;
+ int INDENT = 4;
+ int WIDTH = 100;
+ int BORDER = 10;
+
+ aln->path = &(ovl->path);
+ if (ALIGN || REFERENCE) {
+ work = New_Work_Data();
+ abuffer = New_Read_Buffer(db1);
+ bbuffer = New_Read_Buffer(db2);
+ }
+ else {
+ abuffer = NULL;
+ bbuffer = NULL;
+ work = NULL;
+ }
+
+ tmax = 1000;
+ trace = (uint16 *) Malloc(sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ in = 0;
+
+ //if (pts!=NULL) free(pts);
+ //pts = NULL;
+ pts = new int[4];
+ pts[0] = from + 1;
+ pts[1] = to;
+ pts[2] = INT32_MAX;
+
+ npt = pts[0];
+ idx = 1;
+
+ ar_wide = Number_Digits((int64) db1->nreads);
+ br_wide = Number_Digits((int64) db2->nreads);
+ ai_wide = Number_Digits((int64) db1->maxlen);
+ bi_wide = Number_Digits((int64) db2->maxlen);
+ if (db1->maxlen < db2->maxlen) {
+ mn_wide = ai_wide;
+ mx_wide = bi_wide;
+ tp_wide = Number_Digits((int64) db1->maxlen / tspace + 2);
+ }
+ else {
+ mn_wide = bi_wide;
+ mx_wide = ai_wide;
+ tp_wide = Number_Digits((int64) db2->maxlen / tspace + 2);
+ }
+ ar_wide += (ar_wide - 1) / 3;
+ br_wide += (br_wide - 1) / 3;
+ ai_wide += (ai_wide - 1) / 3;
+ bi_wide += (bi_wide - 1) / 3;
+ mn_wide += (mn_wide - 1) / 3;
+ tp_wide += (tp_wide - 1) / 3;
+ if (FLIP) {
+ int x;
+ x = ar_wide;
+ ar_wide = br_wide;
+ br_wide = x;
+ x = ai_wide;
+ ai_wide = bi_wide;
+ bi_wide = x;
+ }
+
+ // For each record do
+
+ blast = -1;
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+
+ for (j = 0; j < novl; j++)
+
+ // Read it in
+
+ {
+ //printf("j:%d/%d\n",j,novl);
+ Read_Overlap(input, ovl);
+ if (ovl->path.tlen > tmax) {
+ tmax = ((int) (1.2 * ovl->path.tlen)) + 100; // cast the product, not the constant 1.2, so the buffer grows ~20% beyond tlen
+ trace = (uint16 *) Realloc(trace, sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ }
+ ovl->path.trace = (void *) trace;
+ Read_Trace(input, ovl, tbytes);
+ // Determine if it should be displayed
+
+ ar = ovl->aread + 1;
+ if (in) {
+ while (ar > npt) {
+ npt = pts[idx++];
+ if (ar < npt) {
+ in = 0;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ else {
+ while (ar >= npt) {
+ npt = pts[idx++];
+ if (ar <= npt) {
+ in = 1;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ if (!in)
+ continue;
+
+ // If OVERLAP is set (LAshow's -o), keep only proper overlaps
+
+ aln->alen = db1->reads[ovl->aread].rlen;
+ aln->blen = db2->reads[ovl->bread].rlen;
+ aln->flags = ovl->flags;
+ tps = ovl->path.tlen / 2;
+
+ if (OVERLAP) {
+ if (ovl->path.abpos != 0 && ovl->path.bbpos != 0)
+ continue;
+ if (ovl->path.aepos != aln->alen && ovl->path.bepos != aln->blen)
+ continue;
+ }
+
+ // If -M option then check the completeness of the implied mapping
+
+ if (MAP) {
+ while (ovl->bread != blast) {
+ if (!match && seen && !(lhalf && rhalf)) {
+ printf("Missing ");
+ Print_Number((int64) blast + 1, br_wide + 1, stdout);
+ printf(" %d ->%lld\n", db2->reads[blast].rlen, db2->reads[blast].coff);
+ }
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+ blast += 1;
+ }
+ seen = 1;
+ if (ovl->path.abpos == 0)
+ rhalf = 1;
+ if (ovl->path.aepos == aln->alen)
+ lhalf = 1;
+ if (ovl->path.bbpos != 0 || ovl->path.bepos != aln->blen)
+ continue;
+ match = 1;
+ }
+
+ // Display it
+
+ if (ALIGN || CARTOON || REFERENCE)
+ printf("\n");
+ if (FLIP) {
+ Flip_Alignment(aln, 0);
+ Print_Number((int64) ovl->bread + 1, ar_wide + 1, stdout);
+ printf(" ");
+ Print_Number((int64) ovl->aread + 1, br_wide + 1, stdout);
+ }
+ else {
+ Print_Number((int64) ovl->aread , ar_wide + 1, stdout);
+ printf(" ");
+ Print_Number((int64) ovl->bread , br_wide + 1, stdout);
+ }
+ if (COMP(ovl->flags))
+ printf(" c");
+ else
+ printf(" n");
+ printf(" [");
+ Print_Number((int64) ovl->path.abpos, ai_wide, stdout);
+ printf("..");
+ Print_Number((int64) ovl->path.aepos, ai_wide, stdout);
+ printf("]%d x [",aln->alen);
+ Print_Number((int64) ovl->path.bbpos, bi_wide, stdout);
+ printf("..");
+ Print_Number((int64) ovl->path.bepos, bi_wide, stdout);
+ printf("]%d", aln->blen);
+
+ if (ALIGN || CARTOON || REFERENCE) {
+ if (ALIGN || REFERENCE) {
+ char *aseq, *bseq;
+ int amin, amax;
+ int bmin, bmax;
+
+ if (FLIP)
+ Flip_Alignment(aln, 0);
+ if (small)
+ Decompress_TraceTo16(ovl);
+
+ amin = ovl->path.abpos - BORDER;
+ if (amin < 0) amin = 0;
+ amax = ovl->path.aepos + BORDER;
+ if (amax > aln->alen) amax = aln->alen;
+ if (COMP(aln->flags)) {
+ bmin = (aln->blen - ovl->path.bepos) - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = (aln->blen - ovl->path.bbpos) + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+ else {
+ bmin = ovl->path.bbpos - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = ovl->path.bepos + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+
+ aseq = Load_Subread(db1, ovl->aread, amin, amax, abuffer, 0);
+ bseq = Load_Subread(db2, ovl->bread, bmin, bmax, bbuffer, 0);
+
+ aln->aseq = aseq - amin;
+ if (COMP(aln->flags)) {
+ Complement_Seq(bseq, bmax - bmin);
+ aln->bseq = bseq - (aln->blen - bmax);
+ }
+ else
+ aln->bseq = bseq - bmin;
+
+ computeTracePTS(aln, work, tspace);
+
+ if (FLIP) {
+ if (COMP(aln->flags)) {
+ Complement_Seq(aseq, amax - amin);
+ Complement_Seq(bseq, bmax - bmin);
+ aln->aseq = aseq - (aln->alen - amax);
+ aln->bseq = bseq - bmin;
+ }
+ Flip_Alignment(aln, 1);
+ }
+ }
+ if (CARTOON) {
+ printf(" (");
+ Print_Number(tps, tp_wide, stdout);
+ printf(" trace pts)\n\n");
+ Alignment_Cartoon(stdout, aln, INDENT, mx_wide);
+ }
+ else {
+ printf(" : = ");
+ Print_Number((int64) ovl->path.diffs, mn_wide, stdout);
+ printf(" diffs (");
+ Print_Number(tps, tp_wide, stdout);
+ printf(" trace pts)\n");
+ }
+ if (REFERENCE)
+ Print_Reference(stdout, aln, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+ if (ALIGN)
+ printAlignment(stdout, aln, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+ }
+ else {
+ printf(" : < ");
+ Print_Number((int64) ovl->path.diffs, mn_wide, stdout);
+ printf(" diffs (");
+ Print_Number(tps, tp_wide, stdout);
+ printf(" trace pts)\n");
+ }
+ }
+
+ free(trace);
+ if (ALIGN) {
+ free(bbuffer - 1);
+ free(abuffer - 1);
+ Free_Work_Data(work);
+ }
+
+
+}
+
+
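+// Collect into 'result' the B-read ids of all proper overlaps whose A-read id equals 'from' (the display machinery is kept but disabled)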
+void LAInterface::getAlignmentB(std::vector<int> &result, int from) {
+
+ int j;
+ uint16 *trace;
+ Work_Data *work;
+ int tmax;
+ int in, npt, idx, ar;
+ int64 tps;
+ char *abuffer, *bbuffer;
+ int ar_wide, br_wide;
+ int ai_wide, bi_wide;
+ int mn_wide, mx_wide;
+ int tp_wide;
+ int blast, match, seen, lhalf, rhalf;
+ bool ALIGN = false;
+ bool REFERENCE = false;
+ bool CARTOON = false;
+ bool OVERLAP = true;
+ bool FLIP = false;
+ bool UPPERCASE = false;
+ bool MAP = false;
+ int INDENT = 4;
+ int WIDTH = 100;
+ int BORDER = 10;
+
+ aln->path = &(ovl->path);
+ if (ALIGN || REFERENCE) {
+ work = New_Work_Data();
+ abuffer = New_Read_Buffer(db1);
+ bbuffer = New_Read_Buffer(db2);
+ }
+ else {
+ abuffer = NULL;
+ bbuffer = NULL;
+ work = NULL;
+ }
+
+ tmax = 1000;
+ trace = (uint16 *) Malloc(sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ in = 0;
+
+ //if (pts!=NULL) free(pts);
+ //pts = NULL;
+ pts = new int[4];
+ pts[0] = from + 1;
+ pts[1] = from + 1;
+ pts[2] = INT32_MAX;
+
+ npt = pts[0];
+ idx = 1;
+
+ ar_wide = Number_Digits((int64) db1->nreads);
+ br_wide = Number_Digits((int64) db2->nreads);
+ ai_wide = Number_Digits((int64) db1->maxlen);
+ bi_wide = Number_Digits((int64) db2->maxlen);
+ if (db1->maxlen < db2->maxlen) {
+ mn_wide = ai_wide;
+ mx_wide = bi_wide;
+ tp_wide = Number_Digits((int64) db1->maxlen / tspace + 2);
+ }
+ else {
+ mn_wide = bi_wide;
+ mx_wide = ai_wide;
+ tp_wide = Number_Digits((int64) db2->maxlen / tspace + 2);
+ }
+ ar_wide += (ar_wide - 1) / 3;
+ br_wide += (br_wide - 1) / 3;
+ ai_wide += (ai_wide - 1) / 3;
+ bi_wide += (bi_wide - 1) / 3;
+ mn_wide += (mn_wide - 1) / 3;
+ tp_wide += (tp_wide - 1) / 3;
+ if (FLIP) {
+ int x;
+ x = ar_wide;
+ ar_wide = br_wide;
+ br_wide = x;
+ x = ai_wide;
+ ai_wide = bi_wide;
+ bi_wide = x;
+ }
+
+ // For each record do
+
+ blast = -1;
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+
+ for (j = 0; j < novl; j++)
+
+ // Read it in
+
+ {
+ //printf("j:%d/%d\n",j,novl);
+ Read_Overlap(input, ovl);
+ if (ovl->path.tlen > tmax) {
+ tmax = ((int) (1.2 * ovl->path.tlen)) + 100; // cast the product, not the constant 1.2, so the buffer grows ~20% beyond tlen
+ trace = (uint16 *) Realloc(trace, sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ }
+ ovl->path.trace = (void *) trace;
+ Read_Trace(input, ovl, tbytes);
+ // Determine if it should be displayed
+
+ ar = ovl->aread + 1;
+ if (in) {
+ while (ar > npt) {
+ npt = pts[idx++];
+ if (ar < npt) {
+ in = 0;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ else {
+ while (ar >= npt) {
+ npt = pts[idx++];
+ if (ar <= npt) {
+ in = 1;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ if (!in)
+ continue;
+
+ // If OVERLAP is set (LAshow's -o), keep only proper overlaps
+
+ aln->alen = db1->reads[ovl->aread].rlen;
+ aln->blen = db2->reads[ovl->bread].rlen;
+ aln->flags = ovl->flags;
+ tps = ovl->path.tlen / 2;
+
+ if (OVERLAP) {
+ if (ovl->path.abpos != 0 && ovl->path.bbpos != 0)
+ continue;
+ if (ovl->path.aepos != aln->alen && ovl->path.bepos != aln->blen)
+ continue;
+ }
+
+ // If -M option then check the completeness of the implied mapping
+
+ if (MAP) {
+ while (ovl->bread != blast) {
+ if (!match && seen && !(lhalf && rhalf)) {
+ printf("Missing ");
+ Print_Number((int64) blast + 1, br_wide + 1, stdout);
+ printf(" %d ->%lld\n", db2->reads[blast].rlen, db2->reads[blast].coff);
+ }
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+ blast += 1;
+ }
+ seen = 1;
+ if (ovl->path.abpos == 0)
+ rhalf = 1;
+ if (ovl->path.aepos == aln->alen)
+ lhalf = 1;
+ if (ovl->path.bbpos != 0 || ovl->path.bepos != aln->blen)
+ continue;
+ match = 1;
+ }
+
+ // Display it
+
+ if (ALIGN || CARTOON || REFERENCE)
+ printf("\n");
+ if (FLIP) {
+ Flip_Alignment(aln, 0);
+ //Print_Number((int64) ovl->bread+1,ar_wide+1,stdout);
+ //printf(" ");
+ //Print_Number((int64) ovl->aread+1,br_wide+1,stdout);
+ }
+ else { //Print_Number((int64) ovl->aread+1,ar_wide+1,stdout);
+ //printf(" ");
+ //Print_Number((int64) ovl->bread+1,br_wide+1,stdout);
+ result.push_back(ovl->bread);
+ }
+ //if (COMP(ovl->reverse_complement_match_))
+ // printf(" c");
+ //else
+ // printf(" n");
+ //printf(" [");
+ //Print_Number((int64) ovl->path.read_A_match_start_,ai_wide,stdout);
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_A_match_end_,ai_wide,stdout);
+ //printf("] x [");
+ //Print_Number((int64) ovl->path.read_B_match_start_,bi_wide,stdout);
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_B_match_end_,bi_wide,stdout);
+ //printf("]");
+
+ if ((ALIGN || CARTOON || REFERENCE) && (false)) {
+ if (ALIGN || REFERENCE) {
+ char *aseq, *bseq;
+ int amin, amax;
+ int bmin, bmax;
+
+ if (FLIP)
+ Flip_Alignment(aln, 0);
+ if (small)
+ Decompress_TraceTo16(ovl);
+
+ amin = ovl->path.abpos - BORDER;
+ if (amin < 0) amin = 0;
+ amax = ovl->path.aepos + BORDER;
+ if (amax > aln->alen) amax = aln->alen;
+ if (COMP(aln->flags)) {
+ bmin = (aln->blen - ovl->path.bepos) - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = (aln->blen - ovl->path.bbpos) + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+ else {
+ bmin = ovl->path.bbpos - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = ovl->path.bepos + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+
+ aseq = Load_Subread(db1, ovl->aread, amin, amax, abuffer, 0);
+ bseq = Load_Subread(db2, ovl->bread, bmin, bmax, bbuffer, 0);
+
+ aln->aseq = aseq - amin;
+ if (COMP(aln->flags)) {
+ Complement_Seq(bseq, bmax - bmin);
+ aln->bseq = bseq - (aln->blen - bmax);
+ }
+ else
+ aln->bseq = bseq - bmin;
+
+ Compute_Trace_PTS(aln, work, tspace,GREEDIEST);
+
+ if (FLIP) {
+ if (COMP(aln->flags)) {
+ Complement_Seq(aseq, amax - amin);
+ Complement_Seq(bseq, bmax - bmin);
+ aln->aseq = aseq - (aln->alen - amax);
+ aln->bseq = bseq - bmin;
+ }
+ Flip_Alignment(aln, 1);
+ }
+ }
+ if (CARTOON) {
+ printf(" (");
+ Print_Number(tps, tp_wide, stdout);
+ printf(" trace pts)\n\n");
+ Alignment_Cartoon(stdout, aln, INDENT, mx_wide);
+ }
+ else {
+ printf(" : = ");
+ Print_Number((int64) ovl->path.diffs, mn_wide, stdout);
+ printf(" diffs (");
+ Print_Number(tps, tp_wide, stdout);
+ printf(" trace pts)\n");
+ }
+ if (REFERENCE)
+ Print_Reference(stdout, aln, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+ if (ALIGN)
+ Print_Alignment(stdout, aln, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+ }
+ else {// printf(" : < ");
+ // Print_Number((int64) ovl->path.diffs,mn_wide,stdout);
+ // printf(" diffs (");
+ // Print_Number(tps,tp_wide,stdout);
+ // printf(" trace pts)\n");
+ }
+ }
+
+ free(trace);
+ if (ALIGN) {
+ free(bbuffer - 1);
+ free(abuffer - 1);
+ Free_Work_Data(work);
+ }
+
+}
+
+
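+// Fetch reads [from, to) of db1 into 'reads_vec' as Read objects (id, length, name, bases)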
+void LAInterface::getRead(std::vector<Read *> &reads_vec, int from, int to) {
+
+ std::stringstream ss;
+ std::string read_name;
+ std::string read_bases;
+ if (flist == NULL || findx == NULL)
+ exit(1);
+ HITS_READ *reads;
+ HITS_TRACK *first;
+ char *read, **entry;
+ int c, b, e, i;
+ int hilight, substr;
+ int map;
+ int (*iscase)(int);
+ read = New_Read_Buffer(db1);
+ int UPPER = 1;
+ int WIDTH = 80;
+ entry = NULL;
+ first = db1->tracks;
+ hilight = 'A' - 'a';
+ iscase = islower; // was left uninitialized; the track-highlighting loop below calls iscase()
+
+ map = 0;
+ reads = db1->reads;
+ substr = 0;
+
+ c = 0;
+
+ b = from;
+ e = to;
+
+ for (i = b; i < e; i++) {
+ int len;
+ int fst, lst;
+ int flags, qv;
+ HITS_READ *r;
+ HITS_TRACK *track;
+
+ r = reads + i;
+ len = r->rlen;
+
+ flags = r->flags;
+ qv = (flags & DB_QV);
+
+ {
+ while (i < findx[map - 1])
+ map -= 1;
+ while (i >= findx[map])
+ map += 1;
+ ss << flist[map] << '/' << r->origin << '/' << r->fpulse << '_' << r->fpulse + len;
+ if (qv > 0)
+ ss << "RQ=" << qv;
+ }
+
+ ss >> read_name;
+
+ Load_Read(db1, i, read, UPPER);
+
+ for (track = first; track != NULL; track = track->next) {
+ int64 *anno;
+ int *data;
+ int64 s, f, j;
+ int bd, ed, m;
+
+ anno = (int64 *) track->anno;
+ data = (int *) track->data;
+
+ s = (anno[i] >> 2);
+ f = (anno[i + 1] >> 2);
+ if (s < f) {
+ for (j = s; j < f; j += 2) {
+ bd = data[j];
+ ed = data[j + 1];
+ for (m = bd; m < ed; m++)
+ if (iscase(read[m]))
+ read[m] = (char) (read[m] + hilight);
+ if (j == s)
+ printf("> %s:", track->name);
+ printf(" [%d,%d]", bd, ed);
+ }
+ printf("\n");
+ }
+ }
+
+ read_bases = std::string(read);
+ fst = 0;
+ lst = len;
+ Read *new_r = new Read(i, len, read_name, read_bases);
+ reads_vec.push_back(new_r);
+
+ }
+
+}
+
+
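+// Same as the vector form of getRead, but reads come from the second database (db2)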
+void LAInterface::getRead2(std::vector<Read *> &reads_vec, int from, int to) {
+
+ std::stringstream ss;
+ std::string read_name;
+ std::string read_bases;
+ if (flist2 == NULL || findx2 == NULL)
+ exit(1);
+ HITS_READ *reads;
+ HITS_TRACK *first;
+ char *read, **entry;
+ int c, b, e, i;
+ int hilight, substr;
+ int map;
+ int (*iscase)(int);
+ read = New_Read_Buffer(db2);
+ int UPPER = 1;
+ int WIDTH = 80;
+ entry = NULL;
+ first = db2->tracks;
+ hilight = 'A' - 'a';
+ iscase = islower; // was left uninitialized; the track-highlighting loop below calls iscase()
+
+ map = 0;
+ reads = db2->reads;
+ substr = 0;
+
+ c = 0;
+
+ b = from;
+ e = to;
+
+ for (i = b; i < e; i++) {
+ int len;
+ int fst, lst;
+ int flags, qv;
+ HITS_READ *r;
+ HITS_TRACK *track;
+
+ r = reads + i;
+ len = r->rlen;
+
+ flags = r->flags;
+ qv = (flags & DB_QV);
+
+ {
+ while (i < findx2[map - 1])
+ map -= 1;
+ while (i >= findx2[map])
+ map += 1;
+ ss.clear(); // drop the eof state left by the previous extraction
+ ss.str(std::string()); // and empty the buffer so a stale name is never reused
+ ss << flist2[map] << '/' << r->origin << '/' << r->fpulse << '_' << r->fpulse + len;
+ if (qv > 0)
+ ss << "RQ=" << qv;
+ }
+
+ ss >> read_name;
+
+ Load_Read(db2, i, read, UPPER);
+
+ for (track = first; track != NULL; track = track->next) {
+ int64 *anno;
+ int *data;
+ int64 s, f, j;
+ int bd, ed, m;
+
+ anno = (int64 *) track->anno;
+ data = (int *) track->data;
+
+ s = (anno[i] >> 2);
+ f = (anno[i + 1] >> 2);
+ if (s < f) {
+ for (j = s; j < f; j += 2) {
+ bd = data[j];
+ ed = data[j + 1];
+ for (m = bd; m < ed; m++)
+ if (read[m] >= 'a' && read[m] <= 'z') // was iscase(read[m]); iscase was never initialized
+ read[m] = (char) (read[m] + hilight);
+ if (j == s)
+ printf("> %s:", track->name);
+ printf(" [%d,%d]", bd, ed);
+ }
+ printf("\n");
+ }
+ }
+
+ read_bases = std::string(read);
+ fst = 0;
+ lst = len;
+ Read *new_r = new Read(i, len, read_name, read_bases);
+ reads_vec.push_back(new_r);
+
+ }
+
+}
+
+
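+// resetAlignment: rewind the .las stream and re-read its header (record count
+// novl and trace spacing tspace) so the overlaps can be scanned again from the
+// start. Call this before a second pass with getOverlap/getAlignment.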
+void LAInterface::resetAlignment() {
+ rewind(input);
+
+ if (fread(&novl, sizeof(int64), 1, input) != 1) SYSTEM_ERROR
+ if (fread(&tspace, sizeof(int), 1, input) != 1) SYSTEM_ERROR
+
+ if (tspace <= TRACE_XOVR) {
+ small = 1;
+ tbytes = sizeof(uint8);
+ }
+ else {
+ small = 0;
+ tbytes = sizeof(uint16);
+ }
+
+ //printf("\n%s: ", "read again");
+ //Print_Number(novl, 0, stdout);
+ //printf(" records\n");
+
+
+}
+
+
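+// getOverlap: scan the open .las stream and collect every overlap whose
+// A-read id lies in [from, to) as an LOverlap, copying its trace points and
+// converting B coordinates to forward-strand positions for reverse-complement
+// matches. The stream is consumed, so resetAlignment() is needed before
+// another pass. Hedged usage sketch (illustrative names only):
+//
+//   std::vector<LOverlap *> ovls;
+//   la.resetAlignment();
+//   la.getOverlap(ovls, 0, la.getReadNumber());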
+void LAInterface::getOverlap(std::vector<LOverlap *> &result_vec, int from, int64 to) {
+
+ int j;
+ uint16 *trace;
+ int tmax;
+ int in, npt, idx, ar;
+ int64 tps;
+
+ aln->path = &(ovl->path);
+
+ tmax = 1000;
+ trace = (uint16 *) Malloc(sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ in = 0;
+
+ pts = new int[4];
+ pts[0] = from + 1;
+ pts[1] = to + 0;
+ pts[2] = INT32_MAX;
+
+ npt = pts[0];
+ idx = 1;
+
+ // For each record do
+
+ for (j = 0; j < novl; j++)
+ // Read it in
+ {
+ //if (j % (novl/100) == 0) {
+ // printf("%d percent finished\n", j/(novl/100));
+ //}
+ Read_Overlap(input, ovl);
+ if (ovl->path.tlen > tmax) {
+ tmax = ((int) (1.2 * ovl->path.tlen)) + 100; // grow by the intended 20%; the cast used to apply to 1.2 alone
+ trace = (uint16 *) Realloc(trace, sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ }
+ ovl->path.trace = (void *) trace;
+ Read_Trace(input, ovl, tbytes);
+ // Determine if it should be displayed
+
+ ar = ovl->aread + 1;
+ if (in) {
+ while (ar > npt) {
+ npt = pts[idx++];
+ if (ar < npt) {
+ in = 0;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ else {
+ while (ar >= npt) {
+ npt = pts[idx++];
+ if (ar <= npt) {
+ in = 1;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ if (!in)
+ continue;
+
+ aln->alen = db1->reads[ovl->aread].rlen;
+ aln->blen = db2->reads[ovl->bread].rlen;
+ aln->flags = ovl->flags;
+ tps = ovl->path.tlen / 2;
+ LOverlap *new_ovl = new LOverlap();
+
+ if (COMP(ovl->flags))
+ { new_ovl->reverse_complement_match_ = 1;
+ }
+ else {
+ new_ovl->reverse_complement_match_ = 0;
+ }
+
+ if (small)
+ Decompress_TraceTo16(ovl);
+
+ new_ovl->trace_pts_len = ovl->path.tlen;
+ new_ovl->trace_pts = (uint16 *)malloc(ovl->path.tlen * sizeof(uint16));
+
+ memcpy(new_ovl->trace_pts, ovl->path.trace, ovl->path.tlen * sizeof(uint16));
+
+ new_ovl->read_A_id_ = ovl->aread;
+ new_ovl->read_B_id_ = ovl->bread;
+ new_ovl->read_A_match_start_ = ovl->path.abpos;
+ new_ovl->read_A_match_end_ = ovl->path.aepos;
+ new_ovl->alen = aln->alen;
+ new_ovl->blen = aln->blen;
+
+ if (new_ovl->reverse_complement_match_ == 0) {
+ new_ovl->read_B_match_start_ = ovl->path.bbpos;
+ new_ovl->read_B_match_end_ = ovl->path.bepos;
+ }
+ else {
+ new_ovl->read_B_match_start_ = new_ovl->blen - ovl->path.bepos;
+ new_ovl->read_B_match_end_ = new_ovl->blen - ovl->path.bbpos;
+ }
+
+ new_ovl->diffs = ovl->path.diffs;
+ new_ovl->tlen = ovl->path.tlen;
+ new_ovl->tps = tps;
+ result_vec.push_back(new_ovl);
+ }
+ free(trace);
+}
+
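+// getOverlapw: same scan as getOverlap(from, to), but prints coarse progress
+// and does not copy the trace-point arrays (trace_pts stays null) to save
+// memory. Unlike getOverlap, B coordinates are stored exactly as read from
+// the record, i.e. not flipped for reverse-complement matches.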
+void LAInterface::getOverlapw(std::vector<LOverlap *> &result_vec, int from, int to) {
+
+ int j;
+ uint16 *trace;
+ int tmax;
+ int in, npt, idx, ar;
+ int64 tps;
+
+ aln->path = &(ovl->path);
+
+ tmax = 1000;
+ trace = (uint16 *) Malloc(sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ in = 0;
+
+ pts = new int[4];
+ pts[0] = from + 1;
+ pts[1] = to + 0;
+ pts[2] = INT32_MAX;
+
+ npt = pts[0];
+ idx = 1;
+
+ // For each record do
+
+ for (j = 0; j < novl; j++)
+ // Read it in
+ {
+ if (novl >= 100 && j % (novl/100) == 0) { // guard the progress printout against novl < 100
+ printf("%d percent finished\n", j/(novl/100));
+ }
+ Read_Overlap(input, ovl);
+ if (ovl->path.tlen > tmax) {
+ tmax = ((int) (1.2 * ovl->path.tlen)) + 100;
+ trace = (uint16 *) Realloc(trace, sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ }
+ ovl->path.trace = (void *) trace;
+ Read_Trace(input, ovl, tbytes);
+ // Determine if it should be displayed
+
+ ar = ovl->aread + 1;
+ if (in) {
+ while (ar > npt) {
+ npt = pts[idx++];
+ if (ar < npt) {
+ in = 0;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ else {
+ while (ar >= npt) {
+ npt = pts[idx++];
+ if (ar <= npt) {
+ in = 1;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ if (!in)
+ continue;
+
+ aln->alen = db1->reads[ovl->aread].rlen;
+ aln->blen = db2->reads[ovl->bread].rlen;
+ aln->flags = ovl->flags;
+ tps = ovl->path.tlen / 2;
+ LOverlap *new_ovl = new LOverlap();
+
+ if (COMP(ovl->flags))
+ { new_ovl->reverse_complement_match_ = 1;
+ }
+ else {
+ new_ovl->reverse_complement_match_ = 0;
+ }
+
+ if (small)
+ Decompress_TraceTo16(ovl);
+
+ new_ovl->trace_pts_len = ovl->path.tlen;
+ //new_ovl->trace_pts = (uint16 *)malloc(ovl->path.tlen * sizeof(uint16));
+ //memcpy(new_ovl->trace_pts, ovl->path.trace, ovl->path.tlen * sizeof(uint16));
+
+ new_ovl->trace_pts = 0;
+ new_ovl->read_A_id_ = ovl->aread;
+ new_ovl->read_B_id_ = ovl->bread;
+ new_ovl->read_A_match_start_ = ovl->path.abpos;
+ new_ovl->read_A_match_end_ = ovl->path.aepos;
+ new_ovl->read_B_match_start_ = ovl->path.bbpos;
+ new_ovl->read_B_match_end_ = ovl->path.bepos;
+ new_ovl->alen = aln->alen;
+ new_ovl->blen = aln->blen;
+ new_ovl->diffs = ovl->path.diffs;
+ new_ovl->tlen = ovl->path.tlen;
+ new_ovl->tps = tps;
+ result_vec.push_back(new_ovl);
+ }
+ free(trace);
+}
+
+
+void LAInterface::getOverlap(std::vector<LOverlap *> &result_vec, int n) {
+
+ getOverlap(result_vec, n, n + 1);
+
+}
+
+void LAInterface::getAlignment(std::vector<LAlignment *> &result_vec, int from) {
+
+ getAlignment(result_vec, from, from + 1);
+
+}
+
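+// getAlignment: like getOverlap, but produces LAlignment objects that carry
+// the raw trace points needed to rebuild a base-level alignment later (see
+// recoverAlignment further down). The full banded alignment at load time is
+// compiled out unless DOALIGN is defined.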
+void LAInterface::getAlignment(std::vector<LAlignment *> &result_vec, int from, int to) {
+
+ int j;
+ uint16 *trace;
+ Work_Data *work;
+ int tmax;
+ int in, npt, idx, ar;
+ int64 tps;
+ char *abuffer, *bbuffer;
+ int ar_wide, br_wide;
+ int ai_wide, bi_wide;
+ int mn_wide, mx_wide;
+ int tp_wide;
+ int blast, match, seen, lhalf, rhalf;
+ bool ALIGN = true;
+ bool REFERENCE = false;
+ bool CARTOON = false;
+ bool OVERLAP = false;
+ bool FLIP = false;
+ bool UPPERCASE = false;
+ bool MAP = false;
+ int INDENT = 4;
+ int WIDTH = 100;
+ int BORDER = 10;
+
+ aln->path = &(ovl->path);
+ if (ALIGN || REFERENCE) {
+ work = New_Work_Data();
+ abuffer = New_Read_Buffer(db1);
+ bbuffer = New_Read_Buffer(db2);
+ }
+ else {
+ abuffer = NULL;
+ bbuffer = NULL;
+ work = NULL;
+ }
+
+
+ tmax = 1000;
+ trace = (uint16 *) Malloc(sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ in = 0;
+
+ //if (pts!=NULL) free(pts);
+ //pts = NULL;
+ pts = new int[4];
+ pts[0] = from + 1;
+ pts[1] = to ;
+ pts[2] = INT32_MAX;
+
+ npt = pts[0];
+ idx = 1;
+
+ ar_wide = Number_Digits((int64) db1->nreads);
+ br_wide = Number_Digits((int64) db2->nreads);
+ ai_wide = Number_Digits((int64) db1->maxlen);
+ bi_wide = Number_Digits((int64) db2->maxlen);
+ if (db1->maxlen < db2->maxlen) {
+ mn_wide = ai_wide;
+ mx_wide = bi_wide;
+ tp_wide = Number_Digits((int64) db1->maxlen / tspace + 2);
+ }
+ else {
+ mn_wide = bi_wide;
+ mx_wide = ai_wide;
+ tp_wide = Number_Digits((int64) db2->maxlen / tspace + 2);
+ }
+ ar_wide += (ar_wide - 1) / 3;
+ br_wide += (br_wide - 1) / 3;
+ ai_wide += (ai_wide - 1) / 3;
+ bi_wide += (bi_wide - 1) / 3;
+ mn_wide += (mn_wide - 1) / 3;
+ tp_wide += (tp_wide - 1) / 3;
+ if (FLIP) {
+ int x;
+ x = ar_wide;
+ ar_wide = br_wide;
+ br_wide = x;
+ x = ai_wide;
+ ai_wide = bi_wide;
+ bi_wide = x;
+ }
+
+ // For each record do
+
+ blast = -1;
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+
+ for (j = 0; j < novl; j++)
+ // Read it in
+ {
+ //printf("j:%d/%d\n",j,novl);
+ Read_Overlap(input, ovl);
+ if (ovl->path.tlen > tmax) {
+ tmax = ((int) (1.2 * ovl->path.tlen)) + 100;
+ trace = (uint16 *) Realloc(trace, sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ }
+ ovl->path.trace = (void *) trace;
+ Read_Trace(input, ovl, tbytes);
+ // Determine if it should be displayed
+
+
+
+
+ ar = ovl->aread + 1;
+ if (in) {
+ while (ar > npt) {
+ npt = pts[idx++];
+ if (ar < npt) {
+ in = 0;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ else {
+ while (ar >= npt) {
+ npt = pts[idx++];
+ if (ar <= npt) {
+ in = 1;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ if (!in)
+ continue;
+
+ //printf("j:%d/%d\n",j,novl);
+
+ // If -o check display only overlaps
+
+ aln->alen = db1->reads[ovl->aread].rlen;
+ aln->blen = db2->reads[ovl->bread].rlen;
+ aln->flags = ovl->flags;
+ tps = ovl->path.tlen / 2;
+ LAlignment *new_al = new LAlignment();
+ new_al->read_A_id_ = ovl->aread;
+ new_al->read_B_id_ = ovl->bread;
+
+ if (COMP(ovl->flags))
+ //printf(" c");
+ new_al->flags = 1;
+ else
+ new_al->flags = 0;
+ //printf(" n");
+ //printf(" [");
+ //Print_Number((int64) ovl->path.read_A_match_start_,ai_wide,stdout);
+ new_al->abpos = ovl->path.abpos;
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_A_match_end_,ai_wide,stdout);
+ new_al->aepos = ovl->path.aepos;
+ //printf("] x [");
+ //Print_Number((int64) ovl->path.read_B_match_start_,bi_wide,stdout);
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_B_match_end_,bi_wide,stdout);
+ //printf("]");
+ new_al->bbpos = ovl->path.bbpos;
+ new_al->bepos = ovl->path.bepos;
+ new_al->alen = aln->alen;
+ new_al->blen = aln->blen;
+ new_al->diffs = ovl->path.diffs;
+ new_al->tlen = ovl->path.tlen;
+ new_al->tps = tps;
+
+ if (OVERLAP) {
+ if (ovl->path.abpos != 0 && ovl->path.bbpos != 0)
+ continue;
+ if (ovl->path.aepos != aln->alen && ovl->path.bepos != aln->blen)
+ continue;
+ }
+
+ // If -M option then check the completeness of the implied mapping
+
+ if (MAP) {
+ while (ovl->bread != blast) {
+ if (!match && seen && !(lhalf && rhalf)) {
+ printf("Missing ");
+ Print_Number((int64) blast + 1, br_wide + 1, stdout);
+ printf(" %d ->%lld\n", db2->reads[blast].rlen, db2->reads[blast].coff);
+ }
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+ blast += 1;
+ }
+ seen = 1;
+ if (ovl->path.abpos == 0)
+ rhalf = 1;
+ if (ovl->path.aepos == aln->alen)
+ lhalf = 1;
+ if (ovl->path.bbpos != 0 || ovl->path.bepos != aln->blen)
+ continue;
+ match = 1;
+ }
+
+ // Display it
+
+ //if (ALIGN || CARTOON || REFERENCE)
+ //printf("\n");
+ if (FLIP) {
+ Flip_Alignment(aln, 0);
+ //Print_Number((int64) ovl->bread+1,ar_wide+1,stdout);
+ //printf(" ");
+ //Print_Number((int64) ovl->aread+1,br_wide+1,stdout);
+ }
+ else { //Print_Number((int64) ovl->aread+1,ar_wide+1,stdout);
+
+ //printf(" ");
+ //Print_Number((int64) ovl->bread+1,br_wide+1,stdout);
+ //result.push_back(ovl->bread);
+ }
+ //if (COMP(ovl->reverse_complement_match_))
+ // printf(" c");
+ //else
+ // printf(" n");
+ //printf(" [");
+ //Print_Number((int64) ovl->path.read_A_match_start_,ai_wide,stdout);
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_A_match_end_,ai_wide,stdout);
+ //printf("] x [");
+ //Print_Number((int64) ovl->path.read_B_match_start_,bi_wide,stdout);
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_B_match_end_,bi_wide,stdout);
+ //printf("]");
+
+
+ if ((ALIGN || CARTOON || REFERENCE) || true) {
+ if (ALIGN || REFERENCE) {
+ char *aseq, *bseq;
+ int amin, amax;
+ int bmin, bmax;
+
+ if (FLIP)
+ Flip_Alignment(aln, 0);
+ //if (small)
+ // Decompress_TraceTo16(ovl);
+
+
+ if (small)
+ Decompress_TraceTo16(ovl);
+
+ new_al->trace_pts_len = ovl->path.tlen;
+ new_al->trace_pts = (uint16 *)malloc(ovl->path.tlen * sizeof(uint16));
+
+ memcpy(new_al->trace_pts, ovl->path.trace, ovl->path.tlen * sizeof(uint16));
+
+ /*{
+ printf("\n");
+
+ uint16 *pp = (uint16 *) ovl->path.trace;
+ for (int uu = 0; uu < ovl->path.tlen; uu++) {
+ printf("%d ", pp[uu]);
+ new_al->trace_pts[uu] = pp[uu];
+ }
+
+
+ printf("\n");
+
+
+ }*/
+#ifdef DOALIGN
+ // Note: Overlap/Alignment keep DALIGNER's field names (abpos, bepos, flags);
+ // the LOverlap-style names used here previously do not exist on these structs.
+ amin = ovl->path.abpos - BORDER;
+ if (amin < 0) amin = 0;
+ amax = ovl->path.aepos + BORDER;
+ if (amax > aln->alen) amax = aln->alen;
+ if (COMP(aln->flags)) {
+ bmin = (aln->blen - ovl->path.bepos) - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = (aln->blen - ovl->path.bbpos) + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+ else {
+ bmin = ovl->path.bbpos - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = ovl->path.bepos + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+
+ aseq = Load_Subread(db1, ovl->aread, amin, amax, abuffer, 0);
+ bseq = Load_Subread(db2, ovl->bread, bmin, bmax, bbuffer, 0);
+
+ aln->aseq = aseq - amin;
+ if (COMP(aln->flags)) {
+ Complement_Seq(bseq, bmax - bmin);
+ aln->bseq = bseq - (aln->blen - bmax);
+ }
+ else
+ aln->bseq = bseq - bmin;
+
+
+
+
+ Compute_Trace_PTS(aln, work, tspace,GREEDIEST);
+
+
+ /*new_al->aseq = (char *) malloc(new_al->alen * sizeof(char));
+ new_al->bseq = (char *) malloc(new_al->blen * sizeof(char));
+
+ memcpy(new_al->aseq, aln->aseq, new_al->alen* sizeof(char));
+ memcpy(new_al->bseq, aln->bseq, new_al->blen* sizeof(char));*/
+ new_al->aseq = NULL;
+ new_al->bseq = NULL;
+
+
+ /*{
+ int tlen = aln->path->tlen;
+ int *trace = (int *) aln->path->trace;
+ int u;
+ printf(" ");
+ for (u = 0; u < tlen; u++)
+ printf("%d,", (int) trace[u]);
+ printf("\n");
+ }*/
+
+ new_al->tlen = aln->path->tlen;
+ new_al->trace = (int *) malloc(sizeof(int) * aln->path->tlen*2);
+ //if (new_al->trace == NULL)
+ // exit(1);
+ //memcpy(new_al->trace, (void *) aln->path->trace, sizeof(int) * sizeof(int) * aln->path->tlen);
+
+ //free(trace);
+
+ //printf("after\n");
+ {
+ int tlen = aln->path->tlen;
+ int *trace = (int *) aln->path->trace;
+ int u;
+ //printf(" ");
+ for (u = 0; u < tlen; u++) {
+ //printf("%d,", (int) trace[u]);
+ new_al->trace[u] = (int)trace[u];
+ }
+ //printf("\n");
+ }
+
+#endif
+ if (FLIP) {
+ if (COMP(aln->flags)) {
+ Complement_Seq(aseq, amax - amin);
+ Complement_Seq(bseq, bmax - bmin);
+ aln->aseq = aseq - (aln->alen - amax);
+ aln->bseq = bseq - bmin;
+ }
+ Flip_Alignment(aln, 1);
+ }
+ }
+ if (CARTOON) {
+ //printf(" (");
+ //Print_Number(tps, tp_wide, stdout);
+ //printf(" trace pts)\n\n");
+ //Alignment_Cartoon(stdout, aln, INDENT, mx_wide);
+ }
+ else {
+ //printf(" : = ");
+ //Print_Number((int64) ovl->path.diffs, mn_wide, stdout);
+ //printf(" diffs (");
+ //Print_Number(tps, tp_wide, stdout);
+ //printf(" trace pts)\n");
+ }
+ if (REFERENCE)
+ Print_Reference(stdout, aln, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+ //if (ALIGN)
+ //printAlignment(stdout, aln, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+ // printAlignment_exp(stdout, new_al, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+
+
+
+ }
+ else {// printf(" : < ");
+ // Print_Number((int64) ovl->path.diffs,mn_wide,stdout);
+ // printf(" diffs (");
+ // Print_Number(tps,tp_wide,stdout);
+ // printf(" trace pts)\n");
+ }
+
+ result_vec.push_back(new_al);
+
+ }
+
+ free(trace);
+
+ if (ALIGN) {
+ free(bbuffer - 1);
+ free(abuffer - 1);
+ Free_Work_Data(work);
+ }
+
+}
+
+
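+// getAlignment(range): same collection as above, restricted to an explicit
+// list of A-read ids (expected in increasing order, as with LAshow's read
+// ranges); each id r becomes the degenerate interval [r+1, r+1] in the pts[]
+// filter that the scan loop walks.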
+void LAInterface::getAlignment(std::vector<LAlignment *> &result_vec, std::vector<int> & range) {
+
+ int j;
+ uint16 *trace;
+ Work_Data *work;
+ int tmax;
+ int in, npt, idx, ar;
+ int64 tps;
+ char *abuffer, *bbuffer;
+ int ar_wide, br_wide;
+ int ai_wide, bi_wide;
+ int mn_wide, mx_wide;
+ int tp_wide;
+ int blast, match, seen, lhalf, rhalf;
+ bool ALIGN = true;
+ bool REFERENCE = false;
+ bool CARTOON = false;
+ bool OVERLAP = false;
+ bool FLIP = false;
+ bool UPPERCASE = false;
+ bool MAP = false;
+ int INDENT = 4;
+ int WIDTH = 100;
+ int BORDER = 10;
+
+ aln->path = &(ovl->path);
+ if (ALIGN || REFERENCE) {
+ work = New_Work_Data();
+ abuffer = New_Read_Buffer(db1);
+ bbuffer = New_Read_Buffer(db2);
+ }
+ else {
+ abuffer = NULL;
+ bbuffer = NULL;
+ work = NULL;
+ }
+
+
+ tmax = 1000;
+ trace = (uint16 *) Malloc(sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ in = 0;
+
+ //if (pts!=NULL) free(pts);
+ //pts = NULL;
+ pts = new int[range.size()*2+20];
+ for (int k = 0; k < range.size(); k++) {
+ pts[k*2] = range[k] + 1;
+ pts[k*2+1] = range[k] + 1;
+ }
+ pts[range.size()*2] = INT32_MAX;
+
+ /*for (int i = 0; i < range.size()*2+2; i++) {
+ printf("%d\n",pts[i]);
+ }*/
+
+ npt = pts[0];
+ idx = 1;
+
+ ar_wide = Number_Digits((int64) db1->nreads);
+ br_wide = Number_Digits((int64) db2->nreads);
+ ai_wide = Number_Digits((int64) db1->maxlen);
+ bi_wide = Number_Digits((int64) db2->maxlen);
+ if (db1->maxlen < db2->maxlen) {
+ mn_wide = ai_wide;
+ mx_wide = bi_wide;
+ tp_wide = Number_Digits((int64) db1->maxlen / tspace + 2);
+ }
+ else {
+ mn_wide = bi_wide;
+ mx_wide = ai_wide;
+ tp_wide = Number_Digits((int64) db2->maxlen / tspace + 2);
+ }
+ ar_wide += (ar_wide - 1) / 3;
+ br_wide += (br_wide - 1) / 3;
+ ai_wide += (ai_wide - 1) / 3;
+ bi_wide += (bi_wide - 1) / 3;
+ mn_wide += (mn_wide - 1) / 3;
+ tp_wide += (tp_wide - 1) / 3;
+ if (FLIP) {
+ int x;
+ x = ar_wide;
+ ar_wide = br_wide;
+ br_wide = x;
+ x = ai_wide;
+ ai_wide = bi_wide;
+ bi_wide = x;
+ }
+
+ // For each record do
+
+ blast = -1;
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+
+ for (j = 0; j < novl; j++)
+ // Read it in
+ {
+ //printf("j:%d/%d\n",j,novl);
+ Read_Overlap(input, ovl);
+ if (ovl->path.tlen > tmax) {
+ tmax = ((int) (1.2 * ovl->path.tlen)) + 100;
+ trace = (uint16 *) Realloc(trace, sizeof(uint16) * tmax, "Allocating trace vector");
+ if (trace == NULL)
+ exit(1);
+ }
+ ovl->path.trace = (void *) trace;
+ Read_Trace(input, ovl, tbytes);
+ // Determine if it should be displayed
+
+
+
+
+ ar = ovl->aread + 1;
+ if (in) {
+ while (ar > npt) {
+ npt = pts[idx++];
+ if (ar < npt) {
+ in = 0;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ else {
+ while (ar >= npt) {
+ npt = pts[idx++];
+ if (ar <= npt) {
+ in = 1;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ if (!in)
+ continue;
+
+ //printf("j:%d/%d\n",j,novl);
+
+ // If -o check display only overlaps
+
+ aln->alen = db1->reads[ovl->aread].rlen;
+ aln->blen = db2->reads[ovl->bread].rlen;
+ aln->flags = ovl->flags;
+ tps = ovl->path.tlen / 2;
+ LAlignment *new_al = new LAlignment();
+ new_al->read_A_id_ = ovl->aread;
+ new_al->read_B_id_ = ovl->bread;
+
+ if (COMP(ovl->flags))
+ //printf(" c");
+ new_al->flags = 1;
+ else
+ new_al->flags = 0;
+ //printf(" n");
+ //printf(" [");
+ //Print_Number((int64) ovl->path.read_A_match_start_,ai_wide,stdout);
+ new_al->abpos = ovl->path.abpos;
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_A_match_end_,ai_wide,stdout);
+ new_al->aepos = ovl->path.aepos;
+ //printf("] x [");
+ //Print_Number((int64) ovl->path.read_B_match_start_,bi_wide,stdout);
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_B_match_end_,bi_wide,stdout);
+ //printf("]");
+ new_al->bbpos = ovl->path.bbpos;
+ new_al->bepos = ovl->path.bepos;
+ new_al->alen = aln->alen;
+ new_al->blen = aln->blen;
+ new_al->diffs = ovl->path.diffs;
+ new_al->tlen = ovl->path.tlen;
+ new_al->tps = tps;
+
+ if (OVERLAP) {
+ if (ovl->path.abpos != 0 && ovl->path.bbpos != 0)
+ continue;
+ if (ovl->path.aepos != aln->alen && ovl->path.bepos != aln->blen)
+ continue;
+ }
+
+ // If -M option then check the completeness of the implied mapping
+
+ if (MAP) {
+ while (ovl->bread != blast) {
+ if (!match && seen && !(lhalf && rhalf)) {
+ printf("Missing ");
+ Print_Number((int64) blast + 1, br_wide + 1, stdout);
+ printf(" %d ->%lld\n", db2->reads[blast].rlen, db2->reads[blast].coff);
+ }
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+ blast += 1;
+ }
+ seen = 1;
+ if (ovl->path.abpos == 0)
+ rhalf = 1;
+ if (ovl->path.aepos == aln->alen)
+ lhalf = 1;
+ if (ovl->path.bbpos != 0 || ovl->path.bepos != aln->blen)
+ continue;
+ match = 1;
+ }
+
+ // Display it
+
+ //if (ALIGN || CARTOON || REFERENCE)
+ //printf("\n");
+ if (FLIP) {
+ Flip_Alignment(aln, 0);
+ //Print_Number((int64) ovl->bread+1,ar_wide+1,stdout);
+ //printf(" ");
+ //Print_Number((int64) ovl->aread+1,br_wide+1,stdout);
+ }
+ else { //Print_Number((int64) ovl->aread+1,ar_wide+1,stdout);
+
+ //printf(" ");
+ //Print_Number((int64) ovl->bread+1,br_wide+1,stdout);
+ //result.push_back(ovl->bread);
+ }
+ //if (COMP(ovl->reverse_complement_match_))
+ // printf(" c");
+ //else
+ // printf(" n");
+ //printf(" [");
+ //Print_Number((int64) ovl->path.read_A_match_start_,ai_wide,stdout);
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_A_match_end_,ai_wide,stdout);
+ //printf("] x [");
+ //Print_Number((int64) ovl->path.read_B_match_start_,bi_wide,stdout);
+ //printf("..");
+ //Print_Number((int64) ovl->path.read_B_match_end_,bi_wide,stdout);
+ //printf("]");
+
+
+ if ((ALIGN || CARTOON || REFERENCE) || true) {
+ if (ALIGN || REFERENCE) {
+ char *aseq, *bseq;
+ int amin, amax;
+ int bmin, bmax;
+
+ if (FLIP)
+ Flip_Alignment(aln, 0);
+ //if (small)
+ // Decompress_TraceTo16(ovl);
+
+
+ if (small)
+ Decompress_TraceTo16(ovl);
+
+ new_al->trace_pts_len = ovl->path.tlen;
+ new_al->trace_pts = (uint16 *)malloc(ovl->path.tlen * sizeof(uint16));
+ memcpy(new_al->trace_pts, ovl->path.trace, ovl->path.tlen * sizeof(uint16));
+
+ /*{
+ printf("\n");
+
+ uint16 *pp = (uint16 *) ovl->path.trace;
+ for (int uu = 0; uu < ovl->path.tlen; uu++) {
+ printf("%d ", pp[uu]);
+ new_al->trace_pts[uu] = pp[uu];
+ }
+
+
+ printf("\n");
+
+
+ }*/
+
+
+//#define DOALIGN
+#ifdef DOALIGN
+ // Note: Overlap/Alignment keep DALIGNER's field names (abpos, bepos, flags);
+ // the LOverlap-style names used here previously do not exist on these structs.
+ amin = ovl->path.abpos - BORDER;
+ if (amin < 0) amin = 0;
+ amax = ovl->path.aepos + BORDER;
+ if (amax > aln->alen) amax = aln->alen;
+ if (COMP(aln->flags)) {
+ bmin = (aln->blen - ovl->path.bepos) - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = (aln->blen - ovl->path.bbpos) + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+ else {
+ bmin = ovl->path.bbpos - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = ovl->path.bepos + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+
+ aseq = Load_Subread(db1, ovl->aread, amin, amax, abuffer, 0);
+ bseq = Load_Subread(db2, ovl->bread, bmin, bmax, bbuffer, 0);
+
+ aln->aseq = aseq - amin;
+ if (COMP(aln->flags)) {
+ Complement_Seq(bseq, bmax - bmin);
+ aln->bseq = bseq - (aln->blen - bmax);
+ }
+ else
+ aln->bseq = bseq - bmin;
+
+
+
+
+ Compute_Trace_PTS(aln, work, tspace,GREEDIEST);
+
+#endif
+ /*new_al->aseq = (char *) malloc(new_al->alen * sizeof(char));
+ new_al->bseq = (char *) malloc(new_al->blen * sizeof(char));
+
+ memcpy(new_al->aseq, aln->aseq, new_al->alen* sizeof(char));
+ memcpy(new_al->bseq, aln->bseq, new_al->blen* sizeof(char));*/
+ //new_al->aseq = NULL;
+ //new_al->bseq = NULL;
+
+
+ /*{
+ int tlen = aln->path->tlen;
+ int *trace = (int *) aln->path->trace;
+ int u;
+ printf(" ");
+ for (u = 0; u < tlen; u++)
+ printf("%d,", (int) trace[u]);
+ printf("\n");
+ }*/
+
+#ifdef DOALIGN
+ new_al->tlen = aln->path->tlen;
+ new_al->trace = (int *) malloc(sizeof(int) * aln->path->tlen*2);
+ //if (new_al->trace == NULL)
+ // exit(1);
+ //memcpy(new_al->trace, (void *) aln->path->trace, sizeof(int) * sizeof(int) * aln->path->tlen);
+
+ //free(trace);
+
+ //printf("after\n");
+ {
+ int tlen = aln->path->tlen;
+ int *trace = (int *) aln->path->trace;
+ int u;
+ //printf(" ");
+ for (u = 0; u < tlen; u++) {
+ //printf("%d,", (int) trace[u]);
+ new_al->trace[u] = (int)trace[u];
+ }
+ //printf("\n");
+ }
+
+#endif
+ if (FLIP) {
+ if (COMP(aln->flags)) {
+ Complement_Seq(aseq, amax - amin);
+ Complement_Seq(bseq, bmax - bmin);
+ aln->aseq = aseq - (aln->alen - amax);
+ aln->bseq = bseq - bmin;
+ }
+ Flip_Alignment(aln, 1);
+ }
+ }
+ if (CARTOON) {
+ //printf(" (");
+ //Print_Number(tps, tp_wide, stdout);
+ //printf(" trace pts)\n\n");
+ //Alignment_Cartoon(stdout, aln, INDENT, mx_wide);
+ }
+ else {
+ //printf(" : = ");
+ //Print_Number((int64) ovl->path.diffs, mn_wide, stdout);
+ //printf(" diffs (");
+ //Print_Number(tps, tp_wide, stdout);
+ //printf(" trace pts)\n");
+ }
+ if (REFERENCE)
+ Print_Reference(stdout, aln, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+ //if (ALIGN)
+ //printAlignment(stdout, aln, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+ // printAlignment_exp(stdout, new_al, work, INDENT, WIDTH, BORDER, UPPERCASE, mx_wide);
+
+
+
+ }
+ else {// printf(" : < ");
+ // Print_Number((int64) ovl->path.diffs,mn_wide,stdout);
+ // printf(" diffs (");
+ // Print_Number(tps,tp_wide,stdout);
+ // printf(" trace pts)\n");
+ }
+
+ result_vec.push_back(new_al);
+
+ }
+
+ free(trace);
+
+ if (ALIGN) {
+ free(bbuffer - 1);
+ free(abuffer - 1);
+ Free_Work_Data(work);
+ }
+
+}
+
+
+int LAInterface::getReadNumber() {
+ return db1->nreads;
+}
+
+
+int LAInterface::getReadNumber2() {
+ return db2->nreads;
+}
+
+int64 LAInterface::getAlignmentNumber() {
+ resetAlignment();
+ return novl;
+
+}
+
+
+
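+// showOverlap: print the overlaps with A-read id in [from, to) to stdout in
+// LAshow-style coordinate form. The ALIGN/REFERENCE/CARTOON switches that
+// would also render the alignment are hard-coded off here.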
+void LAInterface::showOverlap(int from, int to) {
+ int j;
+ uint16 *trace;
+ Work_Data *work;
+ int tmax;
+ int in, npt, idx, ar;
+ int64 tps;
+
+ char *abuffer, *bbuffer;
+ int ar_wide, br_wide;
+ int ai_wide, bi_wide;
+ int mn_wide, mx_wide;
+ int tp_wide;
+ int blast, match, seen, lhalf, rhalf;
+ bool ALIGN = false;
+ bool REFERENCE = false;
+ bool CARTOON = false;
+ bool OVERLAP = false;
+ bool FLIP = false;
+ bool UPPERCASE = false;
+ bool MAP = false;
+ int INDENT = 4;
+ int WIDTH = 100;
+ int BORDER = 10;
+
+ aln->path = &(ovl->path);
+ if (ALIGN || REFERENCE)
+ { work = New_Work_Data();
+ abuffer = New_Read_Buffer(db1);
+ bbuffer = New_Read_Buffer(db2);
+ }
+ else
+ { abuffer = NULL;
+ bbuffer = NULL;
+ work = NULL;
+ }
+
+ tmax = 1000;
+ trace = (uint16 *) Malloc(sizeof(uint16)*tmax,"Allocating trace vector");
+ if (trace == NULL)
+ exit (1);
+
+ in = 0;
+ // npt and idx are set after pts is allocated below; reading pts[0] here
+ // would dereference a pointer that may not have been set up yet.
+
+ ar_wide = Number_Digits((int64) db1->nreads);
+ br_wide = Number_Digits((int64) db2->nreads);
+ ai_wide = Number_Digits((int64) db1->maxlen);
+ bi_wide = Number_Digits((int64) db2->maxlen);
+ if (db1->maxlen < db2->maxlen)
+ { mn_wide = ai_wide;
+ mx_wide = bi_wide;
+ tp_wide = Number_Digits((int64) db1->maxlen/tspace+2);
+ }
+ else
+ { mn_wide = bi_wide;
+ mx_wide = ai_wide;
+ tp_wide = Number_Digits((int64) db2->maxlen/tspace+2);
+ }
+ ar_wide += (ar_wide-1)/3;
+ br_wide += (br_wide-1)/3;
+ ai_wide += (ai_wide-1)/3;
+ bi_wide += (bi_wide-1)/3;
+ mn_wide += (mn_wide-1)/3;
+ tp_wide += (tp_wide-1)/3;
+
+ if (FLIP)
+ { int x;
+ x = ar_wide; ar_wide = br_wide; br_wide = x;
+ x = ai_wide; ai_wide = bi_wide; bi_wide = x;
+ }
+
+ // For each record do
+
+ blast = -1;
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+
+
+ pts = new int[4];
+ pts[0] = from + 1;
+ pts[1] = to ;
+ pts[2] = INT32_MAX;
+
+ npt = pts[0];
+ idx = 1;
+
+ for (j = 0; j < novl; j++)
+
+ // Read it in
+
+ { Read_Overlap(input,ovl);
+ if (ovl->path.tlen > tmax)
+ { tmax = ((int) (1.2*ovl->path.tlen)) + 100;
+ trace = (uint16 *) Realloc(trace,sizeof(uint16)*tmax,"Allocating trace vector");
+ if (trace == NULL)
+ exit (1);
+ }
+ ovl->path.trace = (void *) trace;
+ Read_Trace(input,ovl,tbytes);
+
+ // Determine if it should be displayed
+
+ ar = ovl->aread+1;
+ if (in)
+ { while (ar > npt)
+ { npt = pts[idx++];
+ if (ar < npt)
+ { in = 0;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ else
+ { while (ar >= npt)
+ { npt = pts[idx++];
+ if (ar <= npt)
+ { in = 1;
+ break;
+ }
+ npt = pts[idx++];
+ }
+ }
+ if (!in)
+ continue;
+
+ // If -o check display only overlaps
+
+ aln->alen = db1->reads[ovl->aread].rlen;
+ aln->blen = db2->reads[ovl->bread].rlen;
+ aln->flags = ovl->flags;
+ tps = ovl->path.tlen/2;
+
+ if (OVERLAP)
+ { if (ovl->path.abpos != 0 && ovl->path.bbpos != 0)
+ continue;
+ if (ovl->path.aepos != aln->alen && ovl->path.bepos != aln->blen)
+ continue;
+ }
+
+ // If -M option then check the completeness of the implied mapping
+
+ if (MAP)
+ { while (ovl->bread != blast)
+ { if (!match && seen && !(lhalf && rhalf))
+ { printf("Missing ");
+ Print_Number((int64) blast+1,br_wide+1,stdout);
+ printf(" %d ->%lld\n",db2->reads[blast].rlen,db2->reads[blast].coff);
+ }
+ match = 0;
+ seen = 0;
+ lhalf = rhalf = 0;
+ blast += 1;
+ }
+ seen = 1;
+ if (ovl->path.abpos == 0)
+ rhalf = 1;
+ if (ovl->path.aepos == aln->alen)
+ lhalf = 1;
+ if (ovl->path.bbpos != 0 || ovl->path.bepos != aln->blen)
+ continue;
+ match = 1;
+ }
+
+ // Display it
+
+ if (ALIGN || CARTOON || REFERENCE)
+ printf("\n");
+ if (FLIP)
+ { Flip_Alignment(aln,0);
+ Print_Number((int64) ovl->bread+1,ar_wide+1,stdout);
+ printf(" ");
+ Print_Number((int64) ovl->aread+1,br_wide+1,stdout);
+ }
+ else
+ { Print_Number((int64) ovl->aread+1,ar_wide+1,stdout);
+ printf(" ");
+ Print_Number((int64) ovl->bread+1,br_wide+1,stdout);
+ }
+ if (COMP(ovl->flags))
+ printf(" c");
+ else
+ printf(" n");
+ printf(" [");
+ Print_Number((int64) ovl->path.abpos,ai_wide,stdout);
+ printf("..");
+ Print_Number((int64) ovl->path.aepos,ai_wide,stdout);
+ printf("] x [");
+ Print_Number((int64) ovl->path.bbpos,bi_wide,stdout);
+ printf("..");
+ Print_Number((int64) ovl->path.bepos,bi_wide,stdout);
+ printf("]%d",aln->blen);
+
+ if (ALIGN || CARTOON || REFERENCE)
+ { if (ALIGN || REFERENCE)
+ { char *aseq, *bseq;
+ int amin, amax;
+ int bmin, bmax;
+
+ if (FLIP)
+ Flip_Alignment(aln,0);
+ if (small)
+ Decompress_TraceTo16(ovl);
+
+ amin = ovl->path.abpos - BORDER;
+ if (amin < 0) amin = 0;
+ amax = ovl->path.aepos + BORDER;
+ if (amax > aln->alen) amax = aln->alen;
+ if (COMP(aln->flags))
+ { bmin = (aln->blen-ovl->path.bepos) - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = (aln->blen-ovl->path.bbpos) + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+ else
+ { bmin = ovl->path.bbpos - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = ovl->path.bepos + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+
+ aseq = Load_Subread(db1,ovl->aread,amin,amax,abuffer,0);
+ bseq = Load_Subread(db2,ovl->bread,bmin,bmax,bbuffer,0);
+
+ aln->aseq = aseq - amin;
+ if (COMP(aln->flags))
+ { Complement_Seq(bseq,bmax-bmin);
+ aln->bseq = bseq - (aln->blen - bmax);
+ }
+ else
+ aln->bseq = bseq - bmin;
+
+ Compute_Trace_PTS(aln,work,tspace,GREEDIEST);
+
+ if (FLIP)
+ { if (COMP(aln->flags))
+ { Complement_Seq(aseq,amax-amin);
+ Complement_Seq(bseq,bmax-bmin);
+ aln->aseq = aseq - (aln->alen - amax);
+ aln->bseq = bseq - bmin;
+ }
+ Flip_Alignment(aln,1);
+ }
+ }
+ if (CARTOON)
+ { printf(" (");
+ Print_Number(tps,tp_wide,stdout);
+ printf(" trace pts)\n\n");
+ Alignment_Cartoon(stdout,aln,INDENT,mx_wide);
+ }
+ else
+ { printf(" : = ");
+ Print_Number((int64) ovl->path.diffs,mn_wide,stdout);
+ printf(" diffs (");
+ Print_Number(tps,tp_wide,stdout);
+ printf(" trace pts)\n");
+ }
+ if (REFERENCE)
+ Print_Reference(stdout,aln,work,INDENT,WIDTH,BORDER,UPPERCASE,mx_wide);
+ if (ALIGN)
+ Print_Alignment(stdout,aln,work,INDENT,WIDTH,BORDER,UPPERCASE,mx_wide);
+ }
+ else
+ { printf(" : < ");
+ Print_Number((int64) ovl->path.diffs,mn_wide,stdout);
+ printf(" diffs (");
+ Print_Number(tps,tp_wide,stdout);
+ printf(" trace pts)\n");
+ }
+ }
+
+ free(trace);
+ if (ALIGN)
+ { free(bbuffer-1);
+ free(abuffer-1);
+ Free_Work_Data(work);
+ }
+
+ return;
+}
+
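+// The declarations below mirror private working-storage details from
+// DALIGNER's align.c so that the local printing and trace routines can size
+// and reuse the otherwise opaque Work_Data buffers directly.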
+typedef struct // Hidden from the user, working space for each thread
+{ int vecmax;
+ void *vector;
+ int celmax;
+ void *cells;
+ int pntmax;
+ void *points;
+ int tramax;
+ void *trace;
+} _Work_Data;
+
+static int enlarge_vector(_Work_Data *work, int newmax)
+{ void *vec;
+ int max;
+
+ max = ((int) (newmax*1.2)) + 10000;
+ vec = Realloc(work->vector,max,"Enlarging DP vector");
+ if (vec == NULL)
+ EXIT(1);
+ work->vecmax = max;
+ work->vector = vec;
+ return (0);
+}
+
+static char ToL[8] = { 'a', 'c', 'g', 't', '.', '[', ']', '-' };
+static char ToU[8] = { 'A', 'C', 'G', 'T', '.', '[', ']', '-' };
+
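+// printAlignment: render a computed alignment (A over B with a match/diff
+// line in between) in blocks of `width` columns, following DALIGNER's
+// Print_Alignment layout. The COLUMN macro buffers one column at a time and
+// flushes a block, together with its running % difference, when full.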
+int LAInterface::printAlignment(FILE *file, Alignment *align, Work_Data *ework,
+ int indent, int width, int border, int upper, int coord)
+{ _Work_Data *work = (_Work_Data *) ework;
+ int *trace = (int *) align->path->trace;
+ int tlen = align->path->tlen;
+
+ char *Abuf, *Bbuf, *Dbuf;
+ int i, j, o;
+ char *a, *b;
+ char mtag, dtag;
+ int prefa, prefb;
+ int aend, bend;
+ int sa, sb;
+ int match, diff;
+ char *N2A;
+
+ if (trace == NULL) return (0);
+
+#ifdef SHOW_TRACE
+ fprintf(file,"\nTrace:\n");
+ for (i = 0; i < tlen; i++)
+ fprintf(file," %3d\n",trace[i]);
+#endif
+
+ o = sizeof(char)*3*(width+1);
+ if (o > work->vecmax)
+ if (enlarge_vector(work,o))
+ EXIT(1);
+
+ if (upper)
+ N2A = ToU;
+ else
+ N2A = ToL;
+
+ Abuf = (char *) work->vector;
+ Bbuf = Abuf + (width+1);
+ Dbuf = Bbuf + (width+1);
+
+ aend = align->path->aepos;
+ bend = align->path->bepos;
+
+ Abuf[width] = Bbuf[width] = Dbuf[width] = '\0';
+ /* buffer/output next column */
+#define COLUMN(x,y) \
+{ int u, v; \
+ if (o >= width) \
+ { fprintf(file,"\n"); \
+ fprintf(file,"%*s",indent,""); \
+ if (coord > 0) \
+ { if (sa <= aend) \
+ fprintf(file," %*d",coord,sa); \
+ else \
+ fprintf(file," %*s",coord,""); \
+ fprintf(file," %s\n",Abuf); \
+ fprintf(file,"%*s %*s %s\n",indent,"",coord,"",Dbuf); \
+ fprintf(file,"%*s",indent,""); \
+ if (sb <= bend) \
+ fprintf(file," %*d",coord,sb); \
+ else \
+ fprintf(file," %*s",coord,""); \
+ fprintf(file," %s",Bbuf); \
+ } \
+ else \
+ { fprintf(file," %s\n",Abuf); \
+ fprintf(file,"%*s %s\n",indent,"",Dbuf); \
+ fprintf(file,"%*s %s",indent,"",Bbuf); \
+ } \
+ fprintf(file," %5.1f%%\n",(100.*diff)/(diff+match)); \
+ o = 0; \
+ sa = i; \
+ sb = j; \
+ match = diff = 0; \
+ } \
+ u = (x); \
+ v = (y); \
+ if (u == 4 || v == 4) \
+ Dbuf[o] = ' '; \
+ else if (u == v) \
+ Dbuf[o] = mtag; \
+ else \
+ Dbuf[o] = dtag; \
+ Abuf[o] = N2A[u]; \
+ Bbuf[o] = N2A[v]; \
+ o += 1; \
+}
+
+ a = align->aseq - 1;
+ b = align->bseq - 1;
+
+ o = 0;
+ i = j = 1;
+
+ prefa = align->path->abpos;
+ prefb = align->path->bbpos;
+
+ if (prefa > border)
+ { i = prefa-(border-1);
+ prefa = border;
+ }
+ if (prefb > border)
+ { j = prefb-(border-1);
+ prefb = border;
+ }
+
+ sa = i;
+ sb = j;
+ mtag = ':';
+ dtag = ':';
+
+ while (prefa > prefb)
+ { COLUMN(a[i],4)
+ i += 1;
+ prefa -= 1;
+ }
+ while (prefb > prefa)
+ { COLUMN(4,b[j])
+ j += 1;
+ prefb -= 1;
+ }
+ while (prefa > 0)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ prefa -= 1;
+ }
+
+ mtag = '[';
+ if (prefb > 0)
+ COLUMN(5,5)
+
+ mtag = '|';
+ dtag = '*';
+
+ match = diff = 0;
+
+ { int p, c; /* Output columns of alignment til reach trace end */
+
+ for (c = 0; c < tlen; c++)
+ if ((p = trace[c]) < 0)
+ { p = -p;
+ while (i != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(7,b[j])
+ j += 1;
+ diff += 1;
+ }
+ else
+ { while (j != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(a[i],7)
+ i += 1;
+ diff += 1;
+ }
+ p = align->path->aepos;
+ while (i <= p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ }
+
+ { int c; /* Output remaining column including unaligned suffix */
+
+ mtag = ']';
+ if (a[i] != 4 && b[j] != 4 && border > 0)
+ COLUMN(6,6)
+
+ mtag = ':';
+ dtag = ':';
+
+ c = 0;
+ while (c < border && (a[i] != 4 || b[j] != 4))
+ { if (a[i] != 4)
+ if (b[j] != 4)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ }
+ else
+ { COLUMN(a[i],4)
+ i += 1;
+ }
+ else
+ { COLUMN(4,b[j])
+ j += 1;
+ }
+ c += 1;
+ }
+ }
+
+ /* Print remainder of buffered col.s */
+
+ fprintf(file,"\n");
+ fprintf(file,"%*s",indent,"");
+ if (coord > 0)
+ { if (sa <= aend)
+ fprintf(file," %*d",coord,sa);
+ else
+ fprintf(file," %*s",coord,"");
+ fprintf(file," %.*s\n",o,Abuf);
+ fprintf(file,"%*s %*s %.*s\n",indent,"",coord,"",o,Dbuf);
+ fprintf(file,"%*s",indent,"");
+ if (sb <= bend)
+ fprintf(file," %*d",coord,sb);
+ else
+ fprintf(file," %*s",coord,"");
+ fprintf(file," %.*s",o,Bbuf);
+ }
+ else
+ { fprintf(file," %.*s\n",o,Abuf);
+ fprintf(file,"%*s %.*s\n",indent,"",o,Dbuf);
+ fprintf(file,"%*s %.*s",indent,"",o,Bbuf);
+ }
+ if (diff+match > 0)
+ fprintf(file," %5.1f%%\n",(100.*diff)/(diff+match));
+ else
+ fprintf(file,"\n");
+
+ //fprintf(file, "Cool!\n");
+ fflush(file);
+ return (0);
+}
+
+
+typedef void Work_Data;
+
+typedef struct
+{ int *Stop; // Ongoing stack of alignment indels
+ char *Aabs, *Babs; // Absolute base of A and B sequences
+
+ int **PVF, **PHF; // List of waves for iterative np algorithms
+ int mida, midb; // mid point division for mid-point algorithms
+
+ int *VF, *VB; // Forward/Reverse waves for nd algorithms
+ // (defunct: were used for O(nd) algorithms)
+} Trace_Waves;
+
+static int enlarge_trace(_Work_Data *work, int newmax)
+{ void *vec;
+ int max;
+
+ max = ((int) (newmax*1.2)) + 10000;
+ vec = Realloc(work->trace,max,"Enlarging trace vector");
+ if (vec == NULL)
+ EXIT(1);
+ work->tramax = max;
+ work->trace = vec;
+ return (0);
+}
+
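+// iter_np: wave-based alignment of one trace-point panel (A[0..M) versus
+// B[0..N)), adapted from DALIGNER's align.c. It appends the indel script for
+// the panel to wave->Stop and returns the panel's difference count; it is the
+// inner step of computeTracePTS below.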
+static int iter_np(char *A, int M, char *B, int N, Trace_Waves *wave)
+{ int **PVF = wave->PVF;
+ int **PHF = wave->PHF;
+ int D;
+ int del = M-N;
+
+ { int *F0, *F1, *F2;
+ int *HF;
+ int low, hgh;
+ int posl, posh;
+
+#ifdef DEBUG_ALIGN
+ printf("\n%*s BASE %ld,%ld: %d vs %d\n",depth,"",A-wave->Aabs,B-wave->Babs,M,N);
+ printf("%*s A = ",depth,"");
+ for (D = 0; D < M; D++)
+ printf("%c",ToA[(int) A[D]]);
+ printf("\n");
+ printf("%*s B = ",depth,"");
+ for (D = 0; D < N; D++)
+ printf("%c",ToA[(int) B[D]]);
+ printf("\n");
+#endif
+
+ if (del >= 0)
+ { low = 0;
+ hgh = del;
+ }
+ else
+ { low = del;
+ hgh = 0;
+ }
+
+ posl = -INT32_MAX;
+ posh = INT32_MAX;
+ if (wave->Aabs == wave->Babs)
+ { if (B == A)
+ { EPRINTF(EPLACE,"Error: self comparison starts on diagonal 0 (Compute_Trace)\n");
+ EXIT(-1);
+ }
+ else if (B < A)
+ posl = (B-A)+1;
+ else
+ posh = (B-A)-1;
+ }
+
+ F1 = PVF[-2];
+ F0 = PVF[-1];
+
+ for (D = low-1; D <= hgh+1; D++)
+ F1[D] = F0[D] = -2;
+ F0[0] = -1;
+
+ low += 1;
+ hgh -= 1;
+
+ for (D = 0; 1; D += 1)
+ { int k, i, j;
+ int am, ac, ap;
+ char *a;
+
+ F2 = F1;
+ F1 = F0;
+ F0 = PVF[D];
+ HF = PHF[D];
+
+ if ((D & 0x1) == 0)
+ { if (low > posl)
+ low -= 1;
+ if (hgh < posh)
+ hgh += 1;
+ }
+ F0[hgh+1] = F0[low-1] = -2;
+
+#define FS_MOVE(mdir,pdir) \
+ ac = F1[k]+1; \
+ if (ac < am) \
+ if (ap < am) \
+ { HF[k] = mdir; \
+ j = am; \
+ } \
+ else \
+ { HF[k] = pdir; \
+ j = ap; \
+ } \
+ else \
+ if (ap < ac) \
+ { HF[k] = 0; \
+ j = ac; \
+ } \
+ else \
+ { HF[k] = pdir; \
+ j = ap; \
+ } \
+ \
+ if (N < i) \
+ while (j < N && B[j] == a[j]) \
+ j += 1; \
+ else \
+ while (j < i && B[j] == a[j]) \
+ j += 1; \
+ F0[k] = j;
+
+ j = -2;
+ a = A + hgh;
+ i = M - hgh;
+ for (k = hgh; k > del; k--)
+ { ap = j+1;
+ am = F2[k-1];
+ FS_MOVE(-1,4)
+ a -= 1;
+ i += 1;
+ }
+
+ j = -2;
+ a = A + low;
+ i = M - low;
+ for (k = low; k < del; k++)
+ { ap = F2[k+1]+1;
+ am = j;
+ FS_MOVE(2,1)
+ a += 1;
+ i -= 1;
+ }
+
+ ap = F0[del+1]+1;
+ am = j;
+ FS_MOVE(2,4)
+
+#ifdef DEBUG_AWAVE
+ print_awave(F0,low,hgh);
+ print_awave(HF,low,hgh);
+#endif
+
+ if (F0[del] >= N)
+ break;
+ }
+ }
+
+ { int k, h, m, e, c;
+ char *a;
+ int ap = (wave->Aabs-A)-1;
+ int bp = (B-wave->Babs)+1;
+
+ PHF[0][0] = 3;
+
+ c = N;
+ k = del;
+ e = PHF[D][k];
+ PHF[D][k] = 3;
+ while (e != 3)
+ { h = k+e;
+ if (e > 1)
+ h -= 3;
+ else if (e == 0)
+ D -= 1;
+ else
+ D -= 2;
+ if (h < k) // => e = -1 or 2
+ { a = A + k;
+ if (k < 0)
+ m = -k;
+ else
+ m = 0;
+ if (PVF[D][h] <= c)
+ c = PVF[D][h]-1;
+ while (c >= m && a[c] == B[c])
+ c -= 1;
+ if (e < 1) // => edge is 2, others are 1, and 0
+ { if (c <= PVF[D+2][k+1])
+ { e = 4;
+ h = k+1;
+ D = D+2;
+ }
+ else if (c == PVF[D+1][k])
+ { e = 0;
+ h = k;
+ D = D+1;
+ }
+ else
+ PVF[D][h] = c+1;
+ }
+ else // => edge is 0, others are 1, and 2 (if k != del), 0 (otherwise)
+ { if (k == del)
+ m = D;
+ else
+ m = D-2;
+ if (c <= PVF[m][k+1])
+ { if (k == del)
+ e = 4;
+ else
+ e = 1;
+ h = k+1;
+ D = m;
+ }
+ else if (c == PVF[D-1][k])
+ { e = 0;
+ h = k;
+ D = D-1;
+ }
+ else
+ PVF[D][h] = c+1;
+ }
+ }
+ m = PHF[D][h];
+ PHF[D][h] = e;
+ e = m;
+
+ k = h;
+ }
+
+ k = D = 0;
+ e = PHF[D][k];
+ while (e != 3)
+ { h = k-e;
+ c = PVF[D][k];
+ if (e > 1)
+ h += 3;
+ else if (e == 0)
+ D += 1;
+ else
+ D += 2;
+ if (h > k)
+ *wave->Stop++ = bp+c;
+ else if (h < k)
+ *wave->Stop++ = ap-(c+k);
+ k = h;
+ e = PHF[D][h];
+ }
+
+#ifdef DEBUG_SCRIPT
+ k = D = 0;
+ e = PHF[D][k];
+ while (e != 3)
+ { h = k-e;
+ c = PVF[D][k];
+ if (e > 1)
+ h += 3;
+ else if (e == 0)
+ D += 1;
+ else
+ D += 2;
+ if (h > k)
+ printf("%*s D %d(%d)\n",depth,"",(c-k)-(ap-1),c+bp);
+ else if (h < k)
+ printf("%*s I %d(%d)\n",depth,"",c+(bp-1),(c+k)-ap);
+ else
+ printf("%*s %d S %d\n",depth,"",(c+k)-(ap+1),c+(bp-1));
+ k = h;
+ e = PHF[D][h];
+ }
+#endif
+ }
+
+ return (D + abs(del));
+}
+
+
+
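+// computeTracePTS: local version of Compute_Trace_PTS. It expands an
+// overlap's compressed trace points into an explicit indel trace by running
+// iter_np panel by panel at trace_spacing resolution, leaving the result in
+// path->trace / path->tlen / path->diffs.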
+int LAInterface::computeTracePTS(Alignment *align, Work_Data *ework, int trace_spacing)
+{ _Work_Data *work = (_Work_Data *) ework;
+ Trace_Waves wave;
+
+ Path *path;
+ char *aseq, *bseq;
+ uint16 *points;
+ int tlen;
+ int ab, bb;
+ int ae, be;
+ int diffs;
+
+ path = align->path;
+ aseq = align->aseq;
+ bseq = align->bseq;
+ tlen = path->tlen;
+ points = (uint16 *) path->trace;
+
+ { int64 s;
+ int d;
+ int M, N;
+ int dmax, nmax;
+ int **PVF, **PHF;
+
+ M = path->aepos-path->abpos;
+ N = path->bepos-path->bbpos;
+ if (M < N)
+ s = N*sizeof(int);
+ else
+ s = M*sizeof(int);
+ if (s > work->tramax)
+ if (enlarge_trace(work,s))
+ EXIT(1);
+
+ nmax = 0;
+ dmax = 0;
+ for (d = 1; d < tlen; d += 2)
+ { if (points[d-1] > dmax)
+ dmax = points[d-1];
+ if (points[d] > nmax)
+ nmax = points[d];
+ }
+ if (tlen <= 1)
+ nmax = N;
+ if (points[d-1] > dmax)
+ dmax = points[d-1];
+
+ s = (dmax+3)*2*((trace_spacing+nmax+3)*sizeof(int) + sizeof(int *));
+
+ if (s > work->vecmax)
+ if (enlarge_vector(work,s))
+ EXIT(1);
+
+ wave.PVF = PVF = ((int **) (work->vector)) + 2;
+ wave.PHF = PHF = PVF + (dmax+3);
+
+ s = trace_spacing+nmax+3;
+ PVF[-2] = ((int *) (PHF + (dmax+1))) + (nmax+1);
+ for (d = -1; d <= dmax; d++)
+ PVF[d] = PVF[d-1] + s;
+ PHF[-2] = PVF[dmax] + s;
+ for (d = -1; d <= dmax; d++)
+ PHF[d] = PHF[d-1] + s;
+ }
+
+ wave.Stop = (int *) (work->trace);
+ wave.Aabs = aseq;
+ wave.Babs = bseq;
+
+ { int i, d;
+
+ diffs = 0;
+ ab = path->abpos;
+ ae = (ab/trace_spacing)*trace_spacing;
+ bb = path->bbpos;
+ tlen -= 2;
+ for (i = 1; i < tlen; i += 2)
+ { ae = ae + trace_spacing;
+ be = bb + points[i];
+ d = iter_np(aseq+ab,ae-ab,bseq+bb,be-bb,&wave);
+ if (d < 0)
+ EXIT(1);
+ diffs += d;
+ ab = ae;
+ bb = be;
+ }
+ ae = path->aepos;
+ be = path->bepos;
+ d = iter_np(aseq+ab,ae-ab,bseq+bb,be-bb,&wave);
+ if (d < 0)
+ EXIT(1);
+ diffs += d;
+ }
+
+ path->trace = work->trace;
+ path->tlen = wave.Stop - ((int *) path->trace);
+ path->diffs = diffs;
+
+ return (0);
+}
+
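+// showAlignmentTags: debugging helper. It reloads the two subreads of an
+// LAlignment and prints the aligned base pairs column by column, replaying
+// the explicit trace, so the alignment must have been recovered first (e.g.
+// via recoverAlignment) for alignment->trace to be populated.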
+int LAInterface::showAlignmentTags(LAlignment *alignment) {
+
+ //load aseq and bseq first
+
+ //printf("A:%s\n",alignment->aseq);
+ //printf("B:%s\n",alignment->bseq);
+ int amin, amax, bmin, bmax;
+ const int BORDER = 10;
+ amin = alignment->abpos - BORDER;
+ if (amin < 0) amin = 0;
+ amax = alignment->aepos + BORDER;
+ if (amax > alignment->alen) amax = alignment->alen;
+ if (alignment->flags == 1) {
+ bmin = (alignment->blen - alignment->bepos) - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = (alignment->blen - alignment->bbpos) + BORDER;
+ if (bmax > alignment->blen) bmax = alignment->blen;
+ }
+ else {
+ bmin = alignment->bbpos - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = alignment->bepos + BORDER;
+ if (bmax > alignment->blen) bmax = alignment->blen;
+ }
+
+
+ char * abuffer = New_Read_Buffer(db1);
+ char * bbuffer = New_Read_Buffer(db2);
+
+
+ char * aseq = Load_Subread(db1, alignment->read_A_id_, amin, amax, abuffer, 0);
+ char * bseq = Load_Subread(db2, alignment->read_B_id_, bmin, bmax, bbuffer, 0);
+
+
+ alignment->aseq = aseq - amin;
+ if (alignment->flags == 1) {
+ Complement_Seq(bseq, bmax - bmin);
+ alignment->bseq = bseq - (alignment->blen - bmax);
+ }
+ else
+ alignment->bseq = bseq - bmin;
+
+
+
+
+ char *Abuf, *Bbuf, *Dbuf;
+ int i, j, o;
+ char *a, *b;
+ char mtag, dtag;
+ int prefa, prefb;
+ int aend, bend;
+ int sa, sb;
+ int match, diff;
+ char *N2A;
+ int border = 10;
+
+ int tlen = alignment->tlen;
+ int * trace = alignment->trace;
+
+ a = alignment->aseq - 1;
+ b = alignment->bseq - 1;
+
+ i = j = 1;
+
+ prefa = alignment->abpos;
+ prefb = alignment->bbpos;
+
+ if (prefa > border)
+ { i = prefa-(border-1);
+ prefa = border;
+ }
+ if (prefb > border)
+ { j = prefb-(border-1);
+ prefb = border;
+ }
+
+ sa = i;
+ sb = j;
+ mtag = ':';
+ dtag = ':';
+
+#undef COLUMN
+#define COLUMN(x,y) \
+ { \
+ printf(" %c-%c ",ToU[x],ToU[y]); \
+ } \
+
+
+ while (prefa > prefb)
+ { COLUMN(a[i],4)
+ i += 1;
+ prefa -= 1;
+ }
+ while (prefb > prefa)
+ { COLUMN(4,b[j])
+ j += 1;
+ prefb -= 1;
+ }
+ while (prefa > 0)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ prefa -= 1;
+ }
+
+ mtag = '[';
+ if (prefb > 0)
+ COLUMN(5,5)
+
+ mtag = '|';
+ dtag = '*';
+
+ match = diff = 0;
+
+ { int p, c; /* Output columns of alignment til reach trace end */
+
+ for (c = 0; c < tlen; c++)
+ if ((p = trace[c]) < 0)
+ { p = -p;
+ //printf("%d\n",trace[c]);
+ while (i != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(7,b[j])
+ j += 1;
+ diff += 1;
+ }
+ else
+ { while (j != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(a[i],7)
+ i += 1;
+ diff += 1;
+ }
+ p = alignment->aepos;
+ while (i <= p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ }
+
+ { int c; /* Output remaining column including unaligned suffix */
+
+ mtag = ']';
+ if (a[i] != 4 && b[j] != 4 && border > 0)
+ COLUMN(6,6)
+
+ mtag = ':';
+ dtag = ':';
+
+ c = 0;
+ while (c < border && (a[i] != 4 || b[j] != 4))
+ { if (a[i] != 4)
+ if (b[j] != 4)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ }
+ else
+ { COLUMN(a[i],4)
+ i += 1;
+ }
+ else
+ { COLUMN(4,b[j])
+ j += 1;
+ }
+ c += 1;
+ }
+ }
+
+ free(abuffer - 1);
+ free(bbuffer - 1);
+
+ alignment->aseq = NULL;
+ alignment->bseq = NULL;
+
+
+ return 0;
+}
+
+
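+// getAlignmentTags: non-printing variant of showAlignmentTags. It replays the
+// trace and returns the two gapped sequences as a pair of strings, with '-'
+// marking indels and the unaligned borders skipped. Hedged sketch, assuming
+// `la` is an open interface and `al` an LAlignment obtained from getAlignment:
+//
+//   la.recoverAlignment(al);
+//   std::pair<std::string, std::string> tags = la.getAlignmentTags(al);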
+std::pair<std::string, std::string> LAInterface::getAlignmentTags(LAlignment *alignment) {
+
+ //load aseq and bseq first
+
+ //printf("A:%s\n",alignment->aseq);
+ //printf("B:%s\n",alignment->bseq);
+ int amin, amax, bmin, bmax;
+ const int BORDER = 10;
+ amin = alignment->abpos - BORDER;
+ if (amin < 0) amin = 0;
+ amax = alignment->aepos + BORDER;
+ if (amax > alignment->alen) amax = alignment->alen;
+ if (alignment->flags == 1) {
+ bmin = (alignment->blen - alignment->bepos) - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = (alignment->blen - alignment->bbpos) + BORDER;
+ if (bmax > alignment->blen) bmax = alignment->blen;
+ }
+ else {
+ bmin = alignment->bbpos - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = alignment->bepos + BORDER;
+ if (bmax > alignment->blen) bmax = alignment->blen;
+ }
+
+
+ char * abuffer = New_Read_Buffer(db1);
+ char * bbuffer = New_Read_Buffer(db2);
+
+
+ char * aseq = Load_Subread(db1, alignment->read_A_id_, amin, amax, abuffer, 0);
+ char * bseq = Load_Subread(db2, alignment->read_B_id_, bmin, bmax, bbuffer, 0);
+
+
+ alignment->aseq = aseq - amin;
+ if (alignment->flags == 1) {
+ Complement_Seq(bseq, bmax - bmin);
+ alignment->bseq = bseq - (alignment->blen - bmax);
+ }
+ else
+ alignment->bseq = bseq - bmin;
+
+
+
+
+ char *Abuf, *Bbuf, *Dbuf;
+ int i, j, o;
+ char *a, *b;
+ char mtag, dtag;
+ int prefa, prefb;
+ int aend, bend;
+ int sa, sb;
+ int match, diff;
+ char *N2A;
+ int border = 10;
+
+ int tlen = alignment->tlen;
+ int * trace = alignment->trace; // get the trace from here
+
+ a = alignment->aseq - 1;
+ b = alignment->bseq - 1;
+
+ i = j = 1;
+
+ prefa = alignment->abpos;
+ prefb = alignment->bbpos;
+
+ if (prefa > border)
+ { i = prefa-(border-1);
+ prefa = border;
+ }
+ if (prefb > border)
+ { j = prefb-(border-1);
+ prefb = border;
+ }
+
+ sa = i;
+ sb = j;
+ mtag = ':';
+ dtag = ':';
+
+ std::string aa = "";
+ std::string bb = "";
+ aa.reserve((alignment->aepos - alignment->abpos) * 2);
+ bb.reserve((alignment->bepos - alignment->bbpos) * 2);
+
+#undef COLUMN
+#define COLUMN(x,y) \
+ { \
+ aa.append(1,ToU[x]); \
+ bb.append(1,ToU[y]); \
+ } \
+
+
+ while (prefa > prefb)
+ { //COLUMN(a[i],4)
+ i += 1;
+ prefa -= 1;
+ }
+ while (prefb > prefa)
+ { //COLUMN(4,b[j])
+ j += 1;
+ prefb -= 1;
+ }
+ while (prefa > 0)
+ { //COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ prefa -= 1;
+ }
+
+ mtag = '[';
+ if (prefb > 0)
+ { //COLUMN(5,5) suppressed; empty braces keep the if from swallowing the next statement
+ }
+
+ mtag = '|';
+ dtag = '*';
+
+ match = diff = 0;
+
+ { int p, c; /* Output columns of alignment til reach trace end */
+
+ for (c = 0; c < tlen; c++)
+ if ((p = trace[c]) < 0)
+ { p = -p;
+ //printf("%d\n",trace[c]);
+ while (i != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(7,b[j])
+ j += 1;
+ diff += 1;
+ }
+ else
+ { while (j != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(a[i],7)
+ i += 1;
+ diff += 1;
+ }
+ p = alignment->aepos;
+ while (i <= p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ }
+
+ /* { int c; // Output remaining column including unaligned suffix
+
+ mtag = ']';
+ if (a[i] != 4 && b[j] != 4 && border > 0)
+ COLUMN(6,6)
+
+ mtag = ':';
+ dtag = ':';
+
+ c = 0;
+ while (c < border && (a[i] != 4 || b[j] != 4))
+ { if (a[i] != 4)
+ if (b[j] != 4)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ }
+ else
+ { COLUMN(a[i],4)
+ i += 1;
+ }
+ else
+ { COLUMN(4,b[j])
+ j += 1;
+ }
+ c += 1;
+ }
+ }*/
+
+
+ //printf("%s\n%s\n", aa.c_str(), bb.c_str());
+ free(abuffer - 1);
+ free(bbuffer - 1);
+
+ alignment->aseq = NULL;
+ alignment->bseq = NULL;
+
+
+ return std::pair<std::string, std::string>(aa,bb);
+}
+
+
+
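+// printAlignment_exp: variant of printAlignment that renders from an
+// LAlignment (and its recovered trace) instead of a live Alignment; the
+// COLUMN-based block printer is otherwise the same.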
+int LAInterface::printAlignment_exp(FILE *file, LAlignment *align, Work_Data *ework,
+ int indent, int width, int border, int upper, int coord)
+{ _Work_Data *work = (_Work_Data *) ework;
+ int *trace = (int *) align->trace;
+ int tlen = align->tlen;
+
+ char *Abuf, *Bbuf, *Dbuf;
+ int i, j, o;
+ char *a, *b;
+ char mtag, dtag;
+ int prefa, prefb;
+ int aend, bend;
+ int sa, sb;
+ int match, diff;
+ char *N2A;
+
+ if (trace == NULL) return (0);
+
+#ifdef SHOW_TRACE
+ fprintf(file,"\nTrace:\n");
+ for (i = 0; i < tlen; i++)
+ fprintf(file," %3d\n",trace[i]);
+#endif
+
+ o = sizeof(char)*3*(width+1);
+ if (o > work->vecmax)
+ if (enlarge_vector(work,o))
+ EXIT(1);
+
+ if (upper)
+ N2A = ToU;
+ else
+ N2A = ToL;
+
+ Abuf = (char *) work->vector;
+ Bbuf = Abuf + (width+1);
+ Dbuf = Bbuf + (width+1);
+
+ aend = align->aepos;
+ bend = align->bepos;
+
+ Abuf[width] = Bbuf[width] = Dbuf[width] = '\0';
+ /* buffer/output next column */
+#undef COLUMN
+#define COLUMN(x,y) \
+{ int u, v; \
+ if (o >= width) \
+ { fprintf(file,"\n"); \
+ fprintf(file,"%*s",indent,""); \
+ if (coord > 0) \
+ { if (sa <= aend) \
+ fprintf(file," %*d",coord,sa); \
+ else \
+ fprintf(file," %*s",coord,""); \
+ fprintf(file," %s\n",Abuf); \
+ fprintf(file,"%*s %*s %s\n",indent,"",coord,"",Dbuf); \
+ fprintf(file,"%*s",indent,""); \
+ if (sb <= bend) \
+ fprintf(file," %*d",coord,sb); \
+ else \
+ fprintf(file," %*s",coord,""); \
+ fprintf(file," %s",Bbuf); \
+ } \
+ else \
+ { fprintf(file," %s\n",Abuf); \
+ fprintf(file,"%*s %s\n",indent,"",Dbuf); \
+ fprintf(file,"%*s %s",indent,"",Bbuf); \
+ } \
+ fprintf(file," %5.1f%%\n",(100.*diff)/(diff+match)); \
+ o = 0; \
+ sa = i; \
+ sb = j; \
+ match = diff = 0; \
+ } \
+ u = (x); \
+ v = (y); \
+ if (u == 4 || v == 4) \
+ Dbuf[o] = ' '; \
+ else if (u == v) \
+ Dbuf[o] = mtag; \
+ else \
+ Dbuf[o] = dtag; \
+ Abuf[o] = N2A[u]; \
+ Bbuf[o] = N2A[v]; \
+ o += 1; \
+}
+
+ a = align->aseq - 1;
+ b = align->bseq - 1;
+
+ o = 0;
+ i = j = 1;
+
+ prefa = align->abpos;
+ prefb = align->bbpos;
+
+ if (prefa > border)
+ { i = prefa-(border-1);
+ prefa = border;
+ }
+ if (prefb > border)
+ { j = prefb-(border-1);
+ prefb = border;
+ }
+
+ sa = i;
+ sb = j;
+ mtag = ':';
+ dtag = ':';
+
+ while (prefa > prefb)
+ { COLUMN(a[i],4)
+ i += 1;
+ prefa -= 1;
+ }
+ while (prefb > prefa)
+ { COLUMN(4,b[j])
+ j += 1;
+ prefb -= 1;
+ }
+ while (prefa > 0)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ prefa -= 1;
+ }
+
+ mtag = '[';
+ if (prefb > 0)
+ COLUMN(5,5)
+
+ mtag = '|';
+ dtag = '*';
+
+ match = diff = 0;
+
+ { int p, c; /* Output columns of alignment til reach trace end */
+
+ for (c = 0; c < tlen; c++)
+ if ((p = trace[c]) < 0)
+ { p = -p;
+ while (i != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(7,b[j])
+ j += 1;
+ diff += 1;
+ }
+ else
+ { while (j != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(a[i],7)
+ i += 1;
+ diff += 1;
+ }
+ p = align->aepos;
+ while (i <= p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ }
+
+ { int c; /* Output remaining column including unaligned suffix */
+
+ mtag = ']';
+ if (a[i] != 4 && b[j] != 4 && border > 0)
+ COLUMN(6,6)
+
+ mtag = ':';
+ dtag = ':';
+
+ c = 0;
+ while (c < border && (a[i] != 4 || b[j] != 4))
+ { if (a[i] != 4)
+ if (b[j] != 4)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ }
+ else
+ { COLUMN(a[i],4)
+ i += 1;
+ }
+ else
+ { COLUMN(4,b[j])
+ j += 1;
+ }
+ c += 1;
+ }
+ }
+
+ /* Print remainder of buffered col.s */
+
+ fprintf(file,"\n");
+ fprintf(file,"%*s",indent,"");
+ if (coord > 0)
+ { if (sa <= aend)
+ fprintf(file," %*d",coord,sa);
+ else
+ fprintf(file," %*s",coord,"");
+ fprintf(file," %.*s\n",o,Abuf);
+ fprintf(file,"%*s %*s %.*s\n",indent,"",coord,"",o,Dbuf);
+ fprintf(file,"%*s",indent,"");
+ if (sb <= bend)
+ fprintf(file," %*d",coord,sb);
+ else
+ fprintf(file," %*s",coord,"");
+ fprintf(file," %.*s",o,Bbuf);
+ }
+ else
+ { fprintf(file," %.*s\n",o,Abuf);
+ fprintf(file,"%*s %.*s\n",indent,"",o,Dbuf);
+ fprintf(file,"%*s %.*s",indent,"",o,Bbuf);
+ }
+ if (diff+match > 0)
+ fprintf(file," %5.1f%%\n",(100.*diff)/(diff+match));
+ else
+ fprintf(file,"\n");
+
+ //fprintf(file, "Cool!\n");
+ fflush(file);
+ return (0);
+}
+
+int LAInterface::generateConsensus(std::vector<LAlignment *> &alns) {
+
+ int seq_count = alns.size();
+
+ //TBD
+
+
+ return 0;
+}
+
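+// recoverAlignment: rebuild the base-level trace for one LAlignment from its
+// stored trace points by reloading the two subreads and re-running
+// computeTracePTS; the result is copied into alignment->trace and the
+// recovered flag prevents the work from being repeated.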
+int LAInterface::recoverAlignment(LAlignment *alignment) {
+
+ if (alignment->recovered) return -1;
+
+ int j;
+ uint16 *trace;
+ Work_Data *work;
+ int in, npt, idx, ar;
+ int64 tps;
+ char *abuffer, *bbuffer;
+ int ar_wide, br_wide;
+ int ai_wide, bi_wide;
+ int mn_wide, mx_wide;
+ int tp_wide;
+ int blast, match, seen, lhalf, rhalf;
+ bool ALIGN = true;
+ bool REFERENCE = false;
+ bool CARTOON = false;
+ bool OVERLAP = false;
+ bool FLIP = false;
+ bool UPPERCASE = false;
+ bool MAP = false;
+ int INDENT = 4;
+ int WIDTH = 100;
+ int BORDER = 10;
+
+ //int tmax = 3000;
+ //trace = (uint16 *) malloc(sizeof(uint16) * tmax);
+ //if (trace == NULL)
+ // exit(1);
+
+ int amin, amax, bmin, bmax;
+
+
+ work = New_Work_Data();
+ abuffer = New_Read_Buffer(db1);
+ bbuffer = New_Read_Buffer(db2);
+
+ Overlap * ovl = (Overlap *) malloc(sizeof(Overlap));
+ Alignment * aln = (Alignment *) malloc(sizeof (Alignment));
+
+ aln->path = &(ovl->path);
+ Path * path = &(ovl->path);
+
+ path->abpos = alignment->abpos;
+ path->aepos = alignment->aepos;
+ path->bbpos = alignment->bbpos;
+ path->bepos = alignment->bepos;
+ path->diffs = alignment->diffs;
+ path->tlen = alignment->tlen;
+ aln->alen = alignment->alen;
+ aln->blen = alignment->blen;
+ aln->flags = (uint32)alignment->flags;
+ ovl->aread = alignment->read_A_id_;
+ ovl->bread = alignment->read_B_id_;
+
+ path->trace = (uint16 *)malloc(path->tlen * sizeof(uint16));
+ memcpy(path->trace, alignment->trace_pts, path->tlen * sizeof(uint16));
+
+
+ amin = ovl->path.abpos - BORDER;
+ if (amin < 0) amin = 0;
+ amax = ovl->path.aepos + BORDER;
+ if (amax > aln->alen) amax = aln->alen;
+ if (COMP(aln->flags)) {
+ bmin = (aln->blen - ovl->path.bepos) - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = (aln->blen - ovl->path.bbpos) + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+ else {
+ bmin = ovl->path.bbpos - BORDER;
+ if (bmin < 0) bmin = 0;
+ bmax = ovl->path.bepos + BORDER;
+ if (bmax > aln->blen) bmax = aln->blen;
+ }
+
+ char * aseq = Load_Subread(db1, ovl->aread, amin, amax, abuffer, 0);
+ char * bseq = Load_Subread(db2, ovl->bread, bmin, bmax, bbuffer, 0);
+
+
+ aln->aseq = aseq - amin;
+ if (COMP(aln->flags)) {
+ Complement_Seq(bseq, bmax - bmin);
+ aln->bseq = bseq - (aln->blen - bmax);
+ }
+ else
+ aln->bseq = bseq - bmin;
+
+
+ computeTracePTS(aln, work, tspace);
+
+
+ /*{
+ int tlen = aln->path->tlen;
+ int *trace = (int *) aln->path->trace;
+ int u;
+ printf(" ");
+ for (u = 0; u < tlen; u++)
+ printf("%d,", (int) trace[u]);
+ printf("\n");
+ }*/
+
+
+ alignment->tlen = aln->path->tlen;
+ alignment->trace = (int *) malloc(sizeof(int) * aln->path->tlen*2);
+ {
+ int tlen = aln->path->tlen;
+ int *trace = (int *) aln->path->trace;
+ int u;
+ //printf(" ");
+ for (u = 0; u < tlen; u++) {
+ //printf("%d,", (int) trace[u]);
+ alignment->trace[u] = (int)trace[u];
+ }
+ //printf("\n");
+ }
+
+ free(bbuffer - 1);
+ free(abuffer - 1);
+ Free_Work_Data(work);
+ //free(aln->path->trace);
+
+ alignment->recovered = true;
+
+
+ return 0;
+}
+
+std::vector<int> * LAInterface::getCoverage(std::vector<LOverlap *> alns) {
+ std::vector<int> * res = new std::vector<int>( alns[0]->alen, 0 );
+
+ for (int i = 0; i < alns.size(); i++) {
+ for (int j = alns[i]->read_A_match_start_; j < alns[i]->read_A_match_end_; j++)
+ (*res)[j] ++;
+ }
+
+ return res;
+}
+
+
+std::vector<int> *LAInterface::getCoverage(std::vector<LAlignment *> alns) {
+ std::vector<int> * res = new std::vector<int>( alns[0]->alen, 0 );
+
+ for (int i = 0; i < alns.size(); i++) {
+ for (int j = alns[i]->abpos; j < alns[i]->aepos; j++)
+ (*res)[j] ++;
+ }
+
+ return res;
+}
+
+std::vector<std::pair<int, int> > * LAInterface::lowCoverageRegions(std::vector<int> &cov, int min_cov) {
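+    // Illustrative example (input values are assumed): for cov = {5, 5, 1, 2, 5, 0, 0}
+    // and min_cov = 3, the function returns the half-open intervals [2,4) and [5,7).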
+ std::vector<std::pair < int, int>> * reg = new std::vector<std::pair < int, int>> ();
+ int pos = 0;
+ while (pos < cov.size()) {
+ int start = 0;
+ if (cov[pos] < min_cov){
+ start = pos;
+            while ((pos < cov.size()) and (cov[pos] < min_cov))
+ pos ++;
+ reg->push_back(std::pair<int, int >(start, pos) ); //low coverage region in [a,b)
+ }
+ else pos ++;
+ }
+ return reg;
+}
+
+bool compare_event(std::pair<int, int> event1,std::pair<int, int> event2) {
+ return event1.first < event2.first;
+}
+
+
+void LAInterface::profileCoverage(std::vector<LOverlap *> &alignments, std::vector<std::pair<int, int> > & coverage,int reso, int cutoff) {
+ //Returns coverage, which is a pair of ints <i*reso, coverage at position i*reso of read a>
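+    // Illustrative sketch of the event sweep (input values are assumed): with reso = 1000
+    // and cutoff = 0, alignments covering A:[0,2500], A:[500,1500] and A:[1200,3000]
+    // generate +1 events at 0, 500, 1200 and -1 events at 1500, 2500, 3000; sweeping them
+    // in sorted order yields the coverage pairs (0,0), (1000,2), (2000,2), (3000,1).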
+ std::vector<std::pair<int, int> > events;
+ for (int i = 0; i < alignments.size(); i ++) {
+ events.push_back(std::pair<int, int>(alignments[i]->read_A_match_start_ + cutoff, 1));
+ events.push_back(std::pair<int, int>(alignments[i]->read_A_match_end_ - cutoff, -1));
+ }
+
+ std::sort(events.begin(), events.end(), compare_event);
+
+ int pos = 0;
+ int i = 0;
+ int count = 0;
+ while (pos < events.size()) {
+        while ((pos < events.size()) and (events[pos].first < i*reso)) {
+ count += events[pos].second;
+ pos++;
+ }
+ coverage.push_back(std::pair<int, int>(i*reso, count));
+ i++;
+ }
+ return;
+}
+
+
+void LAInterface::profileCoveragefine(std::vector<LOverlap *> &alignments, std::vector<std::pair<int, int> > & coverage,int reso, int cutoff, int est_coverage) {
+ std::vector<std::pair<int, int> > events;
+ int sz = alignments.size();
+ if (sz > est_coverage) sz = est_coverage;
+
+ for (int i = 0; i < sz; i ++) {
+ events.push_back(std::pair<int, int>(alignments[i]->read_A_match_start_ + cutoff, 1));
+ events.push_back(std::pair<int, int>(alignments[i]->read_A_match_end_ - cutoff, -1));
+ }
+
+ std::sort(events.begin(), events.end(), compare_event);
+
+ int pos = 0;
+ int i = 0;
+ int count = 0;
+ while (pos < events.size()) {
+        while ((pos < events.size()) and (events[pos].first < i*reso)) {
+ count += events[pos].second;
+ pos++;
+ }
+ coverage.push_back(std::pair<int, int>(i*reso, count));
+ i++;
+ }
+ return;
+}
+
+
+
+void LAInterface::repeatDetect(std::vector<std::pair<int, int> > & coverage, std::vector<std::pair<int, int> > & repeat) {
+ for (int i = 1; i < coverage.size(); i++) {
+ if (coverage[i].second > 2*coverage[i-1].second) repeat.push_back(std::pair<int, int>(coverage[i].first, 1));
+ if (coverage[i].second < 0.5*coverage[i-1].second) repeat.push_back(std::pair<int, int>(coverage[i].first, -1));
+ }
+ return;
+}
+
+
+static int qv_map[51] =
+ { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
+ 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
+ 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D',
+ 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
+ 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
+ 'Y'
+ };
+
+void LAInterface::getQV(std::vector<std::vector<int> > & QV, int from, int to) {
+ int b,e;
+ b = from;
+ e = to;
+ HITS_READ * reads = db1->reads;
+ bool UPPER = true;
+
+ int64 *qv_idx;
+ uint8 *qv_val;
+
+ //if (DOIQV)
+ { int status, kind;
+ HITS_TRACK *track;
+ status = Check_Track(db1,"qual",&kind);
+ if (status == -2)
+ { fprintf(stderr,"%s: .qual-track does not exist for this db.\n",Prog_Name);
+ exit (1);
+ }
+ if (status == -1)
+ { fprintf(stderr,"%s: .qual-track not sync'd with db.\n",Prog_Name);
+ exit (1);
+ }
+ track = Load_Track(db1,"qual");
+ qv_idx = (int64 *) track->anno;
+ qv_val = (uint8 *) track->data;
+ }
+
+
+ for (int i = b; i < e; i++)
+ {
+ int len;
+ int fst, lst;
+ int flags, qv;
+ HITS_READ *r;
+
+ r = reads + i;
+ len = r->rlen;
+ /*if (DORED)
+ printf("R %d\n",i+1);*/
+
+ flags = r->flags;
+ qv = (flags & DB_QV);
+ /*if (DOHDR)
+ { if (DAM)
+ { char header[MAX_NAME];
+
+ fseeko(hdrs,r->coff,SEEK_SET);
+ fgets(header,MAX_NAME,hdrs);
+ header[strlen(header)-1] = '\0';
+ printf("H %ld %s\n",strlen(header),header);
+ printf("L %d %d %d\n",r->origin,r->fpulse,r->fpulse+len);
+ }
+ else
+ { while (i < findx[map-1])
+ map -= 1;
+ while (i >= findx[map])
+ map += 1;
+ printf("H %ld %s\n",strlen(flist[map]),flist[map]);
+ printf("L %d %d %d\n",r->origin,r->fpulse,r->fpulse+len);
+ if (qv > 0)
+ printf("Q: %d\n",qv);
+ }
+ }*/
+
+ /*if (DOQVS)
+ Load_QVentry(db,i,entry,UPPER);*/
+ /*if (DOSEQ)
+ Load_Read(db,i,read,UPPER);*/
+
+ /*for (m = 0; m < MTOP; m++)
+ { int64 *anno;
+ int *data;
+ int64 s, f, j;
+
+ anno = (int64 *) MTRACK[m]->anno;
+ data = (int *) MTRACK[m]->data;
+
+ s = (anno[i] >> 2);
+ f = (anno[i+1] >> 2);
+ printf("T%d %lld ",m,(f-s)/2);
+ if (s < f)
+ { for (j = s; j < f; j += 2)
+ printf(" %d %d",data[j],data[j+1]);
+ }
+ printf("\n");
+ }
+
+ if (substr)
+ { fst = iter->beg;
+ lst = iter->end;
+ }
+ else
+ { fst = 0;
+ lst = len;
+ }
+
+ if (DOSEQ)
+ { printf("S %d ",lst-fst);
+ printf("%.*s\n",lst-fst,read+fst);
+ }
+ */
+ //if (DOIQV)
+ { int64 k, e;
+ std::vector<int> qv;
+ k = qv_idx[i];
+ e = qv_idx[i+1];
+ //printf("I %lld ",e-k);
+ while (k < e) {
+ qv.push_back(qv_val[k++]);
+ //putchar(qv_map[qv_val[k++]]);
+
+ }
+ //printf("\n");
+ QV.push_back(qv);
+ }
+ /*if (DOQVS)
+ { int k;
+
+ for (k = 0; k < 5; k++)
+ { printf("%c %d ",qvname[k],lst-fst);
+ printf("%.*s\n",lst-fst,entry[k]+fst);
+ }
+ }*/
+ }
+ return;
+}
+
+
+
+int LOverlap::GetMatchingPosition(int pos_A) {
+
+ /**
+ * GetMatchingPosition: Given a position on read A inside the matched segment,
+ * return the corresponding position on B
+ */
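+    /*
+     * Illustrative walk-through (all numbers are assumed; trace points are spaced every
+     * 100 bp of read A, as in the loop below): with read_A_match_start_ = 230,
+     * read_B_match_start_ = 1000 and per-window B advances trace_pts[2j+1] = {75, 102, ...},
+     * the A-side window boundaries are 230 -> 300 -> 400 -> ... and the matching B positions
+     * 1000 -> 1075 -> 1177 -> ...; a query pos_A = 430 falls in the window starting at 400
+     * and returns 1177 + (430 - 400) = 1207.  For a reverse-complement match rev_sign = -1
+     * and the B coordinate decreases instead.
+     */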
+
+
+ if ((pos_A < this->read_A_match_start_) or (pos_A > this->read_A_match_end_)) {
+ return -1;
+ }
+
+ int rev_sign = 1 - 2*this->reverse_complement_match_;
+
+ int current_pos_read_A = this->read_A_match_start_;
+ int next_pos_read_A = current_pos_read_A;
+
+ int current_pos_read_B = this->read_B_match_start_;
+ if (this->reverse_complement_match_ == 1) {
+ current_pos_read_B = this->read_B_match_end_;
+ }
+
+
+ for (int j = 0; j < this->trace_pts_len/2-1; j++) {
+
+ if (current_pos_read_A % 100 != 0)
+ next_pos_read_A = int(ceil(current_pos_read_A / 100.0)) * 100;
+ else
+ next_pos_read_A = current_pos_read_A + 100;
+
+
+ if (next_pos_read_A >= pos_A) {
+ return current_pos_read_B + pos_A - current_pos_read_A;
+ }
+
+ current_pos_read_B = current_pos_read_B + rev_sign * this->trace_pts[2 * j + 1];
+ current_pos_read_A = next_pos_read_A;
+
+ }
+
+ // if we got here, it means the hinge is in the last trace_pt window of A
+
+    if (current_pos_read_A < pos_A) { // technically, we shouldn't need to check this
+ return current_pos_read_B + pos_A - current_pos_read_A;
+ }
+
+ return -2; // this shouldn't happen
+
+}
+
+
+
+
+
+void LOverlap::trim_overlap() {
+ /**
+     * Trim overlap: the reads are trimmed according to quality values and coverage;
+     * to stay consistent, the overlap needs to be trimmed as well.
+     * Rather than re-running DAligner on the trimmed reads, this function trims the overlap according to its trace points:
+     * it finds the trace points that lie in the untrimmed regions of both reads.
+ */
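+    /*
+     * Illustrative example (coordinates are assumed): if read A is trimmed to [500, 9000],
+     * read B to [300, 8000], and the raw match spans A:[350, 8900] x B:[200, 7950], then the
+     * first trace-point pair with A >= 500 and B >= 300 supplies (eff_read_A_match_start_,
+     * eff_read_B_match_start_) and the last pair with A <= 9000 and B <= 8000 supplies
+     * (eff_read_A_match_end_, eff_read_B_match_end_).
+     */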
+
+    //before trimming, the positions are read_A_match_start_, read_B_match_start_, read_A_match_end_
+    // and read_B_match_end_; after trimming, the corresponding positions carry an eff_ prefix
+
+ this->eff_read_B_match_start_ = this->read_B_match_start_;
+ this->eff_read_B_match_end_ = this->read_B_match_end_;
+ this->eff_read_A_match_start_ = this->read_A_match_start_;
+ this->eff_read_A_match_end_ = this->read_A_match_end_;
+
+
+ std::vector<std::pair<int,int> > trace_points;
+
+ if (this->reverse_complement_match_ == 0) {
+ trace_points.push_back(std::pair<int,int>(this->read_A_match_start_, this->read_B_match_start_));
+ }
+ else {
+ trace_points.push_back(std::pair<int,int>(this->read_A_match_start_, this->read_B_match_end_));
+ }
+
+ int rev_sign = 1 - 2*this->reverse_complement_match_;
+
+ int current_position_read_A = this->read_A_match_start_;
+    // this for loop converts the trace points stored in trace_pts[] into a vector of coordinate pairs: trace_points
+ for (int j = 0; j < this->trace_pts_len/2-1; j++) {
+ if (current_position_read_A % 100 != 0)
+ current_position_read_A = int(ceil(current_position_read_A / 100.0)) * 100;
+ else
+ current_position_read_A += 100;
+ trace_points.push_back(std::pair<int,int>(current_position_read_A,
+ trace_points.back().second + rev_sign * this->trace_pts[2 * j + 1]));
+ }
+ if (this->reverse_complement_match_ == 0) {
+ trace_points.push_back(std::pair<int, int>(this->read_A_match_end_, this->read_B_match_end_));
+ }
+ else {
+ trace_points.push_back(std::pair<int, int>(this->read_A_match_end_, this->read_B_match_start_));
+ }
+
+
+ //printf("[%6d %6d] [%6d %6d]\n", this->eff_read_A_start_, this->eff_read_A_end_, this->eff_read_B_start_, this->eff_read_B_end_);
+
+ //printf("[%6d %6d] [%6d %6d]\n", this->eff_read_A_match_start_, this->eff_read_A_match_end_, this->eff_read_B_match_start_, this->eff_read_B_match_end_);
+
+ /*for (int j = 0; j < trace_points.size(); j++) {
+ printf("a%d b%d ", trace_points[j].first, trace_points[j].second);
+ }
+ printf("\n");
+
+ // for debugging
+ */
+
+ this->eff_start_trace_point_index_ = trace_points.size();
+ this->eff_end_trace_point_index_ = 0;
+
+
+
+ if (this->reverse_complement_match_ == 0) {
+
+ //for trace point pairs, get the first one that is in untrimmed regions for both reads
+
+ for (int i = 0; i < trace_points.size(); i++) {
+ if ( (trace_points[i].first >= this->eff_read_A_start_) and
+ (trace_points[i].second >= this->eff_read_B_start_) ) {
+ this->eff_read_A_match_start_ = trace_points[i].first;
+ this->eff_read_B_match_start_ = trace_points[i].second;
+ this->eff_start_trace_point_index_ = i;
+ break;
+ }
+ }
+
+ //for trace point pairs, get the last one that is in untrimmed regions for both reads
+ for (int i = (int) trace_points.size() - 1; i >= 0; i--) {
+ if ((trace_points[i].first <= this->eff_read_A_end_) and
+ (trace_points[i].second <= this->eff_read_B_end_)) {
+ this->eff_read_A_match_end_ = trace_points[i].first;
+ this->eff_read_B_match_end_ = trace_points[i].second;
+ this->eff_end_trace_point_index_ = i;
+ break;
+ }
+ }
+
+ }
+ else {
+
+ for (int i = 0; i < trace_points.size(); i++) {
+ if ( (trace_points[i].first >= this->eff_read_A_start_) and
+ (trace_points[i].second <= this->eff_read_B_end_) ) {
+ this->eff_read_A_match_start_ = trace_points[i].first;
+ this->eff_read_B_match_end_ = trace_points[i].second;
+ this->eff_start_trace_point_index_ = i; // "start" with respect to A
+ break;
+ }
+ }
+
+ for (int i = (int) trace_points.size() - 1; i >= 0; i--) {
+ if ((trace_points[i].first <= this->eff_read_A_end_) and
+ (trace_points[i].second >= this->eff_read_B_start_)) {
+ this->eff_read_A_match_end_ = trace_points[i].first;
+ this->eff_read_B_match_start_ = trace_points[i].second;
+ this->eff_end_trace_point_index_ = i;
+ break;
+ }
+ }
+
+ }
+
+ if (this->eff_start_trace_point_index_ >= this->eff_end_trace_point_index_)
+ {
+ this->active = false;
+ }
+
+ /*printf("[%6d %6d] [%6d %6d]\n", this->eff_read_A_match_start_, this->eff_read_A_match_end_, this->eff_read_B_match_start_, this->eff_read_B_match_end_);
+
+ int overhang_read_A_left = this->eff_read_A_match_start_ - this->eff_read_A_start_;
+ int overhang_read_A_right = this->eff_read_A_end_ - this->eff_read_A_match_end_;
+ int overhang_read_B_left = this->eff_read_B_match_start_ - this->eff_read_B_start_;
+ int overhang_read_B_right = this->eff_read_B_end_ - this->eff_read_B_match_end_;
+
+ printf("trim A_left %6d, A_right %6d, B_left %6d, B_right %6d\n",
+ overhang_read_A_left, overhang_read_A_right,
+ overhang_read_B_left, overhang_read_B_right);
+
+ */
+
+}
+
+
+void LOverlap::TrimOverlapNaive(){
+ this->eff_read_B_match_start_ = std::max (this->read_B_match_start_,this->eff_read_B_start_);
+ this->eff_read_B_match_end_ = std::min (this->read_B_match_end_,this->eff_read_B_end_);
+ this->eff_read_A_match_start_ = std::max (this->read_A_match_start_,this->eff_read_A_start_);
+    this->eff_read_A_match_end_ = std::min (this->read_A_match_end_,this->eff_read_A_end_);
+}
+
+
+// This function is no longer used in hinging_v1.cpp
+
+void LOverlap::addtype(int max_overhang) {
+    /**
+     * addtype classifies overlaps: edges are labelled forward, backward, internal match, bcovera or acoverb.
+     * The classification is based on effective (trimmed) positions rather than the raw match positions.
+     */
+
+ int overhang = std::min(this->eff_read_A_match_start_ - this->eff_read_A_start_, this->eff_read_B_match_start_ - this->eff_read_B_start_) + std::min(this->eff_read_A_end_ - this->eff_read_A_match_end_, this->eff_read_B_end_ - this->eff_read_B_match_end_);
+
+ //int tol = 0;
+ if (overhang > max_overhang)
+ this->match_type_ = INTERNAL;
+ else if ((this->eff_read_A_match_start_ - this->eff_read_A_start_ <= this->eff_read_B_match_start_ - this->eff_read_B_start_) and (this->eff_read_A_end_ - this->eff_read_A_match_end_ <= this->eff_read_B_end_ - this->eff_read_B_match_end_))
+ this->match_type_ = BCOVERA;
+ else if ((this->eff_read_A_match_start_ - this->eff_read_A_start_ >= this->eff_read_B_match_start_ - this->eff_read_B_start_) and (this->eff_read_A_end_ - this->eff_read_A_match_end_ >= this->eff_read_B_end_ - this->eff_read_B_match_end_))
+ this->match_type_ = ACOVERB;
+ else if (this->eff_read_A_match_start_ - this->eff_read_A_start_ > this->eff_read_B_match_start_ - this->eff_read_B_start_) {
+ if ((this->eff_read_B_end_ - this->eff_read_B_match_end_ > 0) and (this->eff_read_A_match_start_ - this->eff_read_A_start_ > 0))
+ this->match_type_ = FORWARD;
+ }
+ else {
+ if ((this->eff_read_B_match_start_ - this->eff_read_B_start_ > 0) and (this->eff_read_A_end_ - this->eff_read_A_match_end_ > 0))
+ this->match_type_ = BACKWARD;
+ }
+}
+
+void LOverlap::AddTypesAsymmetric(int max_overhang, int min_overhang) {
+    //max_overhang is the largest overhang that can be attributed to bad DAligner ends;
+    //min_overhang is the minimum overhang the covering read must have on both ends for a containment call.
+    //The function sets the member variable match_type_ according to the relative positions of the two reads.
+    //Possible values are:
+    // BCOVERA, ACOVERB, INTERNAL, FORWARD, FORWARD_INTERNAL, BACKWARD, BACKWARD_INTERNAL
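+    //
+    // Illustrative example (all numbers are assumed): with max_overhang = 300 and
+    // min_overhang = 50, overhangs A_left = 20, A_right = 4000, B_left = 3500, B_right = 10
+    // give BACKWARD (A's left end and B's right end are explained by the match, so B is a
+    // proper left extension of A), whereas A_left = 20, A_right = 4000, B_left = 2000,
+    // B_right = 1500 give BACKWARD_INTERNAL (A's prefix aligns inside B).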
+ int overhang_read_A_left = this->eff_read_A_match_start_ - this->eff_read_A_start_;
+ int overhang_read_A_right = this->eff_read_A_end_ - this->eff_read_A_match_end_;
+ int overhang_read_B_left = this->eff_read_B_match_start_ - this->eff_read_B_start_;
+ int overhang_read_B_right = this->eff_read_B_end_ - this->eff_read_B_match_end_;
+
+
+ //printf(" A_left %6d, A_right %6d, B_left %6d, B_right %6d\n",
+ // overhang_read_A_left, overhang_read_A_right,
+ // overhang_read_B_left, overhang_read_B_right);
+
+ if (this->reverse_complement_match_ == 1) {
+ //Exchange overhang left and right of read B if match is reverse complement
+ overhang_read_B_left = this->eff_read_B_end_ - this->eff_read_B_match_end_;
+ overhang_read_B_right = this->eff_read_B_match_start_ - this->eff_read_B_start_;
+ }
+
+
+ if ((std::max(overhang_read_A_left, overhang_read_A_right) < max_overhang)
+ and (std::min(overhang_read_B_left, overhang_read_B_right) > min_overhang ))
+ // and ((overhang_read_A_left <= overhang_read_B_left)
+ // and (overhang_read_A_right <= overhang_read_B_right)))
+ this->match_type_ = BCOVERA;
+ else if ((std::max(overhang_read_B_left, overhang_read_B_right) < max_overhang)
+ and (std::min(overhang_read_A_left, overhang_read_A_right) > min_overhang ))
+ //and (overhang_read_A_left >= overhang_read_B_left)
+ // and (overhang_read_A_right >= overhang_read_B_right))
+ //
+ this->match_type_ = ACOVERB;
+ else if ((std::min(overhang_read_A_left, overhang_read_A_right) > max_overhang))
+ this->match_type_ = INTERNAL;
+ else if (overhang_read_A_left <= max_overhang) {
+        //Check if read B is a left extension. As we've handled internal,
+        //we know that this is a BACKWARD or BACKWARD_INTERNAL match
+ if ((overhang_read_B_right <= max_overhang) and (overhang_read_B_left >= max_overhang)) {
+            //B's right end lies in the match and B extends past A on the left: a proper backward overlap.
+ this->match_type_ = BACKWARD;
+ }
+ else if ((overhang_read_B_right >= max_overhang) and (overhang_read_B_left >= max_overhang)) {
+            //The match ends inside B on both sides: A's prefix aligns to an internal segment of B.
+ this->match_type_ = BACKWARD_INTERNAL;
+ }
+ }
+ else if (overhang_read_A_right <= max_overhang) {
+        //Check if read B is a right extension. As we've handled internal,
+        //we know that this is a FORWARD or FORWARD_INTERNAL match
+ if ((overhang_read_B_left <= max_overhang) and (overhang_read_B_right >= max_overhang)) {
+            //B's left end lies in the match and B extends past A on the right: a proper forward overlap.
+ this->match_type_ = FORWARD;
+ }
+ else if ((overhang_read_B_left >= max_overhang) and (overhang_read_B_right >= max_overhang)) {
+            //The match ends inside B on both sides: A's suffix aligns to an internal segment of B.
+ this->match_type_ = FORWARD_INTERNAL;
+ }
+ else{
+ this->match_type_ = UNDEFINED;
+ }
+ }
+
+ /*std::ofstream ofs ("overlapt.txt", std::ofstream::app);
+ ofs << "===============================================\n"
+ << "Read A id "<< std::setfill('0') << std::setw(5) <<this->read_A_id_
+ << "\nRead B id " << std::setfill('0') << std::setw(5) << this->read_B_id_
+ << "\nRead A eff start "<< std::setfill('0') << std::setw(5) << this->eff_read_A_start_
+ << " Read A eff end "<< std::setfill('0') << std::setw(5) << this->eff_read_A_end_
+ << " Read A length " << std::setfill('0') << std::setw(5) << this->alen
+ << " Read A match start "<< std::setfill('0') << std::setw(5) << this->read_A_match_start_
+ << " Read A eff match start " << std::setfill('0') << std::setw(5) << this->eff_read_A_match_start_
+ << " Read A match end " << std::setfill('0') << std::setw(5) << this->read_A_match_end_
+ << " Read A eff match end " << std::setfill('0') << std::setw(5) << this->eff_read_A_match_end_
+ << "\nRead B eff start " << std::setfill('0') << std::setw(5) << this->eff_read_B_start_
+ << " Read B eff end " << std::setfill('0') << std::setw(5) << this->eff_read_B_end_
+ << " Read B length " << std::setfill('0') << std::setw(5) << this->blen
+ << " Read B match start "<< std::setfill('0') << std::setw(5) << this->read_B_match_start_
+ << " Read B eff match start " << std::setfill('0') << std::setw(5) << this->eff_read_B_match_start_
+ << " Read B match end " << std::setfill('0') << std::setw(5) << this->read_B_match_end_
+ << " Read B eff match end " << std::setfill('0') << std::setw(5) << this->eff_read_B_match_end_
+ << "\nReverse complement " << std::setfill('0') << std::setw(5) << this->reverse_complement_match_
+ << "\nMatch type "<<this->match_type_
+ << "\n" << std::endl;
+ ofs.close();*/
+}
+
+int get_id_from_string(const char * name_str) {
+
+
+ const char * sub0 = strchr(name_str, '/');
+ const char * sub1 = sub0 + 1;
+ const char * sub2 = strchr(sub1, '/');
+
+ char substr[15];
+ strncpy(substr, sub1, strlen(sub1) - strlen(sub2));
+ substr[strlen(sub1) - strlen(sub2)] = 0;
+ return atoi(substr);
+}
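+
+// Example (the read-name format is assumed): for a name like "prefix/12345/0_10000",
+// get_id_from_string returns 12345, the field between the first two '/' characters;
+// loadPAF below subtracts 1 to turn it into a 0-based read id.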
+
+
+int LAInterface::loadPAF(std::string filename, std::vector<LOverlap *> & alns) {
+ paf_file_t *fp;
+ paf_rec_t r;
+ fp = paf_open(filename.c_str());
+ int num = 0;
+ while (paf_read(fp, &r) >= 0) {
+ num ++;
+ LOverlap *new_ovl = new LOverlap();
+
+ new_ovl->read_A_match_start_ = r.qs;
+ new_ovl->read_B_match_start_ = r.ts;
+ new_ovl->read_A_match_end_ = r.qe;
+ new_ovl->read_B_match_end_ = r.te;
+ new_ovl->alen = r.ql;
+ new_ovl->blen = r.tl;
+ new_ovl->reverse_complement_match_ = r.rev;
+ new_ovl->diffs = 0;
+ new_ovl->read_A_id_ = get_id_from_string(r.qn) - 1;
+ new_ovl->read_B_id_ = get_id_from_string(r.tn) - 1; //change 1 based to 0 based
+
+ alns.push_back(new_ovl);
+ }
+ return num;
+}
+
+KSEQ_INIT(gzFile, gzread)
+
+int LAInterface::loadFASTA(std::string filename, std::vector<Read *> & reads) {
+ gzFile fp;
+ kseq_t *seq;
+ int l;
+ int num = 0;
+ fp = gzopen(filename.c_str(), "r"); // STEP 2: open the file handler
+ seq = kseq_init(fp); // STEP 3: initialize seq
+ while ((l = kseq_read(seq)) >= 0) { // STEP 4: read sequence
+ //printf("name: %s\n", seq->name.s);
+ //if (seq->comment.l) printf("comment: %s\n", seq->comment.s);
+ //printf("seq: %s\n", seq->seq.s);
+ //if (seq->qual.l) printf("qual: %s\n", seq->qual.s);
+
+ Read *new_r = new Read(num, strlen(seq->seq.s), std::string(seq->name.s), std::string(seq->seq.s));
+ reads.push_back(new_r);
+ num++;
+ }
+ //printf("return value: %d\n", l);
+ kseq_destroy(seq); // STEP 5: destroy seq
+ gzclose(fp); // STEP 6: close the file handler
+ return num;
+}
diff --git a/src/lib/QV.c b/src/lib/QV.c
new file mode 100755
index 0000000..38f6db4
--- /dev/null
+++ b/src/lib/QV.c
@@ -0,0 +1,1406 @@
+/************************************************************************************\
+* *
+* Copyright (c) 2014, Dr. Eugene W. Myers (EWM). All rights reserved. *
+* *
+* Redistribution and use in source and binary forms, with or without modification, *
+* are permitted provided that the following conditions are met: *
+* *
+* · Redistributions of source code must retain the above copyright notice, this *
+* list of conditions and the following disclaimer. *
+* *
+* · Redistributions in binary form must reproduce the above copyright notice, this *
+* list of conditions and the following disclaimer in the documentation and/or *
+* other materials provided with the distribution. *
+* *
+* · The name of EWM may not be used to endorse or promote products derived from *
+* this software without specific prior written permission. *
+* *
+* THIS SOFTWARE IS PROVIDED BY EWM ”AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, *
+* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND *
+* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL EWM BE LIABLE *
+* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS *
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
+* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN *
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
+* *
+* For any issues regarding this software and its use, contact EWM at: *
+* *
+* Eugene W. Myers Jr. *
+* Bautzner Str. 122e *
+* 01099 Dresden *
+* GERMANY *
+* Email: gene.myers at gmail.com *
+* *
+\************************************************************************************/
+
+/*******************************************************************************************
+ *
+ * Compressor/decompressor for .quiv files: customized Huffman codes for each stream based on
+ *    the histogram of values occurring in a given file.  The two low complexity streams
+ *    (deletionQV and substitutionQV) use a Huffman coding of the run length of the prevalent
+ * character.
+ *
+ * Author: Gene Myers
+ * Date: Jan 18, 2014
+ * Modified: July 25, 2014
+ *
+ ********************************************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <unistd.h>
+
+#include "DB.h"
+
+#undef DEBUG
+
+#define MIN_BUFFER 1000
+
+#define HUFF_CUTOFF 16 // This cannot be larger than 16 !
+
+
+/*******************************************************************************************
+ *
+ * Endian flipping routines
+ *
+ ********************************************************************************************/
+
+static int LittleEndian; // Little-endian machine ?
+ // Referred by: Decode & Decode_Run
+static int Flip; // Flip endian of all coded shorts and ints
+ // Referred by: Decode & Decode_Run & Read_Scheme
+
+static void Set_Endian(int flip)
+{ uint32 x = 3;
+ uint8 *b = (uint8 *) (&x);
+
+ Flip = flip;
+ LittleEndian = (b[0] == 3);
+}
+
+static void Flip_Long(void *w)
+{ uint8 *v = (uint8 *) w;
+ uint8 x;
+
+ x = v[0];
+ v[0] = v[3];
+ v[3] = x;
+ x = v[1];
+ v[1] = v[2];
+ v[2] = x;
+}
+
+static void Flip_Short(void *w)
+{ uint8 *v = (uint8 *) w;
+ uint8 x;
+
+ x = v[0];
+ v[0] = v[1];
+ v[1] = x;
+}
+
+
+/*******************************************************************************************
+ *
+ * Routines for computing a Huffman Encoding Scheme
+ *
+ ********************************************************************************************/
+
+typedef struct
+ { int type; // 0 => normal, 1 => normal but has long codes, 2 => truncated
+ uint32 codebits[256]; // If type = 2, then code 255 is the special code for
+ int codelens[256]; // non-Huffman exceptions
+ int lookup[0x10000]; // Lookup table (just for decoding)
+ } HScheme;
+
+typedef struct _HTree
+ { struct _HTree *lft, *rgt;
+ uint64 count;
+ } HTree;
+
+ // Establish heap property from node s down (1 is root, siblings of n are 2n and 2n+1)
+ // assuming s is the only perturbation in the tree.
+
+static void Reheap(int s, HTree **heap, int hsize)
+{ int c, l, r;
+ HTree *hs, *hr, *hl;
+
+ c = s;
+ hs = heap[s];
+ while ((l = 2*c) <= hsize)
+ { r = l+1;
+ hl = heap[l];
+ hr = heap[r];
+ if (r > hsize || hr->count > hl->count)
+ { if (hs->count > hl->count)
+ { heap[c] = hl;
+ c = l;
+ }
+ else
+ break;
+ }
+ else
+ { if (hs->count > hr->count)
+ { heap[c] = hr;
+ c = r;
+ }
+ else
+ break;
+ }
+ }
+ if (c != s)
+ heap[c] = hs;
+}
+
+  // Given a Huffman tree, build a table of codes from it; the low-order codelens[s] bits
+ // of codebits[s] contain the code for symbol s.
+
+static void Build_Table(HTree *node, int code, int len, uint32 *codebits, int *codelens)
+{ if (node->rgt == NULL)
+ { uint64 symbol = (uint64) (node->lft);
+ codebits[symbol] = code;
+ codelens[symbol] = len;
+ }
+ else
+ { code <<= 1;
+ len += 1;
+ Build_Table(node->lft,code,len,codebits,codelens);
+ Build_Table(node->rgt,code+1,len,codebits,codelens);
+ }
+}
+
+ // For the non-zero symbols in hist, compute a huffman tree over them, and then
+ // build a table of the codes. If inscheme is not NULL, then place all symbols
+ // with code 255 or with more than HUFF_CUTOFF bits in the encoding by inscheme
+ // as a single united entity, whose code signals that the value of these symbols
+ // occur explicitly in 8 (values) or 16 (run lengths) bits following the code.
+ // All the symbols in this class will have the same entry in the code table and
+ // 255 is always in this class.
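+  //
+  // Illustrative example (symbol counts are assumed): with hist = {a:50, b:30, c:15, d:5}
+  // and inscheme == NULL, the two smallest counts (d and c) are merged first, then their
+  // parent with b, then that with a, giving code lengths of 1, 2, 3 and 3 bits for
+  // a, b, c and d (exact bit patterns depend on tie-breaking in the heap).  When inscheme
+  // is supplied, any symbol whose first-pass code exceeds HUFF_CUTOFF bits is folded into
+  // the single escape code shared with symbol 255.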
+
+static HScheme *Huffman(uint64 *hist, HScheme *inscheme)
+{ HScheme *scheme;
+ HTree *heap[259];
+ HTree node[512];
+ int hsize;
+ HTree *lft, *rgt;
+ int value, range;
+ int i;
+
+ scheme = (HScheme *) Malloc(sizeof(HScheme),"Allocating Huffman scheme record");
+ if (scheme == NULL)
+ return (NULL);
+
+ hsize = 0; // Load heap
+ value = 0;
+ if (inscheme != NULL)
+ { node[0].count = 0;
+ node[0].lft = (HTree *) (uint64) 255;
+ node[0].rgt = NULL;
+ heap[++hsize] = node+(value++);
+ }
+ for (i = 0; i < 256; i++)
+ if (hist[i] > 0)
+ { if (inscheme != NULL && (inscheme->codelens[i] > HUFF_CUTOFF || i == 255))
+ node[0].count += hist[i];
+ else
+ { node[value].count = hist[i];
+ node[value].lft = (HTree *) (uint64) i;
+ node[value].rgt = NULL;
+ heap[++hsize] = node+(value++);
+ }
+ }
+
+ for (i = hsize/2; i >= 1; i--) // Establish heap property
+ Reheap(i,heap,hsize);
+
+ range = value; // Merge pairs with smallest count until have a tree
+ for (i = 1; i < value; i++)
+ { lft = heap[1];
+ heap[1] = heap[hsize--];
+ Reheap(1,heap,hsize);
+ rgt = heap[1];
+ node[range].lft = lft;
+ node[range].rgt = rgt;
+ node[range].count = lft->count + rgt->count;
+ heap[1] = node+(range++);
+ Reheap(1,heap,hsize);
+ }
+
+ for (i = 0; i < 256; i++) // Build the code table
+ { scheme->codebits[i] = 0;
+ scheme->codelens[i] = 0;
+ }
+
+ Build_Table(node+(range-1),0,0,scheme->codebits,scheme->codelens);
+
+ if (inscheme != NULL) // Set scheme type and if truncated (2), map truncated codes
+ { scheme->type = 2; // to code and length for 255
+ for (i = 0; i < 255; i++)
+ if (inscheme->codelens[i] > HUFF_CUTOFF || scheme->codelens[i] > HUFF_CUTOFF)
+ { scheme->codelens[i] = scheme->codelens[255];
+ scheme->codebits[i] = scheme->codebits[255];
+ }
+ }
+ else
+ { scheme->type = 0;
+ for (i = 0; i < 256; i++)
+ { if (scheme->codelens[i] > HUFF_CUTOFF)
+ scheme->type = 1;
+ }
+ }
+
+ return (scheme);
+}
+
+#ifdef DEBUG
+
+ // For debug, show the coding table
+
+static void Print_Table(HScheme *scheme, uint64 *hist, int infosize)
+{ uint64 total_bits;
+ uint32 specval, mask, code, *bits;
+ int speclen, clen, *lens;
+ int i, k;
+
+ total_bits = 0;
+ bits = scheme->codebits;
+ lens = scheme->codelens;
+ if (scheme->type == 2)
+ { specval = bits[255];
+ speclen = lens[255];
+ }
+ else
+ specval = speclen = 0x7fffffff;
+
+ printf("\nCode Table:\n");
+ for (i = 0; i < 256; i++)
+ if (lens[i] > 0)
+ { clen = lens[i];
+ mask = (1 << clen);
+ code = bits[i];
+ printf(" %3d: %2d ",i,clen);
+ for (k = 0; k < clen; k++)
+ { mask >>= 1;
+ if (code & mask)
+ printf("1");
+ else
+ printf("0");
+ }
+ if (code == specval && clen == speclen)
+ { printf(" ***");
+ if (hist != NULL)
+ total_bits += (clen+infosize)*hist[i];
+ }
+ else if (hist != NULL)
+ total_bits += clen*hist[i];
+ printf("\n");
+ }
+ if (hist != NULL)
+ printf("\nTotal Bytes = %lld\n",(total_bits-1)/8+1);
+}
+
+ // For debug, show the histogram
+
+static void Print_Histogram(uint64 *hist)
+{ int i, low, hgh;
+ uint64 count;
+
+ for (hgh = 255; hgh >= 0; hgh--)
+ if (hist[hgh] != 0)
+ break;
+ for (low = 0; low < 256; low++)
+ if (hist[low] != 0)
+ break;
+ count = 0;
+ for (i = low; i <= hgh; i++)
+ count += hist[i];
+
+ for (i = hgh; i >= low; i--)
+ printf(" %3d: %8llu %5.1f%%\n",i,hist[i],(hist[i]*100.)/count);
+}
+
+#endif
+
+
+/*******************************************************************************************
+ *
+ * Read and Write Huffman Schemes
+ *
+ ********************************************************************************************/
+
+ // Write the code table to out.
+
+static void Write_Scheme(HScheme *scheme, FILE *out)
+{ int i;
+ uint8 x;
+ uint32 *bits;
+ int *lens;
+
+ lens = scheme->codelens;
+ bits = scheme->codebits;
+
+ x = (uint8) (scheme->type);
+ fwrite(&x,1,1,out);
+
+ for (i = 0; i < 256; i++)
+ { x = (uint8) (lens[i]);
+ fwrite(&x,1,1,out);
+ if (x > 0)
+ fwrite(bits+i,sizeof(uint32),1,out);
+ }
+}
+
+ // Allocate and read a code table from in, and return a pointer to it.
+
+static HScheme *Read_Scheme(FILE *in)
+{ HScheme *scheme;
+ int *look, *lens;
+ uint32 *bits, base;
+ int i, j, powr;
+ uint8 x;
+
+ scheme = (HScheme *) Malloc(sizeof(HScheme),"Allocating Huffman scheme record");
+ if (scheme == NULL)
+ return (NULL);
+
+ lens = scheme->codelens;
+ bits = scheme->codebits;
+ look = scheme->lookup;
+
+ if (fread(&x,1,1,in) != 1)
+ { EPRINTF(EPLACE,"Could not read scheme type byte (Read_Scheme)\n");
+ free(scheme);
+ return (NULL);
+ }
+ scheme->type = x;
+ for (i = 0; i < 256; i++)
+ { if (fread(&x,1,1,in) != 1)
+ { EPRINTF(EPLACE,"Could not read length of %d'th code (Read_Scheme)\n",i);
+ return (NULL);
+ }
+ lens[i] = x;
+ if (x > 0)
+ { if (fread(bits+i,sizeof(uint32),1,in) != 1)
+ { EPRINTF(EPLACE,"Could not read bit encoding of %d'th code (Read_Scheme)\n",i);
+ free(scheme);
+ return (NULL);
+ }
+ }
+ else
+ bits[i] = 0;
+ }
+
+ if (Flip)
+ { for (i = 0; i < 256; i++)
+ Flip_Long(bits+i);
+ }
+
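+  // Example of the lookup fill below (code values are assumed): a 3-bit code 0b101 for
+  // symbol s gives base = 0xA000 and powr = 0x2000, so look[0xA000 .. 0xBFFF] all map to s;
+  // the decoder can then index look[] with any 16-bit window whose top 3 bits are 101.
+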
+ for (i = 0; i < 256; i++)
+ { if (lens[i] > 0)
+ { base = (bits[i] << (16-lens[i]));
+ powr = (1 << (16-lens[i]));
+ for (j = 0; j < powr; j++)
+ look[base+j] = i;
+ }
+ }
+
+ return (scheme);
+}
+
+
+/*******************************************************************************************
+ *
+ * Encoders and Decoders
+ *
+ ********************************************************************************************/
+
+ // Encode read[0..rlen-1] according to scheme and write to out
+
+static void Encode(HScheme *scheme, FILE *out, uint8 *read, int rlen)
+{ uint32 x, c, ocode;
+ int n, k, olen, llen;
+ int *nlens;
+ uint32 *nbits;
+ uint32 nspec;
+ int nslen;
+
+ nlens = scheme->codelens;
+ nbits = scheme->codebits;
+
+ if (scheme->type == 2)
+ { nspec = nbits[255];
+ nslen = nlens[255];
+ }
+ else
+ nspec = nslen = 0x7fffffff;
+
+#define OCODE(L,C) \
+{ int len = olen + (L); \
+ uint32 code = (C); \
+ \
+ llen = olen; \
+ if (len >= 32) \
+ { olen = len-32; \
+ ocode |= (code >> olen); \
+ fwrite(&ocode,sizeof(uint32),1,out); \
+ if (olen > 0) \
+ ocode = (code << (32-olen)); \
+ else \
+ ocode = 0; \
+ } \
+ else \
+ { olen = len; \
+ ocode |= (code << (32-olen));; \
+ } \
+}
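+
+  // Illustrative walk-through of OCODE above (values are assumed): with olen = 30 and a
+  // 5-bit code, len = 35 >= 32, so the top 2 bits of the code are ORed into ocode, the
+  // completed 32-bit word is written out, and the remaining 3 low bits are shifted into
+  // the top of a fresh ocode with olen = 3.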
+
+ llen = 0;
+ olen = 0;
+ ocode = 0;
+ for (k = 0; k < rlen; k++)
+ { x = read[k];
+ n = nlens[x];
+ c = nbits[x];
+ OCODE(n,c);
+ if (c == nspec && n == nslen)
+ OCODE(8,x);
+ }
+
+ if (olen > 0) // Tricky: must pad so decoder does not read past
+    { fwrite(&ocode,sizeof(uint32),1,out);       //    last integer in the coded output.
+ if (llen > 16 && olen > llen)
+ fwrite(&ocode,sizeof(uint32),1,out);
+ }
+ else if (llen > 16)
+ fwrite(&ocode,sizeof(uint32),1,out);
+}
+
+ // Encode read[0..rlen-1] according to non-rchar table neme, and run-length table reme for
+ // runs of rchar characters. Write to out.
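+ //   Illustrative example (stream contents are assumed): with rchar = 'n', the stream
+ //   "nnnnAnB" is emitted as run-length 4, literal 'A', run-length 1, literal 'B';
+ //   a stream that does not start with rchar begins with a run of length 0.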
+
+static void Encode_Run(HScheme *neme, HScheme *reme, FILE *out, uint8 *read, int rlen, int rchar)
+{ uint32 x, c, ocode;
+ int n, h, k, olen, llen;
+ int *nlens, *rlens;
+ uint32 *nbits, *rbits;
+ uint32 nspec, rspec;
+ int nslen, rslen;
+
+ nlens = neme->codelens;
+ nbits = neme->codebits;
+ rlens = reme->codelens;
+ rbits = reme->codebits;
+
+ if (neme->type == 2)
+ { nspec = nbits[255];
+ nslen = nlens[255];
+ }
+ else
+ nspec = nslen = 0x7fffffff;
+
+ rspec = rbits[255];
+ rslen = rlens[255];
+
+ llen = 0;
+ olen = 0;
+ ocode = 0;
+ k = 0;
+ while (k < rlen)
+ { h = k;
+ while (k < rlen && read[k] == rchar)
+ k += 1;
+ if (k-h >= 255)
+ x = 255;
+ else
+ x = k-h;
+ n = rlens[x];
+ c = rbits[x];
+ OCODE(n,c);
+ if (c == rspec && n == rslen)
+ OCODE(16,k-h);
+ if (k < rlen)
+ { x = read[k];
+ n = nlens[x];
+ c = nbits[x];
+ OCODE(n,c);
+ if (c == nspec && n == nslen)
+ OCODE(8,x);
+ k += 1;
+ }
+ }
+
+ if (olen > 0)
+ { fwrite(&ocode,sizeof(uint32),1,out);
+ if (llen > 16 && olen > llen)
+ fwrite(&ocode,sizeof(uint32),1,out);
+ }
+ else if (llen > 16)
+ fwrite(&ocode,sizeof(uint32),1,out);
+}
+
+ // Read and decode from in, the next rlen symbols into read according to scheme
+
+static int Decode(HScheme *scheme, FILE *in, char *read, int rlen)
+{ int *look, *lens;
+ int signal, ilen;
+ uint64 icode;
+ uint32 *ipart;
+ uint16 *xpart;
+ uint8 *cpart;
+ int j, n, c;
+
+ if (LittleEndian)
+ { ipart = ((uint32 *) (&icode));
+ xpart = ((uint16 *) (&icode)) + 2;
+ cpart = ((uint8 *) (&icode)) + 5;
+ }
+ else
+ { ipart = ((uint32 *) (&icode)) + 1;
+ xpart = ((uint16 *) (&icode)) + 1;
+ cpart = ((uint8 *) (&icode)) + 2;
+ }
+
+ if (scheme->type == 2)
+ signal = 255;
+ else
+ signal = 256;
+ lens = scheme->codelens;
+ look = scheme->lookup;
+
+#define GET \
+ if (n > ilen) \
+ { icode <<= ilen; \
+ if (fread(ipart,sizeof(uint32),1,in) != 1) \
+ { EPRINTF(EPLACE,"Could not read more bits (Decode)\n"); \
+ return (1); \
+ } \
+ ilen = n-ilen; \
+ icode <<= ilen; \
+ ilen = 32-ilen; \
+ } \
+ else \
+ { icode <<= n; \
+ ilen -= n; \
+ }
+
+#define GETFLIP \
+ if (n > ilen) \
+ { icode <<= ilen; \
+ if (fread(ipart,sizeof(uint32),1,in) != 1) \
+ { EPRINTF(EPLACE,"Could not read more bits (Decode)\n"); \
+ return (1); \
+ } \
+ Flip_Long(ipart); \
+ ilen = n-ilen; \
+ icode <<= ilen; \
+ ilen = 32-ilen; \
+ } \
+ else \
+ { icode <<= n; \
+ ilen -= n; \
+ }
+
+ n = 16;
+ ilen = 0;
+ icode = 0;
+ if (Flip)
+ for (j = 0; j < rlen; j++)
+ { GETFLIP
+ c = look[*xpart];
+ n = lens[c];
+ if (c == signal)
+ { GETFLIP
+ c = *cpart;
+ n = 8;
+ }
+ read[j] = (char) c;
+ }
+ else
+ for (j = 0; j < rlen; j++)
+ { GET
+ c = look[*xpart];
+ n = lens[c];
+ if (c == signal)
+ { GET
+ c = *cpart;
+ n = 8;
+ }
+ read[j] = (char) c;
+ }
+
+ return (0);
+}
+
+ // Read and decode from in, the next rlen symbols into read according to non-rchar scheme
+  //   neme, and the rchar runlength scheme reme
+
+static int Decode_Run(HScheme *neme, HScheme *reme, FILE *in, char *read,
+ int rlen, int rchar)
+{ int *nlook, *nlens;
+ int *rlook, *rlens;
+ int nsignal, ilen;
+ uint64 icode;
+ uint32 *ipart;
+ uint16 *xpart;
+ uint8 *cpart;
+ int j, n, c, k;
+
+ if (LittleEndian)
+ { ipart = ((uint32 *) (&icode));
+ xpart = ((uint16 *) (&icode)) + 2;
+ cpart = ((uint8 *) (&icode)) + 5;
+ }
+ else
+ { ipart = ((uint32 *) (&icode)) + 1;
+ xpart = ((uint16 *) (&icode)) + 1;
+ cpart = ((uint8 *) (&icode)) + 2;
+ }
+
+ if (neme->type == 2)
+ nsignal = 255;
+ else
+ nsignal = 256;
+ nlens = neme->codelens;
+ nlook = neme->lookup;
+
+ rlens = reme->codelens;
+ rlook = reme->lookup;
+
+ n = 16;
+ ilen = 0;
+ icode = 0;
+ if (Flip)
+ for (j = 0; j < rlen; j++)
+ { GETFLIP
+ c = rlook[*xpart];
+ n = rlens[c];
+ if (c == 255)
+ { GETFLIP
+ c = *xpart;
+ n = 16;
+ }
+ for (k = 0; k < c; k++)
+ read[j++] = (char) rchar;
+
+ if (j < rlen)
+ { GETFLIP
+ c = nlook[*xpart];
+ n = nlens[c];
+ if (c == nsignal)
+ { GETFLIP
+ c = *cpart;
+ n = 8;
+ }
+ read[j] = (char) c;
+ }
+ }
+ else
+ for (j = 0; j < rlen; j++)
+ { GET
+ c = rlook[*xpart];
+ n = rlens[c];
+ if (c == 255)
+ { GET
+ c = *xpart;
+ n = 16;
+ }
+ for (k = 0; k < c; k++)
+ read[j++] = (char) rchar;
+
+ if (j < rlen)
+ { GET
+ c = nlook[*xpart];
+ n = nlens[c];
+ if (c == nsignal)
+ { GET
+ c = *cpart;
+ n = 8;
+ }
+ read[j] = (char) c;
+ }
+ }
+
+ return (0);
+}
+
+
+/*******************************************************************************************
+ *
+ * Histogrammers
+ *
+ ********************************************************************************************/
+
+// Histogram the symbols of stream[0..rlen-1] into hist (Histogram_Seqs) and the run lengths
+//   of symbol runChar into run (Histogram_Runs).
+
+static void Histogram_Seqs(uint64 *hist, uint8 *stream, int rlen)
+{ int k;
+
+ for (k = 0; k < rlen; k++)
+ hist[stream[k]] += 1;
+}
+
+static void Histogram_Runs(uint64 *run, uint8 *stream, int rlen, int runChar)
+{ int k, h;
+
+ k = 0;
+ while (k < rlen)
+ { h = k;
+ while (k < rlen && stream[k] == runChar)
+ k += 1;
+ if (k-h >= 256)
+ run[255] += 1;
+ else
+ run[k-h] += 1;
+ if (k < rlen)
+ k += 1;
+ }
+}
+
+
+/*******************************************************************************************
+ *
+ * Reader
+ *
+ ********************************************************************************************/
+
+static char *Read = NULL; // Referred by: QVentry, Read_Lines, QVcoding_Scan,
+static int Rmax = -1; // Compress_Next_QVentry
+
+static int Nline; // Referred by: QVcoding_Scan
+
+char *QVentry()
+{ return (Read); }
+
+// If nlines == 1 trying to read a single header, nlines = 5 trying to read 5 QV/fasta lines
+// for a sequence. Place line j at Read+j*Rmax and the length of every line is returned
+// unless eof occurs in which case return -1. If any error occurs return -2.
+
+int Read_Lines(FILE *input, int nlines)
+{ int i, rlen;
+ int tmax;
+ char *tread;
+ char *other;
+
+ if (Read == NULL)
+ { tmax = MIN_BUFFER;
+ tread = (char *) Malloc(5*tmax,"Allocating QV entry read buffer");
+ if (tread == NULL)
+ EXIT(-2);
+ Rmax = tmax;
+ Read = tread;
+ }
+
+ Nline += 1;
+ if (fgets(Read,Rmax,input) == NULL)
+ return (-1);
+
+ rlen = strlen(Read);
+ while (Read[rlen-1] != '\n')
+    { tmax = ((int) (1.4*Rmax)) + MIN_BUFFER;
+ tread = (char *) Realloc(Read,5*tmax,"Reallocating QV entry read buffer");
+ if (tread == NULL)
+ EXIT(-2);
+ Rmax = tmax;
+ Read = tread;
+ if (fgets(Read+rlen,Rmax-rlen,input) == NULL)
+ { EPRINTF(EPLACE,"Line %d: Last line does not end with a newline !\n",Nline);
+ EXIT(-2);
+ }
+ rlen += strlen(Read+rlen);
+ }
+ other = Read;
+ for (i = 1; i < nlines; i++)
+ { other += Rmax;
+ Nline += 1;
+ if (fgets(other,Rmax,input) == NULL)
+ { EPRINTF(EPLACE,"Line %d: incomplete last entry of .quiv file\n",Nline);
+ EXIT(-2);
+ }
+ if (rlen != (int) strlen(other))
+ { EPRINTF(EPLACE,"Line %d: Lines for an entry are not the same length\n",Nline);
+ EXIT(-2);
+ }
+ }
+ return (rlen-1);
+}
+
+
+/*******************************************************************************************
+ *
+ * Tag compression and decompression routines
+ *
+ ********************************************************************************************/
+
+// Keep only the symbols in tags[0..rlen-1] for which qvs[k] != rchar and
+// return the # of symbols kept.
+
+static int Pack_Tag(char *tags, char *qvs, int rlen, int rchar)
+{ int j, k;
+
+ j = 0;
+ for (k = 0; k < rlen; k++)
+ if (qvs[k] != rchar)
+ tags[j++] = tags[k];
+ tags[j] = '\0';
+ return (j);
+}
+
+ // Count the # of non-rchar symbols in qvs[0..rlen-1]
+
+static int Packed_Length(char *qvs, int rlen, int rchar)
+{ int k, clen;
+
+ clen = 0;
+ for (k = 0; k < rlen; k++)
+ if (qvs[k] != rchar)
+ clen += 1;
+ return (clen);
+}
+
+  // Unpack tags by moving their i'th char to position k where qvs[k] is the i'th non-rchar
+  //   symbol in qvs.  All other positions are set to 'n'.  rlen is the length of qvs and
+ // the unpacked result, clen is the initial length of tags.
+
+static void Unpack_Tag(char *tags, int clen, char *qvs, int rlen, int rchar)
+{ int j, k;
+
+ j = clen-1;
+ for (k = rlen-1; k >= 0; k--)
+ { if (qvs[k] == rchar)
+ tags[k] = 'n';
+ else
+ tags[k] = tags[j--];
+ }
+}
+
+
+/*******************************************************************************************
+ *
+ * Statistics Scan and Scheme creation and write
+ *
+ ********************************************************************************************/
+
+ // Read .quiva file from input, recording stats in the histograms. If zero is set then
+ // start the stats anew with this file.
+
+static uint64 delHist[256], insHist[256], mrgHist[256], subHist[256], delRun[256], subRun[256];
+static uint64 totChar;
+static int delChar, subChar;
+
+ // Referred by: QVcoding_Scan, Create_QVcoding
+
+int QVcoding_Scan(FILE *input)
+{ char *slash;
+ int rlen;
+
+ // Zero histograms
+
+ bzero(delHist,sizeof(uint64)*256);
+ bzero(mrgHist,sizeof(uint64)*256);
+ bzero(insHist,sizeof(uint64)*256);
+ bzero(subHist,sizeof(uint64)*256);
+
+ { int i;
+
+ for (i = 0; i < 256; i++)
+ delRun[i] = subRun[i] = 1;
+ }
+
+ totChar = 0;
+ delChar = -1;
+ subChar = -1;
+
+ // Make a sweep through the .quiva entries, histogramming the relevant things
+  //    and figuring out the run chars for the deletion and substitution streams
+
+ Nline = 0;
+ while (1)
+ { int well, beg, end, qv;
+
+ rlen = Read_Lines(input,1);
+ if (rlen == -2)
+ EXIT(1);
+ if (rlen < 0)
+ break;
+
+ if (rlen == 0 || Read[0] != '@')
+ { EPRINTF(EPLACE,"Line %d: Header in quiv file is missing\n",Nline);
+ EXIT(1);
+ }
+ slash = index(Read+1,'/');
+ if (slash == NULL)
+ { EPRINTF(EPLACE,"%s: Line %d: Header line incorrectly formatted ?\n",
+ Prog_Name,Nline);
+ EXIT(1);
+ }
+ if (sscanf(slash+1,"%d/%d_%d RQ=0.%d\n",&well,&beg,&end,&qv) != 4)
+ { EPRINTF(EPLACE,"%s: Line %d: Header line incorrectly formatted ?\n",
+ Prog_Name,Nline);
+ EXIT(1);
+ }
+
+ rlen = Read_Lines(input,5);
+ if (rlen < 0)
+ { if (rlen == -1)
+ EPRINTF(EPLACE,"Line %d: incomplete last entry of .quiv file\n",Nline);
+ EXIT(1);
+ }
+
+ Histogram_Seqs(delHist,(uint8 *) (Read),rlen);
+ Histogram_Seqs(insHist,(uint8 *) (Read+2*Rmax),rlen);
+ Histogram_Seqs(mrgHist,(uint8 *) (Read+3*Rmax),rlen);
+ Histogram_Seqs(subHist,(uint8 *) (Read+4*Rmax),rlen);
+
+ if (delChar < 0)
+ { int k;
+ char *del = Read+Rmax;
+
+ for (k = 0; k < rlen; k++)
+ if (del[k] == 'n' || del[k] == 'N')
+ { delChar = Read[k];
+ break;
+ }
+ }
+ if (delChar >= 0)
+ Histogram_Runs( delRun,(uint8 *) (Read),rlen,delChar);
+ totChar += rlen;
+ if (subChar < 0)
+ { if (totChar >= 100000)
+ { int k;
+
+ subChar = 0;
+ for (k = 1; k < 256; k++)
+ if (subHist[k] > subHist[subChar])
+ subChar = k;
+ }
+ }
+ if (subChar >= 0)
+ Histogram_Runs( subRun,(uint8 *) (Read+4*Rmax),rlen,subChar);
+ }
+
+ return (0);
+}
+
+  //  Using the statistics in the global stat tables, create the Huffman schemes and return
+  //    them in a QVcoding record.  If lossy is set, then create a lossy table for the insertion and merge
+ // QVs.
+
+QVcoding *Create_QVcoding(int lossy)
+{ static QVcoding coding;
+
+ HScheme *delScheme, *insScheme, *mrgScheme, *subScheme;
+ HScheme *dRunScheme, *sRunScheme;
+
+ delScheme = NULL;
+ dRunScheme = NULL;
+ insScheme = NULL;
+ mrgScheme = NULL;
+ subScheme = NULL;
+ sRunScheme = NULL;
+
+  // Check whether using a substitution run char is a win
+
+ if (totChar < 200000 || subHist[subChar] < .5*totChar)
+ subChar = -1;
+
+  // If lossy compression is enabled then scale insertion and merge QVs.
+
+ if (lossy)
+ { int k;
+
+ for (k = 0; k < 256; k += 2)
+ { insHist[k] += insHist[k+1];
+ insHist[k+1] = 0;
+ }
+
+ for (k = 0; k < 256; k += 4)
+ { mrgHist[k] += mrgHist[k+1];
+ mrgHist[k] += mrgHist[k+2];
+ mrgHist[k] += mrgHist[k+3];
+ mrgHist[k+1] = 0;
+ mrgHist[k+2] = 0;
+ mrgHist[k+3] = 0;
+ }
+ }
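+    // Example of the lossy rounding above (values are assumed): an insertion QV of 7 is
+    // binned with 6 and a merge QV of 13 with 12, i.e. the low 1 and 2 bits respectively
+    // are dropped; Compress_Next_QVentry applies the same rounding to the streams themselves.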
+
+ // Build a Huffman scheme for each stream entity from the histograms
+
+#define SCHEME_MACRO(meme,hist,label,bits) \
+ scheme = Huffman( (hist), NULL); \
+ if (scheme == NULL) \
+ goto error; \
+ if (scheme->type) \
+ { (meme) = Huffman( (hist), scheme); \
+ free(scheme); \
+ } \
+ else \
+ (meme) = scheme;
+
+#ifdef DEBUG
+
+#define MAKE_SCHEME(meme,hist,label,bits) \
+ SCHEME_MACRO(meme,hist,label,bits) \
+ printf("\n%s\n", (label) ); \
+ Print_Histogram( (hist)); \
+ Print_Table( (meme), (hist), (bits));
+
+#else
+
+#define MAKE_SCHEME(meme,hist,label,bits) \
+ SCHEME_MACRO(meme,hist,label,bits)
+
+#endif
+
+ { HScheme *scheme;
+
+ if (delChar < 0)
+    { MAKE_SCHEME(delScheme,delHist, "Histogram of Deletion QVs", 8);
+ dRunScheme = NULL;
+ }
+ else
+ { delHist[delChar] = 0;
+      MAKE_SCHEME(delScheme,delHist, "Histogram of Deletion QVs less run char", 8);
+ MAKE_SCHEME(dRunScheme,delRun, "Histogram of Deletion Runs QVs", 16);
+#ifdef DEBUG
+ printf("\nRun char is '%c'\n",delChar);
+#endif
+ }
+
+#ifdef DEBUG
+ { int k;
+ uint64 count;
+
+ count = 0;
+ for (k = 0; k < 256; k++)
+ count += delHist[k];
+ printf("\nDelTag will require %lld bytes\n",count/4);
+ }
+#endif
+
+    MAKE_SCHEME(insScheme,insHist, "Histogram of Insertion QVs", 8);
+    MAKE_SCHEME(mrgScheme,mrgHist, "Histogram of Merge QVs", 8);
+
+ if (subChar < 0)
+    { MAKE_SCHEME(subScheme,subHist, "Histogram of Substitution QVs", 8);
+ sRunScheme = NULL;
+ }
+ else
+ { subHist[subChar] = 0;
+      MAKE_SCHEME(subScheme,subHist, "Histogram of Substitution QVs less run char", 8);
+ MAKE_SCHEME(sRunScheme,subRun, "Histogram of Substitution Run QVs", 16);
+#ifdef DEBUG
+ printf("\nRun char is '%c'\n",subChar);
+#endif
+ }
+ }
+
+ // Setup endian handling
+
+ Set_Endian(0);
+
+ coding.delScheme = delScheme;
+ coding.insScheme = insScheme;
+ coding.mrgScheme = mrgScheme;
+ coding.subScheme = subScheme;
+ coding.dRunScheme = dRunScheme;
+ coding.sRunScheme = sRunScheme;
+ coding.delChar = delChar;
+ coding.subChar = subChar;
+ coding.prefix = NULL;
+ coding.flip = 0;
+
+ return (&coding);
+
+error:
+ if (delScheme != NULL)
+ free(delScheme);
+ if (dRunScheme != NULL)
+ free(dRunScheme);
+ if (insScheme != NULL)
+ free(insScheme);
+ if (mrgScheme != NULL)
+ free(mrgScheme);
+ if (subScheme != NULL)
+ free(subScheme);
+ if (sRunScheme != NULL)
+ free(sRunScheme);
+ EXIT(NULL);
+}
+
+ // Write the encoding scheme 'coding' to 'output'
+
+void Write_QVcoding(FILE *output, QVcoding *coding)
+{
+ // Write out the endian key, run chars, and prefix (if not NULL)
+
+ { uint16 half;
+ int len;
+
+ half = 0x33cc;
+ fwrite(&half,sizeof(uint16),1,output);
+
+ if (coding->delChar < 0)
+ half = 256;
+ else
+ half = (uint16) (coding->delChar);
+ fwrite(&half,sizeof(uint16),1,output);
+
+ if (coding->subChar < 0)
+ half = 256;
+ else
+ half = (uint16) (coding->subChar);
+ fwrite(&half,sizeof(uint16),1,output);
+
+ len = strlen(coding->prefix);
+ fwrite(&len,sizeof(int),1,output);
+ fwrite(coding->prefix,1,len,output);
+ }
+
+ // Write out the scheme tables
+
+ Write_Scheme(coding->delScheme,output);
+ if (coding->delChar >= 0)
+ Write_Scheme(coding->dRunScheme,output);
+ Write_Scheme(coding->insScheme,output);
+ Write_Scheme(coding->mrgScheme,output);
+ Write_Scheme(coding->subScheme,output);
+ if (coding->subChar >= 0)
+ Write_Scheme(coding->sRunScheme,output);
+}
+
+ // Read the encoding scheme 'coding' from 'input'
+
+QVcoding *Read_QVcoding(FILE *input)
+{ static QVcoding coding;
+
+ // Read endian key, run chars, and short name common to all headers
+
+ { uint16 half;
+ int len;
+
+ if (fread(&half,sizeof(uint16),1,input) != 1)
+ { EPRINTF(EPLACE,"Could not read flip byte (Read_QVcoding)\n");
+ EXIT(NULL);
+ }
+ coding.flip = (half != 0x33cc);
+
+ if (fread(&half,sizeof(uint16),1,input) != 1)
+ { EPRINTF(EPLACE,"Could not read deletion char (Read_QVcoding)\n");
+ EXIT(NULL);
+ }
+ if (coding.flip)
+ Flip_Short(&half);
+ coding.delChar = half;
+ if (coding.delChar >= 256)
+ coding.delChar = -1;
+
+ if (fread(&half,sizeof(uint16),1,input) != 1)
+ { EPRINTF(EPLACE,"Could not read substitution char (Read_QVcoding)\n");
+ EXIT(NULL);
+ }
+ if (coding.flip)
+ Flip_Short(&half);
+ coding.subChar = half;
+ if (coding.subChar >= 256)
+ coding.subChar = -1;
+
+ // Read the short name common to all headers
+
+ if (fread(&len,sizeof(int),1,input) != 1)
+ { EPRINTF(EPLACE,"Could not read header name length (Read_QVcoding)\n");
+ EXIT(NULL);
+ }
+ if (coding.flip)
+ Flip_Long(&len);
+ coding.prefix = (char *) Malloc(len+1,"Allocating header prefix");
+ if (coding.prefix == NULL)
+ EXIT(NULL);
+ if (len > 0)
+ { if (fread(coding.prefix,len,1,input) != 1)
+ { EPRINTF(EPLACE,"Could not read header name (Read_QVcoding)\n");
+ EXIT(NULL);
+ }
+ }
+ coding.prefix[len] = '\0';
+ }
+
+ // Setup endian handling
+
+ Set_Endian(coding.flip);
+
+ // Read the Huffman schemes used to compress the data
+
+ coding.delScheme = NULL;
+ coding.dRunScheme = NULL;
+ coding.insScheme = NULL;
+ coding.mrgScheme = NULL;
+ coding.subScheme = NULL;
+ coding.sRunScheme = NULL;
+
+ coding.delScheme = Read_Scheme(input);
+ if (coding.delScheme == NULL)
+ goto error;
+ if (coding.delChar >= 0)
+ { coding.dRunScheme = Read_Scheme(input);
+ if (coding.dRunScheme == NULL)
+ goto error;
+ }
+ coding.insScheme = Read_Scheme(input);
+ if (coding.insScheme == NULL)
+ goto error;
+ coding.mrgScheme = Read_Scheme(input);
+ if (coding.mrgScheme == NULL)
+ goto error;
+ coding.subScheme = Read_Scheme(input);
+ if (coding.subScheme == NULL)
+ goto error;
+ if (coding.subChar >= 0)
+ { coding.sRunScheme = Read_Scheme(input);
+ if (coding.sRunScheme == NULL)
+ goto error;
+ }
+
+ return (&coding);
+
+error:
+ if (coding.delScheme != NULL)
+ free(coding.delScheme);
+ if (coding.dRunScheme != NULL)
+ free(coding.dRunScheme);
+ if (coding.insScheme != NULL)
+ free(coding.insScheme);
+ if (coding.mrgScheme != NULL)
+ free(coding.mrgScheme);
+ if (coding.subScheme != NULL)
+ free(coding.subScheme);
+ if (coding.sRunScheme != NULL)
+ free(coding.sRunScheme);
+ EXIT(NULL);
+}
+
+ // Free all the auxiliary storage associated with the encoding argument
+
+void Free_QVcoding(QVcoding *coding)
+{ if (coding->subChar >= 0)
+ free(coding->sRunScheme);
+ free(coding->subScheme);
+ free(coding->mrgScheme);
+ free(coding->insScheme);
+ if (coding->delChar >= 0)
+ free(coding->dRunScheme);
+ free(coding->delScheme);
+ free(coding->prefix);
+}
+
+
+/*******************************************************************************************
+ *
+ * Encode/Decode (w.r.t. coding) next entry from input and write to output
+ *
+ ********************************************************************************************/
+
+int Compress_Next_QVentry(FILE *input, FILE *output, QVcoding *coding, int lossy)
+{ int rlen, clen;
+
+ // Get all 5 streams, compress each with its scheme, and output
+
+ rlen = Read_Lines(input,5);
+ if (rlen < 0)
+ { if (rlen == -1)
+ EPRINTF(EPLACE,"Line %d: incomplete last entry of .quiv file\n",Nline);
+ EXIT (1);
+ }
+
+ if (coding->delChar < 0)
+ { Encode(coding->delScheme, output, (uint8 *) Read, rlen);
+ clen = rlen;
+ }
+ else
+ { Encode_Run(coding->delScheme, coding->dRunScheme, output,
+ (uint8 *) Read, rlen, coding->delChar);
+ clen = Pack_Tag(Read+Rmax,Read,rlen,coding->delChar);
+ }
+ Number_Read(Read+Rmax);
+ Compress_Read(clen,Read+Rmax);
+ fwrite(Read+Rmax,1,COMPRESSED_LEN(clen),output);
+
+ if (lossy)
+ { uint8 *insert = (uint8 *) (Read+2*Rmax);
+ uint8 *merge = (uint8 *) (Read+3*Rmax);
+ int k;
+
+ for (k = 0; k < rlen; k++)
+ { insert[k] = (uint8) ((insert[k] >> 1) << 1);
+ merge[k] = (uint8) (( merge[k] >> 2) << 2);
+ }
+ }
+
+ Encode(coding->insScheme, output, (uint8 *) (Read+2*Rmax), rlen);
+ Encode(coding->mrgScheme, output, (uint8 *) (Read+3*Rmax), rlen);
+ if (coding->subChar < 0)
+ Encode(coding->subScheme, output, (uint8 *) (Read+4*Rmax), rlen);
+ else
+ Encode_Run(coding->subScheme, coding->sRunScheme, output,
+ (uint8 *) (Read+4*Rmax), rlen, coding->subChar);
+
+ return (0);
+}
+
+int Uncompress_Next_QVentry(FILE *input, char **entry, QVcoding *coding, int rlen)
+{ int clen, tlen;
+
+ // Decode each stream and write to output
+
+ if (coding->delChar < 0)
+ { if (Decode(coding->delScheme, input, entry[0], rlen))
+ EXIT(1);
+ clen = rlen;
+ tlen = COMPRESSED_LEN(clen);
+ if (tlen > 0)
+ { if (fread(entry[1],tlen,1,input) != 1)
+ { EPRINTF(EPLACE,"Could not read deletions entry (Uncompress_Next_QVentry\n");
+ EXIT(1);
+ }
+ }
+ Uncompress_Read(clen,entry[1]);
+ Lower_Read(entry[1]);
+ }
+ else
+ { if (Decode_Run(coding->delScheme, coding->dRunScheme, input,
+ entry[0], rlen, coding->delChar))
+ EXIT(1);
+ clen = Packed_Length(entry[0],rlen,coding->delChar);
+ tlen = COMPRESSED_LEN(clen);
+ if (tlen > 0)
+ { if (fread(entry[1],tlen,1,input) != 1)
+ { EPRINTF(EPLACE,"Could not read deletions entry (Uncompress_Next_QVentry\n");
+ EXIT(1);
+ }
+ }
+ Uncompress_Read(clen,entry[1]);
+ Lower_Read(entry[1]);
+ Unpack_Tag(entry[1],clen,entry[0],rlen,coding->delChar);
+ }
+
+ if (Decode(coding->insScheme, input, entry[2], rlen))
+ EXIT(1);
+
+ if (Decode(coding->mrgScheme, input, entry[3], rlen))
+ EXIT(1);
+
+ if (coding->subChar < 0)
+ { if (Decode(coding->subScheme, input, entry[4], rlen))
+ EXIT(1);
+ }
+ else
+ { if (Decode_Run(coding->subScheme, coding->sRunScheme, input,
+ entry[4], rlen, coding->subChar))
+ EXIT(1);
+ }
+
+ return (0);
+}
diff --git a/src/lib/align.c b/src/lib/align.c
new file mode 100755
index 0000000..61aa21e
--- /dev/null
+++ b/src/lib/align.c
@@ -0,0 +1,5149 @@
+/************************************************************************************\
+* *
+* Copyright (c) 2014, Dr. Eugene W. Myers (EWM). All rights reserved. *
+* *
+* Redistribution and use in source and binary forms, with or without modification, *
+* are permitted provided that the following conditions are met: *
+* *
+* · Redistributions of source code must retain the above copyright notice, this *
+* list of conditions and the following disclaimer. *
+* *
+* · Redistributions in binary form must reproduce the above copyright notice, this *
+* list of conditions and the following disclaimer in the documentation and/or *
+* other materials provided with the distribution. *
+* *
+* · The name of EWM may not be used to endorse or promote products derived from *
+* this software without specific prior written permission. *
+* *
+* THIS SOFTWARE IS PROVIDED BY EWM ”AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, *
+* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND *
+* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL EWM BE LIABLE *
+* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS *
+* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
+* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN *
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
+* *
+* For any issues regarding this software and its use, contact EWM at: *
+* *
+* Eugene W. Myers Jr. *
+* Bautzner Str. 122e *
+* 01099 Dresden *
+* GERMANY *
+* Email: gene.myers at gmail.com *
+* *
+\************************************************************************************/
+
+/*******************************************************************************************
+ *
+ *  Fast alignment discovery and trace generation along with utilities for displaying alignments
+ * Based on previously unpublished ideas from 2005, subsequently refined in 2013-14. Basic
+ * idea is to keep a dynamically selected interval of the f.r. waves from my 1986 O(nd) paper.
+ * A recent cool idea is to not record all the details of an alignment while discovering it
+ * but simply record trace points through which the optimal alignment passes every 100bp,
+ * allowing rapid recomputation of the alignment details between trace points.
+ *
+ * Author : Gene Myers
+ * First : June 2013
+ * Current: June 1, 2014
+ *
+ ********************************************************************************************/
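+
+/* Illustrative example of the trace-point idea (the numbers are hypothetical): with a
+   trace spacing of 100, an alignment whose A-interval is [150,450) crosses the trace
+   points a = 200, 300, 400 and is thereby cut into 4 panels.  For each panel the trace
+   records just two small integers -- the number of differences in the panel and the
+   number of B-bases it consumes -- so the full edit script is never stored, yet any
+   panel can be re-aligned on demand in time proportional to the square of the panel
+   size rather than to the length of the whole alignment.                                */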
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <math.h>
+#include <limits.h>
+
+#include "DB.h"
+#include "align.h"
+
+#undef DEBUG_PASSES // Show forward / backward extension termini for Local_Alignment
+#undef DEBUG_POINTS // Show trace points
+#undef DEBUG_WAVE // Show waves of Local_Alignment
+#undef SHOW_MATCH_WAVE // For waves of Local_Alignment also show # of matches
+#undef SHOW_TRAIL // Show trace at the end of forward and reverse passes
+#undef SHOW_TPS // Show trace points as they are encountered in a wave
+
+#undef DEBUG_EXTEND // Show waves of Extend_Until_Overlap
+
+#undef DEBUG_ALIGN // Show division points of Compute_Trace
+#undef DEBUG_SCRIPT // Show trace additions for Compute_Trace
+#undef DEBUG_AWAVE // Show F/R waves of Compute_Trace
+
+#undef SHOW_TRACE // Show full trace for Print_Alignment
+
+#undef WAVE_STATS
+
+
+/****************************************************************************************\
+* *
+* Working Storage Abstraction *
+* *
+\****************************************************************************************/
+
+typedef struct // Hidden from the user, working space for each thread
+ { int vecmax;
+ void *vector;
+ int celmax;
+ void *cells;
+ int pntmax;
+ void *points;
+ int tramax;
+ void *trace;
+ } _Work_Data;
+
+Work_Data *New_Work_Data()
+{ _Work_Data *work;
+
+ work = (_Work_Data *) Malloc(sizeof(_Work_Data),"Allocating work data block");
+ if (work == NULL)
+ EXIT(NULL);
+ work->vecmax = 0;
+ work->vector = NULL;
+ work->pntmax = 0;
+ work->points = NULL;
+ work->tramax = 0;
+ work->trace = NULL;
+ work->celmax = 0;
+ work->cells = NULL;
+ return ((Work_Data *) work);
+}
+
+static int enlarge_vector(_Work_Data *work, int newmax)
+{ void *vec;
+ int max;
+
+ max = ((int) (newmax*1.2)) + 10000;
+ vec = Realloc(work->vector,max,"Enlarging DP vector");
+ if (vec == NULL)
+ EXIT(1);
+ work->vecmax = max;
+ work->vector = vec;
+ return (0);
+}
+
+static int enlarge_points(_Work_Data *work, int newmax)
+{ void *vec;
+ int max;
+
+ max = ((int) (newmax*1.2)) + 10000;
+ vec = Realloc(work->points,max,"Enlarging point vector");
+ if (vec == NULL)
+ EXIT(1);
+ work->pntmax = max;
+ work->points = vec;
+ return (0);
+}
+
+static int enlarge_trace(_Work_Data *work, int newmax)
+{ void *vec;
+ int max;
+
+ max = ((int) (newmax*1.2)) + 10000;
+ vec = Realloc(work->trace,max,"Enlarging trace vector");
+ if (vec == NULL)
+ EXIT(1);
+ work->tramax = max;
+ work->trace = vec;
+ return (0);
+}
+
+void Free_Work_Data(Work_Data *ework)
+{ _Work_Data *work = (_Work_Data *) ework;
+ if (work->vector != NULL)
+ free(work->vector);
+ if (work->cells != NULL)
+ free(work->cells);
+ if (work->trace != NULL)
+ free(work->trace);
+ if (work->points != NULL)
+ free(work->points);
+ free(work);
+}
+
+
+/****************************************************************************************\
+* *
+* ADAPTIVE PATH FINDING *
+* *
+\****************************************************************************************/
+
+ // Absolute/Fixed Parameters
+
+#define BVEC uint64 // Can be uint32 if PATH_LEN <= 32
+
+#define TRIM_LEN 15 // Report as the tip, the last wave maximum for which the last
+ // 2*TRIM_LEN edits are prefix-positive at rate ave_corr*f(bias)
+ // (max value is 20)
+
+#define PATH_LEN 60 // Follow the last PATH_LEN columns/edges (max value is 63)
+
+ // Derivative fixed parameters
+
+#define PATH_TOP 0x1000000000000000ll // Must be 1 << PATH_LEN
+#define PATH_INT 0x0fffffffffffffffll // Must be PATH_TOP-1
+#define TRIM_MASK 0x7fff // Must be (1 << TRIM_LEN) - 1
+#define TRIM_MLAG 200 // How far can last trim point be behind best point
+#define WAVE_LAG 30 // How far can worst point be behind the best point
+
+static double Bias_Factor[10] = { .690, .690, .690, .690, .780,
+ .850, .900, .933, .966, 1.000 };
+
+  //  Adjustable parameters
+
+typedef struct
+ { double ave_corr;
+ int trace_space;
+ float freq[4];
+ int ave_path;
+ int16 *score;
+ int16 *table;
+ } _Align_Spec;
+
+/* Fill in bit table: TABLE[x] = 1 iff the alignment modeled by x (1 = match, 0 = mismatch)
+ has a non-negative score for every suffix of the alignment under the scoring scheme
+ where match = MATCH and mismatch = -1. MATCH is set so that an alignment with TRIM_PCT
+ matches has zero score ( (1-TRIM_PCT) / TRIM_PCT ). */
+
+#define FRACTION 1000 // Implicit fractional part of scores, i.e. score = x/FRACTION
+
+typedef struct
+ { int mscore;
+ int dscore;
+ int16 *table;
+ int16 *score;
+ } Table_Bits;
+
+static void set_table(int bit, int prefix, int score, int max, Table_Bits *parms)
+{ if (bit >= TRIM_LEN)
+ { parms->table[prefix] = (int16) (score-max);
+ parms->score[prefix] = (int16) score;
+ }
+ else
+ { if (score > max)
+ max = score;
+ set_table(bit+1,(prefix<<1),score - parms->dscore,max,parms);
+ set_table(bit+1,(prefix<<1) | 1,score + parms->mscore,max,parms);
+ }
+}
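+
+/* Sketch of how the two tables built above are consulted (cf. the trim test in
+   forward_wave/reverse_wave below); b is assumed to hold the last 2*TRIM_LEN alignment
+   columns, most recent column in the low-order bit:
+
+       if (TABLE[b & TRIM_MASK] >= 0 &&
+           TABLE[(b >> TRIM_LEN) & TRIM_MASK] + SCORE[b & TRIM_MASK] >= 0)
+         ... every trailing run of the last 2*TRIM_LEN columns scores non-negatively,
+             so this furthest reaching point qualifies as the current trim point ...
+*/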
+
+/* Create an alignment specification record including path tip tables & values */
+
+Align_Spec *New_Align_Spec(double ave_corr, int trace_space, float *freq)
+{ _Align_Spec *spec;
+ Table_Bits parms;
+ double match;
+ int bias;
+
+ spec = (_Align_Spec *) Malloc(sizeof(_Align_Spec),"Allocating alignment specification");
+ if (spec == NULL)
+ EXIT(NULL);
+
+ spec->ave_corr = ave_corr;
+ spec->trace_space = trace_space;
+ spec->freq[0] = freq[0];
+ spec->freq[1] = freq[1];
+ spec->freq[2] = freq[2];
+ spec->freq[3] = freq[3];
+
+ match = freq[0] + freq[3];
+ if (match > .5)
+ match = 1.-match;
+ bias = (int) ((match+.025)*20.-1.);
+ if (match < .2)
+ { EPRINTF(EPLACE,"Base bias worse than 80/20%% ! (New_Align_Spec)\n");
+ free(spec);
+ EXIT(NULL);
+ }
+
+ spec->ave_path = (int) (PATH_LEN * (1. - Bias_Factor[bias] * (1. - ave_corr)));
+ parms.mscore = (int) (FRACTION * Bias_Factor[bias] * (1. - ave_corr));
+ parms.dscore = FRACTION - parms.mscore;
+
+ parms.score = (int16 *) Malloc(sizeof(int16)*(TRIM_MASK+1)*2,"Allocating trim table");
+ if (parms.score == NULL)
+ { free(spec);
+ EXIT(NULL);
+ }
+ parms.table = parms.score + (TRIM_MASK+1);
+
+ set_table(0,0,0,0,&parms);
+
+ spec->table = parms.table;
+ spec->score = parms.score;
+
+ return ((Align_Spec *) spec);
+}
+
+void Free_Align_Spec(Align_Spec *espec)
+{ _Align_Spec *spec = (_Align_Spec *) espec;
+ free(spec->score);
+ free(spec);
+}
+
+double Average_Correlation(Align_Spec *espec)
+{ return (((_Align_Spec *) espec)->ave_corr); }
+
+int Trace_Spacing(Align_Spec *espec)
+{ return (((_Align_Spec *) espec)->trace_space); }
+
+float *Base_Frequencies(Align_Spec *espec)
+{ return (((_Align_Spec *) espec)->freq); }
+
+
+/****************************************************************************************\
+* *
+* LOCAL ALIGNMENT FINDER: forward_/reverse_wave and Local_Alignment *
+* *
+\****************************************************************************************/
+
+
+#ifdef WAVE_STATS
+
+static int64 MAX, TOT, NWV;
+static int64 RESTARTS;
+
+void Init_Stats()
+{ MAX = TOT = NWV = 0;
+ RESTARTS = 0;
+}
+
+void Print_Stats()
+{ printf("\nMax = %lld Ave = %.1f # = %lld\n",MAX,(1.*TOT)/NWV,NWV);
+ printf("\nRestarts = %lld\n",RESTARTS);
+}
+
+#endif
+
+
+#ifdef DEBUG_WAVE
+
+static void print_wave(int *V, int *M, int low, int hgh, int besta)
+{ int k, bestk = low;
+
+ (void) M;
+ printf(" [%6d,%6d]: ",low,hgh);
+ for (k = low; k <= hgh; k++)
+ { if (besta == V[k])
+ bestk = k;
+ // printf(" %3d",(V[k]+k)/2);
+ printf(" %3d",besta-V[k]);
+ }
+ printf(" : %d (%d,%d)\n",besta,(besta+bestk)/2,(besta-bestk)/2);
+#ifdef SHOW_MATCH_WAVE
+ printf(" ");
+ for (k = low; k <= hgh; k++)
+ printf(" %3d",M[k]);
+ printf("\n");
+#endif
+ fflush(stdout);
+}
+
+#endif
+
+/* At each furthest reaching point, keep a-coordinate of point (V), bitvector
+ recording the last TRIM_LEN columns of the implied alignment (T), and the
+ # of matches (1-bits) in the bitvector (M). */
+
+typedef struct
+ { int ptr;
+ int diag;
+ int diff;
+ int mark;
+ } Pebble;
+
+static int VectorEl = 6*sizeof(int) + sizeof(BVEC);
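+
+/* The remaining per-diagonal state kept by forward_wave/reverse_wave (hence VectorEl is
+   6 ints + 1 BVEC per diagonal): besides V, T and M described above, HA[k] and HB[k]
+   hold the index in the Pebble array of the most recent A- and B-trace-point record on
+   diagonal k, and NA[k] and NB[k] hold the coordinate of the next A/B trace point that
+   the wave on that diagonal has yet to reach.                                            */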
+
+static int forward_wave(_Work_Data *work, _Align_Spec *spec, Alignment *align, Path *bpath,
+ int *mind, int maxd, int mida, int minp, int maxp)
+{ char *aseq = align->aseq;
+ char *bseq = align->bseq;
+ Path *apath = align->path;
+
+ int hgh, low, dif;
+ int vlen, vmin, vmax;
+ int *V, *M;
+ int *_V, *_M;
+ BVEC *T;
+ BVEC *_T;
+
+ int *HA, *HB;
+ int *_HA, *_HB;
+ int *NA, *NB;
+ int *_NA, *_NB;
+ Pebble *cells;
+ int avail, cmax, boff;
+
+ int TRACE_SPACE = spec->trace_space;
+ int PATH_AVE = spec->ave_path;
+ int16 *SCORE = spec->score;
+ int16 *TABLE = spec->table;
+
+ int besta, besty;
+ int trima, trimy, trimd;
+ int trimha, trimhb;
+ int morea, morey, mored;
+ int moreha, morehb;
+ int more, morem, lasta;
+ int aclip, bclip;
+
+ hgh = maxd;
+ low = *mind;
+ dif = 0;
+
+ { int span, wing;
+
+ span = (hgh-low)+1;
+ vlen = work->vecmax/VectorEl;
+ wing = (vlen - span)/2;
+ vmin = low - wing;
+ vmax = hgh + wing;
+
+ _V = ((int *) work->vector);
+ _M = _V + vlen;
+ _HA = _M + vlen;
+ _HB = _HA + vlen;
+ _NA = _HB + vlen;
+ _NB = _NA + vlen;
+ _T = ((BVEC *) (_NB + vlen));
+
+ V = _V-vmin;
+ M = _M-vmin;
+ HA = _HA-vmin;
+ HB = _HB-vmin;
+ NA = _NA-vmin;
+ NB = _NB-vmin;
+ T = _T-vmin;
+
+ cells = (Pebble *) (work->cells);
+ cmax = work->celmax;
+ avail = 0;
+
+ if (COMP(align->flags))
+ boff = align->blen % TRACE_SPACE;
+ else
+ boff = 0;
+ }
+
+ /* Compute 0-wave starting from mid-line */
+
+ more = 1;
+ aclip = INT32_MAX;
+ bclip = -INT32_MAX;
+
+ besta = trima = morea = lasta = mida;
+ besty = trimy = morey = (mida-hgh) >> 1;
+ trimd = mored = 0;
+ trimha = moreha = 0;
+ trimhb = morehb = 1;
+ morem = -1;
+
+ { int k;
+ char *a;
+
+ a = aseq + hgh;
+ for (k = hgh; k >= low; k--)
+ { int y, c, d;
+ int ha, hb;
+ int na, nb;
+ Pebble *pb;
+
+ y = (mida-k) >> 1;
+
+ if (avail >= cmax-1)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+
+ na = ((y+k)/TRACE_SPACE)*TRACE_SPACE;
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,0,%d\n",avail,-1,k,na); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = -1;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = na;
+ ha = avail++;
+ na += TRACE_SPACE;
+
+ nb = ((y+(TRACE_SPACE-boff))/TRACE_SPACE-1)*TRACE_SPACE+boff;
+#ifdef SHOW_TPS
+ printf(" B %d: %d,%d,0,%d\n",avail,-1,k,nb); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = -1;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = nb;
+ hb = avail++;
+ nb += TRACE_SPACE;
+
+ while (1)
+ { c = bseq[y];
+ if (c == 4)
+ { more = 0;
+ if (bclip < k)
+ bclip = k;
+ break;
+ }
+ d = a[y];
+ if (c != d)
+ { if (d == 4)
+ { more = 0;
+ aclip = k;
+ }
+ break;
+ }
+ y += 1;
+ }
+ c = (y << 1) + k;
+
+ while (y+k >= na)
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,0,%d\n",avail,ha,k,na); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = ha;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = na;
+ ha = avail++;
+ na += TRACE_SPACE;
+ }
+ while (y >= nb)
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" B %d: %d,%d,0,%d\n",avail,hb,k,nb); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = hb;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = nb;
+ hb = avail++;
+ nb += TRACE_SPACE;
+ }
+
+ if (c > besta)
+ { besta = trima = lasta = c;
+ besty = trimy = y;
+ trimha = ha;
+ trimhb = hb;
+ }
+
+ V[k] = c;
+ T[k] = PATH_INT;
+ M[k] = PATH_LEN;
+ HA[k] = ha;
+ HB[k] = hb;
+ NA[k] = na;
+ NB[k] = nb;
+
+ a -= 1;
+ }
+ }
+
+ if (more == 0)
+ { if (bseq[besty] != 4 && aseq[besta - besty] != 4)
+ more = 1;
+ if (hgh >= aclip)
+ { hgh = aclip-1;
+ if (morem <= M[aclip])
+ { morem = M[aclip];
+ morea = V[aclip];
+ morey = (morea - aclip)/2;
+ moreha = HA[aclip];
+ morehb = HB[aclip];
+ }
+ }
+ if (low <= bclip)
+ { low = bclip+1;
+ if (morem <= M[bclip])
+ { morem = M[bclip];
+ morea = V[bclip];
+ morey = (morea - bclip)/2;
+ moreha = HA[bclip];
+ morehb = HB[bclip];
+ }
+ }
+ aclip = INT32_MAX;
+ bclip = -INT32_MAX;
+ }
+
+#ifdef DEBUG_WAVE
+ printf("\nFORWARD WAVE:\n");
+ print_wave(V,M,low,hgh,besta);
+#endif
+
+ /* Compute successive waves until no furthest reaching points remain */
+
+ while (more && lasta >= besta - TRIM_MLAG)
+ { int k, n;
+ int ua, ub;
+ BVEC t;
+ int am, ac, ap;
+ char *a;
+
+ if (low <= vmin || hgh >= vmax)
+ { int span, wing;
+ int64 move;
+ int64 vd, md, had, hbd, nad, nbd, td;
+
+ span = (hgh-low)+1;
+ if (.8*vlen < span)
+ { if (enlarge_vector(work,vlen*VectorEl))
+ EXIT(1);
+
+ move = ((void *) _V) - work->vector;
+ vlen = work->vecmax/VectorEl;
+
+ _V = (int *) work->vector;
+ _M = _V + vlen;
+ _HA = _M + vlen;
+ _HB = _HA + vlen;
+ _NA = _HB + vlen;
+ _NB = _NA + vlen;
+ _T = ((BVEC *) (_NB + vlen));
+ }
+ else
+ move = 0;
+
+ wing = (vlen - span)/2;
+
+ vd = ((void *) ( _V+wing)) - (((void *) ( V+low)) - move);
+ md = ((void *) ( _M+wing)) - (((void *) ( M+low)) - move);
+ had = ((void *) (_HA+wing)) - (((void *) (HA+low)) - move);
+ hbd = ((void *) (_HB+wing)) - (((void *) (HB+low)) - move);
+ nad = ((void *) (_NA+wing)) - (((void *) (NA+low)) - move);
+ nbd = ((void *) (_NB+wing)) - (((void *) (NB+low)) - move);
+ td = ((void *) ( _T+wing)) - (((void *) ( T+low)) - move);
+
+ if (vd < 0)
+ memmove( _V+wing, ((void *) ( V+low)) - move, span*sizeof(int));
+ if (md < 0)
+ memmove( _M+wing, ((void *) ( M+low)) - move, span*sizeof(int));
+ if (had < 0)
+ memmove(_HA+wing, ((void *) (HA+low)) - move, span*sizeof(int));
+ if (hbd < 0)
+ memmove(_HB+wing, ((void *) (HB+low)) - move, span*sizeof(int));
+ if (nad < 0)
+ memmove(_NA+wing, ((void *) (NA+low)) - move, span*sizeof(int));
+ if (nbd < 0)
+ memmove(_NB+wing, ((void *) (NB+low)) - move, span*sizeof(int));
+ if (td < 0)
+ memmove( _T+wing, ((void *) ( T+low)) - move, span*sizeof(BVEC));
+
+ if (td > 0)
+ memmove( _T+wing, ((void *) ( T+low)) - move, span*sizeof(BVEC));
+ if (nbd > 0)
+ memmove(_NB+wing, ((void *) (NB+low)) - move, span*sizeof(int));
+ if (nad > 0)
+ memmove(_NA+wing, ((void *) (NA+low)) - move, span*sizeof(int));
+ if (hbd > 0)
+ memmove(_HB+wing, ((void *) (HB+low)) - move, span*sizeof(int));
+ if (had > 0)
+ memmove(_HA+wing, ((void *) (HA+low)) - move, span*sizeof(int));
+ if (md > 0)
+ memmove( _M+wing, ((void *) ( M+low)) - move, span*sizeof(int));
+ if (vd > 0)
+ memmove( _V+wing, ((void *) ( V+low)) - move, span*sizeof(int));
+
+ vmin = low-wing;
+ vmax = hgh+wing;
+
+ V = _V-vmin;
+ M = _M-vmin;
+ HA = _HA-vmin;
+ HB = _HB-vmin;
+ NA = _NA-vmin;
+ NB = _NB-vmin;
+ T = _T-vmin;
+ }
+
+ if (low > minp)
+ { low -= 1;
+ NA[low] = NA[low+1];
+ NB[low] = NB[low+1];
+ V[low] = -1;
+ }
+ if (hgh < maxp)
+ { hgh += 1;
+ NA[hgh] = NA[hgh-1];
+ NB[hgh] = NB[hgh-1];
+ V[hgh] = am = -1;
+ }
+ else
+ am = V[hgh];
+ dif += 1;
+
+ ac = V[hgh+1] = V[low-1] = -1;
+ a = aseq + hgh;
+ t = PATH_INT;
+ n = PATH_LEN;
+ ua = ub = -1;
+ for (k = hgh; k >= low; k--)
+ { int y, m;
+ int ha, hb;
+ int c, d;
+ BVEC b;
+ Pebble *pb;
+
+ ap = ac;
+ ac = am;
+ am = V[d = k-1];
+
+ if (ac < am)
+ if (am < ap)
+ { c = ap+1;
+ m = n;
+ b = t;
+ ha = ua;
+ hb = ub;
+ }
+ else
+ { c = am+1;
+ m = M[d];
+ b = T[d];
+ ha = HA[d];
+ hb = HB[d];
+ }
+ else
+ if (ac < ap)
+ { c = ap+1;
+ m = n;
+ b = t;
+ ha = ua;
+ hb = ub;
+ }
+ else
+ { c = ac+2;
+ m = M[k];
+ b = T[k];
+ ha = HA[k];
+ hb = HB[k];
+ }
+
+ if ((b & PATH_TOP) != 0)
+ m -= 1;
+ b <<= 1;
+
+ y = (c-k) >> 1;
+ while (1)
+ { c = bseq[y];
+ if (c == 4)
+ { more = 0;
+ if (bclip < k)
+ bclip = k;
+ break;
+ }
+ d = a[y];
+ if (c != d)
+ { if (d == 4)
+ { more = 0;
+ aclip = k;
+ }
+ break;
+ }
+ y += 1;
+ if ((b & PATH_TOP) == 0)
+ m += 1;
+ b = (b << 1) | 1;
+ }
+ c = (y << 1) + k;
+
+ while (y+k >= NA[k])
+ { if (cells[ha].mark < NA[k])
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),
+ "Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,%d,%d\n",avail,ha,k,dif,NA[k]); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = ha;
+ pb->diag = k;
+ pb->diff = dif;
+ pb->mark = NA[k];
+ ha = avail++;
+ }
+ NA[k] += TRACE_SPACE;
+ }
+
+ while (y >= NB[k])
+ { if (cells[hb].mark < NB[k])
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),
+ "Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" B %d: %d,%d,%d,%d\n",avail,hb,k,dif,NB[k]); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = hb;
+ pb->diag = k;
+ pb->diff = dif;
+ pb->mark = NB[k];
+ hb = avail++;
+ }
+ NB[k] += TRACE_SPACE;
+ }
+
+ if (c > besta)
+ { besta = c;
+ besty = y;
+ if (m >= PATH_AVE)
+ { lasta = c;
+ if (TABLE[b & TRIM_MASK] >= 0)
+ if (TABLE[(b >> TRIM_LEN) & TRIM_MASK] + SCORE[b & TRIM_MASK] >= 0)
+ { trima = c;
+ trimy = y;
+ trimd = dif;
+ trimha = ha;
+ trimhb = hb;
+ }
+ }
+ }
+
+ t = T[k];
+ n = M[k];
+ ua = HA[k];
+ ub = HB[k];
+ V[k] = c;
+ T[k] = b;
+ M[k] = m;
+ HA[k] = ha;
+ HB[k] = hb;
+
+ a -= 1;
+ }
+
+ if (more == 0)
+ { if (bseq[besty] != 4 && aseq[besta-besty] != 4)
+ more = 1;
+ if (hgh >= aclip)
+ { hgh = aclip-1;
+ if (morem <= M[aclip])
+ { morem = M[aclip];
+ morea = V[aclip];
+ morey = (morea - aclip)/2;
+ mored = dif;
+ moreha = HA[aclip];
+ morehb = HB[aclip];
+ }
+ }
+ if (low <= bclip)
+ { low = bclip+1;
+ if (morem <= M[bclip])
+ { morem = M[bclip];
+ morea = V[bclip];
+ morey = (morea - bclip)/2;
+ mored = dif;
+ moreha = HA[bclip];
+ morehb = HB[bclip];
+ }
+ }
+ aclip = INT32_MAX;
+ bclip = -INT32_MAX;
+ }
+
+ n = besta - WAVE_LAG;
+ while (hgh >= low)
+ if (V[hgh] < n)
+ hgh -= 1;
+ else
+ { while (V[low] < n)
+ low += 1;
+ break;
+ }
+
+#ifdef WAVE_STATS
+ k = (hgh-low)+1;
+ if (k > MAX)
+ MAX = k;
+ TOT += k;
+ NWV += 1;
+#endif
+
+#ifdef DEBUG_WAVE
+ print_wave(V,M,low,hgh,besta);
+#endif
+ }
+
+ { uint16 *atrace = (uint16 *) apath->trace;
+ uint16 *btrace = (uint16 *) bpath->trace;
+ int atlen, btlen;
+ int trimx;
+ int a, b, k, h;
+ int d, e;
+
+ if (morem >= 0)
+ { trimx = morea-morey;
+ trimy = morey;
+ trimd = mored;
+ trimha = moreha;
+ trimhb = morehb;
+ }
+ else
+ trimx = trima-trimy;
+
+ atlen = btlen = 0;
+
+ a = -1;
+ for (h = trimha; h >= 0; h = b)
+ { b = cells[h].ptr;
+ cells[h].ptr = a;
+ a = h;
+ }
+ h = a;
+
+ k = cells[h].diag;
+ b = (mida-k)/2;
+ e = 0;
+#ifdef SHOW_TRAIL
+ printf(" A path = (%5d,%5d)\n",(mida+k)/2,b); fflush(stdout);
+#endif
+ for (h = cells[h].ptr; h >= 0; h = cells[h].ptr)
+ { k = cells[h].diag;
+ a = cells[h].mark - k;
+ d = cells[h].diff;
+ atrace[atlen++] = (uint16) (d-e);
+ atrace[atlen++] = (uint16) (a-b);
+#ifdef SHOW_TRAIL
+ printf(" %4d: (%5d,%5d): %3d / %3d\n",h,a+k,a,d-e,a-b); fflush(stdout);
+#endif
+ b = a;
+ e = d;
+ }
+ if (b+k != trimx)
+ { atrace[atlen++] = (uint16) (trimd-e);
+ atrace[atlen++] = (uint16) (trimy-b);
+#ifdef SHOW_TRAIL
+ printf(" (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,trimy-b); fflush(stdout);
+#endif
+ }
+ else if (b != trimy)
+ { atrace[atlen-1] = (uint16) (atrace[atlen-1] + (trimy-b));
+ atrace[atlen-2] = (uint16) (atrace[atlen-2] + (trimd-e));
+#ifdef SHOW_TRAIL
+ printf(" @ (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,trimy-b); fflush(stdout);
+#endif
+ }
+
+ a = -1;
+ for (h = trimhb; h >= 0; h = b)
+ { b = cells[h].ptr;
+ cells[h].ptr = a;
+ a = h;
+ }
+ h = a;
+
+ k = cells[h].diag;
+ b = (mida+k)/2;
+ e = 0;
+ low = k;
+#ifdef SHOW_TRAIL
+ printf(" B path = (%5d,%5d)\n",b,(mida-k)/2); fflush(stdout);
+#endif
+ for (h = cells[h].ptr; h >= 0; h = cells[h].ptr)
+ { k = cells[h].diag;
+ a = cells[h].mark + k;
+ d = cells[h].diff;
+ btrace[btlen++] = (uint16) (d-e);
+ btrace[btlen++] = (uint16) (a-b);
+#ifdef SHOW_TRAIL
+ printf(" %4d: (%5d,%5d): %3d / %3d\n",h,a,a-k,d-e,a-b); fflush(stdout);
+#endif
+ b = a;
+ e = d;
+ }
+ if (b-k != trimy)
+ { btrace[btlen++] = (uint16) (trimd-e);
+ btrace[btlen++] = (uint16) (trimx-b);
+#ifdef SHOW_TRAIL
+ printf(" (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,trimx-b); fflush(stdout);
+#endif
+ }
+ else if (b != trimx)
+ { btrace[btlen-1] = (uint16) (btrace[btlen-1] + (trimx-b));
+ btrace[btlen-2] = (uint16) (btrace[btlen-2] + (trimd-e));
+#ifdef SHOW_TRAIL
+ printf(" @ (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,trimx-b); fflush(stdout);
+#endif
+ }
+
+ apath->aepos = trimx;
+ apath->bepos = trimy;
+ apath->diffs = trimd;
+ apath->tlen = atlen;
+ if (COMP(align->flags))
+ { bpath->abpos = align->blen - apath->bepos;
+ bpath->bbpos = align->alen - apath->aepos;
+ }
+ else
+ { bpath->aepos = apath->bepos;
+ bpath->bepos = apath->aepos;
+ }
+ bpath->diffs = trimd;
+ bpath->tlen = btlen;
+ }
+
+ *mind = low;
+ return (0);
+}
+
+/*** Reverse Wave ***/
+
+static int reverse_wave(_Work_Data *work, _Align_Spec *spec, Alignment *align, Path *bpath,
+ int mind, int maxd, int mida, int minp, int maxp)
+{ char *aseq = align->aseq - 1;
+ char *bseq = align->bseq - 1;
+ Path *apath = align->path;
+
+ int hgh, low, dif;
+ int vlen, vmin, vmax;
+ int *V, *M;
+ int *_V, *_M;
+ BVEC *T;
+ BVEC *_T;
+
+ int *HA, *HB;
+ int *_HA, *_HB;
+ int *NA, *NB;
+ int *_NA, *_NB;
+ Pebble *cells;
+ int avail, cmax, boff;
+
+ int TRACE_SPACE = spec->trace_space;
+ int PATH_AVE = spec->ave_path;
+ int16 *SCORE = spec->score;
+ int16 *TABLE = spec->table;
+
+ int besta, besty;
+ int trima, trimy, trimd;
+ int trimha, trimhb;
+ int morea, morey, mored;
+ int moreha, morehb;
+ int more, morem, lasta;
+ int aclip, bclip;
+
+ hgh = maxd;
+ low = mind;
+ dif = 0;
+
+ { int span, wing;
+
+ span = (hgh-low)+1;
+ vlen = work->vecmax/VectorEl;
+ wing = (vlen - span)/2;
+ vmin = low - wing;
+ vmax = hgh + wing;
+
+ _V = ((int *) work->vector);
+ _M = _V + vlen;
+ _HA = _M + vlen;
+ _HB = _HA + vlen;
+ _NA = _HB + vlen;
+ _NB = _NA + vlen;
+ _T = ((BVEC *) (_NB + vlen));
+
+ V = _V-vmin;
+ M = _M-vmin;
+ HA = _HA-vmin;
+ HB = _HB-vmin;
+ NA = _NA-vmin;
+ NB = _NB-vmin;
+ T = _T-vmin;
+
+ cells = (Pebble *) (work->cells);
+ cmax = work->celmax;
+ avail = 0;
+
+ if (COMP(align->flags))
+ boff = align->blen % TRACE_SPACE;
+ else
+ boff = 0;
+ }
+
+ more = 1;
+ aclip = -INT32_MAX;
+ bclip = INT32_MAX;
+
+ besta = trima = morea = lasta = mida;
+ besty = trimy = morey = (mida-hgh) >> 1;
+ trimd = mored = 0;
+ trimha = moreha = 0;
+ trimhb = morehb = 1;
+ morem = -1;
+
+ { int k;
+ char *a;
+
+ a = aseq + low;
+ for (k = low; k <= hgh; k++)
+ { int y, c, d;
+ int ha, hb;
+ int na, nb;
+ Pebble *pb;
+
+ y = (mida-k) >> 1;
+
+ if (avail >= cmax-1)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+
+ na = ((y+k+TRACE_SPACE-1)/TRACE_SPACE-1)*TRACE_SPACE;
+#ifdef SHOW_TPS
+ printf(" A %d: -1,%d,0,%d\n",avail,k,na+TRACE_SPACE); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = -1;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = y+k;
+ ha = avail++;
+
+ nb = ((y+(TRACE_SPACE-boff)-1)/TRACE_SPACE-1)*TRACE_SPACE+boff;
+#ifdef SHOW_TPS
+ printf(" B %d: -1,%d,0,%d\n",avail,k,nb+TRACE_SPACE); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = -1;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = y;
+ hb = avail++;
+
+ while (1)
+ { c = bseq[y];
+ if (c == 4)
+ { more = 0;
+ if (bclip > k)
+ bclip = k;
+ break;
+ }
+ d = a[y];
+ if (c != d)
+ { if (d == 4)
+ { more = 0;
+ aclip = k;
+ }
+ break;
+ }
+ y -= 1;
+ }
+ c = (y << 1) + k;
+
+ while (y+k <= na)
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,0,%d\n",avail,ha,k,na); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = ha;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = na;
+ ha = avail++;
+ na -= TRACE_SPACE;
+ }
+ while (y <= nb)
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" B %d: %d,%d,0,%d\n",avail,hb,k,nb); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = hb;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = nb;
+ hb = avail++;
+ nb -= TRACE_SPACE;
+ }
+
+ if (c < besta)
+ { besta = trima = lasta = c;
+ besty = trimy = y;
+ trimha = ha;
+ trimhb = hb;
+ }
+
+ V[k] = c;
+ T[k] = PATH_INT;
+ M[k] = PATH_LEN;
+ HA[k] = ha;
+ HB[k] = hb;
+ NA[k] = na;
+ NB[k] = nb;
+
+ a += 1;
+ }
+ }
+
+ if (more == 0)
+ { if (bseq[besty] != 4 && aseq[besta - besty] != 4)
+ more = 1;
+ if (low <= aclip)
+ { low = aclip+1;
+ if (morem <= M[aclip])
+ { morem = M[aclip];
+ morea = V[aclip];
+ morey = (morea - aclip)/2;
+ moreha = HA[aclip];
+ morehb = HB[aclip];
+ }
+ }
+ if (hgh >= bclip)
+ { hgh = bclip-1;
+ if (morem <= M[bclip])
+ { morem = M[bclip];
+ morea = V[bclip];
+ morey = (morea - bclip)/2;
+ moreha = HA[bclip];
+ morehb = HB[bclip];
+ }
+ }
+ aclip = -INT32_MAX;
+ bclip = INT32_MAX;
+ }
+
+#ifdef DEBUG_WAVE
+ printf("\nREVERSE WAVE:\n");
+ print_wave(V,M,low,hgh,besta);
+#endif
+
+ while (more && lasta <= besta + TRIM_MLAG)
+ { int k, n;
+ int ua, ub;
+ BVEC t;
+ int am, ac, ap;
+ char *a;
+
+ if (low <= vmin || hgh >= vmax)
+ { int span, wing;
+ int64 move, vd, md, had, hbd, nad, nbd, td;
+
+ span = (hgh-low)+1;
+ if (.8*vlen < span)
+ { if (enlarge_vector(work,vlen*VectorEl))
+ EXIT(1);
+
+ move = ((void *) _V) - work->vector;
+ vlen = work->vecmax/VectorEl;
+
+ _V = (int *) work->vector;
+ _M = _V + vlen;
+ _HA = _M + vlen;
+ _HB = _HA + vlen;
+ _NA = _HB + vlen;
+ _NB = _NA + vlen;
+ _T = ((BVEC *) (_NB + vlen));
+ }
+ else
+ move = 0;
+
+ wing = (vlen - span)/2;
+
+ vd = ((void *) ( _V+wing)) - (((void *) ( V+low)) - move);
+ md = ((void *) ( _M+wing)) - (((void *) ( M+low)) - move);
+ had = ((void *) (_HA+wing)) - (((void *) (HA+low)) - move);
+ hbd = ((void *) (_HB+wing)) - (((void *) (HB+low)) - move);
+ nad = ((void *) (_NA+wing)) - (((void *) (NA+low)) - move);
+ nbd = ((void *) (_NB+wing)) - (((void *) (NB+low)) - move);
+ td = ((void *) ( _T+wing)) - (((void *) ( T+low)) - move);
+
+ if (vd < 0)
+ memmove( _V+wing, ((void *) ( V+low)) - move, span*sizeof(int));
+ if (md < 0)
+ memmove( _M+wing, ((void *) ( M+low)) - move, span*sizeof(int));
+ if (had < 0)
+ memmove(_HA+wing, ((void *) (HA+low)) - move, span*sizeof(int));
+ if (hbd < 0)
+ memmove(_HB+wing, ((void *) (HB+low)) - move, span*sizeof(int));
+ if (nad < 0)
+ memmove(_NA+wing, ((void *) (NA+low)) - move, span*sizeof(int));
+ if (nbd < 0)
+ memmove(_NB+wing, ((void *) (NB+low)) - move, span*sizeof(int));
+ if (td < 0)
+ memmove( _T+wing, ((void *) ( T+low)) - move, span*sizeof(BVEC));
+
+ if (td > 0)
+ memmove( _T+wing, ((void *) ( T+low)) - move, span*sizeof(BVEC));
+ if (nbd > 0)
+ memmove(_NB+wing, ((void *) (NB+low)) - move, span*sizeof(int));
+ if (nad > 0)
+ memmove(_NA+wing, ((void *) (NA+low)) - move, span*sizeof(int));
+ if (hbd > 0)
+ memmove(_HB+wing, ((void *) (HB+low)) - move, span*sizeof(int));
+ if (had > 0)
+ memmove(_HA+wing, ((void *) (HA+low)) - move, span*sizeof(int));
+ if (md > 0)
+ memmove( _M+wing, ((void *) ( M+low)) - move, span*sizeof(int));
+ if (vd > 0)
+ memmove( _V+wing, ((void *) ( V+low)) - move, span*sizeof(int));
+
+ vmin = low-wing;
+ vmax = hgh+wing;
+
+ V = _V-vmin;
+ M = _M-vmin;
+ HA = _HA-vmin;
+ HB = _HB-vmin;
+ NA = _NA-vmin;
+ NB = _NB-vmin;
+ T = _T-vmin;
+ }
+
+ if (low > minp)
+ { low -= 1;
+ NA[low] = NA[low+1];
+ NB[low] = NB[low+1];
+ V[low] = ap = INT32_MAX;
+ }
+ else
+ ap = V[low];
+ if (hgh < maxp)
+ { hgh += 1;
+ NA[hgh] = NA[hgh-1];
+ NB[hgh] = NB[hgh-1];
+ V[hgh] = INT32_MAX;
+ }
+ dif += 1;
+
+ ac = V[hgh+1] = V[low-1] = INT32_MAX;
+ a = aseq + low;
+ t = PATH_INT;
+ n = PATH_LEN;
+ ua = ub = -1;
+ for (k = low; k <= hgh; k++)
+ { int y, m;
+ int ha, hb;
+ int c, d;
+ BVEC b;
+ Pebble *pb;
+
+ am = ac;
+ ac = ap;
+ ap = V[d = k+1];
+
+ if (ac > ap)
+ if (ap > am)
+ { c = am-1;
+ m = n;
+ b = t;
+ ha = ua;
+ hb = ub;
+ }
+ else
+ { c = ap-1;
+ m = M[d];
+ b = T[d];
+ ha = HA[d];
+ hb = HB[d];
+ }
+ else
+ if (ac > am)
+ { c = am-1;
+ m = n;
+ b = t;
+ ha = ua;
+ hb = ub;
+ }
+ else
+ { c = ac-2;
+ m = M[k];
+ b = T[k];
+ ha = HA[k];
+ hb = HB[k];
+ }
+
+ if ((b & PATH_TOP) != 0)
+ m -= 1;
+ b <<= 1;
+
+ y = (c-k) >> 1;
+ while (1)
+ { c = bseq[y];
+ if (c == 4)
+ { more = 0;
+ if (bclip > k)
+ bclip = k;
+ break;
+ }
+ d = a[y];
+ if (c != d)
+ { if (d == 4)
+ { more = 0;
+ aclip = k;
+ }
+ break;
+ }
+ y -= 1;
+ if ((b & PATH_TOP) == 0)
+ m += 1;
+ b = (b << 1) | 1;
+ }
+ c = (y << 1) + k;
+
+ while (y+k <= NA[k])
+ { if (cells[ha].mark > NA[k])
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),
+ "Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,%d,%d\n",avail,ha,k,dif,NA[k]); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = ha;
+ pb->diag = k;
+ pb->diff = dif;
+ pb->mark = NA[k];
+ ha = avail++;
+ }
+ NA[k] -= TRACE_SPACE;
+ }
+ while (y <= NB[k])
+ { if (cells[hb].mark > NB[k])
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),
+ "Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" B %d: %d,%d,%d,%d\n",avail,hb,k,dif,NB[k]); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = hb;
+ pb->diag = k;
+ pb->diff = dif;
+ pb->mark = NB[k];
+ hb = avail++;
+ }
+ NB[k] -= TRACE_SPACE;
+ }
+
+ if (c < besta)
+ { besta = c;
+ besty = y;
+ if (m >= PATH_AVE)
+ { lasta = c;
+ if (TABLE[b & TRIM_MASK] >= 0)
+ if (TABLE[(b >> TRIM_LEN) & TRIM_MASK] + SCORE[b & TRIM_MASK] >= 0)
+ { trima = c;
+ trimy = y;
+ trimd = dif;
+ trimha = ha;
+ trimhb = hb;
+ }
+ }
+ }
+
+ t = T[k];
+ n = M[k];
+ ua = HA[k];
+ ub = HB[k];
+ V[k] = c;
+ T[k] = b;
+ M[k] = m;
+ HA[k] = ha;
+ HB[k] = hb;
+
+ a += 1;
+ }
+
+ if (more == 0)
+ { if (bseq[besty] != 4 && aseq[besta - besty] != 4)
+ more = 1;
+ if (low <= aclip)
+ { low = aclip+1;
+ if (morem <= M[aclip])
+ { morem = M[aclip];
+ morea = V[aclip];
+ morey = (morea - aclip)/2;
+ mored = dif;
+ moreha = HA[aclip];
+ morehb = HB[aclip];
+ }
+ }
+ if (hgh >= bclip)
+ { hgh = bclip-1;
+ if (morem <= M[bclip])
+ { morem = M[bclip];
+ morea = V[bclip];
+ morey = (morea - bclip)/2;
+ mored = dif;
+ moreha = HA[bclip];
+ morehb = HB[bclip];
+ }
+ }
+ aclip = -INT32_MAX;
+ bclip = INT32_MAX;
+ }
+
+ n = besta + WAVE_LAG;
+ while (hgh >= low)
+ if (V[hgh] > n)
+ hgh -= 1;
+ else
+ { while (V[low] > n)
+ low += 1;
+ break;
+ }
+
+#ifdef WAVE_STATS
+ k = (hgh-low)+1;
+ if (k > MAX)
+ MAX = k;
+ TOT += k;
+ NWV += 1;
+#endif
+
+#ifdef DEBUG_WAVE
+ print_wave(V,M,low,hgh,besta);
+#endif
+ }
+
+ { uint16 *atrace = (uint16 *) apath->trace;
+ uint16 *btrace = (uint16 *) bpath->trace;
+ int atlen, btlen;
+ int trimx;
+ int a, b, k, h;
+ int d, e;
+
+ if (morem >= 0)
+ { trimx = morea-morey;
+ trimy = morey;
+ trimd = mored;
+ trimha = moreha;
+ trimhb = morehb;
+ }
+ else
+ trimx = trima-trimy;
+
+ atlen = btlen = 0;
+
+ a = -1;
+ for (h = trimha; h >= 0; h = b)
+ { b = cells[h].ptr;
+ cells[h].ptr = a;
+ a = h;
+ }
+ h = a;
+
+ k = cells[h].diag;
+ b = cells[h].mark - k;
+ e = 0;
+#ifdef SHOW_TRAIL
+ printf(" A path = (%5d,%5d)\n",b+k,b); fflush(stdout);
+#endif
+ if ((b+k)%TRACE_SPACE != 0)
+ { h = cells[h].ptr;
+ if (h < 0)
+ { a = trimy;
+ d = trimd;
+ }
+ else
+ { k = cells[h].diag;
+ a = cells[h].mark - k;
+ d = cells[h].diff;
+ }
+#ifdef SHOW_TRAIL
+ printf(" +%4d: (%5d,%5d): %3d / %3d\n",h,a+k,a,d-e,b-a); fflush(stdout);
+#endif
+ if (apath->tlen == 0)
+ { atrace[--atlen] = (uint16) (b-a);
+ atrace[--atlen] = (uint16) (d-e);
+ }
+ else
+ { atrace[1] = (uint16) (atrace[1] + (b-a));
+ atrace[0] = (uint16) (atrace[0] + (d-e));
+ }
+ b = a;
+ e = d;
+ }
+ if (h >= 0)
+ { for (h = cells[h].ptr; h >= 0; h = cells[h].ptr)
+ { k = cells[h].diag;
+ a = cells[h].mark - k;
+ atrace[--atlen] = (uint16) (b-a);
+ d = cells[h].diff;
+ atrace[--atlen] = (uint16) (d-e);
+#ifdef SHOW_TRAIL
+ printf(" %4d: (%5d,%5d): %3d / %3d\n",h,a+k,a,d-e,b-a); fflush(stdout);
+#endif
+ b = a;
+ e = d;
+ }
+ if (b+k != trimx)
+ { atrace[--atlen] = (uint16) (b-trimy);
+ atrace[--atlen] = (uint16) (trimd-e);
+#ifdef SHOW_TRAIL
+ printf(" (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,b-trimy); fflush(stdout);
+#endif
+ }
+ else if (b != trimy)
+ { atrace[atlen+1] = (uint16) (atrace[atlen+1] + (b-trimy));
+ atrace[atlen] = (uint16) (atrace[atlen] + (trimd-e));
+#ifdef SHOW_TRAIL
+ printf(" @ (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,b-trimy); fflush(stdout);
+#endif
+ }
+ }
+
+ a = -1;
+ for (h = trimhb; h >= 0; h = b)
+ { b = cells[h].ptr;
+ cells[h].ptr = a;
+ a = h;
+ }
+ h = a;
+
+ k = cells[h].diag;
+ b = cells[h].mark + k;
+ e = 0;
+#ifdef SHOW_TRAIL
+ printf(" B path = (%5d,%5d)\n",b,b-k); fflush(stdout);
+#endif
+ if ((b-k)%TRACE_SPACE != boff)
+ { h = cells[h].ptr;
+ if (h < 0)
+ { a = trimx;
+ d = trimd;
+ }
+ else
+ { k = cells[h].diag;
+ a = cells[h].mark + k;
+ d = cells[h].diff;
+ }
+#ifdef SHOW_TRAIL
+ printf(" +%4d: (%5d,%5d): %3d / %3d\n",h,a,a-k,d-e,b-a); fflush(stdout);
+#endif
+ if (bpath->tlen == 0)
+ { btrace[--btlen] = (uint16) (b-a);
+                btrace[--btlen] = (uint16) (d-e);
+ }
+ else
+ { btrace[1] = (uint16) (btrace[1] + (b-a));
+ btrace[0] = (uint16) (btrace[0] + (d-e));
+ }
+ b = a;
+ e = d;
+ }
+
+ if (h >= 0)
+ { for (h = cells[h].ptr; h >= 0; h = cells[h].ptr)
+ { k = cells[h].diag;
+ a = cells[h].mark + k;
+ btrace[--btlen] = (uint16) (b-a);
+ d = cells[h].diff;
+ btrace[--btlen] = (uint16) (d-e);
+#ifdef SHOW_TRAIL
+ printf(" %4d: (%5d,%5d): %3d / %3d\n",h,a,a-k,d-e,b-a); fflush(stdout);
+#endif
+ b = a;
+ e = d;
+ }
+ if (b-k != trimy)
+ { btrace[--btlen] = (uint16) (b-trimx);
+ btrace[--btlen] = (uint16) (trimd-e);
+#ifdef SHOW_TRAIL
+ printf(" (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,b-trimx); fflush(stdout);
+#endif
+ }
+ else if (b != trimx)
+ { btrace[btlen+1] = (uint16) (btrace[btlen+1] + (b-trimx));
+ btrace[btlen] = (uint16) (btrace[btlen] + (trimd-e));
+#ifdef SHOW_TRAIL
+ printf(" @ (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,b-trimx); fflush(stdout);
+#endif
+ }
+ }
+
+ apath->abpos = trimx;
+ apath->bbpos = trimy;
+ apath->diffs = apath->diffs + trimd;
+ apath->tlen = apath->tlen - atlen;
+ apath->trace = atrace + atlen;
+ if (COMP(align->flags))
+ { bpath->aepos = align->blen - apath->bbpos;
+ bpath->bepos = align->alen - apath->abpos;
+ }
+ else
+ { bpath->abpos = apath->bbpos;
+ bpath->bbpos = apath->abpos;
+ }
+ bpath->diffs = bpath->diffs + trimd;
+ bpath->tlen = bpath->tlen - btlen;
+ bpath->trace = btrace + btlen;
+ }
+
+ return (0);
+}
+
+
+/* Find the longest local alignment between aseq and bseq through (xcnt,ycnt)
+ See associated .h file for the precise definition of the interface.
+*/
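+
+/* How the parameters are used below (a summary of the code, not a new specification):
+   the alignment is required to pass through anti-diagonal 'anti' within the diagonal
+   bracket [low,hgh]; lbord and hbord bound how far the search may wander below low and
+   above hgh, with negative values leaving that side unbounded (apart from the
+   self-comparison case).  The returned Path is the B-versus-A version of the alignment,
+   and both its trace and align->path->trace point into the Work_Data's scratch storage,
+   so they remain valid only until the next call that reuses the same Work_Data.          */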
+
+Path *Local_Alignment(Alignment *align, Work_Data *ework, Align_Spec *espec,
+ int low, int hgh, int anti, int lbord, int hbord)
+{ _Work_Data *work = ( _Work_Data *) ework;
+ _Align_Spec *spec = (_Align_Spec *) espec;
+
+ Path *apath, *bpath;
+ int minp, maxp;
+ int selfie;
+
+ { int alen, blen;
+ int maxtp, wsize;
+
+ alen = align->alen;
+ blen = align->blen;
+
+ if (hgh-low >= 7500)
+ wsize = VectorEl*(hgh-low+1);
+ else
+ wsize = VectorEl*10000;
+ if (wsize >= work->vecmax)
+ if (enlarge_vector(work,wsize))
+ EXIT(NULL);
+
+ if (alen < blen)
+ maxtp = 2*(blen/spec->trace_space+2);
+ else
+ maxtp = 2*(alen/spec->trace_space+2);
+ wsize = 4*maxtp*sizeof(uint16) + sizeof(Path);
+ if (wsize > work->pntmax)
+ if (enlarge_points(work,wsize))
+ EXIT(NULL);
+
+ apath = align->path;
+ bpath = (Path *) work->points;
+
+ apath->trace = ((uint16 *) (bpath+1)) + maxtp;
+ bpath->trace = ((uint16 *) apath->trace) + 2*maxtp;
+ }
+
+#ifdef DEBUG_PASSES
+ printf("\n");
+#endif
+
+ selfie = (align->aseq == align->bseq);
+
+ if (lbord < 0)
+ { if (selfie && low >= 0)
+ minp = 1;
+ else
+ minp = -INT32_MAX;
+ }
+ else
+ minp = low-lbord;
+ if (hbord < 0)
+ { if (selfie && hgh <= 0)
+ maxp = -1;
+ else
+ maxp = INT32_MAX;
+ }
+ else
+ maxp = hgh+hbord;
+
+ if (forward_wave(work,spec,align,bpath,&low,hgh,anti,minp,maxp))
+ EXIT(NULL);
+
+#ifdef DEBUG_PASSES
+ printf("F1 (%d,%d) ~ %d => (%d,%d) %d\n",
+ (2*anti+(low+hgh))/4,(anti-(low+hgh))/4,hgh-low,
+ apath->read_A_match_end_,apath->read_B_match_end_,apath->diffs);
+#endif
+
+ if (reverse_wave(work,spec,align,bpath,low,low,anti,minp,maxp))
+ EXIT(NULL);
+
+#ifdef DEBUG_PASSES
+ printf("R1 (%d,%d) => (%d,%d) %d\n",
+ (anti+low)/2,(anti-low)/2,apath->read_A_match_start_,apath->read_B_match_start_,apath->diffs);
+#endif
+
+ if (COMP(align->flags))
+ { uint16 *trace = (uint16 *) bpath->trace;
+ uint16 p;
+ int i, j;
+
+ i = bpath->tlen-2;
+ j = 0;
+ while (j < i)
+ { p = trace[i];
+ trace[i] = trace[j];
+ trace[j] = p;
+ p = trace[i+1];
+ trace[i+1] = trace[j+1];
+ trace[j+1] = p;
+ i -= 2;
+ j += 2;
+ }
+ }
+
+#ifdef DEBUG_POINTS
+ { uint16 *trace = (uint16 *) apath->trace;
+ int a, h;
+
+ printf("\nA-path (%d,%d)->(%d,%d)",apath->read_A_match_start_,apath->read_B_match_start_,apath->read_A_match_end_,apath->read_B_match_end_);
+    printf(" %c\n",(COMP(align->reverse_complement_match_) ? 'c' : 'n'));
+ a = apath->read_B_match_start_;
+ for (h = 1; h < apath->tlen; h += 2)
+ { int dif = trace[h-1];
+ int del = trace[h];
+ a += del;
+ printf(" %d / %d (%d)\n",dif,del,a);
+ }
+ }
+
+ { uint16 *trace = (uint16 *) bpath->trace;
+ int a, h;
+
+ printf("\nB-path (%d,%d)->(%d,%d)",bpath->read_A_match_start_,bpath->read_B_match_start_,bpath->read_A_match_end_,bpath->read_B_match_end_);
+ printf(" %c [%d,%d]\n",(COMP(align->reverse_complement_match_) ? 'c' : 'n'),align->blen,align->alen);
+ a = bpath->read_B_match_start_;
+ for (h = 1; h < bpath->tlen; h += 2)
+ { int dif = trace[h-1];
+ int del = trace[h];
+ a += del;
+ printf(" %d / %d (%d)\n",dif,del,a);
+ }
+ }
+#endif
+
+ return (bpath);
+}
+
+
+/****************************************************************************************\
+* *
+* EXTENSION VERSION OF LOCAL ALIGNMENT *
+* *
+\****************************************************************************************/
+
+static int VectorEn = 4*sizeof(int) + sizeof(BVEC);
+
+static int forward_extend(_Work_Data *work, _Align_Spec *spec, Alignment *align,
+ int midd, int mida, int minp, int maxp)
+{ char *aseq = align->aseq;
+ char *bseq = align->bseq;
+ Path *apath = align->path;
+
+ int hgh, low, dif;
+ int vlen, vmin, vmax;
+ int *V, *M;
+ int *_V, *_M;
+ BVEC *T;
+ BVEC *_T;
+
+ int *HA, *NA;
+ int *_HA, *_NA;
+ Pebble *cells;
+ int avail, cmax, boff;
+
+ int TRACE_SPACE = spec->trace_space;
+ int PATH_AVE = spec->ave_path;
+ int16 *SCORE = spec->score;
+ int16 *TABLE = spec->table;
+
+ int besta, besty;
+ int trima, trimy, trimd;
+ int trimha;
+ int morea, morey, mored;
+ int moreha;
+ int more, morem, lasta;
+ int aclip, bclip;
+
+ hgh = midd;
+ low = midd;
+ dif = 0;
+
+ { int span, wing;
+
+ span = (hgh-low)+1;
+ vlen = work->vecmax/VectorEn;
+ wing = (vlen - span)/2;
+ vmin = low - wing;
+ vmax = hgh + wing;
+
+ _V = ((int *) work->vector);
+ _M = _V + vlen;
+ _HA = _M + vlen;
+ _NA = _HA + vlen;
+ _T = ((BVEC *) (_NA + vlen));
+
+ V = _V-vmin;
+ M = _M-vmin;
+ HA = _HA-vmin;
+ NA = _NA-vmin;
+ T = _T-vmin;
+
+ cells = (Pebble *) (work->cells);
+ cmax = work->celmax;
+ avail = 0;
+
+ if (COMP(align->flags))
+ boff = align->blen % TRACE_SPACE;
+ else
+ boff = 0;
+ }
+
+ /* Compute 0-wave starting from mid-line */
+
+ more = 1;
+ aclip = INT32_MAX;
+ bclip = -INT32_MAX;
+
+ besta = trima = morea = lasta = mida;
+ besty = trimy = morey = (mida-hgh) >> 1;
+ trimd = mored = 0;
+ trimha = moreha = 0;
+ morem = -1;
+
+ { int k;
+ char *a;
+
+ a = aseq + hgh;
+ for (k = hgh; k >= low; k--)
+ { int y, c, d;
+ int ha, na;
+ Pebble *pb;
+
+ y = (mida-k) >> 1;
+
+ if (avail >= cmax-1)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+
+ na = ((y+k)/TRACE_SPACE)*TRACE_SPACE;
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,0,%d\n",avail,-1,k,na); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = -1;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = na;
+ ha = avail++;
+ na += TRACE_SPACE;
+
+ while (1)
+ { c = bseq[y];
+ if (c == 4)
+ { more = 0;
+ if (bclip < k)
+ bclip = k;
+ break;
+ }
+ d = a[y];
+ if (c != d)
+ { if (d == 4)
+ { more = 0;
+ aclip = k;
+ }
+ break;
+ }
+ y += 1;
+ }
+ c = (y << 1) + k;
+
+ while (y+k >= na)
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,0,%d\n",avail,ha,k,na); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = ha;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = na;
+ ha = avail++;
+ na += TRACE_SPACE;
+ }
+
+ if (c > besta)
+ { besta = trima = lasta = c;
+ besty = trimy = y;
+ trimha = ha;
+ }
+
+ V[k] = c;
+ T[k] = PATH_INT;
+ M[k] = PATH_LEN;
+ HA[k] = ha;
+ NA[k] = na;
+
+ a -= 1;
+ }
+ }
+
+ if (more == 0)
+ { if (bseq[besty] != 4 && aseq[besta - besty] != 4)
+ more = 1;
+ if (hgh >= aclip)
+ { hgh = aclip-1;
+ if (morem <= M[aclip])
+ { morem = M[aclip];
+ morea = V[aclip];
+ morey = (morea - aclip)/2;
+ moreha = HA[aclip];
+ }
+ }
+ if (low <= bclip)
+ { low = bclip+1;
+ if (morem <= M[bclip])
+ { morem = M[bclip];
+ morea = V[bclip];
+ morey = (morea - bclip)/2;
+ moreha = HA[bclip];
+ }
+ }
+ aclip = INT32_MAX;
+ bclip = -INT32_MAX;
+ }
+
+#ifdef DEBUG_WAVE
+ printf("\nFORWARD WAVE:\n");
+ print_wave(V,M,low,hgh,besta);
+#endif
+
+ /* Compute successive waves until no furthest reaching points remain */
+
+ while (more && lasta >= besta - TRIM_MLAG)
+ { int k, n;
+ int ua;
+ BVEC t;
+ int am, ac, ap;
+ char *a;
+
+ if (low <= vmin || hgh >= vmax)
+ { int span, wing;
+ int64 move;
+ int64 vd, md, had, nad, td;
+
+ span = (hgh-low)+1;
+ if (.8*vlen < span)
+ { if (enlarge_vector(work,vlen*VectorEn))
+ EXIT(1);
+
+ move = ((void *) _V) - work->vector;
+ vlen = work->vecmax/VectorEn;
+
+ _V = (int *) work->vector;
+ _M = _V + vlen;
+ _HA = _M + vlen;
+ _NA = _HA + vlen;
+ _T = ((BVEC *) (_NA + vlen));
+ }
+ else
+ move = 0;
+
+ wing = (vlen - span)/2;
+
+ vd = ((void *) ( _V+wing)) - (((void *) ( V+low)) - move);
+ md = ((void *) ( _M+wing)) - (((void *) ( M+low)) - move);
+ had = ((void *) (_HA+wing)) - (((void *) (HA+low)) - move);
+ nad = ((void *) (_NA+wing)) - (((void *) (NA+low)) - move);
+ td = ((void *) ( _T+wing)) - (((void *) ( T+low)) - move);
+
+ if (vd < 0)
+ memmove( _V+wing, ((void *) ( V+low)) - move, span*sizeof(int));
+ if (md < 0)
+ memmove( _M+wing, ((void *) ( M+low)) - move, span*sizeof(int));
+ if (had < 0)
+ memmove(_HA+wing, ((void *) (HA+low)) - move, span*sizeof(int));
+ if (nad < 0)
+ memmove(_NA+wing, ((void *) (NA+low)) - move, span*sizeof(int));
+ if (td < 0)
+ memmove( _T+wing, ((void *) ( T+low)) - move, span*sizeof(BVEC));
+
+ if (td > 0)
+ memmove( _T+wing, ((void *) ( T+low)) - move, span*sizeof(BVEC));
+ if (nad > 0)
+ memmove(_NA+wing, ((void *) (NA+low)) - move, span*sizeof(int));
+ if (had > 0)
+ memmove(_HA+wing, ((void *) (HA+low)) - move, span*sizeof(int));
+ if (md > 0)
+ memmove( _M+wing, ((void *) ( M+low)) - move, span*sizeof(int));
+ if (vd > 0)
+ memmove( _V+wing, ((void *) ( V+low)) - move, span*sizeof(int));
+
+ vmin = low-wing;
+ vmax = hgh+wing;
+
+ V = _V-vmin;
+ M = _M-vmin;
+ HA = _HA-vmin;
+ NA = _NA-vmin;
+ T = _T-vmin;
+ }
+
+ if (low > minp)
+ { low -= 1;
+ NA[low] = NA[low+1];
+ V[low] = -1;
+ }
+ if (hgh < maxp)
+ { hgh += 1;
+ NA[hgh] = NA[hgh-1];
+ V[hgh] = am = -1;
+ }
+ else
+ am = V[hgh];
+ dif += 1;
+
+ ac = V[hgh+1] = V[low-1] = -1;
+ a = aseq + hgh;
+ t = PATH_INT;
+ n = PATH_LEN;
+ ua = -1;
+ for (k = hgh; k >= low; k--)
+ { int y, m;
+ int ha;
+ int c, d;
+ BVEC b;
+ Pebble *pb;
+
+ ap = ac;
+ ac = am;
+ am = V[d = k-1];
+
+ if (ac < am)
+ if (am < ap)
+ { c = ap+1;
+ m = n;
+ b = t;
+ ha = ua;
+ }
+ else
+ { c = am+1;
+ m = M[d];
+ b = T[d];
+ ha = HA[d];
+ }
+ else
+ if (ac < ap)
+ { c = ap+1;
+ m = n;
+ b = t;
+ ha = ua;
+ }
+ else
+ { c = ac+2;
+ m = M[k];
+ b = T[k];
+ ha = HA[k];
+ }
+
+ if ((b & PATH_TOP) != 0)
+ m -= 1;
+ b <<= 1;
+
+ y = (c-k) >> 1;
+ while (1)
+ { c = bseq[y];
+ if (c == 4)
+ { more = 0;
+ if (bclip < k)
+ bclip = k;
+ break;
+ }
+ d = a[y];
+ if (c != d)
+ { if (d == 4)
+ { more = 0;
+ aclip = k;
+ }
+ break;
+ }
+ y += 1;
+ if ((b & PATH_TOP) == 0)
+ m += 1;
+ b = (b << 1) | 1;
+ }
+ c = (y << 1) + k;
+
+ while (y+k >= NA[k])
+ { if (cells[ha].mark < NA[k])
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),
+ "Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,%d,%d\n",avail,ha,k,dif,NA[k]); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = ha;
+ pb->diag = k;
+ pb->diff = dif;
+ pb->mark = NA[k];
+ ha = avail++;
+ }
+ NA[k] += TRACE_SPACE;
+ }
+
+ if (c > besta)
+ { besta = c;
+ besty = y;
+ if (m >= PATH_AVE)
+ { lasta = c;
+ if (TABLE[b & TRIM_MASK] >= 0)
+ if (TABLE[(b >> TRIM_LEN) & TRIM_MASK] + SCORE[b & TRIM_MASK] >= 0)
+ { trima = c;
+ trimy = y;
+ trimd = dif;
+ trimha = ha;
+ }
+ }
+ }
+
+ t = T[k];
+ n = M[k];
+ ua = HA[k];
+ V[k] = c;
+ T[k] = b;
+ M[k] = m;
+ HA[k] = ha;
+
+ a -= 1;
+ }
+
+ if (more == 0)
+ { if (bseq[besty] != 4 && aseq[besta-besty] != 4)
+ more = 1;
+ if (hgh >= aclip)
+ { hgh = aclip-1;
+ if (morem <= M[aclip])
+ { morem = M[aclip];
+ morea = V[aclip];
+ morey = (morea - aclip)/2;
+ mored = dif;
+ moreha = HA[aclip];
+ }
+ }
+ if (low <= bclip)
+ { low = bclip+1;
+ if (morem <= M[bclip])
+ { morem = M[bclip];
+ morea = V[bclip];
+ morey = (morea - bclip)/2;
+ mored = dif;
+ moreha = HA[bclip];
+ }
+ }
+ aclip = INT32_MAX;
+ bclip = -INT32_MAX;
+ }
+
+ n = besta - WAVE_LAG;
+ while (hgh >= low)
+ if (V[hgh] < n)
+ hgh -= 1;
+ else
+ { while (V[low] < n)
+ low += 1;
+ break;
+ }
+
+#ifdef WAVE_STATS
+ k = (hgh-low)+1;
+ if (k > MAX)
+ MAX = k;
+ TOT += k;
+ NWV += 1;
+#endif
+
+#ifdef DEBUG_WAVE
+ print_wave(V,M,low,hgh,besta);
+#endif
+ }
+
+ { uint16 *atrace = (uint16 *) apath->trace;
+ int atlen;
+ int trimx;
+ int a, b, k, h;
+ int d, e;
+
+ if (morem >= 0)
+ { trimx = morea-morey;
+ trimy = morey;
+ trimd = mored;
+ trimha = moreha;
+ }
+ else
+ trimx = trima-trimy;
+
+ atlen = 0;
+
+ a = -1;
+ for (h = trimha; h >= 0; h = b)
+ { b = cells[h].ptr;
+ cells[h].ptr = a;
+ a = h;
+ }
+ h = a;
+
+ k = cells[h].diag;
+ b = (mida-k)/2;
+ e = 0;
+#ifdef SHOW_TRAIL
+ printf(" A path = (%5d,%5d)\n",(mida+k)/2,b); fflush(stdout);
+#endif
+ for (h = cells[h].ptr; h >= 0; h = cells[h].ptr)
+ { k = cells[h].diag;
+ a = cells[h].mark - k;
+ d = cells[h].diff;
+ atrace[atlen++] = (uint16) (d-e);
+ atrace[atlen++] = (uint16) (a-b);
+#ifdef SHOW_TRAIL
+ printf(" %4d: (%5d,%5d): %3d / %3d\n",h,a+k,a,d-e,a-b); fflush(stdout);
+#endif
+ b = a;
+ e = d;
+ }
+ if (b+k != trimx)
+ { atrace[atlen++] = (uint16) (trimd-e);
+ atrace[atlen++] = (uint16) (trimy-b);
+#ifdef SHOW_TRAIL
+ printf(" (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,trimy-b); fflush(stdout);
+#endif
+ }
+ else if (b != trimy)
+ { atrace[atlen-1] = (uint16) (atrace[atlen-1] + (trimy-b));
+ atrace[atlen-2] = (uint16) (atrace[atlen-2] + (trimd-e));
+#ifdef SHOW_TRAIL
+ printf(" @ (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,trimy-b); fflush(stdout);
+#endif
+ }
+
+ apath->aepos = trimx;
+ apath->bepos = trimy;
+ apath->diffs = trimd;
+ apath->tlen = atlen;
+ }
+
+ return (0);
+}
+
+static int reverse_extend(_Work_Data *work, _Align_Spec *spec, Alignment *align,
+ int midd, int mida, int minp, int maxp)
+{ char *aseq = align->aseq - 1;
+ char *bseq = align->bseq - 1;
+ Path *apath = align->path;
+
+ int hgh, low, dif;
+ int vlen, vmin, vmax;
+ int *V, *M;
+ int *_V, *_M;
+ BVEC *T;
+ BVEC *_T;
+
+ int *HA, *NA;
+ int *_HA, *_NA;
+ Pebble *cells;
+ int avail, cmax, boff;
+
+ int TRACE_SPACE = spec->trace_space;
+ int PATH_AVE = spec->ave_path;
+ int16 *SCORE = spec->score;
+ int16 *TABLE = spec->table;
+
+ int besta, besty;
+ int trima, trimy, trimd;
+ int trimha;
+ int morea, morey, mored;
+ int moreha;
+ int more, morem, lasta;
+ int aclip, bclip;
+
+ hgh = midd;
+ low = midd;
+ dif = 0;
+
+ { int span, wing;
+
+ span = (hgh-low)+1;
+ vlen = work->vecmax/VectorEn;
+ wing = (vlen - span)/2;
+ vmin = low - wing;
+ vmax = hgh + wing;
+
+ _V = ((int *) work->vector);
+ _M = _V + vlen;
+ _HA = _M + vlen;
+ _NA = _HA + vlen;
+ _T = ((BVEC *) (_NA + vlen));
+
+ V = _V-vmin;
+ M = _M-vmin;
+ HA = _HA-vmin;
+ NA = _NA-vmin;
+ T = _T-vmin;
+
+ cells = (Pebble *) (work->cells);
+ cmax = work->celmax;
+ avail = 0;
+
+ if (COMP(align->flags))
+ boff = align->blen % TRACE_SPACE;
+ else
+ boff = 0;
+ }
+
+ more = 1;
+ aclip = -INT32_MAX;
+ bclip = INT32_MAX;
+
+ besta = trima = morea = lasta = mida;
+ besty = trimy = morey = (mida-hgh) >> 1;
+ trimd = mored = 0;
+ trimha = moreha = 0;
+ morem = -1;
+
+ { int k;
+ char *a;
+
+ a = aseq + low;
+ for (k = low; k <= hgh; k++)
+ { int y, c, d;
+ int ha, na;
+ Pebble *pb;
+
+ y = (mida-k) >> 1;
+
+ if (avail >= cmax-1)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+
+ na = ((y+k+TRACE_SPACE-1)/TRACE_SPACE-1)*TRACE_SPACE;
+#ifdef SHOW_TPS
+ printf(" A %d: -1,%d,0,%d\n",avail,k,na+TRACE_SPACE); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = -1;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = y+k;
+ ha = avail++;
+
+ while (1)
+ { c = bseq[y];
+ if (c == 4)
+ { more = 0;
+ if (bclip > k)
+ bclip = k;
+ break;
+ }
+ d = a[y];
+ if (c != d)
+ { if (d == 4)
+ { more = 0;
+ aclip = k;
+ }
+ break;
+ }
+ y -= 1;
+ }
+ c = (y << 1) + k;
+
+ while (y+k <= na)
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),"Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,0,%d\n",avail,ha,k,na); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = ha;
+ pb->diag = k;
+ pb->diff = 0;
+ pb->mark = na;
+ ha = avail++;
+ na -= TRACE_SPACE;
+ }
+
+ if (c < besta)
+ { besta = trima = lasta = c;
+ besty = trimy = y;
+ trimha = ha;
+ }
+
+ V[k] = c;
+ T[k] = PATH_INT;
+ M[k] = PATH_LEN;
+ HA[k] = ha;
+ NA[k] = na;
+
+ a += 1;
+ }
+ }
+
+ if (more == 0)
+ { if (bseq[besty] != 4 && aseq[besta - besty] != 4)
+ more = 1;
+ if (low <= aclip)
+ { low = aclip+1;
+ if (morem <= M[aclip])
+ { morem = M[aclip];
+ morea = V[aclip];
+ morey = (morea - aclip)/2;
+ moreha = HA[aclip];
+ }
+ }
+ if (hgh >= bclip)
+ { hgh = bclip-1;
+ if (morem <= M[bclip])
+ { morem = M[bclip];
+ morea = V[bclip];
+ morey = (morea - bclip)/2;
+ moreha = HA[bclip];
+ }
+ }
+ aclip = -INT32_MAX;
+ bclip = INT32_MAX;
+ }
+
+#ifdef DEBUG_WAVE
+ printf("\nREVERSE WAVE:\n");
+ print_wave(V,M,low,hgh,besta);
+#endif
+
+ while (more && lasta <= besta + TRIM_MLAG)
+ { int k, n;
+ int ua;
+ BVEC t;
+ int am, ac, ap;
+ char *a;
+
+ if (low <= vmin || hgh >= vmax)
+ { int span, wing;
+ int64 move, vd, md, had, nad, td;
+
+ span = (hgh-low)+1;
+ if (.8*vlen < span)
+ { if (enlarge_vector(work,vlen*VectorEn))
+ EXIT(1);
+
+ move = ((void *) _V) - work->vector;
+ vlen = work->vecmax/VectorEn;
+
+ _V = (int *) work->vector;
+ _M = _V + vlen;
+ _HA = _M + vlen;
+ _NA = _HA + vlen;
+ _T = ((BVEC *) (_NA + vlen));
+ }
+ else
+ move = 0;
+
+ wing = (vlen - span)/2;
+
+ vd = ((void *) ( _V+wing)) - (((void *) ( V+low)) - move);
+ md = ((void *) ( _M+wing)) - (((void *) ( M+low)) - move);
+ had = ((void *) (_HA+wing)) - (((void *) (HA+low)) - move);
+ nad = ((void *) (_NA+wing)) - (((void *) (NA+low)) - move);
+ td = ((void *) ( _T+wing)) - (((void *) ( T+low)) - move);
+
+ if (vd < 0)
+ memmove( _V+wing, ((void *) ( V+low)) - move, span*sizeof(int));
+ if (md < 0)
+ memmove( _M+wing, ((void *) ( M+low)) - move, span*sizeof(int));
+ if (had < 0)
+ memmove(_HA+wing, ((void *) (HA+low)) - move, span*sizeof(int));
+ if (nad < 0)
+ memmove(_NA+wing, ((void *) (NA+low)) - move, span*sizeof(int));
+ if (td < 0)
+ memmove( _T+wing, ((void *) ( T+low)) - move, span*sizeof(BVEC));
+
+ if (td > 0)
+ memmove( _T+wing, ((void *) ( T+low)) - move, span*sizeof(BVEC));
+ if (nad > 0)
+ memmove(_NA+wing, ((void *) (NA+low)) - move, span*sizeof(int));
+ if (had > 0)
+ memmove(_HA+wing, ((void *) (HA+low)) - move, span*sizeof(int));
+ if (md > 0)
+ memmove( _M+wing, ((void *) ( M+low)) - move, span*sizeof(int));
+ if (vd > 0)
+ memmove( _V+wing, ((void *) ( V+low)) - move, span*sizeof(int));
+
+ vmin = low-wing;
+ vmax = hgh+wing;
+
+ V = _V-vmin;
+ M = _M-vmin;
+ HA = _HA-vmin;
+ NA = _NA-vmin;
+ T = _T-vmin;
+ }
+
+ if (low > minp)
+ { low -= 1;
+ NA[low] = NA[low+1];
+ V[low] = ap = INT32_MAX;
+ }
+ else
+ ap = V[low];
+ if (hgh < maxp)
+ { hgh += 1;
+ NA[hgh] = NA[hgh-1];
+ V[hgh] = INT32_MAX;
+ }
+ dif += 1;
+
+ ac = V[hgh+1] = V[low-1] = INT32_MAX;
+ a = aseq + low;
+ t = PATH_INT;
+ n = PATH_LEN;
+ ua = -1;
+ for (k = low; k <= hgh; k++)
+ { int y, m;
+ int ha;
+ int c, d;
+ BVEC b;
+ Pebble *pb;
+
+ am = ac;
+ ac = ap;
+ ap = V[d = k+1];
+
+ if (ac > ap)
+ if (ap > am)
+ { c = am-1;
+ m = n;
+ b = t;
+ ha = ua;
+ }
+ else
+ { c = ap-1;
+ m = M[d];
+ b = T[d];
+ ha = HA[d];
+ }
+ else
+ if (ac > am)
+ { c = am-1;
+ m = n;
+ b = t;
+ ha = ua;
+ }
+ else
+ { c = ac-2;
+ m = M[k];
+ b = T[k];
+ ha = HA[k];
+ }
+
+ if ((b & PATH_TOP) != 0)
+ m -= 1;
+ b <<= 1;
+
+ y = (c-k) >> 1;
+ while (1)
+ { c = bseq[y];
+ if (c == 4)
+ { more = 0;
+ if (bclip > k)
+ bclip = k;
+ break;
+ }
+ d = a[y];
+ if (c != d)
+ { if (d == 4)
+ { more = 0;
+ aclip = k;
+ }
+ break;
+ }
+ y -= 1;
+ if ((b & PATH_TOP) == 0)
+ m += 1;
+ b = (b << 1) | 1;
+ }
+ c = (y << 1) + k;
+
+ while (y+k <= NA[k])
+ { if (cells[ha].mark > NA[k])
+ { if (avail >= cmax)
+ { cmax = ((int) (avail*1.2)) + 10000;
+ cells = (Pebble *) Realloc(cells,cmax*sizeof(Pebble),
+ "Reallocating trace cells");
+ if (cells == NULL)
+ EXIT(1);
+ work->celmax = cmax;
+ work->cells = (void *) cells;
+ }
+#ifdef SHOW_TPS
+ printf(" A %d: %d,%d,%d,%d\n",avail,ha,k,dif,NA[k]); fflush(stdout);
+#endif
+ pb = cells+avail;
+ pb->ptr = ha;
+ pb->diag = k;
+ pb->diff = dif;
+ pb->mark = NA[k];
+ ha = avail++;
+ }
+ NA[k] -= TRACE_SPACE;
+ }
+
+ if (c < besta)
+ { besta = c;
+ besty = y;
+ if (m >= PATH_AVE)
+ { lasta = c;
+ if (TABLE[b & TRIM_MASK] >= 0)
+ if (TABLE[(b >> TRIM_LEN) & TRIM_MASK] + SCORE[b & TRIM_MASK] >= 0)
+ { trima = c;
+ trimy = y;
+ trimd = dif;
+ trimha = ha;
+ }
+ }
+ }
+
+ t = T[k];
+ n = M[k];
+ ua = HA[k];
+ V[k] = c;
+ T[k] = b;
+ M[k] = m;
+ HA[k] = ha;
+
+ a += 1;
+ }
+
+ if (more == 0)
+ { if (bseq[besty] != 4 && aseq[besta - besty] != 4)
+ more = 1;
+ if (low <= aclip)
+ { low = aclip+1;
+ if (morem <= M[aclip])
+ { morem = M[aclip];
+ morea = V[aclip];
+ morey = (morea - aclip)/2;
+ mored = dif;
+ moreha = HA[aclip];
+ }
+ }
+ if (hgh >= bclip)
+ { hgh = bclip-1;
+ if (morem <= M[bclip])
+ { morem = M[bclip];
+ morea = V[bclip];
+ morey = (morea - bclip)/2;
+ mored = dif;
+ moreha = HA[bclip];
+ }
+ }
+ aclip = -INT32_MAX;
+ bclip = INT32_MAX;
+ }
+
+ n = besta + WAVE_LAG;
+ while (hgh >= low)
+ if (V[hgh] > n)
+ hgh -= 1;
+ else
+ { while (V[low] > n)
+ low += 1;
+ break;
+ }
+
+#ifdef WAVE_STATS
+ k = (hgh-low)+1;
+ if (k > MAX)
+ MAX = k;
+ TOT += k;
+ NWV += 1;
+#endif
+
+#ifdef DEBUG_WAVE
+ print_wave(V,M,low,hgh,besta);
+#endif
+ }
+
+ { uint16 *atrace = (uint16 *) apath->trace;
+ int atlen;
+ int trimx;
+ int a, b, k, h;
+ int d, e;
+
+ if (morem >= 0)
+ { trimx = morea-morey;
+ trimy = morey;
+ trimd = mored;
+ trimha = moreha;
+ }
+ else
+ trimx = trima-trimy;
+
+ atlen = 0;
+
+ a = -1;
+ for (h = trimha; h >= 0; h = b)
+ { b = cells[h].ptr;
+ cells[h].ptr = a;
+ a = h;
+ }
+ h = a;
+
+ k = cells[h].diag;
+ b = cells[h].mark - k;
+ e = 0;
+#ifdef SHOW_TRAIL
+ printf(" A path = (%5d,%5d)\n",b+k,b); fflush(stdout);
+#endif
+ if ((b+k)%TRACE_SPACE != 0)
+ { h = cells[h].ptr;
+ if (h < 0)
+ { a = trimy;
+ d = trimd;
+ }
+ else
+ { k = cells[h].diag;
+ a = cells[h].mark - k;
+ d = cells[h].diff;
+ }
+#ifdef SHOW_TRAIL
+ printf(" +%4d: (%5d,%5d): %3d / %3d\n",h,a+k,a,d-e,b-a); fflush(stdout);
+#endif
+ atrace[--atlen] = (uint16) (b-a);
+ atrace[--atlen] = (uint16) (d-e);
+ b = a;
+ e = d;
+ }
+ if (h >= 0)
+ { for (h = cells[h].ptr; h >= 0; h = cells[h].ptr)
+ { k = cells[h].diag;
+ a = cells[h].mark - k;
+ atrace[--atlen] = (uint16) (b-a);
+ d = cells[h].diff;
+ atrace[--atlen] = (uint16) (d-e);
+#ifdef SHOW_TRAIL
+ printf(" %4d: (%5d,%5d): %3d / %3d\n",h,a+k,a,d-e,b-a); fflush(stdout);
+#endif
+ b = a;
+ e = d;
+ }
+ if (b+k != trimx)
+ { atrace[--atlen] = (uint16) (b-trimy);
+ atrace[--atlen] = (uint16) (trimd-e);
+#ifdef SHOW_TRAIL
+ printf(" (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,b-trimy); fflush(stdout);
+#endif
+ }
+ else if (b != trimy)
+ { atrace[atlen+1] = (uint16) (atrace[atlen+1] + (b-trimy));
+ atrace[atlen] = (uint16) (atrace[atlen] + (trimd-e));
+#ifdef SHOW_TRAIL
+ printf(" @ (%5d,%5d): %3d / %3d\n",trimx,trimy,trimd-e,b-trimy); fflush(stdout);
+#endif
+ }
+ }
+
+ apath->abpos = trimx;
+ apath->bbpos = trimy;
+ apath->diffs = trimd;
+ apath->tlen = - atlen;
+ apath->trace = atrace + atlen;
+ }
+
+ return (0);
+}
+
+
+/* Extend an alignment of aseq and bseq outward from the point with anti-diagonal
+   "anti" on diagonal "diag": a prefix extension if prefix is non-zero, a suffix
+   extension otherwise.  The search is restricted to diagonals in [diag-lbord,
+   diag+hbord]; a negative bound leaves that side unrestricted.
+   See associated .h file for the precise definition of the interface.
+*/
+
+int Find_Extension(Alignment *align, Work_Data *ework, Align_Spec *espec,
+ int diag, int anti, int lbord, int hbord, int prefix)
+{ _Work_Data *work = ( _Work_Data *) ework;
+ _Align_Spec *spec = (_Align_Spec *) espec;
+
+ Path *apath;
+ int minp, maxp;
+
+ { int alen, blen;
+ int maxtp, wsize;
+
+ alen = align->alen;
+ blen = align->blen;
+
+ wsize = VectorEn*10000;
+ if (wsize >= work->vecmax)
+ if (enlarge_vector(work,wsize))
+ EXIT(1);
+
+ if (alen < blen)
+ maxtp = 2*(blen/spec->trace_space+2);
+ else
+ maxtp = 2*(alen/spec->trace_space+2);
+ wsize = 2*maxtp*sizeof(uint16);
+ if (wsize > work->pntmax)
+ if (enlarge_points(work,wsize))
+ EXIT(1);
+
+ apath = align->path;
+ apath->trace = ((uint16 *) work->points) + maxtp;
+ }
+
+#ifdef DEBUG_PASSES
+ printf("\n");
+#endif
+
+ if (lbord < 0)
+ minp = -INT32_MAX;
+ else
+ minp = diag-lbord;
+ if (hbord < 0)
+ maxp = INT32_MAX;
+ else
+ maxp = diag+hbord;
+
+ if (prefix)
+ { if (reverse_extend(work,spec,align,diag,anti,minp,maxp))
+ EXIT(1);
+ apath->aepos = (anti-diag)/2;
+ apath->bepos = (anti+diag)/2;
+#ifdef DEBUG_PASSES
+ printf("E1 (%d,%d) => (%d,%d) %d\n",
+ (anti+diag)/2,(anti-diag)/2,apath->read_A_match_start_,apath->read_B_match_start_,apath->diffs);
+#endif
+ }
+ else
+ { if (forward_extend(work,spec,align,diag,anti,minp,maxp))
+ EXIT(1);
+ apath->abpos = (anti-diag)/2;
+ apath->bbpos = (anti+diag)/2;
+#ifdef DEBUG_PASSES
+ printf("F1 (%d,%d) => (%d,%d) %d\n",
+ (anti+diag)/2,(anti-diag)/2,apath->read_A_match_end_,apath->read_B_match_end_,apath->diffs);
+#endif
+ }
+
+#ifdef DEBUG_POINTS
+ { uint16 *trace = (uint16 *) apath->trace;
+ int a, h;
+
+ printf("\nA-path (%d,%d)->(%d,%d)",apath->read_A_match_start_,apath->read_B_match_start_,apath->read_A_match_end_,apath->read_B_match_end_);
+ printf(" %c\n",(COMP(_align->reverse_complement_match_) ? 'c' : 'n'));
+ a = apath->read_B_match_start_;
+ for (h = 1; h < apath->tlen; h += 2)
+ { int dif = trace[h-1];
+ int del = trace[h];
+ a += del;
+ printf(" %d / %d (%d)\n",dif,del,a);
+ }
+ }
+#endif
+
+ return (0);
+}
+
+
+/****************************************************************************************\
+* *
+* OVERLAP MANIPULATION *
+* *
+\****************************************************************************************/
+
+static int64 PtrSize = sizeof(void *);
+static int64 OvlIOSize = sizeof(Overlap) - sizeof(void *);
+
+int Read_Overlap(FILE *input, Overlap *ovl)
+{ if (fread( ((char *) ovl) + PtrSize, OvlIOSize, 1, input) != 1)
+ return (1);
+ return (0);
+}
+
+int Read_Trace(FILE *input, Overlap *ovl, int tbytes)
+{ if (tbytes > 0 && ovl->path.tlen > 0)
+ { if (fread(ovl->path.trace, tbytes*ovl->path.tlen, 1, input) != 1)
+ return (1);
+ }
+ return (0);
+}
+
+void Write_Overlap(FILE *output, Overlap *ovl, int tbytes)
+{ fwrite( ((char *) ovl) + PtrSize, OvlIOSize, 1, output);
+ if (ovl->path.trace != NULL)
+ fwrite(ovl->path.trace,tbytes,ovl->path.tlen,output);
+}
+
+void Compress_TraceTo8(Overlap *ovl)
+{ uint16 *t16 = (uint16 *) ovl->path.trace;
+ uint8 *t8 = (uint8 *) ovl->path.trace;
+ int j;
+
+ for (j = 0; j < ovl->path.tlen; j++)
+ t8[j] = (uint8) (t16[j]);
+}
+
+void Decompress_TraceTo16(Overlap *ovl)
+{ uint16 *t16 = (uint16 *) ovl->path.trace;
+ uint8 *t8 = (uint8 *) ovl->path.trace;
+ int j;
+
+ for (j = ovl->path.tlen-1; j >= 0; j--)
+ t16[j] = t8[j];
+}
+
+void Print_Overlap(FILE *output, Overlap *ovl, int tbytes, int indent)
+{ int i;
+
+ fprintf(output,"%*s%d vs. ",indent,"",ovl->aread);
+ if (COMP(ovl->flags))
+ fprintf(output,"c(%d)\n",ovl->bread);
+ else
+ fprintf(output,"%d\n",ovl->bread);
+ fprintf(output,"%*s [%d,%d] vs [%d,%d] w. %d diffs\n",indent,"",
+ ovl->path.abpos,ovl->path.aepos,ovl->path.bbpos,ovl->path.bepos,ovl->path.diffs);
+
+ if (tbytes == 1)
+ { uint8 *trace = (uint8 *) (ovl->path.trace);
+ if (trace != NULL)
+ { int p = ovl->path.bbpos + trace[1];
+ fprintf(output,"%*sTrace: %3d/%5d",indent,"",trace[0],p);
+ for (i = 3; i < ovl->path.tlen; i += 2)
+ { if (i%10 == 0)
+ fprintf(output,"\n%*s",indent+6,"");
+ p += trace[i];
+ fprintf(output," %3d/%5d",trace[i-1],p);
+ }
+ fprintf(output,"\n");
+ }
+ }
+ else
+ { uint16 *trace = (uint16 *) (ovl->path.trace);
+ if (trace != NULL)
+ { int p = ovl->path.bbpos + trace[1];
+ fprintf(output,"%*sTrace: %3d/%5d",indent,"",trace[0],p);
+ for (i = 3; i < ovl->path.tlen; i += 2)
+ { if (i%10 == 0)
+ fprintf(output,"\n%*s",indent+6,"");
+ p += trace[i];
+ fprintf(output," %3d/%5d",trace[i-1],p);
+ }
+ fprintf(output,"\n");
+ }
+ }
+}
+
+int Check_Trace_Points(Overlap *ovl, int tspace, int verbose, char *fname)
+{ int i, p;
+
+ if (((ovl->path.aepos-1)/tspace - ovl->path.abpos/tspace)*2 != ovl->path.tlen-2)
+ { if (verbose)
+ EPRINTF(EPLACE," %s: Wrong number of trace points\n",fname);
+ return (1);
+ }
+ p = ovl->path.bbpos;
+ if (tspace <= TRACE_XOVR)
+ { uint8 *trace8 = (uint8 *) ovl->path.trace;
+ for (i = 1; i < ovl->path.tlen; i += 2)
+ p += trace8[i];
+ }
+ else
+ { uint16 *trace16 = (uint16 *) ovl->path.trace;
+ for (i = 1; i < ovl->path.tlen; i += 2)
+ p += trace16[i];
+ }
+ if (p != ovl->path.bepos)
+ { if (verbose)
+ EPRINTF(EPLACE," %s: Trace point sum != aligned interval\n",fname);
+ return (1);
+ }
+ return (0);
+}
+
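+/* A minimal usage sketch, not part of the original library: it shows how the
+   overlap I/O routines above fit together for one record of a stream written
+   with Write_Overlap.  The function name, the never-defined USAGE_SKETCH guard,
+   and the assumption that the caller already knows tspace (and that <stdlib.h>
+   is pulled in at the top of this file) are all illustrative choices.         */
+
+#ifdef USAGE_SKETCH
+
+static int read_and_check_overlap(FILE *input, int tspace, Overlap *ovl)
+{ int tbytes;
+
+  tbytes = (tspace <= TRACE_XOVR) ? sizeof(uint8) : sizeof(uint16);
+
+  if (Read_Overlap(input,ovl))                      //  fixed-size part of the record
+    return (1);
+  ovl->path.trace = malloc(ovl->path.tlen*sizeof(uint16));
+  if (ovl->path.trace == NULL)
+    return (1);
+  if (Read_Trace(input,ovl,tbytes))                 //  tbytes*tlen bytes of trace data
+    return (1);
+  if (Check_Trace_Points(ovl,tspace,1,"sketch"))    //  validate in the stored width
+    return (1);
+  if (tbytes == 1)
+    Decompress_TraceTo16(ovl);                      //  widen in place for 16-bit consumers
+  return (0);
+}
+
+#endif
+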
+
+void Flip_Alignment(Alignment *align, int full)
+{ char *aseq = align->aseq;
+ char *bseq = align->bseq;
+ int alen = align->alen;
+ int blen = align->blen;
+ Path *path = align->path;
+ int comp = COMP(align->flags);
+
+ int *trace = (int *) path->trace;
+ int tlen = path->tlen;
+
+ int i, j, p;
+
+ if (comp)
+ { p = path->abpos;
+ path->abpos = blen - path->bepos;
+ path->bepos = alen - p;
+ p = path->aepos;
+ path->aepos = blen - path->bbpos;
+ path->bbpos = alen - p;
+
+ if (full)
+ { alen += 2;
+ blen += 2;
+
+ for (i = 0; i < tlen; i++)
+ if ((p = trace[i]) < 0)
+ trace[i] = alen + p;
+ else
+ trace[i] = p - blen;
+
+ i = tlen-1;
+ j = 0;
+ while (j < i)
+ { p = trace[i];
+ trace[i] = trace[j];
+ trace[j] = p;
+ i -= 1;
+ j += 1;
+ }
+
+ alen -= 2;
+ blen -= 2;
+ }
+ }
+ else
+ { p = path->abpos;
+ path->abpos = path->bbpos;
+ path->bbpos = p;
+ p = path->aepos;
+ path->aepos = path->bepos;
+ path->bepos = p;
+
+ if (full)
+ for (i = 0; i < tlen; i++)
+ trace[i] = - (trace[i]);
+ }
+
+ align->aseq = bseq;
+ align->bseq = aseq;
+ align->alen = blen;
+ align->blen = alen;
+}
+
+
+/****************************************************************************************\
+* *
+* ALIGNMENT PRINTING *
+* *
+\****************************************************************************************/
+
+/* Complement the sequence in fragment aseq. The operation does the
+ complementation/reversal in place. Calling it a second time on a
+ given fragment restores it to its original state. */
+
+void Complement_Seq(char *aseq, int len)
+{ char *s, *t;
+ int c;
+
+ s = aseq;
+ t = aseq + (len-1);
+ while (s < t)
+ { c = 3 - *s;
+ *s++ = (char) (3 - *t);
+ *t-- = (char) c;
+ }
+ if (s == t)
+ *s = (char) (3 - *s);
+}
+
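+/* Illustration only (the helper and the never-defined USAGE_SKETCH guard are
+   hypothetical): Complement_Seq acts on the 0..3 numeric base encoding used in
+   this file, and because it is an involution a caller can flip a fragment,
+   work on the reverse complement, and flip it back.                          */
+
+#ifdef USAGE_SKETCH
+
+static void with_complemented(char *seq, int len, void (*use)(char *, int))
+{ Complement_Seq(seq,len);     //  complement/reverse in place
+  use(seq,len);                //  operate on the reverse complement
+  Complement_Seq(seq,len);     //  second call restores the original fragment
+}
+
+#endif
+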
+
+/* Print an alignment between a and b, as given by the (unpacked) trace, to file.
+   indent is the left margin, width the number of columns per row, border the
+   number of unaligned flanking symbols shown at either end, upper selects
+   upper-case output, and coord, if positive, the field width used for the
+   row coordinates. */
+
+static char ToL[8] = { 'a', 'c', 'g', 't', '.', '[', ']', '-' };
+static char ToU[8] = { 'A', 'C', 'G', 'T', '.', '[', ']', '-' };
+
+int Print_Alignment(FILE *file, Alignment *align, Work_Data *ework,
+ int indent, int width, int border, int upper, int coord)
+{ _Work_Data *work = (_Work_Data *) ework;
+ int *trace = align->path->trace;
+ int tlen = align->path->tlen;
+
+ char *Abuf, *Bbuf, *Dbuf;
+ int i, j, o;
+ char *a, *b;
+ char mtag, dtag;
+ int prefa, prefb;
+ int aend, bend;
+ int sa, sb;
+ int match, diff;
+ char *N2A;
+
+ if (trace == NULL) return (0);
+
+#ifdef SHOW_TRACE
+ fprintf(file,"\nTrace:\n");
+ for (i = 0; i < tlen; i++)
+ fprintf(file," %3d\n",trace[i]);
+#endif
+
+ o = sizeof(char)*3*(width+1);
+ if (o > work->vecmax)
+ if (enlarge_vector(work,o))
+ EXIT(1);
+
+ if (upper)
+ N2A = ToU;
+ else
+ N2A = ToL;
+
+ Abuf = (char *) work->vector;
+ Bbuf = Abuf + (width+1);
+ Dbuf = Bbuf + (width+1);
+
+ aend = align->path->aepos;
+ bend = align->path->bepos;
+
+ Abuf[width] = Bbuf[width] = Dbuf[width] = '\0';
+ /* buffer/output next column */
+#define COLUMN(x,y) \
+{ int u, v; \
+ if (o >= width) \
+ { fprintf(file,"\n"); \
+ fprintf(file,"%*s",indent,""); \
+ if (coord > 0) \
+ { if (sa <= aend) \
+ fprintf(file," %*d",coord,sa); \
+ else \
+ fprintf(file," %*s",coord,""); \
+ fprintf(file," %s\n",Abuf); \
+ fprintf(file,"%*s %*s %s\n",indent,"",coord,"",Dbuf); \
+ fprintf(file,"%*s",indent,""); \
+ if (sb <= bend) \
+ fprintf(file," %*d",coord,sb); \
+ else \
+ fprintf(file," %*s",coord,""); \
+ fprintf(file," %s",Bbuf); \
+ } \
+ else \
+ { fprintf(file," %s\n",Abuf); \
+ fprintf(file,"%*s %s\n",indent,"",Dbuf); \
+ fprintf(file,"%*s %s",indent,"",Bbuf); \
+ } \
+ fprintf(file," %5.1f%%\n",(100.*diff)/(diff+match)); \
+ o = 0; \
+ sa = i; \
+ sb = j; \
+ match = diff = 0; \
+ } \
+ u = (x); \
+ v = (y); \
+ if (u == 4 || v == 4) \
+ Dbuf[o] = ' '; \
+ else if (u == v) \
+ Dbuf[o] = mtag; \
+ else \
+ Dbuf[o] = dtag; \
+ Abuf[o] = N2A[u]; \
+ Bbuf[o] = N2A[v]; \
+ o += 1; \
+}
+
+ a = align->aseq - 1;
+ b = align->bseq - 1;
+
+ o = 0;
+ i = j = 1;
+
+ prefa = align->path->abpos;
+ prefb = align->path->bbpos;
+
+ if (prefa > border)
+ { i = prefa-(border-1);
+ prefa = border;
+ }
+ if (prefb > border)
+ { j = prefb-(border-1);
+ prefb = border;
+ }
+
+ sa = i;
+ sb = j;
+ mtag = ':';
+ dtag = ':';
+
+ while (prefa > prefb)
+ { COLUMN(a[i],4)
+ i += 1;
+ prefa -= 1;
+ }
+ while (prefb > prefa)
+ { COLUMN(4,b[j])
+ j += 1;
+ prefb -= 1;
+ }
+ while (prefa > 0)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ prefa -= 1;
+ }
+
+ mtag = '[';
+ if (prefb > 0)
+ COLUMN(5,5)
+
+ mtag = '|';
+ dtag = '*';
+
+ match = diff = 0;
+
+ { int p, c; /* Output columns of the alignment until the end of the trace is reached */
+
+ for (c = 0; c < tlen; c++)
+ if ((p = trace[c]) < 0)
+ { p = -p;
+ while (i != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(7,b[j])
+ j += 1;
+ diff += 1;
+ }
+ else
+ { while (j != p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ COLUMN(a[i],7)
+ i += 1;
+ diff += 1;
+ }
+ p = align->path->aepos;
+ while (i <= p)
+ { COLUMN(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ }
+
+ { int c; /* Output remaining columns, including the unaligned suffix */
+
+ mtag = ']';
+ if (a[i] != 4 && b[j] != 4 && border > 0)
+ COLUMN(6,6)
+
+ mtag = ':';
+ dtag = ':';
+
+ c = 0;
+ while (c < border && (a[i] != 4 || b[j] != 4))
+ { if (a[i] != 4)
+ if (b[j] != 4)
+ { COLUMN(a[i],b[j])
+ i += 1;
+ j += 1;
+ }
+ else
+ { COLUMN(a[i],4)
+ i += 1;
+ }
+ else
+ { COLUMN(4,b[j])
+ j += 1;
+ }
+ c += 1;
+ }
+ }
+
+ /* Print the remainder of the buffered columns */
+
+ fprintf(file,"\n");
+ fprintf(file,"%*s",indent,"");
+ if (coord > 0)
+ { if (sa <= aend)
+ fprintf(file," %*d",coord,sa);
+ else
+ fprintf(file," %*s",coord,"");
+ fprintf(file," %.*s\n",o,Abuf);
+ fprintf(file,"%*s %*s %.*s\n",indent,"",coord,"",o,Dbuf);
+ fprintf(file,"%*s",indent,"");
+ if (sb <= bend)
+ fprintf(file," %*d",coord,sb);
+ else
+ fprintf(file," %*s",coord,"");
+ fprintf(file," %.*s",o,Bbuf);
+ }
+ else
+ { fprintf(file," %.*s\n",o,Abuf);
+ fprintf(file,"%*s %.*s\n",indent,"",o,Dbuf);
+ fprintf(file,"%*s %.*s",indent,"",o,Bbuf);
+ }
+ if (diff+match > 0)
+ fprintf(file," %5.1f%%\n",(100.*diff)/(diff+match));
+ else
+ fprintf(file,"\n");
+
+ fflush(file);
+ return (0);
+}
+
+int Print_Reference(FILE *file, Alignment *align, Work_Data *ework,
+ int indent, int block, int border, int upper, int coord)
+{ _Work_Data *work = (_Work_Data *) ework;
+ int *trace = align->path->trace;
+ int tlen = align->path->tlen;
+
+ char *Abuf, *Bbuf, *Dbuf;
+ int i, j, o;
+ char *a, *b;
+ char mtag, dtag;
+ int prefa, prefb;
+ int aend, bend;
+ int sa, sb, s0;
+ int match, diff;
+ char *N2A;
+ int vmax;
+
+ if (trace == NULL) return (0);
+
+#ifdef SHOW_TRACE
+ fprintf(file,"\nTrace:\n");
+ for (i = 0; i < tlen; i++)
+ fprintf(file," %3d\n",trace[i]);
+#endif
+
+ vmax = work->vecmax/3;
+ o = sizeof(char)*6*(block+1);
+ if (o > vmax)
+ { if (enlarge_vector(work,3*o))
+ EXIT(1);
+ vmax = work->vecmax/3;
+ }
+
+ Abuf = (char *) work->vector;
+ Bbuf = Abuf + vmax;
+ Dbuf = Bbuf + vmax;
+
+ if (upper)
+ N2A = ToU;
+ else
+ N2A = ToL;
+
+ aend = align->path->aepos;
+ bend = align->path->bepos;
+
+#define BLOCK(x,y) \
+{ int u, v; \
+ if (i%block == 1 && i != s0 && x < 4 && o > 0) \
+ { fprintf(file,"\n"); \
+ fprintf(file,"%*s",indent,""); \
+ if (coord > 0) \
+ { if (sa <= aend) \
+ fprintf(file," %*d",coord,sa); \
+ else \
+ fprintf(file," %*s",coord,""); \
+ fprintf(file," %.*s\n",o,Abuf); \
+ fprintf(file,"%*s %*s %.*s\n",indent,"",coord,"",o,Dbuf); \
+ fprintf(file,"%*s",indent,""); \
+ if (sb <= bend) \
+ fprintf(file," %*d",coord,sb); \
+ else \
+ fprintf(file," %*s",coord,""); \
+ fprintf(file," %.*s",o,Bbuf); \
+ } \
+ else \
+ { fprintf(file," %.*s\n",o,Abuf); \
+ fprintf(file,"%*s %.*s\n",indent,"",o,Dbuf); \
+ fprintf(file,"%*s %.*s",indent,"",o,Bbuf); \
+ } \
+ fprintf(file," %5.1f%%\n",(100.*diff)/(diff+match)); \
+ o = 0; \
+ sa = i; \
+ sb = j; \
+ match = diff = 0; \
+ } \
+ u = (x); \
+ v = (y); \
+ if (u == 4 || v == 4) \
+ Dbuf[o] = ' '; \
+ else if (u == v) \
+ Dbuf[o] = mtag; \
+ else \
+ Dbuf[o] = dtag; \
+ Abuf[o] = N2A[u]; \
+ Bbuf[o] = N2A[v]; \
+ o += 1; \
+ if (o >= vmax) \
+ { if (enlarge_vector(work,3*o)) \
+ EXIT(1); \
+ vmax = work->vecmax/3; \
+ memmove(work->vector+2*vmax,Dbuf,o); \
+ memmove(work->vector+vmax,Bbuf,o); \
+ memmove(work->vector,Abuf,o); \
+ Abuf = (char *) work->vector; \
+ Bbuf = Abuf + vmax; \
+ Dbuf = Bbuf + vmax; \
+ } \
+}
+
+ a = align->aseq - 1;
+ b = align->bseq - 1;
+
+ o = 0;
+ i = j = 1;
+
+ prefa = align->path->abpos;
+ prefb = align->path->bbpos;
+
+ if (prefa > border)
+ { i = prefa-(border-1);
+ prefa = border;
+ }
+ if (prefb > border)
+ { j = prefb-(border-1);
+ prefb = border;
+ }
+
+ s0 = i;
+ sa = i;
+ sb = j;
+ mtag = ':';
+ dtag = ':';
+
+ while (prefa > prefb)
+ { BLOCK(a[i],4)
+ i += 1;
+ prefa -= 1;
+ }
+ while (prefb > prefa)
+ { BLOCK(4,b[j])
+ j += 1;
+ prefb -= 1;
+ }
+ while (prefa > 0)
+ { BLOCK(a[i],b[j])
+ i += 1;
+ j += 1;
+ prefa -= 1;
+ }
+
+ mtag = '[';
+ if (prefb > 0)
+ BLOCK(5,5)
+
+ mtag = '|';
+ dtag = '*';
+
+ match = diff = 0;
+
+ { int p, c; /* Output columns of the alignment until the end of the trace is reached */
+
+ for (c = 0; c < tlen; c++)
+ if ((p = trace[c]) < 0)
+ { p = -p;
+ while (i != p)
+ { BLOCK(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ BLOCK(7,b[j])
+ j += 1;
+ diff += 1;
+ }
+ else
+ { while (j != p)
+ { BLOCK(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ BLOCK(a[i],7)
+ i += 1;
+ diff += 1;
+ }
+ p = align->path->aepos;
+ while (i <= p)
+ { BLOCK(a[i],b[j])
+ if (a[i] == b[j])
+ match += 1;
+ else
+ diff += 1;
+ i += 1;
+ j += 1;
+ }
+ }
+
+ { int c; /* Output remaining columns, including the unaligned suffix */
+
+ mtag = ']';
+ if (a[i] != 4 && b[j] != 4 && border > 0)
+ BLOCK(6,6)
+
+ mtag = ':';
+ dtag = ':';
+
+ c = 0;
+ while (c < border && (a[i] != 4 || b[j] != 4))
+ { if (a[i] != 4)
+ if (b[j] != 4)
+ { BLOCK(a[i],b[j])
+ i += 1;
+ j += 1;
+ }
+ else
+ { BLOCK(a[i],4)
+ i += 1;
+ }
+ else
+ { BLOCK(4,b[j])
+ j += 1;
+ }
+ c += 1;
+ }
+ }
+
+ /* Print the remainder of the buffered columns */
+
+ fprintf(file,"\n");
+ fprintf(file,"%*s",indent,"");
+ if (coord > 0)
+ { if (sa <= aend)
+ fprintf(file," %*d",coord,sa);
+ else
+ fprintf(file," %*s",coord,"");
+ fprintf(file," %.*s\n",o,Abuf);
+ fprintf(file,"%*s %*s %.*s\n",indent,"",coord,"",o,Dbuf);
+ fprintf(file,"%*s",indent,"");
+ if (sb <= bend)
+ fprintf(file," %*d",coord,sb);
+ else
+ fprintf(file," %*s",coord,"");
+ fprintf(file," %.*s",o,Bbuf);
+ }
+ else
+ { fprintf(file," %.*s\n",o,Abuf);
+ fprintf(file,"%*s %.*s\n",indent,"",o,Dbuf);
+ fprintf(file,"%*s %.*s",indent,"",o,Bbuf);
+ }
+ if (diff+match > 0)
+ fprintf(file," %5.1f%%\n",(100.*diff)/(diff+match));
+ else
+ fprintf(file,"\n");
+
+ fflush(file);
+ return (0);
+}
+
+/* Print an ASCII representation of the overlap in align between fragments
+   a and b to the given file. */
+
+static inline void repchar(FILE *file, int symbol, int rep)
+{ while (rep-- > 0)
+ fputc(symbol,file);
+}
+
+void Alignment_Cartoon(FILE *file, Alignment *align, int indent, int coord)
+{ int alen = align->alen;
+ int blen = align->blen;
+ Path *path = align->path;
+ int comp = COMP(align->flags);
+ int w;
+
+ fprintf(file,"%*s",indent,"");
+ if (path->abpos > 0)
+ fprintf(file," %*d ",coord,path->abpos);
+ else
+ fprintf(file,"%*s",coord+5,"");
+ if (path->aepos < alen)
+ fprintf(file,"%*s%d",coord+8,"",alen-path->aepos);
+ fprintf(file,"\n");
+
+ fprintf(file,"%*s",indent,"");
+ if (path->abpos > 0)
+ { fprintf(file,"A ");
+ w = Number_Digits((int64) path->abpos);
+ repchar(file,' ',coord-w);
+ repchar(file,'=',w+3);
+ fputc('+',file);
+ repchar(file,'-',coord+5);
+ }
+ else
+ { fprintf(file,"A %*s",coord+4,"");
+ repchar(file,'-',coord+5);
+ }
+
+ if (path->aepos < alen)
+ { fputc('+',file);
+ w = Number_Digits((int64) (alen-path->aepos));
+ repchar(file,'=',w+2);
+ fputc('>',file);
+ repchar(file,' ',w);
+ }
+ else
+ { fputc('>',file);
+ repchar(file,' ',coord+3);
+ }
+
+ { int asub, bsub;
+
+ asub = path->aepos - path->abpos;
+ bsub = path->bepos - path->bbpos;
+ fprintf(file," dif/(len1+len2) = %d/(%d+%d) = %5.2f%%\n",
+ path->diffs,asub,bsub,(200.*path->diffs)/(asub+bsub));
+ }
+
+ { int sym1e, sym2e;
+ int sym1p, sym2p;
+
+ if (comp > 0)
+ { sym1p = '<'; sym2p = '-'; sym1e = '<'; sym2e = '='; }
+ else
+ { sym1p = '-'; sym2p = '>'; sym1e = '='; sym2e = '>'; }
+
+ fprintf(file,"%*s",indent,"");
+ if (path->bbpos > 0)
+ { fprintf(file,"B ");
+ w = Number_Digits((int64) path->bbpos);
+ repchar(file,' ',coord-w);
+ fputc(sym1e,file);
+ repchar(file,'=',w+2);
+ fputc('+',file);
+ repchar(file,'-',coord+5);
+ }
+ else
+ { fprintf(file,"B ");
+ repchar(file,' ',coord+3);
+ fputc(sym1p,file);
+ repchar(file,'-',coord+5);
+ }
+ if (path->bepos < blen)
+ { fprintf(file,"+");
+ w = Number_Digits((int64) (blen-path->bepos));
+ repchar(file,'=',w+2);
+ fprintf(file,"%c\n",sym2e);
+ }
+ else
+ fprintf(file,"%c\n",sym2p);
+ }
+
+ fprintf(file,"%*s",indent,"");
+ if (path->bbpos > 0)
+ fprintf(file," %*d ",coord,path->bbpos);
+ else
+ fprintf(file,"%*s",coord+5,"");
+ if (path->bepos < blen)
+ fprintf(file,"%*s%d",coord+8,"",blen-path->bepos);
+ fprintf(file,"\n");
+
+ fflush(file);
+}
+
+
+/****************************************************************************************\
+* *
+* O(ND) trace algorithm *
+* *
+\****************************************************************************************/
+
+
+#ifdef DEBUG_AWAVE
+
+static void print_awave(int *V, int low, int hgh)
+{ int k;
+
+ printf(" [%6d,%6d]: ",low,hgh);
+ for (k = low; k <= hgh; k++)
+ printf(" %3d",V[k]);
+ printf("\n");
+ fflush(stdout);
+}
+
+#endif
+
+#ifdef DEBUG_ALIGN
+
+static int depth = 0;
+
+#endif
+
+typedef struct
+ { int *Stop; // Ongoing stack of alignment indels
+ char *Aabs, *Babs; // Absolute base of A and B sequences
+
+ int **PVF, **PHF; // List of waves for iterative np algorithms
+ int mida, midb; // mid point division for mid-point algorithms
+
+ int *VF, *VB; // Forward/Reverse waves for the O(nd) algorithm
+ // (used only by the Compute_Trace_ND_ALL fallback below)
+ } Trace_Waves;
+
+static int dandc_nd(char *A, int M, char *B, int N, Trace_Waves *wave)
+{ int x, y;
+ int D;
+
+#ifdef DEBUG_ALIGN
+ printf("%*s %ld,%ld: %d vs %d\n",depth,"",A-wave->Aabs,B-wave->Babs,M,N);
+#endif
+
+ if (M <= 0)
+ { x = (wave->Aabs-A)-1;
+ for (y = 1; y <= N; y++)
+ { *wave->Stop++ = x;
+#ifdef DEBUG_SCRIPT
+ printf("%*s *I %ld(%ld)\n",depth,"",y+(B-wave->Babs),(A-wave->Aabs)+1);
+#endif
+ }
+ return (N);
+ }
+ if (N <= 0)
+ { y = (B-wave->Babs)+1;
+ for (x = 1; x <= M; x++)
+ { *wave->Stop++ = y;
+#ifdef DEBUG_SCRIPT
+ printf("%*s *D %ld(%ld)\n",depth,"",x+(A-wave->Aabs),(B-wave->Babs)+1);
+#endif
+ }
+ return (M);
+ }
+
+ { int *VF = wave->VF;
+ int *VB = wave->VB;
+ int flow; // fhgh == D !
+ int blow, bhgh;
+ char *a;
+
+ y = 0;
+ if (N < M)
+ while (y < N && B[y] == A[y])
+ y += 1;
+ else
+ { while (y < M && B[y] == A[y])
+ y += 1;
+ if (y >= M && N == M)
+ return (0);
+ }
+
+ flow = 0;
+ VF[0] = y;
+ VF[-1] = -2;
+
+ x = N-M;
+ a = A-x;
+ y = N-1;
+ if (N > M)
+ while (y >= x && B[y] == a[y])
+ y -= 1;
+ else
+ while (y >= 0 && B[y] == a[y])
+ y -= 1;
+
+ blow = bhgh = -x;
+ VB += x;
+ VB[blow] = y;
+ VB[blow-1] = N+1;
+
+ for (D = 1; 1; D += 1)
+ { int k, r;
+ int am, ac, ap;
+
+ // Forward wave
+
+ flow -= 1;
+ am = ac = VF[flow-1] = -2;
+
+ a = A + D;
+ x = M - D;
+ for (k = D; k >= flow; k--)
+ { ap = ac;
+ ac = am+1;
+ am = VF[k-1];
+
+ if (ac < am)
+ if (ap < am)
+ y = am;
+ else
+ y = ap;
+ else
+ if (ap < ac)
+ y = ac;
+ else
+ y = ap;
+
+ if (blow <= k && k <= bhgh)
+ { r = VB[k];
+ if (y > r)
+ { D = (D<<1)-1;
+ if (ap > r)
+ y = ap;
+ else if (ac > r)
+ y = ac;
+ else
+ y = r+1;
+ x = k+y;
+ goto OVERLAP2;
+ }
+ }
+
+ if (N < x)
+ while (y < N && B[y] == a[y])
+ y += 1;
+ else
+ while (y < x && B[y] == a[y])
+ y += 1;
+
+ VF[k] = y;
+ a -= 1;
+ x += 1;
+ }
+
+#ifdef DEBUG_AWAVE
+ print_awave(VF,flow,D);
+#endif
+
+ // Reverse Wave
+
+ bhgh += 1;
+ blow -= 1;
+ am = ac = VB[blow-1] = N+1;
+
+ a = A + bhgh;
+ x = -bhgh;
+ for (k = bhgh; k >= blow; k--)
+ { ap = ac+1;
+ ac = am;
+ am = VB[k-1];
+
+ if (ac > am)
+ if (ap > am)
+ y = am;
+ else
+ y = ap;
+ else
+ if (ap > ac)
+ y = ac;
+ else
+ y = ap;
+
+ if (flow <= k && k <= D)
+ { r = VF[k];
+ if (y <= r)
+ { D = (D << 1);
+ if (ap <= r)
+ y = ap;
+ else if (ac <= r)
+ y = ac;
+ else
+ y = r;
+ x = k+y;
+ goto OVERLAP2;
+ }
+ }
+
+ y -= 1;
+ if (x > 0)
+ while (y >= x && B[y] == a[y])
+ y -= 1;
+ else
+ while (y >= 0 && B[y] == a[y])
+ y -= 1;
+
+ VB[k] = y;
+ a -= 1;
+ x += 1;
+ }
+
+#ifdef DEBUG_AWAVE
+ print_awave(VB,blow,bhgh);
+#endif
+ }
+ }
+
+OVERLAP2:
+
+#ifdef DEBUG_ALIGN
+ printf("%*s (%d,%d) @ %d\n",depth,"",x,y,D);
+ fflush(stdout);
+#endif
+ if (D > 1)
+ {
+#ifdef DEBUG_ALIGN
+ depth += 2;
+#endif
+ dandc_nd(A,x,B,y,wave);
+ dandc_nd(A+x,M-x,B+y,N-y,wave);
+#ifdef DEBUG_ALIGN
+ depth -= 2;
+#endif
+ }
+ else if (D == 1)
+ { if (M > N)
+ { *wave->Stop++ = (B-wave->Babs)+y+1;
+#ifdef DEBUG_SCRIPT
+ printf("%*s D %ld(%ld)\n",depth,"",(A-wave->Aabs)+x,(B-wave->Babs)+y+1);
+#endif
+ }
+ else if (M < N)
+ { *wave->Stop++ = (wave->Aabs-A)-x-1;
+#ifdef DEBUG_SCRIPT
+ printf("%*s I %ld(%ld)\n",depth,"",(B-wave->Babs)+y,(A-wave->Aabs)+x+1);
+#endif
+ }
+#ifdef DEBUG_SCRIPT
+ else
+ printf("%*s %ld S %ld\n",depth,"",(wave->Aabs-A)+x,(B-wave->Babs)+y);
+#endif
+ }
+
+ return (D);
+}
+
+
+static int Compute_Trace_ND_ALL(Alignment *align, Work_Data *ework)
+{ _Work_Data *work = (_Work_Data *) ework;
+ Trace_Waves wave;
+
+ int L, D;
+ int asub, bsub;
+ Path *path;
+ int *trace;
+
+ path = align->path;
+ asub = path->aepos-path->abpos;
+ bsub = path->bepos-path->bbpos;
+
+ if (asub < bsub)
+ L = bsub;
+ else
+ L = asub;
+ L *= sizeof(int);
+ if (L > work->tramax)
+ if (enlarge_trace(work,L))
+ EXIT(1);
+
+ trace = wave.Stop = ((int *) work->trace);
+
+ D = 2*(path->diffs + 4)*sizeof(int);
+ if (D > work->vecmax)
+ if (enlarge_vector(work,D))
+ EXIT(1);
+
+ D = (path->diffs+3)/2;
+ wave.VF = ((int *) work->vector) + (D+1);
+ wave.VB = wave.VF + (2*D+1);
+
+ wave.Aabs = align->aseq;
+ wave.Babs = align->bseq;
+
+ path->diffs = dandc_nd(align->aseq+path->abpos,path->aepos-path->abpos,
+ align->bseq+path->bbpos,path->bepos-path->bbpos,&wave);
+ path->trace = trace;
+ path->tlen = wave.Stop - trace;
+ return (0);
+}
+
+
+/****************************************************************************************\
+* *
+* O(NP) tracing algorithms *
+* *
+\****************************************************************************************/
+
+/* Iterative O(np) algorithm for finding the alignment between two substrings (specified
+ by a Path record). The variation includes handling substitutions and guarantees
+ to find left-most alignments so that low complexity runs are always aligned in
+ the same way.
+*/
+
+#ifdef DEBUG_ALIGN
+
+static int ToA[4] = { 'a', 'c', 'g', 't' };
+
+#endif
+
+static int iter_np(char *A, int M, char *B, int N, Trace_Waves *wave, int mode)
+{ int **PVF = wave->PVF;
+ int **PHF = wave->PHF;
+ int D;
+ int del = M-N;
+
+ { int *F0, *F1, *F2;
+ int *HF;
+ int low, hgh;
+ int posl, posh;
+
+#ifdef DEBUG_ALIGN
+ printf("\n BASE %ld,%ld: %d vs %d\n",A-wave->Aabs,B-wave->Babs,M,N);
+ printf(" A = ");
+ for (D = 0; D < M; D++)
+ printf("%c",ToA[(int) A[D]]);
+ printf("\n");
+ printf(" B = ");
+ for (D = 0; D < N; D++)
+ printf("%c",ToA[(int) B[D]]);
+ printf("\n");
+#endif
+
+ if (del >= 0)
+ { low = 0;
+ hgh = del;
+ }
+ else
+ { low = del;
+ hgh = 0;
+ }
+
+ posl = -INT32_MAX;
+ posh = INT32_MAX;
+ if (wave->Aabs == wave->Babs)
+ { if (B == A)
+ { EPRINTF(EPLACE,"Error: self comparison starts on diagonal 0 (Compute_Trace)\n");
+ EXIT(-1);
+ }
+ else if (B < A)
+ posl = (B-A)+1;
+ else
+ posh = (B-A)-1;
+ }
+
+ F1 = PVF[-2];
+ F0 = PVF[-1];
+
+ for (D = low-1; D <= hgh+1; D++)
+ F1[D] = F0[D] = -2;
+ F0[0] = -1;
+
+ low += 1;
+ hgh -= 1;
+
+ for (D = 0; 1; D += 1)
+ { int k, i, j;
+ int am, ac, ap;
+ char *a;
+
+ F2 = F1;
+ F1 = F0;
+ F0 = PVF[D];
+ HF = PHF[D];
+
+ if ((D & 0x1) == 0)
+ { if (low > posl)
+ low -= 1;
+ if (hgh < posh)
+ hgh += 1;
+ }
+ F0[hgh+1] = F0[low-1] = -2;
+
+#define FS_MOVE(mdir,pdir) \
+ ac = F1[k]+1; \
+ if (ac < am) \
+ if (ap < am) \
+ { HF[k] = mdir; \
+ j = am; \
+ } \
+ else \
+ { HF[k] = pdir; \
+ j = ap; \
+ } \
+ else \
+ if (ap < ac) \
+ { HF[k] = 0; \
+ j = ac; \
+ } \
+ else \
+ { HF[k] = pdir; \
+ j = ap; \
+ } \
+ \
+ if (N < i) \
+ while (j < N && B[j] == a[j]) \
+ j += 1; \
+ else \
+ while (j < i && B[j] == a[j]) \
+ j += 1; \
+ F0[k] = j;
+
+ j = -2;
+ a = A + hgh;
+ i = M - hgh;
+ for (k = hgh; k > del; k--)
+ { ap = j+1;
+ am = F2[k-1];
+ FS_MOVE(-1,4)
+ a -= 1;
+ i += 1;
+ }
+
+ j = -2;
+ a = A + low;
+ i = M - low;
+ for (k = low; k < del; k++)
+ { ap = F2[k+1]+1;
+ am = j;
+ FS_MOVE(2,1)
+ a += 1;
+ i -= 1;
+ }
+
+ ap = F0[del+1]+1;
+ am = j;
+ FS_MOVE(2,4)
+
+#ifdef DEBUG_AWAVE
+ print_awave(F0,low,hgh);
+ print_awave(HF,low,hgh);
+#endif
+
+ if (F0[del] >= N)
+ break;
+ }
+ }
+
+ { int k, h, m, e, c;
+ int ap = (wave->Aabs-A)-1;
+ int bp = (B-wave->Babs)+1;
+
+ PHF[0][0] = 3;
+
+ c = N;
+ k = del;
+ e = PHF[D][k];
+ PHF[D][k] = 3;
+
+ if (mode == UPPERMOST)
+
+ while (e != 3)
+ { h = k+e;
+ if (e > 1)
+ h -= 3;
+ else if (e == 0)
+ D -= 1;
+ else
+ D -= 2;
+
+ if (h < k) // => e = -1 or 2, UPPERMOST
+ { char *a;
+
+ a = A + k;
+ if (k < 0)
+ m = -k;
+ else
+ m = 0;
+ if (PVF[D][h] <= c)
+ c = PVF[D][h]-1;
+ while (c >= m && a[c] == B[c])
+ c -= 1;
+ if (e == -1) // => edge is 2, others are 1, and 0
+ { if (c <= PVF[D+2][k+1])
+ { e = 4;
+ h = k+1;
+ D = D+2;
+ }
+ else if (c == PVF[D+1][k])
+ { e = 0;
+ h = k;
+ D = D+1;
+ }
+ else
+ PVF[D][h] = c+1;
+ }
+ else // => edge is 0, others are 1, and 2 (if k != del), 0 (otherwise)
+ { if (k == del)
+ m = D;
+ else
+ m = D-2;
+ if (c <= PVF[m][k+1])
+ { if (k == del)
+ e = 4;
+ else
+ e = 1;
+ h = k+1;
+ D = m;
+ }
+ else if (c == PVF[D-1][k])
+ { e = 0;
+ h = k;
+ D = D-1;
+ }
+ else
+ PVF[D][h] = c+1;
+ }
+ }
+
+ m = PHF[D][h];
+ PHF[D][h] = e;
+ e = m;
+ k = h;
+ }
+
+ else if (mode == LOWERMOST)
+
+ while (e != 3)
+ { h = k+e;
+ if (e > 1)
+ h -= 3;
+ else if (e == 0)
+ D -= 1;
+ else
+ D -= 2;
+
+ if (h > k) // => e = 1 or 4, LOWERMOST
+ { char *a;
+
+ a = A + k;
+ if (k < 0)
+ m = -k;
+ else
+ m = 0;
+ if (PVF[D][h] < c)
+ c = PVF[D][h];
+ while (c >= m && a[c] == B[c])
+ c -= 1;
+ if (e == 1) // => edge is 2, others are 1, and 0
+ { if (c < PVF[D+2][k-1])
+ { e = 2;
+ h = k-1;
+ D = D+2;
+ }
+ else if (c == PVF[D+1][k])
+ { e = 0;
+ h = k;
+ D = D+1;
+ }
+ else
+ PVF[D][h] = c--;
+ }
+ else // => edge is 0, others are 1, and 2 (if k != del), 0 (otherwise)
+ { if (k == del)
+ m = D;
+ else
+ m = D-2;
+ if (c < PVF[m][k-1])
+ { if (k == del)
+ e = 2;
+ else
+ e = -1;
+ h = k-1;
+ D = m;
+ }
+ else if (c == PVF[D-1][k])
+ { e = 0;
+ h = k;
+ D = D-1;
+ }
+ else
+ PVF[D][h] = c--;
+ }
+ }
+
+ m = PHF[D][h];
+ PHF[D][h] = e;
+ e = m;
+ k = h;
+ }
+
+ else // mode == GREEDIEST
+
+ while (e != 3)
+ { h = k+e;
+ if (e > 1)
+ h -= 3;
+ else if (e == 0)
+ D -= 1;
+ else
+ D -= 2;
+
+ m = PHF[D][h];
+ PHF[D][h] = e;
+ e = m;
+ k = h;
+ }
+
+ k = D = 0;
+ e = PHF[D][k];
+ while (e != 3)
+ { h = k-e;
+ c = PVF[D][k];
+ if (e > 1)
+ h += 3;
+ else if (e == 0)
+ D += 1;
+ else
+ D += 2;
+#ifdef DEBUG_SCRIPT
+ if (h > k)
+ printf(" D %d(%d)\n",(c-k)-(ap-1),c+bp);
+ else if (h < k)
+ printf(" I %d(%d)\n",c+(bp-1),(c+k)-ap);
+ else
+ printf(" %d S %d\n",(c+k)-(ap+1),c+(bp-1));
+#endif
+ if (h > k)
+ *wave->Stop++ = bp+c;
+ else if (h < k)
+ *wave->Stop++ = ap-(c+k);
+ k = h;
+ e = PHF[D][h];
+ }
+ }
+
+ return (D + abs(del));
+}
+
+static int middle_np(char *A, int M, char *B, int N, Trace_Waves *wave, int mode)
+{ int **PVF = wave->PVF;
+ int **PHF = wave->PHF;
+ int D;
+ int del = M-N;
+
+ { int *F0, *F1, *F2;
+ int *HF;
+ int low, hgh;
+ int posl, posh;
+
+#ifdef DEBUG_ALIGN
+ printf("\n%*s BASE %ld,%ld: %d vs %d\n",depth,"",A-wave->Aabs,B-wave->Babs,M,N);
+ printf("%*s A = ",depth,"");
+ for (D = 0; D < M; D++)
+ printf("%c",ToA[(int) A[D]]);
+ printf("\n");
+ printf("%*s B = ",depth,"");
+ for (D = 0; D < N; D++)
+ printf("%c",ToA[(int) B[D]]);
+ printf("\n");
+#endif
+
+ if (del >= 0)
+ { low = 0;
+ hgh = del;
+ }
+ else
+ { low = del;
+ hgh = 0;
+ }
+
+ posl = -INT32_MAX;
+ posh = INT32_MAX;
+ if (wave->Aabs == wave->Babs)
+ { if (B == A)
+ { EPRINTF(EPLACE,"Error: self comparison starts on diagonal 0 (Compute_Trace)\n");
+ EXIT(1);
+ }
+ else if (B < A)
+ posl = (B-A)+1;
+ else
+ posh = (B-A)-1;
+ }
+
+ F1 = PVF[-2];
+ F0 = PVF[-1];
+
+ for (D = low-1; D <= hgh+1; D++)
+ F1[D] = F0[D] = -2;
+ F0[0] = -1;
+
+ low += 1;
+ hgh -= 1;
+
+ for (D = 0; 1; D += 1)
+ { int k, i, j;
+ int am, ac, ap;
+ char *a;
+
+ F2 = F1;
+ F1 = F0;
+ F0 = PVF[D];
+ HF = PHF[D];
+
+ if ((D & 0x1) == 0)
+ { if (low > posl)
+ low -= 1;
+ if (hgh < posh)
+ hgh += 1;
+ }
+ F0[hgh+1] = F0[low-1] = -2;
+
+ j = -2;
+ a = A + hgh;
+ i = M - hgh;
+ for (k = hgh; k > del; k--)
+ { ap = j+1;
+ am = F2[k-1];
+ FS_MOVE(-1,4)
+ a -= 1;
+ i += 1;
+ }
+
+ j = -2;
+ a = A + low;
+ i = M - low;
+ for (k = low; k < del; k++)
+ { ap = F2[k+1]+1;
+ am = j;
+ FS_MOVE(2,1)
+ a += 1;
+ i -= 1;
+ }
+
+ ap = F0[del+1]+1;
+ am = j;
+ FS_MOVE(2,4)
+
+#ifdef DEBUG_AWAVE
+ print_awave(F0,low,hgh);
+ print_awave(HF,low,hgh);
+#endif
+
+ if (F0[del] >= N)
+ break;
+ }
+ }
+
+ { int k, h, m, e, c;
+ int d, f;
+
+ d = D + abs(del);
+ c = N;
+ k = del;
+
+ if (mode == UPPERMOST)
+
+ for (f = d/2; d > f; d--)
+ { e = PHF[D][k];
+ h = k+e;
+ if (e > 1)
+ h -= 3;
+ else if (e == 0)
+ D -= 1;
+ else
+ D -= 2;
+
+ if (h < k) // => e = -1 or 2, UPPERMOST
+ { char *a;
+
+ a = A + k;
+ if (k < 0)
+ m = -k;
+ else
+ m = 0;
+ if (PVF[D][h] <= c)
+ c = PVF[D][h]-1;
+ while (c >= m && a[c] == B[c])
+ c -= 1;
+ if (e == -1) // => edge is 2, others are 1, and 0
+ { if (c <= PVF[D+2][k+1])
+ { e = 4;
+ h = k+1;
+ D = D+2;
+ }
+ else if (c == PVF[D+1][k])
+ { e = 0;
+ h = k;
+ D = D+1;
+ }
+ else
+ PVF[D][h] = c+1;
+ }
+ else // => edge is 0, others are 1, and 2 (if k != del), 0 (otherwise)
+ { if (k == del)
+ m = D;
+ else
+ m = D-2;
+ if (c <= PVF[m][k+1])
+ { if (k == del)
+ e = 4;
+ else
+ e = 1;
+ h = k+1;
+ D = m;
+ }
+ else if (c == PVF[D-1][k])
+ { e = 0;
+ h = k;
+ D = D-1;
+ }
+ else
+ PVF[D][h] = c+1;
+ }
+ }
+
+ k = h;
+ }
+
+ else if (mode == LOWERMOST)
+
+ for (f = d/2; d > f; d--)
+ { e = PHF[D][k];
+ h = k+e;
+ if (e > 1)
+ h -= 3;
+ else if (e == 0)
+ D -= 1;
+ else
+ D -= 2;
+
+ if (h > k) // => e = 1 or 4, LOWERMOST
+ { char *a;
+
+ a = A + k;
+ if (k < 0)
+ m = -k;
+ else
+ m = 0;
+ if (PVF[D][h] < c)
+ c = PVF[D][h];
+ while (c >= m && a[c] == B[c])
+ c -= 1;
+ if (e == 1) // => edge is 2, others are 1, and 0
+ { if (c < PVF[D+2][k-1])
+ { e = 2;
+ h = k-1;
+ D = D+2;
+ }
+ else if (c == PVF[D+1][k])
+ { e = 0;
+ h = k;
+ D = D+1;
+ }
+ else
+ PVF[D][h] = c--;
+ }
+ else // => edge is 0, others are 1, and 2 (if k != del), 0 (otherwise)
+ { if (k == del)
+ m = D;
+ else
+ m = D-2;
+ if (c < PVF[m][k-1])
+ { if (k == del)
+ e = 2;
+ else
+ e = -1;
+ h = k-1;
+ D = m;
+ }
+ else if (c == PVF[D-1][k])
+ { e = 0;
+ h = k;
+ D = D-1;
+ }
+ else
+ PVF[D][h] = c--;
+ }
+ }
+
+ k = h;
+ }
+
+ else // mode == GREEDIEST
+
+ for (f = d/2; d > f; d--)
+ { e = PHF[D][k];
+ h = k+e;
+ if (e > 1)
+ h -= 3;
+ else if (e == 0)
+ D -= 1;
+ else
+ D -= 2;
+ k = h;
+ }
+
+ wave->midb = (B-wave->Babs) + PVF[D][k];
+ wave->mida = (A-wave->Aabs) + k + PVF[D][k];
+ }
+
+ return (0);
+}
+
+
+/****************************************************************************************\
+* *
+* COMPUTE_TRACE FLAVORS *
+* *
+\****************************************************************************************/
+
+int Compute_Trace_ALL(Alignment *align, Work_Data *ework)
+{ _Work_Data *work = (_Work_Data *) ework;
+ Trace_Waves wave;
+
+ Path *path;
+ char *aseq, *bseq;
+ int M, N, D;
+
+ path = align->path;
+ aseq = align->aseq;
+ bseq = align->bseq;
+
+ M = path->aepos-path->abpos;
+ N = path->bepos-path->bbpos;
+
+ { int64 s;
+ int d;
+ int dmax;
+ int **PVF, **PHF;
+
+ if (M < N)
+ s = N;
+ else
+ s = M;
+ s *= sizeof(int);
+ if (s > work->tramax)
+ if (enlarge_trace(work,s))
+ EXIT(1);
+
+ dmax = path->diffs - abs(M-N);
+
+ s = (dmax+3)*2*((M+N+3)*sizeof(int) + sizeof(int *));
+
+ if (s > 256000000)
+ return (Compute_Trace_ND_ALL(align,ework));
+
+ if (s > work->vecmax)
+ if (enlarge_vector(work,s))
+ EXIT(1);
+
+ wave.PVF = PVF = ((int **) (work->vector)) + 2;
+ wave.PHF = PHF = PVF + (dmax+3);
+
+ s = M+N+3;
+ PVF[-2] = ((int *) (PHF + (dmax+1))) + (N+1);
+ for (d = -1; d <= dmax; d++)
+ PVF[d] = PVF[d-1] + s;
+ PHF[-2] = PVF[dmax] + s;
+ for (d = -1; d <= dmax; d++)
+ PHF[d] = PHF[d-1] + s;
+ }
+
+ wave.Stop = ((int *) work->trace);
+ wave.Aabs = aseq;
+ wave.Babs = bseq;
+
+ D = iter_np(aseq+path->abpos,M,bseq+path->bbpos,N,&wave,GREEDIEST);
+ if (D < 0)
+ EXIT(1);
+ path->diffs = D;
+ path->trace = work->trace;
+ path->tlen = wave.Stop - ((int *) path->trace);
+
+ return (0);
+}
+
+int Compute_Trace_PTS(Alignment *align, Work_Data *ework, int trace_spacing, int mode)
+{ _Work_Data *work = (_Work_Data *) ework;
+ Trace_Waves wave;
+
+ Path *path;
+ char *aseq, *bseq;
+ uint16 *points;
+ int tlen;
+ int ab, bb;
+ int ae, be;
+ int diffs;
+
+ path = align->path;
+ aseq = align->aseq;
+ bseq = align->bseq;
+ tlen = path->tlen;
+ points = (uint16 *) path->trace;
+
+ { int64 s;
+ int d;
+ int M, N;
+ int dmax, nmax;
+ int **PVF, **PHF;
+
+ M = path->aepos-path->abpos;
+ N = path->bepos-path->bbpos;
+ if (M < N)
+ s = N*sizeof(int);
+ else
+ s = M*sizeof(int);
+ if (s > work->tramax)
+ if (enlarge_trace(work,s))
+ EXIT(1);
+
+ nmax = 0;
+ dmax = 0;
+ for (d = 1; d < tlen; d += 2)
+ { if (points[d-1] > dmax)
+ dmax = points[d-1];
+ if (points[d] > nmax)
+ nmax = points[d];
+ }
+ if (tlen <= 1)
+ nmax = N;
+
+ s = (dmax+3)*2*((trace_spacing+nmax+3)*sizeof(int) + sizeof(int *));
+
+ if (s > work->vecmax)
+ if (enlarge_vector(work,s))
+ EXIT(1);
+
+ wave.PVF = PVF = ((int **) (work->vector)) + 2;
+ wave.PHF = PHF = PVF + (dmax+3);
+
+ s = trace_spacing+nmax+3;
+ PVF[-2] = ((int *) (PHF + (dmax+1))) + (nmax+1);
+ for (d = -1; d <= dmax; d++)
+ PVF[d] = PVF[d-1] + s;
+ PHF[-2] = PVF[dmax] + s;
+ for (d = -1; d <= dmax; d++)
+ PHF[d] = PHF[d-1] + s;
+ }
+
+ wave.Stop = (int *) (work->trace);
+ wave.Aabs = aseq;
+ wave.Babs = bseq;
+
+ { int i, d;
+
+ diffs = 0;
+ ab = path->abpos;
+ ae = (ab/trace_spacing)*trace_spacing;
+ bb = path->bbpos;
+ tlen -= 2;
+ for (i = 1; i < tlen; i += 2)
+ { ae = ae + trace_spacing;
+ be = bb + points[i];
+ d = iter_np(aseq+ab,ae-ab,bseq+bb,be-bb,&wave,mode);
+ if (d < 0)
+ EXIT(1);
+ diffs += d;
+ ab = ae;
+ bb = be;
+ }
+ ae = path->aepos;
+ be = path->bepos;
+ d = iter_np(aseq+ab,ae-ab,bseq+bb,be-bb,&wave,mode);
+ if (d < 0)
+ EXIT(1);
+ diffs += d;
+ }
+
+ path->trace = work->trace;
+ path->tlen = wave.Stop - ((int *) path->trace);
+ path->diffs = diffs;
+
+ return (0);
+}
+
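+/* A hypothetical caller sketch (the function name and the never-defined
+   USAGE_SKETCH guard are illustrative, and Work_Data is assumed to come from
+   the library's usual constructor in align.h): given an Alignment whose path
+   still holds uint16 trace points recorded at spacing tspace, recompute an
+   exact trace and print the alignment.                                       */
+
+#ifdef USAGE_SKETCH
+
+static int show_alignment(FILE *out, Alignment *align, Work_Data *work, int tspace)
+{ if (Compute_Trace_PTS(align,work,tspace,GREEDIEST))   //  trace points -> full trace
+    return (1);
+                                                        //  indent 4, width 100, border 10,
+                                                        //  lower case, 8-digit coordinates
+  return (Print_Alignment(out,align,work,4,100,10,0,8));
+}
+
+#endif
+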
+int Compute_Trace_MID(Alignment *align, Work_Data *ework, int trace_spacing, int mode)
+{ _Work_Data *work = (_Work_Data *) ework;
+ Trace_Waves wave;
+
+ Path *path;
+ char *aseq, *bseq;
+ uint16 *points;
+ int tlen;
+ int ab, bb;
+ int ae, be;
+ int diffs;
+
+ path = align->path;
+ aseq = align->aseq;
+ bseq = align->bseq;
+ tlen = path->tlen;
+ points = (uint16 *) path->trace;
+
+ { int64 s;
+ int d;
+ int M, N;
+ int dmax, nmax;
+ int **PVF, **PHF;
+
+ M = path->aepos-path->abpos;
+ N = path->bepos-path->bbpos;
+ if (M < N)
+ s = N*sizeof(int);
+ else
+ s = M*sizeof(int);
+ if (s > work->tramax)
+ if (enlarge_trace(work,s))
+ EXIT(1);
+
+ nmax = 0;
+ dmax = 0;
+ for (d = 1; d < tlen; d += 2)
+ { if (points[d-1] > dmax)
+ dmax = points[d-1];
+ if (points[d] > nmax)
+ nmax = points[d];
+ }
+ if (tlen <= 1)
+ nmax = N;
+
+ s = (dmax+3)*4*((trace_spacing+nmax+3)*sizeof(int) + sizeof(int *));
+
+ if (s > work->vecmax)
+ if (enlarge_vector(work,s))
+ EXIT(1);
+
+ wave.PVF = PVF = ((int **) (work->vector)) + 2;
+ wave.PHF = PHF = PVF + (dmax+3);
+
+ s = trace_spacing+nmax+3;
+ PVF[-2] = ((int *) (PHF + (dmax+1))) + (nmax+1);
+ for (d = -1; d <= dmax; d++)
+ PVF[d] = PVF[d-1] + s;
+ PHF[-2] = PVF[dmax] + s;
+ for (d = -1; d <= dmax; d++)
+ PHF[d] = PHF[d-1] + s;
+ }
+
+ wave.Stop = ((int *) work->trace);
+ wave.Aabs = aseq;
+ wave.Babs = bseq;
+
+ { int i, d;
+ int as, bs;
+ int af, bf;
+
+ diffs = 0;
+ ab = as = af = path->abpos;
+ ae = (ab/trace_spacing)*trace_spacing;
+ bb = bs = bf = path->bbpos;
+ tlen -= 2;
+ for (i = 1; i < tlen; i += 2)
+ { ae = ae + trace_spacing;
+ be = bb + points[i];
+ if (middle_np(aseq+ab,ae-ab,bseq+bb,be-bb,&wave,mode))
+ EXIT(1);
+ af = wave.mida;
+ bf = wave.midb;
+ d = iter_np(aseq+as,af-as,bseq+bs,bf-bs,&wave,mode);
+ if (d < 0)
+ EXIT(1);
+ diffs += d;
+ ab = ae;
+ bb = be;
+ as = af;
+ bs = bf;
+ }
+
+ ae = path->aepos;
+ be = path->bepos;
+
+ if (middle_np(aseq+ab,ae-ab,bseq+bb,be-bb,&wave,mode))
+ EXIT(1);
+ af = wave.mida;
+ bf = wave.midb;
+ d = iter_np(aseq+as,af-as,bseq+bs,bf-bs,&wave,mode);
+ if (d < 0)
+ EXIT(1);
+ diffs += d;
+ as = af;
+ bs = bf;
+
+ d += iter_np(aseq+af,ae-as,bseq+bf,be-bs,&wave,mode);
+ if (d < 0)
+ EXIT(1);
+ diffs += d;
+ }
+
+ path->trace = work->trace;
+ path->tlen = wave.Stop - ((int *) path->trace);
+ path->diffs = diffs;
+
+ return (0);
+}
+
+int Compute_Trace_IRR(Alignment *align, Work_Data *ework, int mode)
+{ _Work_Data *work = (_Work_Data *) ework;
+ Trace_Waves wave;
+
+ Path *path;
+ char *aseq, *bseq;
+ uint16 *points;
+ int tlen;
+ int ab, bb;
+ int ae, be;
+ int diffs;
+
+ path = align->path;
+ aseq = align->aseq;
+ bseq = align->bseq;
+ tlen = path->tlen;
+ points = (uint16 *) path->trace;
+
+ { int64 s;
+ int d;
+ int M, N;
+ int mmax, nmax, dmax;
+ int **PVF, **PHF;
+
+ M = path->aepos-path->abpos;
+ N = path->bepos-path->bbpos;
+ if (M < N)
+ s = N*sizeof(int);
+ else
+ s = M*sizeof(int);
+ if (s > work->tramax)
+ if (enlarge_trace(work,s))
+ EXIT(1);
+
+ nmax = mmax = 0;
+ for (d = 0; d < tlen; d += 2)
+ { if (points[d] > mmax)
+ mmax = points[d];
+ if (points[d+1] > nmax)
+ nmax = points[d+1];
+ }
+ if (tlen <= 1)
+ { mmax = M;
+ nmax = N;
+ }
+ if (mmax > nmax)
+ dmax = nmax;
+ else
+ dmax = mmax;
+
+ s = (dmax+3)*2*((mmax+nmax+3)*sizeof(int) + sizeof(int *));
+
+ if (s > work->vecmax)
+ if (enlarge_vector(work,s))
+ EXIT(1);
+
+ wave.PVF = PVF = ((int **) (work->vector)) + 2;
+ wave.PHF = PHF = PVF + (dmax+3);
+
+ s = mmax+nmax+3;
+ PVF[-2] = ((int *) (PHF + (dmax+1))) + (nmax+1);
+ for (d = -1; d <= dmax; d++)
+ PVF[d] = PVF[d-1] + s;
+ PHF[-2] = PVF[dmax] + s;
+ for (d = -1; d <= dmax; d++)
+ PHF[d] = PHF[d-1] + s;
+ }
+
+ wave.Stop = (int *) (work->trace);
+ wave.Aabs = aseq;
+ wave.Babs = bseq;
+
+ { int i, d;
+
+ diffs = 0;
+ ab = path->abpos;
+ bb = path->bbpos;
+ for (i = 0; i < tlen; i += 2)
+ { ae = ab + points[i];
+ be = bb + points[i+1];
+ d = iter_np(aseq+ab,ae-ab,bseq+bb,be-bb,&wave,mode);
+ if (d < 0)
+ EXIT(1);
+ diffs += d;
+ ab = ae;
+ bb = be;
+ }
+ }
+
+ path->trace = work->trace;
+ path->tlen = wave.Stop - ((int *) path->trace);
+ path->diffs = diffs;
+
+ return (0);
+}
diff --git a/src/lib/falcon.c b/src/lib/falcon.c
new file mode 100755
index 0000000..0956577
--- /dev/null
+++ b/src/lib/falcon.c
@@ -0,0 +1,804 @@
+/*
+ * =====================================================================================
+ *
+ * Filename: fastcon.c
+ *
+ * Description:
+ *
+ * Version: 0.1
+ * Created: 07/20/2013 17:00:00
+ * Revision: none
+ * Compiler: gcc
+ *
+ * Author: Jason Chin,
+ * Company:
+ *
+ * =====================================================================================
+
+ #################################################################################$$
+ # Copyright (c) 2011-2014, Pacific Biosciences of California, Inc.
+ #
+ # All rights reserved.
+ #
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted (subject to the limitations in the
+ # disclaimer below) provided that the following conditions are met:
+ #
+ # * Redistributions of source code must retain the above copyright
+ # notice, this list of conditions and the following disclaimer.
+ #
+ # * Redistributions in binary form must reproduce the above
+ # copyright notice, this list of conditions and the following
+ # disclaimer in the documentation and/or other materials provided
+ # with the distribution.
+ #
+ # * Neither the name of Pacific Biosciences nor the names of its
+ # contributors may be used to endorse or promote products derived
+ # from this software without specific prior written permission.
+ #
+ # NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+ # GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC
+ # BIOSCIENCES AND ITS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+ # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ # DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR ITS
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ # SUCH DAMAGE.
+ #################################################################################$$
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <limits.h>
+#include <string.h>
+#include <assert.h>
+#include <stdint.h>
+#include "common.h"
+
+
+
+
+
+
+align_tags_t * get_align_tags( char * aln_q_seq,
+ char * aln_t_seq,
+ seq_coor_t aln_seq_len,
+ aln_range * range,
+ unsigned q_id,
+ seq_coor_t t_offset) {
+ char p_q_base;
+ align_tags_t * tags;
+ seq_coor_t i, j, jj, k, p_j, p_jj;
+
+ tags = calloc( 1, sizeof(align_tags_t) );
+ tags->len = aln_seq_len;
+ tags->align_tags = calloc( aln_seq_len + 1, sizeof(align_tag_t) );
+ i = range->s1 - 1;
+ j = range->s2 - 1;
+ jj = 0;
+ p_j = -1;
+ p_jj = 0;
+ p_q_base = '.';
+
+ for (k = 0; k < aln_seq_len; k++) {
+ if (aln_q_seq[k] != '-') {
+ i ++;
+ jj ++;
+ }
+ if (aln_t_seq[k] != '-') {
+ j ++;
+ jj = 0;
+ }
+ //printf("t %d %d %d %c %c\n", q_id, j, jj, aln_t_seq[k], aln_q_seq[k]);
+
+
+ if ( j + t_offset >= 0 && jj < UINT8_MAX && p_jj < UINT8_MAX) {
+ (tags->align_tags[k]).t_pos = j + t_offset;
+ (tags->align_tags[k]).delta = jj;
+ (tags->align_tags[k]).p_t_pos = p_j + t_offset;
+ (tags->align_tags[k]).p_delta = p_jj;
+ (tags->align_tags[k]).p_q_base = p_q_base;
+ (tags->align_tags[k]).q_base = aln_q_seq[k];
+ (tags->align_tags[k]).q_id = q_id;
+
+ p_j = j;
+ p_jj = jj;
+ p_q_base = aln_q_seq[k];
+ }
+ }
+ // sentinel at the end
+ //k = aln_seq_len;
+ tags->len = k;
+ (tags->align_tags[k]).t_pos = UINT_MAX;
+ (tags->align_tags[k]).delta = UINT8_MAX;
+ (tags->align_tags[k]).q_base = '.';
+ (tags->align_tags[k]).q_id = UINT_MAX;
+ return tags;
+}
+
+void free_align_tags( align_tags_t * tags) {
+ free( tags->align_tags );
+ free( tags );
+}
+
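+/* Illustration only (the helper, its literal alignment rows and the
+   never-defined USAGE_SKETCH guard are hypothetical): tag one gapped pairwise
+   alignment of a query row against a template row.  range->s1/s2 are assumed
+   to hold the query/template start coordinates of the aligned region, as read
+   above, and t_offset shifts template positions into whole-template
+   coordinates.                                                               */
+
+#ifdef USAGE_SKETCH
+
+static align_tags_t * tag_example(aln_range * range)
+{ char q[] = "ACGT-TA";                         //  aligned query row ('-' = gap)
+  char t[] = "AC-TGTA";                         //  aligned template row
+
+  align_tags_t * tags = get_align_tags(q, t, 7, range, 1, 0);  //  q_id = 1, t_offset = 0
+  /* ... consume tags->align_tags[0 .. tags->len] here ... */
+  return tags;                                  //  caller releases with free_align_tags()
+}
+
+#endif
+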
+
+void allocate_aln_col( align_tag_col_t * col) {
+ col->p_t_pos = ( seq_coor_t * ) calloc(col->size, sizeof( seq_coor_t ));
+ col->p_delta = ( uint8_t * ) calloc(col->size, sizeof( uint8_t ));
+ col->p_q_base = ( char * )calloc(col->size, sizeof( char ));
+ col->link_count = ( uint16_t * ) calloc(col->size, sizeof( uint16_t ));
+}
+
+void realloc_aln_col( align_tag_col_t * col ) {
+ col->p_t_pos = (seq_coor_t *) realloc( col->p_t_pos, (col->size) * sizeof( seq_coor_t ));
+ col->p_delta = ( uint8_t *) realloc( col->p_delta, (col->size) * sizeof( uint8_t ));
+ col->p_q_base = (char *) realloc( col->p_q_base, (col->size) * sizeof( char ));
+ col->link_count = ( uint16_t *) realloc( col->link_count, (col->size) * sizeof( uint16_t ));
+}
+
+void free_aln_col( align_tag_col_t * col) {
+ free(col->p_t_pos);
+ free(col->p_delta);
+ free(col->p_q_base);
+ free(col->link_count);
+}
+
+
+void allocate_delta_group( msa_delta_group_t * g) {
+ int i,j;
+ g->max_delta = 0;
+ g->delta = (msa_base_group_t *) calloc( g->size, sizeof(msa_base_group_t));
+ for (i = 0; i< g->size; i++) {
+ g->delta[i].base = ( align_tag_col_t * ) calloc( 5, sizeof(align_tag_col_t ) );
+ for (j = 0; j < 5; j++ ) {
+ g->delta[i].base[j].size = 8;
+ allocate_aln_col(&(g->delta[i].base[j]));
+ }
+ }
+}
+
+void realloc_delta_group( msa_delta_group_t * g, uint16_t new_size ) {
+ int i, j, bs, es;
+ bs = g->size;
+ es = new_size;
+ g->delta = (msa_base_group_t *) realloc(g->delta, new_size * sizeof(msa_base_group_t));
+ for (i=bs; i < es; i++) {
+ g->delta[i].base = ( align_tag_col_t *) calloc( 5, sizeof(align_tag_col_t ) );
+ for (j = 0; j < 5; j++ ) {
+ g->delta[i].base[j].size = 8;
+ allocate_aln_col(&(g->delta[i].base[j]));
+ }
+ }
+ g->size = new_size;
+}
+
+void free_delta_group( msa_delta_group_t * g) {
+ // free every column of every delta group
+ int i, j;
+ for (i = 0; i < g->size; i++) {
+ for (j = 0; j < 5; j++) {
+ free_aln_col( &(g->delta[i].base[j]) );
+ }
+ free(g->delta[i].base);
+ }
+ free(g->delta);
+}
+
+void update_col( align_tag_col_t * col, seq_coor_t p_t_pos, uint8_t p_delta, char p_q_base) {
+ int updated = 0;
+ int kk;
+ col->count += 1;
+ for (kk = 0; kk < col->n_link; kk++) {
+ if ( p_t_pos == col->p_t_pos[kk] &&
+ p_delta == col->p_delta[kk] &&
+ p_q_base == col->p_q_base[kk] ) {
+ col->link_count[kk] ++;
+ updated = 1;
+ break;
+ }
+ }
+ if (updated == 0) {
+ if (col->n_link + 1 > col->size) {
+ if (col->size < (UINT16_MAX >> 1)-1) { // double while below half of UINT16_MAX, then grow linearly
+ col->size *= 2;
+ } else {
+ col->size += 256;
+ }
+ assert( col->size < UINT16_MAX-1 );
+ realloc_aln_col(col);
+ }
+ kk = col->n_link;
+
+ col->p_t_pos[kk] = p_t_pos;
+ col->p_delta[kk] = p_delta;
+ col->p_q_base[kk] = p_q_base;
+ col->link_count[kk] = 1;
+ col->n_link++;
+ }
+}
+
+
+msa_pos_t * get_msa_working_sapce(unsigned int max_t_len) {
+ msa_pos_t * msa_array;
+ unsigned int i;
+ msa_array = calloc(max_t_len, sizeof(msa_pos_t *));
+ for (i = 0; i < max_t_len; i++) {
+ msa_array[i] = calloc(1, sizeof(msa_delta_group_t));
+ msa_array[i]->size = 8;
+ allocate_delta_group(msa_array[i]);
+ }
+ return msa_array;
+}
+
+void clean_msa_working_space( msa_pos_t * msa_array, unsigned int max_t_len) {
+ unsigned int i,j,k;
+ align_tag_col_t * col;
+ for (i = 0; i < max_t_len; i++) {
+ for (j =0; j < msa_array[i]->max_delta + 1; j++) {
+ for (k = 0; k < 5; k++ ) {
+ col = msa_array[i]->delta[j].base + k;
+ /*
+ for (c =0; c < col->size; c++) {
+ col->p_t_pos[c] = 0;
+ col->p_delta[c] = 0;
+ col->p_q_base[c] = 0;
+ col->link_count[c] =0;
+ }
+ */
+ col->n_link = 0;
+ col->count = 0;
+ col->best_p_t_pos = 0;
+ col->best_p_delta = 0;
+ col->best_p_q_base = 0;
+ col->score = 0;
+ }
+ }
+ msa_array[i]->max_delta = 0;
+ }
+}
+
+#define STATIC_ALLOCATE
+//#undef STATIC_ALLOCATE
+
+consensus_data * get_cns_from_align_tags( align_tags_t ** tag_seqs,
+ unsigned n_tag_seqs,
+ unsigned t_len,
+ unsigned min_cov ) {
+
+ seq_coor_t i, j;
+ seq_coor_t t_pos = 0;
+ unsigned int * coverage;
+ unsigned int * local_nbase;
+
+ consensus_data * consensus;
+ //char * consensus;
+ align_tag_t * c_tag;
+ static msa_pos_t * msa_array = NULL;
+
+ coverage = calloc( t_len, sizeof(unsigned int) );
+ local_nbase = calloc( t_len, sizeof(unsigned int) );
+
+#ifndef STATIC_ALLOCATE
+
+ msa_array = calloc(t_len, sizeof(msa_pos_t *));
+
+ for (i = 0; i < t_len; i++) {
+ msa_array[i] = calloc(1, sizeof(msa_delta_group_t));
+ msa_array[i]->size = 8;
+ allocate_delta_group(msa_array[i]);
+ }
+
+#endif
+
+#ifdef STATIC_ALLOCATE
+
+ if ( msa_array == NULL) {
+ msa_array = get_msa_working_sapce( 100000 );
+ }
+
+ assert(t_len < 100000);
+
+#endif
+
+
+ // loop through every alignment
+ //printf("XX %d\n", n_tag_seqs);
+ for (i = 0; i < n_tag_seqs; i++) {
+
+ // for each alignment position, insert the alignment tag into msa_array
+ for (j = 0; j < tag_seqs[i]->len; j++) {
+ c_tag = tag_seqs[i]->align_tags + j;
+ unsigned int delta;
+ delta = c_tag->delta;
+ if (delta == 0) {
+ t_pos = c_tag->t_pos;
+ coverage[ t_pos ] ++;
+ }
+ // Assume t_pos was set on earlier iteration.
+ // (Otherwise, use its initial value, which might be an error. ~cd)
+ if (delta > msa_array[t_pos]->max_delta) {
+ msa_array[t_pos]->max_delta = delta;
+ if (msa_array[t_pos]->max_delta + 4 > msa_array[t_pos]->size ) {
+ realloc_delta_group(msa_array[t_pos], msa_array[t_pos]->max_delta + 8);
+ }
+ }
+
+ unsigned int base = -1;
+ switch (c_tag->q_base) {
+ case 'A': base = 0; break;
+ case 'C': base = 1; break;
+ case 'G': base = 2; break;
+ case 'T': base = 3; break;
+ case '-': base = 4; break;
+ }
+ // Note: On bad input, base may be -1.
+ update_col( &(msa_array[t_pos]->delta[delta].base[base]), c_tag->p_t_pos, c_tag->p_delta, c_tag->p_q_base);
+ local_nbase[ t_pos ] ++;
+ }
+ }
+
+ // propagate scores through the alignment links and set up the backtracking information
+ align_tag_col_t * g_best_aln_col = 0;
+ unsigned int g_best_ck = 0;
+ seq_coor_t g_best_t_pos = 0;
+ {
+ int kk;
+ int ck;
+ // char base;
+ int best_i;
+ int best_j;
+ int best_b;
+ int best_ck = -1;
+ double score;
+ double best_score;
+ double g_best_score;
+ // char best_mark;
+
+ align_tag_col_t * aln_col;
+
+ g_best_score = -1;
+
+ for (i = 0; i < t_len; i++) { //loop through every template base
+ //printf("max delta: %d %d\n", i, msa_array[i]->max_delta);
+ for (j = 0; j <= msa_array[i]->max_delta; j++) { // loop through every delta position
+ for (kk = 0; kk < 5; kk++) { // loop through the different bases at the same delta position
+ /*
+ switch (kk) {
+ case 0: base = 'A'; break;
+ case 1: base = 'C'; break;
+ case 2: base = 'G'; break;
+ case 3: base = 'T'; break;
+ case 4: base = '-'; break;
+ }
+ */
+ aln_col = msa_array[i]->delta[j].base + kk;
+ if (aln_col->count >= 0) {
+ best_score = -1;
+ best_i = -1;
+ best_j = -1;
+ best_b = -1;
+
+ for (ck = 0; ck < aln_col->n_link; ck++) { // loop through the different links to the previous column
+ int pi;
+ int pj;
+ int pkk;
+ pi = aln_col->p_t_pos[ck];
+ pj = aln_col->p_delta[ck];
+ switch (aln_col->p_q_base[ck]) {
+ case 'A': pkk = 0; break;
+ case 'C': pkk = 1; break;
+ case 'G': pkk = 2; break;
+ case 'T': pkk = 3; break;
+ case '-': pkk = 4; break;
+ default: pkk = 4;
+ }
+
+ if (aln_col->p_t_pos[ck] == -1) {
+ score = (double) aln_col->link_count[ck] - (double) coverage[i] * 0.5;
+ } else {
+ score = msa_array[pi]->delta[pj].base[pkk].score +
+ (double) aln_col->link_count[ck] - (double) coverage[i] * 0.5;
+ }
+ // best_mark = ' ';
+ if (score > best_score) {
+ best_score = score;
+ aln_col->best_p_t_pos = best_i = pi;
+ aln_col->best_p_delta = best_j = pj;
+ aln_col->best_p_q_base = best_b = pkk;
+ best_ck = ck;
+ // best_mark = '*';
+ }
+ /*
+ printf("X %d %d %d %c %d %d %d %c %d %lf %c\n", coverage[i], i, j, base, aln_col->count,
+ aln_col->p_t_pos[ck],
+ aln_col->p_delta[ck],
+ aln_col->p_q_base[ck],
+ aln_col->link_count[ck],
+ score, best_mark);
+ */
+ }
+ aln_col->score = best_score;
+ if (best_score > g_best_score) {
+ g_best_score = best_score;
+ g_best_aln_col = aln_col;
+ g_best_ck = best_ck;
+ g_best_t_pos = i;
+ //printf("GB %d %d %d %d\n", i, j, ck, g_best_aln_col);
+ }
+ }
+ }
+ }
+ }
+ assert(g_best_score != -1);
+ }
+
+ // reconstruct the sequences
+ unsigned int index;
+ char bb = '$';
+ int ck;
+ char * cns_str;
+ int * eqv;
+ double score0;
+
+ consensus = calloc( 1, sizeof(consensus_data) );
+ consensus->sequence = calloc( t_len * 2 + 1, sizeof(char) );
+ consensus->eqv = calloc( t_len * 2 + 1, sizeof(unsigned int) );
+ cns_str = consensus->sequence;
+ eqv = consensus->eqv;
+
+ index = 0;
+ ck = g_best_ck;
+ i = g_best_t_pos;
+
+ while (1) {
+ if (coverage[i] > min_cov) {
+ switch (ck) {
+ case 0: bb = 'A'; break;
+ case 1: bb = 'C'; break;
+ case 2: bb = 'G'; break;
+ case 3: bb = 'T'; break;
+ case 4: bb = '-'; break;
+ }
+ } else {
+ switch (ck) {
+ case 0: bb = 'a'; break;
+ case 1: bb = 'c'; break;
+ case 2: bb = 'g'; break;
+ case 3: bb = 't'; break;
+ case 4: bb = '-'; break;
+ }
+ }
+ // Note: On bad input, bb will keep previous value, possibly '$'.
+
+ score0 = g_best_aln_col->score;
+ i = g_best_aln_col->best_p_t_pos;
+ if (i == -1 || index >= t_len * 2) break;
+ j = g_best_aln_col->best_p_delta;
+ ck = g_best_aln_col->best_p_q_base;
+ g_best_aln_col = msa_array[i]->delta[j].base + ck;
+
+ if (bb != '-') {
+ cns_str[index] = bb;
+ eqv[index] = (int) score0 - (int) g_best_aln_col->score;
+ //printf("C %d %d %c %lf %d %d\n", i, index, bb, g_best_aln_col->score, coverage[i], eqv[index] );
+ index ++;
+ }
+ }
+
+ // reverse the sequence
+ for (i = 0; i < index/2; i++) {
+ cns_str[i] = cns_str[i] ^ cns_str[index-i-1];
+ cns_str[index-i-1] = cns_str[i] ^ cns_str[index-i-1];
+ cns_str[i] = cns_str[i] ^ cns_str[index-i-1];
+ eqv[i] = eqv[i] ^ eqv[index-i-1];
+ eqv[index-i-1] = eqv[i] ^ eqv[index-i-1];
+ eqv[i] = eqv[i] ^ eqv[index-i-1];
+ }
+
+ cns_str[index] = 0;
+ //printf("%s\n", cns_str);
+#ifndef STATIC_ALLOCATE
+ for (i = 0; i < t_len; i++) {
+ free_delta_group(msa_array[i]);
+ free(msa_array[i]);
+ }
+
+ free(msa_array);
+#endif
+
+#ifdef STATIC_ALLOCATE
+ clean_msa_working_space(msa_array, t_len+1);
+#endif
+
+ free(coverage);
+ free(local_nbase);
+ return consensus;
+}
+
+//const unsigned int K = 8;
+
+consensus_data * generate_consensus( char ** input_seq,
+ unsigned int n_seq,
+ unsigned min_cov,
+ unsigned K,
+ double min_idt) {
+ unsigned int j;
+ unsigned int seq_count;
+ unsigned int aligned_seq_count;
+ kmer_lookup * lk_ptr;
+ seq_array sa_ptr;
+ seq_addr_array sda_ptr;
+ kmer_match * kmer_match_ptr;
+ aln_range * arange;
+ alignment * aln;
+ align_tags_t ** tags_list;
+ //char * consensus;
+ consensus_data * consensus;
+ double max_diff;
+ max_diff = 1.0 - min_idt;
+
+ seq_count = n_seq;
+ //printf("XX n_seq %d\n", n_seq);
+ //for (j=0; j < seq_count; j++) {
+ // printf("seq_len: %u %u\n", j, strlen(input_seq[j]));
+ //};
+ fflush(stdout);
+
+ tags_list = calloc( seq_count, sizeof(align_tags_t *) );
+ lk_ptr = allocate_kmer_lookup( 1 << (K * 2) );
+ sa_ptr = allocate_seq( (seq_coor_t) strlen( input_seq[0]) );
+ sda_ptr = allocate_seq_addr( (seq_coor_t) strlen( input_seq[0]) );
+ add_sequence( 0, K, input_seq[0], strlen(input_seq[0]), sda_ptr, sa_ptr, lk_ptr);
+ //mask_k_mer(1 << (K * 2), lk_ptr, 16);
+
+ aligned_seq_count = 0;
+ for (j=1; j < seq_count; j++) {
+
+ //printf("seq_len: %ld %u\n", j, strlen(input_seq[j]));
+
+ kmer_match_ptr = find_kmer_pos_for_seq(input_seq[j], strlen(input_seq[j]), K, sda_ptr, lk_ptr);
+#define INDEL_ALLOWENCE_0 6
+
+ arange = find_best_aln_range(kmer_match_ptr, K, K * INDEL_ALLOWENCE_0, 5); // narrow band to avoid aligning through big indels
+
+ //printf("1:%ld %ld %ld %ld\n", arange_->s1, arange_->e1, arange_->s2, arange_->e2);
+
+ //arange = find_best_aln_range2(kmer_match_ptr, K, K * INDEL_ALLOWENCE_0, 5); // narrow band to avoid aligning through big indels
+
+ //printf("2:%ld %ld %ld %ld\n\n", arange->s1, arange->e1, arange->s2, arange->e2);
+
+#define INDEL_ALLOWENCE_1 0.10
+ if (arange->e1 - arange->s1 < 100 || arange->e2 - arange->s2 < 100 ||
+ abs( (arange->e1 - arange->s1 ) - (arange->e2 - arange->s2) ) >
+ (int) (0.5 * INDEL_ALLOWENCE_1 * (arange->e1 - arange->s1 + arange->e2 - arange->s2))) {
+ free_kmer_match( kmer_match_ptr);
+ free_aln_range(arange);
+ continue;
+ }
+ //printf("%ld %s\n", strlen(input_seq[j]), input_seq[j]);
+ //printf("%ld %s\n\n", strlen(input_seq[0]), input_seq[0]);
+
+
+#define INDEL_ALLOWENCE_2 150
+
+ aln = _align(input_seq[j]+arange->s1, arange->e1 - arange->s1 ,
+ input_seq[0]+arange->s2, arange->e2 - arange->s2 ,
+ INDEL_ALLOWENCE_2, 1);
+ if (aln->aln_str_size > 500 && ((double) aln->dist / (double) aln->aln_str_size) < max_diff) {
+ tags_list[aligned_seq_count] = get_align_tags( aln->q_aln_str,
+ aln->t_aln_str,
+ aln->aln_str_size,
+ arange, j,
+ 0);
+ aligned_seq_count ++;
+ }
+ /***
+ for (k = 0; k < tags_list[j]->len; k++) {
+ printf("%ld %d %c\n", tags_list[j]->align_tags[k].t_pos,
+ tags_list[j]->align_tags[k].delta,
+ tags_list[j]->align_tags[k].q_base);
+ }
+ ***/
+ free_aln_range(arange);
+ free_alignment(aln);
+ free_kmer_match( kmer_match_ptr);
+ }
+
+ if (aligned_seq_count > 0) {
+ consensus = get_cns_from_align_tags( tags_list, aligned_seq_count, strlen(input_seq[0]), min_cov );
+ } else {
+ // allocate an empty consensus sequence
+ consensus = calloc( 1, sizeof(consensus_data) );
+ consensus->sequence = calloc( 1, sizeof(char) );
+ consensus->eqv = calloc( 1, sizeof(unsigned int) );
+ }
+ //free(consensus);
+ free_seq_addr_array(sda_ptr);
+ free_seq_array(sa_ptr);
+ free_kmer_lookup(lk_ptr);
+ for (j=0; j < aligned_seq_count; j++) {
+ free_align_tags(tags_list[j]);
+ }
+ free(tags_list);
+ return consensus;
+}
+
+consensus_data * generate_utg_consensus( char ** input_seq,
+ seq_coor_t *offset,
+ unsigned int n_seq,
+ unsigned min_cov,
+ unsigned K,
+ double min_idt) {
+
+ unsigned int j;
+ unsigned int seq_count;
+ unsigned int aligned_seq_count;
+ aln_range * arange;
+ alignment * aln;
+ align_tags_t ** tags_list;
+ //char * consensus;
+ consensus_data * consensus;
+ double max_diff;
+ seq_coor_t utg_len;
+ seq_coor_t r_len;
+ max_diff = 1.0 - min_idt;
+
+
+ seq_count = n_seq;
+ /***
+ for (j=0; j < seq_count; j++) {
+ printf("seq_len: %u %u\n", j, strlen(input_seq[j]));
+ };
+ fflush(stdout);
+ ***/
+ tags_list = calloc( seq_count+1, sizeof(align_tags_t *) );
+ utg_len = strlen(input_seq[0]);
+ aligned_seq_count = 0;
+ arange = calloc( 1, sizeof(aln_range) );
+
+ arange->s1 = 0;
+ arange->e1 = strlen(input_seq[0]);
+ arange->s2 = 0;
+ arange->e2 = strlen(input_seq[0]);
+ tags_list[aligned_seq_count] = get_align_tags( input_seq[0], input_seq[0],
+ strlen(input_seq[0]), arange, 0, 0);
+ aligned_seq_count += 1;
+ for (j=1; j < seq_count; j++) {
+ arange->s1 = 0;
+ arange->e1 = strlen(input_seq[j])-1;
+ arange->s2 = 0;
+ arange->e2 = strlen(input_seq[j])-1;
+
+ r_len = strlen(input_seq[j]);
+ //printf("seq_len: %u %u\n", j, r_len);
+ if ( offset[j] < 0) {
+ if ((r_len + offset[j]) < 128) {
+ continue;
+ }
+ if ( r_len + offset[j] < utg_len ) {
+
+ //printf("1: %ld %u %u\n", offset[j], r_len, utg_len);
+ aln = _align(input_seq[j] - offset[j], r_len + offset[j] ,
+ input_seq[0], r_len + offset[j] ,
+ 500, 1);
+ } else {
+ //printf("2: %ld %u %u\n", offset[j], r_len, utg_len);
+ aln = _align(input_seq[j] - offset[j], utg_len ,
+ input_seq[0], utg_len ,
+ 500, 1);
+ }
+ offset[j] = 0;
+
+ } else {
+ if ( offset[j] > utg_len - 128) {
+ continue;
+ }
+ if ( offset[j] + r_len > utg_len ) {
+ //printf("3: %ld %u %u\n", offset[j], r_len, utg_len);
+ aln = _align(input_seq[j], utg_len - offset[j] ,
+ input_seq[0]+offset[j], utg_len - offset[j],
+ 500, 1);
+ } else {
+ //printf("4: %ld %u %u\n", offset[j], r_len, utg_len);
+ aln = _align(input_seq[j], r_len ,
+ input_seq[0]+offset[j], r_len ,
+ 500, 1);
+ }
+ }
+ if (aln->aln_str_size > 500 && ((double) aln->dist / (double) aln->aln_str_size) < max_diff) {
+ tags_list[aligned_seq_count] = get_align_tags( aln->q_aln_str, aln->t_aln_str,
+ aln->aln_str_size, arange, j,
+ offset[j]);
+ aligned_seq_count ++;
+ }
+ free_alignment(aln);
+ }
+ free_aln_range(arange);
+ if (aligned_seq_count > 0) {
+ consensus = get_cns_from_align_tags( tags_list, aligned_seq_count, utg_len, 0 );
+ } else {
+ // allocate an empty consensus sequence
+ consensus = calloc( 1, sizeof(consensus_data) );
+ consensus->sequence = calloc( 1, sizeof(char) );
+ consensus->eqv = calloc( 1, sizeof(unsigned int) );
+ }
+ //free(consensus);
+ for (j=0; j < aligned_seq_count; j++) {
+ free_align_tags(tags_list[j]);
+ }
+ free(tags_list);
+ return consensus;
+}
+
+
+void free_consensus_data( consensus_data * consensus ){
+ free(consensus->sequence);
+ free(consensus->eqv);
+ free(consensus);
+}
+
+/***
+void main() {
+ unsigned int j;
+ char small_buffer[1024];
+ char big_buffer[65536];
+ char ** input_seq;
+ char ** seq_id;
+ int seq_count;
+ char * consensus;
+
+ input_seq = calloc( 501, sizeof(char *));
+ seq_id = calloc( 501, sizeof(char *));
+
+ while(1) {
+ seq_count = 0;
+ while (1) {
+
+ scanf("%s", small_buffer);
+ seq_id[seq_count] = calloc( strlen(small_buffer) + 1, sizeof(char));
+ strcpy(seq_id[seq_count], small_buffer);
+
+ scanf("%s", big_buffer);
+ input_seq[seq_count] = calloc( strlen(big_buffer) + 1 , sizeof(char));
+ strcpy(input_seq[seq_count], big_buffer);
+
+ if (strcmp(seq_id[seq_count], "+") == 0) {
+ break;
+ }
+ if (strcmp(seq_id[seq_count], "-") == 0) {
+ break;
+ }
+ //printf("%s\n", seq_id[seq_count]);
+ seq_count += 1;
+ if (seq_count > 500) break;
+ }
+ //printf("sc: %d\n", seq_count);
+ if (seq_count < 10 && strcmp(seq_id[seq_count], "-") != 0 ) continue;
+ if (seq_count < 10 && strcmp(seq_id[seq_count], "-") == 0 ) break;
+
+ consensus = generate_consensus(input_seq, seq_count, 8, 8);
+ if (strlen(consensus) > 500) {
+ printf(">%s\n%s\n", seq_id[0], consensus);
+ }
+ fflush(stdout);
+ free(consensus);
+ for (j=0; j < seq_count; j++) {
+ free(seq_id[j]);
+ free(input_seq[j]);
+ };
+
+ }
+ for (j=0; j < seq_count; j++) {
+ free(seq_id[j]);
+ free(input_seq[j]);
+ };
+ free(seq_id);
+ free(input_seq);
+}
+***/
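For orientation, the sketch below shows how the consensus entry point defined above might be driven. It is illustrative only: it assumes generate_consensus, consensus_data and free_consensus_data are declared in common.h (as the test sources later in this commit suggest), and the toy sequences and the parameter values min_cov=8, K=8, min_idt=0.70 are placeholders; reads this short fail the 100 bp / 500 bp guards and simply return an empty consensus, but the call pattern is the same for real reads.

/* Hypothetical driver for the consensus API above; sequences and
 * parameter values are placeholders, not taken from the pipeline. */
#include <stdio.h>
#include "common.h"   /* assumed to declare consensus_data and generate_consensus */

int main(void) {
    /* input_seq[0] is the template; the remaining entries are reads against it */
    char *seqs[3] = {
        "ACGTACGTACGTACGTACGTACGT",  /* template (toy placeholder) */
        "ACGTACGTACGAACGTACGTACGT",  /* read 1   (toy placeholder) */
        "ACGTTCGTACGTACGTACGTACGT"   /* read 2   (toy placeholder) */
    };
    consensus_data *cns = generate_consensus(seqs, 3, 8, 8, 0.70);
    /* with real reads this is the corrected template; with toy strings the
     * length guards above skip every alignment and the sequence is empty */
    printf(">consensus\n%s\n", cns->sequence);
    free_consensus_data(cns);
    return 0;
}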
diff --git a/src/lib/ini.c b/src/lib/ini.c
new file mode 100644
index 0000000..2c278af
--- /dev/null
+++ b/src/lib/ini.c
@@ -0,0 +1,187 @@
+/* inih -- simple .INI file parser
+
+inih is released under the New BSD license (see LICENSE.txt). Go to the project
+home page for more info:
+
+https://github.com/benhoyt/inih
+
+*/
+
+#ifdef _MSC_VER
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+
+#include "ini.h"
+
+#if !INI_USE_STACK
+#include <stdlib.h>
+#endif
+
+#define MAX_SECTION 50
+#define MAX_NAME 50
+
+/* Strip whitespace chars off end of given string, in place. Return s. */
+static char* rstrip(char* s)
+{
+ char* p = s + strlen(s);
+ while (p > s && isspace((unsigned char)(*--p)))
+ *p = '\0';
+ return s;
+}
+
+/* Return pointer to first non-whitespace char in given string. */
+static char* lskip(const char* s)
+{
+ while (*s && isspace((unsigned char)(*s)))
+ s++;
+ return (char*)s;
+}
+
+/* Return pointer to first char c or ';' comment in given string, or pointer to
+ null at end of string if neither found. ';' must be prefixed by a whitespace
+ character to register as a comment. */
+static char* find_char_or_comment(const char* s, char c)
+{
+ int was_whitespace = 0;
+ while (*s && *s != c && !(was_whitespace && *s == ';')) {
+ was_whitespace = isspace((unsigned char)(*s));
+ s++;
+ }
+ return (char*)s;
+}
+
+/* Version of strncpy that ensures dest (size bytes) is null-terminated. */
+static char* strncpy0(char* dest, const char* src, size_t size)
+{
+ strncpy(dest, src, size);
+ dest[size - 1] = '\0';
+ return dest;
+}
+
+/* See documentation in header file. */
+int ini_parse_stream(ini_reader reader, void* stream, ini_handler handler,
+ void* user)
+{
+ /* Uses a fair bit of stack (use heap instead if you need to) */
+#if INI_USE_STACK
+ char line[INI_MAX_LINE];
+#else
+ char* line;
+#endif
+ char section[MAX_SECTION] = "";
+ char prev_name[MAX_NAME] = "";
+
+ char* start;
+ char* end;
+ char* name;
+ char* value;
+ int lineno = 0;
+ int error = 0;
+
+#if !INI_USE_STACK
+ line = (char*)malloc(INI_MAX_LINE);
+ if (!line) {
+ return -2;
+ }
+#endif
+
+ /* Scan through stream line by line */
+ while (reader(line, INI_MAX_LINE, stream) != NULL) {
+ lineno++;
+
+ start = line;
+#if INI_ALLOW_BOM
+ if (lineno == 1 && (unsigned char)start[0] == 0xEF &&
+ (unsigned char)start[1] == 0xBB &&
+ (unsigned char)start[2] == 0xBF) {
+ start += 3;
+ }
+#endif
+ start = lskip(rstrip(start));
+
+ if (*start == ';' || *start == '#') {
+ /* Per Python ConfigParser, allow '#' comments at start of line */
+ }
+#if INI_ALLOW_MULTILINE
+ else if (*prev_name && *start && start > line) {
+ /* Non-blank line with leading whitespace, treat as continuation
+ of previous name's value (as per Python ConfigParser). */
+ if (!handler(user, section, prev_name, start) && !error)
+ error = lineno;
+ }
+#endif
+ else if (*start == '[') {
+ /* A "[section]" line */
+ end = find_char_or_comment(start + 1, ']');
+ if (*end == ']') {
+ *end = '\0';
+ strncpy0(section, start + 1, sizeof(section));
+ *prev_name = '\0';
+ }
+ else if (!error) {
+ /* No ']' found on section line */
+ error = lineno;
+ }
+ }
+ else if (*start && *start != ';') {
+ /* Not a comment, must be a name[=:]value pair */
+ end = find_char_or_comment(start, '=');
+ if (*end != '=') {
+ end = find_char_or_comment(start, ':');
+ }
+ if (*end == '=' || *end == ':') {
+ *end = '\0';
+ name = rstrip(start);
+ value = lskip(end + 1);
+ end = find_char_or_comment(value, '\0');
+ if (*end == ';')
+ *end = '\0';
+ rstrip(value);
+
+ /* Valid name[=:]value pair found, call handler */
+ strncpy0(prev_name, name, sizeof(prev_name));
+ if (!handler(user, section, name, value) && !error)
+ error = lineno;
+ }
+ else if (!error) {
+ /* No '=' or ':' found on name[=:]value line */
+ error = lineno;
+ }
+ }
+
+#if INI_STOP_ON_FIRST_ERROR
+ if (error)
+ break;
+#endif
+ }
+
+#if !INI_USE_STACK
+ free(line);
+#endif
+
+ return error;
+}
+
+/* See documentation in header file. */
+int ini_parse_file(FILE* file, ini_handler handler, void* user)
+{
+ return ini_parse_stream((ini_reader)fgets, file, handler, user);
+}
+
+/* See documentation in header file. */
+int ini_parse(const char* filename, ini_handler handler, void* user)
+{
+ FILE* file;
+ int error;
+
+ file = fopen(filename, "r");
+ if (!file)
+ return -1;
+ error = ini_parse_file(file, handler, user);
+ fclose(file);
+ return error;
+}
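The parser above reports each name=value pair through a caller-supplied handler. A minimal, self-contained use of it might look like the sketch below; the handler and the file name "example.ini" are illustrative, and the handler signature follows the ini_handler typedef in ini.h.

/* Illustrative ini_parse() client: print every key it sees.
 * "example.ini" is a placeholder path. */
#include <stdio.h>
#include "ini.h"

static int dump_handler(void *user, const char *section,
                        const char *name, const char *value)
{
    (void) user;                /* no per-call state needed here */
    printf("[%s] %s = %s\n", section, name, value);
    return 1;                   /* non-zero: keep parsing (0 would record an error) */
}

int main(void)
{
    int err = ini_parse("example.ini", dump_handler, NULL);
    if (err < 0)
        fprintf(stderr, "could not open example.ini\n");
    else if (err > 0)
        fprintf(stderr, "parse error on line %d\n", err);
    return err ? 1 : 0;
}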
diff --git a/src/lib/kmer_lookup.c b/src/lib/kmer_lookup.c
new file mode 100755
index 0000000..e19e200
--- /dev/null
+++ b/src/lib/kmer_lookup.c
@@ -0,0 +1,589 @@
+/*
+ * =====================================================================================
+ *
+ * Filename: kmer_count.c
+ *
+ * Description:
+ *
+ * Version: 0.1
+ * Created: 07/20/2013 17:00:00
+ * Revision: none
+ * Compiler: gcc
+ *
+ * Author: Jason Chin,
+ * Company:
+ *
+ * =====================================================================================
+
+ #################################################################################$$
+ # Copyright (c) 2011-2014, Pacific Biosciences of California, Inc.
+ #
+ # All rights reserved.
+ #
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted (subject to the limitations in the
+ # disclaimer below) provided that the following conditions are met:
+ #
+ # * Redistributions of source code must retain the above copyright
+ # notice, this list of conditions and the following disclaimer.
+ #
+ # * Redistributions in binary form must reproduce the above
+ # copyright notice, this list of conditions and the following
+ # disclaimer in the documentation and/or other materials provided
+ # with the distribution.
+ #
+ # * Neither the name of Pacific Biosciences nor the names of its
+ # contributors may be used to endorse or promote products derived
+ # from this software without specific prior written permission.
+ #
+ # NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+ # GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC
+ # BIOSCIENCES AND ITS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+ # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ # DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR ITS
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ # SUCH DAMAGE.
+ #################################################################################$$
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <limits.h>
+#include "common.h"
+
+
+const unsigned int KMERMATCHINC = 10000;
+
+int compare_seq_coor(const void * a, const void * b) {
+ const seq_coor_t * arg1 = a;
+ const seq_coor_t * arg2 = b;
+ return (* arg1) - (* arg2);
+}
+
+
+kmer_lookup * allocate_kmer_lookup ( seq_coor_t size ) {
+ kmer_lookup * kl;
+
+ //printf("%lu is allocated for kmer lookup\n", size);
+ kl = (kmer_lookup *) malloc( size * sizeof(kmer_lookup) );
+ init_kmer_lookup( kl, size);
+ return kl;
+}
+
+void init_kmer_lookup ( kmer_lookup * kl, seq_coor_t size ) {
+ seq_coor_t i;
+ //printf("%lu is allocated for kmer lookup\n", size);
+ for (i=0; i<size; i++) {
+ kl[i].start = INT_MAX;
+ kl[i].last = INT_MAX;
+ kl[i].count = 0;
+ }
+}
+
+
+void free_kmer_lookup( kmer_lookup * ptr) {
+ free(ptr);
+}
+
+seq_array allocate_seq(seq_coor_t size) {
+ seq_array sa;
+ sa = (seq_array) malloc( size * sizeof(base) );
+ init_seq_array( sa, size);
+ return sa;
+}
+
+void init_seq_array( seq_array sa, seq_coor_t size) {
+ seq_coor_t i;
+ for (i=0; i<size; i++) {
+ sa[i] = 0xff;
+ }
+}
+
+void free_seq_array( seq_array sa) {
+ free(sa);
+}
+
+seq_addr_array allocate_seq_addr(seq_coor_t size) {
+ return (seq_addr_array) calloc( size, sizeof(seq_addr));
+}
+
+void free_seq_addr_array(seq_addr_array sda) {
+ free(sda);
+}
+
+seq_coor_t get_kmer_bitvector(seq_array sa, unsigned int K) {
+ unsigned int i;
+ seq_coor_t kmer_bv = 0;
+ seq_coor_t kmer_mask;
+
+ kmer_mask = 0;
+ for (i = 0; i < K; i++) {
+ kmer_mask <<= 2;
+ kmer_mask |= 0x00000003;
+ }
+
+ for (i = 0; i < K; i++) {
+ kmer_bv <<= 2;
+ kmer_bv |= (unsigned int) sa[i];
+ }
+
+ return kmer_bv;
+}
+
+void add_sequence ( seq_coor_t start,
+ unsigned int K,
+ char * seq,
+ seq_coor_t seq_len,
+ seq_addr_array sda,
+ seq_array sa,
+ kmer_lookup * lk ) {
+
+ seq_coor_t i;
+ seq_coor_t kmer_bv;
+ seq_coor_t kmer_mask;
+
+ kmer_mask = 0;
+ for (i = 0; i < K; i++) {
+ kmer_mask <<= 2;
+ kmer_mask |= 0x00000003;
+ }
+
+ for (i = 0; i < seq_len; i++) {
+ switch ( seq[i] ) {
+ case 'A':
+ sa[ start + i ] = 0;
+ break;
+ case 'C':
+ sa[ start + i ] = 1;
+ break;
+ case 'G':
+ sa[ start + i ] = 2;
+ break;
+ case 'T':
+ sa[ start + i ] = 3;
+ }
+ }
+ kmer_bv = get_kmer_bitvector( sa + start, K);
+ for (i = 0; i < seq_len - K; i++) {
+ //printf("%lu %lu\n", i, kmer_bv);
+ //printf("lk before init: %lu %lu %lu\n", kmer_bv, lk[kmer_bv].start, lk[kmer_bv].last);
+ if (lk[kmer_bv].start == INT_MAX) {
+ lk[kmer_bv].start = start + i;
+ lk[kmer_bv].last = start + i;
+ lk[kmer_bv].count += 1;
+ //printf("lk init: %lu %lu %lu\n", kmer_bv, lk[kmer_bv].start, lk[kmer_bv].last);
+ } else {
+ sda[ lk[kmer_bv].last ] = start + i;
+ lk[kmer_bv].count += 1;
+ lk[kmer_bv].last = start + i;
+ //printf("lk change: %lu %lu %lu\n", kmer_bv, lk[kmer_bv].start, lk[kmer_bv].last);
+ }
+ kmer_bv <<= 2;
+ kmer_bv |= sa[ start + i + K];
+ kmer_bv &= kmer_mask;
+ }
+}
+
+
+void mask_k_mer(seq_coor_t size, kmer_lookup * kl, seq_coor_t threshold) {
+ seq_coor_t i;
+ for (i=0; i<size; i++) {
+ if (kl[i].count > threshold) {
+ kl[i].start = INT_MAX;
+ kl[i].last = INT_MAX;
+ //kl[i].count = 0;
+ }
+ }
+}
+
+
+kmer_match * find_kmer_pos_for_seq( char * seq, seq_coor_t seq_len, unsigned int K,
+ seq_addr_array sda,
+ kmer_lookup * lk) {
+ seq_coor_t i;
+ seq_coor_t kmer_bv;
+ seq_coor_t kmer_mask;
+ seq_coor_t kmer_pos;
+ seq_coor_t next_kmer_pos;
+ unsigned int half_K;
+ seq_coor_t kmer_match_rtn_allocation_size = KMERMATCHINC;
+ kmer_match * kmer_match_rtn;
+ base * sa;
+
+ kmer_match_rtn = (kmer_match *) malloc( sizeof(kmer_match) );
+ kmer_match_rtn->count = 0;
+ kmer_match_rtn->query_pos = (seq_coor_t *) calloc( kmer_match_rtn_allocation_size, sizeof( seq_coor_t ) );
+ kmer_match_rtn->target_pos = (seq_coor_t *) calloc( kmer_match_rtn_allocation_size, sizeof( seq_coor_t ) );
+
+ sa = calloc( seq_len, sizeof(base) );
+
+ kmer_mask = 0;
+ for (i = 0; i < K; i++) {
+ kmer_mask <<= 2;
+ kmer_mask |= 0x00000003;
+ }
+
+ for (i = 0; i < seq_len; i++) {
+ switch ( seq[i] ) {
+ case 'A':
+ sa[ i ] = 0;
+ break;
+ case 'C':
+ sa[ i ] = 1;
+ break;
+ case 'G':
+ sa[ i ] = 2;
+ break;
+ case 'T':
+ sa[ i ] = 3;
+ }
+ }
+
+
+ kmer_bv = get_kmer_bitvector(sa, K);
+ half_K = K >> 1;
+ for (i = 0; i < seq_len - K; i += half_K) {
+ kmer_bv = get_kmer_bitvector(sa + i, K);
+ if (lk[kmer_bv].start == INT_MAX) { //for high count k-mers
+ continue;
+ }
+ kmer_pos = lk[ kmer_bv ].start;
+ next_kmer_pos = sda[ kmer_pos ];
+ kmer_match_rtn->query_pos[ kmer_match_rtn->count ] = i;
+ kmer_match_rtn->target_pos[ kmer_match_rtn->count ] = kmer_pos;
+ kmer_match_rtn->count += 1;
+ if (kmer_match_rtn->count > kmer_match_rtn_allocation_size - 1000) {
+ kmer_match_rtn_allocation_size += KMERMATCHINC;
+ kmer_match_rtn->query_pos = (seq_coor_t *) realloc( kmer_match_rtn->query_pos,
+ kmer_match_rtn_allocation_size * sizeof(seq_coor_t) );
+ kmer_match_rtn->target_pos = (seq_coor_t *) realloc( kmer_match_rtn->target_pos,
+ kmer_match_rtn_allocation_size * sizeof(seq_coor_t) );
+ }
+ while ( next_kmer_pos > kmer_pos ){
+ kmer_pos = next_kmer_pos;
+ next_kmer_pos = sda[ kmer_pos ];
+ kmer_match_rtn->query_pos[ kmer_match_rtn->count ] = i;
+ kmer_match_rtn->target_pos[ kmer_match_rtn->count ] = kmer_pos;
+ kmer_match_rtn->count += 1;
+ if (kmer_match_rtn->count > kmer_match_rtn_allocation_size - 1000) {
+ kmer_match_rtn_allocation_size += KMERMATCHINC;
+ kmer_match_rtn->query_pos = (seq_coor_t *) realloc( kmer_match_rtn->query_pos,
+ kmer_match_rtn_allocation_size * sizeof(seq_coor_t) );
+ kmer_match_rtn->target_pos = (seq_coor_t *) realloc( kmer_match_rtn->target_pos,
+ kmer_match_rtn_allocation_size * sizeof(seq_coor_t) );
+ }
+ }
+ }
+ free(sa);
+ return kmer_match_rtn;
+}
+
+void free_kmer_match( kmer_match * ptr) {
+ free(ptr->query_pos);
+ free(ptr->target_pos);
+ free(ptr);
+}
+
+aln_range* find_best_aln_range(kmer_match * km_ptr,
+ seq_coor_t K,
+ seq_coor_t bin_size,
+ seq_coor_t count_th) {
+ seq_coor_t i;
+ seq_coor_t j;
+ seq_coor_t q_min, q_max, t_min, t_max;
+ seq_coor_t * d_count;
+ seq_coor_t * q_coor;
+ seq_coor_t * t_coor;
+ aln_range * arange;
+
+ long int d, d_min, d_max;
+ long int cur_score;
+ long int max_score;
+ long int max_k_mer_count;
+ long int max_k_mer_bin;
+ seq_coor_t cur_start;
+
+ arange = calloc(1 , sizeof(aln_range));
+
+ q_min = INT_MAX;
+ q_max = 0;
+ t_min = INT_MAX;
+ t_max = 0;
+
+ d_min = INT_MAX;
+ d_max = LONG_MIN;
+
+ for (i = 0; i < km_ptr->count; i++ ) {
+ if ( km_ptr -> query_pos[i] < q_min) {
+ q_min = km_ptr->query_pos[i];
+ }
+ if ( km_ptr -> query_pos[i] > q_max) {
+ q_max = km_ptr->query_pos[i];
+ }
+ if ( km_ptr -> target_pos[i] < t_min) {
+ t_min = km_ptr->target_pos[i];
+ }
+ if ( km_ptr -> target_pos[i] > t_max) {
+ t_max = km_ptr->target_pos[i];
+ }
+ d = (long int) km_ptr->query_pos[i] - (long int) km_ptr->target_pos[i];
+ if ( d < d_min ) {
+ d_min = d;
+ }
+ if ( d > d_max ) {
+ d_max = d;
+ }
+ }
+
+ //printf("%lu %ld %ld\n" , km_ptr->count, d_min, d_max);
+ d_count = calloc( (d_max - d_min)/bin_size + 1, sizeof(seq_coor_t) );
+ q_coor = calloc( km_ptr->count, sizeof(seq_coor_t) );
+ t_coor = calloc( km_ptr->count, sizeof(seq_coor_t) );
+
+ for (i = 0; i < km_ptr->count; i++ ) {
+ d = (long int) (km_ptr->query_pos[i]) - (long int) (km_ptr->target_pos[i]);
+ d_count[ (d - d_min)/ (long int) bin_size ] += 1;
+ q_coor[i] = INT_MAX;
+ t_coor[i] = INT_MAX;
+ }
+
+ j = 0;
+ max_k_mer_count = 0;
+ max_k_mer_bin = INT_MAX;
+ for (i = 0; i < km_ptr->count; i++ ) {
+ d = (long int) (km_ptr->query_pos[i]) - (long int) (km_ptr->target_pos[i]);
+ if ( d_count[ (d - d_min)/ (long int) bin_size ] > max_k_mer_count) {
+ max_k_mer_count = d_count[ (d - d_min)/ (long int) bin_size ];
+ max_k_mer_bin = (d - d_min)/ (long int) bin_size;
+ }
+ }
+ //printf("k_mer: %lu %lu\n" , max_k_mer_count, max_k_mer_bin);
+
+ if ( max_k_mer_bin != INT_MAX && max_k_mer_count > count_th ) {
+ for (i = 0; i < km_ptr->count; i++ ) {
+ d = (long int) (km_ptr->query_pos[i]) - (long int) (km_ptr->target_pos[i]);
+ if ( abs( ( (d - d_min)/ (long int) bin_size ) - max_k_mer_bin ) > 5 ) {
+ continue;
+ }
+ if (d_count[ (d - d_min)/ (long int) bin_size ] > count_th) {
+ q_coor[j] = km_ptr->query_pos[i];
+ t_coor[j] = km_ptr->target_pos[i];
+ //printf("d_count: %lu %lu\n" ,i, d_count[(d - d_min)/ (long int) bin_size]);
+ //printf("coor: %lu %lu\n" , q_coor[j], t_coor[j]);
+ j ++;
+ }
+ }
+ }
+
+ if (j > 1) {
+ arange->s1 = q_coor[0];
+ arange->e1 = q_coor[0];
+ arange->s2 = t_coor[0];
+ arange->e2 = t_coor[0];
+ arange->score = 0;
+
+ max_score = 0;
+ cur_score = 0;
+ cur_start = 0;
+
+ for (i = 1; i < j; i++) {
+ cur_score += 32 - (q_coor[i] - q_coor[i-1]);
+ //printf("deltaD, %lu %ld\n", q_coor[i] - q_coor[i-1], cur_score);
+ if (cur_score < 0) {
+ cur_score = 0;
+ cur_start = i;
+ } else if (cur_score > max_score) {
+ arange->s1 = q_coor[cur_start];
+ arange->s2 = t_coor[cur_start];
+ arange->e1 = q_coor[i];
+ arange->e2 = t_coor[i];
+ max_score = cur_score;
+ arange->score = max_score;
+ //printf("%lu %lu %lu %lu\n", arange.s1, arange.e1, arange.s2, arange.e2);
+ }
+ }
+
+ } else {
+ arange->s1 = 0;
+ arange->e1 = 0;
+ arange->s2 = 0;
+ arange->e2 = 0;
+ arange->score = 0;
+ }
+
+ // printf("free\n");
+
+ free(d_count);
+ free(q_coor);
+ free(t_coor);
+ return arange;
+}
+
+aln_range* find_best_aln_range2(kmer_match * km_ptr,
+ seq_coor_t K,
+ seq_coor_t bin_width,
+ seq_coor_t count_th) {
+
+ seq_coor_t * d_coor;
+ seq_coor_t * hit_score;
+ seq_coor_t * hit_count;
+ seq_coor_t * last_hit;
+ seq_coor_t max_q, max_t;
+ seq_coor_t s, e, max_s, max_e, max_span, d_s, d_e, delta, d_len;
+ seq_coor_t px, py, cx, cy;
+ seq_coor_t max_hit_idx;
+ seq_coor_t max_hit_score, max_hit_count;
+ seq_coor_t i, j;
+ seq_coor_t candidate_idx, max_d, d;
+
+ aln_range * arange;
+
+ arange = calloc(1 , sizeof(aln_range));
+
+ d_coor = calloc( km_ptr->count, sizeof(seq_coor_t) );
+
+ max_q = -1;
+ max_t = -1;
+
+ for (i = 0; i < km_ptr->count; i++ ) {
+ d_coor[i] = km_ptr->query_pos[i] - km_ptr->target_pos[i];
+ max_q = max_q > km_ptr->query_pos[i] ? max_q : km_ptr->query_pos[i];
+ max_t = max_t > km_ptr->target_pos[i] ? max_t : km_ptr->target_pos[i];
+
+ }
+
+ qsort(d_coor, km_ptr->count, sizeof(seq_coor_t), compare_seq_coor);
+
+
+ s = 0;
+ e = 0;
+ max_s = -1;
+ max_e = -1;
+ max_span = -1;
+ delta = (long int) ( 0.05 * ( max_q + max_t ) );
+ d_len = km_ptr->count;
+ d_s = -1;
+ d_e = -1;
+ while (1) {
+ d_s = d_coor[s];
+ d_e = d_coor[e];
+ while (d_e < d_s + delta && e < d_len-1) {
+ e += 1;
+ d_e = d_coor[e];
+ }
+ if ( max_span == -1 || e - s > max_span ) {
+ max_span = e - s;
+ max_s = s;
+ max_e = e;
+ }
+ s += 1;
+ if (s == d_len || e == d_len) {
+ break;
+ }
+ }
+
+ if (max_s == -1 || max_e == -1 || max_e - max_s < 32) {
+ arange->s1 = 0;
+ arange->e1 = 0;
+ arange->s2 = 0;
+ arange->e2 = 0;
+ arange->score = 0;
+ free(d_coor);
+ return arange;
+ }
+
+ last_hit = calloc( km_ptr->count, sizeof(seq_coor_t) );
+ hit_score = calloc( km_ptr->count, sizeof(seq_coor_t) );
+ hit_count = calloc( km_ptr->count, sizeof(seq_coor_t) );
+
+ for (i = 0; i < km_ptr->count; i++ ) {
+ last_hit[i] = -1;
+ hit_score[i] = 0;
+ hit_count[i] = 0;
+ }
+ max_hit_idx = -1;
+ max_hit_score = 0;
+ for (i = 0; i < km_ptr->count; i ++) {
+ cx = km_ptr->query_pos[i];
+ cy = km_ptr->target_pos[i];
+ d = cx - cy;
+ if ( d < d_coor[max_s] || d > d_coor[max_e] ) continue;
+
+ j = i - 1;
+ candidate_idx = -1;
+ max_d = 65535;
+ while (1) {
+ if ( j < 0 ) break;
+ px = km_ptr->query_pos[j];
+ py = km_ptr->target_pos[j];
+ d = px - py;
+ if ( d < d_coor[max_s] || d > d_coor[max_e] ) {
+ j--;
+ continue;
+ }
+ if (cx - px > 320) break; // this constant controls how large an alignment gap is still considered
+ if (cy > py && cx - px + cy - py < max_d && cy - py <= 320 ) {
+ max_d = cx - px + cy - py;
+ candidate_idx = j;
+ }
+ j--;
+ }
+ if (candidate_idx != -1) {
+ last_hit[i] = candidate_idx;
+ hit_score[i] = hit_score[candidate_idx] + (64 - max_d);
+ hit_count[i] = hit_count[candidate_idx] + 1;
+ if (hit_score[i] < 0) {
+ hit_score[i] = 0;
+ hit_count[i] = 0;
+ }
+ } else {
+ hit_score[i] = 0;
+ hit_count[i] = 0;
+ }
+ if (hit_score[i] > max_hit_score) {
+ max_hit_score = hit_score[i];
+ max_hit_count = hit_count[i];
+ max_hit_idx = i;
+ }
+
+ }
+ if (max_hit_idx == -1) {
+ arange->s1 = 0;
+ arange->e1 = 0;
+ arange->s2 = 0;
+ arange->e2 = 0;
+ arange->score = 0;
+ free(d_coor);
+ free(last_hit);
+ free(hit_score);
+ free(hit_count);
+ return arange;
+ }
+
+ arange->score = max_hit_count + 1;
+ arange->e1 = km_ptr->query_pos[max_hit_idx];
+ arange->e2 = km_ptr->target_pos[max_hit_idx];
+ i = max_hit_idx;
+ while (last_hit[i] != -1) {
+ i = last_hit[i];
+ }
+ arange->s1 = km_ptr->query_pos[i];
+ arange->s2 = km_ptr->target_pos[i];
+
+ free(d_coor);
+ free(last_hit);
+ free(hit_score);
+ free(hit_count);
+ return arange;
+}
+
+void free_aln_range( aln_range * arange) {
+ free(arange);
+}
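add_sequence() and get_kmer_bitvector() above pack each base into two bits (A=0, C=1, G=2, T=3) and keep a rolling k-mer bit-vector masked to the low 2*K bits. The standalone sketch below reproduces just that arithmetic; base2bits is a hypothetical helper and the sequence is a toy example, independent of the seq_array/kmer_lookup structures used by the real code.

/* Standalone illustration of the 2-bit k-mer packing used above;
 * it only shows the arithmetic, not the lookup-table bookkeeping. */
#include <stdio.h>

static unsigned int base2bits(char b) {
    switch (b) {
        case 'A': return 0;
        case 'C': return 1;
        case 'G': return 2;
        case 'T': return 3;
        default:  return 0;   /* the real code marks non-ACGT bases as 0xff instead */
    }
}

int main(void) {
    const char *seq = "ACGTACGT";
    const unsigned int K = 4;
    unsigned int mask = (1u << (2 * K)) - 1;   /* same value the K-iteration loop builds */
    unsigned int kmer = 0;
    unsigned int i;

    /* seed with the first K bases, then roll one base at a time */
    for (i = 0; i < K; i++)
        kmer = (kmer << 2) | base2bits(seq[i]);
    printf("k-mer %.4s -> %u\n", seq, kmer);                  /* ACGT -> 27 */

    for (i = K; seq[i] != '\0'; i++) {
        kmer = ((kmer << 2) | base2bits(seq[i])) & mask;
        printf("k-mer %.4s -> %u\n", seq + i - K + 1, kmer);  /* CGTA -> 108, ... */
    }
    return 0;
}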
diff --git a/src/lib/paf.c b/src/lib/paf.c
new file mode 100644
index 0000000..299d741
--- /dev/null
+++ b/src/lib/paf.c
@@ -0,0 +1,92 @@
+/* The MIT License
+
+ Copyright (c) 2008, 2009, 2011 Attractive Chaos <attractor at live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+#include <zlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "paf.h"
+
+#include "kseq.h"
+KSTREAM_INIT(gzFile, gzread, 0x10000)
+
+paf_file_t *paf_open(const char *fn)
+{
+ kstream_t *ks;
+ gzFile fp;
+ paf_file_t *pf;
+ fp = fn && strcmp(fn, "-")? gzopen(fn, "r") : gzdopen(fileno(stdin), "r");
+ if (fp == 0) return 0;
+ ks = ks_init(fp);
+ pf = (paf_file_t*)calloc(1, sizeof(paf_file_t));
+ pf->fp = ks;
+ return pf;
+}
+
+int paf_close(paf_file_t *pf)
+{
+ kstream_t *ks;
+ if (pf == 0) return 0;
+ free(pf->buf.s);
+ ks = (kstream_t*)pf->fp;
+ gzclose(ks->f);
+ ks_destroy(ks);
+ free(pf);
+ return 0;
+}
+
+int paf_parse(int l, char *s, paf_rec_t *pr) // s must be NULL terminated
+{ // on return: <0 for failure; 0 for success; >0 for filtered
+ char *q, *r;
+ int i, t;
+ for (i = t = 0, q = s; i <= l; ++i) {
+ if (i < l && s[i] != '\t') continue;
+ s[i] = 0;
+ if (t == 0) pr->qn = q;
+ else if (t == 1) pr->ql = strtol(q, &r, 10);
+ else if (t == 2) pr->qs = strtol(q, &r, 10);
+ else if (t == 3) pr->qe = strtol(q, &r, 10);
+ else if (t == 4) pr->rev = (*q == '-');
+ else if (t == 5) pr->tn = q;
+ else if (t == 6) pr->tl = strtol(q, &r, 10);
+ else if (t == 7) pr->ts = strtol(q, &r, 10);
+ else if (t == 8) pr->te = strtol(q, &r, 10);
+ else if (t == 9) pr->ml = strtol(q, &r, 10);
+ else if (t == 10) pr->bl = strtol(q, &r, 10);
+ ++t, q = i < l? &s[i+1] : 0;
+ }
+ if (t < 10) return -1;
+ return 0;
+}
+
+int paf_read(paf_file_t *pf, paf_rec_t *r)
+{
+ int ret, dret;
+file_read_more:
+ ret = ks_getuntil((kstream_t*)pf->fp, KS_SEP_LINE, &pf->buf, &dret);
+ if (ret < 0) return ret;
+ ret = paf_parse(pf->buf.l, pf->buf.s, r);
+ if (ret < 0) goto file_read_more;
+ return ret;
+}
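paf_parse() above splits the first eleven tab-separated PAF columns into a paf_rec_t. The sketch below shows the usual open/read/close loop around it; the field names are taken from the assignments in paf_parse(), the integer casts are only for printing, and "overlaps.paf" is a placeholder path.

/* Minimal PAF reader built on paf_open/paf_read/paf_close above.
 * "overlaps.paf" is a placeholder filename. */
#include <stdio.h>
#include "paf.h"

int main(void)
{
    paf_file_t *fp = paf_open("overlaps.paf");
    if (fp == 0) {
        fprintf(stderr, "failed to open overlaps.paf\n");
        return 1;
    }
    paf_rec_t r;
    while (paf_read(fp, &r) >= 0) {             /* returns <0 at end of file or on error */
        printf("%s:%d-%d %c %s:%d-%d\n",
               r.qn, (int) r.qs, (int) r.qe,
               r.rev ? '-' : '+',
               r.tn, (int) r.ts, (int) r.te);
    }
    paf_close(fp);
    return 0;
}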
diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt
new file mode 100644
index 0000000..8947204
--- /dev/null
+++ b/src/test/CMakeLists.txt
@@ -0,0 +1,2 @@
+cmake_minimum_required(VERSION 3.2)
+
diff --git a/src/test/LAInterface_consensus_test.cpp b/src/test/LAInterface_consensus_test.cpp
new file mode 100644
index 0000000..9b0a90e
--- /dev/null
+++ b/src/test/LAInterface_consensus_test.cpp
@@ -0,0 +1,146 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include <iostream>
+#include <tuple>
+#include <string>
+#include <algorithm>
+extern "C" {
+#include "common.h"
+}
+
+#define LAST_READ_SYMBOL '$'
+
+static int ORDER(const void *l, const void *r) {
+ int x = *((int32 *) l);
+ int y = *((int32 *) r);
+ return (x - y);
+}
+
+int main(int argc, char *argv[]) {
+ LAInterface la;
+ std::cout << "hello" << std::endl;
+ Read *test_read;
+
+ la.openDB("G");
+ std::cout<<"# Reads:" << la.getReadNumber() << std::endl;
+
+ la.showRead(1, 3); //show read [1,3)
+
+ test_read = la.getRead(0); //get read 0
+ test_read->showRead(); // show read 0
+
+ la.openAlignmentFile("G.1.las");
+ la.showAlignment(0, 2); // show alignments of read [0,2)
+
+ std::cout<<"# Alignments:" << la.getAlignmentNumber() << std::endl;
+
+ la.resetAlignment();
+ std::vector<int> res;
+ la.getAlignmentB(res, 1); //get alignment for read 1
+ for (auto i:res)
+ printf("%d ", i);
+ printf("\n");
+
+ std::vector<LOverlap *> res1;
+ la.resetAlignment();
+ la.getOverlap(res1, 3, 5); // get alignment(overlap) for reads [3,5)
+
+ for (auto i:res1)
+ i->show();
+ printf("\n");
+
+ std::vector<LAlignment *> res2;
+ la.resetAlignment();
+ //la.getAlignment(res2, 0, 3);// get alignment for reads [0,3)
+
+ la.getAlignment(res2, 0, 1);
+
+
+ int seq_count = res2.size();
+ align_tags_t ** tags_list;
+ tags_list = (align_tags_t **) calloc( seq_count+1, sizeof(align_tags_t *) );
+
+
+ test_read = la.getRead(0); //get read 0
+ std::string base_structure = test_read->bases;
+
+ std::transform(base_structure.begin(), base_structure.end(),base_structure.begin(), ::toupper);
+
+ aln_range * arange;
+ arange = (aln_range*) calloc(1 , sizeof(aln_range));
+ arange->s1 = 0;
+ arange->s2 = 0;
+ arange->e1 = base_structure.size();
+ arange->e2 = base_structure.size();
+
+ char * seq = (char *) malloc(base_structure.size()* sizeof(char));
+ strcpy(seq, base_structure.c_str());
+
+ tags_list[0] = get_align_tags( seq,
+ seq,
+ strlen(seq),
+ arange, 0, 0);
+
+
+ for (int i = 0; i < seq_count; i ++) {
+ res2[i]->show();
+
+ la.recoverAlignment(res2[i]);
+ std::pair<std::string, std::string> alignment = la.getAlignmentTags(res2[i]);
+
+ //alignment.first.erase (std::remove(alignment.first.begin(), alignment.first.end(), '-'), alignment.first.end());
+ //alignment.second.erase (std::remove(alignment.second.begin(), alignment.second.end(), '-'), alignment.second.end());
+
+ //std::cout << alignment.first.size() <<alignment.first << std::endl;
+ //std::cout << alignment.second.size() <<alignment.second << std::endl;
+
+ char * t_aln_str = (char *) malloc(alignment.first.size()* sizeof(char));
+ char * q_aln_str = (char *) malloc(alignment.second.size()* sizeof(char));
+
+ strcpy(q_aln_str, alignment.second.c_str());
+ strcpy(t_aln_str, alignment.first.c_str());
+
+ seq_coor_t aln_str_size = strlen(q_aln_str);
+ aln_range * arange;
+ arange = (aln_range*) calloc(1 , sizeof(aln_range));
+ arange->s1 = res2[i]->bbpos;
+ arange->e1 = res2[i]->bepos;
+ arange->s2 = res2[i]->abpos;
+ arange->e2 = res2[i]->aepos;
+
+ tags_list[i+1] = get_align_tags( q_aln_str,
+ t_aln_str,
+ aln_str_size,
+ arange, (unsigned int)i + 1, 0);
+
+ free(q_aln_str);
+ free(t_aln_str);
+ free_aln_range(arange);
+
+ }
+
+ //print consensus
+
+ consensus_data * consensus;
+ consensus = get_cns_from_align_tags( tags_list, seq_count+1, strlen(seq), 6 );
+
+ printf("Consensus:%s\n", consensus->sequence);
+
+ free_consensus_data(consensus);
+ for (int i = 0; i <seq_count + 1; i++)
+ free_align_tags(tags_list[i]);
+
+ la.closeDB(); //close database
+ return 0;
+}
\ No newline at end of file
diff --git a/src/test/LAInterface_test.cpp b/src/test/LAInterface_test.cpp
new file mode 100644
index 0000000..09b9b7a
--- /dev/null
+++ b/src/test/LAInterface_test.cpp
@@ -0,0 +1,88 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include "gtest/gtest.h"
+
+#include <iostream>
+
+
+#define LAST_READ_SYMBOL '$'
+
+static int ORDER(const void *l, const void *r) {
+ int x = *((int32 *) l);
+ int y = *((int32 *) r);
+ return (x - y);
+}
+
+int main(int argc, char *argv[]) {
+ LAInterface la;
+ std::cout << "hello" << std::endl;
+ Read *test_read;
+
+ la.openDB("G");
+ std::cout<<"# Reads:" << la.getReadNumber() << std::endl;
+
+ la.showRead(1, 3); //show read [1,3)
+
+ test_read = la.getRead(0); //get read 0
+ test_read->showRead(); // show read 0
+
+ la.openAlignmentFile("G.1.las");
+ la.showAlignment(0, 2); // show alignments of read [0,2)
+
+ std::cout<<"# Alignments:" << la.getAlignmentNumber() << std::endl;
+
+ la.resetAlignment();
+ std::vector<int> res;
+ la.getAlignmentB(res, 1); //get alignment for read 1
+ for (auto i:res)
+ printf("%d ", i);
+ printf("\n");
+
+ std::vector<LOverlap *> res1;
+ la.resetAlignment();
+ la.getOverlap(res1, 3, 5); // get alignment(overlap) for reads [3,5)
+
+ for (auto i:res1)
+ i->show();
+ printf("\n");
+
+ std::vector<LAlignment *> res2;
+ la.resetAlignment();
+ //la.getAlignment(res2, 0, 3);// get alignment for reads [0,3)
+
+ la.getAlignment(res2, 0, 2);
+
+
+ for (int i = 0; i < 5; i ++) {
+ res2[i]->show();
+ /*int tlen = res2[i]->tlen;
+ int *trace = (int *) res2[i]->trace;
+ int u;
+ printf(" ");
+
+ for (u = 0; u < tlen; u++) {
+ printf("%d,", (int) trace[u]);
+ }
+ printf("\n");
+ la.showAlignmentTags(res2[i]);*/
+
+ /*printf("\n");
+ for (int j = 0; j < res2[i]->trace_pts_len; j++)
+ printf(" %d", res2[i]->trace_pts[j]);
+ printf("\n");*/
+
+ }
+ la.closeDB(); //close database
+ return 0;
+}
\ No newline at end of file
diff --git a/src/test/LAInterface_test1.cpp b/src/test/LAInterface_test1.cpp
new file mode 100644
index 0000000..c28c02f
--- /dev/null
+++ b/src/test/LAInterface_test1.cpp
@@ -0,0 +1,20 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include "gtest/gtest.h"
+
+#include <iostream>
+
+TEST(SimpleTest, Plus) {
+ ASSERT_EQ(1+1, 2);
+}
\ No newline at end of file
diff --git a/src/test/LAInterface_test_2DB.cpp b/src/test/LAInterface_test_2DB.cpp
new file mode 100644
index 0000000..f65b8fe
--- /dev/null
+++ b/src/test/LAInterface_test_2DB.cpp
@@ -0,0 +1,90 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "DB.h"
+#include "align.h"
+#include "LAInterface.h"
+
+#include <iostream>
+
+
+#define LAST_READ_SYMBOL '$'
+
+static int ORDER(const void *l, const void *r) {
+ int x = *((int32 *) l);
+ int y = *((int32 *) r);
+ return (x - y);
+}
+
+int main(int argc, char *argv[]) {
+ LAInterface la;
+ std::cout << "hello" << std::endl;
+ Read *test_read;
+
+ la.openDB2("G", "G");
+ std::cout<<"# Reads:" << la.getReadNumber() << std::endl;
+ std::cout<<"# Reads:" << la.getReadNumber2() << std::endl;
+
+ la.showRead(1, 3); //show read [1,3)
+
+ test_read = la.getRead(0); //get read 0
+ test_read->showRead(); // show read 0
+
+ test_read = la.getRead2(1); //get read 1 from the second database
+ test_read->showRead(); // show read 1
+
+ la.openAlignmentFile("G.1.las");
+ la.showAlignment(0, 2); // show alignments of read [0,2)
+
+ std::cout<<"# Alignments:" << la.getAlignmentNumber() << std::endl;
+
+ la.resetAlignment();
+ std::vector<int> res;
+ la.getAlignmentB(res, 1); //get alignment for read 1
+ for (auto i:res)
+ printf("%d ", i);
+ printf("\n");
+
+ std::vector<LOverlap *> res1;
+ la.resetAlignment();
+ la.getOverlap(res1, 3, 5); // get alignment(overlap) for reads [3,5)
+
+ for (auto i:res1)
+ i->show();
+ printf("\n");
+
+ std::vector<LAlignment *> res2;
+ la.resetAlignment();
+ //la.getAlignment(res2, 0, 3);// get alignment for reads [0,3)
+
+ la.getAlignment(res2, 0, 2);
+
+
+ for (int i = 0; i < 5; i ++) {
+ res2[i]->show();
+ /*int tlen = res2[i]->tlen;
+ int *trace = (int *) res2[i]->trace;
+ int u;
+ printf(" ");
+
+ for (u = 0; u < tlen; u++) {
+ printf("%d,", (int) trace[u]);
+ }
+ printf("\n");
+ la.showAlignmentTags(res2[i]);*/
+
+ /*printf("\n");
+ for (int j = 0; j < res2[i]->trace_pts_len; j++)
+ printf(" %d", res2[i]->trace_pts[j]);
+ printf("\n");*/
+
+ }
+ la.closeDB(); //close database
+ return 0;
+}
\ No newline at end of file
diff --git a/src/test/omp_test.c b/src/test/omp_test.c
new file mode 100644
index 0000000..a127905
--- /dev/null
+++ b/src/test/omp_test.c
@@ -0,0 +1,31 @@
+//
+// Created by Fei Xia on 10/25/15.
+//
+
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+int main (int argc, char *argv[])
+{
+ int nthreads, tid;
+
+/* Fork a team of threads giving them their own copies of variables */
+#pragma omp parallel private(nthreads, tid)
+ {
+
+ /* Obtain thread number */
+ tid = omp_get_thread_num();
+ printf("Hello World from thread = %d\n", tid);
+
+ /* Only master thread does this */
+ if (tid == 0)
+ {
+ nthreads = omp_get_num_threads();
+ printf("Number of threads = %d\n", nthreads);
+ }
+
+ } /* All threads join master thread and disband */
+
+}
diff --git a/utils/build.sh b/utils/build.sh
new file mode 100755
index 0000000..0f1ea9c
--- /dev/null
+++ b/utils/build.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+pwd=$PWD
+
+cd $pwd/thirdparty/DAZZ_DB
+make -j 8
+
+cd $pwd/thirdparty/DALIGNER
+make -j 8
+
+cd $pwd/thirdparty/DASCRUBBER
+make -j 8
+
+cd $pwd/thirdparty/DEXTRACTOR
+make -j 8
+
+cd $pwd
+mkdir build
+cd $pwd/build
+cmake .. -DCMAKE_C_COMPILER=gcc-4.8 -DCMAKE_CXX_COMPILER=g++-4.8
+make -j 8
+
+exit $?
diff --git a/utils/clean.sh b/utils/clean.sh
new file mode 100755
index 0000000..4fcde16
--- /dev/null
+++ b/utils/clean.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+pwd=$PWD
+cd $pwd/thirdparty/DAZZ_DB
+make clean
+
+cd $pwd/thirdparty/DALIGNER
+make clean
+
+cd $pwd/thirdparty/DASCRUBBER
+make clean
+
+cd $pwd/thirdparty/DEXTRACTOR
+make clean
+
+cd $pwd
+mkdir build
+cd $pwd/build
+cmake .. -DCMAKE_C_COMPILER=gcc-4.9 -DCMAKE_CXX_COMPILER=g++-4.9
+make clean
+
+exit $?
\ No newline at end of file
diff --git a/utils/compile.sh b/utils/compile.sh
new file mode 100755
index 0000000..e2a86cf
--- /dev/null
+++ b/utils/compile.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+pwd=/io
+
+cd $pwd
+rm -rf build
+mkdir build
+cd $pwd/build
+
+
+cmake .. -DCMAKE_C_COMPILER=gcc-4.8 -DCMAKE_CXX_COMPILER=g++-4.8
+make
+
+exit $?
diff --git a/utils/nominal.ini b/utils/nominal.ini
new file mode 100644
index 0000000..142af7b
--- /dev/null
+++ b/utils/nominal.ini
@@ -0,0 +1,31 @@
+
+[filter]
+length_threshold = 1000;
+quality_threshold = 0.23;
+n_iter = 3; // filter iteration
+aln_threshold = 1000;
+min_cov = 5;
+cut_off = 300;
+theta = 300;
+use_qv = true;
+
+[running]
+n_proc = 12;
+
+[draft]
+min_cov = 10;
+trim = 200;
+edge_safe = 100;
+tspace = 900;
+step = 50;
+
+
+[consensus]
+min_length = 4000;
+trim_end = 200;
+best_n = 1;
+quality_threshold = 0.23;
+
+[layout]
+hinge_slack = 1000
+min_connected_component_size = 8
diff --git a/utils/run.sh b/utils/run.sh
new file mode 100755
index 0000000..21e2c0a
--- /dev/null
+++ b/utils/run.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+echo "Setting stuff up"
+cur_fol=$PWD
+cd ~/AwesomeAssembler && source utils/setup.sh
+cd $cur_fol
+
+
+echo "Running filter"
+Reads_filter --las $1.las --db $1 --config ~/AwesomeAssembler/utils/nominal.ini -x $1
+
+echo "Running hinging"
+
+hinging --las $1.las --db $1 --config ~/AwesomeAssembler/utils/nominal.ini -o $1.$USER -x $1
+
+echo "Running Visualise"
+
+python ~/AwesomeAssembler/scripts/Visualise_graph.py $1.edges.hinges hinge_list.txt
+
+echo "Running Condense"
+
+python ~/AwesomeAssembler/scripts/condense_graph.py $1.edges.hinges
+
+echo "Putting ground truth and condensing"
+if [ -e "$1.mapping.1.json" ]
+ then
+ python ~/AwesomeAssembler/scripts/condense_graph_with_aln_json.py $1.edges.hinges $1.mapping.1.json
+fi
\ No newline at end of file
diff --git a/utils/setup.sh b/utils/setup.sh
new file mode 100755
index 0000000..b65630e
--- /dev/null
+++ b/utils/setup.sh
@@ -0,0 +1,7 @@
+#DIR=`dirname ${0}`
+PPWD=$PWD
+#echo $PWD
+#echo $DIR
+export PATH="$PATH:$PPWD/thirdparty/DALIGNER:$PPWD/thirdparty/DAZZ_DB:$PPWD/thirdparty/DEXTRACTOR/:$PPWD/thirdparty/DASCRUBBER"
+export PATH="$PATH:$PPWD/scripts"
+export PATH="$PATH:$PPWD/build/bin/consensus:$PPWD/build/bin/filter:$PPWD/build/bin/layout"
\ No newline at end of file
diff --git a/utils/test.sh b/utils/test.sh
new file mode 100755
index 0000000..a365a31
--- /dev/null
+++ b/utils/test.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+pwd=$PWD
+cd $pwd/DAZZ_DB
+make clean && make -j 8
+
+cd $pwd/DALIGNER
+make clean && make -j 8
+
+cd $pwd/DASCRUBBER
+make clean && make -j 8
+
+cd $pwd
+source setup.sh
+
+mkdir $pwd/data
+cd $pwd/data
+
+#rm -rf *
+rm G.*
+simulator 1.0 -c50. >G.fasta
+fasta2DB G G.fasta
+DBsplit -s20 G
+HPCdaligner G | csh -v
+rm G.*.G.*.las
+LAmerge G G.*.las
+DASqv -c50 G G.las
+
+touch log.txt
+LAInterface_test>log.txt
+Consensus_test>log.txt
+LAInterface_test_2DB>log.txt
diff --git a/utils/update.sh b/utils/update.sh
new file mode 100755
index 0000000..1fa5e68
--- /dev/null
+++ b/utils/update.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+if [ "$2" == "scripts" ];
+then rsync -rizP --delete --exclude '.*' --exclude '*.pyc' --exclude 'figures' scripts/ $1 at shannon.stanford.edu:/home/$1/AwesomeAssembler/scripts
+fi
+
+if [ "$2" == "utils" ];
+then rsync -rizP --delete --exclude '.*' --exclude '*.pyc' --exclude 'figures' utils/ $1 at shannon.stanford.edu:/home/$1/AwesomeAssembler/utils
+fi
+
+if [ "$2" == "push" ];
+then rsync -rizP --delete --exclude '.*' --exclude 'build' src/ $1 at shannon.stanford.edu:/home/$1/AwesomeAssembler/src
+fi
+
+if [ "$2" == "pull" ];
+then rsync -rizP --delete --exclude '.*' --exclude 'build' $1 at shannon.stanford.edu:/home/$1/AwesomeAssembler/src/ src
+fi
+
+if [ "$2" == "update" ];
+then ssh -t $1 at shannon.stanford.edu "cd /home/$1/AwesomeAssembler && ./utils/build.sh"
+fi
+
+if [ "$2" == "all" ];
+then rsync -rizP --delete --exclude '.*' --exclude 'data' --exclude '*.pyc' --exclude 'figures' --exclude 'build' . $1 at shannon.stanford.edu:/home/$1/AwesomeAssembler
+fi
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/hinge.git