[med-svn] [spades] 01/07: Imported Upstream version 3.7.0+dfsg

Sascha Steinbiss sascha at steinbiss.name
Wed Feb 24 22:08:29 UTC 2016


This is an automated email from the git hooks/post-receive script.

sascha-guest pushed a commit to branch master
in repository spades.

commit 5546f5159ada560b7ef207bfa79b67eb65ba9937
Author: Sascha Steinbiss <sascha at steinbiss.name>
Date:   Wed Feb 24 19:18:55 2016 +0000

    Imported Upstream version 3.7.0+dfsg
---
 LICENSE                                            |   16 +-
 README                                             |   14 +-
 VERSION                                            |    2 +-
 changelog.html                                     |    8 +
 configs/corrector/corrector.info                   |    7 +
 .../debruijn/{config.info.template => config.info} |   23 +-
 configs/debruijn/config.info.template              |   23 +-
 configs/debruijn/construction.info                 |   23 +
 ...sed_rr.info.template => coverage_based_rr.info} |    0
 configs/debruijn/detail_info_printer.info          |   43 +
 configs/debruijn/detail_info_printer.info.template |   47 +-
 configs/debruijn/distance_estimation.info          |   91 ++
 .../{log.properties.template => log.properties}    |    8 +
 configs/debruijn/log.properties.template           |    8 +
 configs/debruijn/path_extend/pe_params.info        |  186 +++
 .../debruijn/path_extend/pe_params.info.template   |  257 ++--
 configs/debruijn/simplification.info               |  561 ++++++++
 configs/debruijn/simplification.info.template      |  647 +++++----
 configs/debruijn/tsa.info                          |    5 +
 configs/dipspades/config.info                      |   64 +
 .../log.properties}                                |   26 +-
 .../hammer/{config.info.template => config.info}   |    4 +-
 configs/hammer/config.info.template                |    4 +-
 configs/ionhammer/ionhammer.cfg                    |   12 +
 dipspades.py                                       |   14 +-
 dipspades_manual.html                              |    6 +
 ext/include/city/city.h                            |  109 ++
 ext/include/city/citycrc.h                         |   43 +
 ext/include/llvm/AlignOf.h                         |  189 +++
 ext/include/llvm/PointerEmbeddedInt.h              |   87 ++
 ext/include/llvm/PointerIntPair.h                  |  192 +++
 ext/include/llvm/PointerLikeTypeTraits.h           |   92 ++
 ext/include/llvm/PointerSumType.h                  |  176 +++
 ext/src/CMakeLists.txt                             |    3 +-
 ext/src/ConsensusCore/Version.cpp                  |    2 +-
 {src/mph_index => ext/src/cityhash}/CMakeLists.txt |    8 +-
 ext/src/cityhash/city.cc                           |  639 +++++++++
 ext/src/ssw/ssw.c                                  |  349 ++---
 ext/src/ssw/ssw_cpp.cpp                            |  258 ++--
 manual.html                                        |  151 ++-
 spades.py                                          |   21 +-
 spades_init.py                                     |    2 +-
 src/CMakeLists.txt                                 |    7 +-
 src/cmake/pack.cmake                               |    6 +-
 src/corrector/CMakeLists.txt                       |   14 +-
 src/corrector/contig_processor.cpp                 |  132 +-
 src/corrector/contig_processor.hpp                 |   14 +-
 src/corrector/dataset_processor.cpp                |    4 +-
 src/corrector/dataset_processor.hpp                |    3 +-
 src/corrector/interesting_pos_processor.hpp        |    2 +-
 src/corrector/main.cpp                             |    4 +-
 src/corrector/read.cpp                             |  164 ---
 src/debruijn/CMakeLists.txt                        |   15 +-
 src/debruijn/bwa_pair_info_filler.cpp              |  407 ++++++
 src/debruijn/bwa_pair_info_filler.hpp              |  254 ++++
 src/debruijn/config_struct.cpp                     |  169 ++-
 src/debruijn/config_struct.hpp                     |   66 +-
 src/debruijn/construction.cpp                      |    4 +-
 src/debruijn/dataset_readers.hpp                   |   12 +-
 src/debruijn/debruijn_data.hpp                     |   15 +-
 src/debruijn/debruijn_stats.cpp                    | 1050 +++++++--------
 src/debruijn/detail_coverage.hpp                   |    3 +
 src/debruijn/distance_estimation.cpp               |   14 +-
 src/debruijn/early_simplification.hpp              |   52 +-
 src/debruijn/gap_closer.cpp                        |   58 +-
 src/debruijn/genome_consistance_checker.cpp        |  236 ++++
 src/debruijn/genome_consistance_checker.hpp        |  126 +-
 src/debruijn/genome_storage.cpp                    |   45 +
 src/debruijn/genome_storage.hpp                    |   33 +
 src/debruijn/genomic_info_filler.cpp               |    6 +-
 src/debruijn/genomic_quality.hpp                   |    5 +
 src/debruijn/graph_pack.hpp                        |   19 +-
 src/debruijn/graph_simplification.hpp              | 1000 --------------
 src/debruijn/graphio.hpp                           |   75 +-
 src/debruijn/indices/edge_multi_index.hpp          |    1 +
 src/debruijn/indices/kmer_extension_index.hpp      |    1 -
 src/debruijn/indices/kmer_splitters.hpp            |    8 +-
 src/debruijn/indices/perfect_hash_map.hpp          |    6 +
 src/debruijn/is_counter.hpp                        |  153 +--
 src/debruijn/kmer_coverage_model.cpp               |    2 +-
 src/debruijn/launch.hpp                            |   36 +-
 src/debruijn/long_read_mapper.hpp                  |   67 +-
 src/debruijn/main.cpp                              |    1 +
 src/debruijn/mismatch_correction.cpp               |    7 +-
 src/debruijn/overlap_analysis.hpp                  |  113 ++
 src/debruijn/pacbio/pac_index.hpp                  |   71 +-
 src/debruijn/pacbio/pacbio_gap_closer.hpp          |   50 +-
 src/debruijn/pacbio_aligning.cpp                   |   54 +-
 src/debruijn/pair_info_count.cpp                   |   71 +-
 src/debruijn/pair_info_filler.hpp                  |   43 +-
 src/debruijn/pair_info_improver.hpp                |  210 +--
 src/debruijn/paired_statistics.hpp                 |    4 +-
 src/debruijn/path_extend/bidirectional_path.hpp    |  136 +-
 src/debruijn/path_extend/extension_chooser.hpp     |  581 +++++---
 src/debruijn/path_extend/ideal_pair_info.hpp       |    9 +-
 src/debruijn/path_extend/loop_traverser.hpp        |   10 +-
 src/debruijn/path_extend/next_path_searcher.hpp    |  103 +-
 src/debruijn/path_extend/paired_library.hpp        |  161 +--
 src/debruijn/path_extend/path_extend_launch.hpp    |  446 ++++--
 src/debruijn/path_extend/path_extender.hpp         |  673 +++++++---
 src/debruijn/path_extend/pe_config_struct.cpp      |  166 ++-
 src/debruijn/path_extend/pe_config_struct.hpp      |   97 +-
 src/debruijn/path_extend/pe_io.hpp                 |  109 +-
 src/debruijn/path_extend/pe_resolver.hpp           |  198 ++-
 src/debruijn/path_extend/pe_utils.hpp              |   12 +-
 .../scaffolder2015/connection_condition2015.cpp    |  111 ++
 .../scaffolder2015/connection_condition2015.hpp    |   69 +
 .../scaffolder2015/extension_chooser2015.cpp       |   81 ++
 .../scaffolder2015/extension_chooser2015.hpp       |   49 +
 .../scaffolder2015/scaff_supplementary.cpp         |   66 +
 .../scaffolder2015/scaff_supplementary.hpp         |   75 ++
 .../path_extend/scaffolder2015/scaffold_graph.cpp  |  275 ++++
 .../path_extend/scaffolder2015/scaffold_graph.hpp  |  233 ++++
 .../scaffolder2015/scaffold_graph_constructor.cpp  |   73 +
 .../scaffolder2015/scaffold_graph_constructor.hpp  |  101 ++
 .../scaffolder2015/scaffold_graph_visualizer.cpp   |   71 +
 .../scaffolder2015/scaffold_graph_visualizer.hpp   |   73 +
 src/debruijn/path_extend/split_graph_pair_info.hpp |   90 +-
 src/debruijn/path_extend/weight_counter.hpp        |  500 +++----
 src/debruijn/path_utils.hpp                        |   15 +-
 src/debruijn/positions.hpp                         |    2 +
 src/debruijn/read_converter.hpp                    |   61 +-
 src/debruijn/repeat.hpp                            |  371 -----
 src/debruijn/repeat_resolving.cpp                  |   15 +-
 src/debruijn/second_phase_setup.cpp                |    5 +-
 src/debruijn/sequence_mapper.hpp                   |  227 +++-
 src/debruijn/sequence_mapper_notifier.hpp          |   37 +-
 src/debruijn/simplification.cpp                    |  399 +++++-
 .../simplification/graph_simplification.hpp        |  825 ++++++++++++
 .../parallel_simplification_algorithms.hpp         |  351 ++++-
 .../simplification/simplification_settings.hpp     |  212 +--
 .../simplification/single_cell_simplification.hpp  |  110 ++
 src/debruijn/split_path_constructor.hpp            |   19 +-
 src/debruijn/stage.hpp                             |    8 +-
 src/debruijn/stats/debruijn_stats.hpp              |  243 ++--
 .../contig_correctors/close_gaps_corrector.hpp     |    4 +-
 .../contig_correctors/overlap_searcher.hpp         |    8 +-
 .../contig_correctors/redundant_contig_remover.hpp |    8 +-
 src/dipspades/dipspades.hpp                        |   14 +-
 src/dipspades/dipspades_config.cpp                 |    2 +-
 src/dipspades/dipspades_config.hpp                 |    2 +-
 src/dipspades/main.cpp                             |   31 +-
 .../bulge_paths_searcher.hpp                       |    9 +-
 src/hammer/CMakeLists.txt                          |    2 +-
 src/hammer/config_struct_hammer.cpp                |    4 +-
 src/hammer/config_struct_hammer.hpp                |    2 +
 src/hammer/expander.cpp                            |   14 +-
 src/hammer/hamcluster.cpp                          |   86 +-
 src/hammer/hamcluster.hpp                          |    9 +
 src/hammer/hammer_tools.cpp                        |    4 +-
 src/hammer/kmer_data.cpp                           |  265 +++-
 src/hammer/main.cpp                                |   15 +-
 src/hammer/misc/memusg                             |    2 +-
 src/hammer/read_corrector.cpp                      |   45 +-
 src/hammer/read_corrector.hpp                      |    7 +-
 src/include/adt/concurrent_dsu.hpp                 |  314 +++--
 src/include/adt/flat_map.hpp                       |  320 +++++
 src/include/adt/flat_set.hpp                       |  230 ++++
 src/include/adt/function_traits.hpp                |   70 +
 src/include/adt/iterator_range.hpp                 |   13 +
 src/include/adt/queue_iterator.hpp                 |   14 +
 src/include/adt/small_pod_vector.hpp               |  379 ++++++
 src/include/de/conj_iterator.hpp                   |  140 ++
 src/include/de/data_divider.hpp                    |    2 +-
 src/include/de/distance_estimation.hpp             |   60 +-
 src/include/de/extensive_distance_estimation.hpp   |   69 +-
 src/include/de/index_point.hpp                     |  455 +++++++
 src/include/de/insert_size_refiner.hpp             |   97 +-
 src/include/de/pair_info_filters.hpp               |  161 +--
 src/include/de/paired_info.hpp                     | 1417 ++++++++++----------
 src/include/de/paired_info_helpers.hpp             |  149 ++
 src/include/de/smoothing_distance_estimation.hpp   |   33 +-
 src/include/de/weighted_distance_estimation.hpp    |   14 +-
 src/include/func.hpp                               |  116 +-
 src/include/graph_print_utils.hpp                  |    4 +-
 src/include/io/kmer_iterator.hpp                   |   54 +
 src/include/io/library.hpp                         |   48 +-
 src/include/io/mmapped_reader.hpp                  |   68 +-
 src/include/io/paired_read.hpp                     |  100 +-
 src/include/io/read.hpp                            |    7 +-
 src/{corrector => include/io/sam}/read.hpp         |  101 +-
 src/{corrector => include/io/sam}/sam_reader.hpp   |    5 +-
 src/include/io/single_read.hpp                     |  171 ++-
 src/include/logger/logger.hpp                      |    1 +
 src/include/mph_index/MurmurHash3.h                |   38 -
 src/include/mph_index/base_hash.hpp                |   67 +-
 src/include/mph_index/kmer_index.hpp               |   10 +-
 src/include/omni/action_handlers.hpp               |    4 +-
 src/include/omni/basic_edge_conditions.hpp         |   42 +-
 src/include/omni/bulge_remover.hpp                 |  784 ++++++++---
 src/include/omni/complex_bulge_remover.hpp         |   13 +-
 src/include/omni/complex_tip_clipper.hpp           |    8 +-
 src/include/omni/coverage.hpp                      |   21 +-
 .../omni/dijkstra_tools/dijkstra_algorithm.hpp     |  354 ++---
 .../omni/dijkstra_tools/dijkstra_helper.hpp        |   50 +-
 .../omni/dijkstra_tools/dijkstra_settings.hpp      |   19 +-
 .../omni/dijkstra_tools/vertex_process_checker.hpp |   55 +-
 src/include/omni/edges_position_handler.hpp        |    4 +
 src/include/omni/erroneous_connection_remover.hpp  |   73 +-
 src/include/omni/graph_component.hpp               |    6 +-
 src/include/omni/graph_core.hpp                    |   27 +-
 src/include/omni/graph_iterators.hpp               |  107 +-
 src/include/omni/graph_processing_algorithm.hpp    |   88 +-
 src/include/omni/mapping_path.hpp                  |   92 +-
 src/include/omni/observable_graph.hpp              |   72 +-
 src/include/omni/omni_tools.hpp                    |  153 ++-
 src/include/omni/order_and_law.hpp                 |    8 +-
 src/include/omni/parallel_processing.hpp           |  387 +++---
 src/include/omni/path_processor.hpp                |  418 +++---
 src/include/omni/range.hpp                         |   92 ++
 src/include/omni/relative_coverage_remover.hpp     |   89 +-
 src/include/omni/splitters.hpp                     |   10 +-
 src/include/omni/tip_clipper.hpp                   |   78 +-
 src/include/pred.hpp                               |  165 +++
 src/include/sequence/rtseq.hpp                     |    6 +-
 src/include/sequence/seq.hpp                       |   17 +-
 src/include/sequence/simple_seq.hpp                |   37 +-
 src/include/simple_tools.hpp                       |   14 +
 src/include/ssw/ssw.h                              |  138 +-
 src/include/ssw/ssw_cpp.h                          |   50 +-
 src/include/version.hpp.in                         |    7 +
 src/io/CMakeLists.txt                              |    6 +-
 src/io/library.cpp                                 |   10 +-
 src/io/sam/read.cpp                                |   42 +
 src/{corrector => io/sam}/sam_reader.cpp           |   20 +-
 src/ionhammer/CMakeLists.txt                       |    2 +-
 src/ionhammer/HSeq.hpp                             |    6 +-
 src/ionhammer/main.cpp                             |    2 +
 src/mph_index/MurmurHash3.cpp                      |  345 -----
 src/scaffold_correction/scaffold_correction.hpp    |   11 +-
 .../__pycache__/corrector_logic.cpython-34.pyc     |  Bin 0 -> 2257 bytes
 .../__pycache__/dipspades_logic.cpython-34.pyc     |  Bin 0 -> 9800 bytes
 .../__pycache__/dipspades_logic.cpython-35.pyc     |  Bin 0 -> 9746 bytes
 .../__pycache__/hammer_logic.cpython-33.pyc        |  Bin 0 -> 6876 bytes
 .../__pycache__/hammer_logic.cpython-34.pyc        |  Bin 0 -> 5030 bytes
 .../__pycache__/hammer_logic.cpython-35.pyc        |  Bin 0 -> 4982 bytes
 .../__pycache__/options_storage.cpython-33.pyc     |  Bin 0 -> 19269 bytes
 .../__pycache__/options_storage.cpython-34.pyc     |  Bin 0 -> 14925 bytes
 .../__pycache__/options_storage.cpython-35.pyc     |  Bin 0 -> 14806 bytes
 .../__pycache__/process_cfg.cpython-33.pyc         |  Bin 0 -> 8923 bytes
 .../__pycache__/process_cfg.cpython-34.pyc         |  Bin 0 -> 5323 bytes
 .../__pycache__/process_cfg.cpython-35.pyc         |  Bin 0 -> 5299 bytes
 .../__pycache__/spades_logic.cpython-33.pyc        |  Bin 0 -> 15617 bytes
 .../__pycache__/spades_logic.cpython-34.pyc        |  Bin 0 -> 11542 bytes
 .../__pycache__/spades_logic.cpython-35.pyc        |  Bin 0 -> 11366 bytes
 .../__pycache__/support.cpython-33.pyc             |  Bin 0 -> 39538 bytes
 .../__pycache__/support.cpython-34.pyc             |  Bin 0 -> 26771 bytes
 .../__pycache__/support.cpython-35.pyc             |  Bin 0 -> 26508 bytes
 src/spades_pipeline/common/SeqIO.pyc               |  Bin 0 -> 7634 bytes
 .../common/__pycache__/SeqIO.cpython-33.pyc        |  Bin 0 -> 9245 bytes
 .../common/__pycache__/SeqIO.cpython-34.pyc        |  Bin 0 -> 5553 bytes
 .../common/__pycache__/SeqIO.cpython-35.pyc        |  Bin 0 -> 5539 bytes
 .../common/__pycache__/alignment.cpython-33.pyc    |  Bin 0 -> 3826 bytes
 .../common/__pycache__/alignment.cpython-34.pyc    |  Bin 0 -> 2685 bytes
 .../common/__pycache__/alignment.cpython-35.pyc    |  Bin 0 -> 2673 bytes
 .../__pycache__/parallel_launcher.cpython-34.pyc   |  Bin 0 -> 3014 bytes
 .../__pycache__/parallel_launcher.cpython-35.pyc   |  Bin 0 -> 3008 bytes
 .../common/__pycache__/sam_parser.cpython-33.pyc   |  Bin 0 -> 15134 bytes
 .../common/__pycache__/sam_parser.cpython-34.pyc   |  Bin 0 -> 9071 bytes
 .../common/__pycache__/sam_parser.cpython-35.pyc   |  Bin 0 -> 9062 bytes
 src/spades_pipeline/common/alignment.pyc           |  Bin 0 -> 3118 bytes
 src/spades_pipeline/common/parallel_launcher.pyc   |  Bin 0 -> 4205 bytes
 src/spades_pipeline/common/sam_parser.pyc          |  Bin 0 -> 11960 bytes
 src/spades_pipeline/easy_align.py                  |   43 +
 src/spades_pipeline/hammer_logic.py                |    2 +
 src/spades_pipeline/options_storage.py             |   61 +-
 src/spades_pipeline/run_contig_breaker.py          |   53 +-
 src/spades_pipeline/spades_logic.py                |   10 +-
 src/spades_pipeline/support.py                     |   56 +-
 .../__pycache__/barcode_extraction.cpython-34.pyc  |  Bin 0 -> 5268 bytes
 .../__pycache__/barcode_extraction.cpython-35.pyc  |  Bin 0 -> 5250 bytes
 .../__pycache__/break_by_coverage.cpython-33.pyc   |  Bin 0 -> 9213 bytes
 .../__pycache__/break_by_coverage.cpython-34.pyc   |  Bin 0 -> 6014 bytes
 .../__pycache__/break_by_coverage.cpython-35.pyc   |  Bin 0 -> 5969 bytes
 .../__pycache__/generate_quality.cpython-33.pyc    |  Bin 0 -> 3155 bytes
 .../__pycache__/generate_quality.cpython-34.pyc    |  Bin 0 -> 2126 bytes
 .../__pycache__/generate_quality.cpython-35.pyc    |  Bin 0 -> 2117 bytes
 .../__pycache__/id_generation.cpython-34.pyc       |  Bin 0 -> 2443 bytes
 .../__pycache__/id_generation.cpython-35.pyc       |  Bin 0 -> 2437 bytes
 .../__pycache__/launch_options.cpython-34.pyc      |  Bin 0 -> 4470 bytes
 .../__pycache__/launch_options.cpython-35.pyc      |  Bin 0 -> 4438 bytes
 .../moleculo_filter_contigs.cpython-33.pyc         |  Bin 0 -> 2792 bytes
 .../moleculo_filter_contigs.cpython-34.pyc         |  Bin 0 -> 1689 bytes
 .../moleculo_filter_contigs.cpython-35.pyc         |  Bin 0 -> 1677 bytes
 .../moleculo_postprocessing.cpython-33.pyc         |  Bin 0 -> 3213 bytes
 .../moleculo_postprocessing.cpython-34.pyc         |  Bin 0 -> 2218 bytes
 .../moleculo_postprocessing.cpython-35.pyc         |  Bin 0 -> 2212 bytes
 .../reference_construction.cpython-34.pyc          |  Bin 0 -> 7113 bytes
 .../reference_construction.cpython-35.pyc          |  Bin 0 -> 7080 bytes
 .../__pycache__/string_dist_utils.cpython-34.pyc   |  Bin 0 -> 1904 bytes
 .../__pycache__/string_dist_utils.cpython-35.pyc   |  Bin 0 -> 1904 bytes
 .../truspades/barcode_extraction.py                |    5 +-
 .../truspades/barcode_extraction.pyc               |  Bin 0 -> 6460 bytes
 src/spades_pipeline/truspades/break_by_coverage.py |   12 +-
 .../truspades/break_by_coverage.pyc                |  Bin 0 -> 7561 bytes
 src/spades_pipeline/truspades/generate_quality.pyc |  Bin 0 -> 2396 bytes
 src/spades_pipeline/truspades/id_generation.pyc    |  Bin 0 -> 3083 bytes
 src/spades_pipeline/truspades/launch_options.py    |   45 +-
 src/spades_pipeline/truspades/launch_options.pyc   |  Bin 0 -> 5075 bytes
 .../truspades/moleculo_filter_contigs.pyc          |  Bin 0 -> 2288 bytes
 .../truspades/moleculo_postprocessing.pyc          |  Bin 0 -> 2568 bytes
 .../truspades/reference_construction.pyc           |  Bin 0 -> 8701 bytes
 .../truspades/string_dist_utils.pyc                |  Bin 0 -> 2474 bytes
 truspades.py                                       |   29 +-
 truspades_manual.html                              |   15 +-
 305 files changed, 17639 insertions(+), 8684 deletions(-)

diff --git a/LICENSE b/LICENSE
index 0137d11..27b20f9 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,3 @@
-
 SPADES: SAINT-PETERSBURG GENOME ASSEMBLER
 Copyright (c) 2015 Saint Petersburg State University
 Copyright (c) 2011-2014 Saint Petersburg Academic University
@@ -21,21 +20,20 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 Current SPAdes contributors:
 
     Dmitry Antipov,
-    Anton Bankevich.
+    Anton Bankevich,
+    Yuriy Gorshkov,
     Alexey Gurevich,
     Anton Korobeynikov,
+    Dmitriy Meleshko,
     Sergey Nurk,
     Andrey Prjibelski,
     Yana Safonova,
-    Artem Tarasov,
-    Irina Vasilinetc,
-    Alla Lapidus,
-    Glenn Tesler,
-    Max Alekseyev and
+    Alla Lapidus and
     Pavel Pevzner
 
 Also contributed:
 
+    Max Alekseyev,
     Mikhail Dvorkin,
     Alexander Kulikov,
     Valery Lesin,
@@ -45,6 +43,9 @@ Also contributed:
     Vladislav Saveliev,
     Alexander Sirotkin,
     Yakov Sirotkin,
+    Artem Tarasov,
+    Glenn Tesler,
+    Irina Vasilinetc,
     Nikolay Vyahhi
 
 Contacts:
@@ -66,3 +67,4 @@ Yakov Sirotkin, Ramunas Stepanauskas, Jeffrey McLean, Roger Lasken,
 Scott Clingenpeel, Tanja Woyke, Glenn Tesler, Max Alekseyev, and Pavel Pevzner.
 Assembling Genomes and Mini-metagenomes from Highly Chimeric Reads. Lecture Notes 
 in Computer Science 7821 (2013), pp. 158-170. doi:10.1007/978-3-642-37195-0_13
+
diff --git a/README b/README
index 0393af4..742fb6c 100755
--- a/README
+++ b/README
@@ -8,21 +8,20 @@ Developed in Algorithmic Biology Lab of St. Petersburg Academic University of th
 Current SPAdes contributors:
 
     Dmitry Antipov,
-    Anton Bankevich.
+    Anton Bankevich,
+    Yuriy Gorshkov,
     Alexey Gurevich,
     Anton Korobeynikov,
+    Dmitriy Meleshko,
     Sergey Nurk,
     Andrey Prjibelski,
     Yana Safonova,
-    Artem Tarasov,
-    Irina Vasilinetc,
-    Alla Lapidus,
-    Glenn Tesler,
-    Max Alekseyev and
+    Alla Lapidus and
     Pavel Pevzner
 
 Also contributed:
 
+    Max Alekseyev,
     Mikhail Dvorkin,
     Alexander Kulikov,
     Valery Lesin,
@@ -32,6 +31,9 @@ Also contributed:
     Vladislav Saveliev,
     Alexander Sirotkin,
     Yakov Sirotkin,
+    Artem Tarasov,
+    Glenn Tesler,
+    Irina Vasilinetc,
     Nikolay Vyahhi
 
 Installation instructions and manual can be found on the website:
diff --git a/VERSION b/VERSION
index b727628..7c69a55 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-3.6.2
+3.7.0
diff --git a/changelog.html b/changelog.html
index 91070fb..80c05e1 100644
--- a/changelog.html
+++ b/changelog.html
@@ -3,6 +3,14 @@
 
 <h2>SPAdes Genome Assembler changelog</h2>
 
+<h3>SPAdes 3.7.0, 24 February 2016</h3>
+
+<p>NEW: metaSPAdes metagenomic pipeline.<p>
+
+<p>CHANGE: improved performance for both error correction and assembly stages.</p>
+
+<p>FIX: Multiple bug fixes.<p>
+
 <h3>SPAdes 3.6.2, 20 November 2015</h3>
 
 <p>NEW: Contigs/scaffolds paths for assembly_graph.fastg in Bandage-supported format.<p>
diff --git a/configs/corrector/corrector.info b/configs/corrector/corrector.info
new file mode 100644
index 0000000..22740a3
--- /dev/null
+++ b/configs/corrector/corrector.info
@@ -0,0 +1,7 @@
+{
+dataset: ./configs/debruijn/datasets/ECOLI_IS220_QUAKE.yaml, 
+work_dir: ./test_dataset/input/corrected/tmp, 
+output_dir: ./test_dataset/input/corrected,
+max_nthreads: 16,
+strategy: mapped_squared
+}
diff --git a/configs/debruijn/config.info.template b/configs/debruijn/config.info
similarity index 91%
copy from configs/debruijn/config.info.template
copy to configs/debruijn/config.info
index c2c03b2..98b8803 100755
--- a/configs/debruijn/config.info.template
+++ b/configs/debruijn/config.info
@@ -4,8 +4,8 @@
 #include "construction.info"
 #include "distance_estimation.info"
 #include "detail_info_printer.info"
-#include "coverage_based_rr.info"
 #include "tsa.info"
+#include "path_extend/pe_params.info"
 
 K		55
 
@@ -21,7 +21,6 @@ main_iteration  true
 additional_contigs	tmp_contigs.fasta
 load_from         latest/saves/ ; tmp or latest 
 
-
 ; Multithreading options
 temp_bin_reads_dir	.bin_reads/
 max_threads		8
@@ -41,8 +40,10 @@ scaffold_correction_mode false
 ; enabled (1) or disabled (0) repeat resolution (former "paired_mode")
 rr_enable true
 
-; enables or disables two-step repeat resolution process (currently in meta mode only)
+; two-step pipeline (currently in meta mode only)
 two_step_rr true
+; enables/disables usage of intermediate contigs in two-step pipeline
+use_intermediate_contigs true
 
 ;use single reads for rr (all | only_single_libs | none )
 single_reads_rr only_single_libs
@@ -96,10 +97,6 @@ resolving_mode path_extend
 
 use_scaffolder  true
 
-path_extend_params {
-	#include "path_extend/pe_params.info"
-}
-
 avoid_rc_connections true
 
 ;position handling
@@ -147,6 +144,7 @@ pacbio_processor
 	long_seq_limit 400
 	pacbio_min_gap_quantity 2
 	contigs_min_gap_quantity 1
+    max_contigs_gap_length 10000
 }
 ; consensus
 need_consensus  false ; output is VERY large(gigabytes).
@@ -169,5 +167,14 @@ sc_cor
     max_cut_length 50
 }
 
+
+bwa_aligner
+{
+    enabled false
+    debug false
+    path_to_bwa ./bin/bwa-spades
+    min_contig_len 0
+}
+
 ;flanking coverage range
-flanking_range 50
+flanking_range 55
diff --git a/configs/debruijn/config.info.template b/configs/debruijn/config.info.template
index c2c03b2..98b8803 100755
--- a/configs/debruijn/config.info.template
+++ b/configs/debruijn/config.info.template
@@ -4,8 +4,8 @@
 #include "construction.info"
 #include "distance_estimation.info"
 #include "detail_info_printer.info"
-#include "coverage_based_rr.info"
 #include "tsa.info"
+#include "path_extend/pe_params.info"
 
 K		55
 
@@ -21,7 +21,6 @@ main_iteration  true
 additional_contigs	tmp_contigs.fasta
 load_from         latest/saves/ ; tmp or latest 
 
-
 ; Multithreading options
 temp_bin_reads_dir	.bin_reads/
 max_threads		8
@@ -41,8 +40,10 @@ scaffold_correction_mode false
 ; enabled (1) or disabled (0) repeat resolution (former "paired_mode")
 rr_enable true
 
-; enables or disables two-step repeat resolution process (currently in meta mode only)
+; two-step pipeline (currently in meta mode only)
 two_step_rr true
+; enables/disables usage of intermediate contigs in two-step pipeline
+use_intermediate_contigs true
 
 ;use single reads for rr (all | only_single_libs | none )
 single_reads_rr only_single_libs
@@ -96,10 +97,6 @@ resolving_mode path_extend
 
 use_scaffolder  true
 
-path_extend_params {
-	#include "path_extend/pe_params.info"
-}
-
 avoid_rc_connections true
 
 ;position handling
@@ -147,6 +144,7 @@ pacbio_processor
 	long_seq_limit 400
 	pacbio_min_gap_quantity 2
 	contigs_min_gap_quantity 1
+    max_contigs_gap_length 10000
 }
 ; consensus
 need_consensus  false ; output is VERY large(gigabytes).
@@ -169,5 +167,14 @@ sc_cor
     max_cut_length 50
 }
 
+
+bwa_aligner
+{
+    enabled false
+    debug false
+    path_to_bwa ./bin/bwa-spades
+    min_contig_len 0
+}
+
 ;flanking coverage range
-flanking_range 50
+flanking_range 55
diff --git a/configs/debruijn/construction.info b/configs/debruijn/construction.info
new file mode 100644
index 0000000..f3d1b2c
--- /dev/null
+++ b/configs/debruijn/construction.info
@@ -0,0 +1,23 @@
+; construction
+
+construction
+{
+	; mode of construction: extension (construct hash map of kmers to extentions), old (construct set of k+1-mers)
+	mode extension
+
+	; enable keeping in graph perfect cycles. This slows down condensing but some plasmids can be lost if this is turned off.
+	keep_perfect_loops true
+
+	; size of buffer for each thread in MB, 0 for autodetection
+	read_buffer_size 0
+	
+	early_tip_clipper
+	{
+		; tip clipper can be enabled only in extension mode
+		enable true
+
+		; optional parameter. By default tips of length rl-k are removed
+;		length_bound 10
+	}
+}
+
diff --git a/configs/debruijn/coverage_based_rr.info.template b/configs/debruijn/coverage_based_rr.info
similarity index 100%
rename from configs/debruijn/coverage_based_rr.info.template
rename to configs/debruijn/coverage_based_rr.info
diff --git a/configs/debruijn/detail_info_printer.info b/configs/debruijn/detail_info_printer.info
new file mode 100644
index 0000000..e19cf36
--- /dev/null
+++ b/configs/debruijn/detail_info_printer.info
@@ -0,0 +1,43 @@
+info_printers
+{
+    default
+    {
+       basic_stats                      false
+       save_full_graph                  false
+       extended_stats                   false
+       detailed_dot_write               false
+       write_components                 false
+       components_for_genome_pos        "" ; (k+1)-mers starting on this positions will be investigated
+	   components_for_kmer              ""
+       write_components_along_genome    false
+       write_components_along_contigs   false
+       write_error_loc                  false
+       write_full_graph                 false
+       write_full_nc_graph              false
+    }
+
+	before_first_gap_closer
+    {
+    }
+
+	before_simplification
+    {
+    }
+
+    before_post_simplification    
+    {
+    }
+
+    final_simplified
+    {
+    }
+
+    final_gap_closed
+    {
+    }
+
+    before_repeat_resolution
+    {
+    }
+
+}  
diff --git a/configs/debruijn/detail_info_printer.info.template b/configs/debruijn/detail_info_printer.info.template
index 1715f7c..e19cf36 100644
--- a/configs/debruijn/detail_info_printer.info.template
+++ b/configs/debruijn/detail_info_printer.info.template
@@ -2,61 +2,42 @@ info_printers
 {
     default
     {
-       ;print_stats                      true
-       print_stats                      false
+       basic_stats                      false
+       save_full_graph                  false
+       extended_stats                   false
        detailed_dot_write               false
        write_components                 false
        components_for_genome_pos        "" ; (k+1)-mers starting on this positions will be investigated
 	   components_for_kmer              ""
        write_components_along_genome    false
        write_components_along_contigs   false
-       save_full_graph                  false
        write_error_loc                  false
        write_full_graph                 false
        write_full_nc_graph              false
-}
-
-;	before_simplification
-;	tip_clipping
-;	bulge_removal
-;	err_con_removal
-;	before_post_simplification
-;	final_err_con_removal
-;	final_tip_clipping
-;	final_bulge_removal
-;	removing_isolated_edges
-;	final_simplified
-;	before_repeat_resolution
+    }
 
+	before_first_gap_closer
+    {
+    }
 
 	before_simplification
     {
-;       write_components                 true
-;       write_components_along_genome    true
-;       ;detailed_dot_write		true
     }
 
     before_post_simplification    
     {
-;       write_components                 true
-;       write_components_along_genome    true
-;       write_error_loc                  true
     }
 
     final_simplified
     {
-;       write_components                 true
-;       write_components_along_genome    true
-;       write_error_loc                  true
     }
 
-;    before_repeat_resolution
-;    {
-;       ;write_components                 true
-;       ;write_components_along_genome    true
-;       ;write_error_loc                  true
-;	write_full_nc_graph              true
-;
-;   }
+    final_gap_closed
+    {
+    }
+
+    before_repeat_resolution
+    {
+    }
 
 }  
diff --git a/configs/debruijn/distance_estimation.info b/configs/debruijn/distance_estimation.info
new file mode 100644
index 0000000..949216b
--- /dev/null
+++ b/configs/debruijn/distance_estimation.info
@@ -0,0 +1,91 @@
+; distance estimator:
+
+sc_de
+{
+    linkage_distance_coeff      0.0
+    max_distance_coeff  	    2.0
+    max_distance_coeff_scaff    2000.0 
+    filter_threshold            2.0 
+}
+
+usual_de
+{
+    linkage_distance_coeff    0.0
+    max_distance_coeff        2.0
+    max_distance_coeff_scaff  2000.0
+    filter_threshold          2.0
+}
+
+old_sc_de
+{
+    linkage_distance_coeff      0.3
+    max_distance_coeff      	2.0
+    max_distance_coeff_scaff    2000.0
+    filter_threshold            10.0 ;bigger than in non-single cell because normalization is disabled
+}
+
+old_usual_de
+{
+    linkage_distance_coeff      0.3
+    max_distance_coeff          2.0
+    max_distance_coeff_scaff    2000.0
+    filter_threshold            0.2
+}
+
+; advanced distance estimator:
+
+sc_ade
+{
+    ;data dividing
+        threshold          80 ;maximal distance between two points in cluster
+
+    ;local maximum seeking
+        range_coeff          0.2 ;data_length*range_coeff := width of the averaging window
+        delta_coeff          0.4 ;data_length*delta_coeff := maximal difference between possible distance and real peak on the graph
+
+    ;fft smoothing
+        percentage          0.01 ;percent of data for baseline subtraction
+        cutoff              3 ;the number of the lowest freqs in fourier decomp being taken
+
+    ;other
+        min_peak_points       3 ;the minimal number of points in cluster to be considered
+        inv_density         5.0 ;maximal inverse density of points in cluster to be considered
+
+    ;hard_mode arguments
+        derivative_threshold  0.2 ;threshold for derivative in hard mode
+
+}
+
+usual_ade
+{
+    ;data dividing
+        threshold          80 ;maximal distance between two points in cluster
+
+    ;local maximum seeking
+        range_coeff          0.2 ;data_length*range_coeff := width of the averaging window
+        delta_coeff          0.4 ;data_length*delta_coeff := maximal difference between possible distance and real peak on the graph
+
+    ;fft smoothing
+        percentage          0.01 ;percent of data for baseline subtraction
+        cutoff              3 ;the number of the lowest freqs in fourier decomp being taken
+
+    ;other
+        min_peak_points       3 ;the minimal number of points in cluster to be considered
+        inv_density         5.0 ;maximal inverse density of points in cluster to be considered
+
+    ;hard_mode arguments
+        derivative_threshold  0.2 ;threshold for derivative in hard mode
+
+}
+
+; ambiguous pair info checker parameters 
+amb_de {
+        enabled                         false; true
+        haplom_threshold                500
+        relative_length_threshold       0.8
+        relative_seq_threshold          0.5
+}
+
+sensitive_mapper {
+    k   19
+}
diff --git a/configs/debruijn/log.properties.template b/configs/debruijn/log.properties
similarity index 84%
copy from configs/debruijn/log.properties.template
copy to configs/debruijn/log.properties
index efd96f1..a4052e3 100644
--- a/configs/debruijn/log.properties.template
+++ b/configs/debruijn/log.properties
@@ -42,3 +42,11 @@ default=INFO
 #PathExtendPI=DEBUG
 #LoopTraverser=DEBUG
 #PEResolver=DEBUG
+#ExtensionChooser2015=DEBUG
+#ScaffoldingUniqueEdgeStorage=DEBUG
+#ScaffoldingUniqueEdgeAnalyzer=DEBUG
+#LoopDetectingPathExtender=DEBUG
+#SimpleExtender=DEBUG
+#ScaffoldingPathExtender=DEBUG
+
+#BWAPairInfo=TRACE
diff --git a/configs/debruijn/log.properties.template b/configs/debruijn/log.properties.template
index efd96f1..a4052e3 100644
--- a/configs/debruijn/log.properties.template
+++ b/configs/debruijn/log.properties.template
@@ -42,3 +42,11 @@ default=INFO
 #PathExtendPI=DEBUG
 #LoopTraverser=DEBUG
 #PEResolver=DEBUG
+#ExtensionChooser2015=DEBUG
+#ScaffoldingUniqueEdgeStorage=DEBUG
+#ScaffoldingUniqueEdgeAnalyzer=DEBUG
+#LoopDetectingPathExtender=DEBUG
+#SimpleExtender=DEBUG
+#ScaffoldingPathExtender=DEBUG
+
+#BWAPairInfo=TRACE
diff --git a/configs/debruijn/path_extend/pe_params.info b/configs/debruijn/path_extend/pe_params.info
new file mode 100644
index 0000000..279c8e4
--- /dev/null
+++ b/configs/debruijn/path_extend/pe_params.info
@@ -0,0 +1,186 @@
+default_pe {
+
+; output options
+
+debug_output    false
+
+output {
+    write_overlaped_paths   true
+    write_paths             true
+}
+
+visualize {
+    print_overlaped_paths   true
+    print_paths             true
+}
+
+; none | break_gaps | break_all
+output_broken_scaffolds     break_gaps
+
+params {
+    ; old | 2015 | combined | old_pe_2015
+    scaffolding_mode old
+
+    split_edge_length    99
+    normalize_weight     false
+    cut_all_overlaps  false
+    
+    ; extension selection
+    extension_options
+    {
+        use_default_single_threshold false
+        single_threshold           1.75676
+;    A.mirum threshold 0.076
+;    E.coli RL36 threshold 0.717949
+;    E.coli IS220 threshold 1.75676
+        weight_threshold           0.5
+        priority_coeff             1.5
+    }    
+
+    mate_pair_options
+    {
+        use_default_single_threshold true
+        single_threshold           30
+        weight_threshold           0.5
+        priority_coeff             1.5
+    }
+
+    scaffolder {
+        on            true
+        cutoff        2
+        rel_cutoff    0.1
+        sum_threshold 3  
+
+        cluster_info  true
+        cl_threshold  0
+
+        fix_gaps       true
+        use_la_gap_joiner true
+        ;next param should be 0.51 - 1.0 if use_old_score = true and 3.0 otherwise
+        min_gap_score   0.7
+
+        max_must_overlap  -2
+        max_can_overlap   0.5
+        short_overlap     6
+        artificial_gap    10
+        use_old_score   true
+
+        min_overlap_length 10
+        flank_addition_coefficient -5.9
+        flank_multiplication_coefficient 0.97
+    }
+    
+    loop_removal
+    {
+        max_loops       10
+        mp_max_loops    10
+    }
+
+    remove_overlaps     true
+    use_coordinated_coverage false
+    coordinated_coverage
+    {
+       max_edge_length_repeat 300
+       delta                  0.4
+    }
+
+    scaffolding2015 {
+        autodetect      true
+        min_unique_length 10000
+        unique_coverage_variation 0.5
+        ; (median * (1+variation) > unique > median * (1 - variation))
+    }
+
+    scaffold_graph {
+        construct    false
+        output       false
+        min_read_count 20
+        graph_connectivity false
+        max_path_length 10000
+    }
+}
+
+
+long_reads {
+    pacbio_reads {
+        filtering   2.5
+        weight_priority    1.2
+        unique_edge_priority 5.0
+    }
+
+    single_reads {
+        filtering  1.25 
+        weight_priority    5.0
+        unique_edge_priority 1000.0
+    }
+
+    coverage_base_rr {
+        filtering   0.0
+        weight_priority    1.5
+        unique_edge_priority 2.0
+    }
+}
+}
+
+sc_pe {
+params {
+    normalize_weight        true
+
+    ; extension selection
+    extension_options
+    {
+        use_default_single_threshold false
+        single_threshold           0.001
+        weight_threshold           0.6
+    }
+
+}
+}
+
+moleculo_pe {
+params {
+    normalize_weight        true
+    cut_all_overlaps  true
+
+    ; extension selection
+    extension_options
+    {
+        use_default_single_threshold false
+        single_threshold           0.001
+        weight_threshold           0.6
+    }
+
+    scaffolder {
+        short_overlap     10
+        use_la_gap_joiner false
+    }
+}
+}
+
+;NB descends from sc_pe
+meta_pe {
+params {
+    remove_overlaps     true
+    cut_all_overlaps  true
+
+    ;TODO proper configuration of different extenders is not supported 
+    ;TODO most settings are hardcoded for now 
+
+    ;normalize_weight        NA
+    extension_options
+    {
+        ;use_default_single_threshold NA
+        ;single_threshold           NA 
+        weight_threshold           0.6
+    }
+    
+    use_coordinated_coverage true
+}
+}
+
+prelim_pe {
+params {
+    use_coordinated_coverage false
+    remove_overlaps     false
+}
+}
diff --git a/configs/debruijn/path_extend/pe_params.info.template b/configs/debruijn/path_extend/pe_params.info.template
index 67dae6c..279c8e4 100644
--- a/configs/debruijn/path_extend/pe_params.info.template
+++ b/configs/debruijn/path_extend/pe_params.info.template
@@ -1,3 +1,5 @@
+default_pe {
+
 ; output options
 
 debug_output    false
@@ -14,157 +16,11 @@ visualize {
 
 ; none | break_gaps | break_all
 output_broken_scaffolds     break_gaps
-singlecell {
-    split_edge_length       99
-    normalize_weight        true
-    cut_all_overlaps  false
-
-    ; extension selection
-    extension_options
-    {
-        recalculate_threshold      true
-        single_threshold           0.001
-        weight_threshold           0.6
-        priority_coeff             1.5
-    }
-
-    mate_pair_options
-    {
-        recalculate_threshold      true
-        single_threshold           0.0001
-        weight_threshold           0.5
-        priority_coeff             1.5
-    }
-
-    scaffolder {
-        on              true
-        cutoff          2
-        rel_cutoff      0.1
-        sum_threshold   3  
-
-        cluster_info    true
-        cl_threshold    0
-
-        fix_gaps        true
-        min_gap_score   3.0
-
-        max_must_overlap  -2
-        max_can_overlap   0.5
-        short_overlap     6
-        artificial_gap    10
-        use_old_score   false
-    }
-    
-    loop_removal
-    {
-        max_loops       10
-        mp_max_loops    10
-    }
-
-    remove_overlaps     true
-}
-
-meta {
-    split_edge_length       99
-    normalize_weight        true
-    cut_all_overlaps  false
-
-    ; extension selection
-    extension_options
-    {
-        recalculate_threshold      false
-        single_threshold           0.035
-        weight_threshold           0.6
-        priority_coeff             1.5
-    }
-
-    mate_pair_options
-    {
-        recalculate_threshold      true
-        single_threshold           0.0001
-        weight_threshold           0.5
-        priority_coeff             1.5
-    }
-
-    scaffolder {
-        on              true
-        cutoff          2
-        rel_cutoff      0.1
-        sum_threshold   3  
-
-        cluster_info    true
-        cl_threshold    0
-
-        fix_gaps        true
-        min_gap_score   3.0
-
-        max_must_overlap  -2
-        max_can_overlap   0.5
-        short_overlap     6
-        artificial_gap    10
-        use_old_score   false
-    }
-    
-    loop_removal
-    {
-        max_loops       10
-        mp_max_loops    10
-    }
-
-    remove_overlaps     true
-}
-
-moleculo {
-    split_edge_length       99
-    normalize_weight        true
-    cut_all_overlaps  true
-
-    ; extension selection
-    extension_options
-    {
-        recalculate_threshold      true
-        single_threshold           0.001
-        weight_threshold           0.6
-        priority_coeff             1.5
-    }
 
-    mate_pair_options
-    {
-        recalculate_threshold      true
-        single_threshold           0.0001
-        weight_threshold           0.5
-        priority_coeff             1.5
-    }
-
-    scaffolder {
-        on              true
-        cutoff          2
-        rel_cutoff      0.1
-        sum_threshold   3  
-
-        cluster_info    true
-        cl_threshold    0
-
-        fix_gaps        true
-        min_gap_score   0.7
+params {
+    ; old | 2015 | combined | old_pe_2015
+    scaffolding_mode old
 
-        max_must_overlap  -2
-        max_can_overlap   0.5
-        short_overlap     10
-        artificial_gap    10
-        use_old_score     true
-    }
-    
-    loop_removal
-    {
-        max_loops       10
-        mp_max_loops    10
-    }
-
-    remove_overlaps     true
-}
-
-multicell {
     split_edge_length    99
     normalize_weight     false
     cut_all_overlaps  false
@@ -172,7 +28,7 @@ multicell {
     ; extension selection
     extension_options
     {
-        recalculate_threshold      true
+        use_default_single_threshold false
         single_threshold           1.75676
 ;    A.mirum threshold 0.076
 ;    E.coli RL36 threshold 0.717949
@@ -183,8 +39,8 @@ multicell {
 
     mate_pair_options
     {
-        recalculate_threshold      true
-        single_threshold           0.001
+        use_default_single_threshold true
+        single_threshold           30
         weight_threshold           0.5
         priority_coeff             1.5
     }
@@ -199,13 +55,19 @@ multicell {
         cl_threshold  0
 
         fix_gaps       true
-        min_gap_score   3.0
+        use_la_gap_joiner true
+        ;next param should be 0.51 - 1.0 if use_old_score = true and 3.0 otherwise
+        min_gap_score   0.7
 
         max_must_overlap  -2
         max_can_overlap   0.5
         short_overlap     6
         artificial_gap    10
-        use_old_score   false
+        use_old_score   true
+
+        min_overlap_length 10
+        flank_addition_coefficient -5.9
+        flank_multiplication_coefficient 0.97
     }
     
     loop_removal
@@ -215,10 +77,31 @@ multicell {
     }
 
     remove_overlaps     true
+    use_coordinated_coverage false
+    coordinated_coverage
+    {
+       max_edge_length_repeat 300
+       delta                  0.4
+    }
+
+    scaffolding2015 {
+        autodetect      true
+        min_unique_length 10000
+        unique_coverage_variation 0.5
+        ; (median * (1+variation) > unique > median * (1 - variation))
+    }
+
+    scaffold_graph {
+        construct    false
+        output       false
+        min_read_count 20
+        graph_connectivity false
+        max_path_length 10000
+    }
 }
 
-long_reads {
 
+long_reads {
     pacbio_reads {
         filtering   2.5
         weight_priority    1.2
@@ -237,3 +120,67 @@ long_reads {
         unique_edge_priority 2.0
     }
 }
+}
+
+sc_pe {
+params {
+    normalize_weight        true
+
+    ; extension selection
+    extension_options
+    {
+        use_default_single_threshold false
+        single_threshold           0.001
+        weight_threshold           0.6
+    }
+
+}
+}
+
+moleculo_pe {
+params {
+    normalize_weight        true
+    cut_all_overlaps  true
+
+    ; extension selection
+    extension_options
+    {
+        use_default_single_threshold false
+        single_threshold           0.001
+        weight_threshold           0.6
+    }
+
+    scaffolder {
+        short_overlap     10
+        use_la_gap_joiner false
+    }
+}
+}
+
+;NB descends from sc_pe
+meta_pe {
+params {
+    remove_overlaps     true
+    cut_all_overlaps  true
+
+    ;TODO proper configuration of different extenders is not supported 
+    ;TODO most settings are hardcoded for now 
+
+    ;normalize_weight        NA
+    extension_options
+    {
+        ;use_default_single_threshold NA
+        ;single_threshold           NA 
+        weight_threshold           0.6
+    }
+    
+    use_coordinated_coverage true
+}
+}
+
+prelim_pe {
+params {
+    use_coordinated_coverage false
+    remove_overlaps     false
+}
+}
diff --git a/configs/debruijn/simplification.info b/configs/debruijn/simplification.info
new file mode 100644
index 0000000..7f6768d
--- /dev/null
+++ b/configs/debruijn/simplification.info
@@ -0,0 +1,561 @@
+; simplification
+
+default
+{
+    ; number of iterations in basic simplification cycle
+    cycle_iter_count 10
+
+    ; enable advanced simplification algo
+    post_simplif_enabled true
+    
+    ; enable advanced ec removal algo
+    topology_simplif_enabled false
+    
+    ; tip clipper:
+    tc
+    {
+        ; rctc: tip_cov < rctc * not_tip_cov
+        ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
+        ; todo think about params one more time
+        condition               "{ tc_lb 3.5, cb 1000000, rctc 2.0 } { tc_lb 10., cb auto }"
+    }
+  
+	; bulge remover:
+	br
+	{
+		enabled				true
+        main_iteration_only false
+		max_bulge_length_coefficient	3.	; max_bulge_length = max_bulge_length_coefficient * k
+       	max_additive_length_coefficient 100
+		max_coverage			1000.0
+		max_relative_coverage		1.1	; bulge_cov < this * not_bulge_cov
+		max_delta			3
+		max_relative_delta		0.1
+        max_number_edges        1000
+        parallel true
+        buff_size 10000
+        buff_cov_diff 2.
+        buff_cov_rel_diff 0.2
+	}
+	
+	; erroneous connections remover:
+	ec
+	{
+       ; ec_lb: max_ec_length = k + ec_lb
+       ; icb: iterative coverage bound
+       ; to_ec_lb: max_ec_length = 2*tip_length(to_ec_lb) - 1
+        condition               "{ to_ec_lb 5, icb auto }"
+       ; condition               "{ ec_lb 9, icb 40.0 }"
+    }
+    
+    ; relative coverage erroneous component remover:
+    rcc
+    {
+        enabled false
+        coverage_gap    5.
+        max_length_coeff    2.0
+        max_length_with_tips_coeff   3.0
+        max_vertex_cnt      30
+        max_ec_length_coefficient   30
+        max_coverage_coeff  2.0
+    }
+    
+    ; relative edge disconnector:
+    relative_ed
+    {
+        enabled false
+        diff_mult  20.
+    }
+
+    ; final tip clipper:
+    final_tc
+    {
+        condition               ""
+    }
+
+    ; final bulge remover:
+    final_br
+    {
+        enabled				false
+        main_iteration_only false
+        max_bulge_length_coefficient	3.	; max_bulge_length = max_bulge_length_coefficient * k
+        max_additive_length_coefficient 100
+        max_coverage			1000.0
+        max_relative_coverage		1.1	; bulge_cov < this * not_bulge_cov
+        max_delta			3
+        max_relative_delta		0.1
+        max_number_edges        1000
+        parallel true
+        buff_size 10000
+        buff_cov_diff 2.
+        buff_cov_rel_diff 0.2
+    }
+    
+    ; topology based erroneous connection remover
+    tec
+    {
+        max_ec_length_coefficient   55  ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       5000
+        plausibility_length     200
+    }
+
+    ; topology and reliability based erroneous connection remover
+    trec
+    {
+        max_ec_length_coefficient   100 ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        unreliable_coverage     2.5
+    }
+    
+    ; interstrand erroneous connection remover (thorn remover)
+    isec
+    {
+        max_ec_length_coefficient   100 ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        span_distance       15000
+    }
+
+    ; max flow erroneous connection remover
+    mfec
+    {
+        enabled false
+        max_ec_length_coefficient   30  ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        plausibility_length     200
+    }
+    
+    piec
+    {
+        max_ec_length_coefficient   30  ; max_ec_length = k + max_ec_length_coefficient
+        min_neighbour_length        100
+    }
+
+    ; isolated edges remover
+    ier
+    {
+        enabled                     true
+        max_length                  0
+        max_coverage                2
+        max_length_any_cov          150 ; will be taken max with read_length 
+    }
+    
+    ; topology tip clipper:
+    ttc
+    {
+        length_coeff    3.5
+        plausibility_length 250
+        uniqueness_length   1500
+    }
+
+    ; complex tip clipper
+    complex_tc
+    {
+    enabled 0
+    }       
+
+    ; complex bulge remover
+    cbr
+    {
+        enabled false
+        max_relative_length 5.
+        max_length_difference   5
+    }
+
+    ; hidden ec remover
+    her
+    {
+        enabled false
+        uniqueness_length           1500
+        unreliability_threshold     4
+        relative_threshold          5     
+    }
+
+    init_clean
+    {
+        self_conj_condition "{ ec_lb 100, cb 1.0 }"
+        early_it_only   false
+       ; will be enabled only if average coverage \leq activate_cov
+        activation_cov  10.
+
+        ; isolated edges remover
+        ier
+        {
+            enabled                     true
+            max_length                  0
+            max_coverage                0
+            max_length_any_cov          0 ; will be taken max with read_length 
+        }
+
+        tip_condition   "{ tc_lb 3.5, cb auto }"
+        ec_condition    "{ ec_lb 10, cb 2.0 }"
+        ; edges with flank cov around alternative less than value will be disconnected 
+        ; negative value to disable
+        disconnect_flank_cov    -1.0
+    }
+    
+}
+
+sc
+{
+    ; enable advanced ec removal algo
+    topology_simplif_enabled true 
+
+    ; tip clipper:
+    tc
+    {
+        ; rctc: tip_cov < rctc * not_tip_cov
+        ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
+        condition               "{ tc_lb 3.5, cb 1000000, rctc 2.0 }"
+	}
+
+	; erroneous connections remover:
+	ec
+	{
+       ; ec_lb: max_ec_length = k + ec_lb
+       ; icb: iterative coverage bound
+       ; condition               "{ ec_lb 30, icb 20.0 }"
+       condition               "{ ec_lb 30, icb auto }"
+    }
+    
+    final_tc
+    {
+        condition               "{ tc_lb 3.5, cb 100000, rctc 10000 }"
+    }
+
+	; bulge remover:
+	final_br
+	{
+        enabled true
+		max_coverage			1000000.0
+		max_relative_coverage		100000.		; bulge_cov < this * not_bulge_cov
+	}
+	
+    ; relative coverage erroneous component remover:
+    rcc
+    {
+        enabled true
+        coverage_gap    20.
+        max_length_coeff    2.0
+        max_length_with_tips_coeff   3.0
+        max_vertex_cnt      30
+        max_ec_length_coefficient   30
+        max_coverage_coeff  5.0
+    }
+    
+    tec
+    {
+        max_ec_length_coefficient   55  ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        plausibility_length     200
+    }
+    
+    ; topology and reliability based erroneous connection remover
+    trec
+    {
+        max_ec_length_coefficient   100 ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        unreliable_coverage     2.5
+    }
+    
+    ; topology tip clipper:
+    ttc
+    {
+        length_coeff    3.5
+        plausibility_length 250
+        uniqueness_length   1500
+    }
+
+    ; complex bulge remover
+    cbr
+    {
+        enabled true
+    }
+
+    ; hidden ec remover
+    her
+    {
+        enabled                     true
+        uniqueness_length           1500
+        unreliability_threshold     0.2
+        relative_threshold          5     
+    }
+
+    init_clean
+    {
+        activation_cov  -1.
+        ier
+        {
+            enabled false
+        }
+
+        tip_condition   ""
+        ec_condition    ""
+    }
+}
+
+moleculo
+{
+    ; enable advanced ec removal algo
+    topology_simplif_enabled false 
+
+    ; tip clipper:
+    tc
+    {
+        ; rctc: tip_cov < rctc * not_tip_cov
+        ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
+        condition               "{ tc_lb 2.5, cb 3, rctc 10000 } { tc_lb 4.5, mmm 2 }"
+    }
+
+    ; bulge remover:
+    br
+    {
+        max_coverage            3
+        max_relative_coverage       100000.     ; bulge_cov < this * not_bulge_cov
+    }
+    
+    ; erroneous connections remover:
+    ec
+    {
+       ; ec_lb: max_ec_length = k + ec_lb
+       ; icb: iterative coverage bound
+       ; condition               "{ ec_lb 30, icb 20.0 }"
+       condition               "{ ec_lb 30, icb 3.1 }"
+    }
+    
+    ; relative coverage erroneous component remover:
+    rcc
+    {
+        enabled true
+        coverage_gap    20.
+        max_length_coeff    2.0
+        max_length_with_tips_coeff   3.0
+        max_vertex_cnt      30
+        max_ec_length_coefficient   30
+        max_coverage_coeff  5.0
+    }
+    
+    tec
+    {
+        max_ec_length_coefficient   55  ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        plausibility_length     200
+    }
+    
+    ; topology and reliability based erroneous connection remover
+    trec
+    {
+        max_ec_length_coefficient   100 ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        unreliable_coverage     2.5
+    }
+    
+    ; topology tip clipper:
+    ttc
+    {
+        length_coeff    3.5
+        plausibility_length 250
+        uniqueness_length   1500
+    }
+
+    ; complex bulge remover
+    cbr
+    {
+        enabled true
+        pics_enabled 0
+        folder  complex_br_components 
+        max_relative_length 5.
+        max_length_difference   5
+    }
+
+    ; hidden ec remover
+    her
+    {
+        enabled                     true
+        uniqueness_length           1500
+        unreliability_threshold     0.2
+        relative_threshold          5     
+    }
+
+    init_clean
+    {
+        early_it_only   true
+
+        activation_cov  -1.
+        ier
+        {
+            enabled                     false
+        }
+
+        tip_condition   ""
+        ec_condition    ""
+    }
+}
+
+careful
+{
+    ; bulge remover:
+    br
+    {
+        max_coverage            1000000.0
+        max_relative_coverage       1.5     ; bulge_cov < this * not_bulge_cov
+        parallel false
+    }
+    
+    ; complex bulge remover
+    cbr
+    {
+        enabled false
+    }
+
+    ; relative coverage erroneous component remover:
+    rcc
+    {
+        enabled false
+    }
+
+    init_clean
+    {
+        early_it_only   true
+
+        activation_cov  -1.
+        ier
+        {
+            enabled                     false
+        }
+
+        tip_condition   ""
+        ec_condition    ""
+    }
+}
+
+diploid_simp
+{
+    post_simplif_enabled false
+
+	; bulge remover:
+	br
+	{
+		enabled				false
+	}
+}
+
+meta
+{
+    cycle_iter_count 3
+
+    ; enable advanced ec removal algo
+    topology_simplif_enabled false
+
+    ; erroneous connections remover:
+    ec
+    {
+       ; ec_lb: max_ec_length = k + ec_lb
+       ; icb: iterative coverage bound
+       ; condition               "{ ec_lb 30, icb 20.0 }"
+       condition               "{ ec_lb 30, icb 2.5 }"
+    }
+    
+    ; tip clipper:
+    tc
+    {
+        ; rctc: tip_cov < rctc * not_tip_cov
+        ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
+        condition               "{ tc_lb 3.5, cb 10000 } { tc_lb 6., cb 2.5 }"
+    }
+
+    ; relative coverage erroneous component remover:
+    rcc
+    {
+        enabled true
+        coverage_gap    5.
+        max_length_coeff    3.0
+        max_length_with_tips_coeff   5.0
+        max_vertex_cnt      100
+        max_ec_length_coefficient   300
+        max_coverage_coeff  -1.0
+	}
+
+	; relative edge disconnector:
+	relative_ed
+	{
+        enabled true
+        diff_mult  10.
+	}
+
+	; bulge remover:
+	br
+	{
+		max_coverage			1000000.0
+		max_relative_coverage		100000.		; bulge_cov < this * not_bulge_cov
+		max_delta			10
+		max_relative_delta		0.1
+        parallel true
+	}
+
+    ; final tip clipper:
+    final_tc
+    {
+        ; rctc: tip_cov < rctc * not_tip_cov
+        ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
+        condition               "{ lb 500, cb 3., rctc 1.0 } { lb 1500, cb 20., rctc 0.2 }"
+    }
+
+    ; final bulge remover:
+    final_br
+    {
+        enabled true
+        main_iteration_only true
+        max_bulge_length_coefficient    50.     ; max_bulge_length = max_bulge_length_coefficient * k
+        max_coverage			1000000.0
+        max_relative_coverage       0.5     ; bulge_cov < this * not_bulge_cov
+        max_delta           50
+        max_relative_delta		0.1
+    }
+
+    ; second final bulge remover:
+    ; only in meta mode, inherits settings of final_br
+    second_final_br
+    {
+        max_delta           1500
+        max_number_edges        3
+    }
+
+    init_clean
+    {
+       early_it_only   true
+       ier {
+           enabled true
+       }
+       tip_condition   "{ tc_lb 3.5, cb 2.0 }"
+       ec_condition    "{ ec_lb 10, cb 0.5 }"
+    }
+
+}
+
+preliminary
+{
+    init_clean
+    {
+        self_conj_condition "{ ec_lb 100, cb 20.0 }"
+        early_it_only   false
+        ier
+        {
+            enabled true
+        }
+        tip_condition   "{ lb 20, cb 1.1, mmm 2 }"
+        ec_condition    "{ ec_lb 0, cb 0.9 }"
+        disconnect_flank_cov    0.9
+    }
+
+    post_simplif_enabled false
+
+	; bulge remover:
+	br
+	{
+		max_coverage			1000000.0
+		max_relative_coverage		0.5		; bulge_cov < this * not_bulge_cov
+		max_delta			10
+		max_relative_delta		0.1
+	}
+	
+}
diff --git a/configs/debruijn/simplification.info.template b/configs/debruijn/simplification.info.template
index 0eb60b1..7f6768d 100644
--- a/configs/debruijn/simplification.info.template
+++ b/configs/debruijn/simplification.info.template
@@ -11,26 +11,31 @@ default
     ; enable advanced ec removal algo
     topology_simplif_enabled false
     
-	; tip clipper:
-	tc
-	{
+    ; tip clipper:
+    tc
+    {
         ; rctc: tip_cov < rctc * not_tip_cov
         ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
         ; todo think about params one more time
         condition               "{ tc_lb 3.5, cb 1000000, rctc 2.0 } { tc_lb 10., cb auto }"
-	}
+    }
   
-
 	; bulge remover:
 	br
 	{
 		enabled				true
+        main_iteration_only false
 		max_bulge_length_coefficient	3.	; max_bulge_length = max_bulge_length_coefficient * k
        	max_additive_length_coefficient 100
 		max_coverage			1000.0
 		max_relative_coverage		1.1	; bulge_cov < this * not_bulge_cov
 		max_delta			3
 		max_relative_delta		0.1
+        max_number_edges        1000
+        parallel true
+        buff_size 10000
+        buff_cov_diff 2.
+        buff_cov_rel_diff 0.2
 	}
 	
 	; erroneous connections remover:
@@ -41,11 +46,11 @@ default
        ; to_ec_lb: max_ec_length = 2*tip_length(to_ec_lb) - 1
         condition               "{ to_ec_lb 5, icb auto }"
        ; condition               "{ ec_lb 9, icb 40.0 }"
-	}
-	
-	; relative coverage erroneous component remover:
-	rcc
-	{
+    }
+    
+    ; relative coverage erroneous component remover:
+    rcc
+    {
         enabled false
         coverage_gap    5.
         max_length_coeff    2.0
@@ -53,109 +58,141 @@ default
         max_vertex_cnt      30
         max_ec_length_coefficient   30
         max_coverage_coeff  2.0
-	}
-	
-	; topology based erroneous connection remover
-	tec
-	{
-		max_ec_length_coefficient	55	; max_ec_length = k + max_ec_length_coefficient
-		uniqueness_length		5000
-		plausibility_length		200
-	}
+    }
+    
+    ; relative edge disconnector:
+    relative_ed
+    {
+        enabled false
+        diff_mult  20.
+    }
 
-	; topology and reliability based erroneous connection remover
-	trec
-	{
-		max_ec_length_coefficient	100	; max_ec_length = k + max_ec_length_coefficient
-		uniqueness_length		1500
-		unreliable_coverage		2.5
-	}
-	
-	; interstrand erroneous connection remover (thorn remover)
-	isec
-	{
-		max_ec_length_coefficient	100	; max_ec_length = k + max_ec_length_coefficient
-		uniqueness_length		1500
-		span_distance		15000
-	}
+    ; final tip clipper:
+    final_tc
+    {
+        condition               ""
+    }
 
-	; max flow erroneous connection remover
-	mfec
-	{
+    ; final bulge remover:
+    final_br
+    {
+        enabled				false
+        main_iteration_only false
+        max_bulge_length_coefficient	3.	; max_bulge_length = max_bulge_length_coefficient * k
+        max_additive_length_coefficient 100
+        max_coverage			1000.0
+        max_relative_coverage		1.1	; bulge_cov < this * not_bulge_cov
+        max_delta			3
+        max_relative_delta		0.1
+        max_number_edges        1000
+        parallel true
+        buff_size 10000
+        buff_cov_diff 2.
+        buff_cov_rel_diff 0.2
+    }
+    
+    ; topology based erroneous connection remover
+    tec
+    {
+        max_ec_length_coefficient   55  ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       5000
+        plausibility_length     200
+    }
+
+    ; topology and reliability based erroneous connection remover
+    trec
+    {
+        max_ec_length_coefficient   100 ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        unreliable_coverage     2.5
+    }
+    
+    ; interstrand erroneous connection remover (thorn remover)
+    isec
+    {
+        max_ec_length_coefficient   100 ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        span_distance       15000
+    }
+
+    ; max flow erroneous connection remover
+    mfec
+    {
         enabled false
-		max_ec_length_coefficient	30	; max_ec_length = k + max_ec_length_coefficient
-		uniqueness_length		1500
-		plausibility_length		200
-	}
-	
-	piec
-	{
-		max_ec_length_coefficient	30	; max_ec_length = k + max_ec_length_coefficient
-		min_neighbour_length		100
-	}
+        max_ec_length_coefficient   30  ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        plausibility_length     200
+    }
+    
+    piec
+    {
+        max_ec_length_coefficient   30  ; max_ec_length = k + max_ec_length_coefficient
+        min_neighbour_length        100
+    }
 
-	; isolated edges remover
-	ier
-	{
+    ; isolated edges remover
+    ier
+    {
+        enabled                     true
         max_length                  0
         max_coverage                2
         max_length_any_cov          150 ; will be taken max with read_length 
-	}
-	
-	; topology tip clipper:
-	ttc
-	{
+    }
+    
+    ; topology tip clipper:
+    ttc
+    {
         length_coeff    3.5
         plausibility_length 250
         uniqueness_length   1500
-	}
+    }
 
-	; complex tip clipper
-	complex_tc
-	{
-	enabled 0
-	}		
+    ; complex tip clipper
+    complex_tc
+    {
+    enabled 0
+    }       
 
-	; complex bulge remover
-	cbr
-	{
-		enabled	false
-		max_relative_length	5.
-		max_length_difference	5
-	}
+    ; complex bulge remover
+    cbr
+    {
+        enabled false
+        max_relative_length 5.
+        max_length_difference   5
+    }
 
-	; hidden ec remover
-	her
-	{
-	    enabled false
-	    uniqueness_length           1500
-	    unreliability_threshold     4
-	    relative_threshold          5     
-	}
-    
-    ;master switch over presimp, persistent iterators and disabling br
-    fast_features true
-    fast_activation_cov 10.
+    ; hidden ec remover
+    her
+    {
+        enabled false
+        uniqueness_length           1500
+        unreliability_threshold     4
+        relative_threshold          5     
+    }
 
-    presimp 
+    init_clean
     {
-       enabled true
-       parallel false
+        self_conj_condition "{ ec_lb 100, cb 1.0 }"
+        early_it_only   false
        ; will be enabled only if average coverage \leq activate_cov
-       tip_condition   "{ tc_lb 3.5, cb auto }"
-       ec_condition    "{ ec_lb 10, cb 2.0 }"
-
-       ; isolated edges remover
-       ier
-       {
-           max_length                  0
-           max_coverage                0
-           max_length_any_cov          0 ; will be taken max with read_length 
-       }
+        activation_cov  10.
+
+        ; isolated edges remover
+        ier
+        {
+            enabled                     true
+            max_length                  0
+            max_coverage                0
+            max_length_any_cov          0 ; will be taken max with read_length 
+        }
+
+        tip_condition   "{ tc_lb 3.5, cb auto }"
+        ec_condition    "{ ec_lb 10, cb 2.0 }"
+        ; edges with flank cov around alternative less than value will be disconnected 
+        ; negative value to disable
+        disconnect_flank_cov    -1.0
     }
-
-    persistent_cycle_iterators true
-    disable_br_in_cycle false
+    
 }
 
 sc
@@ -163,26 +200,14 @@ sc
     ; enable advanced ec removal algo
     topology_simplif_enabled true 
 
-	; tip clipper:
-	tc
-	{
+    ; tip clipper:
+    tc
+    {
         ; rctc: tip_cov < rctc * not_tip_cov
         ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
-        condition               "{ tc_lb 3.5, cb 100000, rctc 10000 }"
+        condition               "{ tc_lb 3.5, cb 1000000, rctc 2.0 }"
 	}
 
-	; bulge remover:
-	br
-	{
-		enabled				true
-		max_bulge_length_coefficient	3.		; max_bulge_length = max_bulge_length_coefficient * k
-       	max_additive_length_coefficient 100
-		max_coverage			1000000.0
-		max_relative_coverage		100000.		; bulge_cov < this * not_bulge_cov
-		max_delta			3
-		max_relative_delta		0.1
-	}
-	
 	; erroneous connections remover:
 	ec
 	{
@@ -190,11 +215,24 @@ sc
        ; icb: iterative coverage bound
        ; condition               "{ ec_lb 30, icb 20.0 }"
        condition               "{ ec_lb 30, icb auto }"
+    }
+    
+    final_tc
+    {
+        condition               "{ tc_lb 3.5, cb 100000, rctc 10000 }"
+    }
+
+	; bulge remover:
+	final_br
+	{
+        enabled true
+		max_coverage			1000000.0
+		max_relative_coverage		100000.		; bulge_cov < this * not_bulge_cov
 	}
 	
-	; relative coverage erroneous component remover:
-	rcc
-	{
+    ; relative coverage erroneous component remover:
+    rcc
+    {
         enabled true
         coverage_gap    20.
         max_length_coeff    2.0
@@ -202,47 +240,57 @@ sc
         max_vertex_cnt      30
         max_ec_length_coefficient   30
         max_coverage_coeff  5.0
-	}
-	
-	tec
-	{
-		max_ec_length_coefficient	55	; max_ec_length = k + max_ec_length_coefficient
-		uniqueness_length		1500
-		plausibility_length		200
-	}
-	
-	; topology and reliability based erroneous connection remover
-	trec
-	{
-		max_ec_length_coefficient	100	; max_ec_length = k + max_ec_length_coefficient
-		uniqueness_length		1500
-		unreliable_coverage		2.5
-	}
-	
-	; topology tip clipper:
-	ttc
-	{
+    }
+    
+    tec
+    {
+        max_ec_length_coefficient   55  ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        plausibility_length     200
+    }
+    
+    ; topology and reliability based erroneous connection remover
+    trec
+    {
+        max_ec_length_coefficient   100 ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        unreliable_coverage     2.5
+    }
+    
+    ; topology tip clipper:
+    ttc
+    {
         length_coeff    3.5
         plausibility_length 250
         uniqueness_length   1500
-	}
+    }
 
-	; complex bulge remover
-	cbr
-	{
-		enabled	true
-	}
+    ; complex bulge remover
+    cbr
+    {
+        enabled true
+    }
 
-	; hidden ec remover
-	her
-	{
-	    enabled                     true
-	    uniqueness_length           1500
-	    unreliability_threshold     0.2
-	    relative_threshold          5     
-	}
+    ; hidden ec remover
+    her
+    {
+        enabled                     true
+        uniqueness_length           1500
+        unreliability_threshold     0.2
+        relative_threshold          5     
+    }
 
-    fast_features false
+    init_clean
+    {
+        activation_cov  -1.
+        ier
+        {
+            enabled false
+        }
+
+        tip_condition   ""
+        ec_condition    ""
+    }
 }
 
 moleculo
@@ -250,38 +298,33 @@ moleculo
     ; enable advanced ec removal algo
     topology_simplif_enabled false 
 
-	; tip clipper:
-	tc
-	{
+    ; tip clipper:
+    tc
+    {
         ; rctc: tip_cov < rctc * not_tip_cov
         ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
         condition               "{ tc_lb 2.5, cb 3, rctc 10000 } { tc_lb 4.5, mmm 2 }"
-	}
+    }
 
-	; bulge remover:
-	br
-	{
-		enabled				true
-		max_bulge_length_coefficient	3.		; max_bulge_length = max_bulge_length_coefficient * k
-        	max_additive_length_coefficient 100
-		max_coverage			3
-		max_relative_coverage		100000.		; bulge_cov < this * not_bulge_cov
-		max_delta			3
-		max_relative_delta		0.1
-	}
-	
-	; erroneous connections remover:
-	ec
-	{
+    ; bulge remover:
+    br
+    {
+        max_coverage            3
+        max_relative_coverage       100000.     ; bulge_cov < this * not_bulge_cov
+    }
+    
+    ; erroneous connections remover:
+    ec
+    {
        ; ec_lb: max_ec_length = k + ec_lb
        ; icb: iterative coverage bound
        ; condition               "{ ec_lb 30, icb 20.0 }"
        condition               "{ ec_lb 30, icb 3.1 }"
-	}
-	
-	; relative coverage erroneous component remover:
-	rcc
-	{
+    }
+    
+    ; relative coverage erroneous component remover:
+    rcc
+    {
         enabled true
         coverage_gap    20.
         max_length_coeff    2.0
@@ -289,87 +332,100 @@ moleculo
         max_vertex_cnt      30
         max_ec_length_coefficient   30
         max_coverage_coeff  5.0
-	}
-	
-	tec
-	{
-		max_ec_length_coefficient	55	; max_ec_length = k + max_ec_length_coefficient
-		uniqueness_length		1500
-		plausibility_length		200
-	}
-	
-	; topology and reliability based erroneous connection remover
-	trec
-	{
-		max_ec_length_coefficient	100	; max_ec_length = k + max_ec_length_coefficient
-		uniqueness_length		1500
-		unreliable_coverage		2.5
-	}
-	
-	; topology tip clipper:
-	ttc
-	{
+    }
+    
+    tec
+    {
+        max_ec_length_coefficient   55  ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        plausibility_length     200
+    }
+    
+    ; topology and reliability based erroneous connection remover
+    trec
+    {
+        max_ec_length_coefficient   100 ; max_ec_length = k + max_ec_length_coefficient
+        uniqueness_length       1500
+        unreliable_coverage     2.5
+    }
+    
+    ; topology tip clipper:
+    ttc
+    {
         length_coeff    3.5
         plausibility_length 250
         uniqueness_length   1500
-	}
+    }
 
-	; complex bulge remover
-	cbr
-	{
-		enabled	1
-		pics_enabled 0
+    ; complex bulge remover
+    cbr
+    {
+        enabled true
+        pics_enabled 0
         folder  complex_br_components 
-		max_relative_length	5.
-		max_length_difference	5
-	}
-
-	; hidden ec remover
-	her
-	{
-	    enabled                     true
-	    uniqueness_length           1500
-	    unreliability_threshold     0.2
-	    relative_threshold          5     
-	}
+        max_relative_length 5.
+        max_length_difference   5
+    }
 
-    presimp 
+    ; hidden ec remover
+    her
     {
-        enabled false
+        enabled                     true
+        uniqueness_length           1500
+        unreliability_threshold     0.2
+        relative_threshold          5     
     }
 
-    stats_mode 0
-}
+    init_clean
+    {
+        early_it_only   true
 
+        activation_cov  -1.
+        ier
+        {
+            enabled                     false
+        }
 
+        tip_condition   ""
+        ec_condition    ""
+    }
+}
 
 careful
 {
-	; bulge remover:
-	br
-	{
-		enabled				true
-		max_bulge_length_coefficient	3.		; max_bulge_length = max_bulge_length_coefficient * k
-       	max_additive_length_coefficient 100
-		max_coverage			1000000.0
-		max_relative_coverage		1.5		; bulge_cov < this * not_bulge_cov
-		max_delta			3
-		max_relative_delta		0.1
-	}
-	
-	; complex bulge remover
-	cbr
-	{
-		enabled	false
-	}
+    ; bulge remover:
+    br
+    {
+        max_coverage            1000000.0
+        max_relative_coverage       1.5     ; bulge_cov < this * not_bulge_cov
+        parallel false
+    }
+    
+    ; complex bulge remover
+    cbr
+    {
+        enabled false
+    }
 
-	; relative coverage erroneous component remover:
-	rcc
-	{
+    ; relative coverage erroneous component remover:
+    rcc
+    {
         enabled false
-	}
+    }
+
+    init_clean
+    {
+        early_it_only   true
 
-    fast_features false
+        activation_cov  -1.
+        ier
+        {
+            enabled                     false
+        }
+
+        tip_condition   ""
+        ec_condition    ""
+    }
 }
 
 diploid_simp
@@ -380,12 +436,6 @@ diploid_simp
 	br
 	{
 		enabled				false
-		max_bulge_length_coefficient	3.		; max_bulge_length = max_bulge_length_coefficient * k
-        	max_additive_length_coefficient 100
-		max_coverage			1000000.0
-		max_relative_coverage		1.5		; bulge_cov < this * not_bulge_cov
-		max_delta			3
-		max_relative_delta		0.1
 	}
 }
 
@@ -396,81 +446,112 @@ meta
     ; enable advanced ec removal algo
     topology_simplif_enabled false
 
-	; erroneous connections remover:
-	ec
-	{
+    ; erroneous connections remover:
+    ec
+    {
        ; ec_lb: max_ec_length = k + ec_lb
        ; icb: iterative coverage bound
        ; condition               "{ ec_lb 30, icb 20.0 }"
        condition               "{ ec_lb 30, icb 2.5 }"
-	}
-	
-	; relative coverage erroneous component remover:
-	rcc
-	{
+    }
+    
+    ; tip clipper:
+    tc
+    {
+        ; rctc: tip_cov < rctc * not_tip_cov
+        ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
+        condition               "{ tc_lb 3.5, cb 10000 } { tc_lb 6., cb 2.5 }"
+    }
+
+    ; relative coverage erroneous component remover:
+    rcc
+    {
         enabled true
         coverage_gap    5.
-        max_length_coeff    2.0
+        max_length_coeff    3.0
         max_length_with_tips_coeff   5.0
         max_vertex_cnt      100
         max_ec_length_coefficient   300
         max_coverage_coeff  -1.0
 	}
 
+	; relative edge disconnector:
+	relative_ed
+	{
+        enabled true
+        diff_mult  10.
+	}
+
 	; bulge remover:
 	br
 	{
-		enabled				true
-		max_bulge_length_coefficient	3.		; max_bulge_length = max_bulge_length_coefficient * k
-       	max_additive_length_coefficient 100
 		max_coverage			1000000.0
 		max_relative_coverage		100000.		; bulge_cov < this * not_bulge_cov
 		max_delta			10
 		max_relative_delta		0.1
+        parallel true
 	}
 
-    fast_features true
+    ; final tip clipper:
+    final_tc
+    {
+        ; rctc: tip_cov < rctc * not_tip_cov
+        ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
+        condition               "{ lb 500, cb 3., rctc 1.0 } { lb 1500, cb 20., rctc 0.2 }"
+    }
 
-    presimp 
+    ; final bulge remover:
+    final_br
     {
-       enabled true
-       parallel false
-       tip_condition   "{ tc_lb 3.5, cb 100 }"
-       ec_condition    "{ ec_lb 10, cb 1.5 }"
+        enabled true
+        main_iteration_only true
+        max_bulge_length_coefficient    50.     ; max_bulge_length = max_bulge_length_coefficient * k
+        max_coverage			1000000.0
+        max_relative_coverage       0.5     ; bulge_cov < this * not_bulge_cov
+        max_delta           50
+        max_relative_delta		0.1
     }
 
-	; final tip clipper:
-	final_tc
-	{
-        ; rctc: tip_cov < rctc * not_tip_cov
-        ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length);
-        condition               "{ tc_lb 30, cb 20, rctc 0.5 }"
-	}
+    ; second final bulge remover:
+    ; only in meta mode, inherits settings of final_br
+    second_final_br
+    {
+        max_delta           1500
+        max_number_edges        3
+    }
 
-	; final bulge remover:
-	final_br
-	{
-		enabled				true
-		max_bulge_length_coefficient	50.		; max_bulge_length = max_bulge_length_coefficient * k
-       	max_additive_length_coefficient 100
-		max_coverage			1000000.0
-		max_relative_coverage		0.5		; bulge_cov < this * not_bulge_cov
-		max_delta			1500
-		max_relative_delta		0.1
-	}
+    init_clean
+    {
+       early_it_only   true
+       ier {
+           enabled true
+       }
+       tip_condition   "{ tc_lb 3.5, cb 2.0 }"
+       ec_condition    "{ ec_lb 10, cb 0.5 }"
+    }
 
 }
 
 preliminary
 {
+    init_clean
+    {
+        self_conj_condition "{ ec_lb 100, cb 20.0 }"
+        early_it_only   false
+        ier
+        {
+            enabled true
+        }
+        tip_condition   "{ lb 20, cb 1.1, mmm 2 }"
+        ec_condition    "{ ec_lb 0, cb 0.9 }"
+        disconnect_flank_cov    0.9
+    }
+
     post_simplif_enabled false
 
 	; bulge remover:
 	br
 	{
-		enabled				true
-		max_bulge_length_coefficient	3.		; max_bulge_length = max_bulge_length_coefficient * k
-       	max_additive_length_coefficient 100
 		max_coverage			1000000.0
 		max_relative_coverage		0.5		; bulge_cov < this * not_bulge_cov
 		max_delta			10
diff --git a/configs/debruijn/tsa.info b/configs/debruijn/tsa.info
new file mode 100644
index 0000000..c948068
--- /dev/null
+++ b/configs/debruijn/tsa.info
@@ -0,0 +1,5 @@
+tsa
+{
+    scaffolds_file /home/anton/gitrep/algorithmic-biology/assembler/BC087/K55/scaffolds.fasta
+    genome_file genome.fasta
+}
diff --git a/configs/dipspades/config.info b/configs/dipspades/config.info
new file mode 100644
index 0000000..773fdaa
--- /dev/null
+++ b/configs/dipspades/config.info
@@ -0,0 +1,64 @@
+; base parameters ;
+bp {
+	K			        55;
+	use_multithreading	true;
+	max_threads		    16;
+	max_memory		    512;
+    ; size of buffer for each thread in MB, 0 for autodetection
+    read_buffer_size 0
+}
+
+; input-output params ;
+io {
+	haplocontigs	haplocontigs
+	log_filename 	log.properties
+	output_base		data/debruijn
+	output_dir		simulated_e.coli_100k
+	tmp_dir			dipspades_tmp/
+	saves   		data/debruijn
+}
+
+; run params ;
+rp {
+	entry_point	    dipspades
+    ; entry_point   dipspades:heterozygosity_estimation
+    ; entry_point   dipspades:contig_graph_construction
+    ; entry_point   dipspades:polymorphic_br
+    ; entry_point   dipspades:dipspades
+    ; entry_point   dipspades:consensus_construction
+    ; entry_point   dipspades:haplotype_assembly
+	developer_mode	true
+}
+
+; polymorphic bulge remover config
+pbr {
+	enabled                         true
+	rel_bulge_length                .8      ; min(len1, len2) / max(len1, len2) >= rel_bulge_length
+                                                ; where len1, len2 - lengths of bulge sides
+	rel_bulge_align                 .5      ; editdist(seq1, seq2) / min(|seq1|, |seq2|) <= rel_bulge_align
+                                                ; where seq1, seq2 - sequences of bulge sides
+	paired_vert_abs_threshold       50      ;
+	paired_vert_rel_threshold       .15     ;
+	max_bulge_nucls_len             25000   ; maximal length (in nt number) of bulge sides
+	max_neigh_number                100     ; maximal number of neighs for bulge search
+	num_iters_lbr                   15      ; number of light bulge remover iterations
+}
+
+; consensus constructor config
+cc {
+	    enabled                         true
+        bulge_len_quantile              .95                             ; value of this quantile of bulge length histogram
+                                                                        ; is upper bound of bulge length in contigs
+        tails_lie_on_bulges             true                            ; tail has to lie on bulge side
+		estimate_tails					true
+        align_bulge_sides               true                            ; check bulge into paired haplocontigs for alignment
+        min_overlap_size                1500                            ; minimal allowable length of overlap (in nt)
+        min_lcs_size                    1500                            ; minimal allowable length of shared subsequence of
+                                                                        ; paired contigs (in nt)
+        max_loop_length                 500                             ; maximal length of loop that can be ignored when removing redundant contigs
+}
+
+; haplotype_assembly
+ha {
+	ha_enabled                         true
+}
diff --git a/configs/debruijn/log.properties.template b/configs/dipspades/log.properties
similarity index 62%
copy from configs/debruijn/log.properties.template
copy to configs/dipspades/log.properties
index efd96f1..68df20a 100644
--- a/configs/debruijn/log.properties.template
+++ b/configs/dipspades/log.properties
@@ -1,19 +1,21 @@
 default=INFO
 
-#RelativeCoverageHelper=TRACE
-#RelativelyLowCoveredComponentSearcher=TRACE
-#RelativelyLowCoveredComponentChecker=TRACE
-#RelativeCoverageComponentRemover=TRACE
-#FlankingCoverage=TRACE
 #PolymorphicBulgeRemover=TRACE
 #BulgeSplitter=TRACE
 #SubpathSplitter=TRACE
 #ComplexBulgeGluer=TRACE
 #GluingVericesDefiner=TRACE
 #GluingVericesDefinerResults=TRACE
+#ConsensusContigsConstructor=TRACE
+#CloseGapsCorrector=TRACE
+#LoopBulgeDeletionCorrector=TRACE
+#CompositeMappingContig=TRACE
+#DiploidContigSeparator=TRACE
+#ContigLabelAllocator=TRACE
+#OverlappedContigsMap=TRACE
+#OverlapCorrector=TRACE
+#EqualSequencesGluer=TRACE
 
-#TwoStepAlgorithmRunner=TRACE
-#AlgorithmRunner=TRACE
 #DeBruijnGraphConstructor=TRACE
 #PairedHandlerApplier=TRACE
 #QualityEdgeLocalityPrintingRH=TRACE
@@ -32,13 +34,3 @@ default=INFO
 #PathSetGraphConstructor=TRACE
 #NewExtendedSequenceMapper=TRACE
 #JumpingPairInfoChecker=TRACE
-
-#PathExtender=DEBUG
-#BidirectionalPath=DEBUG
-#NextPathSearcher=DEBUG
-#ExtensionChooser=DEBUG
-#WeightCounter=DEBUG
-#PathExtendIO=DEBUG
-#PathExtendPI=DEBUG
-#LoopTraverser=DEBUG
-#PEResolver=DEBUG
diff --git a/configs/hammer/config.info.template b/configs/hammer/config.info
similarity index 90%
copy from configs/hammer/config.info.template
copy to configs/hammer/config.info
index 5150bda..a7d3ffa 100644
--- a/configs/hammer/config.info.template
+++ b/configs/hammer/config.info
@@ -20,6 +20,7 @@ count_do				1
 count_numfiles				16
 count_merge_nthreads			16
 count_split_buffer			0
+count_filter_singletons                 0
 
 ; hamming graph clustering
 hamming_do				1
@@ -36,7 +37,7 @@ bayes_debug_output			0
 bayes_hammer_mode			0
 bayes_write_solid_kmers			0
 bayes_write_bad_kmers			0
-bayes_initial_refine                    0
+bayes_initial_refine                    1
 
 ; iterative expansion step
 expand_do				1
@@ -52,3 +53,4 @@ correct_use_threshold			1
 correct_threshold			0.98
 correct_nthreads			4
 correct_readbuffer			100000
+correct_stats                           1
diff --git a/configs/hammer/config.info.template b/configs/hammer/config.info.template
index 5150bda..a7d3ffa 100644
--- a/configs/hammer/config.info.template
+++ b/configs/hammer/config.info.template
@@ -20,6 +20,7 @@ count_do				1
 count_numfiles				16
 count_merge_nthreads			16
 count_split_buffer			0
+count_filter_singletons                 0
 
 ; hamming graph clustering
 hamming_do				1
@@ -36,7 +37,7 @@ bayes_debug_output			0
 bayes_hammer_mode			0
 bayes_write_solid_kmers			0
 bayes_write_bad_kmers			0
-bayes_initial_refine                    0
+bayes_initial_refine                    1
 
 ; iterative expansion step
 expand_do				1
@@ -52,3 +53,4 @@ correct_use_threshold			1
 correct_threshold			0.98
 correct_nthreads			4
 correct_readbuffer			100000
+correct_stats                           1
diff --git a/configs/ionhammer/ionhammer.cfg b/configs/ionhammer/ionhammer.cfg
new file mode 100644
index 0000000..6daf8ef
--- /dev/null
+++ b/configs/ionhammer/ionhammer.cfg
@@ -0,0 +1,12 @@
+dataset           : dataset.cfg
+working_dir       : ./test_dataset/input/corrected/tmp
+output_dir        : ./test_dataset/input/corrected
+hard_memory_limit : 250
+max_nthreads      : 16
+kmer_qual_threshold   : 1e-24
+center_qual_threshold   : 1e-24
+delta_score_threshold   : 10.0
+keep_uncorrected_ends : true
+tau               : 1
+debug_mode        : false
+start_stage       : count
diff --git a/dipspades.py b/dipspades.py
index a4ba848..f12fa7f 100755
--- a/dipspades.py
+++ b/dipspades.py
@@ -17,6 +17,9 @@ import support
 import options_storage
 import dipspades_logic
 import spades_init
+spades_init.init()
+spades_version = spades_init.spades_version
+
 
 def main():
     all_long_options = list(set(options_storage.long_options + dipspades_logic.DS_Args_List.long_options))
@@ -30,11 +33,11 @@ def main():
     except getopt.GetoptError:
         _, exc, _ = sys.exc_info()
         sys.stderr.write(str(exc) + "\n")
-        options_storage.usage("", dipspades=True)
+        options_storage.usage(spades_version, dipspades=True)
         sys.stderr.flush()
         sys.exit(1)
     if not options:
-        options_storage.usage("", dipspades=True)
+        options_storage.usage(spades_version, dipspades=True)
         sys.stderr.flush()
         sys.exit(1)
 
@@ -53,11 +56,14 @@ def main():
             output_dir = abspath(expanduser(arg))
         elif opt == '--careful' or opt == '--mismatch-correction':
             continue
+        if opt == '-v' or opt == '--version':
+            options_storage.version(spades_version, mode="dipSPAdes")
+            sys.exit(0)
         if opt == '-h' or opt == '--help':
-            options_storage.usage("", dipspades=True)
+            options_storage.usage(spades_version, dipspades=True)
             sys.exit(0)
         elif opt == "--help-hidden":
-            options_storage.usage("", show_hidden=True, dipspades=True)
+            options_storage.usage(spades_version, show_hidden=True, dipspades=True)
             sys.exit(0)
         # for all other options
         cur_opt_arg = [opt]
diff --git a/dipspades_manual.html b/dipspades_manual.html
index add4ea5..ff3b7dd 100644
--- a/dipspades_manual.html
+++ b/dipspades_manual.html
@@ -109,6 +109,12 @@ Note that we assume that SPAdes installation directory is added to the <code>PAT
 <p>
     <code>-h</code> (or <code>--help</code>)<br>
         Prints help.
+</p>
+
+<p>
+    <code>-v</code> (or <code>--version</code>)<br>
+        Prints version.
+</p>
     
 <a id = "input_data"></a>
 <h4>3.2.2 Input data</h4>
diff --git a/ext/include/city/city.h b/ext/include/city/city.h
new file mode 100644
index 0000000..f8c733f
--- /dev/null
+++ b/ext/include/city/city.h
@@ -0,0 +1,109 @@
+// Copyright (c) 2011 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+// CityHash, by Geoff Pike and Jyrki Alakuijala
+//
+// http://code.google.com/p/cityhash/
+//
+// This file provides a few functions for hashing strings.  All of them are
+// high-quality functions in the sense that they pass standard tests such
+// as Austin Appleby's SMHasher.  They are also fast.
+//
+// For 64-bit x86 code, on short strings, we don't know of anything faster than
+// CityHash64 that is of comparable quality.  We believe our nearest competitor
+// is Murmur3.  For 64-bit x86 code, CityHash64 is an excellent choice for hash
+// tables and most other hashing (excluding cryptography).
+//
+// For 64-bit x86 code, on long strings, the picture is more complicated.
+// On many recent Intel CPUs, such as Nehalem, Westmere, Sandy Bridge, etc.,
+// CityHashCrc128 appears to be faster than all competitors of comparable
+// quality.  CityHash128 is also good but not quite as fast.  We believe our
+// nearest competitor is Bob Jenkins' Spooky.  We don't have great data for
+// other 64-bit CPUs, but for long strings we know that Spooky is slightly
+// faster than CityHash on some relatively recent AMD x86-64 CPUs, for example.
+// Note that CityHashCrc128 is declared in citycrc.h.
+//
+// For 32-bit x86 code, we don't know of anything faster than CityHash32 that
+// is of comparable quality.  We believe our nearest competitor is Murmur3A.
+// (On 64-bit CPUs, it is typically faster to use the other CityHash variants.)
+//
+// Functions in the CityHash family are not suitable for cryptography.
+//
+// Please see CityHash's README file for more details on our performance
+// measurements and so on.
+//
+// WARNING: This code has been only lightly tested on big-endian platforms!
+// It is known to work well on little-endian platforms that have a small penalty
+// for unaligned reads, such as current Intel and AMD moderate-to-high-end CPUs.
+// It should work on all 32-bit and 64-bit platforms that allow unaligned reads;
+// bug reports are welcome.
+//
+// By the way, for some hash functions, given strings a and b, the hash
+// of a+b is easily derived from the hashes of a and b.  This property
+// doesn't hold for any hash functions in this file.
+
+#ifndef CITY_HASH_H_
+#define CITY_HASH_H_
+
+#include <stdlib.h>  // for size_t.
+#include <stdint.h>
+#include <utility>
+
+typedef std::pair<uint64_t, uint64_t> city_uint128;
+
+inline uint64_t Uint128Low64(const city_uint128& x) { return x.first; }
+inline uint64_t Uint128High64(const city_uint128& x) { return x.second; }
+
+// Hash function for a byte array.
+uint64_t CityHash64(const char *buf, size_t len);
+
+// Hash function for a byte array.  For convenience, a 64-bit seed is also
+// hashed into the result.
+uint64_t CityHash64WithSeed(const char *buf, size_t len, uint64_t seed);
+
+// Hash function for a byte array.  For convenience, two seeds are also
+// hashed into the result.
+uint64_t CityHash64WithSeeds(const char *buf, size_t len,
+                             uint64_t seed0, uint64_t seed1);
+
+// Hash function for a byte array.
+city_uint128 CityHash128(const char *s, size_t len);
+
+// Hash function for a byte array.  For convenience, a 128-bit seed is also
+// hashed into the result.
+city_uint128 CityHash128WithSeed(const char *s, size_t len, city_uint128 seed);
+
+// Hash function for a byte array.  Most useful in 32-bit binaries.
+uint32_t CityHash32(const char *buf, size_t len);
+
+// Hash 128 input bits down to 64 bits of output.
+// This is intended to be a reasonably good hash function.
+inline uint64_t Hash128to64(const city_uint128& x) {
+  // Murmur-inspired hashing.
+  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+  uint64_t a = (Uint128Low64(x) ^ Uint128High64(x)) * kMul;
+  a ^= (a >> 47);
+  uint64_t b = (Uint128High64(x) ^ a) * kMul;
+  b ^= (b >> 47);
+  b *= kMul;
+  return b;
+}
+
+#endif  // CITY_HASH_H_
diff --git a/ext/include/city/citycrc.h b/ext/include/city/citycrc.h
new file mode 100644
index 0000000..318e391
--- /dev/null
+++ b/ext/include/city/citycrc.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+// CityHash, by Geoff Pike and Jyrki Alakuijala
+//
+// This file declares the subset of the CityHash functions that require
+// _mm_crc32_u64().  See the CityHash README for details.
+//
+// Functions in the CityHash family are not suitable for cryptography.
+
+#ifndef CITY_HASH_CRC_H_
+#define CITY_HASH_CRC_H_
+
+#include <city.h>
+
+// Hash function for a byte array.
+uint128 CityHashCrc128(const char *s, size_t len);
+
+// Hash function for a byte array.  For convenience, a 128-bit seed is also
+// hashed into the result.
+uint128 CityHashCrc128WithSeed(const char *s, size_t len, uint128 seed);
+
+// Hash function for a byte array.  Sets result[0] ... result[3].
+void CityHashCrc256(const char *s, size_t len, uint64 *result);
+
+#endif  // CITY_HASH_CRC_H_
diff --git a/ext/include/llvm/AlignOf.h b/ext/include/llvm/AlignOf.h
new file mode 100644
index 0000000..ccbe432
--- /dev/null
+++ b/ext/include/llvm/AlignOf.h
@@ -0,0 +1,189 @@
+//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AlignOf function that computes alignments for
+// arbitrary types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ALIGNOF_H
+#define LLVM_SUPPORT_ALIGNOF_H
+
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+
+namespace detail {
+
+// For everything other than an abstract class we can calculate alignment by
+// building a class with a single character and a member of the given type.
+template <typename T, bool = std::is_abstract<T>::value>
+struct AlignmentCalcImpl {
+  char x;
+  T t;
+private:
+  AlignmentCalcImpl() {} // Never instantiate.
+};
+
+// Abstract base class helper, this will have the minimal alignment and size
+// for any abstract class. We don't even define its destructor because this
+// type should never be used in a way that requires it.
+struct AlignmentCalcImplBase {
+  virtual ~AlignmentCalcImplBase() = 0;
+};
+
+// When we have an abstract class type, specialize the alignment computation
+// engine to create another abstract class that derives from both an empty
+// abstract base class and the provided type. This has the same effect as the
+// above except that it handles the fact that we can't actually create a member
+// of type T.
+template <typename T>
+struct AlignmentCalcImpl<T, true> : AlignmentCalcImplBase, T {
+  virtual ~AlignmentCalcImpl() = 0;
+};
+
+} // End detail namespace.
+
+/// AlignOf - A templated class that contains an enum value representing
+///  the alignment of the template argument.  For example,
+///  AlignOf<int>::Alignment represents the alignment of type "int".  The
+///  alignment calculated is the minimum alignment, and not necessarily
+///  the "desired" alignment returned by GCC's __alignof__ (for example).  Note
+///  that because the alignment is an enum value, it can be used as a
+///  compile-time constant (e.g., for template instantiation).
+template <typename T>
+struct AlignOf {
+  // Avoid warnings from GCC like:
+  //   comparison between 'enum llvm::AlignOf<X>::<anonymous>' and 'enum
+  //   llvm::AlignOf<Y>::<anonymous>' [-Wenum-compare]
+  // by using constexpr instead of enum.
+  // (except on MSVC, since it doesn't support constexpr yet).
+  static constexpr unsigned Alignment = static_cast<unsigned int>(
+      sizeof(detail::AlignmentCalcImpl<T>) - sizeof(T));
+  enum { Alignment_GreaterEqual_2Bytes = Alignment >= 2 ? 1 : 0 };
+  enum { Alignment_GreaterEqual_4Bytes = Alignment >= 4 ? 1 : 0 };
+  enum { Alignment_GreaterEqual_8Bytes = Alignment >= 8 ? 1 : 0 };
+  enum { Alignment_GreaterEqual_16Bytes = Alignment >= 16 ? 1 : 0 };
+
+  enum { Alignment_LessEqual_2Bytes = Alignment <= 2 ? 1 : 0 };
+  enum { Alignment_LessEqual_4Bytes = Alignment <= 4 ? 1 : 0 };
+  enum { Alignment_LessEqual_8Bytes = Alignment <= 8 ? 1 : 0 };
+  enum { Alignment_LessEqual_16Bytes = Alignment <= 16 ? 1 : 0 };
+};
+
+template <typename T> constexpr unsigned AlignOf<T>::Alignment;
+
+/// alignOf - A templated function that returns the minimum alignment of
+///  of a type.  This provides no extra functionality beyond the AlignOf
+///  class besides some cosmetic cleanliness.  Example usage:
+///  alignOf<int>() returns the alignment of an int.
+template <typename T>
+inline unsigned alignOf() { return AlignOf<T>::Alignment; }
+
+/// \struct AlignedCharArray
+/// \brief Helper for building an aligned character array type.
+///
+/// This template is used to explicitly build up a collection of aligned
+/// character array types. We have to build these up using a macro and explicit
+/// specialization to cope with old versions of MSVC and GCC where only an
+/// integer literal can be used to specify an alignment constraint. Once built
+/// up here, we can then begin to indirect between these using normal C++
+/// template parameters.
+
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+#ifndef __has_extension
+# define __has_extension(x) 0
+#endif
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#if __has_feature(cxx_alignas)
+template<std::size_t Alignment, std::size_t Size>
+struct AlignedCharArray {
+  alignas(Alignment) char buffer[Size];
+};
+
+#elif defined(__GNUC__) || defined(__IBM_ATTRIBUTES)
+/// \brief Create a type with an aligned char buffer.
+template<std::size_t Alignment, std::size_t Size>
+struct AlignedCharArray;
+
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+  template<std::size_t Size> \
+  struct AlignedCharArray<x, Size> { \
+    __attribute__((aligned(x))) char buffer[Size]; \
+  };
+
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)
+
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#else
+# error No supported align as directive.
+#endif
+
+namespace detail {
+template <typename T1,
+          typename T2 = char, typename T3 = char, typename T4 = char,
+          typename T5 = char, typename T6 = char, typename T7 = char,
+          typename T8 = char, typename T9 = char, typename T10 = char>
+class AlignerImpl {
+  T1 t1; T2 t2; T3 t3; T4 t4; T5 t5; T6 t6; T7 t7; T8 t8; T9 t9; T10 t10;
+
+  AlignerImpl(); // Never defined or instantiated.
+};
+
+template <typename T1,
+          typename T2 = char, typename T3 = char, typename T4 = char,
+          typename T5 = char, typename T6 = char, typename T7 = char,
+          typename T8 = char, typename T9 = char, typename T10 = char>
+union SizerImpl {
+  char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)],
+       arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)],
+       arr9[sizeof(T9)], arr10[sizeof(T10)];
+};
+} // end namespace detail
+
+/// \brief This union template exposes a suitably aligned and sized character
+/// array member which can hold elements of any of up to ten types.
+///
+/// These types may be arrays, structs, or any other types. The goal is to
+/// expose a char array buffer member which can be used as suitable storage for
+/// a placement new of any of these types. Support for more than ten types can
+/// be added at the cost of more boilerplate.
+template <typename T1,
+          typename T2 = char, typename T3 = char, typename T4 = char,
+          typename T5 = char, typename T6 = char, typename T7 = char,
+          typename T8 = char, typename T9 = char, typename T10 = char>
+struct AlignedCharArrayUnion : llvm::AlignedCharArray<
+    AlignOf<detail::AlignerImpl<T1, T2, T3, T4, T5,
+                                T6, T7, T8, T9, T10> >::Alignment,
+    sizeof(detail::SizerImpl<T1, T2, T3, T4, T5,
+                             T6, T7, T8, T9, T10>)> {
+};
+} // end namespace llvm
+#endif
diff --git a/ext/include/llvm/PointerEmbeddedInt.h b/ext/include/llvm/PointerEmbeddedInt.h
new file mode 100644
index 0000000..5bcce54
--- /dev/null
+++ b/ext/include/llvm/PointerEmbeddedInt.h
@@ -0,0 +1,87 @@
+//===- llvm/ADT/PointerEmbeddedInt.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTEREMBEDDEDINT_H
+#define LLVM_ADT_POINTEREMBEDDEDINT_H
+
+#include "PointerLikeTypeTraits.h"
+#include <climits>
+
+namespace llvm {
+
+/// Utility to embed an integer into a pointer-like type. This is specifically
+/// intended to allow embedding integers where fewer bits are required than
+/// exist in a pointer, and the integer can participate in abstractions along
+/// side other pointer-like types. For example it can be placed into a \c
+/// PointerSumType or \c PointerUnion.
+///
+/// Note that much like pointers, an integer value of zero has special utility
+/// due to boolean conversions. For example, a non-null value can be tested for
+/// in the above abstractions without testing the particular active member.
+/// Also, the default constructed value zero initializes the integer.
+template <typename IntT, int Bits = sizeof(IntT) * CHAR_BIT>
+class PointerEmbeddedInt {
+  uintptr_t Value;
+
+  static_assert(Bits < sizeof(uintptr_t) * CHAR_BIT,
+                "Cannot embed more bits than we have in a pointer!");
+
+  enum : uintptr_t {
+    // We shift as many zeros into the value as we can while preserving the
+    // number of bits desired for the integer.
+    Shift = sizeof(uintptr_t) * CHAR_BIT - Bits,
+
+    // We also want to be able to mask out the preserved bits for asserts.
+    Mask = static_cast<uintptr_t>(-1) << Bits
+  };
+
+  friend class PointerLikeTypeTraits<PointerEmbeddedInt>;
+
+  explicit PointerEmbeddedInt(uintptr_t Value) : Value(Value) {}
+
+public:
+  PointerEmbeddedInt() : Value(0) {}
+
+  PointerEmbeddedInt(IntT I) : Value(static_cast<uintptr_t>(I) << Shift) {
+    assert((I & Mask) == 0 && "Integer has bits outside those preserved!");
+  }
+
+  PointerEmbeddedInt &operator=(IntT I) {
+    assert((I & Mask) == 0 && "Integer has bits outside those preserved!");
+    Value = static_cast<uintptr_t>(I) << Shift;
+  }
+
+  // Note that this implicit conversion additionally allows all of the basic
+  // comparison operators to work transparently, etc.
+  operator IntT() const { return static_cast<IntT>(Value >> Shift); }
+};
+
+// Provide pointer like traits to support use with pointer unions and sum
+// types.
+template <typename IntT, int Bits>
+class PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
+  typedef PointerEmbeddedInt<IntT, Bits> T;
+
+public:
+  static inline void *getAsVoidPointer(const T &P) {
+    return reinterpret_cast<void *>(P.Value);
+  }
+  static inline T getFromVoidPointer(void *P) {
+    return T(reinterpret_cast<uintptr_t>(P));
+  }
+  static inline T getFromVoidPointer(const void *P) {
+    return T(reinterpret_cast<uintptr_t>(P));
+  }
+
+  enum { NumLowBitsAvailable = T::Shift };
+};
+
+}
+
+#endif
diff --git a/ext/include/llvm/PointerIntPair.h b/ext/include/llvm/PointerIntPair.h
new file mode 100644
index 0000000..e9d3cbe
--- /dev/null
+++ b/ext/include/llvm/PointerIntPair.h
@@ -0,0 +1,192 @@
+//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerIntPair class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERINTPAIR_H
+#define LLVM_ADT_POINTERINTPAIR_H
+
+#include "PointerLikeTypeTraits.h"
+#include <cstdlib>
+#include <cstdint>
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+
+template <typename T> struct DenseMapInfo;
+
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo;
+
+/// PointerIntPair - This class implements a pair of a pointer and small
+/// integer.  It is designed to represent this in the space required by one
+/// pointer by bitmangling the integer into the low part of the pointer.  This
+/// can only be done for small integers: typically up to 3 bits, but it depends
+/// on the number of bits available according to PointerLikeTypeTraits for the
+/// type.
+///
+/// Note that PointerIntPair always puts the IntVal part in the highest bits
+/// possible.  For example, PointerIntPair<void*, 1, bool> will put the bit for
+/// the bool into bit #2, not bit #0, which allows the low two bits to be used
+/// for something else.  For example, this allows:
+///   PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
+/// ... and the two bools will land in different bits.
+///
+template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
+          typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
+          typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
+class PointerIntPair {
+  intptr_t Value;
+
+public:
+  PointerIntPair() : Value(0) {}
+  PointerIntPair(PointerTy PtrVal, IntType IntVal) {
+    setPointerAndInt(PtrVal, IntVal);
+  }
+  explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }
+
+  PointerTy getPointer() const { return Info::getPointer(Value); }
+
+  IntType getInt() const { return (IntType)Info::getInt(Value); }
+
+  void setPointer(PointerTy PtrVal) {
+    Value = Info::updatePointer(Value, PtrVal);
+  }
+
+  void setInt(IntType IntVal) { Value = Info::updateInt(Value, IntVal); }
+
+  void initWithPointer(PointerTy PtrVal) {
+    Value = Info::updatePointer(0, PtrVal);
+  }
+
+  void setPointerAndInt(PointerTy PtrVal, IntType IntVal) {
+    Value = Info::updateInt(Info::updatePointer(0, PtrVal), IntVal);
+  }
+
+  PointerTy const *getAddrOfPointer() const {
+    return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
+  }
+
+  PointerTy *getAddrOfPointer() {
+    assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
+           "Can only return the address if IntBits is cleared and "
+           "PtrTraits doesn't change the pointer");
+    return reinterpret_cast<PointerTy *>(&Value);
+  }
+
+  void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
+  void setFromOpaqueValue(void *Val) {
+    Value = reinterpret_cast<intptr_t>(Val);
+  }
+
+  static PointerIntPair getFromOpaqueValue(void *V) {
+    PointerIntPair P;
+    P.setFromOpaqueValue(V);
+    return P;
+  }
+
+  // Allow PointerIntPairs to be created from const void * if and only if the
+  // pointer type could be created from a const void *.
+  static PointerIntPair getFromOpaqueValue(const void *V) {
+    (void)PtrTraits::getFromVoidPointer(V);
+    return getFromOpaqueValue(const_cast<void *>(V));
+  }
+
+  bool operator==(const PointerIntPair &RHS) const {
+    return Value == RHS.Value;
+  }
+  bool operator!=(const PointerIntPair &RHS) const {
+    return Value != RHS.Value;
+  }
+  bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
+  bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }
+  bool operator<=(const PointerIntPair &RHS) const {
+    return Value <= RHS.Value;
+  }
+  bool operator>=(const PointerIntPair &RHS) const {
+    return Value >= RHS.Value;
+  }
+};
+
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo {
+  static_assert(PtrTraits::NumLowBitsAvailable <
+                    std::numeric_limits<uintptr_t>::digits,
+                "cannot use a pointer type that has all bits free");
+  static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
+                "PointerIntPair with integer size too large for pointer");
+  enum : uintptr_t {
+    /// PointerBitMask - The bits that come from the pointer.
+    PointerBitMask =
+        ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
+
+    /// IntShift - The number of low bits that we reserve for other uses, and
+    /// keep zero.
+    IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,
+
+    /// IntMask - This is the unshifted mask for valid bits of the int type.
+    IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),
+
+    // ShiftedIntMask - This is the bits for the integer shifted in place.
+    ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
+  };
+
+  static PointerT getPointer(intptr_t Value) {
+    return PtrTraits::getFromVoidPointer(
+        reinterpret_cast<void *>(Value & PointerBitMask));
+  }
+
+  static intptr_t getInt(intptr_t Value) {
+    return (Value >> IntShift) & IntMask;
+  }
+
+  static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
+    intptr_t PtrWord =
+        reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
+    assert((PtrWord & ~PointerBitMask) == 0 &&
+           "Pointer is not sufficiently aligned");
+    // Preserve all low bits, just update the pointer.
+    return PtrWord | (OrigValue & ~PointerBitMask);
+  }
+
+  static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
+    intptr_t IntWord = static_cast<intptr_t>(Int);
+    assert((IntWord & ~IntMask) == 0 && "Integer too large for field");
+
+    // Preserve all bits other than the ones we are updating.
+    return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
+  }
+};
+
+template <typename PointerTy, unsigned IntBits, typename IntType,
+          typename PtrTraits, typename Info>
+class PointerLikeTypeTraits<PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info> > {
+  typedef PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info> self;
+public:
+  static inline void *getAsVoidPointer(self P) {
+    return reinterpret_cast<void *>(P.getPointer());
+  }
+  static inline self getFromVoidPointer(void *P) {
+    return self(reinterpret_cast<PointerTy>(P));
+  }
+  enum { NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable };
+};
+
+
+template <typename T> struct isPodLike;
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct isPodLike<PointerIntPair<PointerTy, IntBits, IntType>> {
+  static const bool value = true;
+};
+
+} // end namespace llvm
+#endif
diff --git a/ext/include/llvm/PointerLikeTypeTraits.h b/ext/include/llvm/PointerLikeTypeTraits.h
new file mode 100644
index 0000000..3d5b240
--- /dev/null
+++ b/ext/include/llvm/PointerLikeTypeTraits.h
@@ -0,0 +1,92 @@
+//===- llvm/Support/PointerLikeTypeTraits.h - Pointer Traits ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerLikeTypeTraits class.  This allows data
+// structures to reason about pointers and other things that are pointer sized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
+#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
+
+#include "AlignOf.h"
+#include <cstdint>
+
+namespace llvm {
+
+/// A traits type that is used to handle pointer types and things that are just
+/// wrappers for pointers as a uniform entity.
+template <typename T> class PointerLikeTypeTraits {
+  // getAsVoidPointer
+  // getFromVoidPointer
+  // getNumLowBitsAvailable
+};
+
+namespace detail {
+/// A tiny meta function to compute the log2 of a compile time constant.
+template <size_t N>
+struct ConstantLog2
+    : std::integral_constant<size_t, ConstantLog2<N / 2>::value + 1> {};
+template <> struct ConstantLog2<1> : std::integral_constant<size_t, 0> {};
+}
+
+// Provide PointerLikeTypeTraits for non-cvr pointers.
+template <typename T> struct PointerLikeTypeTraits<T *> {
+  static inline void *getAsVoidPointer(T *P) { return P; }
+  static inline T *getFromVoidPointer(void *P) { return static_cast<T *>(P); }
+
+  enum {
+    NumLowBitsAvailable = detail::ConstantLog2<AlignOf<T>::Alignment>::value
+  };
+};
+
+template <> struct PointerLikeTypeTraits<void *> {
+  static inline void *getAsVoidPointer(void *P) { return P; }
+  static inline void *getFromVoidPointer(void *P) { return P; }
+
+  /// Note, we assume here that void* is related to raw malloc'ed memory and
+  /// that malloc returns objects at least 4-byte aligned. However, this may be
+  /// wrong, or pointers may be from something other than malloc. In this case,
+  /// you should specify a real typed pointer or avoid this template.
+  ///
+  /// All clients should use assertions to do a run-time check to ensure that
+  /// this is actually true.
+  enum { NumLowBitsAvailable = 2 };
+};
+
+// Provide PointerLikeTypeTraits for const pointers.
+template <typename T> class PointerLikeTypeTraits<const T *> {
+  typedef PointerLikeTypeTraits<T *> NonConst;
+
+public:
+  static inline const void *getAsVoidPointer(const T *P) {
+    return NonConst::getAsVoidPointer(const_cast<T *>(P));
+  }
+  static inline const T *getFromVoidPointer(const void *P) {
+    return NonConst::getFromVoidPointer(const_cast<void *>(P));
+  }
+  enum { NumLowBitsAvailable = NonConst::NumLowBitsAvailable };
+};
+
+// Provide PointerLikeTypeTraits for uintptr_t.
+template <> class PointerLikeTypeTraits<uintptr_t> {
+public:
+  static inline void *getAsVoidPointer(uintptr_t P) {
+    return reinterpret_cast<void *>(P);
+  }
+  static inline uintptr_t getFromVoidPointer(void *P) {
+    return reinterpret_cast<uintptr_t>(P);
+  }
+  // No bits are available!
+  enum { NumLowBitsAvailable = 0 };
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/ext/include/llvm/PointerSumType.h b/ext/include/llvm/PointerSumType.h
new file mode 100644
index 0000000..51e3b2d
--- /dev/null
+++ b/ext/include/llvm/PointerSumType.h
@@ -0,0 +1,176 @@
+//===- llvm/ADT/PointerSumType.h --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERSUMTYPE_H
+#define LLVM_ADT_POINTERSUMTYPE_H
+
+#include "PointerLikeTypeTraits.h"
+
+namespace llvm {
+
+/// A compile time pair of an integer tag and the pointer-like type which it
+/// indexes within a sum type. Also allows the user to specify a particular
+/// traits class for pointer types with custom behavior such as over-aligned
+/// allocation.
+template <uintptr_t N, typename PointerArgT,
+          typename TraitsArgT = PointerLikeTypeTraits<PointerArgT>>
+struct PointerSumTypeMember {
+  enum { Tag = N };
+  typedef PointerArgT PointerT;
+  typedef TraitsArgT TraitsT;
+};
+
+namespace detail {
+
+template <typename TagT, typename... MemberTs>
+struct PointerSumTypeHelper;
+
+}
+
+/// A sum type over pointer-like types.
+///
+/// This is a normal tagged union across pointer-like types that uses the low
+/// bits of the pointers to store the tag.
+///
+/// Each member of the sum type is specified by passing a \c
+/// PointerSumTypeMember specialization in the variadic member argument list.
+/// This allows the user to control the particular tag value associated with
+/// a particular type, use the same type for multiple different tags, and
+/// customize the pointer-like traits used for a particular member. Note that
+/// these *must* be specializations of \c PointerSumTypeMember, no other type
+/// will suffice, even if it provides a compatible interface.
+///
+/// This type implements all of the comparison operators and even hash table
+/// support by comparing the underlying storage of the pointer values. It
+/// doesn't support delegating to particular members for comparisons.
+///
+/// It also default constructs to a zero tag with a null pointer, whatever that
+/// would be. This means that the zero value for the tag type is significant
+/// and may be desirable to set to a state that is particularly desirable to
+/// default construct.
+///
+/// There is no support for constructing or accessing with a dynamic tag as
+/// that would fundamentally violate the type safety provided by the sum type.
+template <typename TagT, typename... MemberTs> class PointerSumType {
+  uintptr_t Value;
+
+  typedef detail::PointerSumTypeHelper<TagT, MemberTs...> HelperT;
+
+public:
+  PointerSumType() : Value(0) {}
+
+  /// A typed constructor for a specific tagged member of the sum type.
+  template <TagT N>
+  static PointerSumType
+  create(typename HelperT::template Lookup<N>::PointerT Pointer) {
+    PointerSumType Result;
+    void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
+    assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
+           "Pointer is insufficiently aligned to store the discriminant!");
+    Result.Value = reinterpret_cast<uintptr_t>(V) | N;
+    return Result;
+  }
+
+  TagT getTag() const { return static_cast<TagT>(Value & HelperT::TagMask); }
+
+  template <TagT N> bool is() const { return N == getTag(); }
+
+  template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
+    void *P = is<N>() ? getImpl() : nullptr;
+    return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
+  }
+
+  template <TagT N>
+  typename HelperT::template Lookup<N>::PointerT cast() const {
+    assert(is<N>() && "This instance has a different active member.");
+    return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
+  }
+
+  operator bool() const { return Value & HelperT::PointerMask; }
+  bool operator==(const PointerSumType &R) const { return Value == R.Value; }
+  bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
+  bool operator<(const PointerSumType &R) const { return Value < R.Value; }
+  bool operator>(const PointerSumType &R) const { return Value > R.Value; }
+  bool operator<=(const PointerSumType &R) const { return Value <= R.Value; }
+  bool operator>=(const PointerSumType &R) const { return Value >= R.Value; }
+
+  uintptr_t getOpaqueValue() const { return Value; }
+
+protected:
+  void *getImpl() const {
+    return reinterpret_cast<void *>(Value & HelperT::PointerMask);
+  }
+};
+
+namespace detail {
+
+/// A helper template for implementing \c PointerSumType. It provides fast
+/// compile-time lookup of the member from a particular tag value, along with
+/// useful constants and compile time checking infrastructure.
+template <typename TagT, typename... MemberTs>
+struct PointerSumTypeHelper : MemberTs... {
+  // First we use a trick to allow quickly looking up information about
+  // a particular member of the sum type. This works because we arranged to
+  // have this type derive from all of the member type templates. We can select
+  // the matching member for a tag using type deduction during overload
+  // resolution.
+  template <TagT N, typename PointerT, typename TraitsT>
+  static PointerSumTypeMember<N, PointerT, TraitsT>
+  LookupOverload(PointerSumTypeMember<N, PointerT, TraitsT> *);
+  template <TagT N> static void LookupOverload(...);
+  template <TagT N> struct Lookup {
+    // Compute a particular member type by resolving the lookup helper overload.
+    typedef decltype(LookupOverload<N>(
+        static_cast<PointerSumTypeHelper *>(nullptr))) MemberT;
+
+    /// The Nth member's pointer type.
+    typedef typename MemberT::PointerT PointerT;
+
+    /// The Nth member's traits type.
+    typedef typename MemberT::TraitsT TraitsT;
+  };
+
+  // Next we need to compute the number of bits available for the discriminant
+  // by taking the min of the bits available for each member. Much of this
+  // would be amazingly easier with good constexpr support.
+  template <uintptr_t V, uintptr_t... Vs>
+  struct Min : std::integral_constant<
+                   uintptr_t, (V < Min<Vs...>::value ? V : Min<Vs...>::value)> {
+  };
+  template <uintptr_t V>
+  struct Min<V> : std::integral_constant<uintptr_t, V> {};
+  enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
+
+  // Also compute the smallest discriminant and various masks for convenience.
+  enum : uint64_t {
+    MinTag = Min<MemberTs::Tag...>::value,
+    PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
+    TagMask = ~PointerMask
+  };
+
+  // Finally we need a recursive template to do static checks of each
+  // member.
+  template <typename MemberT, typename... InnerMemberTs>
+  struct Checker : Checker<InnerMemberTs...> {
+    static_assert(MemberT::Tag < (1 << NumTagBits),
+                  "This discriminant value requires too many bits!");
+  };
+  template <typename MemberT> struct Checker<MemberT> : std::true_type {
+    static_assert(MemberT::Tag < (1 << NumTagBits),
+                  "This discriminant value requires too many bits!");
+  };
+  static_assert(Checker<MemberTs...>::value,
+                "Each member must pass the checker.");
+};
+
+}
+
+}
+
+#endif
diff --git a/ext/src/CMakeLists.txt b/ext/src/CMakeLists.txt
index 0da89d4..26d0f6b 100644
--- a/ext/src/CMakeLists.txt
+++ b/ext/src/CMakeLists.txt
@@ -11,4 +11,5 @@ add_subdirectory(ConsensusCore)
 add_subdirectory(bamtools)
 add_subdirectory(samtools)
 add_subdirectory(cppformat)
-add_subdirectory(ssw)
\ No newline at end of file
+add_subdirectory(ssw)
+add_subdirectory(cityhash)
\ No newline at end of file
diff --git a/ext/src/ConsensusCore/Version.cpp b/ext/src/ConsensusCore/Version.cpp
index 0a4116a..898fe96 100644
--- a/ext/src/ConsensusCore/Version.cpp
+++ b/ext/src/ConsensusCore/Version.cpp
@@ -41,7 +41,7 @@
 #include <boost/tuple/tuple.hpp>
 #include <boost/tuple/tuple_comparison.hpp>
 
-#include "Version.hpp"
+#include "ConsensusCore/Version.hpp"
 
 namespace ConsensusCore
 {
diff --git a/src/mph_index/CMakeLists.txt b/ext/src/cityhash/CMakeLists.txt
similarity index 64%
rename from src/mph_index/CMakeLists.txt
rename to ext/src/cityhash/CMakeLists.txt
index 461cb85..a7705ac 100644
--- a/src/mph_index/CMakeLists.txt
+++ b/ext/src/cityhash/CMakeLists.txt
@@ -1,11 +1,11 @@
 ############################################################################
 # Copyright (c) 2015 Saint Petersburg State University
-# Copyright (c) 2011-2014 Saint Petersburg Academic University
 # All Rights Reserved
 # See file LICENSE for details.
 ############################################################################
 
-project(mph_index CXX)
+project(cityhash CXX)
+
+add_library(cityhash STATIC
+            city.cc)
 
-add_library(mph_index STATIC
-            MurmurHash3.cpp)
diff --git a/ext/src/cityhash/city.cc b/ext/src/cityhash/city.cc
new file mode 100644
index 0000000..cf9120b
--- /dev/null
+++ b/ext/src/cityhash/city.cc
@@ -0,0 +1,639 @@
+// Copyright (c) 2011 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+// CityHash, by Geoff Pike and Jyrki Alakuijala
+//
+// This file provides CityHash64() and related functions.
+//
+// It's probably possible to create even faster hash functions by
+// writing a program that systematically explores some of the space of
+// possible hash functions, by using SIMD instructions, or by
+// compromising on hash quality.
+
+#include <city/city.h>
+
+#include <algorithm>
+#include <string.h>  // for memcpy and memset
+
+using namespace std;
+
+static uint64_t UNALIGNED_LOAD64(const char *p) {
+  uint64_t result;
+  memcpy(&result, p, sizeof(result));
+  return result;
+}
+
+static uint32_t UNALIGNED_LOAD32(const char *p) {
+  uint32_t result;
+  memcpy(&result, p, sizeof(result));
+  return result;
+}
+
+#ifdef _MSC_VER
+
+#include <stdlib.h>
+#define bswap_32(x) _byteswap_ulong(x)
+#define bswap_64(x) _byteswap_uint64(x)
+
+#elif defined(__APPLE__)
+
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#elif defined(__sun) || defined(sun)
+
+#include <sys/byteorder.h>
+#define bswap_32(x) BSWAP_32(x)
+#define bswap_64(x) BSWAP_64(x)
+
+#elif defined(__FreeBSD__)
+
+#include <sys/endian.h>
+#define bswap_32(x) bswap32(x)
+#define bswap_64(x) bswap64(x)
+
+#elif defined(__OpenBSD__)
+
+#include <sys/types.h>
+#define bswap_32(x) swap32(x)
+#define bswap_64(x) swap64(x)
+
+#elif defined(__NetBSD__)
+
+#include <sys/types.h>
+#include <machine/bswap.h>
+#if defined(__BSWAP_RENAME) && !defined(__bswap_32)
+#define bswap_32(x) bswap32(x)
+#define bswap_64(x) bswap64(x)
+#endif
+
+#else
+
+#include <byteswap.h>
+
+#endif
+
+#ifdef WORDS_BIGENDIAN
+#define uint32_in_expected_order(x) (bswap_32(x))
+#define uint64_in_expected_order(x) (bswap_64(x))
+#else
+#define uint32_in_expected_order(x) (x)
+#define uint64_in_expected_order(x) (x)
+#endif
+
+#define LIKELY(x) (__builtin_expect(!!(x), 1))
+
+static uint64_t Fetch64(const char *p) {
+  return uint64_in_expected_order(UNALIGNED_LOAD64(p));
+}
+
+static uint32_t Fetch32(const char *p) {
+  return uint32_in_expected_order(UNALIGNED_LOAD32(p));
+}
+
+// Some primes between 2^63 and 2^64 for various uses.
+static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64_t k1 = 0xb492b66fbe98f273ULL;
+static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
+
+// Magic numbers for 32-bit hashing.  Copied from Murmur3.
+static const uint32_t c1 = 0xcc9e2d51;
+static const uint32_t c2 = 0x1b873593;
+
+// A 32-bit to 32-bit integer hash copied from Murmur3.
+static uint32_t fmix(uint32_t h)
+{
+  h ^= h >> 16;
+  h *= 0x85ebca6b;
+  h ^= h >> 13;
+  h *= 0xc2b2ae35;
+  h ^= h >> 16;
+  return h;
+}
+
+static uint32_t Rotate32(uint32_t val, int shift) {
+  // Avoid shifting by 32: doing so yields an undefined result.
+  return shift == 0 ? val : ((val >> shift) | (val << (32 - shift)));
+}
+
+#undef PERMUTE3
+#define PERMUTE3(a, b, c) do { std::swap(a, b); std::swap(a, c); } while (0)
+
+static uint32_t Mur(uint32_t a, uint32_t h) {
+  // Helper from Murmur3 for combining two 32-bit values.
+  a *= c1;
+  a = Rotate32(a, 17);
+  a *= c2;
+  h ^= a;
+  h = Rotate32(h, 19);
+  return h * 5 + 0xe6546b64;
+}
+
+static uint32_t Hash32Len13to24(const char *s, size_t len) {
+  uint32_t a = Fetch32(s - 4 + (len >> 1));
+  uint32_t b = Fetch32(s + 4);
+  uint32_t c = Fetch32(s + len - 8);
+  uint32_t d = Fetch32(s + (len >> 1));
+  uint32_t e = Fetch32(s);
+  uint32_t f = Fetch32(s + len - 4);
+  uint32_t h = len;
+
+  return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h)))))));
+}
+
+static uint32_t Hash32Len0to4(const char *s, size_t len) {
+  uint32_t b = 0;
+  uint32_t c = 9;
+  for (size_t i = 0; i < len; i++) {
+    signed char v = s[i];
+    b = b * c1 + v;
+    c ^= b;
+  }
+  return fmix(Mur(b, Mur(len, c)));
+}
+
+static uint32_t Hash32Len5to12(const char *s, size_t len) {
+  uint32_t a = len, b = len * 5, c = 9, d = b;
+  a += Fetch32(s);
+  b += Fetch32(s + len - 4);
+  c += Fetch32(s + ((len >> 1) & 4));
+  return fmix(Mur(c, Mur(b, Mur(a, d))));
+}
+
+uint32_t CityHash32(const char *s, size_t len) {
+  if (len <= 24) {
+    return len <= 12 ?
+        (len <= 4 ? Hash32Len0to4(s, len) : Hash32Len5to12(s, len)) :
+        Hash32Len13to24(s, len);
+  }
+
+  // len > 24
+  uint32_t h = len, g = c1 * len, f = g;
+  uint32_t a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2;
+  uint32_t a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2;
+  uint32_t a2 = Rotate32(Fetch32(s + len - 16) * c1, 17) * c2;
+  uint32_t a3 = Rotate32(Fetch32(s + len - 12) * c1, 17) * c2;
+  uint32_t a4 = Rotate32(Fetch32(s + len - 20) * c1, 17) * c2;
+  h ^= a0;
+  h = Rotate32(h, 19);
+  h = h * 5 + 0xe6546b64;
+  h ^= a2;
+  h = Rotate32(h, 19);
+  h = h * 5 + 0xe6546b64;
+  g ^= a1;
+  g = Rotate32(g, 19);
+  g = g * 5 + 0xe6546b64;
+  g ^= a3;
+  g = Rotate32(g, 19);
+  g = g * 5 + 0xe6546b64;
+  f += a4;
+  f = Rotate32(f, 19);
+  f = f * 5 + 0xe6546b64;
+  size_t iters = (len - 1) / 20;
+  do {
+    uint32_t a0 = Rotate32(Fetch32(s) * c1, 17) * c2;
+    uint32_t a1 = Fetch32(s + 4);
+    uint32_t a2 = Rotate32(Fetch32(s + 8) * c1, 17) * c2;
+    uint32_t a3 = Rotate32(Fetch32(s + 12) * c1, 17) * c2;
+    uint32_t a4 = Fetch32(s + 16);
+    h ^= a0;
+    h = Rotate32(h, 18);
+    h = h * 5 + 0xe6546b64;
+    f += a1;
+    f = Rotate32(f, 19);
+    f = f * c1;
+    g += a2;
+    g = Rotate32(g, 18);
+    g = g * 5 + 0xe6546b64;
+    h ^= a3 + a1;
+    h = Rotate32(h, 19);
+    h = h * 5 + 0xe6546b64;
+    g ^= a4;
+    g = bswap_32(g) * 5;
+    h += a4 * 5;
+    h = bswap_32(h);
+    f += a0;
+    PERMUTE3(f, h, g);
+    s += 20;
+  } while (--iters != 0);
+  g = Rotate32(g, 11) * c1;
+  g = Rotate32(g, 17) * c1;
+  f = Rotate32(f, 11) * c1;
+  f = Rotate32(f, 17) * c1;
+  h = Rotate32(h + g, 19);
+  h = h * 5 + 0xe6546b64;
+  h = Rotate32(h, 17) * c1;
+  h = Rotate32(h + f, 19);
+  h = h * 5 + 0xe6546b64;
+  h = Rotate32(h, 17) * c1;
+  return h;
+}
+
+// Bitwise right rotate.  Normally this will compile to a single
+// instruction, especially if the shift is a manifest constant.
+static uint64_t Rotate(uint64_t val, int shift) {
+  // Avoid shifting by 64: doing so yields an undefined result.
+  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+static uint64_t ShiftMix(uint64_t val) {
+  return val ^ (val >> 47);
+}
+
+static uint64_t HashLen16(uint64_t u, uint64_t v) {
+  return Hash128to64(city_uint128(u, v));
+}
+
+static uint64_t HashLen16(uint64_t u, uint64_t v, uint64_t mul) {
+  // Murmur-inspired hashing.
+  uint64_t a = (u ^ v) * mul;
+  a ^= (a >> 47);
+  uint64_t b = (v ^ a) * mul;
+  b ^= (b >> 47);
+  b *= mul;
+  return b;
+}
+
+static uint64_t HashLen0to16(const char *s, size_t len) {
+  if (len >= 8) {
+    uint64_t mul = k2 + len * 2;
+    uint64_t a = Fetch64(s) + k2;
+    uint64_t b = Fetch64(s + len - 8);
+    uint64_t c = Rotate(b, 37) * mul + a;
+    uint64_t d = (Rotate(a, 25) + b) * mul;
+    return HashLen16(c, d, mul);
+  }
+  if (len >= 4) {
+    uint64_t mul = k2 + len * 2;
+    uint64_t a = Fetch32(s);
+    return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
+  }
+  if (len > 0) {
+    uint8_t a = s[0];
+    uint8_t b = s[len >> 1];
+    uint8_t c = s[len - 1];
+    uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+    uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+    return ShiftMix(y * k2 ^ z * k0) * k2;
+  }
+  return k2;
+}
+
+// This probably works well for 16-byte strings as well, but it may be overkill
+// in that case.
+static uint64_t HashLen17to32(const char *s, size_t len) {
+  uint64_t mul = k2 + len * 2;
+  uint64_t a = Fetch64(s) * k1;
+  uint64_t b = Fetch64(s + 8);
+  uint64_t c = Fetch64(s + len - 8) * mul;
+  uint64_t d = Fetch64(s + len - 16) * k2;
+  return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d,
+                   a + Rotate(b + k2, 18) + c, mul);
+}
+
+// Return a 16-byte hash for 48 bytes.  Quick and dirty.
+// Callers do best to use "random-looking" values for a and b.
+static pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(
+    uint64_t w, uint64_t x, uint64_t y, uint64_t z, uint64_t a, uint64_t b) {
+  a += w;
+  b = Rotate(b + a + z, 21);
+  uint64_t c = a;
+  a += x;
+  a += y;
+  b += Rotate(a, 44);
+  return make_pair(a + z, b + c);
+}
+
+// Return a 16-byte hash for s[0] ... s[31], a, and b.  Quick and dirty.
+static pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(
+    const char* s, uint64_t a, uint64_t b) {
+  return WeakHashLen32WithSeeds(Fetch64(s),
+                                Fetch64(s + 8),
+                                Fetch64(s + 16),
+                                Fetch64(s + 24),
+                                a,
+                                b);
+}
+
+// Return an 8-byte hash for 33 to 64 bytes.
+static uint64_t HashLen33to64(const char *s, size_t len) {
+  uint64_t mul = k2 + len * 2;
+  uint64_t a = Fetch64(s) * k2;
+  uint64_t b = Fetch64(s + 8);
+  uint64_t c = Fetch64(s + len - 24);
+  uint64_t d = Fetch64(s + len - 32);
+  uint64_t e = Fetch64(s + 16) * k2;
+  uint64_t f = Fetch64(s + 24) * 9;
+  uint64_t g = Fetch64(s + len - 8);
+  uint64_t h = Fetch64(s + len - 16) * mul;
+  uint64_t u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
+  uint64_t v = ((a + g) ^ d) + f + 1;
+  uint64_t w = bswap_64((u + v) * mul) + h;
+  uint64_t x = Rotate(e + f, 42) + c;
+  uint64_t y = (bswap_64((v + w) * mul) + g) * mul;
+  uint64_t z = e + f + c;
+  a = bswap_64((x + z) * mul + y) + b;
+  b = ShiftMix((z + a) * mul + d + h) * mul;
+  return b + x;
+}
+
+uint64_t CityHash64(const char *s, size_t len) {
+  if (len <= 32) {
+    if (len <= 16) {
+      return HashLen0to16(s, len);
+    } else {
+      return HashLen17to32(s, len);
+    }
+  } else if (len <= 64) {
+    return HashLen33to64(s, len);
+  }
+
+  // For strings over 64 bytes we hash the end first, and then as we
+  // loop we keep 56 bytes of state: v, w, x, y, and z.
+  uint64_t x = Fetch64(s + len - 40);
+  uint64_t y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
+  uint64_t z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
+  pair<uint64_t, uint64_t> v = WeakHashLen32WithSeeds(s + len - 64, len, z);
+  pair<uint64_t, uint64_t> w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
+  x = x * k1 + Fetch64(s);
+
+  // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
+  len = (len - 1) & ~static_cast<size_t>(63);
+  do {
+    x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+    y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+    x ^= w.second;
+    y += v.first + Fetch64(s + 40);
+    z = Rotate(z + w.first, 33) * k1;
+    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+    std::swap(z, x);
+    s += 64;
+    len -= 64;
+  } while (len != 0);
+  return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
+                   HashLen16(v.second, w.second) + x);
+}
+
+uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed) {
+  return CityHash64WithSeeds(s, len, k2, seed);
+}
+
+uint64_t CityHash64WithSeeds(const char *s, size_t len,
+                           uint64_t seed0, uint64_t seed1) {
+  return HashLen16(CityHash64(s, len) - seed0, seed1);
+}
+
+// A subroutine for CityHash128().  Returns a decent 128-bit hash for strings
+// of any length representable in signed long.  Based on City and Murmur.
+static city_uint128 CityMurmur(const char *s, size_t len, city_uint128 seed) {
+  uint64_t a = Uint128Low64(seed);
+  uint64_t b = Uint128High64(seed);
+  uint64_t c = 0;
+  uint64_t d = 0;
+  signed long l = len - 16;
+  if (l <= 0) {  // len <= 16
+    a = ShiftMix(a * k1) * k1;
+    c = b * k1 + HashLen0to16(s, len);
+    d = ShiftMix(a + (len >= 8 ? Fetch64(s) : c));
+  } else {  // len > 16
+    c = HashLen16(Fetch64(s + len - 8) + k1, a);
+    d = HashLen16(b + len, c + Fetch64(s + len - 16));
+    a += d;
+    do {
+      a ^= ShiftMix(Fetch64(s) * k1) * k1;
+      a *= k1;
+      b ^= a;
+      c ^= ShiftMix(Fetch64(s + 8) * k1) * k1;
+      c *= k1;
+      d ^= c;
+      s += 16;
+      l -= 16;
+    } while (l > 0);
+  }
+  a = HashLen16(a, c);
+  b = HashLen16(d, b);
+  return city_uint128(a ^ b, HashLen16(b, a));
+}
+
+city_uint128 CityHash128WithSeed(const char *s, size_t len, city_uint128 seed) {
+  if (len < 128) {
+    return CityMurmur(s, len, seed);
+  }
+
+  // We expect len >= 128 to be the common case.  Keep 56 bytes of state:
+  // v, w, x, y, and z.
+  pair<uint64_t, uint64_t> v, w;
+  uint64_t x = Uint128Low64(seed);
+  uint64_t y = Uint128High64(seed);
+  uint64_t z = len * k1;
+  v.first = Rotate(y ^ k1, 49) * k1 + Fetch64(s);
+  v.second = Rotate(v.first, 42) * k1 + Fetch64(s + 8);
+  w.first = Rotate(y + z, 35) * k1 + x;
+  w.second = Rotate(x + Fetch64(s + 88), 53) * k1;
+
+  // This is the same inner loop as CityHash64(), manually unrolled.
+  do {
+    x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+    y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+    x ^= w.second;
+    y += v.first + Fetch64(s + 40);
+    z = Rotate(z + w.first, 33) * k1;
+    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+    std::swap(z, x);
+    s += 64;
+    x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+    y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+    x ^= w.second;
+    y += v.first + Fetch64(s + 40);
+    z = Rotate(z + w.first, 33) * k1;
+    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+    std::swap(z, x);
+    s += 64;
+    len -= 128;
+  } while (LIKELY(len >= 128));
+  x += Rotate(v.first + z, 49) * k0;
+  y = y * k0 + Rotate(w.second, 37);
+  z = z * k0 + Rotate(w.first, 27);
+  w.first *= 9;
+  v.first *= k0;
+  // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
+  for (size_t tail_done = 0; tail_done < len; ) {
+    tail_done += 32;
+    y = Rotate(x + y, 42) * k0 + v.second;
+    w.first += Fetch64(s + len - tail_done + 16);
+    x = x * k0 + w.first;
+    z += w.second + Fetch64(s + len - tail_done);
+    w.second += v.first;
+    v = WeakHashLen32WithSeeds(s + len - tail_done, v.first + z, v.second);
+    v.first *= k0;
+  }
+  // At this point our 56 bytes of state should contain more than
+  // enough information for a strong 128-bit hash.  We use two
+  // different 56-byte-to-8-byte hashes to get a 16-byte final result.
+  x = HashLen16(x, v.first);
+  y = HashLen16(y + z, w.first);
+  return city_uint128(HashLen16(x + v.second, w.second) + y,
+                      HashLen16(x + w.second, y + v.second));
+}
+
+city_uint128 CityHash128(const char *s, size_t len) {
+  return len >= 16 ?
+      CityHash128WithSeed(s + 16, len - 16,
+                          city_uint128(Fetch64(s), Fetch64(s + 8) + k0)) :
+      CityHash128WithSeed(s, len, city_uint128(k0, k1));
+}
+
+#ifdef __SSE4_2__
+#include <citycrc.h>
+#include <nmmintrin.h>
+
+// Requires len >= 240.
+static void CityHashCrc256Long(const char *s, size_t len,
+                               uint32_t seed, uint64_t *result) {
+  uint64_t a = Fetch64(s + 56) + k0;
+  uint64_t b = Fetch64(s + 96) + k0;
+  uint64_t c = result[0] = HashLen16(b, len);
+  uint64_t d = result[1] = Fetch64(s + 120) * k0 + len;
+  uint64_t e = Fetch64(s + 184) + seed;
+  uint64_t f = 0;
+  uint64_t g = 0;
+  uint64_t h = c + d;
+  uint64_t x = seed;
+  uint64_t y = 0;
+  uint64_t z = 0;
+
+  // 240 bytes of input per iter.
+  size_t iters = len / 240;
+  len -= iters * 240;
+  do {
+#undef CHUNK
+#define CHUNK(r)                                \
+    PERMUTE3(x, z, y);                          \
+    b += Fetch64(s);                            \
+    c += Fetch64(s + 8);                        \
+    d += Fetch64(s + 16);                       \
+    e += Fetch64(s + 24);                       \
+    f += Fetch64(s + 32);                       \
+    a += b;                                     \
+    h += f;                                     \
+    b += c;                                     \
+    f += d;                                     \
+    g += e;                                     \
+    e += z;                                     \
+    g += x;                                     \
+    z = _mm_crc32_u64(z, b + g);                \
+    y = _mm_crc32_u64(y, e + h);                \
+    x = _mm_crc32_u64(x, f + a);                \
+    e = Rotate(e, r);                           \
+    c += e;                                     \
+    s += 40
+
+    CHUNK(0); PERMUTE3(a, h, c);
+    CHUNK(33); PERMUTE3(a, h, f);
+    CHUNK(0); PERMUTE3(b, h, f);
+    CHUNK(42); PERMUTE3(b, h, d);
+    CHUNK(0); PERMUTE3(b, h, e);
+    CHUNK(33); PERMUTE3(a, h, e);
+  } while (--iters > 0);
+
+  while (len >= 40) {
+    CHUNK(29);
+    e ^= Rotate(a, 20);
+    h += Rotate(b, 30);
+    g ^= Rotate(c, 40);
+    f += Rotate(d, 34);
+    PERMUTE3(c, h, g);
+    len -= 40;
+  }
+  if (len > 0) {
+    s = s + len - 40;
+    CHUNK(33);
+    e ^= Rotate(a, 43);
+    h += Rotate(b, 42);
+    g ^= Rotate(c, 41);
+    f += Rotate(d, 40);
+  }
+  result[0] ^= h;
+  result[1] ^= g;
+  g += h;
+  a = HashLen16(a, g + z);
+  x += y << 32;
+  b += x;
+  c = HashLen16(c, z) + h;
+  d = HashLen16(d, e + result[0]);
+  g += e;
+  h += HashLen16(x, f);
+  e = HashLen16(a, d) + g;
+  z = HashLen16(b, c) + a;
+  y = HashLen16(g, h) + c;
+  result[0] = e + z + y + x;
+  a = ShiftMix((a + y) * k0) * k0 + b;
+  result[1] += a + result[0];
+  a = ShiftMix(a * k0) * k0 + c;
+  result[2] = a + result[1];
+  a = ShiftMix((a + e) * k0) * k0;
+  result[3] = a + result[2];
+}
+
+// Requires len < 240.
+static void CityHashCrc256Short(const char *s, size_t len, uint64_t *result) {
+  char buf[240];
+  memcpy(buf, s, len);
+  memset(buf + len, 0, 240 - len);
+  CityHashCrc256Long(buf, 240, ~static_cast<uint32>(len), result);
+}
+
+void CityHashCrc256(const char *s, size_t len, uint64_t *result) {
+  if (LIKELY(len >= 240)) {
+    CityHashCrc256Long(s, len, 0, result);
+  } else {
+    CityHashCrc256Short(s, len, result);
+  }
+}
+
+city_uint128 CityHashCrc128WithSeed(const char *s, size_t len, city_uint128 seed) {
+  if (len <= 900) {
+    return CityHash128WithSeed(s, len, seed);
+  } else {
+    uint64_t result[4];
+    CityHashCrc256(s, len, result);
+    uint64_t u = Uint128High64(seed) + result[0];
+    uint64_t v = Uint128Low64(seed) + result[1];
+    return uint128(HashLen16(u, v + result[2]),
+                   HashLen16(Rotate(v, 32), u * k0 + result[3]));
+  }
+}
+
+city_uint128 CityHashCrc128(const char *s, size_t len) {
+  if (len <= 900) {
+    return CityHash128(s, len);
+  } else {
+    uint64_t result[4];
+    CityHashCrc256(s, len, result);
+    return uint128(result[2], result[3]);
+  }
+}
+
+#endif
diff --git a/ext/src/ssw/ssw.c b/ext/src/ssw/ssw.c
index c6f5aa9..a77f39a 100755
--- a/ext/src/ssw/ssw.c
+++ b/ext/src/ssw/ssw.c
@@ -20,7 +20,7 @@
    BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
    ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
    CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-   SOFTWARE.	
+   SOFTWARE.
 */
 
 /* Contact: Mengyao Zhao <zhangmp at bc.edu> */
@@ -31,7 +31,7 @@
  *  Created by Mengyao Zhao on 6/22/10.
  *  Copyright 2010 Boston College. All rights reserved.
  *	Version 0.1.4
- *	Last revision by Mengyao Zhao on 12/07/12.
+ *	Last revision by Mengyao Zhao on 06/27/14.
  *
  */
 
@@ -66,8 +66,8 @@
 
 typedef struct {
 	uint16_t score;
-	int32_t ref;	 //0-based position 
-	int32_t read;    //alignment ending position on read, 0-based 
+	int32_t ref;	 //0-based position
+	int32_t read;    //alignment ending position on read, 0-based
 } alignment_end;
 
 typedef struct {
@@ -86,24 +86,24 @@ struct _profile{
 };
 
 /* Generate query profile rearrange query sequence & calculate the weight of match/mismatch. */
-__m128i* qP_byte (const int8_t* read_num,
+static __m128i* qP_byte (const int8_t* read_num,
 				  const int8_t* mat,
 				  const int32_t readLen,
 				  const int32_t n,	/* the edge length of the squre matrix mat */
 				  uint8_t bias) {
- 
-	int32_t segLen = (readLen + 15) / 16; /* Split the 128 bit register into 16 pieces. 
-								     Each piece is 8 bit. Split the read into 16 segments. 
+
+	int32_t segLen = (readLen + 15) / 16; /* Split the 128 bit register into 16 pieces.
+								     Each piece is 8 bit. Split the read into 16 segments.
 								     Calculat 16 segments in parallel.
 								   */
 	__m128i* vProfile = (__m128i*)malloc(n * segLen * sizeof(__m128i));
 	int8_t* t = (int8_t*)vProfile;
 	int32_t nt, i, j, segNum;
-	
+
 	/* Generate query profile rearrange query sequence & calculate the weight of match/mismatch */
 	for (nt = 0; LIKELY(nt < n); nt ++) {
 		for (i = 0; i < segLen; i ++) {
-			j = i; 
+			j = i;
 			for (segNum = 0; LIKELY(segNum < 16) ; segNum ++) {
 				*t++ = j>= readLen ? bias : mat[nt * n + read_num[j]] + bias;
 				j += segLen;
@@ -114,26 +114,26 @@ __m128i* qP_byte (const int8_t* read_num,
 }
 
 /* Striped Smith-Waterman
-   Record the highest score of each reference position. 
-   Return the alignment score and ending position of the best alignment, 2nd best alignment, etc. 
-   Gap begin and gap extension are different. 
+   Record the highest score of each reference position.
+   Return the alignment score and ending position of the best alignment, 2nd best alignment, etc.
+   Gap begin and gap extension are different.
    wight_match > 0, all other weights < 0.
    The returned positions are 0-based.
- */ 
-alignment_end* sw_sse2_byte (const int8_t* ref,
+ */
+static alignment_end* sw_sse2_byte (const int8_t* ref,
 							 int8_t ref_dir,	// 0: forward ref; 1: reverse ref
 							 int32_t refLen,
-							 int32_t readLen, 
+							 int32_t readLen,
 							 const uint8_t weight_gapO, /* will be used as - */
 							 const uint8_t weight_gapE, /* will be used as - */
-							 __m128i* vProfile,
-							 uint8_t terminate,	/* the best alignment score: used to terminate 
-												   the matrix calculation when locating the 
-												   alignment beginning point. If this score 
+							 const __m128i* vProfile,
+							 uint8_t terminate,	/* the best alignment score: used to terminate
+												   the matrix calculation when locating the
+												   alignment beginning point. If this score
 												   is set to 0, it will not be used */
 	 						 uint8_t bias,  /* Shift 0 point to a positive value. */
-							 int32_t maskLen) {  
-      
+							 int32_t maskLen) {
+
 #define max16(m, vm) (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 8)); \
 					  (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 4)); \
 					  (vm) = _mm_max_epu8((vm), _mm_srli_si128((vm), 2)); \
@@ -144,13 +144,13 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 	int32_t end_read = readLen - 1;
 	int32_t end_ref = -1; /* 0_based best alignment ending point; Initialized as isn't aligned -1. */
 	int32_t segLen = (readLen + 15) / 16; /* number of segment */
-	
+
 	/* array to record the largest score of each reference position */
-	uint8_t* maxColumn = (uint8_t*) calloc(refLen, 1); 
-	
+	uint8_t* maxColumn = (uint8_t*) calloc(refLen, 1);
+
 	/* array to record the alignment read ending position of the largest score of each reference position */
 	int32_t* end_read_column = (int32_t*) calloc(refLen, sizeof(int32_t));
-	
+
 	/* Define 16 byte 0 vector. */
 	__m128i vZero = _mm_set1_epi32(0);
 
@@ -162,17 +162,17 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 	int32_t i, j;
 	/* 16 byte insertion begin vector */
 	__m128i vGapO = _mm_set1_epi8(weight_gapO);
-	
+
 	/* 16 byte insertion extension vector */
-	__m128i vGapE = _mm_set1_epi8(weight_gapE);	
-	
+	__m128i vGapE = _mm_set1_epi8(weight_gapE);
+
 	/* 16 byte bias vector */
-	__m128i vBias = _mm_set1_epi8(bias);	
+	__m128i vBias = _mm_set1_epi8(bias);
 
 	__m128i vMaxScore = vZero; /* Trace the highest score of the whole SW matrix. */
-	__m128i vMaxMark = vZero; /* Trace the highest score till the previous column. */	
+	__m128i vMaxMark = vZero; /* Trace the highest score till the previous column. */
 	__m128i vTemp;
-	int32_t edge, begin = 0, end = refLen, step = 1; 
+	int32_t edge, begin = 0, end = refLen, step = 1;
 //	int32_t distance = readLen * 2 / 3;
 //	int32_t distance = readLen / 2;
 //	int32_t distance = readLen;
@@ -185,21 +185,21 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 	}
 	for (i = begin; LIKELY(i != end); i += step) {
 		int32_t cmp;
-		__m128i e = vZero, vF = vZero, vMaxColumn = vZero; /* Initialize F value to 0. 
-							   Any errors to vH values will be corrected in the Lazy_F loop. 
+		__m128i e, vF = vZero, vMaxColumn = vZero; /* Initialize F value to 0.
+							   Any errors to vH values will be corrected in the Lazy_F loop.
 							 */
 //		max16(maxColumn[i], vMaxColumn);
 //		fprintf(stderr, "middle[%d]: %d\n", i, maxColumn[i]);
 
 		__m128i vH = pvHStore[segLen - 1];
 		vH = _mm_slli_si128 (vH, 1); /* Shift the 128-bit value in vH left by 1 byte. */
-		__m128i* vP = vProfile + ref[i] * segLen; /* Right part of the vProfile */
+		const __m128i* vP = vProfile + ref[i] * segLen; /* Right part of the vProfile */
 
 		/* Swap the 2 H buffers. */
 		__m128i* pv = pvHLoad;
 		pvHLoad = pvHStore;
 		pvHStore = pv;
-		
+
 		/* inner loop to process the query sequence */
 		for (j = 0; LIKELY(j < segLen); ++j) {
 			vH = _mm_adds_epu8(vH, _mm_load_si128(vP + j));
@@ -215,7 +215,7 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 			vH = _mm_max_epu8(vH, e);
 			vH = _mm_max_epu8(vH, vF);
 			vMaxColumn = _mm_max_epu8(vMaxColumn, vH);
-			
+
 	//	max16(maxColumn[i], vMaxColumn);
 	//	fprintf(stderr, "middle[%d]: %d\n", i, maxColumn[i]);
 //	for (t = (int8_t*)&vMaxColumn, ti = 0; ti < 16; ++ti) fprintf(stderr, "%d\t", *t++);
@@ -228,11 +228,11 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 			e = _mm_subs_epu8(e, vGapE);
 			e = _mm_max_epu8(e, vH);
 			_mm_store_si128(pvE + j, e);
-			
+
 			/* Update vF value. */
 			vF = _mm_subs_epu8(vF, vGapE);
 			vF = _mm_max_epu8(vF, vH);
-			
+
 			/* Load the next vH. */
 			vH = _mm_load_si128(pvHLoad + j);
 		}
@@ -251,7 +251,7 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 		vTemp = _mm_cmpeq_epi8 (vTemp, vZero);
 		cmp  = _mm_movemask_epi8 (vTemp);
 
-        while (cmp != 0xffff) 
+        while (cmp != 0xffff)
         {
             vH = _mm_max_epu8 (vH, vF);
 			vMaxColumn = _mm_max_epu8(vMaxColumn, vH);
@@ -275,27 +275,27 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 		vTemp = _mm_cmpeq_epi8(vMaxMark, vMaxScore);
 		cmp = _mm_movemask_epi8(vTemp);
 		if (cmp != 0xffff) {
-			uint8_t temp; 
+			uint8_t temp;
 			vMaxMark = vMaxScore;
 			max16(temp, vMaxScore);
 			vMaxScore = vMaxMark;
-			
+
 			if (LIKELY(temp > max)) {
 				max = temp;
 				if (max + bias >= 255) break;	//overflow
 				end_ref = i;
-			
+
 				/* Store the column with the highest alignment score in order to trace the alignment ending position on read. */
 				for (j = 0; LIKELY(j < segLen); ++j) pvHmax[j] = pvHStore[j];
 			}
 		}
 
-		/* Record the max score of current column. */	
+		/* Record the max score of current column. */
 		max16(maxColumn[i], vMaxColumn);
 //		fprintf(stderr, "maxColumn[%d]: %d\n", i, maxColumn[i]);
 		if (maxColumn[i] == terminate) break;
 	}
-	
+
 	/* Trace the alignment ending position on read. */
 	uint8_t *t = (uint8_t*)pvHmax;
 	int32_t column_len = segLen * 16;
@@ -310,21 +310,21 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 	free(pvHmax);
 	free(pvE);
 	free(pvHLoad);
-	free(pvHStore); 	
+	free(pvHStore);
 
 	/* Find the most possible 2nd best alignment. */
 	alignment_end* bests = (alignment_end*) calloc(2, sizeof(alignment_end));
 	bests[0].score = max + bias >= 255 ? 255 : max;
 	bests[0].ref = end_ref;
 	bests[0].read = end_read;
-	
+
 	bests[1].score = 0;
 	bests[1].ref = 0;
 	bests[1].read = 0;
 
 	edge = (end_ref - maskLen) > 0 ? (end_ref - maskLen) : 0;
 	for (i = 0; i < edge; i ++) {
-//			fprintf (stderr, "maxColumn[%d]: %d\n", i, maxColumn[i]); 
+//			fprintf (stderr, "maxColumn[%d]: %d\n", i, maxColumn[i]);
 		if (maxColumn[i] > bests[1].score) {
 			bests[1].score = maxColumn[i];
 			bests[1].ref = i;
@@ -332,33 +332,33 @@ alignment_end* sw_sse2_byte (const int8_t* ref,
 	}
 	edge = (end_ref + maskLen) > refLen ? refLen : (end_ref + maskLen);
 	for (i = edge + 1; i < refLen; i ++) {
-//			fprintf (stderr, "refLen: %d\tmaxColumn[%d]: %d\n", refLen, i, maxColumn[i]); 
+//			fprintf (stderr, "refLen: %d\tmaxColumn[%d]: %d\n", refLen, i, maxColumn[i]);
 		if (maxColumn[i] > bests[1].score) {
 			bests[1].score = maxColumn[i];
 			bests[1].ref = i;
 		}
 	}
-	
+
 	free(maxColumn);
 	free(end_read_column);
 	return bests;
 }
 
-__m128i* qP_word (const int8_t* read_num,
+static __m128i* qP_word (const int8_t* read_num,
 				  const int8_t* mat,
 				  const int32_t readLen,
-				  const int32_t n) { 
-					
-	int32_t segLen = (readLen + 7) / 8; 
+				  const int32_t n) {
+
+	int32_t segLen = (readLen + 7) / 8;
 	__m128i* vProfile = (__m128i*)malloc(n * segLen * sizeof(__m128i));
 	int16_t* t = (int16_t*)vProfile;
 	int32_t nt, i, j;
 	int32_t segNum;
-	
+
 	/* Generate query profile rearrange query sequence & calculate the weight of match/mismatch */
 	for (nt = 0; LIKELY(nt < n); nt ++) {
 		for (i = 0; i < segLen; i ++) {
-			j = i; 
+			j = i;
 			for (segNum = 0; LIKELY(segNum < 8) ; segNum ++) {
 				*t++ = j>= readLen ? 0 : mat[nt * n + read_num[j]];
 				j += segLen;
@@ -368,32 +368,32 @@ __m128i* qP_word (const int8_t* read_num,
 	return vProfile;
 }
 
-alignment_end* sw_sse2_word (const int8_t* ref, 
+static alignment_end* sw_sse2_word (const int8_t* ref,
 							 int8_t ref_dir,	// 0: forward ref; 1: reverse ref
 							 int32_t refLen,
-							 int32_t readLen, 
+							 int32_t readLen,
 							 const uint8_t weight_gapO, /* will be used as - */
 							 const uint8_t weight_gapE, /* will be used as - */
-						     __m128i* vProfile,
-							 uint16_t terminate, 
-							 int32_t maskLen) { 
+							 const __m128i* vProfile,
+							 uint16_t terminate,
+							 int32_t maskLen) {
 
 #define max8(m, vm) (vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 8)); \
 					(vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 4)); \
 					(vm) = _mm_max_epi16((vm), _mm_srli_si128((vm), 2)); \
 					(m) = _mm_extract_epi16((vm), 0)
-	
+
 	uint16_t max = 0;		                     /* the max alignment score */
 	int32_t end_read = readLen - 1;
 	int32_t end_ref = 0; /* 1_based best alignment ending point; Initialized as isn't aligned - 0. */
 	int32_t segLen = (readLen + 7) / 8; /* number of segment */
-	
+
 	/* array to record the largest score of each reference position */
-	uint16_t* maxColumn = (uint16_t*) calloc(refLen, 2); 
-	
+	uint16_t* maxColumn = (uint16_t*) calloc(refLen, 2);
+
 	/* array to record the alignment read ending position of the largest score of each reference position */
 	int32_t* end_read_column = (int32_t*) calloc(refLen, sizeof(int32_t));
-	
+
 	/* Define 16 byte 0 vector. */
 	__m128i vZero = _mm_set1_epi32(0);
 
@@ -405,13 +405,12 @@ alignment_end* sw_sse2_word (const int8_t* ref,
 	int32_t i, j, k;
 	/* 16 byte insertion begin vector */
 	__m128i vGapO = _mm_set1_epi16(weight_gapO);
-	
+
 	/* 16 byte insertion extension vector */
-	__m128i vGapE = _mm_set1_epi16(weight_gapE);	
+	__m128i vGapE = _mm_set1_epi16(weight_gapE);
 
-	/* 16 byte bias vector */
 	__m128i vMaxScore = vZero; /* Trace the highest score of the whole SW matrix. */
-	__m128i vMaxMark = vZero; /* Trace the highest score till the previous column. */	
+	__m128i vMaxMark = vZero; /* Trace the highest score till the previous column. */
 	__m128i vTemp;
 	int32_t edge, begin = 0, end = refLen, step = 1;
 
@@ -423,21 +422,21 @@ alignment_end* sw_sse2_word (const int8_t* ref,
 	}
 	for (i = begin; LIKELY(i != end); i += step) {
 		int32_t cmp;
-		__m128i e = vZero, vF = vZero; /* Initialize F value to 0. 
-							   Any errors to vH values will be corrected in the Lazy_F loop. 
+		__m128i e, vF = vZero; /* Initialize F value to 0.
+							   Any errors to vH values will be corrected in the Lazy_F loop.
 							 */
 		__m128i vH = pvHStore[segLen - 1];
 		vH = _mm_slli_si128 (vH, 2); /* Shift the 128-bit value in vH left by 2 byte. */
-		
+
 		/* Swap the 2 H buffers. */
 		__m128i* pv = pvHLoad;
-		
+
 		__m128i vMaxColumn = vZero; /* vMaxColumn is used to record the max values of column i. */
-		
-		__m128i* vP = vProfile + ref[i] * segLen; /* Right part of the vProfile */
+
+		const __m128i* vP = vProfile + ref[i] * segLen; /* Right part of the vProfile */
 		pvHLoad = pvHStore;
 		pvHStore = pv;
-		
+
 		/* inner loop to process the query sequence */
 		for (j = 0; LIKELY(j < segLen); j ++) {
 			vH = _mm_adds_epi16(vH, _mm_load_si128(vP + j));
@@ -447,7 +446,7 @@ alignment_end* sw_sse2_word (const int8_t* ref,
 			vH = _mm_max_epi16(vH, e);
 			vH = _mm_max_epi16(vH, vF);
 			vMaxColumn = _mm_max_epi16(vMaxColumn, vH);
-			
+
 			/* Save vH values. */
 			_mm_store_si128(pvHStore + j, vH);
 
@@ -460,7 +459,7 @@ alignment_end* sw_sse2_word (const int8_t* ref,
 			/* Update vF value. */
 			vF = _mm_subs_epu16(vF, vGapE);
 			vF = _mm_max_epi16(vF, vH);
-			
+
 			/* Load the next vH. */
 			vH = _mm_load_si128(pvHLoad + j);
 		}
@@ -478,27 +477,27 @@ alignment_end* sw_sse2_word (const int8_t* ref,
 			}
 		}
 
-end:	
-		vMaxScore = _mm_max_epi16(vMaxScore, vMaxColumn);	
+end:
+		vMaxScore = _mm_max_epi16(vMaxScore, vMaxColumn);
 		vTemp = _mm_cmpeq_epi16(vMaxMark, vMaxScore);
 		cmp = _mm_movemask_epi8(vTemp);
 		if (cmp != 0xffff) {
-			uint16_t temp; 
+			uint16_t temp;
 			vMaxMark = vMaxScore;
 			max8(temp, vMaxScore);
 			vMaxScore = vMaxMark;
-			
+
 			if (LIKELY(temp > max)) {
 				max = temp;
 				end_ref = i;
 				for (j = 0; LIKELY(j < segLen); ++j) pvHmax[j] = pvHStore[j];
 			}
 		}
-		
-		/* Record the max score of current column. */	
+
+		/* Record the max score of current column. */
 		max8(maxColumn[i], vMaxColumn);
 		if (maxColumn[i] == terminate) break;
-	} 	
+	}
 
 	/* Trace the alignment ending position on read. */
 	uint16_t *t = (uint16_t*)pvHmax;
@@ -514,21 +513,21 @@ end:
 	free(pvHmax);
 	free(pvE);
 	free(pvHLoad);
-	free(pvHStore); 
-	
+	free(pvHStore);
+
 	/* Find the most possible 2nd best alignment. */
 	alignment_end* bests = (alignment_end*) calloc(2, sizeof(alignment_end));
 	bests[0].score = max;
 	bests[0].ref = end_ref;
 	bests[0].read = end_read;
-	
+
 	bests[1].score = 0;
 	bests[1].ref = 0;
 	bests[1].read = 0;
 
 	edge = (end_ref - maskLen) > 0 ? (end_ref - maskLen) : 0;
 	for (i = 0; i < edge; i ++) {
-		if (maxColumn[i] > bests[1].score) { 
+		if (maxColumn[i] > bests[1].score) {
 			bests[1].score = maxColumn[i];
 			bests[1].ref = i;
 		}
@@ -540,31 +539,33 @@ end:
 			bests[1].ref = i;
 		}
 	}
-	
+
 	free(maxColumn);
 	free(end_read_column);
 	return bests;
 }
 
-cigar* banded_sw (const int8_t* ref,
-				 const int8_t* read, 
-				 int32_t refLen, 
+static cigar* banded_sw (const int8_t* ref,
+				 const int8_t* read,
+				 int32_t refLen,
 				 int32_t readLen,
 				 int32_t score,
 				 const uint32_t weight_gapO,  /* will be used as - */
 				 const uint32_t weight_gapE,  /* will be used as - */
 				 int32_t band_width,
 				 const int8_t* mat,	/* pointer to the weight matrix */
-				 int32_t n) {	
+				 int32_t n) {
 
 	uint32_t *c = (uint32_t*)malloc(16 * sizeof(uint32_t)), *c1;
-	int32_t i, j, e, f, temp1, temp2, s = 16, s1 = 8, s2 = 1024, l, max = 0;
+	int32_t i, j, e, f, temp1, temp2, s = 16, s1 = 8, l, max = 0;
+	int64_t s2 = 1024;
+	char op, prev_op;
 	int32_t width, width_d, *h_b, *e_b, *h_c;
 	int8_t *direction, *direction_line;
 	cigar* result = (cigar*)malloc(sizeof(cigar));
-	h_b = (int32_t*)malloc(s1 * sizeof(int32_t)); 
-	e_b = (int32_t*)malloc(s1 * sizeof(int32_t)); 
-	h_c = (int32_t*)malloc(s1 * sizeof(int32_t)); 
+	h_b = (int32_t*)malloc(s1 * sizeof(int32_t));
+	e_b = (int32_t*)malloc(s1 * sizeof(int32_t));
+	h_c = (int32_t*)malloc(s1 * sizeof(int32_t));
 	direction = (int8_t*)malloc(s2 * sizeof(int8_t));
 
 	do {
@@ -572,9 +573,9 @@ cigar* banded_sw (const int8_t* ref,
 		while (width >= s1) {
 			++s1;
 			kroundup32(s1);
-			h_b = (int32_t*)realloc(h_b, s1 * sizeof(int32_t)); 
-			e_b = (int32_t*)realloc(e_b, s1 * sizeof(int32_t)); 
-			h_c = (int32_t*)realloc(h_c, s1 * sizeof(int32_t)); 
+			h_b = (int32_t*)realloc(h_b, s1 * sizeof(int32_t));
+			e_b = (int32_t*)realloc(e_b, s1 * sizeof(int32_t));
+			h_c = (int32_t*)realloc(h_c, s1 * sizeof(int32_t));
 		}
 		while (width_d * readLen * 3 >= s2) {
 			++s2;
@@ -583,7 +584,7 @@ cigar* banded_sw (const int8_t* ref,
 				fprintf(stderr, "Alignment score and position are not consensus.\n");
 				exit(1);
 			}
-			direction = (int8_t*)realloc(direction, s2 * sizeof(int8_t)); 
+			direction = (int8_t*)realloc(direction, s2 * sizeof(int8_t));
 		}
 		direction_line = direction;
 		for (j = 1; LIKELY(j < width - 1); j ++) h_b[j] = 0;
@@ -597,7 +598,7 @@ cigar* banded_sw (const int8_t* ref,
 
 			for (j = beg; LIKELY(j <= end); j ++) {
 				int32_t b, e1, f1, d, de, df, dh;
-				set_u(u, band_width, i, j);	set_u(e, band_width, i - 1, j); 
+				set_u(u, band_width, i, j);	set_u(e, band_width, i - 1, j);
 				set_u(b, band_width, i, j - 1); set_u(d, band_width, i - 1, j - 1);
 				set_d(de, band_width, i, j, 0);
 				set_d(df, band_width, i, j, 1);
@@ -606,21 +607,22 @@ cigar* banded_sw (const int8_t* ref,
 				temp1 = i == 0 ? -weight_gapO : h_b[e] - weight_gapO;
 				temp2 = i == 0 ? -weight_gapE : e_b[e] - weight_gapE;
 				e_b[u] = temp1 > temp2 ? temp1 : temp2;
+				//fprintf(stderr, "de: %d\twidth_d: %d\treadLen: %d\ts2:%d\n", de, width_d, readLen, s2);
 				direction_line[de] = temp1 > temp2 ? 3 : 2;
-		
+
 				temp1 = h_c[b] - weight_gapO;
 				temp2 = f - weight_gapE;
 				f = temp1 > temp2 ? temp1 : temp2;
 				direction_line[df] = temp1 > temp2 ? 5 : 4;
-				
+
 				e1 = e_b[u] > 0 ? e_b[u] : 0;
 				f1 = f > 0 ? f : 0;
 				temp1 = e1 > f1 ? e1 : f1;
 				temp2 = h_b[d] + mat[ref[j] * n + read[i]];
 				h_c[u] = temp1 > temp2 ? temp1 : temp2;
-		
+
 				if (h_c[u] > max) max = h_c[u];
-		
+
 				if (temp1 <= temp2) direction_line[dh] = 1;
 				else direction_line[dh] = e1 > f1 ? direction_line[de] : direction_line[df];
 			}
@@ -635,45 +637,51 @@ cigar* banded_sw (const int8_t* ref,
 	j = refLen - 1;
 	e = 0;	// Count the number of M, D or I.
 	l = 0;	// record length of current cigar
-	f = max = 0; // M
+	op = prev_op = 'M';
 	temp2 = 2;	// h
 	while (LIKELY(i > 0)) {
 		set_d(temp1, band_width, i, j, temp2);
 		switch (direction_line[temp1]) {
-			case 1: 
+			case 1:
 				--i;
 				--j;
 				temp2 = 2;
 				direction_line -= width_d * 3;
-				f = 0;	// M
+				op = 'M';
 				break;
 			case 2:
 			 	--i;
 				temp2 = 0;	// e
 				direction_line -= width_d * 3;
-				f = 1;	// I
-				break;		
+				op = 'I';
+				break;
 			case 3:
 				--i;
 				temp2 = 2;
 				direction_line -= width_d * 3;
-				f = 1;	// I
+				op = 'I';
 				break;
 			case 4:
 				--j;
 				temp2 = 1;
-				f = 2;	// D
+				op = 'D';
 				break;
 			case 5:
 				--j;
 				temp2 = 2;
-				f = 2;	// D
+				op = 'D';
 				break;
-			default: 
+			default:
 				fprintf(stderr, "Trace back error: %d.\n", direction_line[temp1 - 1]);
+				free(direction);
+				free(h_c);
+				free(e_b);
+				free(h_b);
+				free(c);
+				free(result);
 				return 0;
 		}
-		if (f == max) ++e;
+		if (op == prev_op) ++e;
 		else {
 			++l;
 			while (l >= s) {
@@ -681,19 +689,19 @@ cigar* banded_sw (const int8_t* ref,
 				kroundup32(s);
 				c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
 			}
-			c[l - 1] = e<<4|max;
-			max = f;
+			c[l - 1] = to_cigar_int(e, prev_op);
+			prev_op = op;
 			e = 1;
 		}
 	}
-	if (f == 0) {
+	if (op == 'M') {
 		++l;
 		while (l >= s) {
 			++s;
 			kroundup32(s);
 			c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
 		}
-		c[l - 1] = (e+1)<<4;
+		c[l - 1] = to_cigar_int(e + 1, op);
 	}else {
 		l += 2;
 		while (l >= s) {
@@ -701,20 +709,20 @@ cigar* banded_sw (const int8_t* ref,
 			kroundup32(s);
 			c = (uint32_t*)realloc(c, s * sizeof(uint32_t));
 		}
-		c[l - 2] = e<<4|f;
-		c[l - 1] = 16;	// 1M
+		c[l - 2] = to_cigar_int(e, op);
+		c[l - 1] = to_cigar_int(1, 'M');
 	}
 
 	// reverse cigar
 	c1 = (uint32_t*)malloc(l * sizeof(uint32_t));
 	s = 0;
 	e = l - 1;
-	while (LIKELY(s <= e)) {			
-		c1[s] = c[e];		
-		c1[e] = c[s];		
-		++ s;					
-		-- e;						
-	}								
+	while (LIKELY(s <= e)) {
+		c1[s] = c[e];
+		c1[e] = c[s];
+		++ s;
+		-- e;
+	}
 	result->seq = c1;
 	result->length = l;
 
@@ -726,25 +734,25 @@ cigar* banded_sw (const int8_t* ref,
 	return result;
 }
 
-int8_t* seq_reverse(const int8_t* seq, int32_t end)	/* end is 0-based alignment ending position */	
-{									
-	int8_t* reverse = (int8_t*)calloc(end + 1, sizeof(int8_t));	
+static int8_t* seq_reverse(const int8_t* seq, int32_t end)	/* end is 0-based alignment ending position */
+{
+	int8_t* reverse = (int8_t*)calloc(end + 1, sizeof(int8_t));
 	int32_t start = 0;
-	while (LIKELY(start <= end)) {			
-		reverse[start] = seq[end];		
-		reverse[end] = seq[start];		
-		++ start;					
-		-- end;						
-	}								
-	return reverse;					
+	while (LIKELY(start <= end)) {
+		reverse[start] = seq[end];
+		reverse[end] = seq[start];
+		++ start;
+		-- end;
+	}
+	return reverse;
 }
-		
+
 s_profile* ssw_init (const int8_t* read, const int32_t readLen, const int8_t* mat, const int32_t n, const int8_t score_size) {
 	s_profile* p = (s_profile*)calloc(1, sizeof(struct _profile));
 	p->profile_byte = 0;
 	p->profile_word = 0;
 	p->bias = 0;
-	
+
 	if (score_size == 0 || score_size == 2) {
 		/* Find the bias to use in the substitution matrix */
 		int32_t bias = 0, i;
@@ -768,11 +776,11 @@ void init_destroy (s_profile* p) {
 	free(p);
 }
 
-s_align* ssw_align (const s_profile* prof, 
-					const int8_t* ref, 
-				  	int32_t refLen, 
-				  	const uint8_t weight_gapO, 
-				  	const uint8_t weight_gapE, 
+s_align* ssw_align (const s_profile* prof,
+					const int8_t* ref,
+				  	int32_t refLen,
+				  	const uint8_t weight_gapO,
+				  	const uint8_t weight_gapE,
 					const uint8_t flag,	//  (from high to low) bit 5: return the best alignment beginning position; 6: if (ref_end1 - ref_begin1 <= filterd) && (read_end1 - read_begin1 <= filterd), return cigar; 7: if max score >= filters, return cigar; 8: always return cigar; if 6 & 7 are both setted, only return cigar when both filter fulfilled
 					const uint16_t filters,
 					const int32_t filterd,
@@ -788,9 +796,9 @@ s_align* ssw_align (const s_profile* prof,
 	r->read_begin1 = -1;
 	r->cigar = 0;
 	r->cigarLen = 0;
-	if (maskLen < 15) {
+//	if (maskLen < 15) {
 //		fprintf(stderr, "When maskLen < 15, the function ssw_align doesn't return 2nd best alignment information.\n");
-	}
+//	}
 
 	// Find the alignment scores and ending positions
 	if (prof->profile_byte) {
@@ -801,14 +809,16 @@ s_align* ssw_align (const s_profile* prof,
 			word = 1;
 		} else if (bests[0].score == 255) {
 			fprintf(stderr, "Please set 2 to the score_size parameter of the function ssw_init, otherwise the alignment results will be incorrect.\n");
-			return 0;
+			free(r);
+			return NULL;
 		}
 	}else if (prof->profile_word) {
 		bests = sw_sse2_word(ref, 0, refLen, readLen, weight_gapO, weight_gapE, prof->profile_word, -1, maskLen);
 		word = 1;
 	}else {
 		fprintf(stderr, "Please call the function ssw_init before ssw_align.\n");
-		return 0;
+		free(r);
+		return NULL;
 	}
 	r->score1 = bests[0].score;
 	r->ref_end1 = bests[0].ref;
@@ -844,14 +854,17 @@ s_align* ssw_align (const s_profile* prof,
 	readLen = r->read_end1 - r->read_begin1 + 1;
 	band_width = abs(refLen - readLen) + 1;
 	path = banded_sw(ref + r->ref_begin1, prof->read + r->read_begin1, refLen, readLen, r->score1, weight_gapO, weight_gapE, band_width, prof->mat, prof->n);
-	if (path == 0) r = 0;
+	if (path == 0) {
+		free(r);
+		r = NULL;
+	}
 	else {
 		r->cigar = path->seq;
 		r->cigarLen = path->length;
 		free(path);
 	}
-	
-end: 
+
+end:
 	return r;
 }
 
@@ -859,3 +872,31 @@ void align_destroy (s_align* a) {
 	free(a->cigar);
 	free(a);
 }
+
+char cigar_int_to_op (uint32_t cigar_int)
+{
+	uint8_t letter_code = cigar_int & 0xfU;
+	static const char map[] = {
+		'M',
+		'I',
+		'D',
+		'N',
+		'S',
+		'H',
+		'P',
+		'=',
+		'X',
+	};
+
+	if (letter_code >= (sizeof(map)/sizeof(map[0]))) {
+		return 'M';
+	}
+
+	return map[letter_code];
+}
+
+uint32_t cigar_int_to_len (uint32_t cigar_int)
+{
+	uint32_t res = cigar_int >> 4;
+	return res;
+}
diff --git a/ext/src/ssw/ssw_cpp.cpp b/ext/src/ssw/ssw_cpp.cpp
index 221a9d1..21dde2a 100644
--- a/ext/src/ssw/ssw_cpp.cpp
+++ b/ext/src/ssw/ssw_cpp.cpp
@@ -1,29 +1,26 @@
 #include "ssw/ssw_cpp.h"
+#include "ssw/ssw.h"
 
 #include <sstream>
 
-extern "C" {
-#include "ssw/ssw.h"
-}
-
 namespace {
 
-static int8_t kBaseTranslation[128] = {
-    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4, 
-    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4, 
+static const int8_t kBaseTranslation[128] = {
+    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
+    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
+    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
     4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
-    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4, 
   //   A     C            G
-    4, 0, 4, 1,  4, 4, 4, 2,  4, 4, 4, 4,  4, 4, 4, 4, 
+    4, 0, 4, 1,  4, 4, 4, 2,  4, 4, 4, 4,  4, 4, 4, 4,
   //             T
-    4, 4, 4, 4,  3, 0, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4, 
+    4, 4, 4, 4,  3, 0, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
   //   a     c            g
-    4, 0, 4, 1,  4, 4, 4, 2,  4, 4, 4, 4,  4, 4, 4, 4, 
+    4, 0, 4, 1,  4, 4, 4, 2,  4, 4, 4, 4,  4, 4, 4, 4,
   //             t
-    4, 4, 4, 4,  3, 0, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4 
+    4, 4, 4, 4,  3, 0, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4
 };
 
-void BuildSwScoreMatrix(const uint8_t& match_score, 
+void BuildSwScoreMatrix(const uint8_t& match_score,
                         const uint8_t& mismatch_penalty,
 			int8_t* matrix) {
 
@@ -47,11 +44,11 @@ void BuildSwScoreMatrix(const uint8_t& match_score,
 
   for (int i = 0; i < 5; ++i)
     matrix[id++] = 0;
-    
+
 }
 
-void ConvertAlignment(const s_align& s_al, 
-                      const int& query_len, 
+void ConvertAlignment(const s_align& s_al,
+                      const int& query_len,
                       StripedSmithWaterman::Alignment* al) {
   al->sw_score           = s_al.score1;
   al->sw_score_next_best = s_al.score2;
@@ -63,29 +60,23 @@ void ConvertAlignment(const s_align& s_al,
 
   al->cigar.clear();
   al->cigar_string.clear();
-  
+
   if (s_al.cigarLen > 0) {
     std::ostringstream cigar_string;
     if (al->query_begin > 0) {
-      uint32_t cigar = (al->query_begin << 4) | 0x0004;
+      uint32_t cigar = to_cigar_int(al->query_begin, 'S');
       al->cigar.push_back(cigar);
       cigar_string << al->query_begin << 'S';
     }
 
     for (int i = 0; i < s_al.cigarLen; ++i) {
       al->cigar.push_back(s_al.cigar[i]);
-      cigar_string << (s_al.cigar[i] >> 4);
-      uint8_t op = s_al.cigar[i] & 0x000f;
-      switch(op) {
-        case 0: cigar_string << 'M'; break;
-        case 1: cigar_string << 'I'; break;
-        case 2: cigar_string << 'D'; break;
-      }
+      cigar_string << cigar_int_to_len(s_al.cigar[i]) << cigar_int_to_op(s_al.cigar[i]);
     }
 
     int end = query_len - al->query_end - 1;
     if (end > 0) {
-      uint32_t cigar = (end << 4) | 0x0004;
+      uint32_t cigar = to_cigar_int(end, 'S');
       al->cigar.push_back(cigar);
       cigar_string << end << 'S';
     }
@@ -94,33 +85,124 @@ void ConvertAlignment(const s_align& s_al,
   } // end if
 }
 
+// @Function:
+//     Calculate the length of the previous cigar operator
+//     and store it in new_cigar and new_cigar_string.
+//     Clean up in_M (false), in_X (false), length_M (0), and length_X(0).
+void CleanPreviousMOperator(
+    bool* in_M,
+    bool* in_X,
+    uint32_t* length_M,
+    uint32_t* length_X,
+    std::vector<uint32_t>* new_cigar,
+    std::ostringstream* new_cigar_string) {
+  if (*in_M) {
+    uint32_t match = to_cigar_int(*length_M, '=');
+    new_cigar->push_back(match);
+    (*new_cigar_string) << *length_M << '=';
+  } else if (*in_X){ //in_X
+    uint32_t match = to_cigar_int(*length_X, 'X');
+    new_cigar->push_back(match);
+    (*new_cigar_string) << *length_X << 'X';
+  }
+
+  // Clean up
+  *in_M = false;
+  *in_X = false;
+  *length_M = 0;
+  *length_X = 0;
+}
+
+// @Function:
+//     1. Calculate the number of mismatches.
+//     2. Modify the cigar string:
+//         differentiate matches (M) and mismatches(X).
+//         Note that SSW does not differentiate matches and mismatches.
+// @Return:
+//     The number of mismatches.
 int CalculateNumberMismatch(
-    const StripedSmithWaterman::Alignment& al,
-    const int8_t* matrix,
+    StripedSmithWaterman::Alignment* al,
     int8_t const *ref,
-    int8_t const *query) {
-  
-  ref   += al.ref_begin;
-  query += al.query_begin;
+    int8_t const *query,
+    const int& query_len) {
+
+  ref   += al->ref_begin;
+  query += al->query_begin;
   int mismatch_length = 0;
-  for (unsigned int i = 0; i < al.cigar.size(); ++i) {
-    int32_t op = al.cigar[i] & 0x0000000f;
-    int32_t length = (al.cigar[i] >> 4) & 0x0fffffff;
-    if (op == 0) { // M
-      for (int j = 0; j < length; ++j) {
-        if (matrix[*ref] != matrix[*query]) ++mismatch_length;
+
+  std::vector<uint32_t> new_cigar;
+  std::ostringstream new_cigar_string;
+
+  if (al->query_begin > 0) {
+    uint32_t cigar = to_cigar_int(al->query_begin, 'S');
+    new_cigar.push_back(cigar);
+    new_cigar_string << al->query_begin << 'S';
+  }
+
+  bool in_M = false; // the previous is match
+  bool in_X = false; // the previous is mismatch
+  uint32_t length_M = 0;
+  uint32_t length_X = 0;
+
+  for (unsigned int i = 0; i < al->cigar.size(); ++i) {
+    char op = cigar_int_to_op(al->cigar[i]);
+    uint32_t length = cigar_int_to_len(al->cigar[i]);
+    if (op == 'M') {
+      for (uint32_t j = 0; j < length; ++j) {
+	if (*ref != *query) {
+	  ++mismatch_length;
+          if (in_M) { // the previous is a match; however the current one is a mismatch
+	    uint32_t match = to_cigar_int(length_M, '=');
+	    new_cigar.push_back(match);
+	    new_cigar_string << length_M << '=';
+	  }
+	  length_M = 0;
+	  ++length_X;
+	  in_M = false;
+	  in_X = true;
+	} else { // *ref == *query
+	  if (in_X) { // the previous is a mismatch; however the current one is a match
+	    uint32_t match = to_cigar_int(length_X, 'X');
+	    new_cigar.push_back(match);
+	    new_cigar_string << length_X << 'X';
+	  }
+	  ++length_M;
+	  length_X = 0;
+	  in_M = true;
+	  in_X = false;
+	} // end of if (*ref != *query)
 	++ref;
 	++query;
       }
-    } else if (op == 1) { // I
+    } else if (op == 'I') {
       query += length;
       mismatch_length += length;
-    } else if (op == 2) { // D
+      CleanPreviousMOperator(&in_M, &in_X, &length_M, &length_X, &new_cigar, &new_cigar_string);
+      new_cigar.push_back(al->cigar[i]);
+      new_cigar_string << length << 'I';
+    } else if (op == 'D') {
       ref += length;
       mismatch_length += length;
+      CleanPreviousMOperator(&in_M, &in_X, &length_M, &length_X, &new_cigar, &new_cigar_string);
+      new_cigar.push_back(al->cigar[i]);
+      new_cigar_string << length << 'D';
     }
   }
 
+  CleanPreviousMOperator(&in_M, &in_X, &length_M, &length_X, &new_cigar, &new_cigar_string);
+
+  int end = query_len - al->query_end - 1;
+  if (end > 0) {
+    uint32_t cigar = to_cigar_int(end, 'S');
+    new_cigar.push_back(cigar);
+    new_cigar_string << end << 'S';
+  }
+
+  al->cigar_string.clear();
+  al->cigar.clear();
+  al->cigar_string = new_cigar_string.str();
+  al->cigar = new_cigar;
+
   return mismatch_length;
 }
 
@@ -129,6 +211,13 @@ void SetFlag(const StripedSmithWaterman::Filter& filter, uint8_t* flag) {
   if (filter.report_cigar) *flag |= 0x0f;
 }
 
+// http://www.cplusplus.com/faq/sequences/arrays/sizeof-array/#cpp
+template <typename T, size_t N>
+inline size_t SizeOfArray( const T(&)[ N ] )
+{
+  return N;
+}
+
 } // namespace
 
 
@@ -139,8 +228,6 @@ Aligner::Aligner(void)
     : score_matrix_(NULL)
     , score_matrix_size_(5)
     , translation_matrix_(NULL)
-    , default_matrix_(false)
-    , matrix_built_(false)
     , match_score_(2)
     , mismatch_penalty_(2)
     , gap_opening_penalty_(3)
@@ -160,8 +247,6 @@ Aligner::Aligner(
     : score_matrix_(NULL)
     , score_matrix_size_(5)
     , translation_matrix_(NULL)
-    , default_matrix_(false)
-    , matrix_built_(false)
     , match_score_(match_score)
     , mismatch_penalty_(mismatch_penalty)
     , gap_opening_penalty_(gap_opening_penalty)
@@ -176,12 +261,10 @@ Aligner::Aligner(const int8_t* score_matrix,
                  const int&    score_matrix_size,
 	         const int8_t* translation_matrix,
 		 const int&    translation_matrix_size)
-    
+
     : score_matrix_(NULL)
     , score_matrix_size_(score_matrix_size)
     , translation_matrix_(NULL)
-    , default_matrix_(true)
-    , matrix_built_(false)
     , match_score_(2)
     , mismatch_penalty_(2)
     , gap_opening_penalty_(3)
@@ -193,7 +276,6 @@ Aligner::Aligner(const int8_t* score_matrix,
   memcpy(score_matrix_, score_matrix, sizeof(int8_t) * score_matrix_size_ * score_matrix_size_);
   translation_matrix_ = new int8_t[translation_matrix_size];
   memcpy(translation_matrix_, translation_matrix, sizeof(int8_t) * translation_matrix_size);
-  matrix_built_ = true;
 }
 
 
@@ -202,19 +284,19 @@ Aligner::~Aligner(void){
 }
 
 int Aligner::SetReferenceSequence(const char* seq, const int& length) {
-  
+
   int len = 0;
-  if (matrix_built_) {
+  if (translation_matrix_) {
     // calculate the valid length
     //int calculated_ref_length = static_cast<int>(strlen(seq));
-    //int valid_length = (calculated_ref_length > length) 
+    //int valid_length = (calculated_ref_length > length)
     //                   ? length : calculated_ref_length;
     int valid_length = length;
     // delete the current buffer
     CleanReferenceSequence();
     // allocate a new buffer
     translated_reference_ = new int8_t[valid_length];
-  
+
     len = TranslateBase(seq, valid_length, translated_reference_);
   } else {
     // nothing
@@ -226,10 +308,10 @@ int Aligner::SetReferenceSequence(const char* seq, const int& length) {
 
 }
 
-int Aligner::TranslateBase(const char* bases, const int& length, 
+int Aligner::TranslateBase(const char* bases, const int& length,
     int8_t* translated) const {
 
-  char* ptr = (char*)bases;
+  const char* ptr = bases;
   int len = 0;
   for (int i = 0; i < length; ++i) {
     translated[i] = translation_matrix_[(int) *ptr];
@@ -241,10 +323,10 @@ int Aligner::TranslateBase(const char* bases, const int& length,
 }
 
 
-bool Aligner::Align(const char* query, const Filter& filter, 
+bool Aligner::Align(const char* query, const Filter& filter,
                     Alignment* alignment) const
 {
-  if (!matrix_built_) return false;
+  if (!translation_matrix_) return false;
   if (reference_length_ == 0) return false;
 
   int query_len = strlen(query);
@@ -253,24 +335,23 @@ bool Aligner::Align(const char* query, const Filter& filter,
   TranslateBase(query, query_len, translated_query);
 
   const int8_t score_size = 2;
-  s_profile* profile = ssw_init(translated_query, query_len, score_matrix_, 
+  s_profile* profile = ssw_init(translated_query, query_len, score_matrix_,
                                 score_matrix_size_, score_size);
 
   uint8_t flag = 0;
   SetFlag(filter, &flag);
   s_align* s_al = ssw_align(profile, translated_reference_, reference_length_,
-                                 static_cast<int>(gap_opening_penalty_), 
+                                 static_cast<int>(gap_opening_penalty_),
 				 static_cast<int>(gap_extending_penalty_),
 				 flag, filter.score_filter, filter.distance_filter, query_len);
-  
+
   alignment->Clear();
   ConvertAlignment(*s_al, query_len, alignment);
-  alignment->mismatches = CalculateNumberMismatch(*alignment, score_matrix_, translated_reference_, translated_query);
+  alignment->mismatches = CalculateNumberMismatch(&*alignment, translated_reference_, translated_query, query_len);
 
 
   // Free memory
-  if (query_len > 1) delete [] translated_query;
-  else delete translated_query;
+  delete [] translated_query;
   align_destroy(s_al);
   init_destroy(profile);
 
@@ -281,8 +362,8 @@ bool Aligner::Align(const char* query, const Filter& filter,
 bool Aligner::Align(const char* query, const char* ref, const int& ref_len,
                     const Filter& filter, Alignment* alignment) const
 {
-  if (!matrix_built_) return false;
-  
+  if (!translation_matrix_) return false;
+
   int query_len = strlen(query);
   if (query_len == 0) return false;
   int8_t* translated_query = new int8_t[query_len];
@@ -290,7 +371,7 @@ bool Aligner::Align(const char* query, const char* ref, const int& ref_len,
 
   // calculate the valid length
   //int calculated_ref_length = static_cast<int>(strlen(ref));
-  //int valid_ref_len = (calculated_ref_length > ref_len) 
+  //int valid_ref_len = (calculated_ref_length > ref_len)
   //                    ? ref_len : calculated_ref_length;
   int valid_ref_len = ref_len;
   int8_t* translated_ref = new int8_t[valid_ref_len];
@@ -298,25 +379,23 @@ bool Aligner::Align(const char* query, const char* ref, const int& ref_len,
 
 
   const int8_t score_size = 2;
-  s_profile* profile = ssw_init(translated_query, query_len, score_matrix_, 
+  s_profile* profile = ssw_init(translated_query, query_len, score_matrix_,
                                 score_matrix_size_, score_size);
 
   uint8_t flag = 0;
   SetFlag(filter, &flag);
   s_align* s_al = ssw_align(profile, translated_ref, valid_ref_len,
-                                 static_cast<int>(gap_opening_penalty_), 
+                                 static_cast<int>(gap_opening_penalty_),
 				 static_cast<int>(gap_extending_penalty_),
 				 flag, filter.score_filter, filter.distance_filter, query_len);
-  
+
   alignment->Clear();
   ConvertAlignment(*s_al, query_len, alignment);
-  alignment->mismatches = CalculateNumberMismatch(*alignment, score_matrix_, translated_ref, translated_query);
+  alignment->mismatches = CalculateNumberMismatch(&*alignment, translated_ref, translated_query, query_len);
 
   // Free memory
-  if (query_len > 1) delete [] translated_query;
-  else delete translated_query;
-  if (valid_ref_len > 1) delete [] translated_ref;
-  else delete translated_ref;
+  delete [] translated_query;
+  delete [] translated_ref;
   align_destroy(s_al);
   init_destroy(profile);
 
@@ -324,23 +403,12 @@ bool Aligner::Align(const char* query, const char* ref, const int& ref_len,
 }
 
 void Aligner::Clear(void) {
-  if (score_matrix_) delete [] score_matrix_;
-  score_matrix_ = NULL;
-
-  if (!default_matrix_ && translation_matrix_) 
-    delete [] translation_matrix_;
-  translation_matrix_ = NULL;
-
+  ClearMatrices();
   CleanReferenceSequence();
-
-  default_matrix_ = false;
-  matrix_built_   = false;
 }
 
 void Aligner::SetAllDefault(void) {
   score_matrix_size_     = 5;
-  default_matrix_        = false;
-  matrix_built_          = false;
   match_score_           = 2;
   mismatch_penalty_      = 2;
   gap_opening_penalty_   = 3;
@@ -349,7 +417,7 @@ void Aligner::SetAllDefault(void) {
 }
 
 bool Aligner::ReBuild(void) {
-  if (matrix_built_) return false;
+  if (translation_matrix_) return false;
 
   SetAllDefault();
   BuildDefaultMatrix();
@@ -362,7 +430,7 @@ bool Aligner::ReBuild(
     const uint8_t& mismatch_penalty,
     const uint8_t& gap_opening_penalty,
     const uint8_t& gap_extending_penalty) {
-  if (matrix_built_) return false;
+  if (translation_matrix_) return false;
 
   SetAllDefault();
 
@@ -382,20 +450,28 @@ bool Aligner::ReBuild(
     const int8_t* translation_matrix,
     const int&    translation_matrix_size) {
 
+  ClearMatrices();
   score_matrix_ = new int8_t[score_matrix_size_ * score_matrix_size_];
   memcpy(score_matrix_, score_matrix, sizeof(int8_t) * score_matrix_size_ * score_matrix_size_);
   translation_matrix_ = new int8_t[translation_matrix_size];
   memcpy(translation_matrix_, translation_matrix, sizeof(int8_t) * translation_matrix_size);
-  matrix_built_ = true;
 
   return true;
 }
 
 void Aligner::BuildDefaultMatrix(void) {
+  ClearMatrices();
   score_matrix_ = new int8_t[score_matrix_size_ * score_matrix_size_];
   BuildSwScoreMatrix(match_score_, mismatch_penalty_, score_matrix_);
-  translation_matrix_ = kBaseTranslation;
-  matrix_built_   = true;
-  default_matrix_ = true;
+  translation_matrix_ = new int8_t[SizeOfArray(kBaseTranslation)];
+  memcpy(translation_matrix_, kBaseTranslation, sizeof(int8_t) * SizeOfArray(kBaseTranslation));
+}
+
+void Aligner::ClearMatrices(void) {
+  delete [] score_matrix_;
+  score_matrix_ = NULL;
+
+  delete [] translation_matrix_;
+  translation_matrix_ = NULL;
 }
 } // namespace StripedSmithWaterman
diff --git a/manual.html b/manual.html
index bcc29cd..12f5040 100644
--- a/manual.html
+++ b/manual.html
@@ -1,6 +1,6 @@
 <html>
 <head>
-    <title>SPAdes 3.6.2 Manual</title>
+    <title>SPAdes 3.7.0 Manual</title>
     <style type="text/css">
         .code {
             background-color: lightgray;
@@ -8,7 +8,7 @@
     </style>
 </head>
 <body>
-<h1>SPAdes 3.6.2 Manual</h1>
+<h1>SPAdes 3.7.0 Manual</h1>
 
 1. <a href="#sec1">About SPAdes</a><br>
     1.1. <a href="#sec1.1">Supported data types</a><br>
@@ -35,19 +35,18 @@
 <h2>1. About SPAdes</h2>
 <p>
     SPAdes – St. Petersburg genome assembler – is intended for both standard isolates and single-cell MDA bacteria assemblies. This manual will help you to install and run SPAdes. 
-SPAdes version 3.6.2 was released under GPLv2 on November 20, 2015 and can be downloaded from  <a href="http://bioinf.spbau.ru/en/spades" target="_blank">http://bioinf.spbau.ru/en/spades</a>.
-
+SPAdes version 3.7.0 was released under GPLv2 on February 24, 2016 and can be downloaded from  <a href="http://bioinf.spbau.ru/en/spades" target="_blank">http://bioinf.spbau.ru/en/spades</a>.
 
 <a name="sec1.1"></a>
 <h3>1.1 Supported data types</h3>
 <p>
     The current version of SPAdes works with Illumina or IonTorrent reads and is capable of providing hybrid assemblies using PacBio, Oxford Nanopore and Sanger reads. You can also provide additional contigs that will be used as long reads.
 <p>
-    Version 3.6.2 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. 
+    Version 3.7.0 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. Note that SPAdes was initially designed for small genomes. It was tested on single-cell and standard bacterial and fungal data sets. SPAdes is not intended for larger genomes (e.g. mammalian size genomes). For such purposes you can use it at your own risk.
 <p>
-    Note, that SPAdes was initially designed for small genomes. It was tested on single-cell and standard bacterial and fungal data sets. SPAdes is not intended for larger genomes (e.g. mammalian size genomes) and metagenomic projects. For such purposes you can use it at your own risk.
+    SPAdes 3.7.0 also includes metaSPAdes – a pipeline designed specifically for metagenomic data sets. To learn more see <a href="#meta">options</a>.
 <p>
-    SPAdes has also a separate modules for assembling highly polymorphic diploid genomes and for TruSeq barcode assembly. For more information see <a href="dipspades_manual.html" target="_blank">dipSPAdes manual</a> and <a href="truspades_manual.html" target="_blank">truSPAdes manual</a> .
+    Additionally, SPAdes has separate modules for assembling highly polymorphic diploid genomes and for TruSeq barcode assembly. For more information see <a href="dipspades_manual.html" target="_blank">dipSPAdes manual</a> and <a href="truspades_manual.html" target="_blank">truSPAdes manual</a>.
 
 
 <a name="sec1.2"></a>
@@ -75,7 +74,7 @@ SPAdes comes in several separate modules:
         <li> <a href="http://spades.bioinf.spbau.ru/spades_test_datasets/ecoli_sc/" target="_blank">MDA single-cell <i>E. coli</i></a>; 6.3 Gb, 29M reads, 2x100bp, insert size ~ 270bp </li>
     </ul>
 <p>
-    We ran SPAdes with default parameters using 16 threads on a server with Intel Xeon 2.27GHz processors. BayesHammer runs in approximately 30-40 minutes and takes up to 8Gb of RAM to perform read error correction on each data set. Assembly takes about 10 minutes for the <i>E. coli</i> isolate data set and 20 minutes for the <i>E. coli</i> single-cell data set. Both data sets require 9Gb of RAM (see notes below). MismatchCorrector runs for about an hour on standard <i>E. coli</i>, a bit [...]
+    We ran SPAdes with default parameters using 16 threads on a server with Intel Xeon 2.27GHz processors. BayesHammer runs in approximately 30-40 minutes and takes up to 8Gb of RAM to perform read error correction on each data set. Assembly takes about 15 minutes for the <i>E. coli</i> isolate data set and 30 minutes for the <i>E. coli</i> single-cell data set. Both data sets require about 9Gb of RAM (see notes below). MismatchCorrector runs for about 25 minutes on both data sets, and r [...]
 
 <p>
     <table border="1" cellpadding="4" cellspacing="0">
@@ -97,42 +96,42 @@ SPAdes comes in several separate modules:
 
         <tr>
             <td> BayesHammer </td>
-            <td align="center"> 33m </td> 
+            <td align="center"> 34m </td>
+            <td align="center"> 7.7 </td>
+            <td align="center"> 8.4 </td>
+            <td align="center"> 40m </td>
             <td align="center"> 7.5 </td>
-            <td align="center"> 9 </td>
-            <td align="center"> 41m </td> 
-            <td align="center"> 7 </td>
-            <td align="center"> 10 </td>
+            <td align="center"> 8.8 </td>
         </tr>
 
         <tr>
             <td> SPAdes </td>
-            <td align="center"> 12m </td> 
-            <td align="center"> 9 </td>
-            <td align="center"> 1.5 </td>
-            <td align="center"> 22m </td> 
-            <td align="center"> 9 </td>
-            <td align="center"> 2.5 </td>
+            <td align="center"> 16m </td>
+            <td align="center"> 8.6 </td>
+            <td align="center"> 1.6 </td>
+            <td align="center"> 28m </td>
+            <td align="center"> 8.6 </td>
+            <td align="center"> 2.7 </td>
         </tr>
 
         <tr>
             <td> MismatchCorrector </td>
-            <td align="center"> 1h 7m </td> 
-            <td align="center"> 1 </td>
-            <td align="center"> 22 </td>
-            <td align="center"> 1h 18m </td> 
-            <td align="center"> 1 </td>
-            <td align="center"> 24 </td>
+            <td align="center"> 22m </td>
+            <td align="center"> 1.8 </td>
+            <td align="center"> 21.8 </td>
+            <td align="center"> 26m </td>
+            <td align="center"> 1.8 </td>
+            <td align="center"> 22.9 </td>
         </tr>
 
         <tr>
             <td> Whole pipeline </td>
-            <td align="center"> 1h 52m </td> 
-            <td align="center"> 9 </td>
-            <td align="center"> 24 </td>
-            <td align="center"> 2h 21m </td> 
-            <td align="center"> 9 </td>
-            <td align="center"> 26 </td>
+            <td align="center"> 1h 12m </td>
+            <td align="center"> 8.6 </td>
+            <td align="center"> 24.2 </td>
+            <td align="center"> 1h 34m </td>
+            <td align="center"> 8.6 </td>
+            <td align="center"> 25.5 </td>
         </tr>
     </table>
 
@@ -142,26 +141,27 @@ SPAdes comes in several separate modules:
         <li> Running SPAdes without preliminary read error correction (e.g. without BayesHammer or IonHammer) will likely require more time and memory. </li>
         <li> Each module removes its temporary files as soon as it finishes. </li>
         <li> SPAdes uses 512 Mb per thread for buffers, which results in higher memory consumption. If you set memory limit manually, SPAdes will use smaller buffers and thus less RAM. </li>
-        <li> Performance statistics is given for SPAdes version 3.6.2. </li>
+        <li> Performance statistics is given for SPAdes version 3.7.0. </li>
     </ul>
 
 
 <a name="sec2"></a>
 <h2>2. Installation</h2>
 <p>
+
     SPAdes requires a 64-bit Linux system or Mac OS and Python (supported versions are 2.4, 2.5, 2.6, 2.7, 3.2, 3.3, 3.4 and 3.5) to be pre-installed on it. To obtain SPAdes you can either download binaries or download source code and compile it yourself.
 
 <a name="sec2.1"></a>
 <h3>2.1 Downloading SPAdes Linux binaries</h3>
 
 <p>
-    To download <a href="http://spades.bioinf.spbau.ru/release3.6.2/SPAdes-3.6.2-Linux.tar.gz">SPAdes Linux binaries</a> and extract them, go to the directory in which you wish SPAdes to be installed and run:
+    To download <a href="http://spades.bioinf.spbau.ru/release3.7.0/SPAdes-3.7.0-Linux.tar.gz">SPAdes Linux binaries</a> and extract them, go to the directory in which you wish SPAdes to be installed and run:
 
 <pre  class="code">
 <code>
-    wget http://spades.bioinf.spbau.ru/release3.6.2/SPAdes-3.6.2-Linux.tar.gz
-    tar -xzf SPAdes-3.6.2-Linux.tar.gz
-    cd SPAdes-3.6.2-Linux/bin/
+    wget http://spades.bioinf.spbau.ru/release3.7.0/SPAdes-3.7.0-Linux.tar.gz
+    tar -xzf SPAdes-3.7.0-Linux.tar.gz
+    cd SPAdes-3.7.0-Linux/bin/
 </code>
 </pre>
 
@@ -187,13 +187,13 @@ SPAdes comes in several separate modules:
 <h3>2.2 Downloading SPAdes binaries for Mac</h3>
 
 <p>
-    To obtain <a href="http://spades.bioinf.spbau.ru/release3.6.2/SPAdes-3.6.2-Darwin.tar.gz">SPAdes binaries for Mac</a>, go to the directory in which you wish SPAdes to be installed and run:
+    To obtain <a href="http://spades.bioinf.spbau.ru/release3.7.0/SPAdes-3.7.0-Darwin.tar.gz">SPAdes binaries for Mac</a>, go to the directory in which you wish SPAdes to be installed and run:
 
 <pre  class="code">
 <code>
-    curl http://spades.bioinf.spbau.ru/release3.6.2/SPAdes-3.6.2-Darwin.tar.gz -o SPAdes-3.6.2-Darwin.tar.gz
-    tar -zxf SPAdes-3.6.2-Darwin.tar.gz
-    cd SPAdes-3.6.2-Darwin/bin/
+    curl http://spades.bioinf.spbau.ru/release3.7.0/SPAdes-3.7.0-Darwin.tar.gz -o SPAdes-3.7.0-Darwin.tar.gz
+    tar -zxf SPAdes-3.7.0-Darwin.tar.gz
+    cd SPAdes-3.7.0-Darwin/bin/
 </code>
 </pre>
 
@@ -228,13 +228,13 @@ SPAdes comes in several separate modules:
     </ul>
 
 <p>
-    If you meet these requirements, you can download the <a href="http://spades.bioinf.spbau.ru/release3.6.2/SPAdes-3.6.2.tar.gz">SPAdes source code</a>: 
+    If you meet these requirements, you can download the <a href="http://spades.bioinf.spbau.ru/release3.7.0/SPAdes-3.7.0.tar.gz">SPAdes source code</a>: 
 
 <pre class="code">
 <code>
-    wget http://spades.bioinf.spbau.ru/release3.6.2/SPAdes-3.6.2.tar.gz
-    tar -xzf SPAdes-3.6.2.tar.gz
-    cd SPAdes-3.6.2
+    wget http://spades.bioinf.spbau.ru/release3.7.0/SPAdes-3.7.0.tar.gz
+    tar -xzf SPAdes-3.7.0.tar.gz
+    cd SPAdes-3.7.0
 </code>
 </pre>
 
@@ -338,7 +338,7 @@ Thank you for using SPAdes!
     SPAdes takes as input paired-end reads, mate-pairs and single (unpaired) reads in FASTA and FASTQ. For IonTorrent data SPAdes also supports unpaired reads in unmapped BAM format (like the one produced by Torrent Server). However, in order to run read error correction, reads should be in FASTQ or BAM format. Sanger, Oxford Nanopore and PacBio CLR reads can be provided in both formats since SPAdes does not run error correction for these types of data.
 
 <p>
-    To run SPAdes 3.6.2 you need at least one library of the following types:
+    To run SPAdes 3.7.0 you need at least one library of the following types:
     <ul>
         <li>Illumina paired-end/high-quality mate-pairs/unpaired reads</li>
         <li>IonTorrent paired-end/high-quality mate-pairs/unpaired reads</li>
@@ -373,7 +373,7 @@ SPAdes supports mate-pair only assembly. However, we recommend to use only high-
 
 <h4>Read-pair libraries</h4>
 <p>
-    By using command line interface, you can specify up to five different paired-end libraries, up to five mate-pair libraries and also up to five high-quality mate-pair ones. If you wish to use more, you can use <a href="#yaml">YAML data set file</a>. We further refer to paired-end and mate-pair libraries simply as to read-pair libraries. 
+    By using command line interface, you can specify up to nine different paired-end libraries, up to nine mate-pair libraries and also up to nine high-quality mate-pair ones. If you wish to use more, you can use <a href="#yaml">YAML data set file</a>. We further refer to paired-end and mate-pair libraries simply as to read-pair libraries.
 
 <p>
     By default, SPAdes assumes that paired-end and high-quality mate-pair reads have forward-reverse (fr) orientation and usual mate-pairs have reverse-forward (rf) orientation. However, different orientations can be set for any library by using SPAdes options. 
@@ -396,7 +396,7 @@ SPAdes supports mate-pair only assembly. However, we recommend to use only high-
 
 <h4>Unpaired (single-read) libraries</h4>
 <p>
-    By using command line interface, you can specify up to five different single-read libraries. To input more libraries, you can use <a href="#yaml">YAML data set file</a>. 
+    By using command line interface, you can specify up to nine different single-read libraries. To input more libraries, you can use <a href="#yaml">YAML data set file</a>.
 <p>
     Single librairies are assumed to have high quality and a reasonable coverage. For example, you can provide PacBio CCS reads as a single-read library. Additionally, if you have merged a paired-end library with overlapping read-pairs (for example, using <a href="http://ccb.jhu.edu/software/FLASH/" target="_blank">FLASh</a>), you can provide the resulting reads as a single-read library.
 <p>
@@ -452,6 +452,12 @@ Note that we assume that SPAdes installation directory is added to the <code>PAT
         This flag is required for MDA (single-cell) data.
 </p>
 
+<a name="meta"></a>
+<p>
+    <code>--meta </code><br>
+        This flag is required when assembling metagenomic data sets (runs metaSPAdes). Note that metaSPAdes supports only a <b>single</b> paired-end library and does not support <a href="#correctoropt">careful mode</a> (mismatch correction is not available). In addition, you cannot specify coverage cutoff for metaSPAdes.
+</p>
+
 <p>
     <code>--iontorrent </code><br>
         This flag is required when assembling IonTorrent data. Allows BAM files as input. Carefully read <a href="#sec3.3">section 3.3</a> before using this option.
@@ -468,6 +474,10 @@ Note that we assume that SPAdes installation directory is added to the <code>PAT
         Prints help.
 </p>
 
+<p>
+    <code>-v</code> (or <code>--version</code>)<br>
+        Prints SPAdes version.
+</p>
 
 
 <a name="pipelineopt">
@@ -486,7 +496,7 @@ Note that we assume that SPAdes installation directory is added to the <code>PAT
 <a name="correctoropt">
 <p>
     <code>--careful</code><br>
-        Tries to reduce the number of mismatches and short indels. Also runs MismatchCorrector – a post processing tool, which uses <a href="http://bio-bwa.sourceforge.net" target="_blank">BWA</a> tool (comes with SPAdes). This option is recommended. 
+        Tries to reduce the number of mismatches and short indels. Also runs MismatchCorrector – a post processing tool, which uses <a href="http://bio-bwa.sourceforge.net" target="_blank">BWA</a> tool (comes with SPAdes). This option is recommended for single-genome assembly, but is not supported by metaSPAdes metagenomic pipeline. 
 
 </p>
 
@@ -553,7 +563,7 @@ In comparison to the <code>--continue</code> option, you can change some of the
 
 <p>
     <code>--s<b><#></b> <file_name> </code><br>
-        File for single-read library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5). For example, for the first paired-end library the option is:
+        File for single-read library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9). For example, for the first paired-end library the option is:
     <code>--s1 <file_name> </code><br>
         Do not use <code>-s</code> options for single-read libraries, since it specifies unpaired reads for the first paired-end library.
 </p>
@@ -563,29 +573,29 @@ In comparison to the <code>--continue</code> option, you can change some of the
 
 <p>
     <code>--pe<b><#></b>-12 <file_name> </code><br>
-        File with interlaced reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5). For example, for the first single-read library the option is:
+        File with interlaced reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9). For example, for the first single-read library the option is:
     <code>--pe1-12 <file_name> </code><br>
 </p>
 
 <p>
     <code>--pe<b><#></b>-1 <file_name> </code><br>
-        File with left reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with left reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 
 <p>
     <code>--pe<b><#></b>-2 <file_name> </code><br>
-        File with right reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with right reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 
 <p>
     <code>--pe<b><#></b>-s <file_name> </code><br>
-        File with unpaired reads from paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5) <br>
+        File with unpaired reads from paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9) <br>
         For example, paired reads can become unpaired during the error correction procedure.
 </p>
 
 <p>
     <code>--pe<b><#></b>-<b><or></b> <file_name> </code><br>
-        Orientation of reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5; <code><b><or></b></code> = "fr","rf","ff"). <br>
+        Orientation of reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9; <code><b><or></b></code> = "fr","rf","ff"). <br>
         The default orientation for paired-end libraries is forward-reverse. For example, to specify reverse-forward orientation for the second paired-end library, you should use the flag:
     <code>--pe2-rf </code><br>
 </p>
@@ -593,21 +603,21 @@ In comparison to the <code>--continue</code> option, you can change some of the
 <li><b> Mate-pair libraries</b></li>
 <p>
     <code>--mp<b><#></b>-12 <file_name> </code><br>
-        File with interlaced reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with interlaced reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 
 <p>
     <code>--mp<b><#></b>-1 <file_name> </code><br>
-        File with left reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with left reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 
 <p>
     <code>--mp<b><#></b>-2 <file_name> </code><br>
-        File with right reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with right reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 <p>
     <code>--mp<b><#></b>-<b><or></b> <file_name> </code><br>
-        Orientation of reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5; <code><b><or></b></code> = "fr","rf","ff"). <br>
+        Orientation of reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9; <code><b><or></b></code> = "fr","rf","ff"). <br>
         The default orientation for mate-pair libraries is reverse-forward. For example, to specify forward-forward orientation for the first mate-pair library, you should use the flag:
     <code>--mp1-ff </code><br>
 </p>
@@ -617,26 +627,26 @@ In comparison to the <code>--continue</code> option, you can change some of the
 
 <p>
     <code>--hqmp<b><#></b>-12 <file_name> </code><br>
-        File with interlaced reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with interlaced reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 
 <p>
     <code>--hqmp<b><#></b>-1 <file_name> </code><br>
-        File with left reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with left reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 
 <p>
     <code>--hqmp<b><#></b>-2 <file_name> </code><br>
-        File with right reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with right reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 <p>
     <code>--hqmp<b><#></b>-s <file_name> </code><br>
-        File with unpaired reads from high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5) <br>
+        File with unpaired reads from high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9) <br>
 </p>
 
 <p>
     <code>--hqmp<b><#></b>-<b><or></b> <file_name> </code><br>
-        Orientation of reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5; <code><b><or></b></code> = "fr","rf","ff"). <br>
+        Orientation of reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9; <code><b><or></b></code> = "fr","rf","ff"). <br>
         The default orientation for high-quality mate-pair libraries is forward-reverse. For example, to specify reverse-forward orientation for the first high-quality mate-pair library, you should use the flag:
     <code>--hqmp1-rf </code><br>
 </p>
@@ -648,12 +658,12 @@ In comparison to the <code>--continue</code> option, you can change some of the
 
 <p>
     <code>--nxmate<b><#></b>-1 <file_name> </code><br>
-        File with left reads for Lucigen NxSeq® Long Mate Pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with left reads for Lucigen NxSeq® Long Mate Pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 
 <p>
     <code>--nxmate<b><#></b>-2 <file_name> </code><br>
-        File with right reads for Lucigen NxSeq® Long Mate Pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,3,4,5).
+        File with right reads for Lucigen NxSeq® Long Mate Pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 
 </p>
@@ -826,7 +836,7 @@ and PacBio CCS and CLR reads:
 
 <p>
     <code>--cov-cutoff <float></code><br>
-        Read coverage cutoff value. Must be a positive float value, or 'auto', or 'off'. Default value is 'off'. When set to 'auto' SPAdes automatically computes coverage threshold using conservative strategy.
+        Read coverage cutoff value. Must be a positive float value, or 'auto', or 'off'. Default value is 'off'. When set to 'auto' SPAdes automatically computes coverage threshold using conservative strategy. Note that this option is not supported by metaSPAdes.
 </p>
 
 
@@ -1143,8 +1153,13 @@ The full list of <code><output_dir></code> content is presented below:
 <a name="sec4">
 <h2>4. Citation</h2>
 <p>
-    If you use SPAdes in your research, please include <a href="http://link.springer.com/chapter/10.1007%2F978-3-642-37195-0_13" target="_blank">Nurk, Bankevich et al., 2013</a> in your reference list. You can also add 
-<a href="http://online.liebertpub.com/doi/abs/10.1089/cmb.2012.0021" target="_blank">Bankevich, Nurk et al., 2012</a> instead.
+    If you use SPAdes in your research, please include <a href="http://link.springer.com/chapter/10.1007%2F978-3-642-37195-0_13" target="_blank">Nurk, Bankevich et al., 2013</a> in your reference list. You may also add <a href="http://online.liebertpub.com/doi/abs/10.1089/cmb.2012.0021" target="_blank">Bankevich, Nurk et al., 2012</a> instead.
+
+<p>
+    If you use PacBio or Nanopore reads, you may also cite  <a href="http://bioinformatics.oxfordjournals.org/content/early/2015/11/20/bioinformatics.btv688.short" target="_blank">Antipov et al., 2014</a>. If you use multiple paired-end and/or mate-pair libraries you may also cite papers describing SPAdes repeat resolution algorithms <a href="http://bioinformatics.oxfordjournals.org/content/30/12/i293.short" target="_blank">Prjibelski et al., 2014</a> and <a href="http://bioinformatics.o [...]
+
+<p>  
+    For the information about dipSPAdes and truSPAdes papers see <a href="dipspades_manual.html" target="_blank">dipSPAdes manual</a> and <a href="truspades_manual.html" target="_blank">truSPAdes manual</a> respectively.
 
 <p>
     In addition, we would like to list your publications that use our software on our website. Please email the reference, the name of your lab, department and institution to <a href="mailto:spades.support at bioinf.spbau.ru" target="_blank">spades.support at bioinf.spbau.ru</a>.
diff --git a/spades.py b/spades.py
index c851eaf..2c4a95a 100755
--- a/spades.py
+++ b/spades.py
@@ -42,6 +42,7 @@ elif sys.version.startswith('3.'):
 import moleculo_postprocessing
 import alignment
 
+
 def print_used_values(cfg, log):
     def print_value(cfg, section, param, pretty_param="", margin="  "):
         if not pretty_param:
@@ -95,7 +96,8 @@ def print_used_values(cfg, log):
             log.info(" Illumina TruSeq mode")
         else:
             log.info("  Multi-cell mode (you should set '--sc' flag if input data"\
-                     " was obtained with MDA (single-cell) technology")
+                     " was obtained with MDA (single-cell) technology"\
+                     " or --meta flag if processing metagenomic dataset)")
         if cfg["dataset"].iontorrent:
             log.info("  IonTorrent data")
 
@@ -216,7 +218,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         elif opt == "--sc":
             options_storage.single_cell = True
         elif opt == "--meta":
-            support.error('--meta option not available yet')
+            options_storage.meta = True
         elif opt == "--iontorrent":
             options_storage.iontorrent = True
         elif opt == "--disable-gzip-output":
@@ -300,6 +302,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
             options_storage.mismatch_corrector = False
             options_storage.careful = False
 
+        elif opt == '-v' or opt == "--version":
+            show_version()
         elif opt == '-h' or opt == "--help":
             show_usage(0)
         elif opt == "--help-hidden":
@@ -330,6 +334,9 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
             options_storage.save_restart_options(log)
         else:  # overriding previous run parameters
             options_storage.load_restart_options()
+    if options_storage.meta:
+        if options_storage.careful or options_storage.mismatch_corrector or options_storage.cov_cutoff != "off":
+            support.error("you cannot specify --careful, --mismatch-correction or --cov-cutoff in metagenomic mode!", log)
     if options_storage.continue_mode:
         return None, None
 
@@ -397,9 +404,13 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if options_storage.bh_heap_check:
             cfg["error_correction"].__dict__["heap_check"] = options_storage.bh_heap_check
         cfg["error_correction"].__dict__["iontorrent"] = options_storage.iontorrent
+        if options_storage.meta:
+            cfg["error_correction"].__dict__["count_filter_singletons"] = 1
 
     # assembly
     if not options_storage.only_error_correction:
+        if options_storage.k_mers == 'auto' and options_storage.restart_from is None:
+            options_storage.k_mers = None
         if options_storage.k_mers:
             cfg["assembly"].__dict__["iterative_K"] = options_storage.k_mers
         else:
@@ -424,7 +435,6 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
     cfg["run_truseq_postprocessing"] = options_storage.run_truseq_postprocessing
     return cfg, dataset_data
 
-
 def check_cfg_for_partial_run(cfg, type='restart-from'):  # restart-from ot stop-after
     if type == 'restart-from':
         check_point = options_storage.restart_from
@@ -491,6 +501,11 @@ def get_options_from_params(params_filename, spades_py_name=None):
     return cmd_line, cmd_line[spades_py_pos + len(spades_py_name):].split()
 
 
+def show_version():
+    options_storage.version(spades_version)
+    sys.exit(0)
+
+
 def show_usage(code, show_hidden=False):
     options_storage.usage(spades_version, show_hidden=show_hidden)
     sys.exit(code)
diff --git a/spades_init.py b/spades_init.py
index aa15c04..ce55e1a 100644
--- a/spades_init.py
+++ b/spades_init.py
@@ -35,4 +35,4 @@ def init():
     for dir in source_dirs:
         sys.path.append(os.path.join(python_modules_home, 'spades_pipeline', dir))
 
-    spades_version = open(os.path.join(spades_home, 'VERSION'), 'r').readline()
+    spades_version = open(os.path.join(spades_home, 'VERSION'), 'r').readline().strip()
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 05b0d7c..d971bc3 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -56,8 +56,10 @@ include(libs)
 
 include(GetGitRevisionDescription)
 get_git_head_revision(SPADES_GIT_REFSPEC SPADES_GIT_SHA1)
-if (SPADES_GIT_REFSPEC STREQUAL "GITDIR-NOTFOUND")
+if ((NOT SPADES_GIT_REFSPEC) OR (SPADES_GIT_REFSPEC STREQUAL "GITDIR-NOTFOUND"))
   set(SPADES_GIT_REFSPEC "N/A")
+endif()
+if (NOT SPADES_GIT_SHA1)
   set(SPADES_GIT_SHA1 "N/A")
 endif()
 
@@ -65,6 +67,8 @@ message("Building SPAdes from ${SPADES_GIT_REFSPEC}, sha: ${SPADES_GIT_SHA1}")
 
 configure_file("${SPADES_MAIN_INCLUDE_DIR}/config.hpp.in"
                "${SPADES_BUILT_INCLUDE_DIR}/config.hpp")
+configure_file("${SPADES_MAIN_INCLUDE_DIR}/version.hpp.in"
+               "${SPADES_BUILT_INCLUDE_DIR}/version.hpp")
 
 # Build external dependencies (if any)
 add_subdirectory("${EXT_DIR}/src" "${Project_BINARY_DIR}/ext")
@@ -78,7 +82,6 @@ endif()
 
 # sub projects
 add_subdirectory(io)
-add_subdirectory(mph_index)
 add_subdirectory(debruijn)
 add_subdirectory(hammer)
 add_subdirectory(ionhammer)
diff --git a/src/cmake/pack.cmake b/src/cmake/pack.cmake
index 0a22636..73bf02f 100644
--- a/src/cmake/pack.cmake
+++ b/src/cmake/pack.cmake
@@ -12,10 +12,10 @@ set(CPACK_PACKAGE_NAME "SPAdes")
 set(CPACK_PACKAGE_VENDOR "Saint Petersburg Academic University")
 set(CPACK_PACKAGE_DESCRIPTION_FILE "${SPADES_MAIN_SRC_DIR}/../README")
 set(CPACK_RESOURCE_FILE_LICENSE "${SPADES_MAIN_SRC_DIR}/../LICENSE")
-set(CPACK_PACKAGE_VERSION "3.6.2")
+set(CPACK_PACKAGE_VERSION "3.7.0")
 set(CPACK_PACKAGE_VERSION_MAJOR "3")
-set(CPACK_PACKAGE_VERSION_MINOR "6")
-set(CPACK_PACKAGE_VERSION_PATCH "2")
+set(CPACK_PACKAGE_VERSION_MINOR "7")
+set(CPACK_PACKAGE_VERSION_PATCH "0")
 set(CPACK_STRIP_FILES bin/spades bin/hammer bin/ionhammer bin/dipspades bin/spades-bwa bin/corrector bin/scaffold_correction)
 
 # Source stuff
diff --git a/src/corrector/CMakeLists.txt b/src/corrector/CMakeLists.txt
index f7ebfd5..42c66bb 100644
--- a/src/corrector/CMakeLists.txt
+++ b/src/corrector/CMakeLists.txt
@@ -11,16 +11,14 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 
 
 add_executable(corrector
-	positional_read.cpp
-	read.cpp
-	sam_reader.cpp
-	interesting_pos_processor.cpp
-	contig_processor.cpp
-	dataset_processor.cpp
-	config_struct.cpp
+	    positional_read.cpp
+        interesting_pos_processor.cpp
+        contig_processor.cpp
+        dataset_processor.cpp
+        config_struct.cpp
         main.cpp)
 
-target_link_libraries(corrector samtools input yaml-cpp ${COMMON_LIBRARIES})
+target_link_libraries(corrector input yaml-cpp ${COMMON_LIBRARIES})
 
 
 
diff --git a/src/corrector/contig_processor.cpp b/src/corrector/contig_processor.cpp
index 063fa18..325d797 100644
--- a/src/corrector/contig_processor.cpp
+++ b/src/corrector/contig_processor.cpp
@@ -40,14 +40,14 @@ void ContigProcessor::ReadContig() {
 
 void ContigProcessor::UpdateOneRead(const SingleSamRead &tmp, MappedSamStream &sm) {
     unordered_map<size_t, position_description> all_positions;
-    if (tmp.get_contig_id() < 0) {
+    if (tmp.contig_id() < 0) {
         return;
     }
-    auto cur_s = sm.get_contig_name(tmp.get_contig_id());
+    auto cur_s = sm.get_contig_name(tmp.contig_id());
     if (contig_name_.compare(cur_s) != 0) {
         return;
     }
-    tmp.CountPositions(all_positions, contig_.length());
+    CountPositions(tmp, all_positions);
     size_t error_num = 0;
 
     for (auto &pos : all_positions) {
@@ -123,6 +123,128 @@ size_t ContigProcessor::UpdateOneBase(size_t i, stringstream &ss, const unordere
     }
 }
 
+
+bool ContigProcessor::CountPositions(const SingleSamRead &read, unordered_map<size_t, position_description> &ps) const {
+
+    if (read.contig_id() < 0) {
+        DEBUG("not this contig");
+        return false;
+    }
+    //TODO: maybe change to read.is_properly_aligned() ?
+    if (read.map_qual() == 0) {
+        DEBUG("zero qual");
+        return false;
+    }
+    int pos = read.pos();
+    if (pos < 0) {
+        WARN("Negative position " << pos << " found on read " << read.name() << ", skipping");
+        return false;
+    }
+    size_t position = size_t(pos);
+    int mate = 1;  // bonus for mate mapped can be here;
+    size_t l_read = (size_t) read.data_len();
+    size_t l_cigar = read.cigar_len();
+
+    int aligned_length = 0;
+    uint32_t *cigar = read.cigar_ptr();
+    //* in cigar;
+    if (l_cigar == 0)
+        return false;
+    if (bam_cigar_opchr(cigar[0]) == '*')
+        return false;
+    for (size_t i = 0; i < l_cigar; i++)
+        if (bam_cigar_opchr(cigar[i]) == 'M')
+            aligned_length += bam_cigar_oplen(cigar[i]);
+//Filters poorly aligned reads; it is unclear whether this check is necessary
+    double read_len_double = (double) l_read;
+    if ((aligned_length < min(read_len_double * 0.4, 40.0)) && (position > read_len_double / 2) && (contig_.length() > read_len_double / 2 + (double) position)) {
+        return false;
+    }
+    int state_pos = 0;
+    int shift = 0;
+    size_t skipped = 0;
+    size_t deleted = 0;
+    string insertion_string = "";
+    auto seq = read.seq_ptr();
+    for (size_t i = 0; i < l_read; i++) {
+        DEBUG(i << " " << position << " " << skipped);
+        if (shift + bam_cigar_oplen(cigar[state_pos]) <= i) {
+            shift += bam_cigar_oplen(cigar[state_pos]);
+            state_pos += 1;
+        }
+        if (insertion_string != "" and bam_cigar_opchr(cigar[state_pos]) != 'I') {
+            VERIFY(i + position >= skipped + 1);
+            size_t ind = i + position - skipped - 1;
+            if (ind >= contig_.length())
+                break;
+            ps[ind].insertions[insertion_string] += 1;
+            insertion_string = "";
+        }
+        char cur_state = bam_cigar_opchr(cigar[state_pos]);
+        if (cur_state == 'M') {
+            VERIFY(i >= deleted);
+            if (i + position < skipped) {
+                WARN(i << " " << position << " " << skipped);
+                INFO(read.name());
+            }
+            VERIFY(i + position >= skipped);
+
+            size_t ind = i + position - skipped;
+            size_t cur = var_to_pos[(int) bam_nt16_rev_table[bam1_seqi(seq, i - deleted)]];
+            if (ind >= contig_.length())
+                continue;
+            ps[ind].votes[cur] = ps[ind].votes[cur] + mate;
+
+        } else {
+            if (cur_state == 'I' || cur_state == 'H' || cur_state == 'S' ) {
+                if (cur_state == 'I') {
+                    if (insertion_string == "") {
+                        size_t ind = i + position - skipped - 1;
+                        if (ind >= contig_.length())
+                            break;
+                        ps[ind].votes[Variants::Insertion] += mate;
+                    }
+                    insertion_string += bam_nt16_rev_table[bam1_seqi(seq, i - deleted)];
+                }
+                skipped += 1;
+            } else if (bam_cigar_opchr(cigar[state_pos]) == 'D') {
+                if (i + position - skipped >= contig_.length())
+                    break;
+                ps[i + position - skipped].votes[Variants::Deletion] += mate;
+                deleted += 1;
+            }
+        }
+    }
+    if (insertion_string != "" and bam_cigar_opchr(cigar[state_pos]) != 'I') {
+        VERIFY(l_read + position >= skipped + 1);
+        size_t ind = l_read + position - skipped - 1;
+        if (ind < contig_.length()) {
+            ps[ind].insertions[insertion_string] += 1;
+        }
+        insertion_string = "";
+    }
+    return true;
+}
+
+
+bool ContigProcessor::CountPositions(const PairedSamRead &read, unordered_map<size_t, position_description> &ps) const {
+
+    TRACE("starting pairing");
+    bool t1 = CountPositions(read.Left(), ps );
+    unordered_map<size_t, position_description> tmp;
+    bool t2 = CountPositions(read.Right(), tmp);
+    //overlaps.. multimap? Look on qual?
+    if (ps.size() == 0 || tmp.size() == 0) {
+        //We do not need paired reads which are not really paired
+        ps.clear();
+        return false;
+    }
+    TRACE("counted, uniting maps of " << tmp.size() << " and " << ps.size());
+    ps.insert(tmp.begin(), tmp.end());
+    TRACE("united");
+    return (t1 && t2);
+}
+
 size_t ContigProcessor::ProcessMultipleSamFiles() {
     error_counts_.resize(kMaxErrorNum);
     for (const auto &sf : sam_files_) {
@@ -144,11 +266,11 @@ size_t ContigProcessor::ProcessMultipleSamFiles() {
             if (sf.second == io::LibraryType::PairedEnd ) {
                 PairedSamRead tmp;
                 sm >> tmp;
-                tmp.CountPositions(ps, contig_.length());
+                CountPositions(tmp, ps);
             } else {
                 SingleSamRead tmp;
                 sm >> tmp;
-                tmp.CountPositions(ps, contig_.length());
+                CountPositions(tmp, ps);
             }
             ipp_.UpdateInterestingRead(ps);
         }
diff --git a/src/corrector/contig_processor.hpp b/src/corrector/contig_processor.hpp
index 7a5830d..1011323 100644
--- a/src/corrector/contig_processor.hpp
+++ b/src/corrector/contig_processor.hpp
@@ -13,19 +13,22 @@
  */
 
 #pragma once
-#include "sam_reader.hpp"
-#include "read.hpp"
 #include "interesting_pos_processor.hpp"
 #include "positional_read.hpp"
-
-#include "io/library.hpp"
 #include "openmp_wrapper.h"
 
+#include <io/sam/sam_reader.hpp>
+#include <io/sam/read.hpp>
+#include <io/library.hpp>
+
 #include <string>
 #include <vector>
 #include <unordered_map>
 
 namespace corrector {
+
+using namespace sam_reader;
+
 typedef std::vector<std::pair<std::string, io::LibraryType> > sam_files_type;
 class ContigProcessor {
     sam_files_type sam_files_;
@@ -48,6 +51,9 @@ public:
     size_t ProcessMultipleSamFiles();
 private:
     void ReadContig();
+//Moved from read.hpp
+    bool CountPositions(const SingleSamRead &read, std::unordered_map<size_t, position_description> &ps) const;
+    bool CountPositions(const PairedSamRead &read, std::unordered_map<size_t, position_description> &ps) const;
 
     void UpdateOneRead(const SingleSamRead &tmp, MappedSamStream &sm);
     //returns: number of changed nucleotides;
diff --git a/src/corrector/dataset_processor.cpp b/src/corrector/dataset_processor.cpp
index b209ce7..94dd8f6 100644
--- a/src/corrector/dataset_processor.cpp
+++ b/src/corrector/dataset_processor.cpp
@@ -253,7 +253,7 @@ void DatasetProcessor::ProcessDataset() {
                     SplitPairedLibrary(samf, lib_num);
                     lib_num++;
                 } else {
-                    WARN("Failed to align paired reads " << left << " and " << right);
+                    FATAL_ERROR("Failed to align paired reads " << left << " and " << right);
                 }
             }
             for (auto iter = dataset.single_begin(); iter != dataset.single_end(); iter++) {
@@ -268,7 +268,7 @@ void DatasetProcessor::ProcessDataset() {
                     SplitSingleLibrary(samf, lib_num);
                     lib_num++;
                 } else {
-                    WARN("Failed to align single reads " << left);
+                    FATAL_ERROR("Failed to align single reads " << left);
                 }
             }
         }
diff --git a/src/corrector/dataset_processor.hpp b/src/corrector/dataset_processor.hpp
index 236d033..1012c8f 100644
--- a/src/corrector/dataset_processor.hpp
+++ b/src/corrector/dataset_processor.hpp
@@ -11,7 +11,8 @@
 
 #include "io/file_reader.hpp"
 #include "path_helper.hpp"
-#include "io/library.hpp"
+
+#include <io/library.hpp>
 
 #include <string>
 #include <set>
diff --git a/src/corrector/interesting_pos_processor.hpp b/src/corrector/interesting_pos_processor.hpp
index 8eab501..6e1cc62 100644
--- a/src/corrector/interesting_pos_processor.hpp
+++ b/src/corrector/interesting_pos_processor.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "read.hpp"
+#include "positional_read.hpp"
 
 #include <vector>
 #include <string>
diff --git a/src/corrector/main.cpp b/src/corrector/main.cpp
index 5eb4971..52d345e 100644
--- a/src/corrector/main.cpp
+++ b/src/corrector/main.cpp
@@ -11,6 +11,8 @@
 #include "logger/log_writers.hpp"
 #include "segfault_handler.hpp"
 
+#include "version.hpp"
+
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <string>
@@ -46,7 +48,7 @@ int main(int argc, char** argv) {
         path::make_dir(corr_cfg::get().work_dir);
 
     INFO("Starting MismatchCorrector, built from " SPADES_GIT_REFSPEC ", git revision " SPADES_GIT_SHA1);
-    
+
     corrector::DatasetProcessor dp(contig_name, corr_cfg::get().work_dir, corr_cfg::get().output_dir, corr_cfg::get().max_nthreads);
     dp.ProcessDataset();
     unsigned ms = (unsigned) pc.time_ms();
diff --git a/src/corrector/read.cpp b/src/corrector/read.cpp
deleted file mode 100644
index 7611ed3..0000000
--- a/src/corrector/read.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#include "read.hpp"
-#include "variants_table.hpp"
-
-#include "logger/log_writers.hpp"
-
-using namespace std;
-
-
-namespace corrector {
-
-int SingleSamRead::CountPositions(unordered_map<size_t, position_description> &ps, const size_t &contig_length) const {
-
-    if (get_contig_id() < 0) {
-        DEBUG("not this contig");
-        return -1;
-    }
-    if (data_->core.qual == 0) {
-        DEBUG("zero qual");
-        return -1;
-    }
-    int pos = data_->core.pos;
-    if (pos < 0) {
-        WARN("Negative position " << pos << " found on read " << get_name() << ", skipping");
-        return -1;
-    }
-    size_t position = size_t(pos);
-    int mate = 1;  // bonus for mate mapped can be here;
-    size_t l_read = get_data_len();
-    size_t l_cigar = get_cigar_len();
-
-    int aligned_length = 0;
-    uint32_t *cigar = bam1_cigar(data_);
-    //* in cigar;
-    if (l_cigar == 0)
-        return -1;
-    if (bam_cigar_opchr(cigar[0]) == '*')
-        return -1;
-    for (size_t i = 0; i < l_cigar; i++)
-        if (bam_cigar_opchr(cigar[i]) == 'M')
-            aligned_length += bam_cigar_oplen(cigar[i]);
-//It's about bad aligned reads, but whether it is necessary?
-    double read_len_double = (double) l_read;
-    if ((aligned_length < min(read_len_double * 0.4, 40.0)) && (position > read_len_double / 2) && (contig_length > read_len_double / 2 + (double) position)) {
-        return -1;
-    }
-    int state_pos = 0;
-    int shift = 0;
-    size_t skipped = 0;
-    size_t deleted = 0;
-    string insertion_string = "";
-    auto seq = bam1_seq(data_);
-    for (size_t i = 0; i < l_read; i++) {
-        DEBUG(i << " " << position << " " << skipped);
-        if (shift + bam_cigar_oplen(cigar[state_pos]) <= i) {
-            shift += bam_cigar_oplen(cigar[state_pos]);
-            state_pos += 1;
-        }
-        if (insertion_string != "" and bam_cigar_opchr(cigar[state_pos]) != 'I') {
-            VERIFY(i + position >= skipped + 1);
-            size_t ind = i + position - skipped - 1;
-            if (ind >= contig_length)
-                break;
-            ps[ind].insertions[insertion_string] += 1;
-            insertion_string = "";
-        }
-        char cur_state = bam_cigar_opchr(cigar[state_pos]);
-        if (cur_state == 'M') {
-            VERIFY(i >= deleted);
-            if (i + position < skipped) {
-                WARN(i << " " << position << " " << skipped);
-                INFO(get_name());
-            }
-            VERIFY(i + position >= skipped);
-
-            size_t ind = i + position - skipped;
-            size_t cur = var_to_pos[(int) bam_nt16_rev_table[bam1_seqi(seq, i - deleted)]];
-            if (ind >= contig_length)
-                continue;
-            ps[ind].votes[cur] = ps[ind].votes[cur] + mate;
-
-        } else {
-            if (cur_state == 'I' || cur_state == 'H' || cur_state == 'S' ) {
-                if (cur_state == 'I') {
-                    if (insertion_string == "") {
-                        size_t ind = i + position - skipped - 1;
-                        if (ind >= contig_length)
-                            break;
-                        ps[ind].votes[Variants::Insertion] += mate;
-                    }
-                    insertion_string += bam_nt16_rev_table[bam1_seqi(seq, i - deleted)];
-                }
-                skipped += 1;
-            } else if (bam_cigar_opchr(cigar[state_pos]) == 'D') {
-                if (i + position - skipped >= contig_length)
-                    break;
-                ps[i + position - skipped].votes[Variants::Deletion] += mate;
-                deleted += 1;
-            }
-        }
-    }
-    if (insertion_string != "" and bam_cigar_opchr(cigar[state_pos]) != 'I') {
-        VERIFY(l_read + position >= skipped + 1);
-        size_t ind = l_read + position - skipped - 1;
-        if (ind < contig_length) {
-            ps[ind].insertions[insertion_string] += 1;
-        }
-        insertion_string = "";
-    }
-    return 0;
-}
-
-string SingleSamRead::get_cigar() const {
-    uint32_t *cigar = bam1_cigar(data_);
-    string res;
-    res.reserve(data_->core.n_cigar);
-    for (size_t k = 0; k < data_->core.n_cigar; ++k) {
-        res += std::to_string(bam_cigar_oplen(cigar[k]));
-        res += bam_cigar_opchr(cigar[k]);
-
-    }
-    return res;
-}
-
-string SingleSamRead::get_name() const {
-    string res(bam1_qname(data_));
-    return res;
-}
-
-string SingleSamRead::get_seq() const {
-    string res = "";
-    auto b = bam1_seq(data_);
-    for (int k = 0; k < data_->core.l_qseq; ++k) {
-        res += bam_nt16_rev_table[bam1_seqi(b, k)];
-    }
-    return res;
-}
-
-int PairedSamRead::CountPositions(unordered_map<size_t, position_description> &ps, const size_t &contig_length) const {
-
-    TRACE("starting pairing");
-    int t1 = r1.CountPositions(ps, contig_length);
-    unordered_map<size_t, position_description> tmp;
-    int t2 = r2.CountPositions(tmp, contig_length);
-    //overlaps.. multimap? Look on qual?
-    if (ps.size() == 0 || tmp.size() == 0) {
-        //We do not need paired reads which are not really paired
-        ps.clear();
-        return -1;
-    }
-    TRACE("counted, uniting maps of " << tmp.size() << " and " << ps.size());
-    ps.insert(tmp.begin(), tmp.end());
-    TRACE("united");
-    return t1 + t2;
-}
-
-}
-;
diff --git a/src/debruijn/CMakeLists.txt b/src/debruijn/CMakeLists.txt
index 1556914..2bd2da0 100644
--- a/src/debruijn/CMakeLists.txt
+++ b/src/debruijn/CMakeLists.txt
@@ -12,6 +12,13 @@ add_library(debruijn STATIC
             config_struct.cpp
             path_extend/pe_config_struct.cpp
             path_extend/bidirectional_path.cpp
+            path_extend/scaffolder2015/scaff_supplementary.cpp
+            path_extend/scaffolder2015/extension_chooser2015.cpp
+            path_extend/scaffolder2015/scaffold_graph.cpp
+            path_extend/scaffolder2015/scaffold_graph_constructor.cpp
+            path_extend/scaffolder2015/scaffold_graph_visualizer.cpp
+            path_extend/scaffolder2015/connection_condition2015.cpp
+            genome_consistance_checker.cpp
             stage.cpp
             construction.cpp
             gap_closer.cpp
@@ -22,16 +29,18 @@ add_library(debruijn STATIC
             distance_estimation.cpp
             repeat_resolving.cpp
             genomic_info_filler.cpp
-            pacbio_aligning.cpp)
+            pacbio_aligning.cpp
+            bwa_pair_info_filler.cpp
+            genome_storage.cpp)
   
-# We have to do this to minimize changes in ConsensusCore itself
 target_include_directories(debruijn PRIVATE ${EXT_DIR}/include/ConsensusCore)
-target_link_libraries(debruijn ConsensusCore input mph_index nlopt BamTools yaml-cpp ${COMMON_LIBRARIES})
+target_link_libraries(debruijn ConsensusCore input cityhash nlopt BamTools ssw yaml-cpp ${COMMON_LIBRARIES})
 
 add_executable(spades
                main.cpp)
 target_link_libraries(spades debruijn)
 
+
 if (SPADES_STATIC_BUILD)
   set_target_properties(spades PROPERTIES LINK_SEARCH_END_STATIC 1)
 endif()
diff --git a/src/debruijn/bwa_pair_info_filler.cpp b/src/debruijn/bwa_pair_info_filler.cpp
new file mode 100644
index 0000000..36511fc
--- /dev/null
+++ b/src/debruijn/bwa_pair_info_filler.cpp
@@ -0,0 +1,407 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "bwa_pair_info_filler.hpp"
+
+
+namespace bwa_pair_info {
+
+
+void MapperReadT::ParseCigar(const string& cigar) {
+    string num = "";
+    bool left_side = true;
+    for (size_t i = 0; i < cigar.length(); ++i) {
+        if (isdigit(cigar[i])) {
+            num += cigar[i];
+        }
+        else {
+            if (cigar[i] == 'H') {
+                if (left_side)
+                    left_hard_clip_ = (uint16_t) std::stoi(num);
+                else
+                    right_hard_clip_ = (uint16_t) std::stoi(num);
+                num = "";
+            }
+            else if (cigar[i] == 'S') {
+                if (left_side)
+                    left_soft_clip_ = (uint16_t) std::stoi(num);
+                else
+                    right_soft_clip_ = (uint16_t) std::stoi(num);
+                num = "";
+            }
+            else {
+                left_side = false;
+                num = "";
+            }
+        }
+    }
+}
+
+//Correct read alignment according to orientation and clippings
+void BWACorrectingProcessor::ProcessPairedRead(const MapperReadT& l, const MapperReadT& r) {
+    using io::LibraryOrientation;
+
+    if (!l.IsValid() || !r.IsValid()) {
+        return;
+    }
+    ++count_;
+
+    MappedPositionT left_pos(edge_id_map_.at(stoi(l.get_contig_id())), l.pos());
+    MappedPositionT right_pos(edge_id_map_.at(stoi(r.get_contig_id())), r.pos());
+
+    //This function is overloaded in BWAISCounter and BWAIndexFiller
+    if (!CheckAlignments(left_pos, right_pos)) {
+        return;
+    }
+
+    int r_from_pos_to_right_end = r.len() + r.right_hard_clip() - r.left_soft_clip();
+    int l_from_pos_to_left_end = l.left_soft_clip() + l.left_hard_clip();
+
+    if ((!l.is_forward() && (lib_.orientation() == LibraryOrientation::FF || lib_.orientation() == LibraryOrientation::FR)) ||
+        (l.is_forward() && (lib_.orientation() == LibraryOrientation::RF || lib_.orientation() == LibraryOrientation::RR))) {
+        left_pos.e = g_.conjugate(left_pos.e);
+        left_pos.pos = (int) g_.length(left_pos.e) - left_pos.pos - (l.len() - l.left_soft_clip() - l.right_soft_clip()) + (int) g_.k();
+        l_from_pos_to_left_end = l.right_soft_clip() + l.right_hard_clip();
+    }
+    if ((!r.is_forward() && (lib_.orientation() == LibraryOrientation::FF || lib_.orientation() == LibraryOrientation::RF)) ||
+        (r.is_forward() && (lib_.orientation() == LibraryOrientation::FR || lib_.orientation() == LibraryOrientation::RR))) {
+        right_pos.e = g_.conjugate(right_pos.e);
+        right_pos.pos = (int) g_.length(right_pos.e) - right_pos.pos - (r.len() - r.left_soft_clip() - r.right_soft_clip()) + (int) g_.k();
+        r_from_pos_to_right_end = r.len() + r.left_hard_clip() - r.right_soft_clip();
+    }
+
+    right_pos.pos = right_pos.pos + r_from_pos_to_right_end;
+    left_pos.pos = left_pos.pos - l_from_pos_to_left_end;
+
+    //This function is overloaded in BWAISCounter and BWAIndexFiller
+    ProcessAlignments(left_pos, right_pos);
+}
+
+// ==== insert size counter overloads ====
+bool BWAISCounter::CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) {
+    return l.e == r.e && g_.length(l.e) >= min_contig_len_;
+}
+
+void BWAISCounter::ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) {
+    ++mapped_count_;
+
+    int is = r.pos - l.pos;
+    if (is > 0 || !ignore_negative_) {
+        hist_[is] += 1;
+    } else {
+        ++negative_count_;
+    }
+}
+
+bool BWAISCounter::RefineInsertSize(SequencingLibraryT& reads) const {
+    using namespace omnigraph;
+    size_t correctly_mapped = mapped_count_ - negative_count_;
+    INFO(correctly_mapped << " paired reads (" << ((double) correctly_mapped * 100.0 / (double) count_) << "% of all) aligned to long edges");
+
+    if (negative_count_ > 3 * correctly_mapped)
+        WARN("Too much reads aligned with negative insert size. Is the library orientation set properly?");
+    if (mapped_count_ == 0)
+        return false;
+
+    std::map<size_t, size_t> percentiles;
+    find_mean(hist_, reads.data().mean_insert_size, reads.data().insert_size_deviation, percentiles);
+    find_median(hist_, reads.data().median_insert_size, reads.data().insert_size_mad, reads.data().insert_size_distribution);
+    if (reads.data().median_insert_size < reads.data().read_length) {
+        return false;
+    }
+
+    std::tie(reads.data().insert_size_left_quantile, reads.data().insert_size_right_quantile) =
+        GetISInterval(0.8, reads.data().insert_size_distribution);
+
+    return !reads.data().insert_size_distribution.empty();
+}
+
+// ==== pair info index filler overloads ====
+EdgePair BWAIndexFiller::ConjugatePair(EdgePair ep) const {
+    return make_pair(g_.conjugate(ep.second), g_.conjugate(ep.first));
+}
+
+void BWAIndexFiller::ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) {
+    EdgePair ep{l.e, r.e};
+    TRACE("Lpos " << l.pos << ", Rpos " << r.pos);
+    int edge_distance = (int) lib_.data().mean_insert_size  - r.pos + l.pos;
+    TRACE("Distance " << edge_distance);
+
+    paired_index_.Add(ep.first, ep.second, { (double) edge_distance, 1.0 });
+}
+
+bool BWAIndexFiller::CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) {
+    return g_.length(l.e) >= min_contig_len_ && g_.length(r.e) >= min_contig_len_;
+}
+
+
+//Main class realization
+void BWAPairInfoFiller::OutputEdges(const string &filename) const {
+    io::osequencestream_simple oss(filename);
+    for (auto it = g_.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
+        debruijn_graph::EdgeId e = *it;
+        oss.set_header(ToString(g_.int_id(e)));
+        oss << g_.EdgeNucls(e);
+    }
+}
+void BWAPairInfoFiller::FillEdgeIdMap() {
+    for (auto it = g_.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
+        debruijn_graph::EdgeId e = *it;
+        edge_id_map_.insert(make_pair(g_.int_id(e), e));
+    }
+}
+
+bool BWAPairInfoFiller::CreateIndex(const string& contigs) {
+    int run_res = 0;
+    string err_log = path::append_path(work_dir_, "index.err");
+    string index_line = bwa_path_ + string(" index ") + "-a is " + contigs + " 2>" + err_log;
+    INFO("Running bwa index ... ");
+    INFO("Command line: " << index_line);
+    run_res = system(index_line.c_str());
+    if (run_res != 0) {
+        ERROR("bwa index failed, cannot align reads");
+        return false;
+    }
+    return true;
+}
+
+
+bool BWAPairInfoFiller::RunBWA(const string& reads_file, const string& out_sam_file) const {
+    string run_command = bwa_path_ + " mem -t " + ToString(nthreads_) + " " + index_base_ + " "  + reads_file + "  > " + out_sam_file + " 2>"
+        + out_sam_file + ".txt";
+    INFO("Running bwa mem ...");
+    INFO("Command line: " << run_command);
+
+    int run_res = system(run_command.c_str());
+    if (run_res != 0) {
+        ERROR("bwa mem failed, cannot align reads");
+        return false;
+    }
+    return true;
+}
+
+bool BWAPairInfoFiller::AlignLib(const SequencingLibraryT& lib,
+                                 const string& sam_file_base,
+                                 vector<pair<string, string>>& resulting_sam_files) {
+
+    VERIFY_MSG(Init(), "BWA index was not constructed properly");
+    resulting_sam_files.clear();
+    size_t file_index = 0;
+    bool any_aligned = false;
+
+    for (auto iter = lib.paired_begin(); iter != lib.paired_end(); iter++) {
+        string left_reads = iter->first;
+        string left_sam = sam_file_base + "_1_" + ToString(file_index) + ".sam";
+        bool res = RunBWA(left_reads, left_sam);
+        if (!res) {
+            WARN("Failed to align left reads " << left_reads);
+            continue;
+        }
+        string right_reads = iter->second;
+        string right_sam = sam_file_base + "_2_" + ToString(file_index) + ".sam";
+        res = RunBWA(right_reads, right_sam);
+        if (!res) {
+            WARN("Failed to align right reads " << right_reads);
+            continue;
+        }
+
+        resulting_sam_files.push_back(make_pair(left_sam, right_sam));
+        any_aligned = true;
+    }
+    return any_aligned;
+}
+
+
+void BWAPairInfoFiller::ProcessSAMFiles(const string &left_sam, const string &right_sam,
+                                        BWAPairedReadProcessor& processor) {
+
+    //Left and right reads are stored in maps until pair is detected
+    unordered_map<string, MapperReadT> left_reads;
+    unordered_map<string, MapperReadT> right_reads;
+    size_t counter = 0;
+    //Check for duplicating read IDs
+    bool left_duplicated = false;
+    bool right_duplicated = false;
+
+    INFO("Reading SAM files " << left_sam << " and " << right_sam);
+    MappedSamStream lf(left_sam);
+    MappedSamStream rf(right_sam);
+    while (!lf.eof() || !rf.eof()) {
+        SingleSamRead left_read;
+        MapperReadT left_data;
+        string l_name = "";
+
+        SingleSamRead right_read;
+        MapperReadT right_data;
+        string r_name = "";
+
+        if (!lf.eof()) {
+            lf >> left_read;
+            l_name = left_read.name();
+            if (left_read.is_properly_aligned()) {
+                TRACE("Left read " << l_name);
+                left_data = MapperReadT(string(lf.get_contig_name(left_read.contig_id())),
+                                        left_read.pos(),
+                                        left_read.data_len(),
+                                        left_read.strand(),
+                                        left_read.cigar());
+            }
+            else if (!left_read.is_main_alignment()) {
+                //If not primary alignment ignore mapping
+                TRACE("Ignoring left read");
+                l_name = "";
+            }
+        }
+        if (!rf.eof()) {
+            rf >> right_read;
+            r_name = right_read.name();
+            if (right_read.is_properly_aligned()) {
+                TRACE("Right read " << r_name);
+                right_data = MapperReadT(string(rf.get_contig_name(right_read.contig_id())),
+                                         right_read.pos(),
+                                         right_read.data_len(),
+                                         right_read.strand(),
+                                         right_read.cigar());
+            }
+            else if (!right_read.is_main_alignment()) {
+                //If not primary alignment ignore mapping
+                TRACE("Ignoring right read");
+                r_name = "";
+            }
+        }
+
+        //Think about custom read names
+        if (l_name == r_name) {
+            TRACE("Equal processing");
+            //Process immediately if ids are equal in both SAM entries
+            processor.ProcessPairedRead(left_data, right_data);
+            VERBOSE_POWER2(++counter, "Processed " << counter << " paired reads");
+            continue;
+        }
+
+        if (r_name != "") {
+            auto it = left_reads.find(r_name);
+            if (it != left_reads.end())  {
+                //Right read's mate found in map
+                TRACE("Right read's mate found, processing");
+                processor.ProcessPairedRead(it->second, right_data);
+                VERBOSE_POWER2(++counter, "Processed " << counter << " paired reads");
+                //Remove mate as used
+                left_reads.erase(it);
+            }
+            else {
+                TRACE("Right read's mate not found, adding to map");
+                if (right_reads.count(r_name) == 0) {
+                    //Insert read without mate for further analysis
+                    //TODO inspect map size and performance
+                    right_reads.emplace(r_name, right_data);
+                } else {
+                    DEBUG("Right read " << r_name << " is duplicated!");
+                    //Report duplication
+                    right_duplicated = true;
+                }
+            }
+        }
+
+        if (l_name != "") {
+            auto it = right_reads.find(l_name);
+            if (it != right_reads.end()) {
+                //Left read's mate found in map
+                TRACE("Left read's mate found, processing");
+                processor.ProcessPairedRead(left_data, it->second);
+                VERBOSE_POWER2(++counter, "Processed " << counter << " paired reads");
+                //Remove mate as used
+                right_reads.erase(it);
+            }
+            else {
+                TRACE("Left read's mate not found, adding to map");
+                if (left_reads.count(l_name) == 0) {
+                    //Insert read without mate for further analysis
+                    //TODO inspect map size and performance
+                    left_reads.emplace(l_name, left_data);
+                } else {
+                    DEBUG("Left read " << r_name << " is duplicated!");
+                    //Report duplication
+                    left_duplicated = true;
+                }
+
+            }
+        }
+    }
+
+    if (left_duplicated)
+        WARN("SAM file " << left_sam << " contains duplicated read ids");
+    if (right_duplicated)
+        WARN("SAM file " << right_sam << " contains duplicated read ids");
+}
+
+bool BWAPairInfoFiller::Init() {
+    if (!index_constructed_) {
+        INFO("Initializing bwa pair info counter, working dir " << work_dir_);
+        path::make_dir(base_dir_);
+        work_dir_ = path::make_temp_dir(base_dir_, "");
+        index_base_= path::append_path(work_dir_, "long_edges.fasta");
+        INFO("Saving edges to " << index_base_);
+        OutputEdges(index_base_);
+        FillEdgeIdMap();
+        index_constructed_ = CreateIndex(index_base_);
+    }
+    return index_constructed_;
+}
+
+bool BWAPairInfoFiller::ProcessLib(size_t lib_index,
+                                   SequencingLibraryT& lib,
+                                   PairedInfoIndexT& paired_index,
+                                   size_t counter_edge_len,
+                                   size_t index_filler_edge_len) {
+    //Initialize if needed
+    Init();
+    string lib_dir =  path::append_path(work_dir_, ToString(lib_index));
+    path::make_dir(lib_dir);
+    vector<pair<string, string>> sam_files;
+    bool result = false;
+
+    INFO("Mapping lib #" << lib_index << " using BWA");
+    if (!AlignLib(lib, path::append_path(lib_dir, "single"), sam_files)) {
+        WARN("Failed to align lib #" << lib_index);
+        return false;
+    }
+
+    INFO("Estimating insert size for library #" << lib_index);
+    BWAISCounter counter(lib, edge_id_map_, g_, counter_edge_len);
+    for (const auto& sam_pair : sam_files) {
+        ProcessSAMFiles(sam_pair.first, sam_pair.second, counter);
+    }
+
+    if (!counter.RefineInsertSize(lib)) {
+        lib.data().mean_insert_size = 0.0;
+        WARN("Unable to estimate insert size paired library #" << lib_index);
+    }
+    else {
+        INFO("  Estimated insert size for paired library #" << lib_index);
+        INFO("  Insert size = " << lib.data().mean_insert_size <<
+            ", deviation = " << lib.data().insert_size_deviation <<
+            ", left quantile = " << lib.data().insert_size_left_quantile <<
+            ", right quantile = " << lib.data().insert_size_right_quantile <<
+            ", read length = " << lib.data().read_length);
+
+        INFO("Collecting paired information for library #" << lib_index);
+        paired_index.Init();
+
+        BWAIndexFiller filler(lib, edge_id_map_, g_, paired_index, index_filler_edge_len);
+        for (const auto& sam_pair : sam_files) {
+            ProcessSAMFiles(sam_pair.first, sam_pair.second, filler);
+        }
+        result = true;
+    }
+    if (remove_tmp_files_)
+        path::remove_dir(lib_dir);
+    return result;
+}
+
+
+}
diff --git a/src/debruijn/bwa_pair_info_filler.hpp b/src/debruijn/bwa_pair_info_filler.hpp
new file mode 100644
index 0000000..92eedeb
--- /dev/null
+++ b/src/debruijn/bwa_pair_info_filler.hpp
@@ -0,0 +1,254 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "standard.hpp"
+#include "debruijn_graph.hpp"
+#include "config_struct.hpp"
+
+#include <io/sam/sam_reader.hpp>
+#include <io/sam/read.hpp>
+
+#include <io/osequencestream.hpp>
+#include <de/paired_info.hpp>
+#include <de/insert_size_refiner.hpp>
+
+#ifndef PROJECT_BWA_PAIR_INFO_FILLER_HPP_H
+#define PROJECT_BWA_PAIR_INFO_FILLER_HPP_H
+
+namespace bwa_pair_info {
+
+using namespace sam_reader;
+using debruijn_graph::EdgeId;
+
+typedef omnigraph::de::UnclusteredPairedInfoIndexT<debruijn_graph::Graph> PairedInfoIndexT;
+typedef io::SequencingLibrary<debruijn_graph::debruijn_config::DataSetData> SequencingLibraryT;
+typedef std::pair<debruijn_graph::EdgeId, debruijn_graph::EdgeId> EdgePair;
+typedef unordered_map<size_t, debruijn_graph::EdgeId> EdgeIdMap;
+
+//More compact representation of aligned read for storing in map
+class MapperReadT {
+public:
+    MapperReadT(): contig_id_(""), pos_(-1), len_(-1), is_forward_(true),
+                   left_hard_clip_(0), right_hard_clip_(0), left_soft_clip_(0), right_soft_clip_(0){}
+
+    MapperReadT(const string& ctg_id, int32_t pos, int32_t len, bool is_forward, const string& cigar):
+        contig_id_(ctg_id), pos_(pos), len_(len), is_forward_(is_forward),
+        left_hard_clip_(0), right_hard_clip_(0), left_soft_clip_(0), right_soft_clip_(0) {
+
+        ParseCigar(cigar);
+    }
+
+    bool IsValid() const {
+        return contig_id_ != "";
+    }
+
+private:
+
+    void ParseCigar(const string& cigar);
+
+public:
+    const string &get_contig_id() const {
+        return contig_id_;
+    }
+    int32_t pos() const {
+        return pos_;
+    }
+    int32_t len() const {
+        return len_;
+    }
+    bool is_forward() const {
+        return is_forward_;
+    }
+    uint32_t left_soft_clip() const {
+        return left_soft_clip_;
+    }
+    uint32_t right_soft_clip() const {
+        return right_soft_clip_;
+    }
+    uint32_t left_hard_clip() const {
+        return left_hard_clip_;
+    }
+    uint32_t right_hard_clip() const {
+        return right_hard_clip_;
+    }
+
+private:
+    string contig_id_;
+    int32_t pos_;
+    int32_t len_;
+    bool is_forward_;
+    uint32_t left_hard_clip_:16, right_hard_clip_:16;
+    uint32_t left_soft_clip_:16, right_soft_clip_:16;
+};
+
+//Base class for aligned read processor (simple analog of SequenceMapperListener)
+class BWAPairedReadProcessor {
+public:
+    virtual void ProcessPairedRead(const MapperReadT& l, const MapperReadT& r) = 0;
+
+    virtual ~BWAPairedReadProcessor() {
+
+    }
+};
+
+//Class that corrects mapping positions according to lib orientation and clippings
+class BWACorrectingProcessor: public BWAPairedReadProcessor {
+protected:
+    const SequencingLibraryT& lib_;
+
+    const EdgeIdMap& edge_id_map_;
+
+    const debruijn_graph::Graph& g_;
+
+    size_t count_;
+
+public:
+
+    struct MappedPositionT {
+        EdgeId e;
+        int pos;
+
+        MappedPositionT(EdgeId e_, int pos_): e(e_), pos(pos_) {
+
+        }
+    };
+
+    BWACorrectingProcessor(const SequencingLibraryT& lib, const EdgeIdMap& edge_id_map, const debruijn_graph::Graph& g):
+        lib_(lib), edge_id_map_(edge_id_map), g_(g), count_(0) {
+    }
+
+    virtual bool CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) = 0;
+
+    virtual void ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) = 0;
+//Correct read algnment according to orientation and clippings
+    virtual void ProcessPairedRead(const MapperReadT& l, const MapperReadT& r);
+};
+
+//Insert size counter
+class BWAISCounter: public BWACorrectingProcessor {
+private:
+    HistType hist_;
+    size_t min_contig_len_;
+    bool ignore_negative_;
+    size_t mapped_count_;
+    size_t negative_count_;
+
+public:
+    BWAISCounter(const SequencingLibraryT& lib, const EdgeIdMap& edge_id_map, const debruijn_graph::Graph& g,
+                 size_t min_contig_len, bool ignore_negative = false):
+        BWACorrectingProcessor(lib, edge_id_map, g), hist_(), min_contig_len_(min_contig_len),
+        ignore_negative_(ignore_negative), mapped_count_(0), negative_count_(0) {
+    }
+
+    bool CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) override;
+
+    void ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) override;
+
+    bool RefineInsertSize(SequencingLibraryT& reads) const ;
+
+};
+
+//Pair info filler
+class BWAIndexFiller: public BWACorrectingProcessor {
+
+private:
+    PairedInfoIndexT& paired_index_;
+
+    size_t min_contig_len_;
+
+    EdgePair ConjugatePair(EdgePair ep) const;
+
+public:
+    BWAIndexFiller(const SequencingLibraryT& lib, const EdgeIdMap& edge_id_map, const debruijn_graph::Graph& g,
+                   PairedInfoIndexT& paired_index, size_t min_contig_len = 0):
+        BWACorrectingProcessor(lib, edge_id_map, g), paired_index_(paired_index), min_contig_len_(min_contig_len) {
+    }
+
+    bool CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) override;
+
+    void ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) override;
+};
+
+//Class for running BWA, managing and parsing SAM files
+class BWAPairInfoFiller {
+public:
+    DECL_LOGGER("BWAPairInfo");
+
+private:
+    const debruijn_graph::Graph& g_;
+
+    string bwa_path_;
+
+    string base_dir_;
+
+    string work_dir_;
+
+    size_t nthreads_;
+
+    string index_base_;
+
+    bool index_constructed_;
+
+    bool remove_tmp_files_;
+
+    unordered_map<size_t, debruijn_graph::EdgeId> edge_id_map_;
+
+private:
+
+    //Save graph in fasta format
+    void OutputEdges(const string& filename) const;
+
+    //Construct int_id -> EdgeId map
+    void FillEdgeIdMap();
+
+    //Run bwa index
+    bool CreateIndex(const string& contigs);
+
+    //Initialize for read aligment (includes all above)
+    bool Init();
+
+    //Run bwa mem on single file
+    bool RunBWA(const string& reads_file, const string& out_sam_file) const;
+
+    //Process single read library
+    bool AlignLib(const SequencingLibraryT& lib,
+                      const string& sam_file_base,
+                      vector<pair<string, string>>& resulting_sam_files);
+
+    //Parse a pair of same files and analyze alignments with processor
+    void ProcessSAMFiles(const string &left_sam, const string &right_sam,
+                         BWAPairedReadProcessor& processor);
+
+public:
+
+    BWAPairInfoFiller(const debruijn_graph::Graph& g,
+                      const string& bwa_path,
+                      const string& work_dir,
+                      size_t nthreads = 1,
+                      bool remove_tmp = true):
+        g_(g), bwa_path_(bwa_path), base_dir_(work_dir), work_dir_(""),
+        nthreads_(nthreads), index_base_(""), index_constructed_(false),
+        remove_tmp_files_(remove_tmp),
+        edge_id_map_() {
+    }
+
+    ~BWAPairInfoFiller() {
+        if (remove_tmp_files_)
+            path::remove_if_exists(work_dir_);
+    }
+
+    //Count IS and fill pair info index for the given lib
+    bool ProcessLib(size_t lib_index,
+                    SequencingLibraryT& lib,
+                    PairedInfoIndexT& paired_index,
+                    size_t counter_edge_len,
+                    size_t index_filler_edge_len);
+};
+
+}
+
+#endif //PROJECT_BWA_PAIR_INFO_FILLER_HPP_H
diff --git a/src/debruijn/config_struct.cpp b/src/debruijn/config_struct.cpp
index 4066584..b5b8fbd 100644
--- a/src/debruijn/config_struct.cpp
+++ b/src/debruijn/config_struct.cpp
@@ -37,6 +37,7 @@ struct convert<io::SequencingLibrary<debruijn_graph::debruijn_config::DataSetDat
       node["insert size distribution"]   = data.insert_size_distribution;
       node["average coverage"]           = data.average_coverage;
       node["pi threshold"]               = data.pi_threshold;
+      node["binary converted"]           = data.binary_coverted;
       node["single reads mapped"]        = data.single_reads_mapped;
 
       return node;
@@ -60,6 +61,7 @@ struct convert<io::SequencingLibrary<debruijn_graph::debruijn_config::DataSetDat
 
       data.average_coverage           = node["average coverage"].as<double>(0.0);
       data.pi_threshold               = node["pi threshold"].as<double>(0.0);
+      data.binary_coverted            = node["binary converted"].as<bool>(false);
       data.single_reads_mapped        = node["single reads mapped"].as<bool>(false);
 
       return true;
@@ -108,12 +110,12 @@ void load_lib_data(const std::string& prefix) {
   cfg::get_writable().ds.reads.load(prefix + ".lib_data");
 
   // Now, infer the common parameters
-  const auto& reads = cfg::get().ds.reads;
   size_t max_rl = 0;
-  double avg_cov = 0.0, avg_rl;
-  for (auto it = reads.library_begin(), et = reads.library_end(); it != et; ++it) {
-      auto const& data = it->data();
-      if (it->is_graph_contructable())
+  double avg_cov = 0.0;
+  double avg_rl = 0.0;
+  for (const auto& lib : cfg::get().ds.reads.libraries()) {
+      auto const& data = lib.data();
+      if (lib.is_graph_contructable())
           max_rl = std::max(max_rl, data.read_length);
       if (data.average_coverage > 0)
           avg_cov = data.average_coverage;
@@ -194,17 +196,23 @@ inline void load(estimation_mode& est_mode,
 }
 
 void load(debruijn_config::simplification::bulge_remover& br,
-          boost::property_tree::ptree const& pt, bool /*complete*/) {
+          boost::property_tree::ptree const& pt, bool complete) {
   using config_common::load;
 
-  load(br.enabled                           , pt,   "enabled"					);
-  load(br.max_bulge_length_coefficient		, pt,   "max_bulge_length_coefficient");
+  load(br.enabled                           , pt,   "enabled"					  , complete);
+  load(br.main_iteration_only               , pt,   "main_iteration_only"	      , complete);
+  load(br.max_bulge_length_coefficient		, pt,   "max_bulge_length_coefficient", complete);
   load(br.max_additive_length_coefficient	, pt,
-       "max_additive_length_coefficient");
-  load(br.max_coverage,                     pt,     "max_coverage");
-  load(br.max_relative_coverage,            pt,     "max_relative_coverage");
-  load(br.max_delta,                        pt,     "max_delta");
-  load(br.max_relative_delta,               pt,     "max_relative_delta");
+       "max_additive_length_coefficient", complete);
+  load(br.max_coverage,                     pt,     "max_coverage", complete);
+  load(br.max_relative_coverage,            pt,     "max_relative_coverage", complete);
+  load(br.max_delta,                        pt,     "max_delta", complete);
+  load(br.max_relative_delta,               pt,     "max_relative_delta", complete);
+  load(br.max_number_edges,                 pt,     "max_number_edges", complete);
+  load(br.parallel,                         pt,     "parallel", complete);
+  load(br.buff_size,                        pt,     "buff_size", complete);
+  load(br.buff_cov_diff,                    pt,     "buff_cov_diff", complete);
+  load(br.buff_cov_rel_diff,                pt,     "buff_cov_rel_diff", complete);
 }
 
 void load(debruijn_config::simplification::topology_tip_clipper& ttc,
@@ -221,6 +229,13 @@ void load(debruijn_config::simplification::complex_tip_clipper& ctc,
   load(ctc.enabled, pt, "enabled");
 }
 
+void load(debruijn_config::simplification::relative_coverage_edge_disconnector& relative_ed,
+        boost::property_tree::ptree const& pt, bool complete) {
+  using config_common::load;
+  load(relative_ed.enabled, pt, "enabled", complete);
+  load(relative_ed.diff_mult, pt, "diff_mult", complete);
+}
+
 void load(debruijn_config::simplification::relative_coverage_comp_remover& rcc,
           boost::property_tree::ptree const& pt, bool complete) {
   using config_common::load;
@@ -234,12 +249,24 @@ void load(debruijn_config::simplification::relative_coverage_comp_remover& rcc,
 }
 
 void load(debruijn_config::simplification::isolated_edges_remover& ier,
-          boost::property_tree::ptree const& pt, bool /*complete*/) {
+          boost::property_tree::ptree const& pt, bool complete) {
   using config_common::load;
+  load(ier.enabled, pt, "enabled", complete);
+  load(ier.max_length, pt, "max_length", complete);
+  load(ier.max_coverage, pt, "max_coverage", complete);
+  load(ier.max_length_any_cov, pt, "max_length_any_cov", complete);
+}
 
-  load(ier.max_length, pt, "max_length");
-  load(ier.max_coverage, pt, "max_coverage");
-  load(ier.max_length_any_cov, pt, "max_length_any_cov");
+void load(debruijn_config::simplification::init_cleaning& init_clean,
+          boost::property_tree::ptree const& pt, bool complete) {
+  using config_common::load;
+  load(init_clean.self_conj_condition, pt, "self_conj_condition", complete);
+  load(init_clean.early_it_only, pt, "early_it_only", complete);
+  load(init_clean.activation_cov, pt, "activation_cov", complete);
+  load(init_clean.ier, pt, "ier", complete);
+  load(init_clean.tip_condition, pt, "tip_condition", complete);
+  load(init_clean.ec_condition, pt, "ec_condition", complete);
+  load(init_clean.disconnect_flank_cov, pt, "disconnect_flank_cov", complete);
 }
 
 void load(debruijn_config::simplification::complex_bulge_remover& cbr,
@@ -353,6 +380,14 @@ void load(debruijn_config::truseq_analysis& tsa,
   load(tsa.genome_file, pt, "genome_file");
 }
 
+void load(debruijn_config::bwa_aligner& bwa,
+          boost::property_tree::ptree const& pt, bool /*complete*/) {
+    using config_common::load;
+    load(bwa.enabled, pt, "enabled");
+    load(bwa.debug, pt, "debug");
+    load(bwa.path_to_bwa, pt, "path_to_bwa");
+    load(bwa.min_contig_len, pt, "min_contig_len");
+}
 
 void load(debruijn_config::pacbio_processor& pb,
           boost::property_tree::ptree const& pt, bool /*complete*/) {
@@ -367,6 +402,8 @@ void load(debruijn_config::pacbio_processor& pb,
   load(pb.long_seq_limit, pt, "long_seq_limit");
   load(pb.pacbio_min_gap_quantity, pt, "pacbio_min_gap_quantity");
   load(pb.contigs_min_gap_quantity, pt, "contigs_min_gap_quantity");
+  load(pb.max_contigs_gap_length, pt, "max_contigs_gap_length");
+
 }
 
 
@@ -441,7 +478,7 @@ void load_reads(debruijn_config::dataset& ds,
 void load_reference_genome(debruijn_config::dataset& ds,
                            std::string input_dir) {
   if (ds.reference_genome_filename == "") {
-    ds.reference_genome = Sequence();
+    ds.reference_genome = "";
     return;
   }
   if (ds.reference_genome_filename[0] != '/')
@@ -450,19 +487,7 @@ void load_reference_genome(debruijn_config::dataset& ds,
   io::FileReadStream genome_stream(ds.reference_genome_filename);
   io::SingleRead genome;
   genome_stream >> genome;
-  VERIFY(genome.IsValid());
-  ds.reference_genome = genome.sequence();
-}
-
-void load(debruijn_config::simplification::presimplification& presimp,
-          boost::property_tree::ptree const& pt, bool complete) {
-  using config_common::load;
-
-  load(presimp.enabled, pt, "enabled", complete);
-  load(presimp.parallel, pt, "parallel", complete);
-  load(presimp.tip_condition, pt, "tip_condition", complete); // pre tip clipper:
-  load(presimp.ec_condition, pt, "ec_condition", complete); // pre ec remover:
-  load(presimp.ier, pt, "ier", complete);
+  ds.reference_genome = genome.GetSequenceString();
 }
 
 void load(debruijn_config::simplification& simp,
@@ -477,6 +502,7 @@ void load(debruijn_config::simplification& simp,
   load(simp.br, pt, "br", complete); // bulge remover:
   load(simp.ec, pt, "ec", complete); // erroneous connections remover:
   load(simp.rcc, pt, "rcc", complete); // relative coverage component remover:
+  load(simp.relative_ed, pt, "relative_ed", complete); // relative edge disconnector:
   load(simp.tec, pt, "tec", complete); // topology aware erroneous connections remover:
   load(simp.trec, pt, "trec", complete); // topology and reliability based erroneous connections remover:
   load(simp.isec, pt, "isec", complete); // interstrand erroneous connections remover (thorn remover):
@@ -484,24 +510,18 @@ void load(debruijn_config::simplification& simp,
   load(simp.ier, pt, "ier", complete); // isolated edges remover
   load(simp.cbr, pt, "cbr", complete); // complex bulge remover
   load(simp.her, pt, "her", complete); // hidden ec remover
-  load(simp.fast_features, pt, "fast_features", complete); // master switch for speed-up tricks
-  load(simp.fast_activation_cov, pt, "fast_activation_cov", complete);
-  load(simp.presimp, pt, "presimp", complete); // presimplification
-  load(simp.persistent_cycle_iterators, pt, "persistent_cycle_iterators", complete);
-  load(simp.disable_br_in_cycle, pt, "disable_br_in_cycle", complete);
-//  load(simp.stats_mode, pt, "stats_mode", complete); // temporary stats counting mode
-
-  simp.final_tc = simp.tc; // final tip clipper:
-  load(simp.final_tc, pt, "final_tc", false);
-  //final bulge removers:
-  simp.final_br = simp.br; // final bulge remover:
-  load(simp.final_br, pt, "final_br", false);
+  load(simp.init_clean, pt, "init_clean", complete); // presimplification
+  load(simp.final_tc, pt, "final_tc", complete);
+  load(simp.final_br, pt, "final_br", complete);
+  simp.second_final_br = simp.final_br; 
+  load(simp.second_final_br, pt, "second_final_br", false);
 }
 
 void load(debruijn_config::info_printer& printer,
           boost::property_tree::ptree const& pt, bool complete) {
   using config_common::load;
-  load(printer.print_stats, pt, "print_stats", complete);
+  load(printer.basic_stats, pt, "basic_stats", complete);
+  load(printer.extended_stats, pt, "extended_stats", complete);
   load(printer.write_components, pt, "write_components", complete);
   load(printer.components_for_kmer, pt, "components_for_kmer", complete);
   load(printer.components_for_genome_pos, pt, "components_for_genome_pos",
@@ -516,17 +536,17 @@ void load(debruijn_config::info_printer& printer,
   load(printer.write_error_loc, pt, "write_error_loc", complete);
 }
 
-void clear(debruijn_config::info_printer& printer) {
-    printer.print_stats = false;
-    printer.write_components = false;
-    printer.components_for_kmer = "";
-    printer.components_for_genome_pos = "";
-    printer.write_components_along_genome = false;
-    printer.save_full_graph = false;
-    printer.write_full_graph = false;
-    printer.write_full_nc_graph = false;
-    printer.write_error_loc = false;
-}
+//void clear(debruijn_config::info_printer& printer) {
+//    printer.print_stats = false;
+//    printer.write_components = false;
+//    printer.components_for_kmer = "";
+//    printer.components_for_genome_pos = "";
+//    printer.write_components_along_genome = false;
+//    printer.save_full_graph = false;
+//    printer.write_full_graph = false;
+//    printer.write_full_nc_graph = false;
+//    printer.write_error_loc = false;
+//}
 
 
 void load(debruijn_config::info_printers_t& printers,
@@ -632,6 +652,7 @@ void load(debruijn_config& cfg, boost::property_tree::ptree const& pt,
 
   load(cfg.rr_enable, pt, "rr_enable");
   load(cfg.two_step_rr, pt, "two_step_rr");
+  load(cfg.use_intermediate_contigs, pt, "use_intermediate_contigs");
   load(cfg.single_reads_rr, pt, "single_reads_rr");
   cfg.use_single_reads = false;
 
@@ -692,14 +713,24 @@ void load(debruijn_config& cfg, boost::property_tree::ptree const& pt,
   else {
       load(cfg.de, pt, (cfg.ds.single_cell ? "old_sc_de" : "old_usual_de"));
   }
-  cfg.pe_params.name = "multicell";
-  if (cfg.ds.meta)
-    cfg.pe_params.name = "meta";
-  else if (cfg.ds.single_cell)
-    cfg.pe_params.name = "singlecell";
-  else if (cfg.ds.moleculo)
-    cfg.pe_params.name = "moleculo";
-  load(cfg.pe_params, pt, "path_extend_params");
+
+  load(cfg.pe_params, pt, "default_pe");
+  if (cfg.ds.single_cell) {
+      VERIFY(pt.count("sc_pe"));
+      load(cfg.pe_params, pt, "sc_pe", false);
+  }
+  if (cfg.ds.meta) {
+      VERIFY(pt.count("meta_pe"));
+      load(cfg.pe_params, pt, "meta_pe", false);
+  }
+  if (cfg.ds.moleculo) {
+      VERIFY(pt.count("moleculo_pe"));
+      load(cfg.pe_params, pt, "moleculo_pe", false);
+  }
+
+  cfg.prelim_pe_params = cfg.pe_params;
+  VERIFY(pt.count("prelim_pe"));
+  load(cfg.prelim_pe_params, pt, "prelim_pe", false);
 
   if (!cfg.developer_mode) {
       cfg.pe_params.debug_output = false;
@@ -723,14 +754,14 @@ void load(debruijn_config& cfg, boost::property_tree::ptree const& pt,
   load(cfg.con, pt, "construction");
   load(cfg.sensitive_map, pt, "sensitive_mapper");
   load(cfg.flanking_range, pt, "flanking_range");
+  if (cfg.ds.meta) {
+    INFO("Flanking range overwritten to 30 for meta mode");
+    cfg.flanking_range = 30;
+  }
 
   load(cfg.info_printers, pt, "info_printers");
-  if (!cfg.developer_mode) {
-      for (auto iter = cfg.info_printers.begin(); iter != cfg.info_printers.end(); ++iter) {
-          clear(iter->second);
-      }
-  }
   load_reads(cfg.ds, cfg.input_dir);
+
   load_reference_genome(cfg.ds, cfg.input_dir);
 
   cfg.need_mapping = cfg.developer_mode || cfg.correct_mismatches 
@@ -746,6 +777,7 @@ void load(debruijn_config& cfg, boost::property_tree::ptree const& pt,
 
   if (cfg.ds.moleculo)
     load(cfg.simp, pt, "moleculo", false);
+
   if (cfg.diploid_mode)
     load(cfg.simp, pt, "diploid_simp", false);
 
@@ -754,6 +786,7 @@ void load(debruijn_config& cfg, boost::property_tree::ptree const& pt,
 
   cfg.preliminary_simp = cfg.simp;
   load(cfg.preliminary_simp, pt, "preliminary", false);
+  load(cfg.bwa, pt, "bwa_aligner", false);
 }
 
 void load(debruijn_config& cfg, const std::string &filename) {
diff --git a/src/debruijn/config_struct.hpp b/src/debruijn/config_struct.hpp
index 8752280..7a8d7b3 100644
--- a/src/debruijn/config_struct.hpp
+++ b/src/debruijn/config_struct.hpp
@@ -46,14 +46,7 @@ enum info_printer_pos {
     ipp_default = 0,
     ipp_before_first_gap_closer,
     ipp_before_simplification,
-    ipp_tip_clipping,
-    ipp_bulge_removal,
-    ipp_err_con_removal,
     ipp_before_post_simplification,
-    ipp_final_err_con_removal,
-    ipp_final_tip_clipping,
-    ipp_final_bulge_removal,
-    ipp_removing_isolated_edges,
     ipp_final_simplified,
     ipp_final_gap_closed,
     ipp_before_repeat_resolution,
@@ -65,11 +58,8 @@ namespace details {
 
 inline const char* info_printer_pos_name(size_t pos) {
     const char* names[] = { "default", "before_first_gap_closer",
-                            "before_simplification", "tip_clipping", "bulge_removal",
-                            "err_con_removal", "before_post_simplification",
-                            "final_err_con_removal", "final_tip_clipping",
-                            "final_bulge_removal", "removing_isolated_edges",
-                            "final_simplified","final_gap_closed", "before_repeat_resolution" };
+                            "before_simplification", "before_post_simplification",
+                            "final_simplified", "final_gap_closed", "before_repeat_resolution" };
 
     utils::check_array_size < ipp_total > (names);
     return names[pos];
@@ -213,6 +203,8 @@ struct debruijn_config {
     struct simplification {
         struct tip_clipper {
             std::string condition;
+            tip_clipper() {}
+            tip_clipper(std::string condition_) : condition(condition_) {}
         };
 
         struct topology_tip_clipper {
@@ -227,16 +219,24 @@ struct debruijn_config {
 
         struct bulge_remover {
             bool enabled;
+            bool main_iteration_only;
             double max_bulge_length_coefficient;
             size_t max_additive_length_coefficient;
             double max_coverage;
             double max_relative_coverage;
             size_t max_delta;
             double max_relative_delta;
+            size_t max_number_edges;
+            bool parallel;
+            size_t buff_size;
+            double buff_cov_diff;
+            double buff_cov_rel_diff;
         };
 
         struct erroneous_connections_remover {
             std::string condition;
+            erroneous_connections_remover() {}
+            erroneous_connections_remover(std::string condition_) : condition(condition_) {}
         };
 
         struct relative_coverage_ec_remover {
@@ -271,6 +271,7 @@ struct debruijn_config {
         };
 
         struct isolated_edges_remover {
+            bool enabled;
             size_t max_length;
             double max_coverage;
             size_t max_length_any_cov;
@@ -289,6 +290,11 @@ struct debruijn_config {
             double relative_threshold;
         };
 
+        struct relative_coverage_edge_disconnector {
+            bool enabled;
+            double diff_mult;
+        };
+
         struct relative_coverage_comp_remover {
             bool enabled;
             double coverage_gap;
@@ -299,12 +305,15 @@ struct debruijn_config {
             size_t vertex_count_limit;
         };
 
-        struct presimplification {
-            bool enabled;
-            bool parallel;
+        struct init_cleaning {
+            std::string self_conj_condition;
+
+            bool early_it_only;
+            double activation_cov;
             isolated_edges_remover ier;
             std::string tip_condition;
             std::string ec_condition;
+            double disconnect_flank_cov;
         };
 
         size_t cycle_iter_count;
@@ -316,6 +325,7 @@ struct debruijn_config {
         bulge_remover br;
         erroneous_connections_remover ec;
         relative_coverage_comp_remover rcc;
+        relative_coverage_edge_disconnector relative_ed;
         topology_based_ec_remover tec;
         tr_based_ec_remover trec;
         interstrand_ec_remover isec;
@@ -323,16 +333,12 @@ struct debruijn_config {
         isolated_edges_remover ier;
         complex_bulge_remover cbr;
         hidden_ec_remover her;
-        //bool stats_mode;
 
         tip_clipper final_tc;
         bulge_remover final_br;
+        bulge_remover second_final_br;
 
-        bool fast_features;
-        double fast_activation_cov;
-        presimplification presimp;
-        bool persistent_cycle_iterators;
-        bool disable_br_in_cycle;
+        init_cleaning init_clean;
     };
 
     struct construction {
@@ -395,6 +401,7 @@ struct debruijn_config {
       size_t long_seq_limit; //400
       size_t pacbio_min_gap_quantity; //2
       size_t contigs_min_gap_quantity; //1
+      size_t max_contigs_gap_length; // 10000
     };
 
     struct DataSetData {
@@ -408,6 +415,7 @@ struct debruijn_config {
         double insert_size_mad;
         std::map<int, size_t> insert_size_distribution;
 
+        bool binary_coverted;
         bool single_reads_mapped;
 
         uint64_t total_nucls;
@@ -425,6 +433,7 @@ struct debruijn_config {
                 insert_size_right_quantile(0.0),
                 median_insert_size(0.0),
                 insert_size_mad(0.0),
+                binary_coverted(false),
                 single_reads_mapped(false),
                 total_nucls(0),
                 average_coverage(0.0),
@@ -466,7 +475,7 @@ struct debruijn_config {
         std::string reference_genome_filename;
         std::string reads_filename;
 
-        Sequence reference_genome;
+        std::string reference_genome;
 
         dataset(): max_read_length(0), average_coverage(0.0) {
         }
@@ -490,7 +499,8 @@ struct debruijn_config {
     };
 
     struct info_printer {
-        bool print_stats;
+        bool basic_stats;
+        bool extended_stats;
         bool write_components;
         std::string components_for_kmer;
         std::string components_for_genome_pos;
@@ -515,6 +525,13 @@ struct debruijn_config {
         bool use_coverage_threshold;
     };
 
+    struct bwa_aligner {
+        bool enabled;
+        bool debug;
+        std::string path_to_bwa;
+        size_t min_contig_len;
+    };
+
     typedef std::map<info_printer_pos, info_printer> info_printers_t;
 
     std::string dataset_file;
@@ -557,6 +574,7 @@ struct debruijn_config {
 
     bool rr_enable;
     bool two_step_rr;
+    bool use_intermediate_contigs;
 
     single_read_resolving_mode single_reads_rr;
     bool use_single_reads;
@@ -589,6 +607,7 @@ struct debruijn_config {
 
     resolving_mode rm;
     path_extend::pe_config::MainPEParamsT pe_params;
+    path_extend::pe_config::MainPEParamsT prelim_pe_params;
     bool avoid_rc_connections;
 
     construction con;
@@ -604,6 +623,7 @@ struct debruijn_config {
     graph_read_corr_cfg graph_read_corr;
     info_printers_t info_printers;
     kmer_coverage_model kcm;
+    bwa_aligner bwa;
 
     size_t flanking_range;
 
diff --git a/src/debruijn/construction.cpp b/src/debruijn/construction.cpp
index 61a0887..545acb9 100644
--- a/src/debruijn/construction.cpp
+++ b/src/debruijn/construction.cpp
@@ -51,8 +51,8 @@ void Construction::run(conj_graph_pack &gp, const char*) {
         if (lib.type() != io::LibraryType::TrustedContigs)
             continue;
 
-        for (auto it = lib.single_begin(); it != lib.single_end(); ++it) {
-            trusted_contigs.push_back(io::EasyStream(*it, true));
+        for (const auto& read : lib.single_reads()) {
+            trusted_contigs.push_back(io::EasyStream(read, true));
             trusted_contigs_exist = true;
         }
     }
diff --git a/src/debruijn/dataset_readers.hpp b/src/debruijn/dataset_readers.hpp
index 077ccbb..2d69b6e 100644
--- a/src/debruijn/dataset_readers.hpp
+++ b/src/debruijn/dataset_readers.hpp
@@ -24,8 +24,8 @@ io::PairedStreamPtr paired_easy_reader(const io::SequencingLibrary<debruijn_conf
                                        bool use_orientation = true,
                                        io::OffsetType offset_type = io::PhredOffset) {
   io::ReadStreamList<io::PairedRead> streams;
-  for (auto it = lib.paired_begin(); it != lib.paired_end(); ++it) {
-      streams.push_back(io::PairedEasyStream(it->first, it->second, followed_by_rc, insert_size, change_read_order,
+  for (auto read_pair : lib.paired_reads()) {
+      streams.push_back(io::PairedEasyStream(read_pair.first, read_pair.second, followed_by_rc, insert_size, change_read_order,
                                              use_orientation, lib.orientation(), offset_type));
   }
   return io::MultifileWrap<io::PairedRead>(streams);
@@ -39,13 +39,13 @@ io::ReadStreamList<io::SingleRead> single_easy_readers(const io::SequencingLibra
                                        io::OffsetType offset_type = io::PhredOffset) {
   io::ReadStreamList<io::SingleRead> streams;
   if (including_paired_reads) {
-    for (auto it = lib.reads_begin(); it != lib.reads_end(); ++it) {
+    for (const auto& read : lib.reads()) {
       //do we need input_file function here?
-      streams.push_back(io::EasyStream(*it, followed_by_rc, handle_Ns, offset_type));
+      streams.push_back(io::EasyStream(read, followed_by_rc, handle_Ns, offset_type));
     }
   } else {
-    for (auto it = lib.single_begin(); it != lib.single_end(); ++it) {
-      streams.push_back(io::EasyStream(*it, followed_by_rc, handle_Ns, offset_type));
+    for (const auto& read : lib.single_reads()) {
+      streams.push_back(io::EasyStream(read, followed_by_rc, handle_Ns, offset_type));
     }
   }
   return streams;
diff --git a/src/debruijn/debruijn_data.hpp b/src/debruijn/debruijn_data.hpp
index f999530..abfa4d6 100644
--- a/src/debruijn/debruijn_data.hpp
+++ b/src/debruijn/debruijn_data.hpp
@@ -9,6 +9,7 @@
 #include <vector>
 #include <set>
 #include <cstring>
+#include "verify.hpp"
 #include "logger/logger.hpp"
 #include "sequence/sequence_tools.hpp"
 
@@ -106,7 +107,7 @@ public:
 
     const EdgeData MergeData(const std::vector<const EdgeData*>& to_merge, bool safe_merging = true) const;
 
-    std::pair<VertexData, std::pair<EdgeData, EdgeData>> SplitData(const EdgeData& edge, size_t position) const;
+    std::pair<VertexData, std::pair<EdgeData, EdgeData>> SplitData(const EdgeData& edge, size_t position, bool is_self_conj = false) const;
 
     EdgeData GlueData(const EdgeData&, const EdgeData& data2) const;
 
@@ -149,8 +150,16 @@ inline const DeBruijnEdgeData DeBruijnDataMaster::MergeData(const std::vector<co
     return EdgeData(MergeOverlappingSequences(ss, k_, safe_merging));
 }
 
-inline std::pair<DeBruijnVertexData, std::pair<DeBruijnEdgeData, DeBruijnEdgeData>> DeBruijnDataMaster::SplitData(const EdgeData& edge, size_t position) const {
-    return std::make_pair(VertexData(), std::make_pair(EdgeData(edge.nucls().Subseq(0, position + k_)), EdgeData(edge.nucls().Subseq(position))));
+inline std::pair<DeBruijnVertexData, std::pair<DeBruijnEdgeData, DeBruijnEdgeData>> DeBruijnDataMaster::SplitData(const EdgeData& edge,
+                                                                                                                  size_t position, 
+                                                                                                                  bool is_self_conj) const {
+    const Sequence& nucls = edge.nucls();
+    size_t end = nucls.size();
+    if (is_self_conj) {
+        VERIFY(position < end);
+        end -= position;
+    }
+    return std::make_pair(VertexData(), std::make_pair(EdgeData(edge.nucls().Subseq(0, position + k_)), EdgeData(nucls.Subseq(position, end))));
 }
 
 inline DeBruijnEdgeData DeBruijnDataMaster::GlueData(const DeBruijnEdgeData&, const DeBruijnEdgeData& data2) const {
diff --git a/src/debruijn/debruijn_stats.cpp b/src/debruijn/debruijn_stats.cpp
index a7e0c6e..96f2e67 100644
--- a/src/debruijn/debruijn_stats.cpp
+++ b/src/debruijn/debruijn_stats.cpp
@@ -1,525 +1,525 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-// FIXME: Refactor and turn into stage
-
-//todo rewrite with extended sequence mapper!
-template<class Graph, class Index>
-class EtalonPairedInfoCounter {
-	typedef typename Graph::EdgeId EdgeId;
-
-	const Graph& g_;
-	const Index& index_;
-	const KmerMapper<Graph>& kmer_mapper_;
-	size_t k_;
-
-	size_t insert_size_;
-	size_t read_length_;
-	int gap_;
-	size_t delta_;
-
-  void AddEtalonInfo(PairedInfoIndexT<Graph>& index, EdgeId e1, EdgeId e2, double d) {
-    index.AddPairInfo(e1, e2, d, 1000., 0.);
-	}
-
-  void ProcessSequence(const Sequence& sequence, PairedInfoIndexT<Graph>& index)
-  {
-		int mod_gap = (gap_ + (int) k_ > (int) delta_ ) ? gap_ - (int) delta_ : 0 - (int) k_;
-		runtime_k::RtSeq left(k_ +1, sequence);
-		left >>= 0;
-		for (size_t left_idx = 0;
-             left_idx + 2 * (k_ + 1) + mod_gap <= sequence.size();
-             ++left_idx) {
-			left <<= sequence[left_idx + k_];
-			runtime_k::RtSeq left_upd = kmer_mapper_.Substitute(left);
-			if (!index_.contains(left_upd)) {
-				continue;
-			}
-			pair<EdgeId, size_t> left_pos = index_.get(left_upd);
-
-			size_t right_idx = left_idx + k_ + 1 + mod_gap;
-			runtime_k::RtSeq right(k_ + 1, sequence, right_idx);
-			right >>= 0;
-			for (;
-			     right_idx + k_ + 1 <= left_idx + insert_size_ + delta_ && right_idx + k_ + 1 <= sequence.size();
-			     ++right_idx) {
-				right <<= sequence[right_idx + k_];
-				runtime_k::RtSeq right_upd = kmer_mapper_.Substitute(right);
-				if (!index_.contains(right_upd)) {
-					continue;
-				}
-				pair<EdgeId, size_t> right_pos = index_.get(right_upd);
-
-				AddEtalonInfo(index, left_pos.first, right_pos.first,
-				              0. + (double) right_idx - (double) left_idx +
-				              (double) left_pos.second - (double) right_pos.second);
-			}
-		}
-	}
-
-public:
-    EtalonPairedInfoCounter(const Graph& g, const Index& index,
-                            const KmerMapper<Graph>& kmer_mapper,
-                            size_t insert_size, size_t read_length,
-                            size_t delta, size_t k)
-            : g_(g),
-              index_(index),
-              kmer_mapper_(kmer_mapper),
-              k_(k),
-              insert_size_(insert_size),
-              read_length_(read_length),
-              gap_((int) (insert_size_ - 2 * read_length_)),
-              delta_(delta) {
-//		VERIFY(insert_size_ >= 2 * read_length_);
-    }
-
-    void FillEtalonPairedInfo(const Sequence& genome,
-                              omnigraph::de::PairedInfoIndexT<Graph>& paired_info) {
-        ProcessSequence(genome, paired_info);
-        ProcessSequence(!genome, paired_info);
-    }
-};
-
-template<class Graph>
-void GetAllDistances(const PairedInfoIndexT<Graph>& paired_index,
-                     PairedInfoIndexT<Graph>& result,
-                     const GraphDistanceFinder<Graph>& dist_finder) {
-    for (auto iter = paired_index.begin(); iter != paired_index.end(); ++iter) {
-        EdgeId e1 = iter.first();
-        EdgeId e2 = iter.second();
-        vector<size_t> forward = dist_finder.GetGraphDistancesLengths(e1, e2);
-        for (size_t i = 0; i < forward.size(); ++i)
-            result.AddPairInfo(e1, e2, (double) forward[i], -10.0, 0.0, false);
-    }
-}
-
-template<class Graph>
-void GetAllDistances(const Graph& g,
-                     const PairedInfoIndexT<Graph>& paired_index,
-                     const PairedInfoIndexT<Graph>& clustered_index,
-                     const GraphDistanceFinder<Graph>& dist_finder,
-                     PairedInfoIndexT<Graph>& result)
-{
-    typedef typename Graph::EdgeId EdgeId;
-    typedef vector<EdgeId> Path;
-    for (auto iter = paired_index.begin(); iter != paired_index.end(); ++iter) {
-        EdgeId first = iter.first();
-        EdgeId second = iter.second();
-        const vector<Path>& raw_paths = dist_finder.GetGraphDistances(first, second);
-        // adding first edge to every path
-        vector<Path> paths;
-        for (size_t i = 0; i < raw_paths.size(); ++i) {
-            Path path;
-            path.push_back(first);
-            for (size_t j = 0; j < raw_paths[i].size(); ++j)
-                path.push_back(raw_paths[i][j]);
-            path.push_back(second);
-
-            paths.push_back(path);
-        }
-        vector<size_t> path_lengths;
-        vector<double> path_weights;
-        for (size_t i = 0; i < paths.size(); ++i) {
-            size_t len_total = 0 ;
-            double weight_total = 0.;
-            for (size_t j = 0; j < paths[i].size(); ++j) {
-                len_total += g.length(paths[i][j]);
-                size_t cur_length = 0;
-                for (size_t l = j + 1; l < paths[i].size(); ++l) {
-                    cur_length += g.length(paths[i][l - 1]);
-                    const de::Histogram& infos = clustered_index.GetEdgePairInfo(paths[i][j], paths[i][l]);
-                    for (auto iterator = infos.begin(); iterator != infos.end(); ++iterator) {
-                        const Point& info = *iterator;
-                        if (info.d == cur_length) {
-                            weight_total += info.weight;
-                            break;
-                        }
-                    }
-                }
-            }
-            path_lengths.push_back(len_total - g.length(second));
-            path_weights.push_back(weight_total);
-        }
-
-        for (size_t i = 0; i < paths.size(); ++i) {
-            cout << first.int_id() << "(" << g.length(first) << ") "
-                 << second.int_id() << "(" << g.length(second) << ") : "
-                 << (i + 1) << "-th path (" << path_lengths[i] << ", " << path_weights[i] << ")   :::   ";
-            for (size_t j = 0; j < paths[i].size(); ++j) {
-                cout << paths[i][j].int_id() << "(" << g.length(paths[i][j]) << ") ";
-            }
-            cout << endl;
-        }
-    }
-}
-
-template<class Graph, class Index>
-void FillEtalonPairedIndex(PairedInfoIndexT<Graph>& etalon_paired_index,
-                           const Graph &g, const Index& index,
-                           const KmerMapper<Graph>& kmer_mapper, size_t is, size_t rs,
-                           size_t delta, const Sequence& genome, size_t k)
-{
-    VERIFY_MSG(genome.size() > 0,
-               "The genome seems not to be loaded, program will exit");
-    INFO((string) (FormattedString("Counting etalon paired info for genome of length=%i, k=%i, is=%i, rs=%i, delta=%i")
-                   << genome.size() << k << is << rs << delta));
-
-    EtalonPairedInfoCounter<Graph, Index> etalon_paired_info_counter(g, index, kmer_mapper, is, rs, delta, k);
-    etalon_paired_info_counter.FillEtalonPairedInfo(genome, etalon_paired_index);
-
-    DEBUG("Etalon paired info counted");
-}
-
-template<class Graph, class Index>
-void FillEtalonPairedIndex(PairedInfoIndexT<Graph>& etalon_paired_index,
-                           const Graph &g, const Index& index,
-                           const KmerMapper<Graph>& kmer_mapper, const Sequence& genome,
-                           const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
-                           size_t k) {
-
-    FillEtalonPairedIndex(etalon_paired_index, g, index, kmer_mapper,
-                          size_t(lib.data().mean_insert_size), lib.data().read_length, size_t(lib.data().insert_size_deviation),
-                          genome, k);
-
-    //////////////////DEBUG
-    //	SimpleSequenceMapper<k + 1, Graph> simple_mapper(g, index);
-    //	Path<EdgeId> path = simple_mapper.MapSequence(genome);
-    //	SequenceBuilder sequence_builder;
-    //	sequence_builder.append(Seq<k>(g.EdgeNucls(path[0])));
-    //	for (auto it = path.begin(); it != path.end(); ++it) {
-    //		sequence_builder.append(g.EdgeNucls(*it).Subseq(k));
-    //	}
-    //	Sequence new_genome = sequence_builder.BuildSequence();
-    //	NewEtalonPairedInfoCounter<k, Graph> new_etalon_paired_info_counter(g, index,
-    //			insert_size, read_length, insert_size * 0.1);
-    //	PairedInfoIndexT<Graph> new_paired_info_index(g);
-    //	new_etalon_paired_info_counter.FillEtalonPairedInfo(new_genome, new_paired_info_index);
-    //	CheckInfoEquality(etalon_paired_index, new_paired_info_index);
-    //////////////////DEBUG
-    //	INFO("Etalon paired info counted");
-}
-
-template<class Graph>
-void CountPairedInfoStats(const Graph& g,
-                          const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
-                          const PairedInfoIndexT<Graph>& paired_index,
-                          const PairedInfoIndexT<Graph>& etalon_index,
-                          const string& output_folder) {
-    PairedInfoIndexT<Graph> filtered_index = paired_index;
-//    PairInfoWeightFilter<Graph>(g, 40).Filter(filtered_index);
-    PairInfoFilter<Graph>(PairInfoWeightChecker<Graph>(g, 40)).Filter(filtered_index);
-    INFO("Counting paired info stats");
-    EdgePairStat<Graph>(g, paired_index, output_folder).Count();
-
-    //todo remove filtration if launch on etalon info is ok
-    UniquePathStat<Graph>(g, filtered_index,
-                          (size_t)math::round(lib.data().mean_insert_size),
-                          lib.data().read_length,
-                          0.1 * lib.data().mean_insert_size).Count();
-    UniqueDistanceStat<Graph>(etalon_index).Count();
-    INFO("Paired info stats counted");
-}
-
-// leave only those pairs, which edges have no path in the graph between them
-template<class Graph>
-void FilterIndexWithExistingPaths(PairedIndexT& scaf_clustered_index,
-                                  const PairedIndexT& index,
-                                  const conj_graph_pack &gp,
-                                  const GraphDistanceFinder<Graph>& dist_finder) {
-    for (auto it = index.begin(); it != index.end(); ++it) {
-        const de::Histogram& histogram = *it;
-        EdgeId e1 = it.first();
-        EdgeId e2 = it.second();
-        if (gp.g.OutgoingEdgeCount(gp.g.EdgeEnd(e1)) == 0 && gp.g.IncomingEdgeCount(gp.g.EdgeEnd(e1)) == 1 &&
-            gp.g.IncomingEdgeCount(gp.g.EdgeStart(e2)) == 0 && gp.g.OutgoingEdgeCount(gp.g.EdgeStart(e2)) == 1)     {
-            vector<size_t> dists = dist_finder.GetGraphDistancesLengths(e1, e2);
-            if (dists.size() == 0)
-                for (auto point_iter = histogram.begin(); point_iter != histogram.end(); ++point_iter)
-                    if (math::gr(point_iter->d, 0.)) {
-                        scaf_clustered_index.AddPairInfo(it.first(), it.second(),
-                                                         point_iter->d, point_iter->weight, 20.);
-                    }
-        }
-    }
-}
-
-inline
-void tSeparatedStats(conj_graph_pack& gp, const Sequence& contig,
-                     PairedInfoIndex<conj_graph_pack::graph_t> &ind,
-                     const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
-                     size_t /*k*/) {
-    typedef omnigraph::de::PairInfo<EdgeId> PairInfo;
-
-    MappingPath<Graph::EdgeId> m_path1 = FindGenomeMappingPath(contig, gp.g,
-                                                               gp.index, gp.kmer_mapper);
-
-    map<Graph::EdgeId, vector<pair<int, int>>> inGenomeWay;
-    int CurI = 0;
-    int gaps = 0;
-    for (size_t i = 0; i < m_path1.size(); i++) {
-        bool new_edge_added = false;
-        EdgeId ei = m_path1[i].first;
-        MappingRange mr = m_path1[i].second;
-        int start = (int)(mr.initial_range.start_pos - mr.mapped_range.start_pos);
-        if (inGenomeWay.find(ei) == inGenomeWay.end()) {
-            vector<pair<int, int>> tmp;
-            tmp.push_back(make_pair(CurI, start));
-            inGenomeWay[ei] = tmp;
-            CurI++;
-            new_edge_added = true;
-            DEBUG("Edge " << gp.g.str(ei) << " num " << CurI << " pos " << start);
-        } else {
-            if (m_path1[i - 1].first == ei) {
-                if (abs(start - inGenomeWay[ei][(inGenomeWay[ei].size() - 1)].second) > 50) {
-                    inGenomeWay[ei].push_back(make_pair(CurI, start));
-                    CurI++;
-                    new_edge_added = true;
-                    DEBUG("Edge " << gp.g().str(ei) << " num " << CurI << " pos " << start);
-                }
-            } else {
-                inGenomeWay[ei].push_back(make_pair(CurI, start));
-                CurI++;
-                new_edge_added = true;
-                DEBUG("Edge " << gp.g.str(ei) << " num " << CurI << " pos " << start);
-            }
-        }
-        if (new_edge_added && (i > 0)) {
-            if (gp.g.EdgeStart(ei) != gp.g.EdgeEnd(m_path1[i - 1].first)) {
-                gaps++;
-            }
-        }
-    }
-    INFO("Totaly " << CurI << " edges in genome path, with " << gaps << "not adjacent conequences");
-
-    vector<int> stats(10);
-    vector<int> stats_d(10);
-    int PosInfo = 0;
-    int AllignedPI = 0;
-    int ExactDPI = 0;
-    int OurD = int(lib.data().mean_insert_size) - int(lib.data().read_length);
-    for (auto p_iter = ind.begin(), p_end_iter = ind.end();
-         p_iter != p_end_iter; ++p_iter) {
-        vector<PairInfo> pi = *p_iter;
-        for (size_t j = 0; j < pi.size(); j++) {
-            EdgeId left_edge = pi[j].first;
-            EdgeId right_edge = pi[j].second;
-            double d = pi[j].d();
-            if (d < 0.001)
-                continue;
-            int best_d = 100;
-            int best_t = 0;
-            PosInfo++;
-            DEBUG(
-                "PairInfo " << gp.g().str(left_edge) << " -- " << gp.g().str(right_edge) << " d " << d);
-            bool ExactOnD = false;
-            for (size_t left_i = 0; left_i < inGenomeWay[left_edge].size();
-                 left_i++)
-                for (size_t right_i = 0;
-                     right_i < inGenomeWay[right_edge].size(); right_i++) {
-                    if (best_d
-                        > abs(
-                            inGenomeWay[right_edge][right_i].second
-                            - inGenomeWay[left_edge][left_i].second
-                            - d)) {
-                        best_d = (int)math::round(abs(
-                            inGenomeWay[right_edge][right_i].second
-                            - inGenomeWay[left_edge][left_i].second
-                            - d));
-                        best_t = inGenomeWay[right_edge][right_i].first
-                                 - inGenomeWay[left_edge][left_i].first;
-                        DEBUG("best d " << best_d);
-                        if ((inGenomeWay[right_edge][right_i].second
-                             - inGenomeWay[left_edge][left_i].second
-                             - (int) gp.g.length(left_edge) <= OurD)
-                            && (inGenomeWay[right_edge][right_i].second
-                                - inGenomeWay[left_edge][left_i].second
-                                + (int) gp.g.length(right_edge) >= OurD))
-                            ExactOnD = true;
-                        else
-                            ExactOnD = false;
-                    }
-                }
-            if (best_t > 5)
-                best_t = 5;
-            if (best_d < 100) {
-                AllignedPI++;
-                stats[best_t]++;
-                if (ExactOnD) {
-                    stats_d[best_t]++;
-                    ExactDPI++;
-                }
-            }
-
-        }
-    }INFO(
-        "Total positive pair info " << PosInfo << " alligned to genome " << AllignedPI << " with exact distance " << ExactDPI);
-    INFO(
-        "t-separated stats Alligneg: 1 - " << stats[1] << " 2 - " << stats[2] << " 3 - " << stats[3] << " 4 - " << stats[4] << " >4 - " << stats[5]);
-    INFO(
-        "t-separated stats Exact: 1 - " << stats_d[1] << " 2 - " << stats_d[2] << " 3 - " << stats_d[3] << " 4 - " << stats_d[4] << " >4 - " << stats[5]);
-}
-
-template<class Graph>
-void CountAndSaveAllPaths(const Graph& g, const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
-                          const PairedInfoIndexT<Graph>& paired_index, const PairedInfoIndexT<Graph>& /*clustered_index*/) {
-    PairedIndexT all_paths(g);
-    GetAllDistances<Graph>(paired_index,
-                           all_paths,
-                           GraphDistanceFinder<Graph>(g,
-                                                      size_t(lib.data().mean_insert_size),
-                                                      lib.data().read_length,
-                                                      size_t(lib.data().insert_size_deviation)));
-
-    std::string dir_name = cfg::get().output_dir + "estimation_qual/";
-    make_dir(dir_name);
-
-    graphio::ConjugateDataPrinter<Graph> printer(g);
-    printer.savePaired(dir_name + "paths", all_paths);
-
-    //PairedIndexT& all_paths_2(g);
-    //GetAllDistances<Graph>(g,
-    //paired_index, clustered_index,
-    //all_paths_2,
-    //GraphDistanceFinder<Graph>(g, *cfg::get().ds.IS, *cfg::get().ds.RL,
-    //size_t(*cfg::get().ds.is_var)));
-    //printer.savePaired(dir_name + "paths_all", all_paths_2);
-}
-
-void FillAndCorrectEtalonPairedInfo(PairedIndexT&  corrected_etalon_index,
-                                    const conj_graph_pack& gp,
-                                    const PairedIndexT&  paired_index, size_t insert_size,
-                                    size_t read_length, size_t delta,
-                                    bool save_etalon_info_history = false) {
-    INFO("Filling etalon paired index");
-    PairedIndexT etalon_index(gp.g);
-    bool successful_load = false;
-    if (cfg::get().entry_point >= ws_distance_estimation) {
-        string p = path::append_path(cfg::get().load_from, "../etalon");
-        if (!path::is_regular_file(p + ".prd")) {
-            DEBUG("file " << p + ".prd" << " does not exist");
-        }
-        else {
-            INFO("Loading etalon pair info from the previous run...");
-            Graph& graph = const_cast<Graph&>(gp.g);
-            graphio::ConjugateDataScanner<Graph> scanner(graph);
-            scanner.loadPaired(p, etalon_index);
-            path::files_t files;
-            files.push_back(p);
-            path::copy_files_by_prefix(files, cfg::get().output_dir);
-            successful_load = true;
-        }
-    }
-    if (!successful_load)
-        FillEtalonPairedIndex(etalon_index, gp.g,
-                              gp.index, gp.kmer_mapper, insert_size, read_length, delta,
-                              gp.genome, gp.k_value);
-    INFO("Etalon paired index filled");
-
-    INFO("Correction of etalon paired info has been started");
-
-    INFO("Filtering etalon info");
-    //leave only info between edges both present in paired_index
-    PairedIndexT filtered_etalon_index(gp.g);
-    for (auto iter = etalon_index.begin(); iter != etalon_index.end(); ++iter) {
-        const de::Histogram& histogram = *iter;
-        EdgeId first_edge = iter.first();
-        EdgeId second_edge = iter.second();
-        if (paired_index.GetEdgePairInfo(first_edge, second_edge).size() > 0) {
-            for (auto point = histogram.begin(); point != histogram.end(); ++point)
-                filtered_etalon_index.AddPairInfo(first_edge, second_edge, *point);
-        }
-        else
-            DEBUG("Filtering out pair_info " << gp.g.int_id(first_edge) << " "
-                  << gp.g.int_id(second_edge));
-    }
-
-    INFO("Pushing etalon info through estimator");
-    GraphDistanceFinder<Graph> dist_finder(gp.g, insert_size, read_length, delta);
-    DistanceEstimator<Graph> estimator(gp.g, filtered_etalon_index, dist_finder, 0., 4.);
-    estimator.Estimate(corrected_etalon_index);
-    if (save_etalon_info_history) {
-        INFO("Saving etalon paired info indices on different stages");
-        ConjugateDataPrinter<Graph> data_printer(gp.g);
-        data_printer.savePaired(cfg::get().output_dir + "etalon", etalon_index);
-        data_printer.savePaired(cfg::get().output_dir + "etalon_filtered_by_index",
-                                filtered_etalon_index);
-        data_printer.savePaired(cfg::get().output_dir + "etalon_corrected_by_graph",
-                                corrected_etalon_index);
-        INFO("Everything is saved");
-
-        if (cfg::get().paired_info_scaffolder) {
-            GraphDistanceFinder<Graph> dist_finder(gp.g, insert_size, read_length, delta);
-            INFO("Saving paired information statistics for a scaffolding");
-            PairedIndexT scaf_etalon_index(gp.g);
-            FilterIndexWithExistingPaths(scaf_etalon_index, etalon_index, gp, dist_finder);
-            data_printer.savePaired(
-                cfg::get().output_dir + "scaf_etalon",
-                scaf_etalon_index);
-            PairedIndexT scaf_filtered_etalon_index(gp.g);
-            FilterIndexWithExistingPaths(scaf_filtered_etalon_index, filtered_etalon_index, gp, dist_finder);
-            data_printer.savePaired(
-                cfg::get().output_dir + "scaf_etalon_filtered",
-                scaf_filtered_etalon_index);
-        }
-
-        INFO("Everything saved");
-    }
-    INFO("Correction finished");
-}
-
-void CountClusteredPairedInfoStats(const conj_graph_pack &gp,
-                                   const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
-                                   const PairedInfoIndexT<Graph> &paired_index,
-                                   const PairedInfoIndexT<Graph> &clustered_index) {
-    PairedIndexT etalon_index(gp.g);
-
-    FillAndCorrectEtalonPairedInfo(etalon_index, gp, paired_index,
-                                 (size_t)math::round(lib.data().mean_insert_size),
-                                 lib.data().read_length,
-                                 (size_t)math::round(lib.data().insert_size_deviation), true);
-
-	CountAndSaveAllPaths(gp.g, lib, paired_index, clustered_index);
-
-	INFO("Counting clustered info stats");
-	EdgeQuality<Graph, Index> edge_qual(gp.g, gp.index, gp.kmer_mapper, gp.genome);
-  //EstimationQualityStat<Graph> estimation_stat(gp.g, edge_qual,
-                                              //paired_index, clustered_index, etalon_index);
-  //estimation_stat.Count();
-  //estimation_stat.SaveStats(cfg::get().output_dir + "estimation_qual/");
-
-	INFO("Counting overall cluster stat");
-	ClusterStat<Graph>(clustered_index).Count();
-	INFO("Overall cluster stat");
-
-  if (cfg::get().paired_info_scaffolder) {
-		ConjugateDataPrinter<Graph> data_printer(gp.g);
-    INFO("Generating the statistics of pair info for scaffolding");
-    PairedIndexT scaf_clustered_index(gp.g);
-    FilterIndexWithExistingPaths(scaf_clustered_index,
-                                 clustered_index, gp,
-                                 GraphDistanceFinder<Graph>(gp.g,
-                                         (size_t)math::round(lib.data().mean_insert_size),
-                                         lib.data().read_length,
-                                         (size_t)math::round(lib.data().insert_size_deviation)));
-    data_printer.savePaired(cfg::get().output_dir + "scaf_clustered",
-                            scaf_clustered_index);
-  }
-  //  PairedInfoIndexT<Graph> etalon_clustered_index;
-	//	DistanceEstimator<Graph> estimator(g, etalon_index, insert_size,
-	//			max_read_length, cfg::get().de.delta,
-	//			cfg::get().de.linkage_distance, cfg::get().de.max_distance);
-	//	estimator.Estimate(etalon_clustered_index);
-
-  //  PairedInfoIndexT<Graph> filtered_clustered_index(g);
-	//	PairInfoFilter<Graph> (g, 1000.).Filter(
-  //      clustered_index[>etalon_clustered_index<], filtered_clustered_index);
-	INFO("Counting mate-pair transformation stat");
-	MatePairTransformStat<Graph>(gp.g, //filtered_
-	    clustered_index).Count();
-	INFO("Mate-pair transformation stat counted");
-	INFO("Clustered info stats counted");
-}
+////***************************************************************************
+////* Copyright (c) 2015 Saint Petersburg State University
+////* Copyright (c) 2011-2014 Saint Petersburg Academic University
+////* All Rights Reserved
+////* See file LICENSE for details.
+////***************************************************************************
+//
+//// FIXME: Refactor and turn into stage
+//
+////todo rewrite with extended sequence mapper!
+//template<class Graph, class Index>
+//class EtalonPairedInfoCounter {
+//	typedef typename Graph::EdgeId EdgeId;
+//
+//	const Graph& g_;
+//	const Index& index_;
+//	const KmerMapper<Graph>& kmer_mapper_;
+//	size_t k_;
+//
+//	size_t insert_size_;
+//	size_t read_length_;
+//	int gap_;
+//	size_t delta_;
+//
+//  void AddEtalonInfo(PairedInfoIndexT<Graph>& index, EdgeId e1, EdgeId e2, double d) {
+//    index.AddPairInfo(e1, e2, d, 1000., 0.);
+//	}
+//
+//  void ProcessSequence(const Sequence& sequence, PairedInfoIndexT<Graph>& index)
+//  {
+//		int mod_gap = (gap_ + (int) k_ > (int) delta_ ) ? gap_ - (int) delta_ : 0 - (int) k_;
+//		runtime_k::RtSeq left(k_ +1, sequence);
+//		left >>= 0;
+//		for (size_t left_idx = 0;
+//             left_idx + 2 * (k_ + 1) + mod_gap <= sequence.size();
+//             ++left_idx) {
+//			left <<= sequence[left_idx + k_];
+//			runtime_k::RtSeq left_upd = kmer_mapper_.Substitute(left);
+//			if (!index_.contains(left_upd)) {
+//				continue;
+//			}
+//			pair<EdgeId, size_t> left_pos = index_.get(left_upd);
+//
+//			size_t right_idx = left_idx + k_ + 1 + mod_gap;
+//			runtime_k::RtSeq right(k_ + 1, sequence, right_idx);
+//			right >>= 0;
+//			for (;
+//			     right_idx + k_ + 1 <= left_idx + insert_size_ + delta_ && right_idx + k_ + 1 <= sequence.size();
+//			     ++right_idx) {
+//				right <<= sequence[right_idx + k_];
+//				runtime_k::RtSeq right_upd = kmer_mapper_.Substitute(right);
+//				if (!index_.contains(right_upd)) {
+//					continue;
+//				}
+//				pair<EdgeId, size_t> right_pos = index_.get(right_upd);
+//
+//				AddEtalonInfo(index, left_pos.first, right_pos.first,
+//				              0. + (double) right_idx - (double) left_idx +
+//				              (double) left_pos.second - (double) right_pos.second);
+//			}
+//		}
+//	}
+//
+//public:
+//    EtalonPairedInfoCounter(const Graph& g, const Index& index,
+//                            const KmerMapper<Graph>& kmer_mapper,
+//                            size_t insert_size, size_t read_length,
+//                            size_t delta, size_t k)
+//            : g_(g),
+//              index_(index),
+//              kmer_mapper_(kmer_mapper),
+//              k_(k),
+//              insert_size_(insert_size),
+//              read_length_(read_length),
+//              gap_((int) (insert_size_ - 2 * read_length_)),
+//              delta_(delta) {
+////		VERIFY(insert_size_ >= 2 * read_length_);
+//    }
+//
+//    void FillEtalonPairedInfo(const Sequence& genome,
+//                              omnigraph::de::PairedInfoIndexT<Graph>& paired_info) {
+//        ProcessSequence(genome, paired_info);
+//        ProcessSequence(!genome, paired_info);
+//    }
+//};
+//
+//template<class Graph>
+//void GetAllDistances(const PairedInfoIndexT<Graph>& paired_index,
+//                     PairedInfoIndexT<Graph>& result,
+//                     const GraphDistanceFinder<Graph>& dist_finder) {
+//    for (auto iter = paired_index.begin(); iter != paired_index.end(); ++iter) {
+//        EdgeId e1 = iter.first();
+//        EdgeId e2 = iter.second();
+//        vector<size_t> forward = dist_finder.GetGraphDistancesLengths(e1, e2);
+//        for (size_t i = 0; i < forward.size(); ++i)
+//            result.AddPairInfo(e1, e2, (double) forward[i], -10.0, 0.0, false);
+//    }
+//}
+//
+//template<class Graph>
+//void GetAllDistances(const Graph& g,
+//                     const PairedInfoIndexT<Graph>& paired_index,
+//                     const PairedInfoIndexT<Graph>& clustered_index,
+//                     const GraphDistanceFinder<Graph>& dist_finder,
+//                     PairedInfoIndexT<Graph>& result)
+//{
+//    typedef typename Graph::EdgeId EdgeId;
+//    typedef vector<EdgeId> Path;
+//    for (auto iter = paired_index.begin(); iter != paired_index.end(); ++iter) {
+//        EdgeId first = iter.first();
+//        EdgeId second = iter.second();
+//        const vector<Path>& raw_paths = dist_finder.GetGraphDistances(first, second);
+//        // adding first edge to every path
+//        vector<Path> paths;
+//        for (size_t i = 0; i < raw_paths.size(); ++i) {
+//            Path path;
+//            path.push_back(first);
+//            for (size_t j = 0; j < raw_paths[i].size(); ++j)
+//                path.push_back(raw_paths[i][j]);
+//            path.push_back(second);
+//
+//            paths.push_back(path);
+//        }
+//        vector<size_t> path_lengths;
+//        vector<double> path_weights;
+//        for (size_t i = 0; i < paths.size(); ++i) {
+//            size_t len_total = 0 ;
+//            double weight_total = 0.;
+//            for (size_t j = 0; j < paths[i].size(); ++j) {
+//                len_total += g.length(paths[i][j]);
+//                size_t cur_length = 0;
+//                for (size_t l = j + 1; l < paths[i].size(); ++l) {
+//                    cur_length += g.length(paths[i][l - 1]);
+//                    const de::Histogram& infos = clustered_index.GetEdgePairInfo(paths[i][j], paths[i][l]);
+//                    for (auto iterator = infos.begin(); iterator != infos.end(); ++iterator) {
+//                        const Point& info = *iterator;
+//                        if (info.d == cur_length) {
+//                            weight_total += info.weight;
+//                            break;
+//                        }
+//                    }
+//                }
+//            }
+//            path_lengths.push_back(len_total - g.length(second));
+//            path_weights.push_back(weight_total);
+//        }
+//
+//        for (size_t i = 0; i < paths.size(); ++i) {
+//            cout << first.int_id() << "(" << g.length(first) << ") "
+//                 << second.int_id() << "(" << g.length(second) << ") : "
+//                 << (i + 1) << "-th path (" << path_lengths[i] << ", " << path_weights[i] << ")   :::   ";
+//            for (size_t j = 0; j < paths[i].size(); ++j) {
+//                cout << paths[i][j].int_id() << "(" << g.length(paths[i][j]) << ") ";
+//            }
+//            cout << endl;
+//        }
+//    }
+//}
+//
+//template<class Graph, class Index>
+//void FillEtalonPairedIndex(PairedInfoIndexT<Graph>& etalon_paired_index,
+//                           const Graph &g, const Index& index,
+//                           const KmerMapper<Graph>& kmer_mapper, size_t is, size_t rs,
+//                           size_t delta, const Sequence& genome, size_t k)
+//{
+//    VERIFY_MSG(genome.size() > 0,
+//               "The genome seems not to be loaded, program will exit");
+//    INFO((string) (FormattedString("Counting etalon paired info for genome of length=%i, k=%i, is=%i, rs=%i, delta=%i")
+//                   << genome.size() << k << is << rs << delta));
+//
+//    EtalonPairedInfoCounter<Graph, Index> etalon_paired_info_counter(g, index, kmer_mapper, is, rs, delta, k);
+//    etalon_paired_info_counter.FillEtalonPairedInfo(genome, etalon_paired_index);
+//
+//    DEBUG("Etalon paired info counted");
+//}
+//
+//template<class Graph, class Index>
+//void FillEtalonPairedIndex(PairedInfoIndexT<Graph>& etalon_paired_index,
+//                           const Graph &g, const Index& index,
+//                           const KmerMapper<Graph>& kmer_mapper, const Sequence& genome,
+//                           const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
+//                           size_t k) {
+//
+//    FillEtalonPairedIndex(etalon_paired_index, g, index, kmer_mapper,
+//                          size_t(lib.data().mean_insert_size), lib.data().read_length, size_t(lib.data().insert_size_deviation),
+//                          genome, k);
+//
+//    //////////////////DEBUG
+//    //	SimpleSequenceMapper<k + 1, Graph> simple_mapper(g, index);
+//    //	Path<EdgeId> path = simple_mapper.MapSequence(genome);
+//    //	SequenceBuilder sequence_builder;
+//    //	sequence_builder.append(Seq<k>(g.EdgeNucls(path[0])));
+//    //	for (auto it = path.begin(); it != path.end(); ++it) {
+//    //		sequence_builder.append(g.EdgeNucls(*it).Subseq(k));
+//    //	}
+//    //	Sequence new_genome = sequence_builder.BuildSequence();
+//    //	NewEtalonPairedInfoCounter<k, Graph> new_etalon_paired_info_counter(g, index,
+//    //			insert_size, read_length, insert_size * 0.1);
+//    //	PairedInfoIndexT<Graph> new_paired_info_index(g);
+//    //	new_etalon_paired_info_counter.FillEtalonPairedInfo(new_genome, new_paired_info_index);
+//    //	CheckInfoEquality(etalon_paired_index, new_paired_info_index);
+//    //////////////////DEBUG
+//    //	INFO("Etalon paired info counted");
+//}
+//
+//template<class Graph>
+//void CountPairedInfoStats(const Graph& g,
+//                          const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
+//                          const PairedInfoIndexT<Graph>& paired_index,
+//                          const PairedInfoIndexT<Graph>& etalon_index,
+//                          const string& output_folder) {
+//    PairedInfoIndexT<Graph> filtered_index = paired_index;
+////    PairInfoWeightFilter<Graph>(g, 40).Filter(filtered_index);
+//    PairInfoFilter<Graph>(PairInfoWeightChecker<Graph>(g, 40)).Filter(filtered_index);
+//    INFO("Counting paired info stats");
+//    EdgePairStat<Graph>(g, paired_index, output_folder).Count();
+//
+//    //todo remove filtration if launch on etalon info is ok
+//    UniquePathStat<Graph>(g, filtered_index,
+//                          (size_t)math::round(lib.data().mean_insert_size),
+//                          lib.data().read_length,
+//                          0.1 * lib.data().mean_insert_size).Count();
+//    UniqueDistanceStat<Graph>(etalon_index).Count();
+//    INFO("Paired info stats counted");
+//}
+//
+//// leave only those pairs, which edges have no path in the graph between them
+//template<class Graph>
+//void FilterIndexWithExistingPaths(PairedIndexT& scaf_clustered_index,
+//                                  const PairedIndexT& index,
+//                                  const conj_graph_pack &gp,
+//                                  const GraphDistanceFinder<Graph>& dist_finder) {
+//    for (auto it = index.begin(); it != index.end(); ++it) {
+//        const de::Histogram& histogram = *it;
+//        EdgeId e1 = it.first();
+//        EdgeId e2 = it.second();
+//        if (gp.g.OutgoingEdgeCount(gp.g.EdgeEnd(e1)) == 0 && gp.g.IncomingEdgeCount(gp.g.EdgeEnd(e1)) == 1 &&
+//            gp.g.IncomingEdgeCount(gp.g.EdgeStart(e2)) == 0 && gp.g.OutgoingEdgeCount(gp.g.EdgeStart(e2)) == 1)     {
+//            vector<size_t> dists = dist_finder.GetGraphDistancesLengths(e1, e2);
+//            if (dists.size() == 0)
+//                for (auto point_iter = histogram.begin(); point_iter != histogram.end(); ++point_iter)
+//                    if (math::gr(point_iter->d, 0.)) {
+//                        scaf_clustered_index.AddPairInfo(it.first(), it.second(),
+//                                                         point_iter->d, point_iter->weight, 20.);
+//                    }
+//        }
+//    }
+//}
+//
+//inline
+//void tSeparatedStats(conj_graph_pack& gp, const Sequence& contig,
+//                     PairedInfoIndex<conj_graph_pack::graph_t> &ind,
+//                     const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
+//                     size_t /*k*/) {
+//    typedef omnigraph::de::PairInfo<EdgeId> PairInfo;
+//
+//    MappingPath<Graph::EdgeId> m_path1 = FindGenomeMappingPath(contig, gp.g,
+//                                                               gp.index, gp.kmer_mapper);
+//
+//    map<Graph::EdgeId, vector<pair<int, int>>> inGenomeWay;
+//    int CurI = 0;
+//    int gaps = 0;
+//    for (size_t i = 0; i < m_path1.size(); i++) {
+//        bool new_edge_added = false;
+//        EdgeId ei = m_path1[i].first;
+//        MappingRange mr = m_path1[i].second;
+//        int start = (int)(mr.initial_range.start_pos - mr.mapped_range.start_pos);
+//        if (inGenomeWay.find(ei) == inGenomeWay.end()) {
+//            vector<pair<int, int>> tmp;
+//            tmp.push_back(make_pair(CurI, start));
+//            inGenomeWay[ei] = tmp;
+//            CurI++;
+//            new_edge_added = true;
+//            DEBUG("Edge " << gp.g.str(ei) << " num " << CurI << " pos " << start);
+//        } else {
+//            if (m_path1[i - 1].first == ei) {
+//                if (abs(start - inGenomeWay[ei][(inGenomeWay[ei].size() - 1)].second) > 50) {
+//                    inGenomeWay[ei].push_back(make_pair(CurI, start));
+//                    CurI++;
+//                    new_edge_added = true;
+//                    DEBUG("Edge " << gp.g().str(ei) << " num " << CurI << " pos " << start);
+//                }
+//            } else {
+//                inGenomeWay[ei].push_back(make_pair(CurI, start));
+//                CurI++;
+//                new_edge_added = true;
+//                DEBUG("Edge " << gp.g.str(ei) << " num " << CurI << " pos " << start);
+//            }
+//        }
+//        if (new_edge_added && (i > 0)) {
+//            if (gp.g.EdgeStart(ei) != gp.g.EdgeEnd(m_path1[i - 1].first)) {
+//                gaps++;
+//            }
+//        }
+//    }
+//    INFO("Totaly " << CurI << " edges in genome path, with " << gaps << "not adjacent conequences");
+//
+//    vector<int> stats(10);
+//    vector<int> stats_d(10);
+//    int PosInfo = 0;
+//    int AllignedPI = 0;
+//    int ExactDPI = 0;
+//    int OurD = int(lib.data().mean_insert_size) - int(lib.data().read_length);
+//    for (auto p_iter = ind.begin(), p_end_iter = ind.end();
+//         p_iter != p_end_iter; ++p_iter) {
+//        vector<PairInfo> pi = *p_iter;
+//        for (size_t j = 0; j < pi.size(); j++) {
+//            EdgeId left_edge = pi[j].first;
+//            EdgeId right_edge = pi[j].second;
+//            double d = pi[j].d();
+//            if (d < 0.001)
+//                continue;
+//            int best_d = 100;
+//            int best_t = 0;
+//            PosInfo++;
+//            DEBUG(
+//                "PairInfo " << gp.g().str(left_edge) << " -- " << gp.g().str(right_edge) << " d " << d);
+//            bool ExactOnD = false;
+//            for (size_t left_i = 0; left_i < inGenomeWay[left_edge].size();
+//                 left_i++)
+//                for (size_t right_i = 0;
+//                     right_i < inGenomeWay[right_edge].size(); right_i++) {
+//                    if (best_d
+//                        > abs(
+//                            inGenomeWay[right_edge][right_i].second
+//                            - inGenomeWay[left_edge][left_i].second
+//                            - d)) {
+//                        best_d = (int)math::round(abs(
+//                            inGenomeWay[right_edge][right_i].second
+//                            - inGenomeWay[left_edge][left_i].second
+//                            - d));
+//                        best_t = inGenomeWay[right_edge][right_i].first
+//                                 - inGenomeWay[left_edge][left_i].first;
+//                        DEBUG("best d " << best_d);
+//                        if ((inGenomeWay[right_edge][right_i].second
+//                             - inGenomeWay[left_edge][left_i].second
+//                             - (int) gp.g.length(left_edge) <= OurD)
+//                            && (inGenomeWay[right_edge][right_i].second
+//                                - inGenomeWay[left_edge][left_i].second
+//                                + (int) gp.g.length(right_edge) >= OurD))
+//                            ExactOnD = true;
+//                        else
+//                            ExactOnD = false;
+//                    }
+//                }
+//            if (best_t > 5)
+//                best_t = 5;
+//            if (best_d < 100) {
+//                AllignedPI++;
+//                stats[best_t]++;
+//                if (ExactOnD) {
+//                    stats_d[best_t]++;
+//                    ExactDPI++;
+//                }
+//            }
+//
+//        }
+//    }INFO(
+//        "Total positive pair info " << PosInfo << " alligned to genome " << AllignedPI << " with exact distance " << ExactDPI);
+//    INFO(
+//        "t-separated stats Alligneg: 1 - " << stats[1] << " 2 - " << stats[2] << " 3 - " << stats[3] << " 4 - " << stats[4] << " >4 - " << stats[5]);
+//    INFO(
+//        "t-separated stats Exact: 1 - " << stats_d[1] << " 2 - " << stats_d[2] << " 3 - " << stats_d[3] << " 4 - " << stats_d[4] << " >4 - " << stats[5]);
+//}
+//
+//template<class Graph>
+//void CountAndSaveAllPaths(const Graph& g, const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
+//                          const PairedInfoIndexT<Graph>& paired_index, const PairedInfoIndexT<Graph>& /*clustered_index*/) {
+//    PairedIndexT all_paths(g);
+//    GetAllDistances<Graph>(paired_index,
+//                           all_paths,
+//                           GraphDistanceFinder<Graph>(g,
+//                                                      size_t(lib.data().mean_insert_size),
+//                                                      lib.data().read_length,
+//                                                      size_t(lib.data().insert_size_deviation)));
+//
+//    std::string dir_name = cfg::get().output_dir + "estimation_qual/";
+//    make_dir(dir_name);
+//
+//    graphio::ConjugateDataPrinter<Graph> printer(g);
+//    printer.savePaired(dir_name + "paths", all_paths);
+//
+//    //PairedIndexT& all_paths_2(g);
+//    //GetAllDistances<Graph>(g,
+//    //paired_index, clustered_index,
+//    //all_paths_2,
+//    //GraphDistanceFinder<Graph>(g, *cfg::get().ds.IS, *cfg::get().ds.RL,
+//    //size_t(*cfg::get().ds.is_var)));
+//    //printer.savePaired(dir_name + "paths_all", all_paths_2);
+//}
+//
+//void FillAndCorrectEtalonPairedInfo(PairedIndexT&  corrected_etalon_index,
+//                                    const conj_graph_pack& gp,
+//                                    const PairedIndexT&  paired_index, size_t insert_size,
+//                                    size_t read_length, size_t delta,
+//                                    bool save_etalon_info_history = false) {
+//    INFO("Filling etalon paired index");
+//    PairedIndexT etalon_index(gp.g);
+//    bool successful_load = false;
+//    if (cfg::get().entry_point >= ws_distance_estimation) {
+//        string p = path::append_path(cfg::get().load_from, "../etalon");
+//        if (!path::is_regular_file(p + ".prd")) {
+//            DEBUG("file " << p + ".prd" << " does not exist");
+//        }
+//        else {
+//            INFO("Loading etalon pair info from the previous run...");
+//            Graph& graph = const_cast<Graph&>(gp.g);
+//            graphio::ConjugateDataScanner<Graph> scanner(graph);
+//            scanner.loadPaired(p, etalon_index);
+//            path::files_t files;
+//            files.push_back(p);
+//            path::copy_files_by_prefix(files, cfg::get().output_dir);
+//            successful_load = true;
+//        }
+//    }
+//    if (!successful_load)
+//        FillEtalonPairedIndex(etalon_index, gp.g,
+//                              gp.index, gp.kmer_mapper, insert_size, read_length, delta,
+//                              gp.genome, gp.k_value);
+//    INFO("Etalon paired index filled");
+//
+//    INFO("Correction of etalon paired info has been started");
+//
+//    INFO("Filtering etalon info");
+//    //leave only info between edges both present in paired_index
+//    PairedIndexT filtered_etalon_index(gp.g);
+//    for (auto iter = etalon_index.begin(); iter != etalon_index.end(); ++iter) {
+//        const de::Histogram& histogram = *iter;
+//        EdgeId first_edge = iter.first();
+//        EdgeId second_edge = iter.second();
+//        if (paired_index.GetEdgePairInfo(first_edge, second_edge).size() > 0) {
+//            for (auto point = histogram.begin(); point != histogram.end(); ++point)
+//                filtered_etalon_index.AddPairInfo(first_edge, second_edge, *point);
+//        }
+//        else
+//            DEBUG("Filtering out pair_info " << gp.g.int_id(first_edge) << " "
+//                  << gp.g.int_id(second_edge));
+//    }
+//
+//    INFO("Pushing etalon info through estimator");
+//    GraphDistanceFinder<Graph> dist_finder(gp.g, insert_size, read_length, delta);
+//    DistanceEstimator<Graph> estimator(gp.g, filtered_etalon_index, dist_finder, 0., 4.);
+//    estimator.Estimate(corrected_etalon_index);
+//    if (save_etalon_info_history) {
+//        INFO("Saving etalon paired info indices on different stages");
+//        ConjugateDataPrinter<Graph> data_printer(gp.g);
+//        data_printer.savePaired(cfg::get().output_dir + "etalon", etalon_index);
+//        data_printer.savePaired(cfg::get().output_dir + "etalon_filtered_by_index",
+//                                filtered_etalon_index);
+//        data_printer.savePaired(cfg::get().output_dir + "etalon_corrected_by_graph",
+//                                corrected_etalon_index);
+//        INFO("Everything is saved");
+//
+//        if (cfg::get().paired_info_scaffolder) {
+//            GraphDistanceFinder<Graph> dist_finder(gp.g, insert_size, read_length, delta);
+//            INFO("Saving paired information statistics for a scaffolding");
+//            PairedIndexT scaf_etalon_index(gp.g);
+//            FilterIndexWithExistingPaths(scaf_etalon_index, etalon_index, gp, dist_finder);
+//            data_printer.savePaired(
+//                cfg::get().output_dir + "scaf_etalon",
+//                scaf_etalon_index);
+//            PairedIndexT scaf_filtered_etalon_index(gp.g);
+//            FilterIndexWithExistingPaths(scaf_filtered_etalon_index, filtered_etalon_index, gp, dist_finder);
+//            data_printer.savePaired(
+//                cfg::get().output_dir + "scaf_etalon_filtered",
+//                scaf_filtered_etalon_index);
+//        }
+//
+//        INFO("Everything saved");
+//    }
+//    INFO("Correction finished");
+//}
+//
+//void CountClusteredPairedInfoStats(const conj_graph_pack &gp,
+//                                   const io::SequencingLibrary<debruijn_config::DataSetData> &lib,
+//                                   const PairedInfoIndexT<Graph> &paired_index,
+//                                   const PairedInfoIndexT<Graph> &clustered_index) {
+//    PairedIndexT etalon_index(gp.g);
+//
+//    FillAndCorrectEtalonPairedInfo(etalon_index, gp, paired_index,
+//                                 (size_t)math::round(lib.data().mean_insert_size),
+//                                 lib.data().read_length,
+//                                 (size_t)math::round(lib.data().insert_size_deviation), true);
+//
+//	CountAndSaveAllPaths(gp.g, lib, paired_index, clustered_index);
+//
+//	INFO("Counting clustered info stats");
+//	EdgeQuality<Graph, Index> edge_qual(gp.g, gp.index, gp.kmer_mapper, gp.genome);
+//  //EstimationQualityStat<Graph> estimation_stat(gp.g, edge_qual,
+//                                              //paired_index, clustered_index, etalon_index);
+//  //estimation_stat.Count();
+//  //estimation_stat.SaveStats(cfg::get().output_dir + "estimation_qual/");
+//
+//	INFO("Counting overall cluster stat");
+//	ClusterStat<Graph>(clustered_index).Count();
+//	INFO("Overall cluster stat");
+//
+//  if (cfg::get().paired_info_scaffolder) {
+//		ConjugateDataPrinter<Graph> data_printer(gp.g);
+//    INFO("Generating the statistics of pair info for scaffolding");
+//    PairedIndexT scaf_clustered_index(gp.g);
+//    FilterIndexWithExistingPaths(scaf_clustered_index,
+//                                 clustered_index, gp,
+//                                 GraphDistanceFinder<Graph>(gp.g,
+//                                         (size_t)math::round(lib.data().mean_insert_size),
+//                                         lib.data().read_length,
+//                                         (size_t)math::round(lib.data().insert_size_deviation)));
+//    data_printer.savePaired(cfg::get().output_dir + "scaf_clustered",
+//                            scaf_clustered_index);
+//  }
+//  //  PairedInfoIndexT<Graph> etalon_clustered_index;
+//	//	DistanceEstimator<Graph> estimator(g, etalon_index, insert_size,
+//	//			max_read_length, cfg::get().de.delta,
+//	//			cfg::get().de.linkage_distance, cfg::get().de.max_distance);
+//	//	estimator.Estimate(etalon_clustered_index);
+//
+//  //  PairedInfoIndexT<Graph> filtered_clustered_index(g);
+//	//	PairInfoFilter<Graph> (g, 1000.).Filter(
+//  //      clustered_index[>etalon_clustered_index<], filtered_clustered_index);
+//	INFO("Counting mate-pair transformation stat");
+//	MatePairTransformStat<Graph>(gp.g, //filtered_
+//	    clustered_index).Count();
+//	INFO("Mate-pair transformation stat counted");
+//	INFO("Clustered info stats counted");
+//}
diff --git a/src/debruijn/detail_coverage.hpp b/src/debruijn/detail_coverage.hpp
index 30ef4e5..b559239 100644
--- a/src/debruijn/detail_coverage.hpp
+++ b/src/debruijn/detail_coverage.hpp
@@ -132,6 +132,9 @@ public:
         //todo maybe improve later
         SetCoverageSimilarToAverageFlanking(new_edge_1, old_edge);
         SetCoverageSimilarToAverageGlobal(new_edge_2, old_edge);
+        if (old_edge == g_.conjugate(old_edge)) {
+            SetCoverageSimilarToAverageGlobal(g_.conjugate(new_edge_1), old_edge);
+        }
     }
 
     virtual void HandleDelete(EdgeId e) {
diff --git a/src/debruijn/distance_estimation.cpp b/src/debruijn/distance_estimation.cpp
index e6d77eb..ef846f7 100644
--- a/src/debruijn/distance_estimation.cpp
+++ b/src/debruijn/distance_estimation.cpp
@@ -9,7 +9,7 @@
 #include "dataset_readers.hpp"
 #include "pair_info_improver.hpp"
 
-#include "de/paired_info.hpp"
+#include "de/paired_info_helpers.hpp"
 #include "de/pair_info_filters.hpp"
 #include "de/distance_estimation.hpp"
 #include "de/weighted_distance_estimation.hpp"
@@ -56,11 +56,11 @@ void estimate_with_estimator(const Graph &graph,
 // Postprocessing, checking that clusters do not intersect
 template<class Graph>
 void RefinePairedInfo(const Graph& graph, PairedInfoIndexT<Graph>& clustered_index) {
-    for (auto iter = clustered_index.begin(); iter != clustered_index.end(); ++iter) {
+    for (auto iter = pair_begin(clustered_index); iter != pair_end(clustered_index); ++iter) {
         EdgeId first_edge = iter.first();
         EdgeId second_edge = iter.second();
-        const auto& infos = *iter;
-        if (infos.size() == 0)
+        auto infos = iter->Unwrap(); //we need an ordered histogram here
+        if (infos.empty())
             continue;
 
         auto prev_it = infos.begin();
@@ -82,12 +82,12 @@ void RefinePairedInfo(const Graph& graph, PairedInfoIndexT<Graph>& clustered_ind
                         double var = inner_it->d + inner_it->var;
                         for (auto inner_it_2 = prev_it; inner_it_2 != inner_it; ++inner_it_2) {
                             TRACE("Removing pair info " << *inner_it_2);
-                            clustered_index.RemovePairInfo(first_edge, second_edge, *inner_it_2);
+                            clustered_index.Remove(first_edge, second_edge, *inner_it_2);
                         }
-                        clustered_index.RemovePairInfo(first_edge, second_edge, *inner_it);
+                        clustered_index.Remove(first_edge, second_edge, *inner_it);
                         Point new_point(center, total_weight, var);
                         TRACE("Adding new pair info " << first_edge << " " << second_edge << " " << new_point);
-                        clustered_index.AddPairInfo(first_edge, second_edge, new_point);
+                        clustered_index.Add(first_edge, second_edge, new_point);
                         break;
                     }
                 }
diff --git a/src/debruijn/early_simplification.hpp b/src/debruijn/early_simplification.hpp
index 6eff790..4ee6f20 100644
--- a/src/debruijn/early_simplification.hpp
+++ b/src/debruijn/early_simplification.hpp
@@ -43,13 +43,20 @@ public:
 
 	//TODO make parallel
 	void CleanLinks() {
-		for (auto it  = index_.kmer_begin(); it.good(); ++it) {
-		    KeyWithHash kh = index_.ConstructKWH(runtime_k::RtSeq(index_.k(), *it));
-			for(char i = 0; i < 4; i++) {
-				CleanForwardLinks(kh, i);
-				CleanBackwardLinks(kh, i);
-			}
-		}
+        vector<Index::kmer_iterator> iters = index_.kmer_begin(10 * cfg::get().max_threads);
+#   pragma omp parallel for schedule(guided)
+        for(size_t i = 0; i < iters.size(); i++) {
+            for (Index::kmer_iterator &it = iters[i]; it.good(); ++it) {
+                KeyWithHash kh = index_.ConstructKWH(runtime_k::RtSeq(index_.k(), *it));
+                if (kh.is_minimal()) {
+                    KeyWithHash kh = index_.ConstructKWH(runtime_k::RtSeq(index_.k(), *it));
+                    for (char i = 0; i < 4; i++) {
+                        CleanForwardLinks(kh, i);
+                        CleanBackwardLinks(kh, i);
+                    }
+                }
+            }
+        }
 	}
 };
 
@@ -218,17 +225,26 @@ private:
 
 	//TODO make parallel
 	size_t RoughClipTips() {
-		size_t result = 0;
-		for (auto it  = index_.kmer_begin(); it.good(); ++it) {
-			KeyWithHash kh = index_.ConstructKWH(runtime_k::RtSeq(index_.k(), *it));
-			if(index_.OutgoingEdgeCount(kh)  >= 2) {
-				result += RemoveForward(kh);
-			}
-			if(index_.IncomingEdgeCount(kh)  >= 2) {
-				result += RemoveBackward(kh);
-			}
-		}
-		return result;
+        vector<Index::kmer_iterator> iters = index_.kmer_begin(10 * cfg::get().max_threads);
+        vector<size_t> result(iters.size());
+#   pragma omp parallel for schedule(guided)
+        for(size_t i = 0; i < iters.size(); i++) {
+            for(Index::kmer_iterator &it = iters[i]; it.good(); ++it) {
+                KeyWithHash kh = index_.ConstructKWH(runtime_k::RtSeq(index_.k(), *it));
+                if(kh.is_minimal()) {
+                    if (index_.OutgoingEdgeCount(kh) >= 2) {
+                        result[i] += RemoveForward(kh);
+                    }
+                    if (index_.IncomingEdgeCount(kh) >= 2) {
+                        result[i] += RemoveBackward(kh);
+                    }
+                }
+            }
+        }
+        size_t sum = 0;
+        for(size_t i = 0; i < result.size(); i++)
+            sum += result[i];
+		return sum;
 	}
 
 
diff --git a/src/debruijn/gap_closer.cpp b/src/debruijn/gap_closer.cpp
index 1f8b15d..616d631 100644
--- a/src/debruijn/gap_closer.cpp
+++ b/src/debruijn/gap_closer.cpp
@@ -57,8 +57,12 @@ class GapCloserPairedIndexFiller {
             if (OutTipIter != OutTipMap.end()) {
                 for (size_t j = 0; j < path2.size(); ++j) {
                     auto InTipIter = InTipMap.find(path2[j]);
-                    if (InTipIter != InTipMap.end())
-                        paired_index.AddPairInfo(OutTipIter->second.first, InTipIter->second.first, { 1000000., 1.});
+                    if (InTipIter != InTipMap.end()) {
+                        auto e1 = OutTipIter->second.first;
+                        auto e2 = InTipIter->second.first;
+                        paired_index.SwapConj(e1, e2);
+                        paired_index.Add(e1, e2, omnigraph::de::RawPoint(1000000., 1.));
+                    }
                 }
             }
         }
@@ -125,7 +129,7 @@ class GapCloserPairedIndexFiller {
         INFO("Processing paired reads (takes a while)");
 
         size_t nthreads = streams.size();
-        std::vector<omnigraph::de::PairedInfoBuffer<Graph> >buffer_pi(nthreads);
+        omnigraph::de::PairedInfoBuffersT<Graph> buffer_pi(graph_, nthreads);
 
         size_t counter = 0;
 #       pragma omp parallel for num_threads(nthreads) reduction(+ : counter)
@@ -145,7 +149,7 @@ class GapCloserPairedIndexFiller {
 
         INFO("Merging paired indices");
         for (auto& index: buffer_pi) {
-          paired_index.AddAll(index);
+          paired_index.Merge(index);
           index.Clear();
         }
     }
@@ -283,7 +287,7 @@ class GapCloser {
             DEBUG("Splitting first edge.");
             pair<EdgeId, EdgeId> split_res = g_.SplitEdge(first, g_.length(first) - overlap + diff_pos.front());
             first = split_res.first;
-            tips_paired_idx_.RemoveEdgeInfo(split_res.second);
+            tips_paired_idx_.Remove(split_res.second);
             DEBUG("Adding new edge.");
             VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeEnd(first)), true));
             VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeStart(second)), false));
@@ -307,7 +311,7 @@ class GapCloser {
             DEBUG("Splitting second edge.");
             pair<EdgeId, EdgeId> split_res = g_.SplitEdge(second, diff_pos.back() + 1);
             second = split_res.second;
-            tips_paired_idx_.RemoveEdgeInfo(split_res.first);
+            tips_paired_idx_.Remove(split_res.first);
             DEBUG("Adding new edge.");
             VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeEnd(first)), true));
             VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeStart(second)), false));
@@ -397,35 +401,41 @@ class GapCloser {
     }
 
   public:
+    //TODO extract methods
     void CloseShortGaps() {
-        typedef typename omnigraph::de::PairedInfoIndexT<Graph>::EdgeIterator EdgeIterator;
-
         INFO("Closing short gaps");
         size_t gaps_filled = 0;
         size_t gaps_checked = 0;
         for (auto edge = g_.SmartEdgeBegin(); !edge.IsEnd(); ++edge) {
             EdgeId first_edge = *edge;
-            auto edge_info = tips_paired_idx_.GetEdgeInfo(first_edge, 0);
-
-            for (EdgeIterator it(edge_info.begin(), edge_info.end()),
-                         et(edge_info.end(), edge_info.end());
-                 it != et; ++it) {
-                std::pair<EdgeId, omnigraph::de::Point> entry = *it;
-                EdgeId second_edge = entry.first;
-                const omnigraph::de::Point& point = entry.second;
-                if (first_edge != second_edge && math::ge(point.weight, weight_threshold_)) {
-                    if (!g_.IsDeadEnd(g_.EdgeEnd(first_edge)) || !g_.IsDeadStart(g_.EdgeStart(second_edge))) {
-                        // WARN("Topologically wrong tips");
+            for (auto i : tips_paired_idx_.Get(first_edge)) {
+                EdgeId second_edge = i.first;
+                if (first_edge == second_edge)
+                    continue;
+
+                if (!g_.IsDeadEnd(g_.EdgeEnd(first_edge)) || !g_.IsDeadStart(g_.EdgeStart(second_edge))) {
+                    // WARN("Topologically wrong tips");
+                    continue;
+                }
+
+                bool closed = false;
+                for (auto point : i.second) {
+                    if (math::ls(point.d, 0))
                         continue;
-                    }
+                    if (math::ls(point.weight, weight_threshold_))
+                        continue;
+
                     ++gaps_checked;
-                    if (ProcessPair(first_edge, second_edge)) {
+                    closed = ProcessPair(first_edge, second_edge);
+                    if (closed) {
                         ++gaps_filled;
                         break;
                     }
                 }
-            }
-        }
+                if (closed)
+                    break;
+            } // second edge
+        } // first edge
 
         INFO("Closing short gaps complete: filled " << gaps_filled
              << " gaps after checking " << gaps_checked
@@ -486,7 +496,7 @@ void GapClosing::run(conj_graph_pack &gp, const char*) {
 
     for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
         if (cfg::get().ds.reads[i].type() == io::LibraryType::PairedEnd) {
-            auto streams = paired_binary_readers(cfg::get().ds.reads[i], true, 0);
+            auto streams = paired_binary_readers(cfg::get().ds.reads[i], false, 0);
             CloseGaps(gp, streams);
         }
     }
diff --git a/src/debruijn/genome_consistance_checker.cpp b/src/debruijn/genome_consistance_checker.cpp
new file mode 100644
index 0000000..cc954d5
--- /dev/null
+++ b/src/debruijn/genome_consistance_checker.cpp
@@ -0,0 +1,236 @@
+#include "genome_consistance_checker.hpp"
+#include "debruijn_graph.hpp"
+#include <algorithm>
+#include <limits>
+namespace debruijn_graph {
+using omnigraph::MappingRange;
+using namespace std;
+
+//gap or overlap size. WITHOUT SIGN!
+static size_t gap(const Range &a, const Range &b) {
+    return max(a.end_pos, b.start_pos) - min (a.end_pos, b.start_pos);
+}
+bool GenomeConsistenceChecker::consequent(const Range &mr1, const Range &mr2) const{
+    if (mr1.end_pos > mr2.start_pos + absolute_max_gap_)
+        return false;
+    if (mr1.end_pos + absolute_max_gap_ < mr2.start_pos)
+        return false;
+    return true;
+
+}
+bool GenomeConsistenceChecker::consequent(const MappingRange &mr1, const MappingRange &mr2) const {
+    //do not want to think about handling gaps near 0 position.
+    if (!consequent(mr1.initial_range, mr2.initial_range) || !consequent(mr1.mapped_range, mr2.mapped_range))
+        return false;
+    size_t initial_gap = gap(mr1.initial_range, mr2.initial_range);
+    size_t mapped_gap = gap(mr1.mapped_range, mr2.mapped_range);
+    size_t max_gap = max(initial_gap, mapped_gap);
+    if ( max_gap > relative_max_gap_* double (max (min(mr1.initial_range.size(), mr1.mapped_range.size()), min(mr2.initial_range.size(), mr2.mapped_range.size()))))
+        return false;
+    return true;
+}
+
+PathScore GenomeConsistenceChecker::CountMisassemblies(const BidirectionalPath &path) const {
+    PathScore straight = CountMisassembliesWithStrand(path, "0");
+    PathScore reverse = CountMisassembliesWithStrand(path, "1");
+    size_t total_length = path.LengthAt(0);
+//TODO: constant;
+    if (total_length > std::max(straight.mapped_length, reverse.mapped_length) * 2) {
+        DEBUG("mapped less than half of the path, skipping");
+        return PathScore(0,0,0);
+    } else {
+        if (straight.mapped_length > reverse.mapped_length) {
+            return straight;
+        } else {
+            return reverse;
+        }
+    }
+}
+
+void GenomeConsistenceChecker::SpellGenome() {
+    vector<pair<EdgeId, MappingRange> > to_sort;
+    for(auto e: storage_) {
+        if (excluded_unique_.find(e) == excluded_unique_.end() ) {
+            set<MappingRange> mappings = gp_.edge_pos.GetEdgePositions(e, "fxd0");
+            if (mappings.size() > 1) {
+                INFO("edge " << e << "smth strange");
+            } else if (mappings.size() == 0) {
+                continue;
+            } else {
+                to_sort.push_back(make_pair(e, *mappings.begin()));
+            }
+        }
+    }
+    sort(to_sort.begin(), to_sort.end(), [](const pair<EdgeId, MappingRange> & a, const pair<EdgeId, MappingRange> & b) -> bool
+    {
+        return a.second.initial_range.start_pos < b.second.initial_range.start_pos;
+    }
+    );
+    size_t count = 0;
+    for(auto p: to_sort) {
+        INFO("edge " << gp_.g.int_id(p.first) << " length "<< gp_.g.length(p.first) << " coverage " << gp_.g.coverage(p.first) << " mapped to " << p.second.mapped_range.start_pos << " - " << p.second.mapped_range.end_pos << " init_range " << p.second.initial_range.start_pos << " - " << p.second.initial_range.end_pos );
+        genome_spelled_[p.first] = count;
+        count++;
+    }
+}
+
+PathScore GenomeConsistenceChecker::CountMisassembliesWithStrand(const BidirectionalPath &path, const string strand) const {
+    if (strand == "1") {
+        return (CountMisassembliesWithStrand(*path.GetConjPath(), "0"));
+    }
+    PathScore res(0, 0, 0);
+    EdgeId prev;
+    size_t prev_in_genome = std::numeric_limits<std::size_t>::max();
+    size_t prev_in_path = std::numeric_limits<std::size_t>::max();
+    MappingRange prev_range;
+    for (int i = 0; i < (int) path.Size(); i++) {
+        if (genome_spelled_.find(path.At(i)) != genome_spelled_.end()) {
+            size_t cur_in_genome =  genome_spelled_[path.At(i)];
+            MappingRange cur_range = *gp_.edge_pos.GetEdgePositions(path.At(i), "fxd0").begin();
+            if (prev_in_genome != std::numeric_limits<std::size_t>::max()) {
+                if (cur_in_genome == prev_in_genome + 1) {
+                    int dist_in_genome = (int) cur_range.initial_range.start_pos -  (int) prev_range.initial_range.end_pos;
+                    int dist_in_path = (int) path.LengthAt(prev_in_path) - (int) path.LengthAt(i) +  (int) cur_range.mapped_range.start_pos - (int) prev_range.mapped_range.end_pos;
+                    DEBUG("Edge " << prev.int_id() << "  position in genome ordering: " << prev_in_genome);
+                    DEBUG("Gap in genome / gap in path: " << dist_in_genome << " / " << dist_in_path);
+                    if (abs(dist_in_genome - dist_in_path) >absolute_max_gap_ && (dist_in_genome * (1 + relative_max_gap_) < dist_in_path || dist_in_path * (1 + relative_max_gap_) < dist_in_genome)) {
+
+                        res.wrong_gap_size ++;
+                    }
+                } else {
+                    if (path.At(i) != circular_edge_ && path.At(prev_in_path) != circular_edge_)
+                        res.misassemblies++;
+                    else
+                        INFO("Skipping fake(circular) misassembly");
+                }
+            }
+            res.mapped_length += cur_range.mapped_range.size();
+            prev = path.At(i);
+            prev_in_genome = cur_in_genome;
+            prev_range = cur_range;
+            prev_in_path = i;
+        }
+    }
+    if (prev_in_path != std::numeric_limits<std::size_t>::max())
+        DEBUG("Edge " << prev.int_id() << "  position in genome ordering: " << prev_in_genome);
+    return res;
+}
+void GenomeConsistenceChecker::RefillPos() {
+    RefillPos("0");
+    RefillPos("1");
+}
+
+
+void GenomeConsistenceChecker::RefillPos(const string &strand) {
+    for (auto e: storage_) {
+        RefillPos(strand, e);
+    }
+}
+
+void GenomeConsistenceChecker::FindBestRangeSequence(const set<MappingRange>& old_mappings, vector<MappingRange>& used_mappings) const {
+    vector<MappingRange> to_process (old_mappings.begin(), old_mappings.end());
+    sort(to_process.begin(), to_process.end(), [](const MappingRange & a, const MappingRange & b) -> bool
+    {
+        return a.mapped_range.start_pos < b.mapped_range.start_pos;
+    } );
+    size_t sz = to_process.size();
+//max weight path in orgraph of mappings
+    TRACE("constructing mapping graph" << sz << " vertices");
+    vector<vector<size_t>> consecutive_mappings(sz);
+    for(size_t i = 0; i < sz; i++) {
+        for (size_t j = i + 1; j < sz; j++) {
+            if (consequent(to_process[i], to_process[j])) {
+                consecutive_mappings[i].push_back(j);
+            } else {
+                if (to_process[j].mapped_range.start_pos > to_process[i].mapped_range.end_pos + absolute_max_gap_) {
+                    break;
+                }
+            }
+        }
+    }
+    vector<size_t> scores(sz), prev(sz);
+    for(size_t i = 0; i < sz; i++) {
+        scores[i] = to_process[i].initial_range.size();
+        prev[i] = std::numeric_limits<std::size_t>::max();
+    }
+    for(size_t i = 0; i < sz; i++) {
+        for (size_t j = 0; j < consecutive_mappings[i].size(); j++) {
+            TRACE(consecutive_mappings[i][j]);
+            if (scores[consecutive_mappings[i][j]] < scores[i] + to_process[consecutive_mappings[i][j]].initial_range.size()) {
+                scores[consecutive_mappings[i][j]] = scores[i] + to_process[consecutive_mappings[i][j]].initial_range.size();
+                prev[consecutive_mappings[i][j]] = i;
+            }
+        }
+    }
+    size_t cur_max = 0;
+    size_t cur_i = 0;
+    for(size_t i = 0; i < sz; i++) {
+        if (scores[i] > cur_max) {
+            cur_max = scores[i];
+            cur_i = i;
+        }
+    }
+    used_mappings.clear();
+    while (cur_i != std::numeric_limits<std::size_t>::max()) {
+        used_mappings.push_back(to_process[cur_i]);
+        cur_i = prev[cur_i];
+    }
+    reverse(used_mappings.begin(), used_mappings.end());
+};
+
+void GenomeConsistenceChecker::RefillPos(const string &strand, const EdgeId &e) {
+    set<MappingRange> old_mappings = gp_.edge_pos.GetEdgePositions(e, strand);
+    TRACE("old mappings sz " << old_mappings.size() );
+    size_t total_mapped = 0;
+    for (auto mp:old_mappings) {
+        total_mapped += mp.initial_range.size();
+    }
+    if (total_mapped  > (double) gp_.g.length(e) * 1.5) {
+       INFO ("Edge " << gp_.g.int_id(e) << "is not unique, excluding");
+       excluded_unique_.insert(e);
+       return;
+    }
+//TODO: support non-unique edges;
+    if (total_mapped  < (double) gp_.g.length(e) * 0.5) {
+        DEBUG ("Edge " << gp_.g.int_id(e) << "is not mapped on strand "<< strand <<", not used");
+        return;
+    }
+    TRACE(total_mapped << " " << gp_.g.length(e));
+    string new_strand = "fxd" + strand;
+    vector<MappingRange> used_mappings;
+    FindBestRangeSequence(old_mappings, used_mappings);
+
+    size_t cur_i = 0;
+    MappingRange new_mapping;
+    new_mapping = used_mappings[cur_i];
+    size_t used_mapped = new_mapping.initial_range.size();
+    TRACE ("Edge " << gp_.g.int_id(e) << " length "<< gp_.g.length(e));
+    TRACE ("new_mapping mp_range "<< new_mapping.mapped_range.start_pos << " - " << new_mapping.mapped_range.end_pos
+         << " init_range " << new_mapping.initial_range.start_pos << " - " << new_mapping.initial_range.end_pos );
+    while (cur_i  < used_mappings.size() - 1) {
+        cur_i ++;
+        used_mapped += used_mappings[cur_i].initial_range.size();
+        new_mapping = new_mapping.Merge(used_mappings[cur_i]);
+        TRACE("new_mapping mp_range "<< new_mapping.mapped_range.start_pos << " - " << new_mapping.mapped_range.end_pos
+             << " init_range " << new_mapping.initial_range.start_pos << " - " << new_mapping.initial_range.end_pos );
+    }
+//used less than 0.9 of aligned length
+    if (total_mapped * 10  >=  used_mapped * 10  + gp_.g.length(e)) {
+        INFO ("Edge " << gp_.g.int_id(e) << " length "<< gp_.g.length(e)  << "is potentially misassembled! mappings: ");
+        for (auto mp:old_mappings) {
+            INFO("mp_range "<< mp.mapped_range.start_pos << " - " << mp.mapped_range.end_pos << " init_range " << mp.initial_range.start_pos << " - " << mp.initial_range.end_pos );
+            if (mp.initial_range.start_pos < absolute_max_gap_) {
+                INFO ("Fake(linear order) misassembly on edge "<< e.int_id());
+                if (strand == "0") {
+                    circular_edge_ = e;
+                }
+            }
+        }
+
+    }
+    gp_.edge_pos.AddEdgePosition(e, new_strand, new_mapping);
+}
+
+
+
+}
diff --git a/src/debruijn/genome_consistance_checker.hpp b/src/debruijn/genome_consistance_checker.hpp
index cec971f..e2a1ba5 100644
--- a/src/debruijn/genome_consistance_checker.hpp
+++ b/src/debruijn/genome_consistance_checker.hpp
@@ -5,98 +5,74 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-/*
- * genome_consistance_checker.hpp
- *
- *  Created on: Oct 24, 2013
- *      Author: anton
- */
 
 #pragma once
-
+#include "omni/visualization/graph_labeler.hpp"
 #include "omni/edges_position_handler.hpp"
 #include "omni/mapping_path.hpp"
+#include "omni/edges_position_handler.hpp"
+#include "sequence/sequence.hpp"
+#include "graph_pack.hpp"
+#include "positions.hpp"
+#include "path_extend/bidirectional_path.hpp"
+#include "path_extend/scaffolder2015/scaff_supplementary.hpp"
 
 namespace debruijn_graph {
 
-template<class Graph>
+
+using path_extend::BidirectionalPath;
+using path_extend::ScaffoldingUniqueEdgeStorage;
+
+struct PathScore{
+    size_t misassemblies;
+    size_t wrong_gap_size;
+    size_t mapped_length;
+    PathScore(size_t m, size_t w, size_t ml): misassemblies(m), wrong_gap_size(w), mapped_length(ml) {}
+};
 class GenomeConsistenceChecker {
+
 private:
-	using omnigraph::MappingRange;
+    const conj_graph_pack &gp_;
 	const Graph &graph_;
+    //EdgesPositionHandler<Graph> &position_handler_;
 	Sequence genome_;
-	omnigraph::EdgesPositionHandler<Graph> genome_mapping_;
-	size_t max_gap_;
+    ScaffoldingUniqueEdgeStorage storage_;
+	size_t absolute_max_gap_;
 	double relative_max_gap_;
+    set<EdgeId> excluded_unique_;
+    EdgeId circular_edge_;
+//map from unique edges to their order in genome spelling;
+    mutable map<EdgeId, size_t> genome_spelled_;
+    bool consequent(const Range &mr1, const Range &mr2) const;
+	bool consequent(const MappingRange &mr1, const MappingRange &mr2) const ;
 
-	bool consequent(const MappingRange &mr1, const MappingRange &mr2, size_t gap) const {
-		if (mr2.mapped_range.start_pos == 0 && mr1.mapped_range.end_pos == genome_.size())
-			return true;
-		if (mr1.initial_range.end_pos > mr2.initial_range.start_pos + gap)
-			return false;
-		if (mr1.initial_range.end_pos + gap > mr2.initial_range.start_pos)
-			return false;
-		if (mr1.mapped_range.end_pos > mr2.mapped_range.start_pos + gap)
-			return false;
-		if (mr1.mapped_range.end_pos + gap > mr2.mapped_range.start_pos)
-			return false;
-		return true;
-	}
+    PathScore CountMisassembliesWithStrand(const BidirectionalPath &path, const string strand) const;
+//constructs longest sequence of consecutive ranges, stores result in used_mappings
+    void FindBestRangeSequence(const set<MappingRange>& old_mappings, vector<MappingRange>& used_mappings) const;
+//Refills genomic positions uniting alignments separated with small gaps
+    void RefillPos();
+    void RefillPos(const string &strand);
+    void RefillPos(const string &strand, const EdgeId &e);
+DECL_LOGGER("GenomeConsistenceChecker");
 
-	set<MappingRange> FillPositionGaps(const set<MappingRange> &info, size_t gap) const {
-		set<MappingRange> result;
-		auto cur = info.begin();
-		while(cur != info.end()) {
-			MappingRange new_range = *cur;
-			++cur;
-			while(cur != info.end() && consequent(new_range, *cur, gap)) {
-				new_range = new_range.Merge(*cur);
-				++cur;
-			}
-			result.insert(new_range);
-		}
-		return result;
-	}
-
-	void Merge(set<MappingRange> &ranges, set<MappingRange> &to_merge, int shift) {
-		for(set<MappingRange>::iterator it = to_merge.begin(); it != to_merge.end(); ++it) {
-			ranges.insert(genome_mapping_.EraseAndExtract(ranges, it->Shift(shift)));
-		}
-	}
-
-	bool IsConsistentWithGenomeStrand(const vector<EdgeId> &path, const string &strand) const {
-		size_t len = graph_.length(path[0]);
-		for (size_t i = 1; i < path.size(); i++) {
-			Merge(res, genome_mapping_.GetEdgePositions(path[i], strand));
-			len += graph_.length(path[i]);
-		}
-		FillPositionGaps(res, len);
-		if (res.size() > 0) {
-			for (size_t i = 0; i < res.size(); i++) {
-				size_t m_len = res[i].initial_range.size();
-				if (abs(int(res[i].initial_range.size()) - int(len)) < max(1.0 * max_gap_, 0.07 * len))
-					return true;
-			}
-		}
-		return false;
-	}
 
 public:
-	template<class GraphPack>
-	GenomeConsistenceChecker(const GraphPack gp, size_t max_gap, double relative_max_gap /*= 0.2*/) :
-			graph_(gp.g), genome_(gp.genome), genome_mapping_(gp.g), max_gap_(max_gap), relative_max_gap_(relative_max_gap) {
-        FillPos(gp, gp.genome, "0");
-        FillPos(gp, !gp.genome, "1");
+	GenomeConsistenceChecker(const conj_graph_pack &gp, ScaffoldingUniqueEdgeStorage &storage, size_t max_gap, double relative_max_gap /*= 0.2*/) : gp_(gp),
+			graph_(gp.g), /*position_handler_(gp.edge_pos),*/ genome_(gp.genome.GetSequence()), storage_(storage),
+        absolute_max_gap_(max_gap), relative_max_gap_(relative_max_gap), excluded_unique_(), circular_edge_() {
+        if (!gp.edge_pos.IsAttached()) {
+            gp.edge_pos.Attach();
+        }
+        gp.edge_pos.clear();
+        FillPos(gp_, gp_.genome.GetSequence(), "0");
+        FillPos(gp_, !gp_.genome.GetSequence(), "1");
+        RefillPos();
 	}
+	PathScore CountMisassemblies(const BidirectionalPath &path) const;
+//spells genome in language of long unique edges from storage;
+    void SpellGenome();
 
-	bool IsConsistentWithGenome(vector<EdgeId> path) const {
-		if (path.size() == 0)
-			return false;
-		for (size_t i = 0; i + 1 < path.size(); i++) {
-			if (graph_.EdgeStart(path[i + 1]) != graph_.EdgeEnd(path[i]))
-				return false;
-		}
-		return IsConsistentWithGenomeStrand(path, "0") || IsConsistentWithGenomeStrand(path, "1");
-	}
 };
+
+
 }
diff --git a/src/debruijn/genome_storage.cpp b/src/debruijn/genome_storage.cpp
new file mode 100644
index 0000000..17decdf
--- /dev/null
+++ b/src/debruijn/genome_storage.cpp
@@ -0,0 +1,45 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+//
+// Created by lab42 on 8/19/15.
+//
+
+#include "genome_storage.hpp"
+#include "sequence/nucl.hpp"
+using namespace std;
+
+namespace debruijn_graph {
+//TODO exterminate this where possible
+    Sequence GenomeStorage::GetSequence() const{
+        stringstream ss;
+        size_t l = 0, r = 0;
+        for(size_t i = 0; i < s_.size(); i++) {
+            if (! is_nucl(s_[i]) ) {
+                if (r > l) {
+                    ss << s_.substr(l, r - l);
+                }
+                r = i + 1;
+                l = i + 1;
+            } else {
+                r++;
+            }
+        }
+        if (r > l) {
+            ss << s_.substr(l, r - l);
+        }
+        return Sequence(ss.str());
+    }
+    void GenomeStorage::SetSequence(const Sequence &s) {
+        s_ = s.str();
+    }
+    string GenomeStorage::str() const{
+        return s_;
+    }
+    size_t GenomeStorage::size() const {
+        return s_.size();
+    }
+}
\ No newline at end of file
diff --git a/src/debruijn/genome_storage.hpp b/src/debruijn/genome_storage.hpp
new file mode 100644
index 0000000..aaff952
--- /dev/null
+++ b/src/debruijn/genome_storage.hpp
@@ -0,0 +1,33 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+//
+// Created by lab42 on 8/19/15.
+//
+
+#ifndef GENOME_STORAGE_HPP_
+#define GENOME_STORAGE_HPP_
+
+#include <string>
+#include "sequence/sequence.hpp"
+namespace debruijn_graph {
+    class GenomeStorage {
+    private:
+        std::string s_;
+    public:
+        GenomeStorage():s_(""){
+        }
+
+        GenomeStorage(const std::string &s): s_(s){
+        }
+
+        Sequence GetSequence() const;
+        void SetSequence(const Sequence &s);
+        std::string str() const;
+        size_t size() const;
+    };
+}
+#endif //PROJECT_GENOME_STORAGE_HPP
diff --git a/src/debruijn/genomic_info_filler.cpp b/src/debruijn/genomic_info_filler.cpp
index 73b565e..0f5a9cb 100644
--- a/src/debruijn/genomic_info_filler.cpp
+++ b/src/debruijn/genomic_info_filler.cpp
@@ -107,11 +107,11 @@ void GenomicInfoFiller::run(conj_graph_pack &gp, const char*) {
             INFO("Failed to estimate mean coverage");
 
         if (cfg::get().kcm.use_coverage_threshold) {
-            double coef = (cfg::get().ds.aRL() - cfg::get().K + 1) / cfg::get().ds.aRL();
+            double coef = (cfg::get().ds.aRL() - double(cfg::get().K) + 1) / cfg::get().ds.aRL();
             if (coef < 0)
-                coef = (cfg::get().ds.RL() - cfg::get().K + 1) / cfg::get().ds.RL();
+                coef = double(cfg::get().ds.RL() - cfg::get().K + 1) / double(cfg::get().ds.RL());
             gp.ginfo.set_trusted_bound(CovModel.converged() && cfg::get().kcm.coverage_threshold == 0.0 ?
-                                       CovModel.GetLowThreshold() :
+                                       double(CovModel.GetLowThreshold()) :
                                        cfg::get().kcm.coverage_threshold * coef);
         }
 
diff --git a/src/debruijn/genomic_quality.hpp b/src/debruijn/genomic_quality.hpp
index 0bd6205..3821369 100644
--- a/src/debruijn/genomic_quality.hpp
+++ b/src/debruijn/genomic_quality.hpp
@@ -76,6 +76,11 @@ public:
 
     virtual void HandleSplit(EdgeId old_edge, EdgeId new_edge1,
             EdgeId new_edge2) {
+        if (old_edge == this->g().conjugate(old_edge)) {
+            WARN("EdgeQuality does not support self-conjugate splits");
+            return;
+        }
+        VERIFY(old_edge != this->g().conjugate(old_edge));
         quality_[new_edge1] = quality_[old_edge] * this->g().length(new_edge1)
                 / (this->g().length(new_edge1) + this->g().length(new_edge2));
         quality_[new_edge2] = quality_[old_edge] * this->g().length(new_edge2)
diff --git a/src/debruijn/graph_pack.hpp b/src/debruijn/graph_pack.hpp
index 3998619..d608417 100644
--- a/src/debruijn/graph_pack.hpp
+++ b/src/debruijn/graph_pack.hpp
@@ -18,6 +18,7 @@
 #include "genomic_info.hpp"
 #include "long_read_storage.hpp"
 #include "detail_coverage.hpp"
+#include "genome_storage.hpp"
 
 namespace debruijn_graph {
 
@@ -29,7 +30,8 @@ struct graph_pack: private boost::noncopyable {
     typedef typename Graph::EdgeId EdgeId;
     typedef SeqType seq_t;
     typedef EdgeIndex<graph_t, seq_t, KmerEdgeIndex> index_t;
-    typedef omnigraph::de::PairedInfoIndicesT<Graph> PairedInfoIndicesT;
+    using PairedInfoIndicesT = omnigraph::de::PairedInfoIndicesT<Graph>;
+    //typedef omnigraph::de::PairedInfoIndicesT<Graph> PairedInfoIndicesT;
     typedef omnigraph::de::UnclusteredPairedInfoIndicesT<Graph> UnclusteredPairedInfoIndicesT;
     typedef LongReadContainer<Graph> LongReadContainerT;
 
@@ -45,12 +47,12 @@ struct graph_pack: private boost::noncopyable {
     LongReadContainerT single_long_reads;
     GenomicInfo ginfo;
 
-    Sequence genome;
+    GenomeStorage genome;
 	EdgeQuality<Graph> edge_qual;
-    EdgesPositionHandler<graph_t> edge_pos;
+    mutable EdgesPositionHandler<graph_t> edge_pos;
  
     graph_pack(size_t k, const std::string &workdir, size_t lib_count,
-                        Sequence genome = Sequence(),
+                        const std::string &genome = "",
                         size_t flanking_range = 50,
                         size_t max_mapping_gap = 0,
                         size_t max_gap_diff = 0,
@@ -58,7 +60,7 @@ struct graph_pack: private boost::noncopyable {
             : k_value(k), g(k), index(g, workdir),
               kmer_mapper(g),
               flanking_cov(g, flanking_range),
-              paired_indices(lib_count),
+              paired_indices(g, lib_count),
               clustered_indices(g, lib_count),
               scaffolding_indices(g, lib_count),
               single_long_reads(g, lib_count),
@@ -72,7 +74,7 @@ struct graph_pack: private boost::noncopyable {
     }
 
     void FillQuality() {
-        edge_qual.Fill(index, kmer_mapper, genome);
+        edge_qual.Fill(index, kmer_mapper, genome.GetSequence());
     }
 
     //todo remove with usages after checking
@@ -107,8 +109,8 @@ struct graph_pack: private boost::noncopyable {
             edge_pos.Attach();
         }
         edge_pos.clear();
-        FillPos(*this, genome, "ref0");
-        FillPos(*this, !genome, "ref1");
+        FillPos(*this, genome.GetSequence(), "ref0");
+        FillPos(*this, !genome.GetSequence(), "ref1");
     }
     
     void EnsureDebugInfo() {
@@ -149,5 +151,4 @@ typedef conj_graph_pack::LongReadContainerT LongReadContainerT;
 typedef omnigraph::de::PairedInfoIndexT<ConjugateDeBruijnGraph> PairedIndexT;
 typedef omnigraph::de::UnclusteredPairedInfoIndexT<ConjugateDeBruijnGraph> UnclusteredPairedIndexT;
 
-
 } // namespace debruijn_graph
diff --git a/src/debruijn/graph_simplification.hpp b/src/debruijn/graph_simplification.hpp
deleted file mode 100644
index 2f6c86c..0000000
--- a/src/debruijn/graph_simplification.hpp
+++ /dev/null
@@ -1,1000 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-/*
- * graph_simplification.hpp
- *
- *  Created on: Aug 12, 2011
- *      Author: sergey
- */
-
-#pragma once
-
-#include "standard_base.hpp"
-#include "config_struct.hpp"
-#include "debruijn_graph.hpp"
-#include "stats/debruijn_stats.hpp"
-
-#include "omni/visualization/graph_colorer.hpp"
-#include "omni/omni_utils.hpp"
-#include "omni/omni_tools.hpp"
-#include "omni/tip_clipper.hpp"
-#include "omni/complex_tip_clipper.hpp"
-#include "omni/bulge_remover.hpp"
-#include "omni/complex_bulge_remover.hpp"
-#include "omni/erroneous_connection_remover.hpp"
-#include "omni/relative_coverage_remover.hpp"
-#include "omni/mf_ec_remover.hpp"
-#include "utils.hpp"
-#include "simplification/simplification_settings.hpp"
-#include "simplification/parallel_simplification_algorithms.hpp"
-
-#include "detail_coverage.hpp"
-#include "graph_read_correction.hpp"
-#include "detail_coverage.hpp"
-
-#include "stats/chimera_stats.hpp"
-#include "moleculo.hpp"
-
-namespace debruijn {
-
-namespace simplification {
-
-//todo remove this line
-using namespace debruijn_graph;
-
-//todo move to visualization
-template<class graph_pack>
-shared_ptr<omnigraph::visualization::GraphColorer<typename graph_pack::graph_t>> DefaultGPColorer(
-        const graph_pack& gp) {
-    auto mapper = MapperInstance(gp);
-    auto path1 = mapper->MapSequence(gp.genome).path();
-    auto path2 = mapper->MapSequence(!gp.genome).path();
-    return omnigraph::visualization::DefaultColorer(gp.g, path1, path2);
-}
-
-template<class Graph>
-class EditDistanceTrackingCallback {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::EdgeData EdgeData;
-    const Graph& g_;
-
-public:
-    EditDistanceTrackingCallback(const Graph& g)
-            : g_(g) {
-    }
-
-    bool operator()(EdgeId edge, const vector<EdgeId>& path) const {
-        vector<Sequence> path_sequences;
-        for (auto it = path.begin(); it != path.end(); ++it) {
-            path_sequences.push_back(g_.EdgeNucls(*it));
-        }
-        Sequence path_sequence(
-            MergeOverlappingSequences(path_sequences, g_.k()));
-        size_t dist = EditDistance(g_.EdgeNucls(edge), path_sequence);
-        TRACE( "Bulge sequences with distance " << dist << " were " << g_.EdgeNucls(edge) << " and " << path_sequence);
-        return true;
-    }
-
-private:
-    DECL_LOGGER("EditDistanceTrackingCallback")
-    ;
-};
-
-template<class Graph, class SmartEdgeIt>
-bool ClipTips(
-    Graph& g,
-    SmartEdgeIt& it,
-    const debruijn_config::simplification::tip_clipper& tc_config,
-    const SimplifInfoContainer& info,
-    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-
-    INFO("Clipping tips");
-
-    string condition_str = tc_config.condition;
-
-    ConditionParser<Graph> parser(g, condition_str, info);
-    auto condition = parser();
-
-    omnigraph::EdgeRemovingAlgorithm<Graph> tc(g,
-                                               omnigraph::AddTipCondition(g, condition),
-                                               removal_handler, true);
-
-    TRACE("Tip length bound " << parser.max_length_bound());
-    return tc.RunFromIterator(it,
-                      make_shared<LengthUpperBound<Graph>>(g, parser.max_length_bound()));
-}
-
-template<class Graph>
-bool ClipTips(
-    Graph& g,
-    const debruijn_config::simplification::tip_clipper& tc_config,
-    const SimplifInfoContainer& info,
-    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-
-    auto it = g.SmartEdgeBegin(LengthComparator<Graph>(g), true);
-    return ClipTips(g, it, tc_config, info, removal_handler);
-}
-
-//enabling tip projection, todo optimize if hotspot
-template<class gp_t>
-std::function<void(typename Graph::EdgeId)> WrapWithProjectionCallback(
-    gp_t& gp,
-    std::function<void(typename Graph::EdgeId)> removal_handler) {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef std::function<void(EdgeId)> HandlerF;
-    TipsProjector<gp_t> tip_projector(gp);
-
-    HandlerF projecting_callback = std::bind(&TipsProjector<gp_t>::ProjectTip,
-                                             tip_projector, std::placeholders::_1);
-
-    return func::Composition<EdgeId>(std::ref(removal_handler), projecting_callback);
-}
-
-template<class Graph, class SmartEdgeIt>
-bool RemoveBulges(
-    Graph& g,
-    SmartEdgeIt& it,
-    const debruijn_config::simplification::bulge_remover& br_config,
-    std::function<void(typename Graph::EdgeId, const std::vector<typename Graph::EdgeId> &)> opt_handler = 0,
-    std::function<void(typename Graph::EdgeId)> removal_handler = 0,
-    size_t additional_length_bound = 0) {
-
-	if(!br_config.enabled)
-		return false;
-
-    INFO("Removing bulges");
-    size_t max_length = LengthThresholdFinder::MaxBulgeLength(
-        g.k(), br_config.max_bulge_length_coefficient,
-        br_config.max_additive_length_coefficient);
-
-    DEBUG("Max bulge length " << max_length);
-
-    if (additional_length_bound != 0 && additional_length_bound < max_length) {
-        DEBUG("Setting additional bound " << additional_length_bound);
-        max_length = additional_length_bound;
-    }
-
-    BulgeRemover<Graph> br(g, max_length, br_config.max_coverage,
-                           br_config.max_relative_coverage, br_config.max_delta,
-                           br_config.max_relative_delta,
-                           opt_handler, removal_handler);
-
-    return br.RunFromIterator(it,
-                      make_shared<CoverageUpperBound<Graph>>(g, br_config.max_coverage));
-}
-
-template<class Graph>
-bool RemoveBulges(
-        Graph& g,
-        const debruijn_config::simplification::bulge_remover& br_config,
-        std::function<void(typename Graph::EdgeId, const std::vector<typename Graph::EdgeId> &)> opt_handler = 0,
-        std::function<void(typename Graph::EdgeId)> removal_handler = 0,
-        size_t additional_length_bound = 0) {
-    auto it = g.SmartEdgeBegin(CoverageComparator<Graph>(g), true);
-    return RemoveBulges(g, it, br_config, opt_handler, removal_handler, additional_length_bound);
-}
-
-template<class Graph, class SmartEdgeIt>
-bool RemoveLowCoverageEdges(
-    Graph &g,
-    SmartEdgeIt& it,
-    const debruijn_config::simplification::erroneous_connections_remover& ec_config,
-    const SimplifInfoContainer& info_container,
-    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-
-    INFO("Removing low covered connections");
-    ConditionParser<Graph> parser(g, ec_config.condition, info_container);
-
-    auto condition = parser();
-    omnigraph::EdgeRemovingAlgorithm<Graph> erroneous_edge_remover(
-        g, omnigraph::AddAlternativesPresenceCondition(g, condition), removal_handler, true);
-    return erroneous_edge_remover.RunFromIterator(it,
-                                   make_shared<CoverageUpperBound<Graph>>(g, parser.max_coverage_bound()));
-}
-
-template<class Graph>
-bool RemoveLowCoverageEdges(
-    Graph &g,
-    const debruijn_config::simplification::erroneous_connections_remover& ec_config,
-    const SimplifInfoContainer& info_container,
-    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-    auto it = g.SmartEdgeBegin(CoverageComparator<Graph>(g), true);
-    return RemoveLowCoverageEdges(g, it, ec_config, info_container, removal_handler);
-}
-
-template<class Graph>
-bool RemoveSelfConjugateEdges(Graph &g, size_t max_length, double max_coverage,
-                std::function<void(typename Graph::EdgeId)> removal_handler = 0, size_t chunk_cnt = 1) {
-    INFO("Removing short low covered self-conjugate connections");
-
-    auto condition = func::And<typename Graph::EdgeId>(make_shared<SelfConjugateCondition<Graph>>(g),
-                                       func::And<typename Graph::EdgeId>(make_shared<LengthUpperBound<Graph>>(g, max_length),
-                                                         make_shared<CoverageUpperBound<Graph>>(g, max_coverage)));
-
-    SemiParallelAlgorithmRunner<Graph, typename Graph::EdgeId> runner(g);
-    SemiParallelEdgeRemovingAlgorithm<Graph> removing_algo(g, condition, removal_handler);
-
-    return RunEdgeAlgorithm(g, runner, removing_algo, chunk_cnt);
-}
-
-template<class Graph>
-bool RemoveRelativelyLowCoverageComponents(
-        Graph &g,
-        const FlankingCoverage<Graph>& flanking_cov,
-        const debruijn_config::simplification::relative_coverage_comp_remover& rcc_config,
-        const SimplifInfoContainer& info,
-        typename ComponentRemover<Graph>::HandlerF removal_handler = 0) {
-    if (rcc_config.enabled) {
-        INFO("Removing relatively low covered connections");
-        size_t connecting_path_length_bound = LengthThresholdFinder::MaxErroneousConnectionLength(
-            g.k(), rcc_config.max_ec_length_coefficient);
-
-        std::string pics_dir = "";//cfg::get().output_dir + "rel_cov_components/"
-
-        double max_coverage = math::ge(rcc_config.max_coverage_coeff, 0.) 
-                                ? info.detected_coverage_bound() * rcc_config.max_coverage_coeff 
-                                : std::numeric_limits<double>::max();
-
-        omnigraph::simplification::relative_coverage::
-            RelativeCoverageComponentRemover<Graph> rel_rem(
-                g,
-                std::bind(&FlankingCoverage<Graph>::LocalCoverage,
-                          std::cref(flanking_cov), std::placeholders::_1, std::placeholders::_2),
-                rcc_config.coverage_gap, size_t(double(info.read_length()) * rcc_config.length_coeff),
-                size_t(double(info.read_length()) * rcc_config.tip_allowing_length_coeff),
-                connecting_path_length_bound,
-                max_coverage,
-                removal_handler, rcc_config.vertex_count_limit, pics_dir);
-        return rel_rem.Run();
-    } else {
-        INFO("Removal of relatively low covered connections disabled");
-        return false;
-    }
-}
-
-template<class Graph>
-bool DisconnectRelativelyLowCoverageEdges(Graph &g,
-        const FlankingCoverage<Graph>& flanking_cov) {
-	INFO("Disconnecting edges with relatively low coverage");
-	omnigraph::simplification::relative_coverage::RelativeCoverageHelper<Graph> helper(g, std::bind(&FlankingCoverage<Graph>::LocalCoverage,
-            std::cref(flanking_cov), std::placeholders::_1, std::placeholders::_2), 1.0);
-    omnigraph::simplification::relative_coverage::RelativeCoverageDisconnector<Graph> disconnector(g, helper);
-	return disconnector.Run();
-}
-
-template<class Graph>
-bool TopologyRemoveErroneousEdges(
-    Graph &g,
-    const debruijn_config::simplification::topology_based_ec_remover& tec_config,
-    std::function<void(typename Graph::EdgeId)> removal_handler) {
-    INFO("Removing connections based on topology");
-    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
-        g.k(), tec_config.max_ec_length_coefficient);
-
-    shared_ptr<Predicate<typename Graph::EdgeId>> condition = make_shared<DefaultUniquenessPlausabilityCondition<Graph>>(g, tec_config.uniqueness_length, tec_config.plausibility_length);
-
-    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
-}
-
-template<class Graph>
-bool TopologyClipTips(
-    Graph &g,
-    const debruijn_config::simplification::topology_tip_clipper& ttc_config,
-    size_t read_length,
-    std::function<void(typename Graph::EdgeId)> removal_handler) {
-    INFO("Clipping tips based on topology");
-
-    size_t max_length = LengthThresholdFinder::MaxTipLength(
-        read_length, g.k(), ttc_config.length_coeff);
-
-    shared_ptr<Predicate<typename Graph::EdgeId>> condition
-        = make_shared<DefaultUniquenessPlausabilityCondition<Graph>>(g,
-            ttc_config.uniqueness_length, ttc_config.plausibility_length);
-
-    return omnigraph::ClipTips(g, max_length,
-                    condition, removal_handler);
-}
-
-template<class Graph>
-bool MultiplicityCountingRemoveErroneousEdges(
-    Graph &g,
-    const debruijn_config::simplification::topology_based_ec_remover& tec_config,
-    std::function<void(typename Graph::EdgeId)> removal_handler) {
-    INFO("Removing connections based on topological multiplicity counting");
-    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
-        g.k(), tec_config.max_ec_length_coefficient);
-
-    shared_ptr<func::Predicate<typename Graph::EdgeId>> condition
-        = make_shared<MultiplicityCountingCondition<Graph>>(g, tec_config.uniqueness_length,
-            /*plausibility*/MakePathLengthLowerBound(g, PlausiblePathFinder<Graph>(g, 2 * tec_config.plausibility_length), tec_config.plausibility_length));
-
-    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
-}
-
-template<class Graph>
-bool RemoveThorns(
-    Graph &g,
-    const debruijn_config::simplification::interstrand_ec_remover& isec_config,
-    std::function<void(typename Graph::EdgeId)> removal_handler) {
-    INFO("Removing interstrand connections");
-    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
-        g.k(), isec_config.max_ec_length_coefficient);
-
-    shared_ptr<func::Predicate<typename Graph::EdgeId>> condition
-            = func::And<typename Graph::EdgeId>(make_shared<LengthUpperBound<Graph>>(g, max_length),
-                                       make_shared<ThornCondition<Graph>>(g, isec_config.uniqueness_length, isec_config.span_distance));
-
-    return omnigraph::RemoveErroneousEdgesInCoverageOrder(g, condition, numeric_limits<double>::max(), removal_handler);
-}
-
-template<class Graph>
-bool TopologyReliabilityRemoveErroneousEdges(
-    Graph &g,
-    const debruijn_config::simplification::tr_based_ec_remover& trec_config,
-    std::function<void(typename Graph::EdgeId)> removal_handler) {
-    INFO("Removing connections based on topology and reliable coverage");
-    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
-        g.k(), trec_config.max_ec_length_coefficient);
-
-    shared_ptr<func::Predicate<typename Graph::EdgeId>> condition
-            = func::And<typename Graph::EdgeId>(make_shared<CoverageUpperBound<Graph>>(g, trec_config.unreliable_coverage),
-                                       make_shared<PredicateUniquenessPlausabilityCondition<Graph>>(
-                                               g,
-                                               /*uniqueness*/MakePathLengthLowerBound(g, UniquePathFinder<Graph>(g), trec_config.uniqueness_length),
-                                               /*plausibility*/make_shared<func::AlwaysTrue<typename Graph::EdgeId>>()));
-
-    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
-}
-
-template<class Graph>
-bool MaxFlowRemoveErroneousEdges(
-    Graph &g,
-    const debruijn_config::simplification::max_flow_ec_remover& mfec_config,
-    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-    if (!mfec_config.enabled)
-        return false;
-    INFO("Removing connections based on max flow strategy");
-    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
-        g.k(), (size_t) mfec_config.max_ec_length_coefficient);
-    omnigraph::MaxFlowECRemover<Graph> erroneous_edge_remover(
-        g, max_length, mfec_config.uniqueness_length,
-        mfec_config.plausibility_length, removal_handler);
-    return erroneous_edge_remover.Process();
-}
-
-template<class Graph>
-bool RemoveComplexBulges(
-    Graph& g,
-    debruijn_config::simplification::complex_bulge_remover cbr_config,
-    size_t /*iteration*/ = 0) {
-    if (!cbr_config.enabled)
-        return false;
-    INFO("Removing complex bulges");
-    size_t max_length = (size_t) ((double) g.k() * cbr_config.max_relative_length);
-    size_t max_diff = cbr_config.max_length_difference;
-    omnigraph::complex_br::ComplexBulgeRemover<Graph> complex_bulge_remover(
-        g, max_length, max_diff);
-    return complex_bulge_remover.Run();
-}
-
-template<class Graph>
-bool RemoveHiddenEC(Graph& g,
-                    const FlankingCoverage<Graph>& flanking_cov,
-                    double determined_coverage_threshold,
-                    debruijn_config::simplification::hidden_ec_remover her_config,
-                    std::function<void(typename Graph::EdgeId)> removal_handler) {
-    if (her_config.enabled) {
-        INFO("Removing hidden erroneous connections");
-        return HiddenECRemover<Graph>(g, her_config.uniqueness_length, flanking_cov,
-                               her_config.unreliability_threshold, determined_coverage_threshold,
-                               her_config.relative_threshold, removal_handler).Run();
-    }
-    return false;
-}
-
-template<class Graph>
-bool RemoveIsolatedEdges(Graph &g, size_t max_length, double max_coverage, size_t max_length_any_cov,
-                 std::function<void(typename Graph::EdgeId)> removal_handler = 0, size_t chunk_cnt = 1) {
-    typedef typename Graph::EdgeId EdgeId;
-
-    //todo add info that some other edges might be removed =)
-    INFO("Removing isolated edges");
-    INFO("All edges shorter than " << max_length_any_cov << " will be removed");
-    INFO("Also edges shorter than " << max_length << " and coverage smaller than " << max_coverage << " will be removed");
-    //todo add warn on max_length_any_cov > max_length
-
-    auto condition = func::And<EdgeId>(
-            make_shared<IsolatedEdgeCondition<Graph>>(g),
-            func::Or<EdgeId>(
-                make_shared<LengthUpperBound<Graph>>(g, max_length_any_cov),
-                func::And<EdgeId>(
-                    make_shared<LengthUpperBound<Graph>>(g, max_length),
-                    make_shared<CoverageUpperBound<Graph>>(g, max_coverage)
-                )));
-
-    if (chunk_cnt == 1) {
-        omnigraph::EdgeRemovingAlgorithm<Graph> removing_algo(g, condition, removal_handler);
-
-        return removing_algo.Run(LengthComparator<Graph>(g),
-                                         make_shared<LengthUpperBound<Graph>>(g, std::max(max_length, max_length_any_cov)));
-    } else {
-        SemiParallelAlgorithmRunner<Graph, EdgeId> runner(g);
-        SemiParallelEdgeRemovingAlgorithm<Graph> removing_algo(g, condition, removal_handler);
-
-        return RunEdgeAlgorithm(g, runner, removing_algo, chunk_cnt);
-    }
-}
-
-template<class Graph>
-bool RemoveIsolatedEdges(Graph &g, debruijn_config::simplification::isolated_edges_remover ier,
-                 size_t read_length,
-                 std::function<void(typename Graph::EdgeId)> removal_handler = 0,
-                 size_t chunk_cnt = 1) {
-    size_t max_length = std::max(read_length, ier.max_length_any_cov);
-    return RemoveIsolatedEdges(g, ier.max_length, ier.max_coverage, max_length, removal_handler, chunk_cnt);
-}
-
-//todo move to some of the utils files
-template<class Graph>
-class CountingCallback {
-    typedef typename Graph::EdgeId EdgeId;
-    bool report_on_destruction_;
-    std::atomic<size_t> cnt_;
-
-public:
-    CountingCallback(bool report_on_destruction = false) :
-            report_on_destruction_(report_on_destruction), cnt_(0) {
-    }
-
-    ~CountingCallback() {
-        if (report_on_destruction_)
-            Report();
-    }
-    
-    void HandleDelete(EdgeId /*e*/) {
-        cnt_++;
-    }
-
-    void Report() {
-        TRACE(cnt_ << " edges were removed.")
-        cnt_ = 0;
-    }
-
-private:
-    DECL_LOGGER("CountingCallback");
-};
-
-template<class Graph>
-std::function<void(typename Graph::EdgeId)> AddCountingCallback(CountingCallback<Graph>& cnt_callback, std::function<void(typename Graph::EdgeId)> handler) {
-    std::function<void(typename Graph::EdgeId)> cnt_handler = std::bind(&CountingCallback<Graph>::HandleDelete, std::ref(cnt_callback), std::placeholders::_1);
-    return func::Composition<typename Graph::EdgeId>(handler, cnt_handler);
-}
-
-//std::function<void(EdgeId)> AddCountingCallback(std::function<void(EdgeId)> handler) {
-//    auto cnt_callback_ptr = make_shared<CountingCallback<Graph>>(true);
-//    std::function<void(EdgeId)> cnt_handler = boost::bind(&CountingCallback<Graph>::HandleDelete, cnt_callback_ptr, _1);
-//    return func::Composition<EdgeId>(handler, cnt_handler);
-//}
-
-template<class Graph>
-void ParallelCompress(Graph& g, size_t chunk_cnt, bool loop_post_compression = true) {
-    INFO("Parallel compression");
-    debruijn::simplification::ParallelCompressor<Graph> compressor(g);
-    TwoStepAlgorithmRunner<Graph, typename Graph::VertexId> runner(g, false);
-    RunVertexAlgorithm(g, runner, compressor, chunk_cnt);
-
-    //have to call cleaner to get rid of new isolated vertices
-    CleanGraph(g, chunk_cnt);
-
-    if (loop_post_compression) {
-        INFO("Launching post-compression to compress loops");
-        CompressAllVertices(g, chunk_cnt);
-    }
-}
-
-template<class Graph>
-bool ParallelClipTips(Graph& g,
-              const string& tip_condition,
-              const SimplifInfoContainer& info,
-              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-    INFO("Parallel tip clipping");
-
-    string condition_str = tip_condition;
-
-    ConditionParser<Graph> parser(g, condition_str, info);
-
-    parser();
-
-    debruijn::simplification::ParallelTipClippingFunctor<Graph> tip_clipper(g, 
-        parser.max_length_bound(), parser.max_coverage_bound(), removal_handler);
-    
-    AlgorithmRunner<Graph, typename Graph::VertexId> runner(g);
-
-    RunVertexAlgorithm(g, runner, tip_clipper, info.chunk_cnt());
-
-    ParallelCompress(g, info.chunk_cnt());
-    //Cleaner is launched inside ParallelCompression
-    //CleanGraph(g, info.chunk_cnt());
-
-    return true;
-}
-
-template<class Graph>
-bool ClipComplexTips(Graph& g, debruijn_config::simplification::complex_tip_clipper ctc_conf) {
-    if(!ctc_conf.enabled) {
-        INFO("Complex tip clipping disabled");
-    	return false;
-    }
-    INFO("Complex tip clipping");
-    size_t max_edge_length = g.k() * 2;
-    ComplexTipClipper<Graph> tip_clipper(g, max_edge_length, "");
-    tip_clipper.Run();
-    return true;
-}
-
-
-//template<class Graph>
-//bool ParallelRemoveBulges(Graph& g,
-//              const debruijn_config::simplification::bulge_remover& br_config,
-//              size_t /*read_length*/,
-//              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-//    INFO("Parallel bulge remover");
-//
-//    size_t max_length = LengthThresholdFinder::MaxBulgeLength(
-//        g.k(), br_config.max_bulge_length_coefficient,
-//        br_config.max_additive_length_coefficient);
-//
-//    DEBUG("Max bulge length " << max_length);
-//
-//    debruijn::simplification::ParallelSimpleBRFunctor<Graph> bulge_remover(g,
-//                            max_length,
-//                            br_config.max_coverage,
-//                            br_config.max_relative_coverage,
-//                            br_config.max_delta,
-//                            br_config.max_relative_delta,
-//                            removal_handler);
-//    for (VertexId v : g) {
-//        bulge_remover(v);
-//    }
-//
-//    Compress(g);
-//    return true;
-//}
-
-template<class Graph>
-bool ParallelEC(Graph& g,
-              const string& ec_condition,
-              const SimplifInfoContainer& info,
-              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-    INFO("Parallel ec remover");
-
-    ConditionParser<Graph> parser(g, ec_condition, info);
-
-    auto condition = parser();
-
-    size_t max_length = parser.max_length_bound();
-    double max_coverage = parser.max_coverage_bound();
-
-    debruijn::simplification::CriticalEdgeMarker<Graph> critical_marker(g, info.chunk_cnt());
-    critical_marker.PutMarks();
-
-    debruijn::simplification::ParallelLowCoverageFunctor<Graph> ec_remover(g,
-                            max_length,
-                            max_coverage,
-                            removal_handler);
-
-    TwoStepAlgorithmRunner<Graph, typename Graph::EdgeId> runner(g, true);
-
-    RunEdgeAlgorithm(g, runner, ec_remover, info.chunk_cnt());
-
-    critical_marker.ClearMarks();
-
-    ParallelCompress(g, info.chunk_cnt());
-    //called in parallel compress
-    //CleanGraph(g, info.chunk_cnt());
-    return true;
-}
-
-template<class Graph>
-class SmartIteratorsHolder {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    typedef typename Graph::VertexIt VertexIt;
-    typedef typename Graph::DataMasterT DataMasterT;
-    typedef omnigraph::ObservableGraph<DataMasterT> ObservableGraphT;
-    typedef omnigraph::SmartEdgeIterator<ObservableGraphT, omnigraph::CoverageComparator<Graph>> CoverageOrderIteratorT;
-    typedef omnigraph::SmartEdgeIterator<ObservableGraphT, omnigraph::LengthComparator<Graph>> LengthOrderIteratorT;
-    const Graph& g_;
-    const bool persistent_;
-    std::shared_ptr<LengthOrderIteratorT> tip_smart_it_;
-    std::shared_ptr<CoverageOrderIteratorT> bulge_smart_it_;
-    std::shared_ptr<CoverageOrderIteratorT> ec_smart_it_;
-
-public:
-    SmartIteratorsHolder(const Graph& g, bool persistent) : g_(g), persistent_(persistent) {
-        if (persistent_) {
-            INFO("Using permanent iterators");
-        }
-    }
-
-    std::shared_ptr<LengthOrderIteratorT> tip_smart_it() {
-        if (tip_smart_it_)
-            return tip_smart_it_;
-        auto answer = make_shared<LengthOrderIteratorT>(g_, omnigraph::LengthComparator<Graph>(g_), true);
-        if (persistent_)
-            tip_smart_it_ = answer;
-        return answer;
-    }
-
-    std::shared_ptr<CoverageOrderIteratorT> bulge_smart_it() {
-        if (bulge_smart_it_)
-            return bulge_smart_it_;
-        auto answer = make_shared<CoverageOrderIteratorT>(g_, omnigraph::CoverageComparator<Graph>(g_), true);
-        if (persistent_)
-            bulge_smart_it_ = answer;
-        return answer;
-    }
-
-    std::shared_ptr<CoverageOrderIteratorT> ec_smart_it() {
-        if (ec_smart_it_)
-            return ec_smart_it_;
-        auto answer = make_shared<CoverageOrderIteratorT>(g_, omnigraph::CoverageComparator<Graph>(g_), true);
-        if (persistent_)
-            ec_smart_it_ = answer;
-        return answer;
-    }
-
-    void ResetIterators() {
-        tip_smart_it_ = nullptr;
-        ec_smart_it_ = nullptr;
-        bulge_smart_it_ = nullptr;
-    }
-};
-
-inline bool FastModeAvailable(const SimplifInfoContainer& info, double activation_cov_threshold) {
-    const auto& cfg = cfg::get();
-
-    //todo fix logic
-    //also handles meta case for now
-    if (cfg.ds.single_cell) {
-        return !cfg::get().main_iteration;
-    }
-
-    if (math::eq(info.detected_mean_coverage(), 0.) &&
-        !cfg.kcm.use_coverage_threshold) {
-        WARN("Mean coverage wasn't reliably estimated");
-        return false;
-    }
-
-    //todo review logic
-    if (math::ls(info.detected_mean_coverage(), activation_cov_threshold) &&
-        !(cfg.kcm.use_coverage_threshold &&
-          math::ge(cfg.kcm.coverage_threshold, activation_cov_threshold))) {
-        INFO("Estimated mean coverage " << info.detected_mean_coverage() <<
-             " is less than fast mode activation coverage " << activation_cov_threshold);
-        return false;
-    }
-
-    return true;
-}
-
-class GraphSimplifier {
-    typedef std::function<void(EdgeId)> HandlerF;
-    conj_graph_pack &gp_;
-    SimplifInfoContainer info_container_;
-    const debruijn_config::simplification simplif_cfg_;
-    HandlerF removal_handler_;
-    stats::detail_info_printer& printer_;
-
-    void PreSimplification() {
-        INFO("PROCEDURE == Presimplification");
-        RemoveSelfConjugateEdges(gp_.g, gp_.k_value + 100, 1., removal_handler_, info_container_.chunk_cnt());
-
-        if (!simplif_cfg_.presimp.enabled || !simplif_cfg_.fast_features) {
-            INFO("Further presimplification is disabled");
-            return;
-        }
-
-        //todo make parallel version
-        RemoveIsolatedEdges(gp_.g, simplif_cfg_.presimp.ier, info_container_.read_length(), removal_handler_, info_container_.chunk_cnt());
-
-        if (info_container_.chunk_cnt() > 1 && EnableParallel()) {
-            ParallelPreSimplification();
-        } else {
-            NonParallelPreSimplification();
-        }
-
-    }
-    
-    void NonParallelPreSimplification() {
-        INFO("Non parallel mode");
-        CountingCallback<Graph> cnt_callback;
-
-        HandlerF removal_handler = AddCountingCallback(cnt_callback, removal_handler_);
-
-        debruijn_config::simplification::tip_clipper tc_config;
-        tc_config.condition = simplif_cfg_.presimp.tip_condition;
-
-        ClipTips(gp_.g, tc_config, info_container_, removal_handler);
-
-        cnt_callback.Report();
-
-        debruijn_config::simplification::erroneous_connections_remover ec_config;
-        ec_config.condition = simplif_cfg_.presimp.ec_condition;
-
-        RemoveLowCoverageEdges(gp_.g, ec_config, info_container_, removal_handler);
-
-        cnt_callback.Report();
-    }
-
-    void ParallelPreSimplification() {
-        INFO("Parallel mode");
-        CountingCallback<Graph> cnt_callback;
-
-        HandlerF removal_handler = AddCountingCallback(cnt_callback, removal_handler_);
-
-        ParallelClipTips(gp_.g, simplif_cfg_.presimp.tip_condition, info_container_,
-                         removal_handler);
-
-        cnt_callback.Report();
-        //    INFO("Early tip clipping");
-        //
-        //    ClipTipsWithProjection(gp, cfg::get().simp.tc,
-        //                           cfg::get().graph_read_corr.enable, cfg::get().ds.RL(),
-        //                           determined_coverage_threshold, removal_handler);
-        //
-
-
-    //    ParallelRemoveBulges(gp.g, cfg::get().simp.br, cfg::get().ds.RL(),
-    //                         removal_handler);
-    //
-    //    cnt_callback.Report();
-
-        ParallelEC(gp_.g, simplif_cfg_.presimp.ec_condition, info_container_,
-                   removal_handler);
-
-        cnt_callback.Report();
-
-        //todo maybe enable with small
-    //    INFO("Isolated edge remover");
-    //    size_t max_length = std::max(cfg::get().ds.RL(), cfg::get().simp.ier.max_length_any_cov);
-    //    INFO("All edges of length smaller than " << max_length << " will be removed");
-    //    IsolatedEdgeRemover<Graph>(gp.g, cfg::get().simp.ier.max_length,
-    //                               cfg::get().simp.ier.max_coverage, max_length)
-    //            .RemoveIsolatedEdges();
-    //
-    //    INFO("Early bulge removal");
-    //    RemoveBulges(gp.g, cfg::get().simp.br, 0, removal_handler, gp.g.k() + 1);
-    }
-
-    bool EnableParallel() {
-        if (simplif_cfg_.presimp.parallel) {
-            INFO("Trying to enable parallel presimplification.");
-            if (gp_.g.AllHandlersThreadSafe()) {
-                return true;
-            } else {
-                WARN("Not all handlers are threadsafe, switching to non-parallel presimplif");
-                //gp.g.PrintHandlersNames();
-            }
-        }
-        return false;
-    }
-
-    bool AllTopology() {
-        bool res = TopologyRemoveErroneousEdges(gp_.g, simplif_cfg_.tec,
-                                                removal_handler_);
-        res |= TopologyReliabilityRemoveErroneousEdges(gp_.g, simplif_cfg_.trec,
-                                                       removal_handler_);
-        res |= RemoveThorns(gp_.g, simplif_cfg_.isec, removal_handler_);
-        res |= MultiplicityCountingRemoveErroneousEdges(gp_.g, simplif_cfg_.tec,
-                                                        removal_handler_);
-        return res;
-    }
-
-    bool FinalRemoveErroneousEdges() {
-
-    //    gp.ClearQuality();
-    //    gp.FillQuality();
-    //    auto colorer = debruijn_graph::DefaultGPColorer(gp);
-    //    omnigraph::DefaultLabeler<typename gp_t::graph_t> labeler(gp.g, gp.edge_pos);
-    //    QualityEdgeLocalityPrintingRH<Graph> qual_removal_handler(gp.g, gp.edge_qual, labeler, colorer,
-    //                                   cfg::get().output_dir + "pictures/colored_edges_deleted/");
-    //
-    //    //positive quality edges removed (folder colored_edges_deleted)
-    //    std::function<void(EdgeId)> qual_removal_handler_f = boost::bind(
-    //            //            &QualityLoggingRemovalHandler<Graph>::HandleDelete,
-    //            &QualityEdgeLocalityPrintingRH<Graph>::HandleDelete,
-    //            boost::ref(qual_removal_handler), _1);
-    //
-    //    std::function<void(set<EdgeId>)> set_removal_handler_f = boost::bind(
-    //                &omnigraph::simplification::SingleEdgeAdapter<set<EdgeId>>, _1, qual_removal_handler_f);
-    //
-
-        std::function<void(set<EdgeId>)> set_removal_handler_f(0);
-        if (removal_handler_) {
-            set_removal_handler_f = std::bind(
-                &omnigraph::simplification::SingleEdgeAdapter<set<EdgeId>>, std::placeholders::_1, removal_handler_);
-        }
-
-        bool changed = RemoveRelativelyLowCoverageComponents(gp_.g, gp_.flanking_cov,
-                                              simplif_cfg_.rcc, info_container_, set_removal_handler_f);
-
-        //changed |= DisconnectRelativelyLowCoverageEdges(gp_.g, gp_.flanking_cov);
-
-        if (simplif_cfg_.topology_simplif_enabled && cfg::get().main_iteration) {
-            changed |= AllTopology();
-            changed |= MaxFlowRemoveErroneousEdges(gp_.g, simplif_cfg_.mfec,
-                                                   removal_handler_);
-        }
-        return changed;
-    }
-
-    void PostSimplification() {
-        typedef std::function<void(EdgeId, const std::vector<EdgeId>&)> opt_callback_f;
-        INFO("PROCEDURE == Post simplification");
-        size_t iteration = 0;
-
-        SmartIteratorsHolder<Graph> iterators_holder(gp_.g, simplif_cfg_.persistent_cycle_iterators
-                                                                && simplif_cfg_.fast_features);
-
-        shared_ptr<SmartIteratorsHolder<Graph>> final_iterators_holder_ptr;
-        //fixme need better configuration
-        if (cfg::get().ds.meta && cfg::get().main_iteration) {
-            final_iterators_holder_ptr = make_shared<SmartIteratorsHolder<Graph>>(gp_.g, simplif_cfg_.persistent_cycle_iterators
-                                                                && simplif_cfg_.fast_features);
-        }
-
-        bool enable_flag = true;
-        while (enable_flag) {
-            enable_flag = false;
-
-            INFO("Iteration " << iteration);
-            if (simplif_cfg_.topology_simplif_enabled) {
-                enable_flag |= TopologyClipTips(gp_.g, simplif_cfg_.ttc, info_container_.read_length(),
-                                                removal_handler_);
-            }
-
-            enable_flag |= FinalRemoveErroneousEdges();
-
-            enable_flag |=  ClipComplexTips(gp_.g, simplif_cfg_.complex_tc);
-
-            enable_flag |= ClipTips(gp_.g, *iterators_holder.tip_smart_it(),
-                                                  simplif_cfg_.tc, 
-                                                  info_container_,
-                                                  cfg::get().graph_read_corr.enable ?
-                                                          WrapWithProjectionCallback(gp_, removal_handler_) : removal_handler_);
-
-            enable_flag |= RemoveBulges(gp_.g, *iterators_holder.bulge_smart_it(),
-                                simplif_cfg_.br,
-                                (opt_callback_f)0, removal_handler_);
-
-
-            //fixme need better configuration
-            if (cfg::get().ds.meta && cfg::get().main_iteration) {
-                enable_flag |= ClipTips(gp_.g, *final_iterators_holder_ptr->tip_smart_it(),
-                                                      simplif_cfg_.final_tc, //todo get rid of this logic
-                                                      info_container_,
-                                                      cfg::get().graph_read_corr.enable ?
-                                                              WrapWithProjectionCallback(gp_, removal_handler_) : removal_handler_);
-    
-    
-                enable_flag |= RemoveBulges(gp_.g, *final_iterators_holder_ptr->bulge_smart_it(),
-                                    simplif_cfg_.final_br,
-                                    //todo get rid of this logic and add br run with standard params
-                                    (opt_callback_f)0, removal_handler_);
-            }
-
-
-            enable_flag |= RemoveComplexBulges(gp_.g, simplif_cfg_.cbr, iteration);
-
-
-            iteration++;
-
-            //    printer(ipp_before_final_err_con_removal);
-            //        printer(ipp_final_tip_clipping, str(format("_%d") % iteration));
-            //        printer(ipp_final_err_con_removal, str(format("_%d") % iteration));
-            //        printer(ipp_final_bulge_removal, str(format("_%d") % iteration));
-        }
-
-        if (simplif_cfg_.topology_simplif_enabled) {
-            RemoveHiddenEC(gp_.g, gp_.flanking_cov, info_container_.detected_coverage_bound(), simplif_cfg_.her, removal_handler_);
-        }
-    }
-
-    //inline
-    //void IdealSimplification(Graph& graph,
-    //                         std::function<double(EdgeId)> quality_handler_f) {
-    //    for (auto iterator = graph.SmartEdgeBegin(); !iterator.IsEnd();
-    //         ++iterator) {
-    //        if (math::eq(quality_handler_f(*iterator), 0.))
-    //            graph.DeleteEdge(*iterator);
-    //    }
-    //    CompressAllVertices(graph);
-    //}
-
-
-
-    void SimplificationCycle(SmartIteratorsHolder<Graph>& iterators_holder) {
-        size_t iteration = info_container_.iteration();
-
-        INFO("PROCEDURE == Simplification cycle, iteration " << (iteration + 1));
-
-        CountingCallback<Graph> cnt_callback;
-
-        HandlerF removal_handler = AddCountingCallback(cnt_callback, removal_handler_);
-
-        DEBUG(iteration << " TipClipping");
-        auto tip_removal_handler = cfg::get().graph_read_corr.enable ?
-                WrapWithProjectionCallback(gp_, removal_handler) : removal_handler;
-        ClipTips(gp_.g, *iterators_holder.tip_smart_it(), simplif_cfg_.tc, info_container_, tip_removal_handler);
-        cnt_callback.Report();
-        DEBUG(iteration << " TipClipping stats");
-        printer_(ipp_tip_clipping, fmt::format("_{:d}", iteration));
-
-        if (!simplif_cfg_.disable_br_in_cycle || !simplif_cfg_.fast_features) {
-            DEBUG(iteration << " BulgeRemoval");
-            RemoveBulges(gp_.g, *iterators_holder.bulge_smart_it(), simplif_cfg_.br,
-                (std::function<void(EdgeId, const std::vector<EdgeId> &)>)0, removal_handler);
-            cnt_callback.Report();
-            DEBUG(iteration << " BulgeRemoval stats");
-            printer_(ipp_bulge_removal, fmt::format("_{:d}", iteration));
-        }
-
-        DEBUG(iteration << " ErroneousConnectionsRemoval");
-        RemoveLowCoverageEdges(gp_.g, *iterators_holder.ec_smart_it(), simplif_cfg_.ec, info_container_, removal_handler);
-        cnt_callback.Report();
-        DEBUG(iteration << " ErroneousConnectionsRemoval stats");
-        printer_(ipp_err_con_removal, fmt::format("_{:d}", iteration));
-    }
-
-public:
-    GraphSimplifier(conj_graph_pack &gp, const SimplifInfoContainer& info_container,
-                    const debruijn_config::simplification& simplif_cfg,
-                    const std::function<void(EdgeId)>& removal_handler,
-                    stats::detail_info_printer& printer)
-            : gp_(gp),
-              info_container_(info_container),
-              simplif_cfg_(simplif_cfg),
-              removal_handler_(removal_handler),
-              printer_(printer) {
-
-    }
-
-    void SimplifyGraph() {
-        printer_(ipp_before_simplification);
-        INFO("Graph simplification started");
-
-        if (simplif_cfg_.fast_features) {
-            INFO("Fast simplification mode enabled")
-        } else {
-            INFO("Fast simplification mode disabled");
-        }
-
-        PreSimplification();
-
-        info_container_.set_iteration_count(simplif_cfg_.cycle_iter_count);
-
-        SmartIteratorsHolder<Graph> iterators_holder(gp_.g, simplif_cfg_.persistent_cycle_iterators
-                                                     && simplif_cfg_.fast_features);
-
-        for (size_t i = 0; i < simplif_cfg_.cycle_iter_count; i++) {
-            info_container_.set_iteration(i);
-            SimplificationCycle(iterators_holder);
-        }
-
-        if (simplif_cfg_.post_simplif_enabled) {
-            PostSimplification();
-        } else {
-            INFO("PostSimplification disabled");
-        }
-    }
-};
-
-}
-
-}
diff --git a/src/debruijn/graphio.hpp b/src/debruijn/graphio.hpp
index 1736538..2534204 100644
--- a/src/debruijn/graphio.hpp
+++ b/src/debruijn/graphio.hpp
@@ -57,14 +57,16 @@ void SaveKmerMapper(const string& file_name,
 }
 
 template<class KmerMapper>
-void LoadKmerMapper(const string& file_name,
+bool LoadKmerMapper(const string& file_name,
                     KmerMapper& kmer_mapper) {
     kmer_mapper.clear();
     std::ifstream file;
     file.open((file_name + ".kmm").c_str(),
               std::ios_base::binary | std::ios_base::in);
+    if (!file.is_open()) {
+        return false;
+    }
     INFO("Reading kmer mapper, " << file_name <<" started");
-    VERIFY(file.is_open());
 
     uint32_t k_;
     file.read((char *) &k_, sizeof(uint32_t));
@@ -73,6 +75,7 @@ void LoadKmerMapper(const string& file_name,
     kmer_mapper.BinRead(file);
 
     file.close();
+    return true;
 }
 
 template<class EdgeIndex>
@@ -229,12 +232,11 @@ class DataPrinter {
         size_t comp_size = 0;
         for (auto I = component_.e_begin(), E = component_.e_end(); I != E; ++I) {
             EdgeId e1 = *I;
-            const auto& inner_map = paired_index.GetEdgeInfo(e1, 0);
-            for (auto II = inner_map.begin(), IE = inner_map.end(); II != IE; ++II) {
-                EdgeId e2 = II->first;
-                const auto& hist = II->second;
-                if (component_.contains(e2)) { // if the second edge also lies in the same component
-                    comp_size += hist.size();
+            auto inner_map = paired_index.Get(e1);
+            for (auto entry : inner_map) {
+                if (component_.contains(entry.first)) { // if the second edge also lies in the same component
+                    comp_size += entry.second.size();
+                    continue;
                 }
             }
         }
@@ -243,12 +245,12 @@ class DataPrinter {
 
         for (auto I = component_.e_begin(), E = component_.e_end(); I != E; ++I) {
             EdgeId e1 = *I;
-            const auto& inner_map = paired_index.GetEdgeInfo(e1, 0);
-            std::map<typename Graph::EdgeId, typename Index::Histogram> ordermap(inner_map.begin(), inner_map.end());
-            for (const auto& entry : ordermap) {
-                EdgeId e2 = entry.first; const auto& hist = entry.second;
+            const auto& inner_map = paired_index.RawGet(e1);
+            std::map<typename Graph::EdgeId, typename Index::RawHistProxy> ordermap(inner_map.begin(), inner_map.end());
+            for (auto entry : ordermap) {
+                EdgeId e2 = entry.first;
                 if (component_.contains(e2))
-                  for (Point point : hist)
+                  for (auto point : entry.second)
                     fprintf(file, "%zu %zu %.2f %.2f %.2f .\n",
                             e1.int_id(), e2.int_id(), math::eq((double)point.d, .0) ? .0 : (double)point.d, (double)point.weight, (double)point.variation());
             }
@@ -412,8 +414,9 @@ class DataScanner {
         return true;
     }
 
+    template<typename Index>
     void LoadPaired(const string& file_name,
-                    PairedInfoIndexT<Graph>& paired_index,
+                    Index& paired_index,
                     bool force_exists = true) {
         typedef typename Graph::EdgeId EdgeId;
         FILE* file = fopen((file_name + ".prd").c_str(), "r");
@@ -429,7 +432,7 @@ class DataScanner {
         size_t paired_count;
         int read_count = fscanf(file, "%zu \n", &paired_count);
         VERIFY(read_count == 1);
-        for (size_t i = 0; i < paired_count; i++) {
+        while (!feof(file)) {
             size_t first_real_id, second_real_id;
             double w, d, v;
             read_count = fscanf(file, "%zu %zu %lf %lf %lf .\n",
@@ -442,43 +445,7 @@ class DataScanner {
             if (e1 == EdgeId(NULL) || e2 == EdgeId(NULL))
                 continue;
             TRACE(e1 << " " << e2 << " " << d << " " << w);
-            paired_index.AddPairInfo(e1, e2, { d, w, v }, false);
-        }
-        DEBUG("PII SIZE " << paired_index.size());
-        fclose(file);
-    }
-
-      void LoadPaired(const string& file_name,
-                      UnclusteredPairedInfoIndexT<Graph>& paired_index,
-                      bool force_exists = true) {
-        typedef typename Graph::EdgeId EdgeId;
-        FILE* file = fopen((file_name + ".prd").c_str(), "r");
-        INFO((file_name + ".prd"));
-        if (force_exists) {
-            VERIFY(file != NULL);
-        } else if (file == NULL) {
-            INFO("Paired info not found, skipping");
-            return;
-        }
-        INFO("Reading paired info from " << file_name << " started");
-
-        size_t paired_count;
-        int read_count = fscanf(file, "%zu \n", &paired_count);
-        VERIFY(read_count == 1);
-        for (size_t i = 0; i < paired_count; i++) {
-            size_t first_real_id, second_real_id;
-            double w, d, v;
-            read_count = fscanf(file, "%zu %zu %lf %lf %lf .\n",
-                                &first_real_id, &second_real_id, &d, &w, &v);
-            VERIFY(read_count == 5);
-            TRACE(first_real_id<< " " << second_real_id << " " << d << " " << w);
-            VERIFY(this->edge_id_map().find(first_real_id) != this->edge_id_map().end())
-            EdgeId e1 = this->edge_id_map()[first_real_id];
-            EdgeId e2 = this->edge_id_map()[second_real_id];
-            if (e1 == EdgeId(NULL) || e2 == EdgeId(NULL))
-                continue;
-            TRACE(e1 << " " << e2 << " " << d << " " << w);
-            paired_index.AddPairInfo(e1, e2, { d, w }, false);
+            paired_index.Add(e1, e2, { d, w, v });
         }
         DEBUG("PII SIZE " << paired_index.size());
         fclose(file);
@@ -891,7 +858,9 @@ void ScanGraphPack(const string& file_name,
     scanner.LoadPositions(file_name, gp.edge_pos);
     //load kmer_mapper only if needed
     if (gp.kmer_mapper.IsAttached())
-        LoadKmerMapper(file_name, gp.kmer_mapper);
+        if (!LoadKmerMapper(file_name, gp.kmer_mapper)) {
+            WARN("Cannot load kmer_mapper, information on projected kmers will be missed");
+        }
     if (!scanner.LoadFlankingCoverage(file_name, gp.flanking_cov)) {
         gp.flanking_cov.Fill(gp.index.inner_index());
     }
diff --git a/src/debruijn/indices/edge_multi_index.hpp b/src/debruijn/indices/edge_multi_index.hpp
index 680b16f..e33c919 100644
--- a/src/debruijn/indices/edge_multi_index.hpp
+++ b/src/debruijn/indices/edge_multi_index.hpp
@@ -15,6 +15,7 @@
 #include "perfect_hash_map.hpp"
 #include "edge_info_updater.hpp"
 #include "kmer_splitters.hpp"
+#include "edge_position_index.hpp"
 
 namespace debruijn_graph {
 
diff --git a/src/debruijn/indices/kmer_extension_index.hpp b/src/debruijn/indices/kmer_extension_index.hpp
index 18d7097..457b506 100644
--- a/src/debruijn/indices/kmer_extension_index.hpp
+++ b/src/debruijn/indices/kmer_extension_index.hpp
@@ -16,7 +16,6 @@
 #include "kmer_splitters.hpp"
 #include "simple_tools.hpp"
 #include "storing_traits.hpp"
-
 #include <bitset>
 
 namespace debruijn_graph {
diff --git a/src/debruijn/indices/kmer_splitters.hpp b/src/debruijn/indices/kmer_splitters.hpp
index 61ab12f..f347be5 100644
--- a/src/debruijn/indices/kmer_splitters.hpp
+++ b/src/debruijn/indices/kmer_splitters.hpp
@@ -6,12 +6,6 @@
 //***************************************************************************
 
 #pragma once
-/*
- * kmer_splitters.hpp
- *
- *  Created on: May 24, 2013
- *      Author: anton
- */
 
 #include "io/io_helper.hpp"
 #include "storing_traits.hpp"
@@ -250,7 +244,7 @@ path::files_t DeBruijnReadKMerSplitter<Read, KmerFilter>::Split(size_t num_files
   }
 
   INFO("Used " << counter << " reads. Maximum read length " << rl);
-  INFO("Average read length " << 1.0 * bases / counter);
+  INFO("Average read length " << double(bases) / double(counter));
   rs_ = { counter, rl, bases };
 
   return out;
diff --git a/src/debruijn/indices/perfect_hash_map.hpp b/src/debruijn/indices/perfect_hash_map.hpp
index 7e3bf6b..7da07d8 100644
--- a/src/debruijn/indices/perfect_hash_map.hpp
+++ b/src/debruijn/indices/perfect_hash_map.hpp
@@ -25,6 +25,7 @@
 #include <cstdlib>
 #include <cstdio>
 #include <cstdint>
+#include <io/kmer_iterator.hpp>
 
 namespace debruijn_graph {
 
@@ -338,6 +339,11 @@ public:
         return kmer_iterator(this->KMersFilename_, KMer::GetDataSize(base::k()));
     }
 
+    std::vector<kmer_iterator> kmer_begin(size_t parts) const {
+        return io::make_kmer_iterator<KMer>(this->KMersFilename_, base::k(), parts);
+    }
+
+
     template<class KmerCounter>
     void BuildIndex(KmerCounter& counter, size_t bucket_num,
                     size_t thread_num) {
diff --git a/src/debruijn/is_counter.hpp b/src/debruijn/is_counter.hpp
index b7496ae..ace7681 100644
--- a/src/debruijn/is_counter.hpp
+++ b/src/debruijn/is_counter.hpp
@@ -21,8 +21,9 @@
 
 namespace debruijn_graph {
 
+using namespace omnigraph;
+
 class InsertSizeCounter: public SequenceMapperListener {
-    typedef std::map<int, size_t> HistType;
 
 public:
 
@@ -64,15 +65,56 @@ public:
     }
 
     virtual void ProcessPairedRead(size_t thread_index,
+                                   const io::PairedRead& r,
+                                   const MappingPath<EdgeId>& read1,
+                                   const MappingPath<EdgeId>& read2) {
+        ProcessPairedRead(thread_index, read1, read2, (int) r.second().size(),
+                          (int) r.first().GetLeftOffset() + (int) r.second().GetRightOffset());
+    }
+
+    virtual void ProcessPairedRead(size_t thread_index,
+                                   const io::PairedReadSeq& r,
+                                   const MappingPath<EdgeId>& read1,
+                                   const MappingPath<EdgeId>& read2) {
+        ProcessPairedRead(thread_index, read1, read2, (int) r.second().size(),
+                          (int) r.first().GetLeftOffset() + (int) r.second().GetRightOffset());
+    }
+
+    virtual void ProcessSingleRead(size_t /*thread_index*/, const io::SingleRead&, const MappingPath<EdgeId>& /*read*/) {
+    }
+
+    virtual void ProcessSingleRead(size_t /*thread_index*/, const io::SingleReadSeq&, const MappingPath<EdgeId>& /*read*/) {
+    }
+
+    virtual void MergeBuffer(size_t thread_index) {
+        if (thread_index != 0) {
+            for (auto it = tmp_hists_[thread_index]->begin(); it != tmp_hists_[thread_index]->end(); ++it) {
+                (*tmp_hists_[0])[it->first] += it->second;
+            }
+            tmp_hists_[thread_index]->clear();
+        }
+    }
+
+    void FindMean(double& mean, double& delta, std::map<size_t, size_t>& percentiles) const {
+        find_mean(hist_, mean, delta, percentiles);
+    }
+
+    void FindMedian(double& median, double& mad, HistType& histogram) const {
+        find_median(hist_, median, mad, histogram);
+    }
+
+private:
+    virtual void ProcessPairedRead(size_t thread_index,
                                    const MappingPath<EdgeId>& read1,
                                    const MappingPath<EdgeId>& read2,
-                                   size_t dist) {
+                                   int read2_size,
+                                   int is_delta) {
 
         ++total_.arr_[thread_index];
 
         if (read1.size() == 1 && read2.size() == 1 &&
-                read2.simple_path().front() == read1.simple_path().front() &&
-                gp_.g.length(read1.simple_path().front()) >= edge_length_threshold_) {
+            read2.simple_path().front() == read1.simple_path().front() &&
+            gp_.g.length(read1.simple_path().front()) >= edge_length_threshold_) {
 
             auto mapping_edge_1 = read1.front().second;
             auto mapping_edge_2 = read2.front().second;
@@ -81,8 +123,8 @@ public:
             TRACE("Read 1: " << (int) mapping_edge_1.mapped_range.start_pos << " - " << (int) mapping_edge_1.initial_range.start_pos << " = " << read1_start);
             int read2_start = (int) mapping_edge_2.mapped_range.start_pos - (int) mapping_edge_2.initial_range.start_pos;
             TRACE("Read 2: " << (int) mapping_edge_2.mapped_range.start_pos << " - " << (int) mapping_edge_2.initial_range.start_pos << " = " << read2_start);
-            int is = read2_start - read1_start + (int) dist;
-            TRACE("IS: " << read2_start << " - " <<  read1_start << " + " << (int) dist << "(" << dist << ") = " << is);
+            int is = read2_start - read1_start + read2_size + is_delta;
+            TRACE("IS: " << read2_start << " - " <<  read1_start << " + " << (int) is_delta << " = " << is);
 
             if (is > 0 || !ignore_negative_) {
                 (*tmp_hists_[thread_index])[is] += 1;
@@ -94,105 +136,6 @@ public:
         }
 
     }
-
-    virtual void ProcessSingleRead(size_t /*thread_index*/, const MappingPath<EdgeId>& /*read*/) {
-
-    }
-
-    virtual void MergeBuffer(size_t thread_index) {
-        if (thread_index != 0) {
-            for (auto it = tmp_hists_[thread_index]->begin(); it != tmp_hists_[thread_index]->end(); ++it) {
-                (*tmp_hists_[0])[it->first] += it->second;
-            }
-            tmp_hists_[thread_index]->clear();
-        }
-    }
-
-    void FindMean(double& mean, double& delta, std::map<size_t, size_t>& percentiles) const {
-        double median = get_median(hist_);
-        double mad = get_mad(hist_, median);
-        double low = median - 5. * 1.4826 * mad;
-        double high = median + 5. * 1.4826 * mad;
-
-        DEBUG("Median IS: " << median);
-        DEBUG("MAD: " << mad);
-        DEBUG("Thresholds set to: [" << low << ", " << high << "]");
-
-        size_t n = 0;
-        double sum = 0.;
-        double sum2 = 0.;
-        DEBUG("Counting average");
-        for (auto iter = hist_.begin(); iter != hist_.end(); ++iter) {
-            if (iter->first < low || iter->first > high) {
-                continue;
-            }
-            n += iter->second;
-            sum += (double) iter->second * 1. * (double) iter->first;
-            sum2 += (double)iter->second * 1. * (double)iter->first * (double)iter->first;
-        }
-        mean = sum / (double) n;
-        delta = sqrt(sum2 / (double) n - mean * mean);
-
-        low = mean - 5 * delta;
-        high = mean + 5 * delta;
-
-        DEBUG("Mean IS: " << mean);
-        DEBUG("sd: " << delta);
-        DEBUG("Thresholds set to: [" << low << ", " << high << "]");
-
-        n = 0;
-        sum = 0.;
-        sum2 = 0.;
-        for (auto iter = hist_.begin(); iter != hist_.end(); ++iter) {
-            if (iter->first < low || iter->first > high) {
-                continue;
-            }
-            n += iter->second;
-            sum += (double) iter->second * 1. * (double) iter->first;
-            sum2 += (double) iter->second * 1. * (double) iter->first * (double) iter->first;
-        }
-        mean = sum / (double) n;
-        delta = sqrt(sum2 / (double) n - mean * mean);
-
-        DEBUG("Mean IS: " << mean);
-        DEBUG("sd: " << delta);
-
-        size_t m = 0;
-
-        DEBUG("Counting percentiles");
-        //todo optimize
-        size_t q[19];
-        for (size_t i = 1; i < 20; ++i) {
-            q[i - 1] = 5 * i;
-        }
-        for (auto iter = hist_.begin(); iter != hist_.end(); ++iter) {
-            if (iter->first < low || iter->first > high) {
-                continue;
-            }
-            size_t mm = m + iter->second;
-            for (size_t i = 0; i < utils::array_size(q); i++) {
-                size_t scaled_q_i((size_t) ((double) q[i] / 100. * (double) n));
-                if (m < scaled_q_i && mm >= scaled_q_i) {
-                    percentiles[q[i]] = iter->first;
-                }
-            }
-            m = mm;
-        }
-    }
-
-    void FindMedian(double& median, double& mad, HistType& histogram) const {
-        DEBUG("Counting median and MAD");
-        median = get_median(hist_);
-        mad = get_mad(hist_, median);
-        double low = median - 5. * 1.4826 * mad;
-        double high = median + 5. * 1.4826 * mad;
-        omnigraph::hist_crop(hist_, low, high, histogram);
-        median = get_median(histogram);
-        mad = get_mad(histogram, median);
-    }
-
-private:
-
     struct count_data {
       size_t total_;
       vector<size_t> arr_;
diff --git a/src/debruijn/kmer_coverage_model.cpp b/src/debruijn/kmer_coverage_model.cpp
index 8271ff0..ae6fb3a 100644
--- a/src/debruijn/kmer_coverage_model.cpp
+++ b/src/debruijn/kmer_coverage_model.cpp
@@ -212,7 +212,7 @@ void KMerCoverageModel::Fit() {
     ccov += cov_[i];
   }
 
-  if (abs(cov_[Valley_] - cov_[MaxCov_] < 3))
+  if (MaxCov_ - Valley_ < 3)
     WARN("Too much erroneous kmers, the estimates might be unreliable");
 
   std::vector<size_t> mvals(1 + MaxCov_ - Valley_);
diff --git a/src/debruijn/launch.hpp b/src/debruijn/launch.hpp
index 86670f4..38fe4a1 100644
--- a/src/debruijn/launch.hpp
+++ b/src/debruijn/launch.hpp
@@ -28,19 +28,19 @@ namespace spades {
 
 void assemble_genome() {
     INFO("SPAdes started");
-    INFO("Starting from stage: " << cfg::get().entry_point);
-
-    if (cfg::get().ds.meta) {
-        ERROR("Metagenomic mode not available yet");
-        return;
+    if (cfg::get().ds.meta && cfg::get().ds.reads.lib_count() != 1) {
+        ERROR("Sorry, current version of metaSPAdes can work with single library only (paired-end only).");
+        exit(239);
     }
 
+    INFO("Starting from stage: " << cfg::get().entry_point);
+
     bool two_step_rr = cfg::get().two_step_rr && cfg::get().rr_enable && cfg::get().ds.meta;
     INFO("Two-step RR enabled: " << two_step_rr);
 
     StageManager SPAdes({cfg::get().developer_mode,
-                          cfg::get().load_from,
-                          cfg::get().output_saves});
+                         cfg::get().load_from,
+                         cfg::get().output_saves});
 
     size_t read_index_cnt = cfg::get().ds.reads.lib_count();
     if (two_step_rr)
@@ -59,8 +59,8 @@ void assemble_genome() {
         conj_gp.kmer_mapper.Attach();
     }
     // Build the pipeline
-    SPAdes.add(new debruijn_graph::Construction());
-    SPAdes.add(new debruijn_graph::GenomicInfoFiller());
+    SPAdes.add(new debruijn_graph::Construction())
+          .add(new debruijn_graph::GenomicInfoFiller());
     if (cfg::get().gap_closer_enable && cfg::get().gc.before_simplify)
         SPAdes.add(new debruijn_graph::GapClosing("early_gapcloser"));
 
@@ -71,14 +71,16 @@ void assemble_genome() {
 
     SPAdes.add(new debruijn_graph::SimplificationCleanup());
     //currently cannot be used with two step rr
-    if (cfg::get().correct_mismatches && !two_step_rr)
+    if (cfg::get().correct_mismatches && !cfg::get().ds.meta)
         SPAdes.add(new debruijn_graph::MismatchCorrection());
     if (cfg::get().rr_enable) {
         if (two_step_rr) {
-            SPAdes.add(new debruijn_graph::PairInfoCount(true));
-            SPAdes.add(new debruijn_graph::DistanceEstimation(true));
-            SPAdes.add(new debruijn_graph::RepeatResolution(true));
-            SPAdes.add(new debruijn_graph::SecondPhaseSetup());
+            if (cfg::get().use_intermediate_contigs)
+                SPAdes.add(new debruijn_graph::PairInfoCount(true))
+                      .add(new debruijn_graph::DistanceEstimation(true))
+                      .add(new debruijn_graph::RepeatResolution(true))
+                      .add(new debruijn_graph::SecondPhaseSetup());
+
             SPAdes.add(new debruijn_graph::Simplification());
         }
 
@@ -97,9 +99,9 @@ void assemble_genome() {
         }
         //end pacbio
 
-        SPAdes.add(new debruijn_graph::PairInfoCount());
-        SPAdes.add(new debruijn_graph::DistanceEstimation());
-        SPAdes.add(new debruijn_graph::RepeatResolution());
+        SPAdes.add(new debruijn_graph::PairInfoCount())
+              .add(new debruijn_graph::DistanceEstimation())
+              .add(new debruijn_graph::RepeatResolution());
     } else {
         SPAdes.add(new debruijn_graph::ContigOutput());
     }
diff --git a/src/debruijn/long_read_mapper.hpp b/src/debruijn/long_read_mapper.hpp
index 1446446..acc1fc9 100644
--- a/src/debruijn/long_read_mapper.hpp
+++ b/src/debruijn/long_read_mapper.hpp
@@ -21,49 +21,57 @@
 namespace debruijn_graph {
 
 class SimpleLongReadMapper: public SequenceMapperListener {
-
 public:
-
-    SimpleLongReadMapper(conj_graph_pack& gp, PathStorage<conj_graph_pack::graph_t>& storage): gp_(gp), storage_(storage) {
+    SimpleLongReadMapper(conj_graph_pack& gp, PathStorage<conj_graph_pack::graph_t>& storage)
+            : gp_(gp), storage_(storage), path_finder_(gp_.g) {
         mapper_ = MapperInstance(gp_);
     }
 
-    virtual ~SimpleLongReadMapper() {
-
-    }
+    virtual ~SimpleLongReadMapper() {}
 
-    virtual void StartProcessLibrary(size_t threads_count) {
-        for (size_t i = 0; i < threads_count; ++i) {
-            buffer_storages_.push_back(new PathStorage<conj_graph_pack::graph_t>(gp_.g));
-        }
+    void StartProcessLibrary(size_t threads_count) override {
+        for (size_t i = 0; i < threads_count; ++i)
+            buffer_storages_.emplace_back(gp_.g);
     }
 
-    virtual void StopProcessLibrary() {
+    void StopProcessLibrary() override {
         for (size_t i = 0; i < buffer_storages_.size(); ++i) {
             MergeBuffer(i);
-            delete buffer_storages_[i];
         }
         buffer_storages_.clear();
     }
 
-    virtual void MergeBuffer(size_t thread_index) {
-        DEBUG("Merge buffer " << thread_index << " with size " << buffer_storages_[thread_index]->size());
-        storage_.AddStorage(*buffer_storages_[thread_index]);
-        buffer_storages_[thread_index]->Clear();
+    void MergeBuffer(size_t thread_index) override {
+        DEBUG("Merge buffer " << thread_index << " with size " << buffer_storages_[thread_index].size());
+        storage_.AddStorage(buffer_storages_[thread_index]);
+        buffer_storages_[thread_index].Clear();
         DEBUG("Now size " << storage_.size());
     }
 
-    virtual void ProcessPairedRead(size_t ,
-                                   const MappingPath<EdgeId>& ,
-                                   const MappingPath<EdgeId>& ,
-                                   size_t ) {
+    void ProcessPairedRead(size_t ,
+                           const io::PairedReadSeq&,
+                           const MappingPath<EdgeId>& ,
+                           const MappingPath<EdgeId>&) override {
+        //nothing to do
+    }
+
+    void ProcessPairedRead(size_t ,
+                           const io::PairedRead&,
+                           const MappingPath<EdgeId>& ,
+                           const MappingPath<EdgeId>&) override {
         //nothing to do
     }
 
-    virtual void ProcessSingleRead(size_t thread_index,
-                                   const MappingPath<EdgeId>& read) {
-        vector<EdgeId> path = ProcessSingleRead(read);
-        buffer_storages_[thread_index]->AddPath(path, 1, true);
+    void ProcessSingleRead(size_t thread_index,
+                           const io::SingleRead&,
+                           const MappingPath<EdgeId>& read) override {
+        ProcessSingleRead(thread_index, read);
+    }
+
+    void ProcessSingleRead(size_t thread_index,
+                           const io::SingleReadSeq&,
+                           const MappingPath<EdgeId>& read) override {
+        ProcessSingleRead(thread_index, read);
     }
 
     PathStorage<conj_graph_pack::graph_t>& GetPaths() {
@@ -71,15 +79,20 @@ public:
     }
 
 private:
-    vector<EdgeId> ProcessSingleRead(const MappingPath<EdgeId>& path) const {
-        return mapper_->FindReadPath(path);
+
+    void ProcessSingleRead(size_t thread_index, const MappingPath<EdgeId>& read) {
+        vector<vector<EdgeId>> paths = path_finder_.FindReadPathWithGaps(read);
+        for(auto path : paths) {
+            buffer_storages_[thread_index].AddPath(path, 1, false);
+        }
     }
 
     conj_graph_pack& gp_;
     PathStorage<conj_graph_pack::graph_t>& storage_;
     std::shared_ptr<const NewExtendedSequenceMapper<conj_graph_pack::graph_t,
                     conj_graph_pack::index_t> > mapper_;
-    vector<PathStorage<conj_graph_pack::graph_t>*> buffer_storages_;
+    ReadPathFinder<conj_graph_pack::graph_t> path_finder_;
+    std::vector<PathStorage<conj_graph_pack::graph_t> > buffer_storages_;
 };
 
 }/*longreads*/
diff --git a/src/debruijn/main.cpp b/src/debruijn/main.cpp
index 4195b27..4306f57 100644
--- a/src/debruijn/main.cpp
+++ b/src/debruijn/main.cpp
@@ -20,6 +20,7 @@
 #include "runtime_k.hpp"
 
 #include "config_struct.hpp"
+#include "version.hpp"
 
 #include <sys/types.h>
 #include <sys/stat.h>
diff --git a/src/debruijn/mismatch_correction.cpp b/src/debruijn/mismatch_correction.cpp
index 824835f..8371205 100644
--- a/src/debruijn/mismatch_correction.cpp
+++ b/src/debruijn/mismatch_correction.cpp
@@ -14,7 +14,12 @@ namespace debruijn_graph {
 
 void MismatchCorrection::run(conj_graph_pack &gp, const char*) {
     gp.EnsureBasicMapping();
-    auto streams = single_binary_readers(true,  true);
+    std::vector<size_t> libs;
+    for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
+        if (cfg::get().ds.reads[i].is_mismatch_correctable())
+            libs.push_back(i);
+    }
+    auto streams = single_binary_readers_for_libs(libs, true,  true);
     size_t corrected = MismatchShallNotPass<conj_graph_pack, io::SingleReadSeq>(gp, 2).ParallelStopAllMismatches(streams, 1);
     INFO("Corrected " << corrected << " nucleotides");
 }
diff --git a/src/debruijn/overlap_analysis.hpp b/src/debruijn/overlap_analysis.hpp
new file mode 100644
index 0000000..5809b36
--- /dev/null
+++ b/src/debruijn/overlap_analysis.hpp
@@ -0,0 +1,113 @@
+#pragma once
+
+#include "logger/logger.hpp"
+#include "omni/range.hpp"
+#include "ssw/ssw_cpp.h"
+
+namespace debruijn_graph {
+using omnigraph::Range;
+
+struct OverlapInfo {
+    Range r1;
+    Range r2;
+    size_t match_cnt;
+
+    OverlapInfo(const Range& r1_, const Range& r2_, size_t match_cnt_)
+            : r1(r1_),
+              r2(r2_),
+              match_cnt(match_cnt_) {
+        VERIFY(match_cnt <= std::min(r1.size(), r2.size()));
+    }
+
+    OverlapInfo()
+            : match_cnt(0) {
+    }
+
+    double identity() const {
+        if (match_cnt == 0)
+            return 0.;
+        return (double)match_cnt / (double)size();
+    }
+
+    size_t size() const {
+        return std::max(r1.size(), r2.size());
+    }
+
+    bool operator==(const OverlapInfo &that) const {
+        return r1 == that.r1 && r2 == that.r2 && match_cnt == that.match_cnt;
+    }
+
+    bool operator!=(const OverlapInfo &that) const {
+        return !(*this == that);
+    }
+};
+
+std::ostream& operator<<(std::ostream& os, const OverlapInfo& info) {
+    return os << "R1: [" << info.r1.start_pos << ", " << info.r1.end_pos
+            << "]; R2: [" << info.r2.start_pos << ", " << info.r2.end_pos << "]"
+            << "; match_cnt: " << info.match_cnt;
+}
+
+class SWOverlapAnalyzer {
+    static const uint32_t CIGAR_FLAG_MASK = (1 << 4) - 1;
+    static const uint32_t CIGAR_MATCH_FLAG = 7;
+    typedef typename Graph::EdgeId EdgeId;
+    size_t flank_length_;
+
+    const StripedSmithWaterman::Aligner aligner_;
+    const StripedSmithWaterman::Filter filter_;
+
+    size_t CountMatches(std::vector<uint32_t> cigar) const {
+        size_t match_cnt = 0;
+        for (uint32_t entry : cigar) {
+            if ((entry & CIGAR_FLAG_MASK) == CIGAR_MATCH_FLAG) {
+                match_cnt += (entry >> 4);
+            }
+        }
+        return match_cnt;
+    }
+
+    OverlapInfo InnerAnalyze(const Sequence& s1, const Sequence& s2) const {
+        if (s1.size() == 0 || s2.size() == 0) {
+            return OverlapInfo();
+        }
+        StripedSmithWaterman::Alignment alignment;
+        if (aligner_.Align(s1.str().c_str(), s2.str().c_str(), int(s2.size()), filter_, &alignment)) {
+            if (alignment.sw_score > 0) {
+                return OverlapInfo(Range(alignment.query_begin, alignment.query_end + 1),
+                            Range(alignment.ref_begin, alignment.ref_end + 1),
+                            CountMatches(alignment.cigar));
+            }
+        }
+        return OverlapInfo();
+    }
+
+public:
+    SWOverlapAnalyzer(size_t flank_length)
+            : flank_length_(flank_length),
+              aligner_(/*match_score*/2,
+              /*mismatch_penalty*/6,
+                       /*gap_opening_penalty*/8,
+                       /*gap_extending_penalty*/8) {
+    }
+
+
+    OverlapInfo AnalyzeOverlap(const Sequence& s1, const Sequence& s2) const {
+        size_t start1 = flank_length_ > s1.size() ? 0 : s1.size() - flank_length_;
+        size_t end2 = flank_length_ > s2.size() ? s2.size() : flank_length_;
+
+        OverlapInfo result = InnerAnalyze(s1.Subseq(start1, s1.size()), s2.Subseq(0, end2));
+        if (result == OverlapInfo())
+            return result;
+
+        result.r1.shift(int(start1));
+        return result;
+    }
+
+    template<class Graph>
+    OverlapInfo AnalyzeOverlap(const Graph& g, EdgeId e1, EdgeId e2) const {
+        return AnalyzeOverlap(g.EdgeNucls(e1), g.EdgeNucls(e2));
+    }
+};
+
+}
diff --git a/src/debruijn/pacbio/pac_index.hpp b/src/debruijn/pacbio/pac_index.hpp
index 491c7cd..51e9d4f 100644
--- a/src/debruijn/pacbio/pac_index.hpp
+++ b/src/debruijn/pacbio/pac_index.hpp
@@ -59,8 +59,6 @@ private:
     size_t read_count;
     bool ignore_map_to_middle;
 
-    map<VertexId, map<VertexId, size_t> > cashed_dijkstra;
-
 public:
     MappingDescription Locate(const Sequence &s) const;
 
@@ -353,10 +351,9 @@ public:
                         if (cfg::get().pb.additional_debug_info) {
                             DEBUG(" escpected gap length: " << -int(g_.length(prev_edge)) + int(prev_last_index.edge_position) - int(cur_first_index.edge_position) + int(debruijn_k - pacbio_k ) - seq_start + seq_end);
                             PathStorageCallback<Graph> callback(g_);
-                            PathProcessor<Graph> path_processor(g_, 0, 4000,
-                                                                start_v, end_v,
-                                                                callback);
-                            path_processor.Process();
+                            ProcessPaths(g_, 0, 4000,
+                                            start_v, end_v,
+                                            callback);
                             vector<vector<EdgeId> > paths = callback.paths();
                             stringstream s_buf;
                             for (auto p_iter = paths.begin();
@@ -480,54 +477,6 @@ public:
             cur_color ++;
 
         }
-        if (cur_color > 1) {
-/*            auto iter = mapping_descr.begin();
-            INFO("not evident clusters selection");
-            for (int i = 0; i < len; i++, iter ++) {
-                INFO(colors[i] <<" " << iter->str(g_));
-            }
-*/        }
-        vector<size_t> long_counts(cur_color);
-        i = 0;
-
-        auto prev_iter = mapping_descr.end();
-        for (auto i_iter = mapping_descr.begin(); i_iter != mapping_descr.end();
-                    ++i_iter, ++i) {
-            if (g_.length(i_iter->edgeId) > 500) {
-                if (colors[i] == DELETED_COLOR) {
-                    if (i_iter->size > 100) {
-                        DEBUG("dominated huge cluster " << i_iter->str(g_));
-                    }
-                    continue;
-                }
-                if (prev_iter != mapping_descr.end()) {
-                    if (i_iter->edgeId != prev_iter->edgeId && ! TopologyGap(i_iter->edgeId, prev_iter->edgeId, true)){
-                        VertexId start_v = g_.EdgeEnd(prev_iter->edgeId);
-                        VertexId end_v = g_.EdgeStart(i_iter->edgeId);
-                        if (cashed_dijkstra.find(start_v) == cashed_dijkstra.end()) {
-                            auto dij = DijkstraHelper<Graph>::CreateBoundedDijkstra(g_, 5000);
-                            dij.run(start_v);
-                            auto distances = dij.GetDistances();
-                            cashed_dijkstra[start_v] = std::map<VertexId, size_t>(distances.first, distances.second);
-                        }
-                        if (cashed_dijkstra[start_v].find(end_v) == cashed_dijkstra[start_v].end()) {
-                            bad_follow++;
-//                            INFO("bad follow edge_ids" << " " << g_.int_id(prev_iter->edgeId) << "( " << prev_iter->size << "),  " << g_.int_id(i_iter->edgeId) << "(" << i_iter->size << ")");
-                        } else if (cashed_dijkstra[start_v][end_v] + i_iter->average_edge_position +
-                                g_.length(prev_iter->edgeId) - prev_iter->average_edge_position >
-                            1.5 *  double(i_iter->average_read_position - prev_iter->average_read_position)) {
-                            half_bad_follow++;
-//                            INFO("ugly follow edge_ids" << " " << g_.int_id(prev_iter->edgeId) << " " << g_.int_id(i_iter->edgeId));
-
-                        } else {
-                            good_follow ++;
-                        }
-                    }
-                }
-                prev_iter = i_iter;
-            }
-        }
-
         return colors;
     }
 
@@ -672,9 +621,8 @@ public:
         DEBUG("seq dist:" << s.size()/3);
         if (distance_cashed.find(vertex_pair) == distance_cashed.end()) {
             DistancesLengthsCallback<Graph> callback(g_);
-            PathProcessor<Graph> path_processor(g_, 0, s.size() / 3, start_v,
-                                                end_v, callback);
-            path_processor.Process();
+            ProcessPaths(g_, 0, s.size() / 3, start_v,
+                             end_v, callback);
             result = callback.distances();
             distance_cashed[vertex_pair] = result;
         } else {
@@ -730,11 +678,10 @@ public:
                                   string &e_add) {
         DEBUG(" Traversing tangled region. Start and end vertices resp: " << g_.int_id(start_v) <<" " << g_.int_id(end_v));
         PathStorageCallback<Graph> callback(g_);
-        PathProcessor<Graph> path_processor(g_,
-                                            path_min_length, path_max_length,
-                                            start_v, end_v,
-                                            callback);
-        path_processor.Process();
+        ProcessPaths(g_,
+                    path_min_length, path_max_length,
+                    start_v, end_v,
+                    callback);
         vector<vector<EdgeId> > paths = callback.paths();
         DEBUG("taking subseq" << start_pos <<" "<< end_pos <<" " << s.size());
         int s_len = int(s.size());
diff --git a/src/debruijn/pacbio/pacbio_gap_closer.hpp b/src/debruijn/pacbio/pacbio_gap_closer.hpp
index 65d8959..544962b 100644
--- a/src/debruijn/pacbio/pacbio_gap_closer.hpp
+++ b/src/debruijn/pacbio/pacbio_gap_closer.hpp
@@ -236,6 +236,7 @@ private:
     int closed_gaps;
     int not_unique_gaps;
     int chained_gaps;
+    bool consensus_gap_closing;
 public:
     void CloseGapsInGraph(map<EdgeId, EdgeId> &replacement) {
         for (auto iter = new_edges_.begin(); iter != new_edges_.end(); ++iter) {
@@ -306,24 +307,43 @@ private:
             if (next_iter == storage.inner_index[e].end() || next_iter->end != cl_start->end) {
                 if (cur_len >= storage.min_gap_quantity && !storage.IsIgnored(make_pair(cl_start->start, cl_start->end))) {
                     vector<string> gap_variants;
+
                     for (auto j_iter = cl_start; j_iter != next_iter; j_iter++) {
                         string s = j_iter->gap_seq.str();
                         transform(s.begin(), s.end(), s.begin(), ::toupper);
                         gap_variants.push_back(s);
                     }
-                    map<EdgeId, pair<size_t, string> > tmp;
-                    string s = g_.EdgeNucls(cl_start->start).Subseq(0, cl_start->edge_gap_start_position).str();
-
-                    const ConsensusCore::PoaConsensus* pc = ConsensusCore::PoaConsensus::FindConsensus(gap_variants,
-                                                                                                       ConsensusCore::PoaConfig::GLOBAL_ALIGNMENT);
-                    string tmp_string = pc->Sequence();
-                    DEBUG("consenus for " << g_.int_id(cl_start->start) << " and " << g_.int_id(cl_start->end) << "found: ");
-                    DEBUG(tmp_string);
-                    s += tmp_string;
-                    s += g_.EdgeNucls(cl_start->end).Subseq(cl_start->edge_gap_end_position, g_.length(cl_start->end) + g_.k()).str();
-                    tmp.insert(make_pair(cl_start->end, make_pair(cur_len, s)));
-                    new_edges[cl_start->start] = tmp;
-
+                    if (consensus_gap_closing || (gap_variants.size() > 0 && gap_variants[0].length() < cfg::get().pb.max_contigs_gap_length)) {
+                        map <EdgeId, pair<size_t, string>> tmp;
+                        string tmp_string;
+                        string s = g_.EdgeNucls(cl_start->start).Subseq(0, cl_start->edge_gap_start_position).str();
+                        if (consensus_gap_closing) {
+                            const ConsensusCore::PoaConsensus *pc = ConsensusCore::PoaConsensus::FindConsensus(
+                                    gap_variants,
+                                    ConsensusCore::PoaConfig::GLOBAL_ALIGNMENT);
+                            tmp_string = pc->Sequence();
+                        } else {
+                            tmp_string = gap_variants[0];
+                            if (gap_variants.size() > 1) {
+
+                                stringstream ss;
+                                for (int i = 0; i < gap_variants.size(); i++)
+                                    ss << gap_variants[i].length() << " ";
+                                INFO(gap_variants.size() << " gap closing variant for contigs, lengths: " << ss.str());
+                            }
+                        }
+
+                        DEBUG("consenus for " << g_.int_id(cl_start->start) << " and " << g_.int_id(cl_start->end) <<
+                                                                                          "found: ");
+                        DEBUG(tmp_string);
+                        s += tmp_string;
+                        s += g_.EdgeNucls(cl_start->end).Subseq(cl_start->edge_gap_end_position,
+                                                                g_.length(cl_start->end) + g_.k()).str();
+                        tmp.insert(make_pair(cl_start->end, make_pair(cur_len, s)));
+                        new_edges[cl_start->start] = tmp;
+                    } else {
+                        INFO ("Skipping gap of size " << gap_variants[0].length() << " multiplicity " << gap_variants.size());
+                    }
                 }
                 cl_start = next_iter;
                 cur_len = 0;
@@ -332,8 +352,8 @@ private:
     }
 
 public:
-    PacbioGapCloser(Graph &g)
-            : g_(g) {
+    PacbioGapCloser(Graph &g, bool consensus_gap )
+            : g_(g), consensus_gap_closing(consensus_gap) {
         closed_gaps = 0;
         not_unique_gaps = 0;
         chained_gaps = 0;
diff --git a/src/debruijn/pacbio_aligning.cpp b/src/debruijn/pacbio_aligning.cpp
index 7d4191e..5b78881 100644
--- a/src/debruijn/pacbio_aligning.cpp
+++ b/src/debruijn/pacbio_aligning.cpp
@@ -6,11 +6,11 @@
 //***************************************************************************
 
 #include "standard.hpp"
-#include "graph_simplification.hpp"
 #include "pacbio/pac_index.hpp"
 #include "pacbio/pacbio_gap_closer.hpp"
 #include "long_read_storage.hpp"
-
+#include "io/wrapper_collection.hpp"
+#include "stats/debruijn_stats.hpp"
 #include "pacbio_aligning.hpp"
 
 namespace debruijn_graph {
@@ -26,8 +26,10 @@ void ProcessReadsBatch(conj_graph_pack &gp,
                                               pacbio::GapStorage<Graph>(gp.g, min_gap_quantity));
     vector<pacbio::StatsCounter> stats_by_thread(cfg::get().max_threads);
 
+    size_t longer_500 = 0;
     size_t aligned = 0;
     size_t nontrivial_aligned = 0;
+
 #   pragma omp parallel for shared(reads, long_reads_by_thread, pac_index, n, aligned, nontrivial_aligned)
     for (size_t i = 0; i < buf_size; ++i) {
         if (i % 1000 == 0) {
@@ -52,13 +54,17 @@ void ProcessReadsBatch(conj_graph_pack &gp,
 #       pragma omp critical
         {
 //            INFO(current_read_mapping.seed_num);
-            if (aligned_edges.size() > 0) {
-                aligned ++;
-                stats_by_thread[thread_num].seeds_percentage[size_t (floor(double(current_read_mapping.seed_num) * 1000.0 / (double) seq.size()))] ++;
-                for (size_t j = 0; j < aligned_edges.size(); j ++){
-                    if (aligned_edges[j].size() > 1) {
-                        nontrivial_aligned ++;
-                        break;
+            if (seq.size() > 500) {
+                longer_500++;
+                if (aligned_edges.size() > 0) {
+                    aligned++;
+                    stats_by_thread[thread_num].seeds_percentage[size_t(
+                            floor(double(current_read_mapping.seed_num) * 1000.0 / (double) seq.size()))]++;
+                    for (size_t j = 0; j < aligned_edges.size(); j++) {
+                        if (aligned_edges[j].size() > 1) {
+                            nontrivial_aligned++;
+                            break;
+                        }
                     }
                 }
             }
@@ -68,7 +74,8 @@ void ProcessReadsBatch(conj_graph_pack &gp,
             VERBOSE_POWER(n, " reads processed");
         }
     }
-    INFO("Read batch of size: " << buf_size << " processed; reads aligned: " << aligned << "; paths of more than one edge received: " << nontrivial_aligned );
+    INFO("Read batch of size: " << buf_size << " processed; "<< longer_500 << " of them longer than 500; among long reads aligned: " << aligned << "; paths of more than one edge received: " << nontrivial_aligned );
+
     for (size_t i = 0; i < cfg::get().max_threads; i++) {
         long_reads.AddStorage(long_reads_by_thread[i]);
         gaps.AddStorage(gaps_by_thread[i]);
@@ -76,12 +83,12 @@ void ProcessReadsBatch(conj_graph_pack &gp,
     }
 }
 
-void align_pacbio(conj_graph_pack &gp, int lib_id) {
+void align_pacbio(conj_graph_pack &gp, int lib_id, bool make_additional_saves) {
     io::ReadStreamList<io::SingleRead> streams;
-    for (auto it = cfg::get().ds.reads[lib_id].single_begin(); it != cfg::get().ds.reads[lib_id].single_end(); ++it) {
-          //do we need input_file function here?
-        streams.push_back(make_shared<io::FixingWrapper>(make_shared<io::FileReadStream>(*it)));
-    }
+    for (const auto& reads : cfg::get().ds.reads[lib_id].single_reads())
+      //do we need input_file function here?
+      streams.push_back(make_shared<io::FixingWrapper>(make_shared<io::FileReadStream>(reads)));
+
     //make_shared<io::FixingWrapper>(make_shared<io::FileReadStream>(file));
     //    auto pacbio_read_stream = single_easy_reader(cfg::get().ds.reads[lib_id],
 //    false, false);
@@ -93,11 +100,13 @@ void align_pacbio(conj_graph_pack &gp, int lib_id) {
     pacbio::StatsCounter stats;
     size_t min_gap_quantity = 2;
     size_t rtype = 0;
+    bool consensus_gap_closing = false;
     if (cfg::get().ds.reads[lib_id].type() == io::LibraryType::PacBioReads || 
-            cfg::get().ds.reads[lib_id].type() == io::LibraryType::SangerReads || 
-            cfg::get().ds.reads[lib_id].type() == io::LibraryType::NanoporeReads) {
+        cfg::get().ds.reads[lib_id].type() == io::LibraryType::SangerReads || 
+        cfg::get().ds.reads[lib_id].type() == io::LibraryType::NanoporeReads) {
         min_gap_quantity = cfg::get().pb.pacbio_min_gap_quantity;
         rtype = 1;
+        consensus_gap_closing = true;
     } else {
         min_gap_quantity = cfg::get().pb.contigs_min_gap_quantity;
         rtype = 2;
@@ -134,12 +143,14 @@ void align_pacbio(conj_graph_pack &gp, int lib_id) {
     stats.report();
     map<EdgeId, EdgeId> replacement;
     size_t min_stats_cutoff =(rtype == 1 ? 1  : 0);
-    long_reads.DumpToFile(cfg::get().output_saves + "long_reads_before_rep.mpr",
+    if (make_additional_saves)
+        long_reads.DumpToFile(cfg::get().output_saves + "long_reads_before_rep.mpr",
                           replacement, min_stats_cutoff, true);
     gaps.DumpToFile(cfg::get().output_saves + "gaps.mpr");
     gaps.PadGapStrings();
-    gaps.DumpToFile(cfg::get().output_saves +  "gaps_padded.mpr");
-    pacbio::PacbioGapCloser<Graph> gap_closer(gp.g);
+    if (make_additional_saves)
+        gaps.DumpToFile(cfg::get().output_saves +  "gaps_padded.mpr");
+    pacbio::PacbioGapCloser<Graph> gap_closer(gp.g, consensus_gap_closing);
     gap_closer.ConstructConsensus(cfg::get().max_threads, gaps);
     gap_closer.CloseGapsInGraph(replacement);
     long_reads.ReplaceEdges(replacement);
@@ -156,10 +167,11 @@ void PacBioAligning::run(conj_graph_pack &gp, const char*) {
     using namespace omnigraph;
     omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
     int lib_id = -1;
+    bool make_additional_saves = parent_->saves_policy().make_saves_;
     for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
         if ( cfg::get().ds.reads[i].is_pacbio_alignable() ) {
             lib_id = (int) i;
-            align_pacbio(gp, lib_id);
+            align_pacbio(gp, lib_id, make_additional_saves);
         }
     }
 
diff --git a/src/debruijn/pair_info_count.cpp b/src/debruijn/pair_info_count.cpp
index decb6ff..433b3ee 100644
--- a/src/debruijn/pair_info_count.cpp
+++ b/src/debruijn/pair_info_count.cpp
@@ -22,16 +22,17 @@
 #include "pair_info_filler.hpp"
 #include "stats/debruijn_stats.hpp"
 #include "path_extend/split_graph_pair_info.hpp"
+#include "bwa_pair_info_filler.hpp"
 
 namespace debruijn_graph {
-    typedef io::SequencingLibrary<debruijn_config::DataSetData> SequencingLib;
 
+typedef io::SequencingLibrary<debruijn_config::DataSetData> SequencingLib;
 
 bool RefineInsertSizeForLib(conj_graph_pack& gp, size_t ilib, size_t edge_length_threshold) {
 
   INFO("Estimating insert size (takes a while)");
   InsertSizeCounter hist_counter(gp, edge_length_threshold, /* ignore negative */ true);
-  SequenceMapperNotifier notifier(gp, false);
+  SequenceMapperNotifier notifier(gp);
   notifier.Subscribe(ilib, &hist_counter);
 
   SequencingLib& reads = cfg::get_writable().ds.reads[ilib];
@@ -103,7 +104,7 @@ void ProcessPairedReads(conj_graph_pack& gp, size_t ilib, bool map_single_reads)
     LatePairedIndexFiller pif(gp.g, PairedReadCountWeight, gp.paired_indices[ilib]);
     notifier.Subscribe(ilib, &pif);
 
-    auto paired_streams = paired_binary_readers(reads, true, (size_t) reads.data().mean_insert_size);
+    auto paired_streams = paired_binary_readers(reads, false, (size_t) reads.data().mean_insert_size);
     notifier.ProcessLibrary(paired_streams, ilib, *ChooseProperMapper(gp, reads));
     cfg::get_writable().ds.reads[ilib].data().pi_threshold = split_graph.GetThreshold();
 
@@ -118,8 +119,7 @@ bool HasGoodRRLibs() {
         if (lib.is_contig_lib())
             continue;
         if (lib.is_paired() &&
-                (lib.data().mean_insert_size == 0.0 ||
-                    lib.data().mean_insert_size < 1.1 * (double) lib.data().read_length)) {
+                lib.data().mean_insert_size == 0.0) {
             continue;
         }
         if (lib.is_repeat_resolvable()) {
@@ -164,42 +164,68 @@ void PairInfoCount::run(conj_graph_pack &gp, const char*) {
     //fixme implement better universal logic 
     size_t edge_length_threshold = cfg::get().ds.meta ? 1000 : stats::Nx(gp.g, 50);
     INFO("Min edge length for estimation: " << edge_length_threshold);
+    bwa_pair_info::BWAPairInfoFiller bwa_counter(gp.g,
+                                                 cfg::get().bwa.path_to_bwa,
+                                                 path::append_path(cfg::get().output_dir, "bwa_count"),
+                                                 cfg::get().max_threads, !cfg::get().bwa.debug);
+
     for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
-        INFO("Estimating insert size for library #" << i);
-        if (cfg::get().ds.reads[i].is_paired()) {
+        const auto& lib = cfg::get().ds.reads[i];
 
+        if (cfg::get().bwa.enabled && lib.is_bwa_alignable()) {
+            //Run insert size estimation and pair index filler together to save disc space (removes SAM file right after processing the lib)
+            bwa_counter.ProcessLib(i, cfg::get_writable().ds.reads[i], gp.paired_indices[i],
+                                   edge_length_threshold, cfg::get().bwa.min_contig_len);
+        }
+        else if (lib.is_paired()) {
+            INFO("Estimating insert size for library #" << i);
+            const auto& lib_data = lib.data();
+            size_t rl = lib_data.read_length;
+            size_t k = cfg::get().K;
             bool insert_size_refined = RefineInsertSizeForLib(gp, i, edge_length_threshold);
 
             if (!insert_size_refined) {
                 cfg::get_writable().ds.reads[i].data().mean_insert_size = 0.0;
                 WARN("Unable to estimate insert size for paired library #" << i);
-                if (cfg::get().ds.reads[i].data().read_length > 0 && cfg::get().ds.reads[i].data().read_length <= cfg::get().K) {
-                    WARN("Maximum read length (" << cfg::get().ds.reads[i].data().read_length << ") should be greater than K (" << cfg::get().K << ")");
-                }
-                else if (cfg::get().ds.reads[i].data().read_length <= cfg::get().K * 11 / 10) {
-                    WARN("Maximum read length (" << cfg::get().ds.reads[i].data().read_length << ") is probably too close to K (" << cfg::get().K << ")");
+                if (rl > 0 && rl <= k) {
+                    WARN("Maximum read length (" << rl << ") should be greater than K (" << k << ")");
+                } else if (rl <= k * 11 / 10) {
+                    WARN("Maximum read length (" << rl << ") is probably too close to K (" << k << ")");
                 } else {
                     WARN("None of paired reads aligned properly. Please, check orientation of your read pairs.");
                 }
                 continue;
             } else {
-                INFO("  Estimated insert size for paired library #" << i);
-                INFO("  Insert size = " << cfg::get().ds.reads[i].data().mean_insert_size <<
-                        ", deviation = " << cfg::get().ds.reads[i].data().insert_size_deviation <<
-                        ", left quantile = " << cfg::get().ds.reads[i].data().insert_size_left_quantile <<
-                        ", right quantile = " << cfg::get().ds.reads[i].data().insert_size_right_quantile <<
-                        ", read length = " << cfg::get().ds.reads[i].data().read_length);
+                INFO("  Insert size = " << lib_data.mean_insert_size <<
+                        ", deviation = " << lib_data.insert_size_deviation <<
+                        ", left quantile = " << lib_data.insert_size_left_quantile <<
+                        ", right quantile = " << lib_data.insert_size_right_quantile <<
+                        ", read length = " << lib_data.read_length);
+
+                if (lib_data.mean_insert_size < 1.1 * (double) rl) {
+                    WARN("Estimated mean insert size " << lib_data.mean_insert_size
+                                << " is very small compared to read length " << rl);
+                }
             }
         }
     }
 
     for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
-        INFO("Mapping library #" << i);
         const auto& lib = cfg::get().ds.reads[i];
-        if (lib.is_contig_lib() && !lib.is_pacbio_alignable()) {
-            INFO("Mapping contigs library");
+        if (lib.is_pacbio_alignable()) {
+            INFO("Library #" << i << " was mapped by PacBio mapper, skipping");
+            continue;
+        }
+        else if (lib.is_contig_lib()) {
+            INFO("Mapping contigs library #" << i);
             ProcessSingleReads(gp, i, false);
-		} else {
+		}
+        else if (cfg::get().bwa.enabled && lib.is_bwa_alignable()) {
+            INFO("Library #" << i << " was mapped by BWA, skipping");
+            continue;
+        }
+        else {
+            INFO("Mapping library #" << i);
             bool map_single_reads = ShouldMapSingleReads(i);
             cfg::get_writable().use_single_reads |= map_single_reads;
 
@@ -215,7 +241,6 @@ void PairInfoCount::run(conj_graph_pack &gp, const char*) {
                 INFO("Total paths obtained from single reads: " << gp.single_long_reads[i].size());
             }
         }
-
     }
 
     SensitiveReadMapper<Graph>::EraseIndices();
diff --git a/src/debruijn/pair_info_filler.hpp b/src/debruijn/pair_info_filler.hpp
index 1edafd3..11a3b7e 100644
--- a/src/debruijn/pair_info_filler.hpp
+++ b/src/debruijn/pair_info_filler.hpp
@@ -36,41 +36,47 @@ public:
     }
 
     virtual void StartProcessLibrary(size_t threads_count) {
-        for (auto it = graph_.ConstEdgeBegin(); !it.IsEnd(); ++it)
-            paired_index_.AddPairInfo(*it, *it, { 0., 0. });
-        for (size_t i = 0; i < threads_count; ++i)
-            buffer_pi_.emplace_back();
+        paired_index_.Init();
+        buffer_pi_ = {graph_, threads_count};
     }
 
     virtual void StopProcessLibrary() {
         for (size_t i = 0; i < buffer_pi_.size(); ++i)
             MergeBuffer(i);
 
-        buffer_pi_.clear();
+        buffer_pi_.Clear();
     }
 
     virtual void ProcessPairedRead(size_t thread_index,
+                                   const io::PairedRead& r,
                                    const MappingPath<EdgeId>& read1,
-                                   const MappingPath<EdgeId>& read2,
-                                   size_t dist) {
-        ProcessPairedRead(buffer_pi_[thread_index], read1, read2, dist);
+                                   const MappingPath<EdgeId>& read2) {
+        ProcessPairedRead(buffer_pi_[thread_index], read1, read2, r.distance());
+    }
+
+    virtual void ProcessPairedRead(size_t thread_index,
+                                   const io::PairedReadSeq& r,
+                                   const MappingPath<EdgeId>& read1,
+                                   const MappingPath<EdgeId>& read2) {
+        ProcessPairedRead(buffer_pi_[thread_index], read1, read2, r.distance());
     }
 
     virtual void ProcessSingleRead(size_t,
+                                   const io::SingleReadSeq&,
+                                   const MappingPath<EdgeId>&) {}
+
+    virtual void ProcessSingleRead(size_t,
+                                   const io::SingleRead&,
                                    const MappingPath<EdgeId>&) {}
 
     virtual void MergeBuffer(size_t thread_index) {
-        paired_index_.AddAll(buffer_pi_[thread_index]);
+        paired_index_.Merge(buffer_pi_[thread_index]);
         buffer_pi_[thread_index].Clear();
     }
 
     virtual ~LatePairedIndexFiller() {}
 
 private:
-    EdgePair ConjugatePair(EdgePair ep) const {
-        return std::make_pair(graph_.conjugate(ep.second), graph_.conjugate(ep.first));
-    }
-
     void ProcessPairedRead(omnigraph::de::PairedInfoBuffer<Graph>& paired_index,
                            const MappingPath<EdgeId>& path1,
                            const MappingPath<EdgeId>& path2, size_t read_distance) const {
@@ -81,8 +87,6 @@ private:
 
                 EdgePair ep{mapping_edge_1.first, mapping_edge_2.first};
 
-                if (ep > ConjugatePair(ep))
-                    continue;
 
                 double weight = weight_f_(mapping_edge_1.second,
                                           mapping_edge_2.second);
@@ -93,8 +97,8 @@ private:
                         + (int) mapping_edge_1.second.mapped_range.start_pos
                         - (int) mapping_edge_2.second.mapped_range.end_pos;
 
-                paired_index.AddPairInfo(mapping_edge_1.first, mapping_edge_2.first,
-                                         { (double) edge_distance, weight });
+                paired_index.Add(mapping_edge_1.first, mapping_edge_2.first,
+                                         omnigraph::de::RawPoint(edge_distance, weight));
             }
         }
     }
@@ -103,10 +107,9 @@ private:
     const Graph& graph_;
     WeightF weight_f_;
     omnigraph::de::UnclusteredPairedInfoIndexT<Graph>& paired_index_;
-    std::vector<omnigraph::de::PairedInfoBuffer<Graph> > buffer_pi_;
+    omnigraph::de::PairedInfoBuffersT<Graph> buffer_pi_;
 
-    DECL_LOGGER("LatePairedIndexFiller")
-    ;
+    DECL_LOGGER("LatePairedIndexFiller");
 };
 
 
diff --git a/src/debruijn/pair_info_improver.hpp b/src/debruijn/pair_info_improver.hpp
index e8e92d7..8c529f4 100644
--- a/src/debruijn/pair_info_improver.hpp
+++ b/src/debruijn/pair_info_improver.hpp
@@ -11,6 +11,7 @@
 #include "graph_pack.hpp"
 #include "path_utils.hpp"
 #include "split_path_constructor.hpp"
+#include "de/paired_info_helpers.hpp"
 #include <math.h>
 
 namespace debruijn_graph {
@@ -19,18 +20,13 @@ template<class Graph>
 static
 bool TryToAddPairInfo(omnigraph::de::PairedInfoIndexT<Graph>& clustered_index,
                       typename Graph::EdgeId e1, typename Graph::EdgeId e2,
-                      const omnigraph::de::Point& p,
-                      bool reflected = true) {
-    const omnigraph::de::Point& point_to_add = p;
-
-    const auto histogram = clustered_index.GetEdgePairInfo(e1, e2);
-    for (auto it = histogram.begin(); it != histogram.end(); ++it)
-        if (ClustersIntersect(*it, point_to_add))
+                      const omnigraph::de::Point& point_to_add) {
+    auto histogram = clustered_index.Get(e1, e2);
+    for (auto i : histogram)
+        if (ClustersIntersect(i, point_to_add))
             return false;
 
-    clustered_index.AddPairInfo(e1, e2, point_to_add, reflected);
-    if (reflected)
-        clustered_index.AddConjPairInfo(e1, e2, point_to_add, reflected);
+    clustered_index.Add(e1, e2, point_to_add);
     return true;
 }
 
@@ -39,10 +35,11 @@ class PairInfoImprover {
     typedef typename Graph::EdgeId EdgeId;
     typedef std::vector<omnigraph::de::PairInfo<EdgeId> > PairInfos;
     typedef std::pair<EdgeId, EdgeId> EdgePair;
+    typedef omnigraph::de::PairedInfoIndexT<Graph> Index;
 
   public:
     PairInfoImprover(const Graph& g,
-                     omnigraph::de::PairedInfoIndexT<Graph>& clustered_index,
+                     Index& clustered_index,
                      const io::SequencingLibrary<debruijn_config::DataSetData> &lib)
             : graph_(g), index_(clustered_index), lib_(lib) { }
 
@@ -64,7 +61,7 @@ class PairInfoImprover {
 
     class ContradictionalRemover {
       public:
-        ContradictionalRemover(std::vector<omnigraph::de::PairedInfoIndexT<Graph> > &to_remove,
+        ContradictionalRemover(omnigraph::de::PairedInfoIndicesT<Graph> &to_remove,
                                const Graph &g,
                                omnigraph::de::PairedInfoIndexT<Graph>& index)
                 : to_remove_(to_remove), graph_(g), index_(index) {}
@@ -73,9 +70,7 @@ class PairInfoImprover {
             omnigraph::de::PairedInfoIndexT<Graph> &to_remove = to_remove_[omp_get_thread_num()];
 
             if (graph_.length(e)>= cfg::get().max_repeat_length && index_.contains(e))
-                FindInconsistent(e,
-                                 index_.edge_begin(e), index_.edge_end(e),
-                                 to_remove);
+                FindInconsistent(e, to_remove);
 
             return false;
         }
@@ -111,47 +106,38 @@ class PairInfoImprover {
             }
         }
 
-        // Checking the consitency of two edge pairs (e, e_1) and (e, e_2) for all pairs (e, <some_edge>)
+        // Checking the consistency of two edge pairs (e, e_1) and (e, e_2) for all pairs (base_edge, <some_edge>)
         void FindInconsistent(EdgeId base_edge,
-                              typename omnigraph::de::PairedInfoIndexT<Graph>::EdgeIterator start,
-                              typename omnigraph::de::PairedInfoIndexT<Graph>::EdgeIterator end,
-                              omnigraph::de::PairedInfoIndexT<Graph>& pi) const {
-            typedef typename omnigraph::de::PairedInfoIndexT<Graph>::EdgeIterator EdgeIterator;
-
-            for (EdgeIterator I_1(start), E(end); I_1 != E; ++I_1) {
-                for (EdgeIterator I_2(start); I_2 != E; ++I_2) {
-                    if (I_1 == I_2)
+                              Index& pi) const {
+            for (auto i1 : index_.Get(base_edge)) {
+                auto e1 = i1.first;
+                for (auto i2 : index_.Get(base_edge)) {
+                    auto e2 = i2.first;
+                    if (e1 == e2)
                         continue;
-
-                    std::pair<EdgeId, omnigraph::de::Point> entry1 = *I_1;
-                    std::pair<EdgeId, omnigraph::de::Point> entry2 = *I_2;
-
-                    EdgeId e1 = entry1.first;
-                    const omnigraph::de::Point& p1 = entry1.second;
-                    EdgeId e2 = entry2.first;
-                    const omnigraph::de::Point& p2 = entry2.second;
-                    if (!IsConsistent(base_edge, e1, e2, p1, p2)) {
-                        if (math::le(p1.weight, p2.weight)) {
-                            pi.AddPairInfo(base_edge, e1, p1);
-                        } else {
-                            pi.AddPairInfo(base_edge, e2, p2);
+                    for (auto p1 : i1.second) {
+                        for (auto p2 : i2.second) {
+                            if (!IsConsistent(base_edge, e1, e2, p1, p2)) {
+                                if (math::le(p1.weight, p2.weight))
+                                    pi.Add(base_edge, e1, p1);
+                                else
+                                    pi.Add(base_edge, e2, p2);
+                            }
                         }
                     }
                 }
             }
         }
 
-        std::vector<omnigraph::de::PairedInfoIndexT<Graph> > &to_remove_;
+        omnigraph::de::PairedInfoIndicesT<Graph> &to_remove_;
         const Graph &graph_;
-        omnigraph::de::PairedInfoIndexT<Graph>& index_;
+        Index& index_;
     };
 
     size_t RemoveContradictional(unsigned nthreads) {
         size_t cnt = 0;
 
-        std::vector<omnigraph::de::PairedInfoIndexT<Graph> > to_remove;
-        for (size_t i = 0; i < nthreads; ++i)
-            to_remove.emplace_back(graph_);
+        omnigraph::de::PairedInfoIndicesT<Graph> to_remove(graph_, nthreads);
 
         // FIXME: Replace with lambda
         ContradictionalRemover remover(to_remove, graph_, index_);
@@ -161,15 +147,15 @@ class PairInfoImprover {
 
         DEBUG("Merging maps");
         for (size_t i = 1; i < nthreads; ++i) {
-            to_remove[0].AddAll(to_remove[i]);
+            to_remove[0].Merge(to_remove[i]);
             to_remove[i].Clear();
         }
         DEBUG("Resulting size " << to_remove[0].size());
 
         DEBUG("Deleting paired infos, liable to removing");
-        for (auto I = to_remove[0].begin(), E = to_remove[0].end(); I != E; ++I) {
+        for (auto I = omnigraph::de::raw_pair_begin(to_remove[0]);
+            I != omnigraph::de::raw_pair_end(to_remove[0]); ++I) {
             cnt += DeleteIfExist(I.first(), I.second(), *I);
-            cnt += DeleteConjugateIfExist(I.first(), I.second(), *I);
         }
         to_remove[0].Clear();
 
@@ -179,77 +165,47 @@ class PairInfoImprover {
 
     }
 
-    class MissingFiller {
-      public:
-        MissingFiller(std::vector<std::vector<omnigraph::de::PairedInfoIndexT<Graph> > > &to_add,
-                      const Graph &graph,
-                      const omnigraph::de::PairedInfoIndexT<Graph> &index,
-                      const SplitPathConstructor<Graph> &spc,
-                      const io::SequencingLibrary<debruijn_config::DataSetData> &lib)
-                : to_add_(to_add), graph_(graph), index_(index), spc_(spc), lib_(lib) {}
-
-        bool operator()(EdgeId e) {
-            std::vector<PathInfoClass<Graph> > paths =
-                    spc_.ConvertPIToSplitPaths(index_.GetEdgeInfo(e),
-                                               lib_.data().mean_insert_size, lib_.data().insert_size_deviation);
-            for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-                TRACE("Path " << iter->PrintPath(graph_));
-
-                const PathInfoClass<Graph>& path = *iter;
-                for (auto pi_iter = path.begin(); pi_iter != path.end(); ++pi_iter) {
-                    const auto& pi = *pi_iter;
-                    EdgeId e1 = pi.first;
-                    EdgeId e2 = pi.second;
-                    std::pair<EdgeId, EdgeId> ep = std::make_pair(e1, e2);
-                    if (ep <= ConjugatePair(ep))
-                        TryToAddPairInfo(to_add_[omp_get_thread_num()][0], e1, e2, pi.point, false);
-                    else
-                        TryToAddPairInfo(to_add_[omp_get_thread_num()][1], e1, e2, pi.point, false);
-                }
-            }
-
-            return false;
-        }
-
-      private:
-        EdgePair ConjugatePair(EdgePair ep) const {
-            return std::make_pair(graph_.conjugate(ep.second), graph_.conjugate(ep.first));
-        }
-
-        std::vector<std::vector<omnigraph::de::PairedInfoIndexT<Graph> > > &to_add_;
-        const Graph &graph_;
-        const omnigraph::de::PairedInfoIndexT<Graph> &index_;
-        const SplitPathConstructor<Graph> &spc_;
-        const io::SequencingLibrary<debruijn_config::DataSetData>& lib_;
-    };
-
     size_t FillMissing(unsigned nthreads) {
-        TRACE("Fill missing: Creating indexes");
-        std::vector<std::vector<omnigraph::de::PairedInfoIndexT<Graph> > > to_add(nthreads);
-        for (size_t i = 0; i < nthreads; ++i) {
-            to_add[i].emplace_back(graph_);
-            to_add[i].emplace_back(graph_);
-        }
+        DEBUG("Fill missing: Creating indexes");
+        const size_t NUM_CHUNKS = nthreads * 16;
+        omnigraph::de::PairedInfoIndicesT<Graph> to_add(graph_, NUM_CHUNKS);
 
         SplitPathConstructor<Graph> spc(graph_);
-        // FIXME: Replace with lambda
-        MissingFiller filler(to_add, graph_, index_, spc, lib_);
+        IterationHelper<Graph, EdgeId> edges(graph_);
+        auto iters = edges.Chunks(NUM_CHUNKS);
+
         DEBUG("Fill missing: Start threads");
-        ParallelEdgeProcessor<Graph>(graph_, nthreads).Run(filler);
+        #pragma omp parallel for schedule(guided)
+        for (size_t i = 0; i < iters.size() - 1; ++i) {
+            TRACE("Processing chunk #" << i);
+            for (auto e = iters[i]; e != iters[i + 1]; ++e) {
+                TRACE("Checking for edge " << *e);
+                auto paths = spc.ConvertPIToSplitPaths(*e, index_,
+                                                       lib_.data().mean_insert_size,
+                                                       lib_.data().insert_size_deviation);
+                for (const auto &path : paths) {
+                    TRACE("Path " << path.PrintPath(graph_));
+                    for (const auto &pi : path)
+                        TryToAddPairInfo(to_add[i], pi.first, pi.second, pi.point);
+                }
+            }
+        }
+        //ParallelEdgeProcessor<Graph>(graph_, nthreads).Run(filler);
         DEBUG("Fill missing: Threads finished");
 
         size_t cnt = 0;
-        for (size_t j = 0; j < 2; ++j)
-            for (size_t i = 0; i < nthreads; ++i) {
-                DEBUG("Adding map #" << i << " " << j);
-                for (auto I = to_add[i][j].begin(), E = to_add[i][j].end(); I != E; ++I) {
-                    const auto& hist = *I;
-                    EdgeId e1 = I.first();
-                    EdgeId e2 = I.second();
-                    for (auto it = hist.begin(); it != hist.end(); ++it)
-                        cnt += TryToAddPairInfo(index_, e1, e2, *it);
-                }
+        for (size_t i = 0; i < iters.size() - 1; ++i) {
+            DEBUG("Adding map #" << i);
+            for (auto I = omnigraph::de::raw_pair_begin(to_add[i]);
+                I != omnigraph::de::raw_pair_end(to_add[i]);
+                ++I) {
+                EdgeId e1 = I.first();
+                EdgeId e2 = I.second();
+                for (auto p : *I)
+                    cnt += TryToAddPairInfo(index_, e1, e2, p);
             }
+            to_add[i].Clear();
+        }
 
         DEBUG("Size of paired index " << index_.size());
 
@@ -259,48 +215,18 @@ class PairInfoImprover {
     }
 
   private:
-    size_t DeleteIfExist(EdgeId e1, EdgeId e2, const de::HistogramWithWeight& infos) {
+    size_t DeleteIfExist(EdgeId e1, EdgeId e2, const typename Index::FullHistProxy& infos) {
         size_t cnt = 0;
-        const auto histogram = index_.GetEdgePairInfo(e1, e2);
-        for (auto I = infos.begin(), E = infos.end(); I != E; ++I) {
-            const omnigraph::de::Point& point = *I;
-            for (auto p_iter = histogram.begin(); p_iter != histogram.end(); ++p_iter) {
-                if (math::eq(p_iter->d, point.d)) {
-                    cnt += index_.RemovePairInfo(e1, e2, *p_iter);
-                    cnt += index_.RemovePairInfo(e2, e1, -*p_iter);
-                    TRACE("Removed pi " << graph_.int_id(e1) << " " << graph_.int_id(e2)
-                          << " dist " << p_iter->d << " var " << p_iter->var);
-                }
-            }
-
+        for (auto point : infos) {
+            cnt += index_.Remove(e1, e2, point);
             TRACE("cnt += " << cnt);
         }
-        return cnt;
-    }
 
-    size_t DeleteConjugateIfExist(EdgeId e1, EdgeId e2, const de::HistogramWithWeight& infos) {
-        size_t cnt = 0;
-        EdgeId rc_e1 = graph_.conjugate(e2);
-        EdgeId rc_e2 = graph_.conjugate(e1);
-        const auto histogram = index_.GetEdgePairInfo(rc_e1, rc_e2);
-        for (auto I = infos.begin(), E = infos.end(); I != E; ++I) {
-            const omnigraph::de::Point point = ConjugatePoint(graph_.length(e1), graph_.length(e2), *I);
-            for (auto p_iter = histogram.begin(); p_iter != histogram.end(); ++p_iter) {
-                if (math::eq(p_iter->d, point.d)) {
-                    cnt += index_.RemovePairInfo(rc_e1, rc_e2, *p_iter);
-                    cnt += index_.RemovePairInfo(rc_e2, rc_e1, -*p_iter);
-                    TRACE("Removed pi " << graph_.int_id(rc_e1) << " " << graph_.int_id(rc_e2)
-                          << " dist " << p_iter->d << " var " << p_iter->var);
-                }
-            }
-
-            TRACE("cnt += " << cnt);
-        }
         return cnt;
     }
 
     const Graph& graph_;
-    omnigraph::de::PairedInfoIndexT<Graph>& index_;
+    Index& index_;
     const io::SequencingLibrary<debruijn_config::DataSetData>& lib_;
 
     DECL_LOGGER("PairInfoImprover")
diff --git a/src/debruijn/paired_statistics.hpp b/src/debruijn/paired_statistics.hpp
index 6c29963..6493c73 100644
--- a/src/debruijn/paired_statistics.hpp
+++ b/src/debruijn/paired_statistics.hpp
@@ -262,8 +262,8 @@ public:
 
   bool ContainsPositiveDistance(EdgeId e1, const Histogram& infos) const {
     int first_len = int(graph_.length(e1));
-    for (auto it = infos.begin(); it != infos.end(); ++it) {
-      if (rounded_d(*it) > first_len)
+    for (auto point : infos) {
+      if (rounded_d(point) > first_len)
         return true;
     }
     return false;
diff --git a/src/debruijn/path_extend/bidirectional_path.hpp b/src/debruijn/path_extend/bidirectional_path.hpp
index d0fd4dc..8ea4edb 100644
--- a/src/debruijn/path_extend/bidirectional_path.hpp
+++ b/src/debruijn/path_extend/bidirectional_path.hpp
@@ -25,20 +25,36 @@ namespace path_extend {
 
 class BidirectionalPath;
 
+struct Gap {
+    int gap_;
+    uint32_t trash_previous_;
+    uint32_t trash_current_;
+    Gap(int gap)
+    : gap_(gap), trash_previous_(0), trash_current_(0)
+    { }
+
+    Gap(int gap, uint32_t trash_previous, uint32_t trash_current)
+     : gap_(gap), trash_previous_(trash_previous), trash_current_(trash_current)
+     { }
+};
+
+
 class PathListener {
 public:
-    virtual void FrontEdgeAdded(EdgeId e, BidirectionalPath * path, int gap = 0) = 0;
-    virtual void BackEdgeAdded(EdgeId e, BidirectionalPath * path, int gap = 0) = 0;
+    virtual void FrontEdgeAdded(EdgeId e, BidirectionalPath * path, Gap gap) = 0;
+    virtual void BackEdgeAdded(EdgeId e, BidirectionalPath * path, Gap gap) = 0;
     virtual void FrontEdgeRemoved(EdgeId e, BidirectionalPath * path) = 0;
     virtual void BackEdgeRemoved(EdgeId e, BidirectionalPath * path) = 0;
     virtual ~PathListener() {
     }
 };
 
+
 class BidirectionalPath : public PathListener {
 private:
     static std::atomic<uint64_t> path_id_;
 
+
 public:
     BidirectionalPath(const Graph& g)
             : g_(g),
@@ -126,9 +142,10 @@ public:
         if (gap_len_.size() == 0 || cumulative_len_.size() == 0) {
             return 0;
         }
-        return cumulative_len_[0] + gap_len_[0];
+        return cumulative_len_[0] + gap_len_[0].gap_;
     }
 
+    //TODO: provide forward/reverse iterators instead of index-based access
     EdgeId operator[](size_t index) const {
         return data_[index];
     }
@@ -148,7 +165,15 @@ public:
     }
 
     int GapAt(size_t index) const {
-        return gap_len_[index];
+        return gap_len_[index].gap_;
+    }
+
+    uint32_t TrashCurrentAt(size_t index) const {
+        return gap_len_[index].trash_current_;
+    }
+
+    uint32_t TrashPreviousAt(size_t index) const {
+        return gap_len_[index].trash_previous_;
     }
 
     size_t GetId() const {
@@ -163,7 +188,15 @@ public:
         return data_.front();
     }
 
-    void PushBack(EdgeId e, int gap = 0) {
+    void PushBack(EdgeId e, int gap = 0, uint32_t trash_previous = 0, uint32_t trash_current = 0) {
+        data_.push_back(e);
+        Gap gap_struct(gap, trash_previous, trash_current);
+        gap_len_.push_back(gap_struct);
+        IncreaseLengths(g_.length(e), gap_struct);
+        NotifyBackEdgeAdded(e, gap_struct);
+    }
+
+    void PushBack(EdgeId e, Gap gap) {
         data_.push_back(e);
         gap_len_.push_back(gap);
         IncreaseLengths(g_.length(e), gap);
@@ -172,7 +205,7 @@ public:
 
     void PushBack(const BidirectionalPath& path) {
         for (size_t i = 0; i < path.Size(); ++i) {
-            PushBack(path.At(i), path.GapAt(i));
+            PushBack(path.At(i), path.GapAt(i), path.TrashPreviousAt(i), path.TrashCurrentAt(i));
         }
     }
 
@@ -202,10 +235,18 @@ public:
     virtual void FrontEdgeAdded(EdgeId, BidirectionalPath*, int) {
     }
 
+    virtual void FrontEdgeAdded(EdgeId, BidirectionalPath*, Gap) {
+    }
+
+
     virtual void BackEdgeAdded(EdgeId e, BidirectionalPath*, int gap) {
         PushFront(g_.conjugate(e), gap);
     }
 
+    virtual void BackEdgeAdded(EdgeId e, BidirectionalPath*, Gap gap) {
+        PushFront(g_.conjugate(e), gap);
+    }
+
     virtual void FrontEdgeRemoved(EdgeId, BidirectionalPath*) {
     }
 
@@ -235,6 +276,15 @@ public:
         return FindFirst(e) != -1;
     }
 
+    bool Contains(VertexId v) const {
+        for(auto edge : data_) {
+            if(g_.EdgeEnd(edge) == v || g_.EdgeStart(edge) == v ) {
+                return true;
+            }
+        }
+        return false;
+    }
+
     vector<size_t> FindAll(EdgeId e, size_t start = 0) const {
         vector<size_t> result;
         for (size_t i = start; i < Size(); ++i) {
@@ -296,7 +346,7 @@ public:
                 max_over = over;
             }
         }
-        return max_over;
+        return (size_t) max_over;
     }
 
     int FindFirst(const BidirectionalPath& path, size_t from = 0) const {
@@ -310,7 +360,7 @@ public:
         }
         return -1;
     }
-
+//TODO: Why just naive search?
     int FindLast(const BidirectionalPath& path) const {
         if (path.Size() > Size()) {
             return -1;
@@ -369,6 +419,7 @@ public:
                 size_t palindrom_half_size = begin - begin_pos;
                 size_t head_len = Length() - LengthAt(begin_pos);
                 size_t tail_len = *end_pos < Size() - 1 ? LengthAt(*end_pos + 1) : 0;
+//TODO: this is not true when there are gaps inside the palindrome (palindrom_len);
                 size_t palindrom_len = (size_t) max((int) LengthAt(begin_pos) - (int) LengthAt(begin), 0);
                 size_t between = (size_t) max(0, (int) LengthAt(begin) - (int) (end < Size() - 1 ? LengthAt(end + 1) : 0));
                 DEBUG("tail len " << tail_len << " head len " << head_len << " palindrom_len "<< palindrom_len << " between " << between);
@@ -378,7 +429,7 @@ public:
                         continue;
                     }
                     if (between > palindrom_len) {
-                        DEBUG("to big part between");
+                        DEBUG("too big part between");
                         continue;
                     }
                 }
@@ -388,9 +439,11 @@ public:
                 }
                 if (delete_tail) {
                     PopBack(tail_size + palindrom_half_size);
+                    DEBUG("Deleting tail  because of palindrom removal");
                     return;
                 } else {
                     GetConjPath()->PopBack(head_size + palindrom_half_size);
+                    DEBUG("Deleting head because of palindrom removal");
                     return;
                 }
             }
@@ -425,7 +478,7 @@ public:
         }
         result.PushBack(g_.conjugate(Back()), 0);
         for (int i = ((int) Size()) - 2; i >= 0; --i) {
-            result.PushBack(g_.conjugate(data_[i]), gap_len_[i + 1]);
+            result.PushBack(g_.conjugate(data_[i]), gap_len_[i + 1].gap_ + gap_len_[i + 1].trash_current_ - gap_len_[i + 1].trash_previous_, gap_len_[i + 1].trash_current_, gap_len_[i + 1].trash_previous_);
         }
 
         return result;
@@ -482,9 +535,13 @@ public:
         DEBUG("Path " << id_);
         DEBUG("Length " << Length());
         DEBUG("Weight " << weight_);
-        DEBUG("#, edge, length, gap length, total length, total length from begin");
+        DEBUG("#, edge, length, gap length, trash length, total length, total length from begin");
         for (size_t i = 0; i < Size(); ++i) {
-            DEBUG(i << ", " << g_.int_id(At(i)) << ", " << g_.length(At(i)) << ", " << GapAt(i) << ", " << LengthAt(i) << ", " << Length() - LengthAt(i));
+            DEBUG(i << ", " << g_.int_id(At(i)) << ", " 
+                    << g_.length(At(i)) << ", " << GapAt(i) << ", " 
+                    << TrashPreviousAt(i) << "-" << TrashCurrentAt(i) 
+                    << ", " << LengthAt(i) << ", " 
+                    << ((Length() < LengthAt(i)) ? 0 : Length() - LengthAt(i)));
         }
     }
 
@@ -549,6 +606,15 @@ public:
     bool IsOverlap() const {
         return overlap_;
     }
+
+    void ResetOverlaps() {
+        overlap_ = false;
+        has_overlaped_begin_ = false;
+        has_overlaped_end_ = false;
+        conj_path_->overlap_ = false;
+        conj_path_->has_overlaped_begin_ = false;
+        conj_path_->has_overlaped_end_ = false;
+    }
 private:
 
     void RecountLengths() {
@@ -560,16 +626,16 @@ private:
         }
     }
 
-    void IncreaseLengths(size_t length, size_t gap) {
+    void IncreaseLengths(size_t length, Gap gap_struct) {
         for (auto iter = cumulative_len_.begin(); iter != cumulative_len_.end(); ++iter) {
-            *iter += length + gap;
+            *iter += length + gap_struct.gap_ - gap_struct.trash_previous_;
         }
-
         cumulative_len_.push_back(length);
     }
 
     void DecreaseLengths() {
-        size_t length = g_.length(data_.back()) + gap_len_.back();
+        size_t length = g_.length(data_.back()) + gap_len_.back().gap_ - gap_len_.back().trash_previous_;
+
         for (auto iter = cumulative_len_.begin(); iter != cumulative_len_.end(); ++iter) {
             *iter -= length;
         }
@@ -582,12 +648,24 @@ private:
         }
     }
 
+    void NotifyFrontEdgeAdded(EdgeId e, Gap gap) {
+        for (auto i = listeners_.begin(); i != listeners_.end(); ++i) {
+            (*i)->FrontEdgeAdded(e, this, gap);
+        }
+    }
+
     void NotifyBackEdgeAdded(EdgeId e, int gap) {
         for (auto i = listeners_.begin(); i != listeners_.end(); ++i) {
             (*i)->BackEdgeAdded(e, this, gap);
         }
     }
 
+    void NotifyBackEdgeAdded(EdgeId e, Gap gap) {
+        for (auto i = listeners_.begin(); i != listeners_.end(); ++i) {
+            (*i)->BackEdgeAdded(e, this, gap);
+        }
+    }
+
     void NotifyFrontEdgeRemoved(EdgeId e) {
         for (auto i = listeners_.begin(); i != listeners_.end(); ++i) {
             (*i)->FrontEdgeRemoved(e, this);
@@ -600,27 +678,34 @@ private:
         }
     }
 
-    void PushFront(EdgeId e, int gap = 0) {
+    void PushFront(EdgeId e, Gap gap) {
+        PushFront(e, gap.gap_ + gap.trash_current_ - gap.trash_previous_, gap.trash_current_, gap.trash_previous_);
+    }
+
+    void PushFront(EdgeId e, int gap = 0, uint32_t trash_previous = 0, uint32_t trash_current = 0) {
         data_.push_front(e);
         if (gap_len_.size() > 0) {
-            gap_len_[0] += gap;
+            gap_len_[0].gap_ += gap;
+            gap_len_[0].trash_previous_ += trash_previous;
+            gap_len_[0].trash_current_ += trash_current;
         }
-        gap_len_.push_front(0);
+        gap_len_.push_front(Gap(0, 0, 0));
+
         int length = (int) g_.length(e);
         if (cumulative_len_.empty()) {
             cumulative_len_.push_front(length);
         } else {
-            cumulative_len_.push_front(length + gap + cumulative_len_.front());
+            cumulative_len_.push_front(length + cumulative_len_.front() + gap - trash_previous );
         }
         NotifyFrontEdgeAdded(e, gap);
     }
 
     void PopFront() {
         EdgeId e = data_.front();
-        int cur_gap = gap_len_.front();
         if (gap_len_.size() > 1) {
-            cur_gap += GapAt(1);
-            gap_len_[1] = 0;
+            gap_len_[1].gap_ = 0;
+            gap_len_[1].trash_previous_ = 0;
+            gap_len_[1].trash_current_ = 0;
         }
         data_.pop_front();
         gap_len_.pop_front();
@@ -646,7 +731,7 @@ private:
     std::deque<EdgeId> data_;
     BidirectionalPath* conj_path_;
     std::deque<size_t> cumulative_len_;  // Length from beginning of i-th edge to path end for forward directed path: L(e1 + e2 + ... + eN) ... L(eN)
-    std::deque<int> gap_len_;  // e1 - gap2 - e2 - ... - gapN - eN
+    std::deque<Gap> gap_len_;  // e1 - gap2 - e2 - ... - gapN - eN
     std::vector<PathListener *> listeners_;
     const uint64_t id_;  //Unique ID
     float weight_;
@@ -697,6 +782,7 @@ inline size_t FirstNotEqualPosition(const BidirectionalPath& path1, size_t pos1,
             cur_pos1--;
             cur_pos2--;
         } else {
+            DEBUG("Not Equal at " << cur_pos1 << " and " << cur_pos2);
             return cur_pos1;
         }
         if (cur_pos1 >= 0 && cur_pos2 >= 0) {
@@ -709,9 +795,11 @@ inline size_t FirstNotEqualPosition(const BidirectionalPath& path1, size_t pos1,
             gap2 = path2.GapAt(cur_pos2);
         }
     }
+    DEBUG("Equal!!");
     return -1UL;
 }
 inline bool EqualBegins(const BidirectionalPath& path1, size_t pos1, const BidirectionalPath& path2, size_t pos2, bool use_gaps) {
+    DEBUG("Checking for equal begins");
     return FirstNotEqualPosition(path1, pos1, path2, pos2, use_gaps) == -1UL;
 }
 
diff --git a/src/debruijn/path_extend/extension_chooser.hpp b/src/debruijn/path_extend/extension_chooser.hpp
index e19db6e..cb6d897 100644
--- a/src/debruijn/path_extend/extension_chooser.hpp
+++ b/src/debruijn/path_extend/extension_chooser.hpp
@@ -22,21 +22,20 @@
 #include "pe_utils.hpp"
 #include "next_path_searcher.hpp"
 
-namespace path_extend {
+//#include "scaff_supplementary.hpp"
 
-typedef std::multimap<double, EdgeWithDistance> AlternativeConteiner;
+namespace path_extend {
 
+typedef std::multimap<double, EdgeWithDistance> AlternativeContainer;
 
 class PathAnalyzer {
-
-protected:
     const Graph& g_;
 
 public:
     PathAnalyzer(const Graph& g): g_(g) {
     }
 
-    int ExcludeTrivial(const BidirectionalPath& path, std::map<size_t, double>& edges, int from = -1) {
+    int ExcludeTrivial(const BidirectionalPath& path, std::set<size_t>& edges, int from = -1) const {
         int edgeIndex = (from == -1) ? (int) path.Size() - 1 : from;
         if ((int) path.Size() <= from) {
             return edgeIndex;
@@ -46,14 +45,13 @@ public:
             EdgeId e = g_.GetUniqueIncomingEdge(currentVertex);
             currentVertex = g_.EdgeStart(e);
 
-            edges.insert(make_pair((size_t)edgeIndex, 0.0));
+            edges.insert((size_t) edgeIndex);
             --edgeIndex;
         }
         return edgeIndex;
     }
 
-    int ExcludeTrivialWithBulges(const BidirectionalPath& path, std::map<size_t, double>& edges) {
-        edges.clear();
+    int ExcludeTrivialWithBulges(const BidirectionalPath& path, std::set<size_t>& edges) const {
 
         if (path.Empty()) {
             return 0;
@@ -98,7 +96,7 @@ public:
 
     virtual void ExtensionChosen(double weight) = 0;
 
-    virtual void ExtensionChosen(AlternativeConteiner& alts) = 0;
+    virtual void ExtensionChosen(const AlternativeContainer& alts) = 0;
 
     virtual ~ExtensionChooserListener() {
 
@@ -113,96 +111,100 @@ public:
 
 protected:
     const Graph& g_;
-
     shared_ptr<WeightCounter> wc_;
+    //FIXME: possible memory leak - raw listener pointers in listeners_ are never deleted (destructor is empty)
+    std::vector<ExtensionChooserListener *> listeners_;
 
+private:
+    double weight_threshold_;
     PathAnalyzer analyzer_;
 
-    double prior_coeff_;
-
     bool excludeTrivial_;
     bool excludeTrivialWithBulges_;
-    DECL_LOGGER("ExtensionChooser");
 
-    std::vector<ExtensionChooserListener *> listeners_;
 
 public:
-    ExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc = shared_ptr<WeightCounter>(0), double priority = 0.0): g_(g), wc_(wc), analyzer_(g), prior_coeff_(priority),
-        excludeTrivial_(true), excludeTrivialWithBulges_(true), listeners_() {
+    ExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc = nullptr, double weight_threshold = -1.): 
+        g_(g), wc_(wc), 
+        weight_threshold_(weight_threshold), analyzer_(g), 
+        excludeTrivial_(true), excludeTrivialWithBulges_(true) {
     }
 
     virtual ~ExtensionChooser() {
 
     }
 
-    virtual EdgeContainer Filter(BidirectionalPath& path, EdgeContainer& edges) = 0;
+    virtual EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer& edges) const = 0;
 
     bool isExcludeTrivial() const
     {
         return excludeTrivial_;
     }
 
-    double CountWeight(BidirectionalPath& path, EdgeId e) {
-        return wc_->CountWeight(path, e);
-    }
-
     bool isExcludeTrivialWithBulges() const
     {
         return excludeTrivialWithBulges_;
     }
 
-    void setExcludeTrivial(bool excludeTrivial)
-    {
+    void setExcludeTrivial(bool excludeTrivial) {
         this->excludeTrivial_ = excludeTrivial;
     }
 
-    void setExcludeTrivialWithBulges(bool excludeTrivialWithBulges)
-    {
+    void setExcludeTrivialWithBulges(bool excludeTrivialWithBulges) {
         this->excludeTrivialWithBulges_ = excludeTrivialWithBulges;
     }
 
-    void ClearExcludedEdges() {
-        wc_->GetExcludedEdges().clear();
-    }
-
-    PairedInfoLibraries& getLibs() {
-        return wc_->getLibs();
+    bool CheckThreshold(double weight) const {
+        return math::ge(weight, weight_threshold_);
     }
 
     void Subscribe(ExtensionChooserListener * listener) {
         listeners_.push_back(listener);
     }
 
-    void NotifyAll(double weight) {
-        for (auto iter = listeners_.begin(); iter != listeners_.end(); ++iter) {
-            (*iter)->ExtensionChosen(weight);
+    void NotifyAll(double weight) const {
+        for (auto listener_ptr : listeners_) {
+            listener_ptr->ExtensionChosen(weight);
         }
     }
 
-    void NotifyAll(AlternativeConteiner& alts) {
-        for (auto iter = listeners_.begin(); iter != listeners_.end(); ++iter) {
-            (*iter)->ExtensionChosen(alts);
+    void NotifyAll(const AlternativeContainer& alts) const {
+        for (auto listener_ptr : listeners_) {
+            listener_ptr->ExtensionChosen(alts);
         }
     }
 
-    bool WeighConterBased() const {
-        return wc_ != 0;
+    bool WeightCounterBased() const {
+        return wc_ != nullptr;
+    }
+
+    const WeightCounter& wc() const {
+        VERIFY(wc_);
+        return *wc_;
     }
 
 protected:
-    void RemoveTrivial(BidirectionalPath& path){
-    	wc_->GetExcludedEdges().clear();
-        if (excludeTrivialWithBulges_)
-        {
-            analyzer_.ExcludeTrivialWithBulges(path, wc_->GetExcludedEdges());
-        }
-        else if (excludeTrivial_)
-        {
-            analyzer_.ExcludeTrivial(path, wc_->GetExcludedEdges());
+    void RemoveTrivial(const BidirectionalPath& path, std::set<size_t>& to_exclude) const {
+        if (excludeTrivialWithBulges_) {
+            analyzer_.ExcludeTrivialWithBulges(path, to_exclude);
+        } else if (excludeTrivial_) {
+            analyzer_.ExcludeTrivial(path, to_exclude);
         }
     }
 
+    bool HasIdealInfo(EdgeId e1, EdgeId e2, size_t dist) const {
+        return math::gr(wc_->lib().IdealPairedInfo(e1, e2, (int) dist), 0.);
+	}
 
+	bool HasIdealInfo(const BidirectionalPath& p, EdgeId e, size_t gap) const {
+        for (int i = (int) p.Size() - 1; i >= 0; --i)
+			if (HasIdealInfo(p[i], e, gap + p.LengthAt(i)))
+                return true;
+        return false;
+	}
+
+private:
+    DECL_LOGGER("ExtensionChooser");
 };
 
 
@@ -219,7 +221,7 @@ public:
     {
     }
 
-    virtual EdgeContainer Filter(BidirectionalPath& path, EdgeContainer& edges) {
+    EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer& edges) const override {
         EdgeContainer e1 = first_->Filter(path, edges);
         return second_->Filter(path, e1);
     }
@@ -232,7 +234,7 @@ public:
     TrivialExtensionChooser(Graph& g): ExtensionChooser(g)  {
     }
 
-    virtual EdgeContainer Filter(BidirectionalPath& /*path*/, EdgeContainer& edges) {
+    EdgeContainer Filter(const BidirectionalPath& /*path*/, const EdgeContainer& edges) const override {
         if (edges.size() == 1) {
              return edges;
         }
@@ -244,190 +246,188 @@ public:
 class TrivialExtensionChooserWithPI: public ExtensionChooser {
 
 public:
-    TrivialExtensionChooserWithPI(Graph& g, shared_ptr<WeightCounter> wc): ExtensionChooser(g, wc) {
+    TrivialExtensionChooserWithPI(Graph& g, shared_ptr<WeightCounter> wc, double weight_threshold): 
+            ExtensionChooser(g, wc, weight_threshold) {
     }
 
-    virtual EdgeContainer Filter(BidirectionalPath& path, EdgeContainer& edges) {
-        wc_->GetExcludedEdges().clear();
+    EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer& edges) const override {
         if (edges.size() == 1) {
-                double weight = wc_->CountWeight(path, edges.back().e_);
-                NotifyAll(weight);
+            double weight = wc_->CountWeight(path, edges.back().e_, std::set<size_t>());
+            NotifyAll(weight);
 
-                if (wc_->IsExtensionPossible(weight)) {
-                    return edges;
-                }
+            if (CheckThreshold(weight)) {
+                return edges;
+            }
         }
         return EdgeContainer();
     }
 };
 
 class ExcludingExtensionChooser: public ExtensionChooser {
+    //FIXME what is the logic behind it?
+    double prior_coeff_;
 
-protected:
-
-    virtual void ExcludeEdges(BidirectionalPath& path, EdgeContainer& edges) = 0;
-
-    void FindWeights(BidirectionalPath& path, EdgeContainer& edges,
-            AlternativeConteiner& weights) {
+    AlternativeContainer FindWeights(const BidirectionalPath& path, const EdgeContainer& edges, const std::set<size_t>& to_exclude) const {
+        AlternativeContainer weights;
         for (auto iter = edges.begin(); iter != edges.end(); ++iter) {
-            double weight = wc_->CountWeight(path, iter->e_);
+            double weight = wc_->CountWeight(path, iter->e_, to_exclude);
             weights.insert(std::make_pair(weight, *iter));
             DEBUG("Candidate " << g_.int_id(iter->e_) << " weight " << weight << " length " << g_.length(iter->e_));
         }
         NotifyAll(weights);
+        return weights;
     }
 
-    void FindPossibleEdges(AlternativeConteiner& weights, EdgeContainer& top,
-            double max_weight) {
-        auto possibleEdge = weights.lower_bound(max_weight / prior_coeff_);
-        for (auto iter = possibleEdge; iter != weights.end(); ++iter) {
+    EdgeContainer FindPossibleEdges(const AlternativeContainer& weights, 
+            double max_weight) const {
+        EdgeContainer top;
+        auto possible_edge = weights.lower_bound(max_weight / prior_coeff_);
+        for (auto iter = possible_edge; iter != weights.end(); ++iter) {
             top.push_back(iter->second);
         }
+        return top;
     }
 
-    EdgeContainer FindFilteredEdges(BidirectionalPath& path,
-            EdgeContainer& edges) {
-        AlternativeConteiner weights;
-        FindWeights(path, edges, weights);
-        EdgeContainer top;
-        auto maxWeight = (--weights.end())->first;
-        FindPossibleEdges(weights, top, maxWeight);
+    EdgeContainer FindFilteredEdges(const BidirectionalPath& path,
+            const EdgeContainer& edges, const std::set<size_t>& to_exclude) const {
+        AlternativeContainer weights = FindWeights(path, edges, to_exclude);
+        auto max_weight = (--weights.end())->first;
+        EdgeContainer top = FindPossibleEdges(weights, max_weight);
         EdgeContainer result;
-        if (top.size() >= 1 && wc_->IsExtensionPossible(maxWeight)) {
+        if (top.size() >= 1 && CheckThreshold(max_weight)) {
             result = top;
         }
         return result;
     }
+
+protected:
+
+    virtual void ExcludeEdges(const BidirectionalPath& path, const EdgeContainer& edges, std::set<size_t>& to_exclude) const = 0;
+
 public:
-    ExcludingExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, double priority) :
-            ExtensionChooser(g, wc, priority) {
+    ExcludingExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, double weight_threshold, double priority) :
+            ExtensionChooser(g, wc, weight_threshold), prior_coeff_(priority) {
 
     }
 
-    virtual EdgeContainer Filter(BidirectionalPath& path,
-            EdgeContainer& edges) {
+    virtual EdgeContainer Filter(const BidirectionalPath& path,
+            const EdgeContainer& edges) const {
         DEBUG("Paired-end extension chooser");
         if (edges.empty()) {
             return edges;
         }
-        RemoveTrivial(path);
+        std::set<size_t> to_exclude;
+        RemoveTrivial(path, to_exclude);
         path.Print();
         EdgeContainer result = edges;
-        bool first_time = true;
-        bool changed = true;
-        if (first_time || (result.size() > 1 && changed)) {
-            first_time = false;
-            ExcludeEdges(path, result);
-            EdgeContainer new_result = FindFilteredEdges(path, result);
-            if (new_result.size() == result.size()) {
-                changed = false;
-            }
-            result = new_result;
-        }
+        ExcludeEdges(path, result, to_exclude);
+        result = FindFilteredEdges(path, result, to_exclude);
         if (result.size() == 1) {
             DEBUG("Paired-end extension chooser helped");
         }
         return result;
     }
 
+private:
+    DECL_LOGGER("ExcludingExtensionChooser");
+
 };
 
 class SimpleExtensionChooser: public ExcludingExtensionChooser {
 protected:
-	virtual void ExcludeEdges(BidirectionalPath& path, EdgeContainer& edges) {
-        ClearExcludedEdges();
+	void ExcludeEdges(const BidirectionalPath& path, const EdgeContainer& edges, std::set<size_t>& to_exclude) const override {
         if (edges.size() < 2) {
             return;
         }
-        RemoveTrivial(path);
+        //excluding based on absence of ideal info
         int index = (int) path.Size() - 1;
-        std::map<size_t, double>& excluded_edges = wc_->GetExcludedEdges();
         while (index >= 0) {
-            if (excluded_edges.find(index) != excluded_edges.end()) {
+            if (to_exclude.count(index)) {
                 index--;
                 continue;
             }
             EdgeId path_edge = path[index];
-            double min_ideal_w = wc_->CountIdealInfo(path_edge, edges.at(0).e_,
-                                                     path.LengthAt(index));
-            bool common = true;
+
             for (size_t i = 0; i < edges.size(); ++i) {
-                double ideal_weight = wc_->CountIdealInfo(path_edge,
-                                                          edges.at(i).e_,
-                                                          path.LengthAt(index));
-                min_ideal_w = std::min(min_ideal_w, ideal_weight);
-                if (!wc_->PairInfoExist(path_edge, edges.at(i).e_,
-                                        (int) path.LengthAt(index))) {
-                    common = false;
+                if (!HasIdealInfo(path_edge,
+                           edges.at(i).e_,
+                           path.LengthAt(index))) {
+                    to_exclude.insert((size_t) index);
                 }
             }
-            if (common) {
-                DEBUG("common info from " << index);
-                excluded_edges.insert(make_pair((size_t) index, 0.0));
-            } else {
-                excluded_edges.insert(make_pair((size_t) index, min_ideal_w));
-            }
+
             index--;
         }
-        stringstream not_excl;
-        not_excl << "not excluded edges ";
-        for (size_t i = 0; i < path.Size(); ++i) {
-            if (excluded_edges.find(i) != excluded_edges.end() && excluded_edges[i] > 0.0) {
-                not_excl << i << " " << excluded_edges[i] << " , ";
+        
+        //excluding based on presence of ambiguous paired info
+        map<size_t, unsigned> edge_2_extension_cnt;
+        for (size_t i = 0; i < edges.size(); ++i) {
+            for (size_t e : wc_->PairInfoExist(path, edges.at(i).e_)) {
+                edge_2_extension_cnt[e] += 1;
+            }
+        }
+
+        for (auto e_w_ec : edge_2_extension_cnt) {
+            if (e_w_ec.second == edges.size()) {
+                to_exclude.insert(e_w_ec.first);
             }
         }
-        DEBUG(not_excl.str());
     }
+
 public:
 
-	SimpleExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, double priority) :
-	    ExcludingExtensionChooser(g, wc, priority) {
+	SimpleExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, double weight_threshold, double priority) :
+	    ExcludingExtensionChooser(g, wc, weight_threshold, priority) {
 	}
+
+private:
+    DECL_LOGGER("SimpleExtensionChooser");
 };
 
 class LongEdgeExtensionChooser: public ExcludingExtensionChooser {
 protected:
-    virtual void ExcludeEdges(BidirectionalPath& path, EdgeContainer& edges) {
-        ClearExcludedEdges();
+    virtual void ExcludeEdges(const BidirectionalPath& path, const EdgeContainer& edges, std::set<size_t>& to_exclude) const {
         if (edges.size() < 2) {
             return;
         }
-        RemoveTrivial(path);
         int index = (int) path.Size() - 1;
-        std::map<size_t, double>& excluded_edges = wc_->GetExcludedEdges();
         while (index >= 0) {
-            if (excluded_edges.find(index) != excluded_edges.end()) {
+            if (to_exclude.count(index)) {
                 index--;
                 continue;
             }
             EdgeId path_edge = path[index];
-            if(path.graph().length(path_edge) < 200)
-                excluded_edges.insert(make_pair((size_t) index, 0.0));
+            //FIXME configure!
+            if (path.graph().length(path_edge) < 200)
+                to_exclude.insert((size_t) index);
             index--;
         }
     }
 public:
-    LongEdgeExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, double priority) :
-        ExcludingExtensionChooser(g, wc, priority) {
+    LongEdgeExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, double weight_threshold, double priority) :
+        ExcludingExtensionChooser(g, wc, weight_threshold, priority) {
     }
 };
 
 class ScaffoldingExtensionChooser : public ExtensionChooser {
-    double weight_threshold_;
+
+protected:
+    typedef ExtensionChooser base;
+    double raw_weight_threshold_;
     double cl_weight_threshold_;
     const double is_scatter_coeff_ = 3.0;
 
     void AddInfoFromEdge(const std::vector<int>& distances, const std::vector<double>& weights, 
-                         std::vector<pair<int, double>>& histogram, size_t len_to_path_end) {
+                         std::vector<pair<int, double>>& histogram, size_t len_to_path_end) const {
         for (size_t l = 0; l < distances.size(); ++l) {
             //todo commented out condition seems unnecessary and should be library dependent! do we need "max(0" there?
-            if (/*distances[l] > max(0, (int) len_to_path_end - int(1000)) && */math::ge(weights[l], weight_threshold_)) {
+            if (/*distances[l] > max(0, (int) len_to_path_end - int(1000)) && */math::ge(weights[l], raw_weight_threshold_)) {
                 histogram.push_back(make_pair(distances[l] - (int) len_to_path_end, weights[l]));
             }
         }
     }
 
-    int CountMean(vector<pair<int, double> >& histogram) {
+    int CountMean(const vector<pair<int, double> >& histogram) const {
         double dist = 0.0;
         double sum = 0.0;
         for (size_t i = 0; i < histogram.size(); ++i) {
@@ -438,18 +438,23 @@ class ScaffoldingExtensionChooser : public ExtensionChooser {
         return (int) round(dist);
     }
 
-    void CountAvrgDists(BidirectionalPath& path, EdgeId e, std::vector<pair<int, double>> & histogram) {
+	void GetDistances(EdgeId e1, EdgeId e2, std::vector<int>& dist,
+			std::vector<double>& w) const {
+		wc_->lib().CountDistances(e1, e2, dist, w);
+	}
+
+    void CountAvrgDists(const BidirectionalPath& path, EdgeId e, std::vector<pair<int, double>> & histogram) const {
         for (size_t j = 0; j < path.Size(); ++j) {
             std::vector<int> distances;
             std::vector<double> weights;
-            wc_->GetDistances(path.At(j), e, distances, weights);
+            GetDistances(path.At(j), e, distances, weights);
             if (distances.size() > 0) {
                 AddInfoFromEdge(distances, weights, histogram, path.LengthAt(j));
             }
         }
     }
 
-    void FindBestFittedEdgesForClustered(BidirectionalPath& path, const set<EdgeId>& edges, EdgeContainer& result) {
+    void FindBestFittedEdgesForClustered(const BidirectionalPath& path, const set<EdgeId>& edges, EdgeContainer& result) const {
         for (EdgeId e : edges) {
             std::vector<pair<int, double>> histogram;
             CountAvrgDists(path, e, histogram);
@@ -461,7 +466,7 @@ class ScaffoldingExtensionChooser : public ExtensionChooser {
                 continue;
             }
             int gap = CountMean(histogram);
-            if (wc_->CountIdealInfo(path, e, gap) > 0.0) {
+            if (HasIdealInfo(path, e, gap)) {
                 DEBUG("scaffolding " << g_.int_id(e) << " gap " << gap);
                 result.push_back(EdgeWithDistance(e, gap));
             }
@@ -472,23 +477,21 @@ class ScaffoldingExtensionChooser : public ExtensionChooser {
         return g_.IncomingEdgeCount(g_.EdgeStart(e)) == 0;
     }
 
-    set<EdgeId> FindCandidates(BidirectionalPath& path) const {
+    set<EdgeId> FindCandidates(const BidirectionalPath& path) const {
         set<EdgeId> jumping_edges;
-        PairedInfoLibraries libs = wc_->getLibs();
-        for (auto lib : libs) {
-            //todo lib (and FindJumpEdges) knows its var so it can be counted there
-            int is_scatter = int(math::round(double(lib->GetIsVar()) * is_scatter_coeff_));
-            for (int i = (int) path.Size() - 1; i >= 0 && path.LengthAt(i) - g_.length(path.At(i)) <= lib->GetISMax(); --i) {
-                set<EdgeId> jump_edges_i;
-                lib->FindJumpEdges(path.At(i), jump_edges_i,
-                                   std::max(0, (int)path.LengthAt(i) - is_scatter),
-                                   //FIXME do we need is_scatter here?
-                                   int((path.LengthAt(i) + lib->GetISMax() + is_scatter)),
-                                   0);
-                for (EdgeId e : jump_edges_i) {
-                    if (IsTip(e)) {
-                        jumping_edges.insert(e);
-                    }
+        const auto& lib = wc_->lib();
+        //todo lib (and FindJumpEdges) knows its var so it can be counted there
+        int is_scatter = int(math::round(double(lib.GetIsVar()) * is_scatter_coeff_));
+        for (int i = (int) path.Size() - 1; i >= 0 && path.LengthAt(i) - g_.length(path.At(i)) <= lib.GetISMax(); --i) {
+            set<EdgeId> jump_edges_i;
+            lib.FindJumpEdges(path.At(i), jump_edges_i,
+                               std::max(0, (int)path.LengthAt(i) - is_scatter),
+                               //FIXME do we need is_scatter here?
+                               int((path.LengthAt(i) + lib.GetISMax() + is_scatter)),
+                               0);
+            for (EdgeId e : jump_edges_i) {
+                if (IsTip(e)) {
+                    jumping_edges.insert(e);
                 }
             }
         }
@@ -497,14 +500,13 @@ class ScaffoldingExtensionChooser : public ExtensionChooser {
 
 public:
 
-    ScaffoldingExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, double priority, double is_scatter_coeff) :
-        ExtensionChooser(g, wc, priority), weight_threshold_(0.0),
+    ScaffoldingExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, double is_scatter_coeff) :
+        ExtensionChooser(g, wc), raw_weight_threshold_(0.0),
         cl_weight_threshold_(cfg::get().pe_params.param_set.scaffolder_options.cl_threshold),
         is_scatter_coeff_(is_scatter_coeff) {
     }
 
-    EdgeContainer Filter(BidirectionalPath& path, EdgeContainer& edges) override {
-        //FIXME WAT?
+    EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer& edges) const override {
         if (edges.empty()) {
             return edges;
         }
@@ -513,6 +515,8 @@ public:
         FindBestFittedEdgesForClustered(path, candidates, result);
         return result;
     }
+private:
+    DECL_LOGGER("ScaffoldingExtensionChooser");
 };
 
 inline bool EdgeWithWeightCompareReverse(const pair<EdgeId, double>& p1,
@@ -520,29 +524,25 @@ inline bool EdgeWithWeightCompareReverse(const pair<EdgeId, double>& p1,
     return p1.second > p2.second;
 }
 
-class UniqueEdgeAnalyzer {
-protected:
-    DECL_LOGGER("ExtensionChooser")
-
+class LongReadsUniqueEdgeAnalyzer {
+private:
+    DECL_LOGGER("LongReadsUniqueEdgeAnalyzer")
 public:
-    UniqueEdgeAnalyzer(const Graph& g, const GraphCoverageMap& cov_map,
+    LongReadsUniqueEdgeAnalyzer(const Graph& g, const GraphCoverageMap& cov_map,
                        double filter_threshold, double prior_threshold)
             : g_(g),
               cov_map_(cov_map),
               filter_threshold_(filter_threshold),
-              prior_threshold_(prior_threshold),
-              unique_edges_founded_(false) { }
+              prior_threshold_(prior_threshold) { 
+        FindAllUniqueEdges();
+    }
 
-    bool IsUnique(EdgeId e) {
-        if (!unique_edges_founded_) {
-            FindAllUniqueEdges();
-        }
-        unique_edges_founded_ = true;
+    bool IsUnique(EdgeId e) const {
         return unique_edges_.count(e) > 0;
     }
 
 private:
-    bool UniqueEdge(EdgeId e) {
+    bool UniqueEdge(EdgeId e) const {
         if (g_.length(e) > cfg::get().max_repeat_length)
             return true;
         DEBUG("Analyze unique edge " << g_.int_id(e));
@@ -563,6 +563,7 @@ private:
                     return false;
                 }
                 if (!ConsistentPath(**it1, pos1[0], **it2, pos2[0])) {
+                    DEBUG("Checking inconsistency");
                     if (CheckInconsistence(**it1, pos1[0], **it2, pos2[0],
                                            cov_paths)) {
                         DEBUG("***not unique " << g_.int_id(e) << " len " << g_.length(e) << "***");
@@ -626,6 +627,7 @@ private:
         }
         return false;
     }
+
     std::pair<double, double> GetSubPathsWeights(
             const BidirectionalPath& cand1, const BidirectionalPath& cand2,
             const BidirectionalPathSet& cov_paths) const {
@@ -651,7 +653,7 @@ private:
         return false;
     }
 
-    void FindAllUniqueCoverageEdges(){
+    void FindAllUniqueCoverageEdges() {
        if (cfg::get().ds.single_cell) {
            return;
        }
@@ -688,7 +690,6 @@ private:
                unique_edges_.insert(g_.conjugate(*iter));
            }
        }
-       unique_edges_founded_ = true;
        DEBUG("coverage based uniqueness started");
        FindAllUniqueCoverageEdges();
        DEBUG("Unique edges are found");
@@ -697,8 +698,9 @@ private:
     const Graph& g_;
     const GraphCoverageMap& cov_map_;
     double filter_threshold_;
-    double prior_threshold_;bool unique_edges_founded_;
+    double prior_threshold_;
     std::set<EdgeId> unique_edges_;
+
 };
 
 class SimpleScaffolding {
@@ -706,7 +708,7 @@ public:
     SimpleScaffolding(const Graph& g) : g_(g) {}
 
     BidirectionalPath FindMaxCommonPath(const vector<BidirectionalPath*>& paths,
-                                        size_t max_diff_len) {
+                                        size_t max_diff_len) const {
         BidirectionalPath max_end(g_);
         for (auto it1 = paths.begin(); it1 != paths.end(); ++it1) {
             BidirectionalPath* p1 = *it1;
@@ -745,15 +747,15 @@ public:
 
 private:
     const Graph& g_;
-
 };
+
 class LongReadsExtensionChooser : public ExtensionChooser {
 public:
     LongReadsExtensionChooser(const Graph& g, PathContainer& pc,
                               double filtering_threshold,
                               double weight_priority_threshold,
                               double unique_edge_priority_threshold)
-            : ExtensionChooser(g, 0, .0),
+            : ExtensionChooser(g),
               filtering_threshold_(filtering_threshold),
               weight_priority_threshold_(weight_priority_threshold),
               cov_map_(g, pc),
@@ -766,8 +768,8 @@ public:
      * Edge is unique if all reads mapped to this edge are consistent.
      * Two reads are consistent if they can form one path in the graph.
      */
-    virtual EdgeContainer Filter(BidirectionalPath& path,
-                                 EdgeContainer& edges) {
+    EdgeContainer Filter(const BidirectionalPath& path,
+                                 const EdgeContainer& edges) const override {
         if (edges.empty()) {
             return edges;
         }DEBUG("We in Filter of LongReadsExtensionChooser");
@@ -779,14 +781,19 @@ public:
         set<EdgeId> filtered_cands;
         map<EdgeId, BidirectionalPathSet > support_paths_ends;
         auto support_paths = cov_map_.GetCoveringPaths(path.Back());
+        DEBUG("Found " << support_paths.size() << " covering paths!!!");
         for (auto it = support_paths.begin(); it != support_paths.end(); ++it) {
             auto positions = (*it)->FindAll(path.Back());
+            (*it)->Print();
             for (size_t i = 0; i < positions.size(); ++i) {
                 if ((int) positions[i] < (int) (*it)->Size() - 1
                         && EqualBegins(path, (int) path.Size() - 1, **it,
-                                       positions[i], true)) {
+                                       positions[i], false)) {
+                    DEBUG("Checking unique path_back for " << (*it)->GetId());
 
                     if (UniqueBackPath(**it, positions[i])) {
+                        DEBUG("Success");
+
                         EdgeId next = (*it)->At(positions[i] + 1);
                         weights_cands[next] += (*it)->GetWeight();
                         filtered_cands.insert(next);
@@ -830,7 +837,7 @@ public:
     }
 
 private:
-    bool UniqueBackPath(const BidirectionalPath& path, size_t pos) {
+    bool UniqueBackPath(const BidirectionalPath& path, size_t pos) const {
         int int_pos = (int) pos;
         while (int_pos >= 0) {
             if (unique_edge_analyzer_.IsUnique(path.At(int_pos)) > 0)
@@ -850,26 +857,30 @@ private:
     double filtering_threshold_;
     double weight_priority_threshold_;
     const GraphCoverageMap cov_map_;
-    UniqueEdgeAnalyzer unique_edge_analyzer_;
+    LongReadsUniqueEdgeAnalyzer unique_edge_analyzer_;
     SimpleScaffolding simple_scaffolding_;
+
+    DECL_LOGGER("LongReadsExtensionChooser");
 };
 
 class MatePairExtensionChooser : public ExtensionChooser {
 public:
     MatePairExtensionChooser(const Graph& g, shared_ptr<PairedInfoLibrary> lib,
                               const PathContainer& paths, size_t max_number_of_paths_to_search)
-            : ExtensionChooser(g, 0, .0),
+            : ExtensionChooser(g),
               g_(g),
               lib_(lib),
               search_dist_(lib->GetISMax()),
               weight_counter_(g, lib, 10),
               cov_map_(g_, paths),
-              path_searcher_(g_, cov_map_, lib_->GetISMax(), PathsWeightCounter(g, lib, 30), max_number_of_paths_to_search),
+              path_searcher_(g_, cov_map_, lib_->GetISMax(), PathsWeightCounter(g, lib, (size_t) lib->GetSingleThreshold()), max_number_of_paths_to_search),
               unique_edge_analyzer_(g, cov_map_, 0., 1000.),
               simple_scaffolder_(g) {
     }
-    virtual EdgeContainer Filter(BidirectionalPath& path,
-                                 EdgeContainer& init_edges) {
+
+    //Attention! Uses const_cast to modify path!!!
+    EdgeContainer Filter(const BidirectionalPath& path,
+                         const EdgeContainer& init_edges) const override {
         DEBUG("mp chooser");
         path.Print();
         if (path.Length() < lib_->GetISMin()) {
@@ -906,7 +917,7 @@ public:
         EdgeContainer result = ChooseBest(path, next_paths);
         if (result.size() != 1) {
             DEBUG("scaffold tree");
-            result = ScaffoldTree(path);
+            result = ScaffoldTree(const_cast<BidirectionalPath&>(path));
         }
         DeletePaths(next_paths);
         if (result.size() != 1) {
@@ -914,16 +925,17 @@ public:
         }
         return result;
     }
+
 private:
-    EdgeContainer ScaffoldTree(BidirectionalPath& path) {
+    EdgeContainer ScaffoldTree(BidirectionalPath& path) const {
         DEBUG("try scaffold tree");
         vector<BidirectionalPath*> next_paths = path_searcher_.ScaffoldTree(path);
         VERIFY(next_paths.size() <= 1);
         EdgeContainer result;
-        if (next_paths.size() == 1 && next_paths[0]->Size() > 0) {
-            BidirectionalPath* res = next_paths[0];
+        if (!next_paths.empty() && next_paths.back()->Size() > 0) {
+            BidirectionalPath* res = next_paths.back();
             for (size_t i = 0; i < res->Size() - 1; ++i) {
-                path.PushBack(res->At(i), res->GapAt(i));
+                path.PushBack(res->At(i), res->GapAt(i), res->TrashPreviousAt(i), res->TrashCurrentAt(i));
             }
             result = EdgeContainer(1, EdgeWithDistance(res->Back(), res->GapAt(res->Size() - 1)));
         }
@@ -941,7 +953,7 @@ private:
 		return true;
 	}
 
-    map<EdgeId, double> FindBulgeWeights(BidirectionalPath& p, EdgeContainer& edges) const {
+    map<EdgeId, double> FindBulgeWeights(const BidirectionalPath& p, const EdgeContainer& edges) const {
         map<EdgeId, double> result;
         for (size_t i = 0; i < edges.size(); ++i) {
             result[edges[i].e_] = 0.0;
@@ -963,7 +975,7 @@ private:
         return result;
     }
 
-    EdgeContainer TryResolveBulge(BidirectionalPath& p, EdgeContainer& edges) const {
+    EdgeContainer TryResolveBulge(const BidirectionalPath& p, const EdgeContainer& edges) const {
         if (!IsBulge(edges))
             return edges;
         map<EdgeId, double> weights = FindBulgeWeights(p, edges);
@@ -986,7 +998,7 @@ private:
         return result;
     }
 
-    EdgeContainer ChooseBest(const BidirectionalPath& path, BidirectionalPathSet& next_paths) {
+    EdgeContainer ChooseBest(const BidirectionalPath& path, const BidirectionalPathSet& next_paths) const {
         DEBUG("Try to choose from best paths...");
         vector<BidirectionalPath*> best_path = MaxWeightedPath(path, next_paths);
         EdgeContainer result;
@@ -998,7 +1010,7 @@ private:
         return result;
     }
 
-    bool HasPIFromUniqueEdges(const BidirectionalPath& p1, const BidirectionalPath& p2, const set<size_t>& p1_unique_edges) {
+    bool HasPIFromUniqueEdges(const BidirectionalPath& p1, const BidirectionalPath& p2, const set<size_t>& p1_unique_edges) const {
         for (size_t i1 = 0; i1 < p1.Size(); ++i1) {
             if (p1_unique_edges.find(i1) == p1_unique_edges.end()) {
                 continue;
@@ -1015,7 +1027,7 @@ private:
     }
 
     bool SignificallyDifferentEdges(const BidirectionalPath& init_path, const BidirectionalPath& path1, const map<size_t, double>& pi1,
-                                    const BidirectionalPath& path2, const map<size_t, double>& pi2, const set<size_t>& unique_init_edges) {
+                                    const BidirectionalPath& path2, const map<size_t, double>& pi2, const set<size_t>& unique_init_edges) const {
         double not_common_w1 = 0.0;
         double common_w = 0.0;
         for (auto iter = pi1.begin(); iter != pi1.end(); ++iter) {
@@ -1035,7 +1047,7 @@ private:
         return false;
     }
 
-    set<size_t> FindNotCommonEdges(const BidirectionalPath& path, const BidirectionalPathMap< map<size_t, double> >& all_pi) {
+    set<size_t> FindNotCommonEdges(const BidirectionalPath& path, const BidirectionalPathMap< map<size_t, double> >& all_pi) const {
         set<size_t> res;
         for (size_t i = 0; i < path.Size(); ++i) {
             if (!unique_edge_analyzer_.IsUnique(path.At(i))) {
@@ -1054,7 +1066,7 @@ private:
         return res;
     }
 
-    void DeleteSmallWeights(const BidirectionalPath& path, BidirectionalPathSet& paths, BidirectionalPathMap< map<size_t, double> >& all_pi) {
+    void DeleteSmallWeights(const BidirectionalPath& path, BidirectionalPathSet& paths, BidirectionalPathMap< map<size_t, double> >& all_pi) const {
         double max_weight = 0.0;
         BidirectionalPath* max_path = NULL;
         for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
@@ -1076,7 +1088,7 @@ private:
         }
     }
 
-    void DeleteCommonPi(const BidirectionalPath& p, BidirectionalPathMap< map<size_t, double> >& all_pi) {
+    void DeleteCommonPi(const BidirectionalPath& p, BidirectionalPathMap< map<size_t, double> >& all_pi) const {
         weight_counter_.ClearCommonWeight();
         for (size_t i = 0; i < p.Size(); ++i) {
             double common = DBL_MAX;
@@ -1106,7 +1118,7 @@ private:
     }
 
     void CountAllPairInfo(const BidirectionalPath& path, const BidirectionalPathSet& next_paths,
-                BidirectionalPathMap< map<size_t, double> >& result) {
+                BidirectionalPathMap<map<size_t, double>>& result) const {
         result.clear();
         size_t common_begin = FindCommonBegin(next_paths);
         DEBUG("common begin " << common_begin);
@@ -1115,8 +1127,8 @@ private:
         }
     }
 
-    void CountWeightsAndFilter(const BidirectionalPath& path, BidirectionalPathSet& next_paths, bool delete_small_w) {
-        BidirectionalPathMap< map<size_t, double> > all_pi;
+    void CountWeightsAndFilter(const BidirectionalPath& path, BidirectionalPathSet& next_paths, bool delete_small_w) const {
+        BidirectionalPathMap<map<size_t, double> > all_pi;
         CountAllPairInfo(path, next_paths, all_pi);
         DeleteCommonPi(path, all_pi);
         for (BidirectionalPath* next : next_paths) {
@@ -1128,11 +1140,10 @@ private:
     }
 
     struct PathWithWeightSort {
-        PathWithWeightSort(MatePairExtensionChooser& mp_chooser, const BidirectionalPath& path, BidirectionalPathMap< map<size_t, double> >& all_pi)
+        PathWithWeightSort(const MatePairExtensionChooser& mp_chooser, const BidirectionalPath& path, BidirectionalPathMap< map<size_t, double> >& all_pi)
                 : mp_chooser_(mp_chooser),
                   path_(path),
-                  all_pi_(all_pi) {
-            not_common_ = mp_chooser_.FindNotCommonEdges(path_, all_pi_);
+                  not_common_(mp_chooser_.FindNotCommonEdges(path_, all_pi)) {
         }
 
         bool operator()(const BidirectionalPath* p1, const BidirectionalPath* p2) {
@@ -1153,13 +1164,12 @@ private:
             }
             return p1->Size() > p2->Size();
         }
-        MatePairExtensionChooser& mp_chooser_;
+        const MatePairExtensionChooser& mp_chooser_;
         const BidirectionalPath& path_;
-        BidirectionalPathMap< map<size_t, double> >& all_pi_;
-        set<size_t> not_common_;
+        const set<size_t> not_common_;
     };
 
-    vector<BidirectionalPath*> SortResult(const BidirectionalPath& path, BidirectionalPathSet& next_paths) {
+    vector<BidirectionalPath*> SortResult(const BidirectionalPath& path, BidirectionalPathSet& next_paths) const {
         BidirectionalPathMap< map<size_t, double> > all_pi;
         CountAllPairInfo(path, next_paths, all_pi);
         CountWeightsAndFilter(path, next_paths, false);
@@ -1169,7 +1179,7 @@ private:
         return to_sort;
     }
 
-    vector<BidirectionalPath*> MaxWeightedPath(const BidirectionalPath& path, const BidirectionalPathSet& following_paths) {
+    vector<BidirectionalPath*> MaxWeightedPath(const BidirectionalPath& path, const BidirectionalPathSet& following_paths) const {
         BidirectionalPathSet result(following_paths);
         BidirectionalPathSet prev_result;
         while (prev_result.size() != result.size()) {
@@ -1188,7 +1198,7 @@ private:
         return SortResult(path, result);
     }
 
-    BidirectionalPath ChooseFromEnds(const BidirectionalPath& path, const vector<BidirectionalPath*>& paths, const BidirectionalPath& end) { //TODO" rewrite
+    BidirectionalPath ChooseFromEnds(const BidirectionalPath& path, const vector<BidirectionalPath*>& paths, const BidirectionalPath& end) const { //TODO" rewrite
         DEBUG("choose from ends " << paths.size());
         end.Print();
         vector<BidirectionalPath*> new_paths;
@@ -1240,7 +1250,7 @@ private:
         return result;
     }
 
-    int CheckPairInfo(const BidirectionalPath& path, const BidirectionalPath& result_end, int to_add) {
+    int CheckPairInfo(const BidirectionalPath& path, const BidirectionalPath& result_end, int to_add) const {
         while (to_add < (int)result_end.Size()) {
             map<size_t, double> weights = weight_counter_.FindPairInfoFromPath(path, 0, path.Size(), result_end, to_add, to_add + 1);
             double weight_to_edge = 0.0;
@@ -1255,7 +1265,7 @@ private:
         return to_add;
     }
 
-    EdgeContainer TryToScaffold(const BidirectionalPath& path, const vector<BidirectionalPath*>& paths) {
+    EdgeContainer TryToScaffold(const BidirectionalPath& path, const vector<BidirectionalPath*>& paths) const {
         if (paths.size() == 0) {
             return EdgeContainer();
         }
@@ -1284,11 +1294,150 @@ private:
     const Graph& g_;
     shared_ptr<PairedInfoLibrary> lib_;
     size_t search_dist_;
-    PathsWeightCounter weight_counter_;
+    mutable PathsWeightCounter weight_counter_;
     const GraphCoverageMap cov_map_;
     NextPathSearcher path_searcher_;
-    UniqueEdgeAnalyzer unique_edge_analyzer_;
+    LongReadsUniqueEdgeAnalyzer unique_edge_analyzer_;
     SimpleScaffolding simple_scaffolder_;
+
+    DECL_LOGGER("MatePairExtensionChooser");
 };
+
+class CoordinatedCoverageExtensionChooser: public ExtensionChooser {
+public:
+    CoordinatedCoverageExtensionChooser(const Graph& g, CoverageAwareIdealInfoProvider& coverage_provider, size_t max_edge_length_in_repeat, double delta) :
+            ExtensionChooser(g), provider_(coverage_provider), max_edge_length_in_repeat_(max_edge_length_in_repeat), delta_(delta) {
+    }
+
+    EdgeContainer Filter(const BidirectionalPath& path,
+            const EdgeContainer& edges) const override {
+
+
+        double path_coverage = provider_.EstimatePathCoverage(path);
+        if (math::eq(path_coverage, -1.0)) {
+            DEBUG("Path coverage can't be calculated");
+            return EdgeContainer();
+        }
+        DEBUG("Path coverage is " << path_coverage);
+
+        for (auto e_d : edges) {
+            if (path.Contains(g_.EdgeEnd(e_d.e_))) {
+                DEBUG("Avoid to create loops");
+                return EdgeContainer();
+            }
+        }
+        return FindExtensionTroughRepeat(edges, path_coverage);
+    }
+
+private:
+
+    void UpdateCanBeProcessed(VertexId v,
+            std::queue<VertexId>& can_be_processed) const {
+        DEBUG("Updating can be processed");
+        for (EdgeId e : g_.OutgoingEdges(v)) {
+            VertexId neighbour_v = this->g_.EdgeEnd(e);
+            if (g_.length(e) < max_edge_length_in_repeat_) {
+                DEBUG(
+                        "Adding vertex " << neighbour_v.int_id()
+                                << "through edge " << g_.str(e));
+                can_be_processed.push(neighbour_v);
+            }
+        }
+    }
+
+    GraphComponent<Graph> GetRepeatComponent(const VertexId start) const {
+        set<VertexId> vertices_of_component;
+        vertices_of_component.insert(start);
+        std::queue<VertexId> can_be_processed;
+        UpdateCanBeProcessed(start, can_be_processed);
+        while (!can_be_processed.empty()) {
+            VertexId v = can_be_processed.front();
+            can_be_processed.pop();
+            if (vertices_of_component.count(v) != 0) {
+
+                DEBUG("Component is too complex");
+                return GraphComponent<Graph>(g_, false);
+            }
+            DEBUG("Adding vertex " << g_.str(v) << " to component set");
+            vertices_of_component.insert(v);
+            UpdateCanBeProcessed(v, can_be_processed);
+        }
+
+        GraphComponent<Graph> gc(g_, vertices_of_component.begin(),
+                vertices_of_component.end());
+        return gc;
+    }
+
+    EdgeContainer FinalFilter(const EdgeContainer& edges,
+            EdgeId edge_to_extend) const {
+        EdgeContainer result;
+        for (auto e_with_d : edges) {
+            if (e_with_d.e_ == edge_to_extend) {
+                result.push_back(e_with_d);
+            }
+        }
+        return result;
+    }
+
+    bool GoodExtension(EdgeId e, double path_coverage) const {
+        if (math::ge(g_.coverage(e), path_coverage - path_coverage * delta_)) {
+            return true;
+        }
+        else {
+            return false;
+        }
+    }
+
+    EdgeContainer FindExtensionTroughRepeat(const EdgeContainer& edges, double path_coverage) const {
+        set<EdgeId> good_extensions;
+        for(auto edge : edges) {
+
+            if(g_.length(edge.e_) > max_edge_length_in_repeat_) {
+                if(GoodExtension(edge.e_, path_coverage)) {
+                    good_extensions.insert(edge.e_);
+                    continue;
+                }
+            }
+
+            GraphComponent<Graph> gc = GetRepeatComponent(g_.EdgeEnd(edge.e_));
+            if(gc.v_size() == 0) {
+                return EdgeContainer();
+            }
+
+            for (auto e : gc.edges()) {
+                if (g_.length(e) > max_edge_length_in_repeat_) {
+                    DEBUG("Repeat component contains long edges");
+                    return EdgeContainer();
+                }
+            }
+
+            for (auto v : gc.sinks()) {
+                for (auto e : g_.OutgoingEdges(v)) {
+                    if(GoodExtension(e, path_coverage)) {
+                        good_extensions.insert(edge.e_);
+                    }
+                }
+            }
+        }
+
+        DEBUG("Number of good extensions is " << good_extensions.size());
+
+        if (good_extensions.size() != 1) {
+            DEBUG("Returning");
+            return EdgeContainer();
+        }
+
+        DEBUG("Filtering... Extend with edge " << good_extensions.begin()->int_id());
+        return FinalFilter(edges, *good_extensions.begin());
+    }
+    
+    //fixme codestyle
+    CoverageAwareIdealInfoProvider provider_;
+    const size_t max_edge_length_in_repeat_;
+    const double delta_;
+protected:
+    DECL_LOGGER("CoordCoverageExtensionChooser");
+};
+
 }
 #endif /* EXTENSION_HPP_ */
diff --git a/src/debruijn/path_extend/ideal_pair_info.hpp b/src/debruijn/path_extend/ideal_pair_info.hpp
index 1ee0eac..34c10f7 100644
--- a/src/debruijn/path_extend/ideal_pair_info.hpp
+++ b/src/debruijn/path_extend/ideal_pair_info.hpp
@@ -15,11 +15,13 @@
 #ifndef IDEAL_PAIR_INFO_HPP_
 #define IDEAL_PAIR_INFO_HPP_
 #import <vector>
+#include "graph_pack.hpp"
+
+namespace path_extend {
 
 using debruijn_graph::Graph;
 using debruijn_graph::EdgeId;
 
-namespace path_extend {
 class IdealPairInfoCounter {
 public:
     IdealPairInfoCounter(const Graph& g, int d_min, int d_max, size_t read_size,
@@ -41,7 +43,7 @@ public:
         PreCalculateNotTotalReadsWeight();
     }
 
-    double IdealPairedInfo(EdgeId e1, EdgeId e2, int dist, bool additive = false) {
+    double IdealPairedInfo(EdgeId e1, EdgeId e2, int dist, bool additive = false) const {
         std::pair<size_t, size_t> lengths = make_pair(g_.length(e1), g_.length(e2));
         if (pi_.find(lengths) == pi_.end()) {
             pi_.insert(make_pair(lengths, std::map<int, double>()));
@@ -52,6 +54,7 @@ public:
         }
         return weights[dist];
     }
+
     double IdealPairedInfo(size_t len1, size_t len2, int dist, bool additive = false) const {
         double result = 0.0;
         for (auto it = insert_size_distrib_.lower_bound(max(d_min_, 0)); it != insert_size_distrib_.upper_bound(d_max_); ++it) {
@@ -115,7 +118,7 @@ private:
     size_t read_size_;
     std::vector<double> weights_;
     std::map<int, double> insert_size_distrib_;
-    std::map<std::pair<size_t, size_t>, std::map<int, double> > pi_;
+    mutable std::map<std::pair<size_t, size_t>, std::map<int, double> > pi_;
     std::vector<double> not_total_weights_right_;
     std::vector<double> not_total_weights_left_;
 protected:
diff --git a/src/debruijn/path_extend/loop_traverser.hpp b/src/debruijn/path_extend/loop_traverser.hpp
index 3ffa150..e0b04a9 100644
--- a/src/debruijn/path_extend/loop_traverser.hpp
+++ b/src/debruijn/path_extend/loop_traverser.hpp
@@ -117,6 +117,12 @@ private:
 			DEBUG("TraverseLoop STRANGE SITUATION: start " << coveredStartPaths.size() << " end " << coveredEndPaths.size());
 			return;
 		}
+
+        if (coveredStartPaths.size() > 1 or coveredEndPaths.size() > 1) {
+            DEBUG("Ambiguous situation in path joining, quitting");
+            return;
+        }
+
 		BidirectionalPath* startPath = *coveredStartPaths.begin();
 		BidirectionalPath* endPath = *coveredEndPaths.begin();
 		if ((*startPath) == endPath->Conjugate()){
@@ -145,7 +151,7 @@ private:
                 nLen = 0;
             } else {
                 DijkstraHelper<Graph>::BoundedDijkstra dijkstra(DijkstraHelper<Graph>::CreateBoundedDijkstra(g_, 1000, 3000));
-                dijkstra.run(lastVertex);
+                dijkstra.Run(lastVertex);
                 vector<EdgeId> shortest_path = dijkstra.GetShortestPathTo(g_.EdgeStart(endPath->Front()));
 
                 if (shortest_path.size() == 0) {
@@ -166,7 +172,7 @@ private:
 			startPath->PushBack(endPath->At(commonSize), (int) nLen);
 		}
 		for (size_t i = commonSize + 1; i < endPath->Size(); ++i) {
-            startPath->PushBack(endPath->At(i), endPath->GapAt(i));
+            startPath->PushBack(endPath->At(i), endPath->GapAt(i), endPath->TrashPreviousAt(i), endPath->TrashCurrentAt(i));
 		}
 		DEBUG("travers");
 		startPath->Print();
diff --git a/src/debruijn/path_extend/next_path_searcher.hpp b/src/debruijn/path_extend/next_path_searcher.hpp
index 0bc54a0..37f458c 100644
--- a/src/debruijn/path_extend/next_path_searcher.hpp
+++ b/src/debruijn/path_extend/next_path_searcher.hpp
@@ -11,9 +11,7 @@
  *  Created on: Sep 27, 2013
  *      Author: ira
  */
-
-#ifndef NEXT_PATH_SEARCHER_HPP_
-#define NEXT_PATH_SEARCHER_HPP_
+#pragma once
 
 #include <set>
 #include <vector>
@@ -177,36 +175,36 @@ public:
     typedef multimap<EdgeId, PathWithDistance> ConstructedPathT;
 
     NextPathSearcher(const Graph& g, const GraphCoverageMap& cover_map, size_t search_dist, PathsWeightCounter weight_counter, size_t max_number_of_paths_to_search);
-    BidirectionalPathSet FindNextPaths(const BidirectionalPath& path, EdgeId begin_edge, bool jump = true);
-    vector<BidirectionalPath*> ScaffoldTree(const BidirectionalPath& path);
+    BidirectionalPathSet FindNextPaths(const BidirectionalPath& path, EdgeId begin_edge, bool jump = true) const ;
+    vector<BidirectionalPath*> ScaffoldTree(const BidirectionalPath& path) const;
 private:
     bool IsOutTip(VertexId v) const;
     bool IsInTip(VertexId v) const;
-    vector<Edge*> GrowPath(const BidirectionalPath& init_path, Edge* e);
-    Edge* AddEdge(const BidirectionalPath& init_path, Edge* prev_e, EdgeId e_to_add, int gap);
-    bool AnalyzeBubble(const BidirectionalPath& p, EdgeId buldge_edge, size_t gap, Edge* prev_edge);
+    vector<Edge*> GrowPath(const BidirectionalPath& init_path, Edge* e) const;
+    Edge* AddEdge(const BidirectionalPath& init_path, Edge* prev_e, EdgeId e_to_add, int gap) const;
+    bool AnalyzeBubble(const BidirectionalPath& p, EdgeId buldge_edge, size_t gap, Edge* prev_edge) const;
 
     void ScaffoldTip(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& result_edges, vector<Edge*>& stopped_paths, vector<Edge*>& to_add,
-                     bool jump);
-    void ScaffoldChristmasTree(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& to_add, size_t min_length_from);
-    void Scaffold(const BidirectionalPath& init_path, Edge* current_path, ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap);
-    void FindScaffoldingCandidates(const BidirectionalPath& init_path, Edge* current_path, EdgeSet& candidate_set, size_t min_length_from);
-    void FindScaffoldingCandidates(EdgeId e, size_t distance_to_tip, vector<EdgeWithDistance>& jump_edges);
-    void OrderScaffoldingCandidates(EdgeSet& candidate_set, const BidirectionalPath& init_path, Edge* current_path, ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap);
+                     bool jump) const;
+    void ScaffoldChristmasTree(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& to_add, size_t min_length_from) const;
+    void Scaffold(const BidirectionalPath& init_path, Edge* current_path, ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap) const;
+    void FindScaffoldingCandidates(const BidirectionalPath& init_path, Edge* current_path, EdgeSet& candidate_set, size_t min_length_from) const;
+    void FindScaffoldingCandidates(EdgeId e, size_t distance_to_tip, vector<EdgeWithDistance>& jump_edges) const;
+    void OrderScaffoldingCandidates(EdgeSet& candidate_set, const BidirectionalPath& init_path, Edge* current_path, ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap) const;
     void RemoveRedundant(ConstructedPathT& constructed_paths) const;
     void ConvertPaths(const ConstructedPathT& constructed_paths, Edge* current_path, vector<Edge*>& to_add) const;
     void ProcessScaffoldingCandidate(EdgeWithDistance& e, EdgeSet& candidate_set, Edge* current_path, size_t grown_path_len,
-                                     ConstructedPathT& constructed_paths, bool is_gap);
-    int EstimateGapForPath(EdgeSet& candidate_set, const BidirectionalPath& p);
-    void AddConstructedPath(const BidirectionalPath& cp, size_t from, int gap, ConstructedPathT& constructed_paths);
-    void FilterBackPaths(BidirectionalPathSet& back_paths, EdgeId edge_to_reach, BidirectionalPathSet& reached_paths, size_t max_len = -1UL);
-    void JoinPathsByGraph(ConstructedPathT& constructed_paths);
-    void JoinPathsByPI(ConstructedPathT& constructed_paths);
-    void JoinPathsByDejikstra(const BidirectionalPath& init_path, ConstructedPathT& constructed_paths);
-    map<PathWithDistance*, size_t> FindDistances(const BidirectionalPath& p, vector<PathWithDistance*>& paths);
-    void FindConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections);
-    vector<vector<PathWithDistance*> > FilterConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections);
-    void ConnectPaths(const BidirectionalPath& init_path, vector<vector<PathWithDistance*> >& variants);
+                                     ConstructedPathT& constructed_paths, bool is_gap) const;
+    int EstimateGapForPath(EdgeSet& candidate_set, const BidirectionalPath& p) const;
+    void AddConstructedPath(const BidirectionalPath& cp, size_t from, int gap, ConstructedPathT& constructed_paths) const;
+    void FilterBackPaths(BidirectionalPathSet& back_paths, EdgeId edge_to_reach, BidirectionalPathSet& reached_paths, size_t max_len = -1UL) const;
+    void JoinPathsByGraph(ConstructedPathT& constructed_paths) const;
+    void JoinPathsByPI(ConstructedPathT& constructed_paths) const;
+    void JoinPathsByDejikstra(const BidirectionalPath& init_path, ConstructedPathT& constructed_paths) const;
+    map<PathWithDistance*, size_t> FindDistances(const BidirectionalPath& p, vector<PathWithDistance*>& paths) const;
+    void FindConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) const;
+    vector<vector<PathWithDistance*> > FilterConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) const;
+    void ConnectPaths(const BidirectionalPath& init_path, vector<vector<PathWithDistance*> >& variants) const;
 
     const Graph& g_;
     const GraphCoverageMap& cover_map_;
@@ -229,7 +227,7 @@ inline NextPathSearcher::NextPathSearcher(const Graph& g, const GraphCoverageMap
 
 }
 
-inline vector<BidirectionalPath*> NextPathSearcher::ScaffoldTree(const BidirectionalPath& path) {
+inline vector<BidirectionalPath*> NextPathSearcher::ScaffoldTree(const BidirectionalPath& path) const {
     Edge* start_e = new Edge(g_, path.At(0), NULL, g_.length(path.At(0)) + path.GapAt(0), path.GapAt(0));
     Edge* e = start_e->AddPath(path, 1);
     //jump forward when too much paths
@@ -261,7 +259,7 @@ inline vector<BidirectionalPath*> NextPathSearcher::ScaffoldTree(const Bidirecti
     return result_paths;
 }
 
-inline BidirectionalPathSet NextPathSearcher::FindNextPaths(const BidirectionalPath& path, EdgeId begin_edge, bool jump) {
+inline BidirectionalPathSet NextPathSearcher::FindNextPaths(const BidirectionalPath& path, EdgeId begin_edge, bool jump) const {
     TRACE("begin find next paths");
     vector<Edge*> grow_paths;
     vector<Edge*> result_edges;
@@ -330,7 +328,7 @@ inline BidirectionalPathSet NextPathSearcher::FindNextPaths(const BidirectionalP
     return result_paths;
 }
 
-inline bool NextPathSearcher::AnalyzeBubble(const BidirectionalPath& p, EdgeId buldge_edge, size_t gap, Edge* prev_edge) {
+inline bool NextPathSearcher::AnalyzeBubble(const BidirectionalPath& p, EdgeId buldge_edge, size_t gap, Edge* prev_edge) const {
     EdgeId max_edge = buldge_edge;
     if (prev_edge->GetOutEdgeIndex(buldge_edge) != -1 || prev_edge->GetIncorrectEdgeIndex(buldge_edge) != -1) {
         return prev_edge->GetOutEdgeIndex(buldge_edge) != -1;
@@ -353,7 +351,7 @@ inline bool NextPathSearcher::AnalyzeBubble(const BidirectionalPath& p, EdgeId b
     return max_edge == buldge_edge;
 }
 
-inline Edge* NextPathSearcher::AddEdge(const BidirectionalPath& init_path, Edge* prev_e, EdgeId e_to_add, int gap) {
+inline Edge* NextPathSearcher::AddEdge(const BidirectionalPath& init_path, Edge* prev_e, EdgeId e_to_add, int gap) const {
     Edge* e = prev_e;
     if (e->GetIncorrectEdgeIndex(e_to_add) != -1) {
         return e;
@@ -372,7 +370,7 @@ inline Edge* NextPathSearcher::AddEdge(const BidirectionalPath& init_path, Edge*
     return e;
 }
 
-inline vector<Edge*> NextPathSearcher::GrowPath(const BidirectionalPath& init_path, Edge* e) {
+inline vector<Edge*> NextPathSearcher::GrowPath(const BidirectionalPath& init_path, Edge* e) const {
     TRACE("in growing path");
     vector<Edge*> to_add;
     if (!e->IsCorrect()) {
@@ -417,7 +415,7 @@ inline vector<Edge*> NextPathSearcher::GrowPath(const BidirectionalPath& init_pa
 }
 
 inline void NextPathSearcher::ScaffoldTip(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& result_edges, vector<Edge*>& stopped_paths,
-                                          vector<Edge*>& to_add, bool jump) {
+                                          vector<Edge*>& to_add, bool jump) const {
 
     if (jump) {
         //jump forward when tip
@@ -437,7 +435,7 @@ inline void NextPathSearcher::ScaffoldTip(const BidirectionalPath& path, Edge *
     }
 }
 
-inline void NextPathSearcher::ScaffoldChristmasTree(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& to_add, size_t min_length_from) {
+inline void NextPathSearcher::ScaffoldChristmasTree(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& to_add, size_t min_length_from) const {
 	//jump forward when too much paths
 	DEBUG("========= Scaffolding when too many paths =========");
 	ConstructedPathT constructed_paths;
@@ -503,7 +501,7 @@ inline void NextPathSearcher::ScaffoldChristmasTree(const BidirectionalPath& pat
 }
 
 inline void NextPathSearcher::Scaffold(const BidirectionalPath& init_path, Edge* current_path,
-                                       ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap) {
+                                       ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap) const {
 
     EdgeSet candidate_set;
     FindScaffoldingCandidates(init_path, current_path, candidate_set, 0);
@@ -516,7 +514,7 @@ inline void NextPathSearcher::Scaffold(const BidirectionalPath& init_path, Edge*
     OrderScaffoldingCandidates(candidate_set, init_path, current_path, constructed_paths, seeds, is_gap);
 }
 
-inline void NextPathSearcher::FindScaffoldingCandidates(const BidirectionalPath& init_path, Edge* current_path, EdgeSet& candidate_set, size_t min_length_from) {
+inline void NextPathSearcher::FindScaffoldingCandidates(const BidirectionalPath& init_path, Edge* current_path, EdgeSet& candidate_set, size_t min_length_from) const {
     set<EdgeId> path_end;
     set<Edge*> prev_edges = current_path->GetPrevEdges(search_dist_);
     for (Edge* e : prev_edges) {
@@ -561,7 +559,7 @@ inline void NextPathSearcher::FindScaffoldingCandidates(const BidirectionalPath&
     }
 }
 
-inline void NextPathSearcher::FindScaffoldingCandidates(EdgeId e, size_t distance_to_tip, vector<EdgeWithDistance>& jump_edges) {
+inline void NextPathSearcher::FindScaffoldingCandidates(EdgeId e, size_t distance_to_tip, vector<EdgeWithDistance>& jump_edges) const {
     if (g_.length(e) < long_edge_len_ || distance_to_tip - g_.length(e) >= search_dist_)
         return;
 
@@ -579,7 +577,7 @@ inline void NextPathSearcher::FindScaffoldingCandidates(EdgeId e, size_t distanc
 
 inline void NextPathSearcher::OrderScaffoldingCandidates(EdgeSet& candidate_set, const BidirectionalPath& init_path,
                                                          Edge* current_path, ConstructedPathT& constructed_paths,
-                                                         set<EdgeId>& seeds, bool is_gap) {
+                                                         set<EdgeId>& seeds, bool is_gap) const {
     size_t grown_path_len = current_path->Length() - init_path.Length();
 
     TRACE("Order Scaffolding Candidates, is gap " << is_gap);
@@ -624,7 +622,7 @@ inline void NextPathSearcher::RemoveRedundant(ConstructedPathT& constructed_path
 }
 
 inline void NextPathSearcher::ProcessScaffoldingCandidate(EdgeWithDistance& e, EdgeSet& candidate_set, Edge* current_path, size_t grown_path_len,
-                                                          ConstructedPathT& constructed_paths, bool is_gap) {
+                                                          ConstructedPathT& constructed_paths, bool is_gap) const {
     bool looking_for_tip = is_gap;
     //Search back from e till tip or maximim length back
     TRACE(" === Searching back === ");
@@ -685,7 +683,7 @@ inline void NextPathSearcher::ProcessScaffoldingCandidate(EdgeWithDistance& e, E
     }
 }
 
-inline int NextPathSearcher::EstimateGapForPath(EdgeSet& candidate_set, const BidirectionalPath& p) {
+inline int NextPathSearcher::EstimateGapForPath(EdgeSet& candidate_set, const BidirectionalPath& p) const {
     int gap = 0;
     int count = 0;
     for (EdgeWithDistance e : candidate_set) {
@@ -703,7 +701,7 @@ inline int NextPathSearcher::EstimateGapForPath(EdgeSet& candidate_set, const Bi
     return gap > 0 ? gap : 100;
 }
 
-inline void NextPathSearcher::AddConstructedPath(const BidirectionalPath& cp, size_t from, int gap, ConstructedPathT& constructed_paths) {
+inline void NextPathSearcher::AddConstructedPath(const BidirectionalPath& cp, size_t from, int gap, ConstructedPathT& constructed_paths) const {
     VERIFY(!cp.Empty());
 
     //Adding if there is unique (candidate - tip)
@@ -760,7 +758,7 @@ inline bool NextPathSearcher::IsInTip(VertexId v) const {
     return false;
 }
 inline void NextPathSearcher::FilterBackPaths(BidirectionalPathSet& back_paths, EdgeId edge_to_reach, BidirectionalPathSet& reached_paths,
-                                              size_t max_len) {
+                                              size_t max_len) const {
 	TRACE("Searching for proper back paths");
 
     int i = 0;
@@ -783,7 +781,7 @@ inline void NextPathSearcher::FilterBackPaths(BidirectionalPathSet& back_paths,
     }
 }
 
-inline void NextPathSearcher::JoinPathsByGraph(ConstructedPathT& constructed_paths) {
+inline void NextPathSearcher::JoinPathsByGraph(ConstructedPathT& constructed_paths) const {
 	TRACE("==  try to join paths using graph ==");
     for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
         //p1->second.p_.Print();
@@ -814,7 +812,7 @@ inline void NextPathSearcher::JoinPathsByGraph(ConstructedPathT& constructed_pat
     }
 }
 
-inline void NextPathSearcher::JoinPathsByPI(ConstructedPathT& constructed_paths) {
+inline void NextPathSearcher::JoinPathsByPI(ConstructedPathT& constructed_paths) const {
 	DEBUG("==  try to join paths ===");
     for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
         p1->second.p_.Print();
@@ -871,7 +869,7 @@ inline void NextPathSearcher::JoinPathsByPI(ConstructedPathT& constructed_paths)
                 path2.Print();
                 path1.PushBack(path2.Front(), 100);
                 for (int i = 1; i < (int) path2.Size(); ++i) {
-                    path1.PushBack(path2[i], path2.GapAt(i));
+                    path1.PushBack(path2[i], path2.GapAt(i), path2.TrashPreviousAt(i), path2.TrashCurrentAt(i));
                 }
                 DEBUG("new path");
                 path1.Print();
@@ -881,7 +879,7 @@ inline void NextPathSearcher::JoinPathsByPI(ConstructedPathT& constructed_paths)
         }
     }
 }
-void Generate(size_t l, size_t r, vector<size_t> a,
+inline void Generate(size_t l, size_t r, vector<size_t> a,
 		vector<vector<size_t> >& res, vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) {
 	if (l == r) {
 	    DEBUG("result " << a.size())
@@ -904,7 +902,7 @@ void Generate(size_t l, size_t r, vector<size_t> a,
 	}
 }
 
-vector<vector<size_t> > Generate(size_t n, vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) {
+inline vector<vector<size_t> > Generate(size_t n, vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) {
 	vector<vector<size_t> > result;
 	if (n > 5) {
 	    return result;
@@ -917,11 +915,11 @@ vector<vector<size_t> > Generate(size_t n, vector<PathWithDistance*>& all_paths,
 	return result;
 }
 
-inline map<PathWithDistance*, size_t> NextPathSearcher::FindDistances(const BidirectionalPath& p, vector<PathWithDistance*>& paths) {
+inline map<PathWithDistance*, size_t> NextPathSearcher::FindDistances(const BidirectionalPath& p, vector<PathWithDistance*>& paths) const {
     DEBUG("find distances from e " << g_.int_id(p.Back()))
 	map<PathWithDistance*, size_t> result;
     DijkstraHelper<Graph>::BoundedDijkstra dijkstra(DijkstraHelper<Graph>::CreateBoundedDijkstra(g_, search_dist_, 3000));
-    dijkstra.run(g_.EdgeEnd(p.Back()));
+    dijkstra.Run(g_.EdgeEnd(p.Back()));
     DEBUG("paths size " << paths.size());
     for (auto ipath = paths.begin(); ipath != paths.end(); ++ipath) {
         vector<EdgeId> shortest_path = dijkstra.GetShortestPathTo(g_.EdgeStart((*ipath)->p_.Front()));
@@ -938,7 +936,7 @@ inline map<PathWithDistance*, size_t> NextPathSearcher::FindDistances(const Bidi
     return result;
 }
 
-inline void NextPathSearcher::FindConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) {
+inline void NextPathSearcher::FindConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) const {
     for (auto p1 = all_paths.begin(); p1 != all_paths.end(); ++p1) {
         map<PathWithDistance*, size_t> distances = FindDistances((*p1)->p_, all_paths);
         connections[*p1] = set<PathWithDistance*>();
@@ -950,7 +948,7 @@ inline void NextPathSearcher::FindConnections(vector<PathWithDistance*>& all_pat
     }
 }
 
-inline void NextPathSearcher::ConnectPaths(const BidirectionalPath& init_path, vector<vector<PathWithDistance*> >& variants) {
+inline void NextPathSearcher::ConnectPaths(const BidirectionalPath& init_path, vector<vector<PathWithDistance*> >& variants) const {
     if (variants.size() == 1 && variants[0].size() > 0) {
         vector<PathWithDistance*> res = variants[0];
         vector<PathWithDistance*> for_dijkstra;
@@ -970,7 +968,7 @@ inline void NextPathSearcher::ConnectPaths(const BidirectionalPath& init_path, v
             gap = distances.count(res[i]) > 0 ? distances[res[i]] : 100 + g_.k();
             path1.PushBack(path2.Front(), (int)gap);
             for (int i = 1; i < (int) path2.Size(); ++i) {
-                path1.PushBack(path2[i], path2.GapAt(i));
+                path1.PushBack(path2[i], path2.GapAt(i), path2.TrashPreviousAt(i), path2.TrashCurrentAt(i));
             }
             path2.Clear();
         }
@@ -990,7 +988,7 @@ inline void NextPathSearcher::ConnectPaths(const BidirectionalPath& init_path, v
     }
 }
 
-inline vector<vector<PathWithDistance*> > NextPathSearcher::FilterConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) {
+inline vector<vector<PathWithDistance*> > NextPathSearcher::FilterConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) const {
     vector<vector<PathWithDistance*> > variants;
     DEBUG("filter connections " << connections.size() << " all paths size " << all_paths.size())
     vector<vector<size_t> > permutations = Generate(all_paths.size(), all_paths, connections);
@@ -1005,7 +1003,7 @@ inline vector<vector<PathWithDistance*> > NextPathSearcher::FilterConnections(ve
     return variants;
 }
 
-inline void NextPathSearcher::JoinPathsByDejikstra(const BidirectionalPath& init_path, ConstructedPathT& constructed_paths) {
+inline void NextPathSearcher::JoinPathsByDejikstra(const BidirectionalPath& init_path, ConstructedPathT& constructed_paths) const {
     DEBUG("==  try to join paths by dejikstra ===");
     for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
         p1->second.p_.Print();
@@ -1031,4 +1029,3 @@ inline void NextPathSearcher::JoinPathsByDejikstra(const BidirectionalPath& init
 }
 
 }  // namespace path_extend
-#endif /* NEXT_PATH_SEARCHER_HPP_ */
diff --git a/src/debruijn/path_extend/paired_library.hpp b/src/debruijn/path_extend/paired_library.hpp
index 110f1dd..a940098 100644
--- a/src/debruijn/path_extend/paired_library.hpp
+++ b/src/debruijn/path_extend/paired_library.hpp
@@ -21,6 +21,8 @@
 
 #include "xmath.h"
 
+namespace path_extend {
+
 using debruijn_graph::Graph;
 using debruijn_graph::EdgeId;
 
@@ -28,8 +30,6 @@ using omnigraph::de::PairedInfoIndexT;
 typedef omnigraph::de::PairInfo<EdgeId> DePairInfo;
 using omnigraph::de::Point;
 
-namespace path_extend {
-
 struct PairedInfoLibrary {
     PairedInfoLibrary(size_t k, const Graph& g, size_t readS, size_t is,
                       size_t is_min, size_t is_max, size_t is_var,
@@ -53,28 +53,15 @@ struct PairedInfoLibrary {
     void SetCoverage(double cov) { coverage_coeff_ = cov; }
     void SetSingleThreshold(double threshold) { single_threshold_ = threshold; }
 
-    virtual size_t FindJumpEdges(EdgeId e, set<EdgeId>& result, int min_dist = 0, int max_dist = 100000000, size_t min_len = 0) = 0;
+    virtual size_t FindJumpEdges(EdgeId e, set<EdgeId>& result, int min_dist, int max_dist, size_t min_len = 0) const = 0;
     virtual void CountDistances(EdgeId e1, EdgeId e2, vector<int>& dist, vector<double>& w) const = 0;
     virtual double CountPairedInfo(EdgeId e1, EdgeId e2, int distance, bool from_interval = false) const = 0;
-    virtual double CountPairedInfo(EdgeId e1, EdgeId e2, size_t dist_min, size_t dist_max) const = 0;
+    virtual double CountPairedInfo(EdgeId e1, EdgeId e2, int dist_min, int dist_max) const = 0;
 
-    double IdealPairedInfo(EdgeId e1, EdgeId e2, int distance, bool additive = false) {
+    double IdealPairedInfo(EdgeId e1, EdgeId e2, int distance, bool additive = false) const {
         return ideal_pi_counter_.IdealPairedInfo(e1, e2, distance, additive);
     }
 
-    double NormalizeWeight(const DePairInfo& pair_info) {
-        double w = IdealPairedInfo(pair_info.first, pair_info.second,
-                                   rounded_d(pair_info));
-
-        double result_weight = pair_info.weight();
-        if (math::gr(w, 0.))
-            result_weight /= w;
-        else
-            result_weight = 0.0;
-
-        return result_weight;
-    }
-
     size_t GetISMin() const { return is_min_; }
     double GetSingleThreshold() const { return single_threshold_; }
     double GetCoverageCoeff() const { return coverage_coeff_; }
@@ -102,111 +89,58 @@ protected:
 
 template<class Index>
 struct PairedInfoLibraryWithIndex : public PairedInfoLibrary {
+
     PairedInfoLibraryWithIndex(size_t k, const Graph& g, size_t readS, size_t is, size_t is_min, size_t is_max, size_t is_div,
                                const Index& index, bool is_mp,
                                const std::map<int, size_t>& is_distribution)
         : PairedInfoLibrary(k, g, readS, is, is_min, is_max, is_div, is_mp, is_distribution),
           index_(index) {}
 
-    virtual size_t FindJumpEdges(EdgeId e, std::set<EdgeId>& result, int min_dist = 0, int max_dist = 100000000, size_t min_len = 0) {
-        VERIFY(index_.Size() != 0);
+    size_t FindJumpEdges(EdgeId e, std::set<EdgeId>& result, int min_dist, int max_dist, size_t min_len = 0) const override {
+        VERIFY(index_.size() > 0);
         result.clear();
 
-        //FIXME reduce code duplication
-        if (index_.contains(e)) {
-          //fixme is 0 just some int here?!!!
-          const auto& infos = index_.GetEdgeInfo(e, 0);
-          // We do not care about iteration order here - all the edges collected
-          // will be inside std::set<EdgeId>
-          for (const auto& it : infos) {
-            EdgeId e2 = it.first;
-            if (e2 == e)
-              continue;
-            if (g_.length(e2) < min_len)
-              continue;
-
-            for (const auto& point : it.second) {
-              if (math::le(point.d, (omnigraph::de::DEDistance) max_dist) &&
-                  math::ge(point.d, (omnigraph::de::DEDistance) min_dist)) {
-                result.insert(e2);
-                break;
-              }
-            }
-          }
-        }
-
-        e = g_.conjugate(e);
-        if (index_.contains(e)) {
-          //fixme is 0 just some int here?!!!
-          const auto& infos = index_.GetEdgeInfo(e, 0);
-          // We do not care about iteration order here - all the edges collected
-          // will be inside std::set<EdgeId>
-          for (const auto& it : infos) {
+        auto infos = index_.Get(e);
+        // We do not care about iteration order here - all the edges collected
+        // will be inside std::set<EdgeId>
+        for (auto it : infos) {
             EdgeId e2 = it.first;
             if (e2 == e)
-              continue;
+                continue;
             if (g_.length(e2) < min_len)
-              continue;
-
-            for (const auto& point : it.second) {
-              omnigraph::de::DEDistance dist = -point.d + (omnigraph::de::DEDistance) g_.length(e) - (omnigraph::de::DEDistance) g_.length(e2);
-              if (math::le(dist, (omnigraph::de::DEDistance) max_dist) &&
-                  math::ge(dist, (omnigraph::de::DEDistance) min_dist)) {
-                result.insert(g_.conjugate(e2));
-                break;
-              }
+                continue;
+            for (auto point : it.second) {
+                omnigraph::de::DEDistance dist = point.d;
+                if (math::le(dist, (omnigraph::de::DEDistance) max_dist) &&
+                    math::ge(dist, (omnigraph::de::DEDistance) min_dist)) {
+                    result.insert(e2);
+                }
             }
-          }
         }
-
         return result.size();
     }
 
-    virtual void CountDistances(EdgeId e1, EdgeId e2, vector<int>& dist, vector<double>& w) const {
-        VERIFY(index_.Size() != 0);
+
+    void CountDistances(EdgeId e1, EdgeId e2, vector<int>& dist, vector<double>& w) const override {
+        VERIFY(index_.size() > 0);
         if (e1 == e2)
             return;
 
-        auto pairs = index_.GetEdgePairInfo(e1, e2);
-        if (!index_.conj_symmetry()) {
-            auto cpairs = index_.GetEdgePairInfo(g_.conjugate(e2), g_.conjugate(e1));
-            for (auto entry : cpairs) {
-                Point cp = ConjugatePoint(g_.length(e2), g_.length(e1), entry);
-                auto it = pairs.find(cp);
-                if (it != pairs.end())
-                    it->weight += cp.weight;
-                else
-                    pairs.insert(cp);
-            }
-        }
-        for (auto pointIter = pairs.begin(); pointIter != pairs.end(); ++pointIter) {
-            int pairedDistance = rounded_d(*pointIter);
-            if (pairedDistance >= 0) {
-                dist.push_back(pairedDistance);
-                w.push_back(pointIter->weight);
-            }
+        for (auto point : index_.Get(e1, e2)) {
+            int pairedDistance = rounded_d(point);
+            dist.push_back(pairedDistance);
+            w.push_back(point.weight);
         }
     }
 
-    virtual double CountPairedInfo(EdgeId e1, EdgeId e2, int distance,
-                                   bool from_interval = false) const {
-        VERIFY(index_.Size() != 0);
+    double CountPairedInfo(EdgeId e1, EdgeId e2, int distance,
+                           bool from_interval = false) const override {
+        VERIFY(index_.size() != 0);
         double weight = 0.0;
-        auto pairs = index_.GetEdgePairInfo(e1, e2);
-        if (!index_.conj_symmetry()) {
-            auto cpairs = index_.GetEdgePairInfo(g_.conjugate(e2), g_.conjugate(e1));
-            for (auto entry : cpairs) {
-                Point cp = ConjugatePoint(g_.length(e2), g_.length(e1), entry);
-                auto it = pairs.find(cp);
-                if (it != pairs.end())
-                    it->weight += cp.weight;
-                else
-              pairs.insert(cp);
-            }
-        }
-        for (auto pointIter = pairs.begin(); pointIter != pairs.end(); ++pointIter) {
-            int pairedDistance = rounded_d(*pointIter);
-            int distanceDev = (int) pointIter->variation();  //max((int) pointIter->var, (int) is_variation_);
+
+        for (auto point : index_.Get(e1, e2)) {
+            int pairedDistance = rounded_d(point);
+            int distanceDev = (int) point.variation();  //max((int) pointIter->var, (int) is_variation_);
             //Can be modified according to distance comparison
             int d_min = distance - distanceDev;
             int d_max = distance + distanceDev;
@@ -216,31 +150,20 @@ struct PairedInfoLibraryWithIndex : public PairedInfoLibrary {
                 d_max += (int) (is_max_ - is_);
             }
             if (pairedDistance >= d_min && pairedDistance <= d_max) {
-                weight += pointIter->weight;
+                weight += point.weight;
             }
         }
         return weight;
     }
 
-    virtual double CountPairedInfo(EdgeId e1, EdgeId e2, size_t dist_min, size_t dist_max) const {
-        VERIFY(index_.Size() != 0);
+    double CountPairedInfo(EdgeId e1, EdgeId e2, int dist_min, int dist_max) const override {
+        VERIFY(index_.size() != 0);
         double weight = 0.0;
-        auto pairs = index_.GetEdgePairInfo(e1, e2);
-        if (!index_.conj_symmetry()) {
-            auto cpairs = index_.GetEdgePairInfo(g_.conjugate(e2), g_.conjugate(e1));
-            for (auto entry : cpairs) {
-                Point cp = ConjugatePoint(g_.length(e2), g_.length(e1), entry);
-                auto it = pairs.find(cp);
-                if (it != pairs.end())
-                    it->weight += cp.weight;
-                else
-                    pairs.insert(cp);
-            }
-        }
-        for (auto pointIter = pairs.begin(); pointIter != pairs.end(); ++pointIter) {
-            int dist = rounded_d(*pointIter);
-            if (dist > 0 and (size_t)dist >= dist_min and (size_t)dist <= dist_max)
-                weight += pointIter->weight;
+
+        for (auto point : index_.Get(e1, e2)) {
+            int dist = rounded_d(point);
+            if (dist >= dist_min && dist <= dist_max)
+                weight += point.weight;
         }
         return weight;
     }
diff --git a/src/debruijn/path_extend/path_extend_launch.hpp b/src/debruijn/path_extend/path_extend_launch.hpp
index 072776e..360f7d6 100644
--- a/src/debruijn/path_extend/path_extend_launch.hpp
+++ b/src/debruijn/path_extend/path_extend_launch.hpp
@@ -15,6 +15,7 @@
 #ifndef PATH_EXTEND_LAUNCH_HPP_
 #define PATH_EXTEND_LAUNCH_HPP_
 
+#include <path_extend/scaffolder2015/scaffold_graph_constructor.hpp>
 #include "pe_config_struct.hpp"
 #include "pe_resolver.hpp"
 #include "path_extender.hpp"
@@ -23,7 +24,10 @@
 #include "loop_traverser.hpp"
 #include "long_read_storage.hpp"
 #include "next_path_searcher.hpp"
-
+#include "scaffolder2015/extension_chooser2015.hpp"
+#include "genome_consistance_checker.hpp"
+#include "scaffolder2015/scaffold_graph.hpp"
+#include "scaffolder2015/scaffold_graph_visualizer.hpp"
 
 namespace path_extend {
 
@@ -47,12 +51,18 @@ inline void DebugOutputPaths(const conj_graph_pack& gp,
                       const string& name) {
     PathInfoWriter path_writer;
     PathVisualizer visualizer;
+
+    DefaultContigCorrector<ConjugateDeBruijnGraph> corrector(gp.g);
+    DefaultContigConstructor<ConjugateDeBruijnGraph> constructor(gp.g, corrector);
+    ContigWriter writer(gp.g, constructor);
+
     string etcDir = GetEtcDir(output_dir);
     if (!cfg::get().pe_params.debug_output) {
         return;
     }
+    writer.OutputPaths(paths, etcDir + name);
     if (cfg::get().pe_params.output.write_paths) {
-        path_writer.WritePaths(paths, etcDir + name + ".paths");
+        path_writer.WritePaths(paths, etcDir + name + ".dat");
     }
     if (cfg::get().pe_params.viz.print_paths) {
         visualizer.writeGraphWithPathsSimple(gp, etcDir + name + ".dot", name,
@@ -64,14 +74,24 @@ inline double GetWeightThreshold(shared_ptr<PairedInfoLibrary> lib, const pe_con
     return lib->IsMp() ? pset.mate_pair_options.weight_threshold : pset.extension_options.weight_threshold;
 }
 
-inline double GetSingleThreshold(shared_ptr<PairedInfoLibrary> lib, const pe_config::ParamSetT& pset) {
-    return lib->IsMp() ? pset.mate_pair_options.single_threshold : pset.extension_options.single_threshold;
-}
-
 inline double GetPriorityCoeff(shared_ptr<PairedInfoLibrary> lib, const pe_config::ParamSetT& pset) {
     return lib->IsMp() ? pset.mate_pair_options.priority_coeff : pset.extension_options.priority_coeff;
 }
 
+inline void SetSingleThresholdForLib(shared_ptr<PairedInfoLibrary> lib, const pe_config::ParamSetT &pset, double threshold, double correction_coeff = 1.0) {
+    if  (lib->IsMp()) {
+        lib->SetSingleThreshold(pset.mate_pair_options.use_default_single_threshold || math::le(threshold, 0.0) ?
+                                pset.mate_pair_options.single_threshold : threshold);
+    }
+    else {
+        double t = pset.extension_options.use_default_single_threshold || math::le(threshold, 0.0) ?
+                   pset.extension_options.single_threshold : threshold;
+        t = correction_coeff * t;
+        lib->SetSingleThreshold(t);
+    }
+}
+
+
 inline string MakeNewName(const std::string& contigs_name, const std::string& subname) {
     return contigs_name.substr(0, contigs_name.rfind(".fasta")) + "_" + subname + ".fasta";
 }
@@ -198,15 +218,16 @@ inline void ClonePathContainer(PathContainer& spaths, PathContainer& tpaths, Gra
     }
 }
 
-inline void FinalizePaths(PathContainer& paths, GraphCoverageMap& cover_map, size_t max_overlap, bool mate_pairs = false) {
+inline void FinalizePaths(PathContainer& paths, GraphCoverageMap& cover_map, size_t min_edge_len, size_t max_path_diff, bool mate_pairs = false) {
     DefaultContigCorrector<ConjugateDeBruijnGraph> corrector(cover_map.graph());
     DefaultContigConstructor<ConjugateDeBruijnGraph> constructor(cover_map.graph(), corrector);
     ContigWriter writer(cover_map.graph(), constructor);
     PathExtendResolver resolver(cover_map.graph());
 
-    resolver.removeOverlaps(paths, cover_map, max_overlap, cfg::get().pe_params.param_set.remove_overlaps, cfg::get().pe_params.param_set.cut_all_overlaps);
+    resolver.removeOverlaps(paths, cover_map, min_edge_len, max_path_diff,
+                            cfg::get().pe_params.param_set.remove_overlaps, cfg::get().pe_params.param_set.cut_all_overlaps);
     if (mate_pairs) {
-        resolver.RemoveMatePairEnds(paths, max_overlap);
+        resolver.RemoveMatePairEnds(paths, min_edge_len);
     }
     if (cfg::get().avoid_rc_connections) {
         paths.FilterInterstandBulges();
@@ -216,6 +237,10 @@ inline void FinalizePaths(PathContainer& paths, GraphCoverageMap& cover_map, siz
         resolver.addUncoveredEdges(paths, cover_map);
     }
     paths.SortByLength();
+    for(auto& path : paths) {
+        path.first->ResetOverlaps();
+    }
+
 }
 
 inline void TraverseLoops(PathContainer& paths, GraphCoverageMap& cover_map, shared_ptr<ContigsMaker> extender) {
@@ -259,6 +284,7 @@ enum class PathExtendStage {
     PEStage,
     MPStage,
     FinalizingPEStage,
+    Scaffold2015,
 };
 
 template<class Index>
@@ -271,7 +297,7 @@ inline shared_ptr<PairedInfoLibrary> MakeNewLib(const conj_graph_pack::graph_t&
     int is_min = (int) lib.data().insert_size_left_quantile;
     int is_max = (int) lib.data().insert_size_right_quantile;
     int var = (int) lib.data().insert_size_deviation;
-    bool is_mp = lib.type() == io::LibraryType::MatePairs;
+    bool is_mp = lib.type() == io::LibraryType::MatePairs ||  lib.type() == io::LibraryType::HQMatePairs ;
     return make_shared< PairedInfoLibraryWithIndex<decltype(paired_index[index])> >(cfg::get().K, g, read_length,
                                                                                     is, is_min > 0.0 ? size_t(is_min) : 0, is_max > 0.0 ? size_t(is_max) : 0,
                                                                                     size_t(var),
@@ -290,40 +316,48 @@ inline shared_ptr<SimpleExtender> MakeLongReadsExtender(const conj_graph_pack& g
                                                    GetSingleReadsWeightPriorityThreshold(lib.type()),
                                                    GetSingleReadsUniqueEdgePriorityThreshold(lib.type()));
 
-    size_t max_repeat_length = std::max(10000ul, lib.data().read_length);
-    INFO("max_repeat_length set to " << max_repeat_length);
-    return make_shared<SimpleExtender>(gp, cov_map, longReadEC, max_repeat_length,  
+    size_t resolvable_repeat_length_bound = 10000ul;
+    if (!lib.is_contig_lib()) {
+        resolvable_repeat_length_bound = std::max(resolvable_repeat_length_bound, lib.data().read_length);
+    }
+    INFO("resolvable_repeat_length_bound set to " << resolvable_repeat_length_bound);
+    return make_shared<SimpleExtender>(gp, cov_map, longReadEC, resolvable_repeat_length_bound,  
             pset.loop_removal.max_loops, true, UseCoverageResolverForSingleReads(lib.type()));
 }
 
 inline shared_ptr<SimpleExtender> MakeLongEdgePEExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map,
-                                                         size_t lib_index, const pe_config::ParamSetT& pset, bool use_auto_threshold, bool investigate_loops) {
+                                                         size_t lib_index, const pe_config::ParamSetT& pset, bool investigate_loops) {
     shared_ptr<PairedInfoLibrary> lib = MakeNewLib(gp.g, gp.clustered_indices, lib_index);
-    if (use_auto_threshold) {
-        lib->SetSingleThreshold(cfg::get().ds.reads[lib_index].data().pi_threshold);
-        if (!investigate_loops)
-            INFO("Threshold for library #" << lib_index << " is " << cfg::get().ds.reads[lib_index].data().pi_threshold);
-    }
-    shared_ptr<WeightCounter> wc = make_shared<PathCoverWeightCounter>(gp.g, lib, GetWeightThreshold(lib, pset),
-                                                                       GetSingleThreshold(lib, pset));
-    wc->setNormalizeWeight(pset.normalize_weight);
-    shared_ptr<ExtensionChooser> extension = make_shared<LongEdgeExtensionChooser>(gp.g, wc, GetPriorityCoeff(lib, pset));
+    SetSingleThresholdForLib(lib, pset, cfg::get().ds.reads[lib_index].data().pi_threshold);
+    INFO("Threshold for lib #" << lib_index << ": " << lib->GetSingleThreshold());
+
+    shared_ptr<WeightCounter> wc = make_shared<PathCoverWeightCounter>(gp.g, lib, pset.normalize_weight);
+    shared_ptr<ExtensionChooser> extension = make_shared<LongEdgeExtensionChooser>(gp.g, wc, GetWeightThreshold(lib, pset), GetPriorityCoeff(lib, pset));
+    return make_shared<SimpleExtender>(gp, cov_map, extension, lib->GetISMax(), pset.loop_removal.max_loops, investigate_loops, false);
+}
+
+
+inline shared_ptr<SimpleExtender> MakeMetaExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map,
+                                       size_t lib_index, const pe_config::ParamSetT& pset, bool investigate_loops) {
+    shared_ptr<PairedInfoLibrary> lib = MakeNewLib(gp.g, gp.clustered_indices, lib_index);
+    VERIFY(!lib->IsMp());
+
+    shared_ptr<WeightCounter> wc = make_shared<MetagenomicWeightCounter>(gp.g, lib, /*read_length*/cfg::get().ds.RL(), 
+                            /*normalized_threshold*/ 0.3, /*raw_threshold*/ 3, /*estimation_edge_length*/ 300);
+    shared_ptr<SimpleExtensionChooser> extension = make_shared<SimpleExtensionChooser>(gp.g, wc, 
+                                                        pset.extension_options.weight_threshold, 
+                                                        pset.extension_options.priority_coeff);
     return make_shared<SimpleExtender>(gp, cov_map, extension, lib->GetISMax(), pset.loop_removal.max_loops, investigate_loops, false);
 }
 
 inline shared_ptr<SimpleExtender> MakePEExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map,
-                                       size_t lib_index, const pe_config::ParamSetT& pset, bool use_auto_threshold, bool investigate_loops) {
+                                       size_t lib_index, const pe_config::ParamSetT& pset, bool investigate_loops) {
     shared_ptr<PairedInfoLibrary> lib = MakeNewLib(gp.g, gp.clustered_indices, lib_index);
-    if (use_auto_threshold) {
-        lib->SetSingleThreshold(cfg::get().ds.reads[lib_index].data().pi_threshold);
-        if (!investigate_loops)
-            INFO("Threshold for library #" << lib_index << " is " << cfg::get().ds.reads[lib_index].data().pi_threshold);
-    }
-    //fixme temporary configuration for meta mode
-    shared_ptr<WeightCounter> wc = make_shared<PathCoverWeightCounter>(gp.g, lib, GetWeightThreshold(lib, pset), 
-                                                                       GetSingleThreshold(lib, pset), cfg::get().ds.meta ? 0.25 : 1.0);
-    wc->setNormalizeWeight(pset.normalize_weight);
-    shared_ptr<SimpleExtensionChooser> extension = make_shared<SimpleExtensionChooser>(gp.g, wc, GetPriorityCoeff(lib, pset));
+    SetSingleThresholdForLib(lib, pset, cfg::get().ds.reads[lib_index].data().pi_threshold);
+    INFO("Threshold for lib #" << lib_index << ": " << lib->GetSingleThreshold());
+
+    shared_ptr<WeightCounter> wc = make_shared<PathCoverWeightCounter>(gp.g, lib, pset.normalize_weight);
+    shared_ptr<SimpleExtensionChooser> extension = make_shared<SimpleExtensionChooser>(gp.g, wc, GetWeightThreshold(lib, pset), GetPriorityCoeff(lib, pset));
     return make_shared<SimpleExtender>(gp, cov_map, extension, lib->GetISMax(), pset.loop_removal.max_loops, investigate_loops, false);
 }
 
@@ -332,25 +366,75 @@ inline shared_ptr<PathExtender> MakeScaffoldingExtender(const conj_graph_pack& g
     shared_ptr<PairedInfoLibrary> lib = MakeNewLib(gp.g, gp.scaffolding_indices, lib_index);
 
     shared_ptr<WeightCounter> counter = make_shared<ReadCountWeightCounter>(gp.g, lib);
-    double prior_coef = GetPriorityCoeff(lib, pset);
+    //FIXME this variable was not used!
+    //double prior_coef = GetPriorityCoeff(lib, pset);
     //FIXME review parameters
     //todo put parameters in config
     //FIXME remove max_must_overlap from config
     double var_coeff = 3.0;
-    auto scaff_chooser = std::make_shared<ScaffoldingExtensionChooser>(gp.g, counter, prior_coef, var_coeff);
-    auto gap_joiner = std::make_shared<HammingGapJoiner>(gp.g, pset.scaffolder_options.min_gap_score,
-                                                 int(math::round((double) gp.g.k() - var_coeff * (double) lib->GetIsVar())),
-                                                 (int) (pset.scaffolder_options.max_can_overlap * (double) gp.g.k()),
+    auto scaff_chooser = std::make_shared<ScaffoldingExtensionChooser>(gp.g, counter, var_coeff);
+
+    vector<shared_ptr<GapJoiner>> joiners;
+
+    if (pset.scaffolder_options.use_la_gap_joiner) {
+        joiners.push_back(std::make_shared<LAGapJoiner>(gp.g, pset.scaffolder_options.min_overlap_length,
+                                                    pset.scaffolder_options.flank_multiplication_coefficient,
+                                                    pset.scaffolder_options.flank_addition_coefficient));
+    }
+
+    joiners.push_back(std::make_shared<HammingGapJoiner>(gp.g, pset.scaffolder_options.min_gap_score,
                                                  pset.scaffolder_options.short_overlap,
-                                                 (int) 2 * cfg::get().ds.RL(), pset.scaffolder_options.artificial_gap,
-                                                 cfg::get().pe_params.param_set.scaffolder_options.use_old_score);
-    return make_shared<ScaffoldingPathExtender>(gp, cov_map, scaff_chooser, gap_joiner, lib->GetISMax(), pset.loop_removal.max_loops, false);
+                                                 (int) 2 * cfg::get().ds.RL()));
+
+    auto composite_gap_joiner = std::make_shared<CompositeGapJoiner>(gp.g, 
+                                                joiners, 
+                                                size_t(pset.scaffolder_options.max_can_overlap * (double) gp.g.k()),
+                                                int(math::round((double) gp.g.k() - var_coeff * (double) lib->GetIsVar())),
+                                                pset.scaffolder_options.artificial_gap);
+
+    return make_shared<ScaffoldingPathExtender>(gp, cov_map, scaff_chooser, composite_gap_joiner, lib->GetISMax(), pset.loop_removal.max_loops, false);
 }
 
+
+inline shared_ptr<PathExtender> MakeScaffolding2015Extender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map,
+                                                        size_t lib_index, const pe_config::ParamSetT& pset, const shared_ptr<ScaffoldingUniqueEdgeStorage> storage) {
+    shared_ptr<PairedInfoLibrary> lib;
+    INFO("for lib " << lib_index);
+
+    //TODO:: temporary solution
+    if (gp.paired_indices[lib_index].size() > gp.clustered_indices[lib_index].size()) {
+        INFO("Paired unclustered indices not empty, using them");
+        lib = MakeNewLib(gp.g, gp.paired_indices, lib_index);
+    } else if (gp.clustered_indices[lib_index].size() != 0 ) {
+        INFO("clustered indices not empty, using them");
+        lib = MakeNewLib(gp.g, gp.clustered_indices, lib_index);
+    } else {
+        ERROR("All paired indices are empty!");
+    }
+
+    shared_ptr<WeightCounter> counter = make_shared<ReadCountWeightCounter>(gp.g, lib);
+//TODO::was copypasted from MakeScaffoldingExtender
+//TODO::REWRITE
+    double var_coeff = 3.0;
+    DEBUG("here creating extchooser");
+//TODO: 2 is relative weight cutoff, to config!
+    auto scaff_chooser = std::make_shared<ExtensionChooser2015>(gp.g, counter, var_coeff, storage, 2, lib_index);
+
+    auto gap_joiner = std::make_shared<HammingGapJoiner>(gp.g, pset.scaffolder_options.min_gap_score,
+                                                         pset.scaffolder_options.short_overlap,
+                                                         (int) 2 * cfg::get().ds.RL());
+
+    return make_shared<ScaffoldingPathExtender>(gp, cov_map, scaff_chooser, gap_joiner, lib->GetISMax(), pset.loop_removal.max_loops, false , false);
+}
+
+
 inline shared_ptr<SimpleExtender> MakeMPExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map, const PathContainer& paths,
                                        size_t lib_index, const pe_config::ParamSetT& pset) {
 
     shared_ptr<PairedInfoLibrary> lib = MakeNewLib(gp.g, gp.paired_indices, lib_index);
+    SetSingleThresholdForLib(lib, pset, cfg::get().ds.reads[lib_index].data().pi_threshold);
+    INFO("Threshold for lib #" << lib_index << ": " << lib->GetSingleThreshold());
+
     size_t max_number_of_paths_to_search = GetNumberMPPaths(gp.g);
     DEBUG("max number of mp paths " << max_number_of_paths_to_search);
 
@@ -358,18 +442,51 @@ inline shared_ptr<SimpleExtender> MakeMPExtender(const conj_graph_pack& gp, cons
     return make_shared<SimpleExtender>(gp, cov_map, chooser, lib->GetISMax(), pset.loop_removal.mp_max_loops, true, false);
 }
 
+inline shared_ptr<SimpleExtender> MakeCoordCoverageExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map,
+                                       const pe_config::ParamSetT& pset) {
+    shared_ptr<PairedInfoLibrary> lib = MakeNewLib(gp.g, gp.paired_indices, 0);
+    CoverageAwareIdealInfoProvider provider(gp.g, lib, 1000, 2000);
+    shared_ptr<CoordinatedCoverageExtensionChooser> chooser = make_shared<CoordinatedCoverageExtensionChooser>(gp.g, provider,
+            pset.coordinated_coverage.max_edge_length_in_repeat, pset.coordinated_coverage.delta);
+    return make_shared<SimpleExtender>(gp, cov_map, chooser, -1ul, pset.loop_removal.mp_max_loops, true, false);
+}
+
 
 inline bool InsertSizeCompare(const shared_ptr<PairedInfoLibrary> lib1,
                               const shared_ptr<PairedInfoLibrary> lib2) {
     return lib1->GetISMax() < lib2->GetISMax();
 }
 
+template<typename Base, typename T>
+inline bool instanceof(const T *ptr) {
+    return dynamic_cast<const Base*>(ptr) != nullptr;
+}
+
+//Used for debug purpose only
+inline void PrintExtenders(vector<shared_ptr<PathExtender> >& extenders) {
+    DEBUG("Extenders in vector:");
+    for(size_t i = 0; i < extenders.size(); ++i) {
+        string type = typeid(*extenders[i]).name();
+        DEBUG("Extender #i" << type);
+        if (instanceof<SimpleExtender>(extenders[i].get())) {
+            auto ec = ((SimpleExtender *) extenders[i].get())->GetExtensionChooser();
+            string chooser_type = typeid(*ec).name();
+            DEBUG("    Extender #i" << chooser_type);
+        }
+        else if (instanceof<ScaffoldingPathExtender>(extenders[i].get())) {
+            auto ec = ((ScaffoldingPathExtender *) extenders[i].get())->GetExtensionChooser();
+            string chooser_type = typeid(*ec).name();
+            DEBUG("    Extender #i" << chooser_type);
+        }
+    }
+}
 
 inline vector<shared_ptr<PathExtender> > MakeAllExtenders(PathExtendStage stage, const conj_graph_pack& gp, const GraphCoverageMap& cov_map,
-                                            const pe_config::ParamSetT& pset, bool use_auto_threshold, const PathContainer& paths_for_mp = PathContainer()) {
+                                            const pe_config::ParamSetT& pset, shared_ptr<ScaffoldingUniqueEdgeStorage> storage, const PathContainer& paths_for_mp = PathContainer()) {
 
     vector<shared_ptr<PathExtender> > result;
     vector<shared_ptr<PathExtender> > pes;
+    vector<shared_ptr<PathExtender> > pes2015;
     vector<shared_ptr<PathExtender> > pe_loops;
     vector<shared_ptr<PathExtender> > pe_scafs;
     vector<shared_ptr<PathExtender> > mps;
@@ -385,37 +502,64 @@ inline vector<shared_ptr<PathExtender> > MakeAllExtenders(PathExtendStage stage,
             if (lib.type() != lt)
                 continue;
 
-            if (IsForSingleReadExtender(lib)) {
+            //TODO: scaff2015 does not need any single read libs?
+            if (IsForSingleReadExtender(lib) && pset.sm != sm_2015) {
                 result.push_back(MakeLongReadsExtender(gp, cov_map, i, pset));
                 ++single_read_libs;
             }
-            if (IsForPEExtender(lib) && stage == PathExtendStage::PEStage) {
-                if(cfg::get().ds.moleculo)
-                    pes.push_back(MakeLongEdgePEExtender(gp, cov_map, i, pset, use_auto_threshold, false));
-                pes.push_back(MakePEExtender(gp, cov_map, i, pset, use_auto_threshold, false));
-            }
-            if (IsForShortLoopExtender(lib)) {
-                pe_loops.push_back(MakePEExtender(gp, cov_map, i, pset, use_auto_threshold, true));
+            if (IsForPEExtender(lib)) {
                 ++pe_libs;
+                if (stage == PathExtendStage::PEStage
+                    && (pset.sm == sm_old_pe_2015 || pset.sm == sm_old || pset.sm == sm_combined)) {
+                    if (cfg::get().ds.meta)
+                        //TODO proper configuration via config
+                        pes.push_back(MakeMetaExtender(gp, cov_map, i, pset, false));
+                    else if (cfg::get().ds.moleculo)
+                        pes.push_back(MakeLongEdgePEExtender(gp, cov_map, i, pset, false));
+                    else
+                        pes.push_back(MakePEExtender(gp, cov_map, i, pset, false));
+                }
+                else if (pset.sm == sm_2015) {
+                    pes2015.push_back(MakeScaffolding2015Extender(gp, cov_map, i, pset, storage));
+                }
+            }
+            if (IsForShortLoopExtender(lib) && (pset.sm == sm_old_pe_2015 || pset.sm == sm_old || pset.sm == sm_combined)) {
+                if (cfg::get().ds.meta) {
+                    pes.push_back(MakeMetaExtender(gp, cov_map, i, pset, true));
+                } else {
+                    pe_loops.push_back(MakePEExtender(gp, cov_map, i, pset, true));
+                }
             }
             if (IsForScaffoldingExtender(lib) && cfg::get().use_scaffolder && pset.scaffolder_options.on) {
-                pe_scafs.push_back(MakeScaffoldingExtender(gp, cov_map, i, pset));
                 ++scf_pe_libs;
+                if (pset.sm == sm_old || pset.sm == sm_combined) {
+                    pe_scafs.push_back(MakeScaffoldingExtender(gp, cov_map, i, pset));
+                }
+                if (pset.sm == sm_old_pe_2015 || pset.sm == sm_combined) {
+                    pe_scafs.push_back(MakeScaffolding2015Extender(gp, cov_map, i, pset, storage));
+                }
             }
             if (IsForMPExtender(lib) && stage == PathExtendStage::MPStage) {
-                mps.push_back(MakeMPExtender(gp, cov_map, paths_for_mp, i, pset));
                 ++mp_libs;
+                if (pset.sm == sm_old || pset.sm == sm_combined) {
+                    mps.push_back(MakeMPExtender(gp, cov_map, paths_for_mp, i, pset));
+                }
+                if (is_2015_scaffolder_enabled(pset.sm)) {
+                    mps.push_back(MakeScaffolding2015Extender(gp, cov_map, i, pset, storage));
+                }
             }
         }
 
         //std::sort(scaff_libs.begin(), scaff_libs.end(), InsertSizeCompare);
         result.insert(result.end(), pes.begin(), pes.end());
+        result.insert(result.end(), pes2015.begin(), pes2015.end());
         result.insert(result.end(), pe_loops.begin(), pe_loops.end());
         result.insert(result.end(), pe_scafs.begin(), pe_scafs.end());
         result.insert(result.end(), mps.begin(), mps.end());
         pes.clear();
         pe_loops.clear();
         pe_scafs.clear();
+        pes2015.clear();
         mps.clear();
     }
 
@@ -424,10 +568,83 @@ inline vector<shared_ptr<PathExtender> > MakeAllExtenders(PathExtendStage stage,
     INFO("Using " << mp_libs << " mate-pair " << LibStr(mp_libs));
     INFO("Using " << single_read_libs << " single read " << LibStr(single_read_libs));
     INFO("Scaffolder is " << (pset.scaffolder_options.on ? "on" : "off"));
+
+    if(pset.use_coordinated_coverage) {
+        INFO("Using additional coordinated coverage extender");
+        result.push_back(MakeCoordCoverageExtender(gp, cov_map, pset));
+    }
+
+    PrintExtenders(result);
     return result;
 }
 
-size_t FindOverlapLenForStage(PathExtendStage stage) {
+inline shared_ptr<scaffold_graph::ScaffoldGraph> ConstructScaffoldGraph(const conj_graph_pack& gp,
+                                                                        shared_ptr<ScaffoldingUniqueEdgeStorage> edge_storage,
+                                                                        const pe_config::ParamSetT::ScaffoldGraphParamsT& params) {
+    using namespace scaffold_graph;
+    vector<shared_ptr<ConnectionCondition>> conditions;
+
+    INFO("Constructing connections");
+    if (params.graph_connectivity) {
+        conditions.push_back(make_shared<AssemblyGraphConnectionCondition>(gp.g, params.max_path_length));
+    }
+    for (size_t lib_index = 0; lib_index < cfg::get().ds.reads.lib_count(); ++lib_index) {
+        auto lib = cfg::get().ds.reads[lib_index];
+        if (lib.is_paired()) {
+            shared_ptr<PairedInfoLibrary> paired_lib;
+            if (IsForMPExtender(lib))
+                paired_lib = MakeNewLib(gp.g, gp.paired_indices, lib_index);
+            else if (IsForPEExtender(lib))
+                paired_lib = MakeNewLib(gp.g, gp.clustered_indices, lib_index);
+            else
+                INFO("Unusable paired lib #" << lib_index);
+            conditions.push_back(make_shared<PairedLibConnectionCondition>(gp.g, paired_lib, lib_index, params.min_read_count));
+        }
+    }
+    INFO("Total conditions " << conditions.size());
+
+    INFO("Constructing scaffold graph");
+    LengthEdgeCondition edge_condition(gp.g, edge_storage->GetMinLength());
+    DefaultScaffoldGraphConstructor constructor(gp.g, edge_storage->GetSet(), conditions, edge_condition);
+    auto scaffoldGraph = constructor.Construct();
+
+    INFO("Scaffold graph contains " << scaffoldGraph->VertexCount() << " vertices and " << scaffoldGraph->EdgeCount() << " edges");
+    return scaffoldGraph;
+}
+
+
+inline void PrintScaffoldGraph(shared_ptr<scaffold_graph::ScaffoldGraph> scaffoldGraph,
+                               const set<EdgeId> main_edge_set,
+                               const string& filename) {
+    using namespace scaffold_graph;
+
+    auto vcolorer = make_shared<ScaffoldVertexSetColorer>(main_edge_set);
+    auto ecolorer = make_shared<ScaffoldEdgeColorer>();
+    CompositeGraphColorer <ScaffoldGraph> colorer(vcolorer, ecolorer);
+
+    INFO("Visualizing single grpah");
+    ScaffoldGraphVisualizer singleVisualizer(*scaffoldGraph, false);
+    std::ofstream single_dot;
+    single_dot.open((filename + "_single.dot").c_str());
+    singleVisualizer.Visualize(single_dot, colorer);
+    single_dot.close();
+
+    INFO("Visualizing paired grpah");
+    ScaffoldGraphVisualizer pairedVisualizer(*scaffoldGraph, true);
+    std::ofstream paired_dot;
+    paired_dot.open((filename + "_paired.dot").c_str());
+    pairedVisualizer.Visualize(paired_dot, colorer);
+    paired_dot.close();
+
+    INFO("Printing scaffold grpah");
+    std::ofstream data_stream;
+    data_stream.open((filename + ".data").c_str());
+    scaffoldGraph->Print(data_stream);
+    data_stream.close();
+}
+
+
+inline size_t FindOverlapLenForStage(PathExtendStage stage) {
     size_t res = 0;
     for (const auto& lib : cfg::get().ds.reads) {
         if (IsForPEExtender(lib) && stage == PathExtendStage::PEStage) {
@@ -454,14 +671,65 @@ inline void ResolveRepeatsPe(conj_graph_pack& gp,
         const std::string& output_dir,
         const std::string& contigs_name,
         bool traversLoops,
-        boost::optional<std::string> broken_contigs,
-        bool use_auto_threshold = true) {
+        boost::optional<std::string> broken_contigs) {
 
     INFO("ExSPAnder repeat resolving tool started");
 
+    auto storage = std::make_shared<ScaffoldingUniqueEdgeStorage>();
+    auto sc_mode = cfg::get().pe_params.param_set.sm;
+
+    if (sc_mode != sm_old) {
+//TODO: Separate function!!
+        //Setting scaffolding2015 parameters
+        auto min_unique_length = cfg::get().pe_params.param_set.scaffolding2015.min_unique_length;
+        auto unique_variaton = cfg::get().pe_params.param_set.scaffolding2015.unique_coverage_variation;
+        if (cfg::get().pe_params.param_set.scaffolding2015.autodetect) {
+            INFO("Autodetecting unique edge set parameters...");
+            bool pe_found = false;
+//TODO constant
+            size_t min_MP_IS = 10000;
+            for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
+
+                if (IsForPEExtender(cfg::get().ds.reads[i])) {
+                    pe_found = true;
+                }
+                if (IsForMPExtender(cfg::get().ds.reads[i])) {
+                    min_MP_IS = min(min_MP_IS, (size_t) cfg::get().ds.reads[i].data().mean_insert_size);
+                }
+            }
+            if (pe_found) {
+//TODO constants;
+                unique_variaton = 0.5;
+                INFO("PE lib found, we believe in coverage");
+            } else {
+                unique_variaton = 50;
+                INFO("No paired libs found, we do not believe in coverage");
+            }
+            min_unique_length = min_MP_IS;
+            INFO("Minimal unique edge length set to the smallest MP library IS: " << min_unique_length);
+
+        } else {
+            INFO("Unique edge set constructed with parameters from config : length " << min_unique_length
+                     << " variation " << unique_variaton);
+        }
+        ScaffoldingUniqueEdgeAnalyzer unique_edge_analyzer(gp, min_unique_length, unique_variaton);
+        unique_edge_analyzer.FillUniqueEdgeStorage(*storage);
+    }
+
+
     make_dir(output_dir);
     make_dir(GetEtcDir(output_dir));
-    const pe_config::ParamSetT& pset = cfg::get().pe_params.param_set;
+    const pe_config::ParamSetT &pset = cfg::get().pe_params.param_set;
+
+    //Scaffold graph
+    shared_ptr<scaffold_graph::ScaffoldGraph> scaffoldGraph;
+    if (cfg::get().pe_params.param_set.scaffold_graph_params.construct) {
+        scaffoldGraph = ConstructScaffoldGraph(gp, storage, cfg::get().pe_params.param_set.scaffold_graph_params);
+        if (cfg::get().pe_params.param_set.scaffold_graph_params.output) {
+            PrintScaffoldGraph(scaffoldGraph, storage->GetSet(), GetEtcDir(output_dir) + "scaffold_graph");
+        }
+    }
+
 
     DefaultContigCorrector<ConjugateDeBruijnGraph> corrector(gp.g);
     DefaultContigConstructor<ConjugateDeBruijnGraph> constructor(gp.g, corrector);
@@ -471,9 +739,14 @@ inline void ResolveRepeatsPe(conj_graph_pack& gp,
     GraphCoverageMap cover_map(gp.g);
     INFO("SUBSTAGE = paired-end libraries")
     PathExtendStage exspander_stage = PathExtendStage::PEStage;
-    vector<shared_ptr<PathExtender> > all_libs = MakeAllExtenders(exspander_stage, gp, cover_map, pset, use_auto_threshold);
-    size_t max_over = max(FindOverlapLenForStage(exspander_stage), gp.g.k() + 100);
-    shared_ptr<CompositeExtender> mainPE = make_shared<CompositeExtender>(gp.g, cover_map, all_libs, max_over);
+    vector<shared_ptr<PathExtender> > all_libs = MakeAllExtenders(exspander_stage, gp, cover_map, pset, storage);
+
+    //Parameters are subject to change
+    size_t max_is_right_quantile = max(FindOverlapLenForStage(exspander_stage), gp.g.k() + 100);
+    size_t min_edge_len = 100;
+
+    shared_ptr<CompositeExtender> mainPE = make_shared<CompositeExtender>(gp.g, cover_map, all_libs,
+                                                                          max_is_right_quantile, storage);
 
 //extend pe + long reads
     PathExtendResolver resolver(gp.g);
@@ -483,7 +756,7 @@ inline void ResolveRepeatsPe(conj_graph_pack& gp,
     INFO("Growing paths using paired-end and long single reads");
     auto paths = resolver.extendSeeds(seeds, *mainPE);
     paths.SortByLength();
-    DebugOutputPaths(gp, output_dir, paths, "pe_overlaped_paths");
+    DebugOutputPaths(gp, output_dir, paths, "pe_before_overlap");
 
     PathContainer clone_paths;
     GraphCoverageMap clone_map(gp.g);
@@ -492,18 +765,19 @@ inline void ResolveRepeatsPe(conj_graph_pack& gp,
     if (mp_exist) {
         ClonePathContainer(paths, clone_paths, clone_map);
     }
-
-    FinalizePaths(paths, cover_map, max_over);
+//We do not run overlap removal in 2015 mode
+    if (!(sc_mode == sm_old_pe_2015 || sc_mode == sm_2015 || sc_mode == sm_combined))
+        FinalizePaths(paths, cover_map, min_edge_len, max_is_right_quantile);
     if (broken_contigs.is_initialized()) {
         OutputBrokenScaffolds(paths, (int) gp.g.k(), writer,
                               output_dir + (mp_exist ? "pe_contigs" : broken_contigs.get()));
     }
-    writer.OutputPaths(paths, GetEtcDir(output_dir) + "pe_before_traversal");
-    DebugOutputPaths(gp, output_dir, paths, "before_traverse_pe");
+    DebugOutputPaths(gp, output_dir, paths, "pe_before_traverse");
     if (traversLoops) {
         TraverseLoops(paths, cover_map, mainPE);
+        FinalizePaths(paths, cover_map, min_edge_len, max_is_right_quantile);
     }
-    DebugOutputPaths(gp, output_dir, paths, (mp_exist ? "final_pe_paths" : "final_paths"));
+    DebugOutputPaths(gp, output_dir, paths, (mp_exist ? "pe_final_paths" : "final_paths"));
     writer.OutputPaths(paths, output_dir + (mp_exist ? "pe_scaffolds" : contigs_name));
 
     cover_map.Clear();
@@ -513,49 +787,55 @@ inline void ResolveRepeatsPe(conj_graph_pack& gp,
     }
 
 //MP
-    DebugOutputPaths(gp, output_dir, clone_paths, "before_mp_paths");
+    DebugOutputPaths(gp, output_dir, clone_paths, "mp_before_extend");
 
     INFO("SUBSTAGE = mate-pair libraries ")
     exspander_stage = PathExtendStage::MPStage;
     all_libs.clear();
-    all_libs = MakeAllExtenders(exspander_stage, gp, clone_map, pset, use_auto_threshold, clone_paths);
-    max_over = FindOverlapLenForStage(exspander_stage);
-    shared_ptr<CompositeExtender> mp_main_pe = make_shared<CompositeExtender>(gp.g, clone_map, all_libs, max_over);
+    all_libs = MakeAllExtenders(exspander_stage, gp, clone_map, pset, storage, clone_paths);
+    max_is_right_quantile = FindOverlapLenForStage(exspander_stage);
+    shared_ptr<CompositeExtender> mp_main_pe = make_shared<CompositeExtender>(gp.g, clone_map, all_libs,
+                                                                              max_is_right_quantile, storage);
 
     INFO("Growing paths using mate-pairs");
     auto mp_paths = resolver.extendSeeds(clone_paths, *mp_main_pe);
-    FinalizePaths(mp_paths, clone_map, max_over, true);
+    if (!is_2015_scaffolder_enabled(pset.sm)) {
+        DebugOutputPaths(gp, output_dir, mp_paths, "mp_before_overlap");
+        FinalizePaths(mp_paths, clone_map, max_is_right_quantile, max_is_right_quantile, true);
+    }
     DebugOutputPaths(gp, output_dir, mp_paths, "mp_final_paths");
-    writer.OutputPaths(mp_paths, GetEtcDir(output_dir) + "mp_prefinal");
-
     DEBUG("Paths are grown with mate-pairs");
-    if (cfg::get().pe_params.debug_output) {
-        writer.OutputPaths(mp_paths, output_dir + "mp_paths");
-    }
+
 //MP end
 
 //pe again
     INFO("SUBSTAGE = polishing paths")
     exspander_stage = PathExtendStage::FinalizingPEStage;
     all_libs.clear();
-    all_libs = MakeAllExtenders(exspander_stage, gp, cover_map, pset, use_auto_threshold);
-    max_over = FindOverlapLenForStage(exspander_stage);
-    shared_ptr<CompositeExtender> last_extender = make_shared<CompositeExtender>(gp.g, clone_map, all_libs, max_over);
+    all_libs = MakeAllExtenders(exspander_stage, gp, cover_map, pset, storage);
+    max_is_right_quantile = FindOverlapLenForStage(exspander_stage);
+    shared_ptr<CompositeExtender> last_extender = make_shared<CompositeExtender>(gp.g, clone_map, all_libs,
+                                                                                 max_is_right_quantile, storage);
 
     auto last_paths = resolver.extendSeeds(mp_paths, *last_extender);
-    FinalizePaths(last_paths, clone_map, max_over);
+    DebugOutputPaths(gp, output_dir, last_paths, "mp2_before_overlap");
+    if (!is_2015_scaffolder_enabled(pset.sm)) {
+        FinalizePaths(last_paths, clone_map, min_edge_len, max_is_right_quantile);
+        DebugOutputPaths(gp, output_dir, last_paths, "mp2_before_traverse");
+    }
 
-    writer.OutputPaths(last_paths, GetEtcDir(output_dir) + "mp_before_traversal");
-    DebugOutputPaths(gp, output_dir, last_paths, "before_traverse_mp");
     TraverseLoops(last_paths, clone_map, last_extender);
+    FinalizePaths(last_paths, clone_map, min_edge_len, max_is_right_quantile);
 
 //result
     if (broken_contigs.is_initialized()) {
         OutputBrokenScaffolds(last_paths, (int) gp.g.k(), writer, output_dir + broken_contigs.get());
     }
-    DebugOutputPaths(gp, output_dir, last_paths, "last_paths");
+    DebugOutputPaths(gp, output_dir, last_paths, "mp2_final_paths");
     writer.OutputPaths(last_paths, output_dir + contigs_name);
 
+    //FinalizeUniquenessPaths();
+
     last_paths.DeleteAllPaths();
     seeds.DeleteAllPaths();
     mp_paths.DeleteAllPaths();
diff --git a/src/debruijn/path_extend/path_extender.hpp b/src/debruijn/path_extend/path_extender.hpp
index f49d5bd..7f40516 100644
--- a/src/debruijn/path_extend/path_extender.hpp
+++ b/src/debruijn/path_extend/path_extender.hpp
@@ -13,11 +13,14 @@
 
 #pragma once
 
-#include "pe_utils.hpp"
+
 #include "extension_chooser.hpp"
 #include "path_filter.hpp"
+#include "overlap_analysis.hpp"
+#include "scaffolder2015/scaff_supplementary.hpp"
 #include <cmath>
 
+
 namespace path_extend {
 
 class ShortLoopResolver {
@@ -27,7 +30,7 @@ public:
 
     virtual ~ShortLoopResolver() { }
 
-    virtual void ResolveShortLoop(BidirectionalPath& path) = 0;
+    virtual void ResolveShortLoop(BidirectionalPath& path) const = 0;
 
 protected:
     DECL_LOGGER("PathExtender")
@@ -48,7 +51,7 @@ protected:
         }
     }
 
-    void MakeCycleStep(BidirectionalPath& path, EdgeId e) {
+    void MakeCycleStep(BidirectionalPath& path, EdgeId e) const {
         if (path.Size() == 0) {
             return;
         }
@@ -64,7 +67,8 @@ public:
             : ShortLoopResolver(gp.g), gp_(gp) {
 
     }
-    virtual void ResolveShortLoop(BidirectionalPath& path) {
+
+    void ResolveShortLoop(BidirectionalPath& path) const override {
         DEBUG("resolve short loop by coverage");
         path.Print();
 
@@ -119,7 +123,7 @@ class SimpleLoopResolver : public ShortLoopResolver {
 public:
     SimpleLoopResolver(Graph& g) : ShortLoopResolver(g) { }
 
-    virtual void ResolveShortLoop(BidirectionalPath& path) {
+    void ResolveShortLoop(BidirectionalPath& path) const override {
         pair<EdgeId, EdgeId> edges;
         if (path.Size() >= 1 && GetLoopAndExit(g_, path.Back(), edges)) {
             DEBUG("Resolving short loop...");
@@ -136,30 +140,29 @@ protected:
 };
 
 class LoopResolver : public ShortLoopResolver {
-    static const size_t iter_ = 10;
-    ExtensionChooser& chooser_;
+    static const size_t ITER_COUNT = 10;
+    const WeightCounter& wc_;
 
 public:
-    LoopResolver(const Graph& g, ExtensionChooser& chooser)
+    LoopResolver(const Graph& g, const WeightCounter& wc)
             : ShortLoopResolver(g),
-              chooser_(chooser) { }
+              wc_(wc) { }
 
-    void MakeBestChoice(BidirectionalPath& path, pair<EdgeId, EdgeId>& edges) {
+    void MakeBestChoice(BidirectionalPath& path, pair<EdgeId, EdgeId>& edges) const {
         UndoCycles(path, edges.first);
-        chooser_.ClearExcludedEdges();
         BidirectionalPath experiment(path);
-        double maxWeight = chooser_.CountWeight(experiment, edges.second);
-        double diff = maxWeight - chooser_.CountWeight(experiment, edges.first);
+        double max_weight = wc_.CountWeight(experiment, edges.second);
+        double diff = max_weight - wc_.CountWeight(experiment, edges.first);
         size_t maxIter = 0;
-        for (size_t i = 1; i <= iter_; ++i) {
-            double weight = chooser_.CountWeight(experiment, edges.first);
+        for (size_t i = 1; i <= ITER_COUNT; ++i) {
+            double weight = wc_.CountWeight(experiment, edges.first);
             if (weight > 0) {
                 MakeCycleStep(experiment, edges.first);
-                weight = chooser_.CountWeight(experiment, edges.second);
-                double weight2 = chooser_.CountWeight(experiment, edges.first);
-                if (weight > maxWeight || (weight == maxWeight && weight - weight2 > diff)
-                        || (weight == maxWeight && weight - weight2 == diff && i == 1)) {
-                    maxWeight = weight;
+                weight = wc_.CountWeight(experiment, edges.second);
+                double weight2 = wc_.CountWeight(experiment, edges.first);
+                if (weight > max_weight || (weight == max_weight && weight - weight2 > diff)
+                        || (weight == max_weight && weight - weight2 == diff && i == 1)) {
+                    max_weight = weight;
                     maxIter = i;
                     diff = weight - weight2;
                 }
@@ -171,7 +174,7 @@ public:
         path.PushBack(edges.second);
     }
 
-    virtual void ResolveShortLoop(BidirectionalPath& path) {
+    void ResolveShortLoop(BidirectionalPath& path) const override {
         pair<EdgeId, EdgeId> edges;
         if (path.Size() >=1 && GetLoopAndExit(g_, path.Back(), edges)) {
             DEBUG("Resolving short loop...");
@@ -181,7 +184,6 @@ public:
     }
 };
 
-
 class GapJoiner {
 
 public:
@@ -189,13 +191,11 @@ public:
     GapJoiner(const Graph& g)
             : g_(g) { }
 
-    virtual int FixGap(EdgeId sink, EdgeId source, int initial_gap) const = 0;
+    virtual Gap FixGap( EdgeId source, EdgeId sink, int initial_gap) const = 0;
 
     virtual ~GapJoiner() { }
 protected:
     const Graph& g_;
-    DECL_LOGGER("PathExtender")
-
 };
 
 class SimpleGapJoiner : public GapJoiner {
@@ -203,31 +203,140 @@ class SimpleGapJoiner : public GapJoiner {
 public:
     SimpleGapJoiner(const Graph& g) : GapJoiner(g) { }
 
-    virtual int FixGap(EdgeId sink, EdgeId source, int initial_gap) const {
+    Gap FixGap(EdgeId source, EdgeId sink, int initial_gap) const override {
         if (initial_gap > 2 * (int) g_.k()) {
-            return initial_gap;
+            return Gap(initial_gap);
         }
         for (int l = (int) g_.k(); l > 0; --l) {
-            if (g_.EdgeNucls(sink).Subseq(g_.length(sink) + g_.k() - l) == g_.EdgeNucls(source).Subseq(0, l)) {
+            if (g_.EdgeNucls(sink).Subseq(g_.length(source) + g_.k() - l) == g_.EdgeNucls(sink).Subseq(0, l)) {
                 DEBUG("Found correct gap length");
                 DEBUG("Inintial: " << initial_gap << ", new gap: " << g_.k() - l);
-                return (int) g_.k() - l;
+                return Gap((int) g_.k() - l);
             }
         }
         DEBUG("Perfect overlap is not found, inintial: " << initial_gap);
-        return initial_gap;
+        return Gap(initial_gap);
     }
 };
 
 class HammingGapJoiner: public GapJoiner {
+    const double min_gap_score_;
+    const size_t short_overlap_threshold_;
+    const size_t basic_overlap_length_;
+
+    vector<size_t> DiffPos(const Sequence& s1, const Sequence& s2) const {
+        VERIFY(s1.size() == s2.size());
+        vector < size_t > answer;
+        for (size_t i = 0; i < s1.size(); ++i)
+            if (s1[i] != s2[i])
+                answer.push_back(i);
+        return answer;
+    }
+
+    size_t HammingDistance(const Sequence& s1, const Sequence& s2) const {
+        VERIFY(s1.size() == s2.size());
+        size_t dist = 0;
+        for (size_t i = 0; i < s1.size(); ++i) {
+            if (s1[i] != s2[i]) {
+                dist++;
+            }
+        }
+        return dist;
+    }
+
+//    double ScoreGap(const Sequence& s1, const Sequence& s2, int gap, int initial_gap) const {
+//        VERIFY(s1.size() == s2.size());
+//        return 1.0 - (double) HammingDistance(s1, s2) / (double) s1.size()
+//                - (double) abs(gap - initial_gap) / (double) (2 * g_.k());
+//    }
+
+
+    double ScoreGap(const Sequence& s1, const Sequence& s2) const {
+        VERIFY(s1.size() == s2.size());
+        return 1.0 - (double) HammingDistance(s1, s2) / (double) s1.size();
+    }
+
+public:
+
+    //todo review parameters in usages
+    HammingGapJoiner(const Graph& g,
+            double min_gap_score,
+            size_t short_overlap_threshold,
+            size_t basic_overlap_length):
+                GapJoiner(g),
+                min_gap_score_(min_gap_score),
+                short_overlap_threshold_(short_overlap_threshold),
+                basic_overlap_length_(basic_overlap_length)
+    {
+        DEBUG("HammingGapJoiner params: \n min_gap_score " << min_gap_score_ <<
+              "\n short_overlap_threshold " << short_overlap_threshold_ <<
+              "\n basic_overlap_length " << basic_overlap_length_);
+    }
+
+    //estimated_gap is in k-mers
+    Gap FixGap(EdgeId source, EdgeId sink, int estimated_gap) const override {
+
+        size_t corrected_start_overlap = basic_overlap_length_;
+        if (estimated_gap < 0) {
+            corrected_start_overlap -= estimated_gap;
+        }
+
+        corrected_start_overlap = min(corrected_start_overlap,
+                                      g_.k() + min(g_.length(source), g_.length(sink)));
+
+        DEBUG("Corrected max overlap " << corrected_start_overlap);
+
+        double best_score = min_gap_score_;
+        int fixed_gap = INVALID_GAP;
+
+        double overlap_coeff = 0.3;
+        size_t min_overlap = 1ul;
+        if (estimated_gap < 0) {
+            size_t estimated_overlap = g_.k() - estimated_gap;
+            min_overlap = max(size_t(math::round(overlap_coeff * double(estimated_overlap))), 1ul);
+        }
+        //todo better usage of estimated overlap
+        DEBUG("Min overlap " << min_overlap);
+
+        for (size_t l = corrected_start_overlap; l >= min_overlap; --l) {
+            //TRACE("Sink: " << g_.EdgeNucls(sink).Subseq(g_.length(sink) + g_.k() - l).str());
+            //TRACE("Source: " << g_.EdgeNucls(source).Subseq(0, l));
+            double score = 0;
+            score = ScoreGap(g_.EdgeNucls(source).Subseq(g_.length(source) + g_.k() - l),
+                                    g_.EdgeNucls(sink).Subseq(0, l));
+            if (math::gr(score, best_score)) {
+                TRACE("Curr overlap " << l);
+                TRACE("Score: " << score);
+                best_score = score;
+                fixed_gap = int(g_.k() - l);
+            }
+
+            if (l == short_overlap_threshold_ && fixed_gap != INVALID_GAP) {
+                //look at "short" overlaps only if long overlaps couldn't be found
+                DEBUG("Not looking at short overlaps");
+                break;
+            }
+        }
+
+        if (fixed_gap != INVALID_GAP) {
+            DEBUG("Found candidate gap length with score " << best_score);
+            DEBUG("Estimated gap: " << estimated_gap <<
+                  ", fixed gap: " << fixed_gap << " (overlap " << g_.k() - fixed_gap<< ")");
+        }
+        return Gap(fixed_gap);
+    }
+
+private:
+    DECL_LOGGER("HammingGapJoiner");
+};
+
+//deprecated!
+//fixme reduce code duplication with HammingGapJoiner
+class LikelihoodHammingGapJoiner: public GapJoiner {
     static const size_t DEFAULT_PADDING_LENGTH = 10;
     const double min_gap_score_;
-    const int must_overlap_threshold_;
-    const size_t may_overlap_threshold_;
     const size_t short_overlap_threshold_;
     const size_t basic_overlap_length_;
-    const size_t artificial_gap_;
-    const bool use_old_score_;
 
     vector<size_t> DiffPos(const Sequence& s1, const Sequence& s2) const {
         VERIFY(s1.size() == s2.size());
@@ -267,48 +376,25 @@ class HammingGapJoiner: public GapJoiner {
         return 2.*double(n) + double(n - mismatches) * log_match_prob + double(mismatches) * log_mismatch_prob;
     }
 
-    double OldScoreGap(const Sequence& s1, const Sequence& s2) const {
-        VERIFY(s1.size() == s2.size());
-        return 1.0 - (double) HammingDistance(s1, s2) / (double) s1.size();
-    }
-
 public:
 
     //todo review parameters in usages
-    HammingGapJoiner(const Graph& g,
+    LikelihoodHammingGapJoiner(const Graph& g,
             double min_gap_score,
-            int must_overlap_threshold,
-            size_t may_overlap_threshold,
             size_t short_overlap_threshold,
-            size_t basic_overlap_length,
-            size_t artificial_gap = DEFAULT_PADDING_LENGTH,
-            bool use_old_score = false):
+            size_t basic_overlap_length):
                 GapJoiner(g),
                 min_gap_score_(min_gap_score),
-                must_overlap_threshold_(must_overlap_threshold),
-                may_overlap_threshold_(may_overlap_threshold),
                 short_overlap_threshold_(short_overlap_threshold),
-                basic_overlap_length_(basic_overlap_length),
-                artificial_gap_(artificial_gap),
-                use_old_score_(use_old_score)
+                basic_overlap_length_(basic_overlap_length)
     {
-        DEBUG("HammingGapJoiner params: \n min_gap_score " << min_gap_score_ <<
-              "\n must_overlap_threshold " << must_overlap_threshold_ <<
-              "\n may_overlap_threshold " << may_overlap_threshold_ <<
+        DEBUG("LikelihoodHammingGapJoiner params: \n min_gap_score " << min_gap_score_ <<
               "\n short_overlap_threshold " << short_overlap_threshold_ <<
-              "\n basic_overlap_length " << basic_overlap_length_ <<
-              "\n artificial_gap " << artificial_gap_);
+              "\n basic_overlap_length " << basic_overlap_length_);
     }
 
     //estimated_gap is in k-mers
-    virtual int FixGap(EdgeId sink, EdgeId source, int estimated_gap) const {
-        DEBUG("Trying to fix estimated gap " << estimated_gap <<
-              " between " << g_.str(sink) << " and " << g_.str(source));
-
-        if (estimated_gap > int(g_.k() + may_overlap_threshold_)) {
-            DEBUG("Edges are supposed to be too far to check overlaps");
-            return estimated_gap;
-        }
+    Gap FixGap(EdgeId source, EdgeId sink, int estimated_gap) const override {
 
         size_t corrected_start_overlap = basic_overlap_length_;
         if (estimated_gap < 0) {
@@ -316,7 +402,7 @@ public:
         }
 
         corrected_start_overlap = min(corrected_start_overlap,
-                                      g_.k() + min(g_.length(sink), g_.length(source)));
+                                      g_.k() + min(g_.length(source), g_.length(sink)));
 
         DEBUG("Corrected max overlap " << corrected_start_overlap);
 
@@ -336,12 +422,8 @@ public:
             //TRACE("Sink: " << g_.EdgeNucls(sink).Subseq(g_.length(sink) + g_.k() - l).str());
             //TRACE("Source: " << g_.EdgeNucls(source).Subseq(0, l));
             double score = 0;
-            if(use_old_score_)
-                score = OldScoreGap(g_.EdgeNucls(sink).Subseq(g_.length(sink) + g_.k() - l),
-                                    g_.EdgeNucls(source).Subseq(0, l));
-            else
-                score = ScoreGap(g_.EdgeNucls(sink).Subseq(g_.length(sink) + g_.k() - l),
-                                    g_.EdgeNucls(source).Subseq(0, l));
+            score = ScoreGap(g_.EdgeNucls(source).Subseq(g_.length(source) + g_.k() - l),
+                                    g_.EdgeNucls(sink).Subseq(0, l));
             if (math::gr(score, best_score)) {
                 TRACE("Curr overlap " << l);
                 TRACE("Score: " << score);
@@ -360,24 +442,163 @@ public:
             DEBUG("Found candidate gap length with score " << best_score);
             DEBUG("Estimated gap: " << estimated_gap <<
                   ", fixed gap: " << fixed_gap << " (overlap " << g_.k() - fixed_gap<< ")");
-        } else {
-            //couldn't find decent overlap
-            if (estimated_gap < must_overlap_threshold_) {
-                DEBUG("Estimated gap looks unreliable");
-            } else {
-                DEBUG("Overlap was not found");
-                fixed_gap = max(estimated_gap, int(g_.k() + artificial_gap_));
+        }
+        return Gap(fixed_gap);
+    }
+
+private:
+    DECL_LOGGER("LikelihoodHammingGapJoiner");
+};
+
+//if I was in LA
+class LAGapJoiner: public GapJoiner {
+public:
+    LAGapJoiner(const Graph& g, size_t min_la_length,
+            double flank_multiplication_coefficient,
+            double flank_addition_coefficient) :
+            GapJoiner(g), min_la_length_(min_la_length), flank_addition_coefficient_(
+                    flank_addition_coefficient), flank_multiplication_coefficient_(
+                    flank_multiplication_coefficient) {
+        DEBUG("flank_multiplication_coefficient - " << flank_multiplication_coefficient_); DEBUG("flank_addition_coefficient_  - " << flank_addition_coefficient_ );
+    }
+
+    Gap FixGap(EdgeId source, EdgeId sink, int initial_gap) const override {
+
+        DEBUG("Overlap doesn't exceed " << size_t(abs(initial_gap) * ESTIMATED_GAP_MULTIPLIER) + GAP_ADDITIONAL_COEFFICIENT);
+        SWOverlapAnalyzer overlap_analyzer(
+                size_t(abs(initial_gap) * ESTIMATED_GAP_MULTIPLIER) + GAP_ADDITIONAL_COEFFICIENT);
+
+        auto overlap_info = overlap_analyzer.AnalyzeOverlap(g_, source,
+                sink);
+
+        DEBUG(overlap_info);
+
+        if (overlap_info.size() < min_la_length_) {
+            DEBUG("Low alignment size");
+            return Gap(INVALID_GAP);
+        }
+
+        size_t max_flank_length = max(overlap_info.r2.start_pos,
+                g_.length(source) + g_.k() - overlap_info.r1.end_pos);
+        DEBUG("Max flank length - " << max_flank_length);
+
+        if ((double) max_flank_length * flank_multiplication_coefficient_
+                + flank_addition_coefficient_ > overlap_info.size()) {
+            DEBUG("Too long flanks for such alignment");
+            return Gap(INVALID_GAP);
+        }
+
+        if (overlap_info.identity() < IDENTITY_RATIO) {
+            DEBUG("Low identity score");
+            return Gap(INVALID_GAP);
+        }
+
+        if ((g_.length(source) + g_.k())  - overlap_info.r1.end_pos > g_.length(source)) {
+            DEBUG("Save kmers. Don't want to have edges shorter than k");
+            return Gap(INVALID_GAP);
+        }
+
+        if (overlap_info.r2.start_pos > g_.length(sink)) {
+            DEBUG("Save kmers. Don't want to have edges shorter than k");
+            return Gap(INVALID_GAP);
+        }
+
+        return Gap(
+                (int) (-overlap_info.r1.size() - overlap_info.r2.start_pos
+                        + g_.k()),
+                (uint32_t) (g_.length(source) + g_.k()
+                        - overlap_info.r1.end_pos),
+                (uint32_t) overlap_info.r2.start_pos);
+    }
+
+private:
+    DECL_LOGGER("LAGapJoiner");
+    const size_t min_la_length_;
+    const double flank_addition_coefficient_;
+    const double flank_multiplication_coefficient_;
+    constexpr static double IDENTITY_RATIO = 0.9;
+    constexpr static double ESTIMATED_GAP_MULTIPLIER = 2.0;
+    const size_t GAP_ADDITIONAL_COEFFICIENT = 30;
+};
+
+
+class CompositeGapJoiner: public GapJoiner {
+public:
+
+    CompositeGapJoiner(const Graph& g, 
+                       const vector<shared_ptr<GapJoiner>>& joiners, 
+                       size_t may_overlap_threhold, 
+                       int must_overlap_threhold, 
+                       size_t artificail_gap) :
+            GapJoiner(g), 
+            joiners_(joiners), 
+            may_overlap_threshold_(may_overlap_threhold), 
+            must_overlap_threshold_(must_overlap_threhold), 
+            artificial_gap_(artificail_gap)
+            {  }
+
+    Gap FixGap(EdgeId source, EdgeId sink, int estimated_gap) const override {
+        DEBUG("Trying to fix estimated gap " << estimated_gap <<
+                " between " << g_.str(source) << " and " << g_.str(sink));
+
+        if (estimated_gap > int(g_.k() + may_overlap_threshold_)) {
+            DEBUG("Edges are supposed to be too far to check overlaps");
+            return Gap(estimated_gap);
+        }
+
+        for (auto joiner : joiners_) {
+            Gap gap = joiner->FixGap(source, sink, estimated_gap);
+            if (gap.gap_ != GapJoiner::INVALID_GAP) {
+                return gap;
             }
         }
-        DEBUG("Final fixed gap " << fixed_gap);
-        return fixed_gap;
+
+        //couldn't find decent overlap
+        if (estimated_gap < must_overlap_threshold_) {
+            DEBUG("Estimated gap looks unreliable");
+            return Gap(INVALID_GAP);
+        } else {
+            DEBUG("Overlap was not found");
+            return Gap(max(estimated_gap, int(g_.k() + artificial_gap_)));
+        }
     }
 
 private:
-    DECL_LOGGER("HammingGapJoiner");
+    vector<shared_ptr<GapJoiner>> joiners_;
+    const size_t may_overlap_threshold_;
+    const int must_overlap_threshold_;
+    const size_t artificial_gap_;
+
+    DECL_LOGGER("CompositeGapJoiner");
 };
 
+//FIXME move to tests
+//Just for test. Look at overlap_analysis_tests
+inline Gap MimicLAGapJoiner(Sequence& s1, Sequence& s2) {
+    const int INVALID_GAP = -1000000;
+    constexpr static double IDENTITY_RATIO = 0.9;
+
+    SWOverlapAnalyzer overlap_analyzer_(10000);
+    auto overlap_info = overlap_analyzer_.AnalyzeOverlap(s1, s2);
+    size_t min_la_length_ = 4;
+    if (overlap_info.size() < min_la_length_) {
+        DEBUG("Low alignment size");
+        return Gap(INVALID_GAP);
+    }
+    if (overlap_info.identity() < IDENTITY_RATIO) {
+        DEBUG("Low identity score");
+        return Gap(INVALID_GAP);
+    }
+    std::cout << overlap_info;
+
+    return Gap(
+            (int) (-overlap_info.r1.size() - overlap_info.r2.start_pos),
+            (uint32_t) (s1.size() - overlap_info.r1.end_pos),
+            (uint32_t) overlap_info.r2.start_pos);
+}
+
 
+//Detects a cycle as a minsuffix > IS present earlier in the path. Overlap is allowed.
 class InsertSizeLoopDetector {
 protected:
     const Graph& g_;
@@ -392,6 +613,16 @@ public:
         return min_cycle_len_;
     }
 
+    bool CheckCycledNonIS(const BidirectionalPath& path) const {
+        if (path.Size() <= 2) {
+            return false;
+        }
+        BidirectionalPath last = path.SubPath(path.Size() - 2);
+        int pos = path.FindFirst(last);
+        VERIFY(pos >= 0);
+        return size_t(pos) != path.Size() - 2;
+    }
+
     bool CheckCycled(const BidirectionalPath& path) const {
         return FindCycleStart(path) != -1;
     }
@@ -406,16 +637,21 @@ public:
     int FindCycleStart(const BidirectionalPath& path) const {
         TRACE("Looking for IS cycle " << min_cycle_len_);
         int i = FindPosIS(path);
-        DEBUG("last is pos " << i);
+        TRACE("last is pos " << i);
         if (i < 0) return -1;
 //Tail
         BidirectionalPath last = path.SubPath(i);
-        last.Print();
-        int pos = path.SubPath(0, i).FindFirst(last);
-        DEBUG("looking for 1sr IS cycle " << pos);
+        //last.Print();
+
+        int pos = path.FindFirst(last);
+// not cycle
+        if (pos == i) pos = -1;
+        TRACE("looking for 1sr IS cycle " << pos);
         return pos;
     }
 
+//After a cycle is detected, removes the minimal suffix longer than IS.
+//Returns the beginning of the cycle.
     int RemoveCycle(BidirectionalPath& path) const {
         int pos = FindCycleStart(path);
         DEBUG("Found IS cycle " << pos);
@@ -423,23 +659,17 @@ public:
             return -1;
         }
 
-        size_t skip_identical_edges = 0;
-        LoopDetector loop_detect(&path, cov_map_);
-        if (loop_detect.IsCycled(2, skip_identical_edges)) {
-            return -1;
-        } else {
-            int last_edge_pos = FindPosIS(path);
-            VERIFY(last_edge_pos > -1);
-            DEBUG("last edge pos " << last_edge_pos);
-            VERIFY(last_edge_pos > pos);
-            for (int i = (int) path.Size() - 1; i >= last_edge_pos; --i) {
-                path.PopBack();
-            }
-            VERIFY((int) path.Size() == last_edge_pos);
-            VERIFY(pos < (int) path.Size());
-            DEBUG("result pos " <<pos);
-            return pos;
+        int last_edge_pos = FindPosIS(path);
+        VERIFY(last_edge_pos > -1);
+        DEBUG("last edge pos " << last_edge_pos);
+        VERIFY(last_edge_pos > pos);
+        for (int i = (int) path.Size() - 1; i >= last_edge_pos; --i) {
+            path.PopBack();
         }
+        VERIFY((int) path.Size() == last_edge_pos);
+        VERIFY(pos < (int) path.Size());
+        DEBUG("result pos " <<pos);
+        return pos;
     }
 };
 
@@ -541,13 +771,32 @@ protected:
     DECL_LOGGER("PathExtender")
 };
 
+struct UsedUniqueStorage {
+    set<EdgeId> used_;
+
+    shared_ptr<ScaffoldingUniqueEdgeStorage> unique_;
+    void insert(EdgeId e) {
+        if (unique_->IsUnique(e)) {
+            used_.insert(e);
+            used_.insert(e->conjugate());
+        }
+    }
+    bool IsUsedAndUnique (EdgeId e) {
+        return (unique_->IsUnique(e) && used_.find(e) != used_.end());
+    }
+    UsedUniqueStorage(  shared_ptr<ScaffoldingUniqueEdgeStorage> unique ):used_(), unique_(unique) {}
+};
 class PathExtender {
 public:
     PathExtender(const Graph & g): g_(g){ }
     virtual ~PathExtender() { }
     virtual bool MakeGrowStep(BidirectionalPath& path) = 0;
+    void AddUniqueEdgeStorage(shared_ptr<UsedUniqueStorage> used_storage) {
+        used_storage_ = used_storage;
+    }
 protected:
     const Graph& g_;
+    shared_ptr<UsedUniqueStorage> used_storage_;
     DECL_LOGGER("PathExtender")
 };
 
@@ -561,17 +810,22 @@ public:
               max_diff_len_(max_diff_len) {
     }
 
-    CompositeExtender(Graph & g, GraphCoverageMap& cov_map, vector<shared_ptr<PathExtender> > pes, size_t max_diff_len)
+    CompositeExtender(Graph & g, GraphCoverageMap& cov_map, vector<shared_ptr<PathExtender> > pes, size_t max_diff_len, shared_ptr<ScaffoldingUniqueEdgeStorage> unique)
             : ContigsMaker(g),
               cover_map_(cov_map),
               repeat_detector_(g, cover_map_, 2 * cfg::get().max_repeat_length),  //TODO: move to config
               extenders_(),
               max_diff_len_(max_diff_len) {
         extenders_ = pes;
+        used_storage_ = make_shared<UsedUniqueStorage>(UsedUniqueStorage( unique));
+        for (auto ex: extenders_) {
+            ex->AddUniqueEdgeStorage(used_storage_);
+        }
     }
 
-    void AddExender(shared_ptr<PathExtender> pe) {
+    void AddExtender(shared_ptr<PathExtender> pe) {
         extenders_.push_back(pe);
+        pe->AddUniqueEdgeStorage(used_storage_);
     }
 
     virtual void GrowAll(PathContainer& paths, PathContainer * result) {
@@ -592,7 +846,11 @@ public:
 
     bool MakeGrowStep(BidirectionalPath& path, bool detect_repeats_online = true) {
         DEBUG("make grow step composite extender");
-
+        auto sc_mode = cfg::get().pe_params.param_set.sm;
+        if (is_2015_scaffolder_enabled(sc_mode)) {
+            DEBUG("force switch off online repeats detect, 2015 on");
+            detect_repeats_online = false;
+        }
         if (detect_repeats_online) {
             BidirectionalPath *repeat_path = repeat_detector_.RepeatPath(path);
             size_t repeat_size = repeat_detector_.MaxCommonSize(path, *repeat_path);
@@ -654,7 +912,7 @@ private:
     RepeatDetector repeat_detector_;
     vector<shared_ptr<PathExtender> > extenders_;
     size_t max_diff_len_;
-
+    shared_ptr<UsedUniqueStorage> used_storage_;
     void SubscribeCoverageMap(BidirectionalPath * path) {
         path->Subscribe(&cover_map_);
         for (size_t i = 0; i < path->Size(); ++i) {
@@ -669,6 +927,24 @@ private:
             if (paths.size() > 10 && i % (paths.size() / 10 + 1) == 0) {
                 INFO("Processed " << i << " paths from " << paths.size() << " (" << i * 100 / paths.size() << "%)");
             }
+//In 2015 modes do not use a seed already used in paths.
+            auto sc_mode = cfg::get().pe_params.param_set.sm;
+            if (sc_mode == sm_old_pe_2015 || sc_mode == sm_2015 || sc_mode == sm_combined) {
+                bool was_used = false;
+                for (size_t ind =0; ind < paths.Get(i)->Size(); ind++) {
+                    EdgeId eid = paths.Get(i)->At(ind);
+                    if (used_storage_->IsUsedAndUnique(eid)) {
+                        was_used = true; break;
+                    } else {
+                        used_storage_->insert(eid);
+                    }
+                }
+                if (was_used) {
+                    DEBUG("skipping already used seed");
+                    continue;
+                }
+            }
+//TODO: coverage_map should be exterminated
             if (!cover_map_.IsCovered(*paths.Get(i))) {
                 usedPaths.AddPair(paths.Get(i), paths.GetConjugate(i));
                 BidirectionalPath * path = new BidirectionalPath(*paths.Get(i));
@@ -702,7 +978,8 @@ protected:
     bool investigateShortLoops_;
     bool use_short_loop_cov_resolver_;
     CovShortLoopResolver cov_loop_resolver_;
-    vector<pair<shared_ptr<BidirectionalPath>, shared_ptr<BidirectionalPath> > > visited_cycles_;
+
+    vector<shared_ptr<BidirectionalPath> > visited_cycles_;
     InsertSizeLoopDetector is_detector_;
     const GraphCoverageMap& cov_map_;
 
@@ -736,21 +1013,17 @@ public:
             this->maxLoops_ = maxLoops;
         }
     }
-
+//seems that this is out of date
     bool InExistingLoop(const BidirectionalPath& path) {
         TRACE("Checking existing loops");
         int j = 0;
-        for (auto cycle_pair : visited_cycles_) {
-            shared_ptr<BidirectionalPath> cycle = cycle_pair.first;
-            shared_ptr<BidirectionalPath> cycle_path = cycle_pair.second;
-            VERIFY(!cycle->Empty());
-            VERIFY(!cycle_path->Empty());
+        for (auto cycle : visited_cycles_) {
             VERBOSE_POWER2(j++, "checking ");
-            int pos = path.FindLast(*cycle_path);
+            int pos = path.FindLast(*cycle);
             if (pos == -1)
                 continue;
 
-            int start_cycle_pos = pos + (int) cycle_path->Size();
+            int start_cycle_pos = pos + (int) cycle->Size();
             bool only_cycles_in_tail = true;
             int last_cycle_pos = start_cycle_pos;
             DEBUG("start_cycle pos "<< last_cycle_pos);
@@ -766,19 +1039,20 @@ public:
             DEBUG("last_cycle_pos " << last_cycle_pos);
             only_cycles_in_tail = only_cycles_in_tail && cycle->CompareFrom(0, path.SubPath(last_cycle_pos));
             if (only_cycles_in_tail) {
+// seems that most of this is useless, checking
+                VERIFY (last_cycle_pos == start_cycle_pos);
                 DEBUG("find cycle " << last_cycle_pos);
                 DEBUG("path");
                 path.Print();
                 DEBUG("last subpath");
                 path.SubPath(last_cycle_pos).Print();
-                DEBUG("cycle path");
-                cycle_path->Print();
                 DEBUG("cycle");
                 cycle->Print();
                 DEBUG("last_cycle_pos " << last_cycle_pos << " path size " << path.Size());
                 VERIFY(last_cycle_pos <= (int)path.Size());
                 DEBUG("last cycle pos + cycle " << last_cycle_pos + (int)cycle->Size());
                 VERIFY(last_cycle_pos + (int)cycle->Size() >= (int)path.Size());
+
                 return true;
             }
         }
@@ -790,13 +1064,7 @@ public:
             DEBUG("Wrong position in IS cycle");
             return;
         }
-        int i = (int) pos;
-        while (i >= 0 && path.LengthAt(i) < is_detector_.GetMinCycleLenth()) {
-            --i;
-        }
-        if (i < 0)
-            i = 0;
-        visited_cycles_.push_back(make_pair(std::make_shared<BidirectionalPath>(path.SubPath(pos)), std::make_shared<BidirectionalPath>(path.SubPath(i))));
+        visited_cycles_.push_back(std::make_shared<BidirectionalPath>(path.SubPath(pos)));
         DEBUG("add cycle");
         path.SubPath(pos).Print();
     }
@@ -812,30 +1080,20 @@ public:
                 return true;
             }
         }
-        size_t skip_identical_edges = 0;
-        LoopDetector loop_detect(&path, cov_map_);
-        if (loop_detect.IsCycled(maxLoops_, skip_identical_edges)) {
-            size_t loop_size = loop_detect.LoopEdges(skip_identical_edges, 1);
-            DEBUG("Path is Cycled! skip identival edges = " << skip_identical_edges);
-            path.Print();
-            loop_detect.RemoveLoop(skip_identical_edges, false);
-            DEBUG("After delete");
-            path.Print();
-
-            VERIFY(path.Size() >= loop_size);
-            AddCycledEdges(path, path.Size() - loop_size);
-            return true;
-        }
         return false;
     }
 
+    bool DetectCycleScaffolding(BidirectionalPath& path) {
+          return is_detector_.CheckCycledNonIS(path);
+    }
+
     virtual bool MakeSimpleGrowStep(BidirectionalPath& path) = 0;
 
     virtual bool ResolveShortLoopByCov(BidirectionalPath& path) = 0;
 
     virtual bool ResolveShortLoopByPI(BidirectionalPath& path) = 0;
 
-    virtual bool CanInvistigateShortLoop() const {
+    virtual bool CanInvestigateShortLoop() const {
         return false;
     }
 
@@ -883,8 +1141,10 @@ private:
     }
 
     bool InvestigateShortLoop() {
-        return investigateShortLoops_ && (use_short_loop_cov_resolver_ || CanInvistigateShortLoop());
+        return investigateShortLoops_ && (use_short_loop_cov_resolver_ || CanInvestigateShortLoop());
     }
+protected:
+    DECL_LOGGER("LoopDetectingPathExtender")
 };
 
 class SimpleExtender: public LoopDetectingPathExtender {
@@ -892,7 +1152,6 @@ class SimpleExtender: public LoopDetectingPathExtender {
 protected:
 
     shared_ptr<ExtensionChooser> extensionChooser_;
-    LoopResolver loopResolver_;
 
     void FindFollowingEdges(BidirectionalPath& path, ExtensionChooser::EdgeContainer * result) {
         DEBUG("Looking for the following edges")
@@ -911,13 +1170,17 @@ protected:
 
 public:
 
-    SimpleExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map, shared_ptr<ExtensionChooser> ec, size_t is, size_t max_loops, bool investigate_short_loops, bool use_short_loop_cov_resolver):
+    SimpleExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map, shared_ptr<ExtensionChooser> ec, 
+                    size_t is, size_t max_loops, bool investigate_short_loops, bool use_short_loop_cov_resolver):
         LoopDetectingPathExtender(gp, cov_map, max_loops, investigate_short_loops, use_short_loop_cov_resolver, is),
-        extensionChooser_(ec),
-        loopResolver_(gp.g, *extensionChooser_) {
+        extensionChooser_(ec) {
     }
 
-    virtual bool MakeSimpleGrowStep(BidirectionalPath& path) {
+    std::shared_ptr<ExtensionChooser> GetExtensionChooser() const {
+        return extensionChooser_;
+    }
+
+    bool MakeSimpleGrowStep(BidirectionalPath& path) override {
         if (path.Size() == 0) {
             return false;
         }
@@ -930,7 +1193,7 @@ public:
         if (candidates.size() == 1) {
             LoopDetector loop_detector(&path, cov_map_);
             if (!investigateShortLoops_ && (loop_detector.EdgeInShortLoop(path.Back()) or loop_detector.EdgeInShortLoop(candidates.back().e_))
-                    && extensionChooser_->WeighConterBased()) {
+                    && extensionChooser_->WeightCounterBased()) {
                 return false;
             }
         }
@@ -943,11 +1206,22 @@ public:
             DEBUG("loop detecor");
             if (!investigateShortLoops_ &&
                     (loop_detector.EdgeInShortLoop(path.Back())  or loop_detector.EdgeInShortLoop(candidates.back().e_))
-                    && extensionChooser_->WeighConterBased()) {
+                    && extensionChooser_->WeightCounterBased()) {
                 return false;
             }
             DEBUG("push");
-            path.PushBack(candidates.back().e_, candidates.back().d_);
+            auto sc_mode = cfg::get().pe_params.param_set.sm;
+            EdgeId eid = candidates.back().e_;
+//In 2015 modes when trying to use already used unique edge, it is not added and path growing stops.
+//That allows us to avoid overlap removal hacks used earlier.
+            if (is_2015_scaffolder_enabled(sc_mode)) {
+                if (used_storage_->IsUsedAndUnique(eid)) {
+                    return false;
+                } else {
+                    used_storage_->insert(eid);
+                }
+            }
+            path.PushBack(eid, candidates.back().d_);
             DEBUG("push done");
             return true;
         }
@@ -955,11 +1229,11 @@ public:
     }
 
 
-    virtual bool CanInvistigateShortLoop() const {
-        return extensionChooser_->WeighConterBased();
+    bool CanInvestigateShortLoop() const override {
+        return extensionChooser_->WeightCounterBased();
     }
 
-    virtual bool ResolveShortLoopByCov(BidirectionalPath& path) {
+    bool ResolveShortLoopByCov(BidirectionalPath& path) override {
         LoopDetector loop_detector(&path, cov_map_);
         size_t init_len = path.Length();
         bool result = false;
@@ -975,24 +1249,28 @@ public:
         return true;
     }
 
-    virtual bool ResolveShortLoopByPI(BidirectionalPath& path) {
-            if (extensionChooser_->WeighConterBased()) {
-                LoopDetector loop_detector(&path, cov_map_);
-                size_t init_len = path.Length();
-                bool result = false;
-                while (path.Size() >= 1 && loop_detector.EdgeInShortLoop(path.Back())) {
-                    loopResolver_.ResolveShortLoop(path);
-                    if (init_len == path.Length()) {
-                        return result;
-                    } else {
-                        result = true;
-                    }
-                    init_len = path.Length();
+    bool ResolveShortLoopByPI(BidirectionalPath& path) override {
+        if (extensionChooser_->WeightCounterBased()) {
+            LoopResolver loop_resolver(g_, extensionChooser_->wc());
+            LoopDetector loop_detector(&path, cov_map_);
+            size_t init_len = path.Length();
+            bool result = false;
+            while (path.Size() >= 1 && loop_detector.EdgeInShortLoop(path.Back())) {
+                loop_resolver.ResolveShortLoop(path);
+                if (init_len == path.Length()) {
+                    return result;
+                } else {
+                    result = true;
                 }
-                return true;
+                init_len = path.Length();
             }
-            return false;
+            return true;
         }
+        return false;
+    }
+
+protected:
+    DECL_LOGGER("SimpleExtender")
 
 };
 
@@ -1001,6 +1279,9 @@ class ScaffoldingPathExtender: public LoopDetectingPathExtender {
     ExtensionChooser::EdgeContainer sources_;
     std::shared_ptr<GapJoiner> gap_joiner_;
 
+//When check_sink_ is set to false, we can scaffold not only tips
+    bool check_sink_;
+
     void InitSources() {
         sources_.clear();
 
@@ -1019,35 +1300,71 @@ class ScaffoldingPathExtender: public LoopDetectingPathExtender {
 public:
 
     ScaffoldingPathExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map, std::shared_ptr<ExtensionChooser> extension_chooser,
-                            std::shared_ptr<GapJoiner> gap_joiner, size_t is, size_t max_loops, bool investigateShortLoops):
+                            std::shared_ptr<GapJoiner> gap_joiner, size_t is, size_t max_loops, bool investigateShortLoops, bool check_sink = true):
         LoopDetectingPathExtender(gp, cov_map, max_loops, investigateShortLoops, false, is),
             extension_chooser_(extension_chooser),
-            gap_joiner_(gap_joiner)
+            gap_joiner_(gap_joiner),check_sink_(check_sink)
     {
         InitSources();
     }
 
     virtual bool MakeSimpleGrowStep(BidirectionalPath& path) {
-        if (path.Size() < 1 || !IsSink(path.Back())) {
+        if (path.Size() < 1 || (check_sink_ && !IsSink(path.Back())) ) {
             return false;
         }
-        DEBUG("scaffolding");
+        DEBUG("scaffolding:");
+        DEBUG("Simple grow step, growing path");
+        path.Print();
         ExtensionChooser::EdgeContainer candidates = extension_chooser_->Filter(path, sources_);
         DEBUG("scaffolding candidates " << candidates.size() << " from sources " << sources_.size());
+
         if (candidates.size() == 1) {
             if (candidates[0].e_ == path.Back() || (cfg::get().avoid_rc_connections && candidates[0].e_ == g_.conjugate(path.Back()))) {
                 return false;
             }
-            int gap = cfg::get().pe_params.param_set.scaffolder_options.fix_gaps ?
-                            gap_joiner_->FixGap(path.Back(), candidates.back().e_, candidates.back().d_) : candidates.back().d_;
+            BidirectionalPath temp_path(path);
+            temp_path.PushBack(candidates[0].e_);
+            if(this->DetectCycleScaffolding(temp_path)) {
+                return false;
+            }
 
-            if (gap != GapJoiner::INVALID_GAP) {
-                DEBUG("Scaffolding. PathId: " << path.GetId() << " path length: " << path.Length() << ", fixed gap length: " << gap);
-                path.PushBack(candidates.back().e_, gap);
+            auto sc_mode = cfg::get().pe_params.param_set.sm;
+            EdgeId eid = candidates.back().e_;
+            if(cfg::get().pe_params.param_set.scaffolder_options.fix_gaps && check_sink_) {
+                Gap gap = gap_joiner_->FixGap(path.Back(), candidates.back().e_, candidates.back().d_);
+                if (gap.gap_ != GapJoiner::INVALID_GAP) {
+                    DEBUG("Scaffolding. PathId: " << path.GetId() << " path length: " << path.Length() <<
+                                                                                         ", fixed gap length: " << gap.gap_ << ", trash length: " << gap.trash_previous_ << "-" <<
+                                                                                         gap.trash_current_);
+
+                    if (is_2015_scaffolder_enabled(sc_mode)) {
+                        if (used_storage_->IsUsedAndUnique(eid)) {
+                            return false;
+                        } else {
+                            used_storage_->insert(eid);
+                        }
+                    }
+                    path.PushBack(eid, gap);
+                    return true;
+                }
+                else {
+                    DEBUG("Looks like wrong scaffolding. PathId: " << path.GetId() << " path length: " <<
+                                                                                      path.Length() << ", fixed gap length: " << candidates.back().d_);
+                    return false;
+                }
+            }
+            else {
+                DEBUG("Gap joiners off");
+                DEBUG("Scaffolding. PathId: " << path.GetId() << " path length: " << path.Length() << ", fixed gap length: " << candidates.back().d_ );
+                if (is_2015_scaffolder_enabled(sc_mode)) {
+                    if (used_storage_->IsUsedAndUnique(eid)) {
+                        return false;
+                    } else {
+                        used_storage_->insert(eid);
+                    }
+                }
+                path.PushBack(candidates.back().e_, candidates.back().d_);
                 return true;
-            } else {
-                DEBUG("Looks like wrong scaffolding. PathId: " << path.GetId() << " path length: " << path.Length() << ", fixed gap length: " << candidates.back().d_);
-                return false;
             }
         }
         DEBUG("scaffolding end");
@@ -1062,6 +1379,10 @@ public:
 		return false;
 	}
 
+    std::shared_ptr<ExtensionChooser> GetExtensionChooser() const {
+        return extension_chooser_;
+    }
+
 private:
 	DECL_LOGGER("ScaffoldingPathExtender");
 };
diff --git a/src/debruijn/path_extend/pe_config_struct.cpp b/src/debruijn/path_extend/pe_config_struct.cpp
index 8c437f9..cd332ac 100644
--- a/src/debruijn/path_extend/pe_config_struct.cpp
+++ b/src/debruijn/path_extend/pe_config_struct.cpp
@@ -11,101 +11,140 @@
 namespace path_extend {
 
 void load(output_broken_scaffolds& obs, boost::property_tree::ptree const& pt, std::string const& key, bool complete) {
-
   if (complete || pt.find(key) != pt.not_found()) {
     std::string ep = pt.get<std::string>(key);
     obs = pe_config::output_broken_scaffolds_id(ep);
   }
+}
+
+void load(scaffolding_mode &sm, boost::property_tree::ptree const& pt, std::string const& key, bool complete) {
+    if (complete || pt.find(key) != pt.not_found()) {
+        std::string ep = pt.get<std::string>(key);
+        sm = pe_config::scaffolding_mode_id(ep);
+    }
+}
 
+void load(pe_config::ParamSetT::ScaffoldGraphParamsT& sg, boost::property_tree::ptree const& pt, bool /*complete*/) {
+    using config_common::load;
+    load(sg.construct,          pt, "construct"         );
+    load(sg.output,             pt, "output"            );
+    load(sg.min_read_count,     pt, "min_read_count"    );
+    load(sg.graph_connectivity, pt, "graph_connectivity");
+    load(sg.max_path_length,    pt, "max_path_length"   );
 }
 
-void load(pe_config::OutputParamsT& o, boost::property_tree::ptree const& pt, bool /*complete*/) {
+void load(pe_config::OutputParamsT& o, boost::property_tree::ptree const& pt, bool complete) {
   using config_common::load;
 
-  load(o.write_overlaped_paths,   pt, "write_overlaped_paths" );
-  load(o.write_paths,             pt, "write_paths"           );
+  load(o.write_overlaped_paths,   pt, "write_overlaped_paths" , complete);
+  load(o.write_paths,             pt, "write_paths"           , complete);
 }
 
-void load(pe_config::VisualizeParamsT& o, boost::property_tree::ptree const& pt, bool /*complete*/) {
+void load(pe_config::VisualizeParamsT& o, boost::property_tree::ptree const& pt, bool complete) {
   using config_common::load;
-  load(o.print_overlaped_paths,   pt, "print_overlaped_paths" );
-  load(o.print_paths,             pt, "print_paths"           );
+  load(o.print_overlaped_paths,   pt, "print_overlaped_paths" , complete);
+  load(o.print_paths,             pt, "print_paths"           , complete);
 }
 
 void load(pe_config::ParamSetT::ExtensionOptionsT& es,
-          boost::property_tree::ptree const& pt, bool ) {
+          boost::property_tree::ptree const& pt, bool complete) {
     using config_common::load;
-    load(es.recalculate_threshold, pt, "recalculate_threshold");
-    load(es.priority_coeff, pt, "priority_coeff");
-    load(es.weight_threshold, pt, "weight_threshold");
-    load(es.single_threshold, pt, "single_threshold");
+    load(es.use_default_single_threshold, pt, "use_default_single_threshold", complete);
+    load(es.priority_coeff, pt, "priority_coeff", complete);
+    load(es.weight_threshold, pt, "weight_threshold", complete);
+    load(es.single_threshold, pt, "single_threshold", complete);
 }
 
 void load(pe_config::ParamSetT::LoopRemovalT& lr,
-          boost::property_tree::ptree const& pt, bool /*complete*/) {
+          boost::property_tree::ptree const& pt, bool complete) {
     using config_common::load;
-    load(lr.max_loops, pt, "max_loops");
-    load(lr.mp_max_loops, pt, "mp_max_loops");
+    load(lr.max_loops, pt, "max_loops", complete);
+    load(lr.mp_max_loops, pt, "mp_max_loops", complete);
 }
 
-void load(pe_config::ParamSetT::ScaffolderOptionsT& so, boost::property_tree::ptree const& pt, bool /*complete*/)
+void load(pe_config::ParamSetT::CoordinatedCoverageT& coord_cov,
+          boost::property_tree::ptree const& pt, bool complete) {
+    using config_common::load;
+    load(coord_cov.max_edge_length_in_repeat, pt, "max_edge_length_repeat", complete);
+    load(coord_cov.delta, pt, "delta", complete);
+}
+
+void load(pe_config::ParamSetT::ScaffolderOptionsT& so, 
+            boost::property_tree::ptree const& pt, bool complete)
 {
   using config_common::load;
-  load(so.on      , pt, "on"      );
-  load(so.cutoff      , pt, "cutoff"      );
-  load(so.rel_cutoff      , pt, "rel_cutoff"      );
-  load(so.sum_threshold      , pt, "sum_threshold"      );
-
-  load(so.cluster_info      , pt, "cluster_info"      );
-  load(so.cl_threshold      , pt, "cl_threshold"      );
-
-  load(so.fix_gaps      , pt, "fix_gaps"      );
-  load(so.min_gap_score      , pt, "min_gap_score"      );
-  load(so.max_must_overlap      , pt, "max_must_overlap"      );
-  load(so.max_can_overlap      , pt, "max_can_overlap"      );
-  load(so.short_overlap      , pt, "short_overlap"      );
-  load(so.artificial_gap      , pt, "artificial_gap"      );
-  load(so.use_old_score      , pt, "use_old_score"      );
+  load(so.on      , pt, "on"      , complete);
+  load(so.cutoff      , pt, "cutoff", complete);
+  load(so.rel_cutoff      , pt, "rel_cutoff", complete);
+  load(so.sum_threshold      , pt, "sum_threshold", complete);
+
+  load(so.cluster_info      , pt, "cluster_info", complete);
+  load(so.cl_threshold      , pt, "cl_threshold", complete);
+
+  load(so.fix_gaps      , pt, "fix_gaps", complete);
+  load(so.use_la_gap_joiner      , pt, "use_la_gap_joiner", complete);
+  load(so.min_gap_score      , pt, "min_gap_score", complete);
+  load(so.max_must_overlap      , pt, "max_must_overlap", complete);
+  load(so.max_can_overlap      , pt, "max_can_overlap", complete);
+  load(so.short_overlap      , pt, "short_overlap", complete);
+  load(so.artificial_gap      , pt, "artificial_gap", complete);
+  load(so.use_old_score      , pt, "use_old_score", complete);
+  load(so.min_overlap_length, pt, "min_overlap_length", complete);
+  load(so.flank_addition_coefficient, pt, "flank_addition_coefficient", complete);
+  load(so.flank_multiplication_coefficient, pt, "flank_multiplication_coefficient", complete);
 }
 
-void load(pe_config::ParamSetT& p, boost::property_tree::ptree const& pt, bool /*complete*/) {
-
-  using config_common::load;
-  load(p.normalize_weight, pt,  "normalize_weight");
-  load(p.cut_all_overlaps, pt, "cut_all_overlaps");
-  load(p.split_edge_length, pt, "split_edge_length");
-  load(p.extension_options, pt, "extension_options");
-  load(p.mate_pair_options, pt, "mate_pair_options");
-  load(p.scaffolder_options, pt, "scaffolder");
-    load(p.loop_removal, pt, "loop_removal");
-    load(p.remove_overlaps, pt, "remove_overlaps");
+void load(pe_config::ParamSetT& p, boost::property_tree::ptree const& pt, bool complete) {
+    using config_common::load;
+    load(p.sm, pt, "scaffolding_mode", complete);
+    load(p.normalize_weight, pt,  "normalize_weight", complete);
+    load(p.cut_all_overlaps, pt, "cut_all_overlaps", complete);
+    load(p.split_edge_length, pt, "split_edge_length", complete);
+    load(p.extension_options, pt, "extension_options", complete);
+    load(p.mate_pair_options, pt, "mate_pair_options", complete);
+    load(p.scaffolder_options, pt, "scaffolder", complete);
+    load(p.loop_removal, pt, "loop_removal", complete);
+    load(p.coordinated_coverage, pt, "coordinated_coverage", complete);
+    load(p.remove_overlaps, pt, "remove_overlaps", complete);
+    load(p.use_coordinated_coverage, pt, "use_coordinated_coverage", complete);
+    load(p.scaffolding2015, pt, "scaffolding2015", complete);
+    load(p.scaffold_graph_params, pt, "scaffold_graph", complete);
 }
 
+
 void load(pe_config::LongReads& p, boost::property_tree::ptree const& pt,
+          bool complete) {
+    using config_common::load;
+    load(p.filtering, pt, "filtering", complete);
+    load(p.weight_priority, pt, "weight_priority", complete);
+    load(p.unique_edge_priority, pt, "unique_edge_priority", complete);
+}
+
+void load(pe_config::ParamSetT::Scaffolding2015& p, boost::property_tree::ptree const& pt,
           bool) {
     using config_common::load;
-    load(p.filtering, pt, "filtering");
-    load(p.weight_priority, pt, "weight_priority");
-    load(p.unique_edge_priority, pt, "unique_edge_priority");
+    load(p.autodetect, pt, "autodetect");
+    load(p.min_unique_length, pt, "min_unique_length");
+    load(p.unique_coverage_variation, pt, "unique_coverage_variation");
 }
 
 void load(pe_config::AllLongReads& p, boost::property_tree::ptree const& pt,
-          bool) {
+          bool complete) {
     using config_common::load;
-    load(p.pacbio_reads, pt, "pacbio_reads");
-    load(p.single_reads, pt, "single_reads");
-    load(p.contigs, pt, "coverage_base_rr");
+    load(p.pacbio_reads, pt, "pacbio_reads", complete);
+    load(p.single_reads, pt, "single_reads", complete);
+    load(p.contigs, pt, "coverage_base_rr", complete);
 }
 
 void load(pe_config::MainPEParamsT& p, boost::property_tree::ptree const& pt,
-          bool /*complete*/) {
+          bool complete) {
     using config_common::load;
-    load(p.debug_output, pt, "debug_output");
-    load(p.output, pt, "output");
-    load(p.viz, pt, "visualize");
-    load(p.param_set, pt, p.name.c_str());
-    load(p.obs, pt, "output_broken_scaffolds");
-    load(p.long_reads, pt, "long_reads");
+    load(p.debug_output, pt, "debug_output", complete);
+    load(p.output, pt, "output", complete);
+    load(p.viz, pt, "visualize", complete);
+    load(p.obs, pt, "output_broken_scaffolds", complete);
+    load(p.param_set, pt, "params", complete);
+    load(p.long_reads, pt, "long_reads", complete);
     if (!p.debug_output) {
         p.output.DisableAll();
         p.viz.DisableAll();
@@ -113,14 +152,13 @@ void load(pe_config::MainPEParamsT& p, boost::property_tree::ptree const& pt,
     p.etc_dir = "path_extend";
 }
 
-
-// main long contigs config load function
-void load(pe_config& pe_cfg, boost::property_tree::ptree const& pt, bool /*complete*/) {
-  using config_common::load;
-
-  load(pe_cfg.dataset_name           , pt, "dataset"               );
-  load(pe_cfg.params                 , pt, "pe_params"             );
-}
+//// main long contigs config load function
+//void load(pe_config& pe_cfg, boost::property_tree::ptree const& pt, bool complete) {
+//  using config_common::load;
+//
+//  load(pe_cfg.dataset_name           , pt, "dataset", complete);
+//  load(pe_cfg.params                 , pt, "pe_params", complete);
+//}
 
 };
 
diff --git a/src/debruijn/path_extend/pe_config_struct.hpp b/src/debruijn/path_extend/pe_config_struct.hpp
index 985a6ee..286e0bf 100644
--- a/src/debruijn/path_extend/pe_config_struct.hpp
+++ b/src/debruijn/path_extend/pe_config_struct.hpp
@@ -27,14 +27,23 @@
 
 namespace path_extend {
 
-const char * const pe_cfg_filename = "./config/debruijn/path_extend/lc_config.info";
-
 enum output_broken_scaffolds {
     obs_none,
     obs_break_gaps,
     obs_break_all
 };
 
+enum scaffolding_mode {
+    sm_old,
+    sm_2015,
+    sm_combined,
+    sm_old_pe_2015
+};
+
+inline bool is_2015_scaffolder_enabled(const scaffolding_mode mode) {
+    return (mode != sm_old);
+}
+
 // struct for path extend subproject's configuration file
 struct pe_config {
 
@@ -58,7 +67,7 @@ struct pe_config {
   static const std::string& output_broken_scaffolds_name(output_broken_scaffolds obs) {
     auto it = output_broken_scaffolds_info().right.find(obs);
     VERIFY_MSG(it != output_broken_scaffolds_info().right.end(),
-               "No name for working stage id = " << obs);
+               "No name for output broken scaffolds mode id = " << obs);
 
     return it->second;
   }
@@ -66,7 +75,41 @@ struct pe_config {
   static output_broken_scaffolds output_broken_scaffolds_id(std::string name) {
     auto it = output_broken_scaffolds_info().left.find(name);
     VERIFY_MSG(it != output_broken_scaffolds_info().left.end(),
-               "There is no working stage with name = " << name);
+               "There is no output broken scaffolds mode with name = " << name);
+
+    return it->second;
+  }
+
+  typedef boost::bimap<std::string, scaffolding_mode> scaffolding_mode_id_mapping;
+
+  static const scaffolding_mode_id_mapping FillSMInfo() {
+      scaffolding_mode_id_mapping::value_type info[] = {
+              scaffolding_mode_id_mapping::value_type("old", sm_old),
+              scaffolding_mode_id_mapping::value_type("2015", sm_2015),
+              scaffolding_mode_id_mapping::value_type("combined", sm_combined),
+              scaffolding_mode_id_mapping::value_type("old_pe_2015", sm_old_pe_2015)
+    };
+
+    return scaffolding_mode_id_mapping(info, utils::array_end(info));
+  }
+
+  static const scaffolding_mode_id_mapping& scaffolding_mode_info() {
+    static scaffolding_mode_id_mapping scaffolding_mode_info = FillSMInfo();
+    return scaffolding_mode_info;
+  }
+
+  static const std::string& scaffolding_mode_name(scaffolding_mode sm) {
+    auto it = scaffolding_mode_info().right.find(sm);
+    VERIFY_MSG(it != scaffolding_mode_info().right.end(),
+               "No name for scaffolding mode id = " << sm);
+
+    return it->second;
+  }
+
+  static scaffolding_mode scaffolding_mode_id(std::string name) {
+    auto it = scaffolding_mode_info().left.find(name);
+    VERIFY_MSG(it != scaffolding_mode_info().left.end(),
+               "There is no scaffolding mode with name = " << name);
 
     return it->second;
   }
@@ -81,6 +124,8 @@ struct pe_config {
     }
   };
 
+
+
   struct VisualizeParamsT {
     bool print_overlaped_paths;
     bool print_paths;
@@ -92,12 +137,14 @@ struct pe_config {
   };
 
   struct ParamSetT {
+    scaffolding_mode sm;
+
     bool normalize_weight;
     size_t split_edge_length;
     bool cut_all_overlaps;
 
     struct ExtensionOptionsT {
-        bool recalculate_threshold;
+        bool use_default_single_threshold;
         double single_threshold;
         double weight_threshold;
         double priority_coeff;
@@ -116,6 +163,7 @@ struct pe_config {
       double cl_threshold;
 
       bool fix_gaps;
+      bool use_la_gap_joiner;
       double min_gap_score;
       double max_must_overlap;
       double max_can_overlap;
@@ -123,6 +171,10 @@ struct pe_config {
       size_t artificial_gap;
 
       bool use_old_score;
+
+      size_t min_overlap_length;
+      double flank_addition_coefficient;
+      double flank_multiplication_coefficient;
     } scaffolder_options;
 
 
@@ -132,6 +184,24 @@ struct pe_config {
     } loop_removal;
 
     bool remove_overlaps;
+    bool use_coordinated_coverage;
+
+    struct CoordinatedCoverageT {
+      size_t max_edge_length_in_repeat;
+      double delta;
+    } coordinated_coverage;
+      struct Scaffolding2015 {
+          bool autodetect;
+          size_t min_unique_length;
+          double unique_coverage_variation;
+      } scaffolding2015;
+      struct ScaffoldGraphParamsT {
+          bool construct;
+          bool output;
+          size_t min_read_count;
+          bool graph_connectivity;
+          size_t max_path_length;
+      } scaffold_graph_params;
   };
 
   struct LongReads {
@@ -146,10 +216,11 @@ struct pe_config {
       LongReads contigs;
   };
 
+
   struct MainPEParamsT {
-    std::string name;
     output_broken_scaffolds obs;
 
+    bool finalize_paths;
     bool debug_output;
     std::string etc_dir;
 
@@ -157,20 +228,16 @@ struct pe_config {
     VisualizeParamsT viz;
     ParamSetT param_set;
     AllLongReads long_reads;
-  } params;
-
-  std::string dataset_name;
-
+  }; // params;
 
 };
 
-
-
-void load(pe_config::MainPEParamsT& p, boost::property_tree::ptree const& pt, bool complete);
-void load(pe_config& pe_cfg, boost::property_tree::ptree const& pt, bool complete);
+void load(pe_config::ParamSetT& p, boost::property_tree::ptree const& pt, bool complete = true);
+void load(pe_config::MainPEParamsT& p, boost::property_tree::ptree const& pt, bool complete = true);
+//void load(pe_config& pe_cfg, boost::property_tree::ptree const& pt, bool complete);
 
 }
 
-typedef config_common::config<path_extend::pe_config> pe_cfg;
+//typedef config_common::config<path_extend::pe_config> pe_cfg;
 
 #endif /* CONFIG_STRUCT_HPP_ */
diff --git a/src/debruijn/path_extend/pe_io.hpp b/src/debruijn/path_extend/pe_io.hpp
index 8beb30d..01f1a91 100644
--- a/src/debruijn/path_extend/pe_io.hpp
+++ b/src/debruijn/path_extend/pe_io.hpp
@@ -19,7 +19,7 @@
 #include "bidirectional_path.hpp"
 #include "contig_output.hpp"
 #include "io/osequencestream.hpp"
-
+#include "genome_consistance_checker.hpp"
 namespace path_extend {
 
 using namespace debruijn_graph;
@@ -29,39 +29,52 @@ protected:
     DECL_LOGGER("PathExtendIO")
 
 protected:
-    const Graph& g_;
+	const Graph& g_;
     ContigConstructor<Graph> &constructor_;
     size_t k_;
     map<EdgeId, ExtendedContigIdT> ids_;
 
-    string ToString(const BidirectionalPath& path) const {
-        stringstream ss;
-        if (path.IsInterstrandBulge() && path.Size() == 1) {
-            ss << constructor_.construct(path.Back()).first.substr(k_, g_.length(path.Back()) - k_);
-            return ss.str();
-        }
+    //TODO: add constructor
+	string ToString(const BidirectionalPath& path) const {
+		stringstream ss;
+		if (path.IsInterstrandBulge() && path.Size() == 1) {
+		    ss << constructor_.construct(path.Back()).first.substr(k_, g_.length(path.Back()) - k_);
+		    return ss.str();
+		}
 
-        if (!path.Empty()) {
+		if (!path.Empty()) {
             ss << constructor_.construct(path[0]).first.substr(0, k_);
-        }
-
-        for (size_t i = 0; i < path.Size(); ++i) {
-            int gap = i == 0 ? 0 : path.GapAt(i);
-            if (gap > (int) k_) {
-                for (size_t j = 0; j < gap - k_; ++j) {
-                    ss << "N";
-                }
+		}
+
+		for (size_t i = 0; i < path.Size(); ++i) {
+			int gap = i == 0 ? 0 : path.GapAt(i);
+			if (gap > (int) k_) {
+				for (size_t j = 0; j < gap - k_; ++j) {
+					ss << "N";
+				}
                 ss << constructor_.construct(path[i]).first;
-            } else {
-                int overlapLen = (int) k_ - gap;
-                if (overlapLen >= (int) g_.length(path[i]) + (int) k_) {
-                    continue;
-                }
-                ss << constructor_.construct(path[i]).first.substr((size_t) overlapLen);
-            }
-        }
-        return ss.str();
-    }
+			} else {
+				int overlapLen = (int) k_ - gap;
+				if (overlapLen >= (int) g_.length(path[i]) + (int) k_) {
+				    if(overlapLen > (int) g_.length(path[i]) + (int) k_) {
+	                    WARN("Such scaffolding logic leads to local misassemblies");
+				    }
+					continue;
+				}
+				auto temp_str = g_.EdgeNucls(path[i]).Subseq(overlapLen).str();
+				if(i != path.Size() - 1) {
+	                for(size_t j = 0 ; j < path.TrashPreviousAt(i + 1); ++j) {
+	                    temp_str.pop_back();
+	                    if(temp_str.size() == 0) {
+	                        break;
+	                    }
+	                }
+				}
+				ss << temp_str;
+			}
+		}
+		return ss.str();
+	}
 
     string ToFASTGString(const BidirectionalPath& path) const {
         if (path.Empty())
@@ -111,21 +124,21 @@ public:
         int i = 1;
         oss << paths.size() << endl;
         for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-            //oss << i << endl;
-            i++;
+			//oss << i << endl;
+			i++;
             BidirectionalPath* path = iter.get();
             if (path->GetId() % 2 != 0) {
                 path = path->GetConjPath();
             }
             oss << "PATH " << path->GetId() << " " << path->Size() << " " << path->Length() + k_ << endl;
             for (size_t j = 0; j < path->Size(); ++j) {
-                oss << g_.int_id(path->At(j)) << " " << g_.length(path->At(j)) <<  " " << path->GapAt(j) << endl;
+			    oss << g_.int_id(path->At(j)) << " " << g_.length(path->At(j)) <<  " " << path->GapAt(j) <<  " " << path->TrashPreviousAt(j) <<  " " << path->TrashCurrentAt(j) << endl;
             }
             //oss << endl;
-        }
-        oss.close();
-        DEBUG("Edges written");
-    }
+		}
+		oss.close();
+		DEBUG("Edges written");
+	}
 
     void LoadPaths(PathContainer &paths, GraphCoverageMap &cover_map, const string &filename) const {
         paths.clear();
@@ -155,9 +168,13 @@ public:
                 size_t eid;
                 size_t elen;
                 int gap;
-                iss >> eid >> elen >> gap;
+                uint32_t trash_prev;
+                uint32_t trash_current;
+
+                iss >> eid >> elen >> gap >> trash_prev >> trash_current;
+                Gap gap_struct(gap, trash_prev, trash_current);
                 EdgeId edge = int_ids[eid];
-                conjugatePath->PushBack(edge, gap);
+                conjugatePath->PushBack(edge, gap_struct);
                 VERIFY(g_.length(edge) == elen);
             }
             VERIFY(path->Length() + k_ == len);
@@ -184,7 +201,7 @@ public:
         	DEBUG("NODE " << ++i);
             BidirectionalPath* path = iter.get();
             path->Print();
-            oss.setID((int) path->GetId());
+        	oss.setID((int) path->GetId());
             oss.setCoverage(path->Coverage());
             string path_string = ToString(*path);
 
@@ -202,6 +219,22 @@ public:
         DEBUG("Contigs written");
     }
 
+
+    //TODO: DimaA insert somewhere
+    /*
+            auto map_res = genome_checker.CountMisassemblies(*path);
+            if (map_res.misassemblies > 0) {
+                INFO ("there are "<< map_res.misassemblies<<  " misassemblies in path: ");
+                path->PrintInfo();
+                total_mis += map_res.misassemblies;
+            }
+            if (map_res.wrong_gap_size > 0) {
+                INFO ("there are "<<map_res.wrong_gap_size <<" wrong gaps in path: ");
+                path->PrintInfo();
+                gap_mis += map_res.wrong_gap_size;
+            }
+      */
+
     void WriteFASTGPaths(const PathContainer& paths, const string& filename) const {
         INFO("Writing FASTG paths to " << filename);
         std::ofstream oss(filename.c_str());
@@ -218,9 +251,11 @@ public:
     void OutputPaths(const PathContainer& paths, const string& filename_base) const {
         WritePathsToFASTA(paths, filename_base);
     }
+
 };
 
 
+
 class PathInfoWriter {
 protected:
     DECL_LOGGER("PathExtendIO")
diff --git a/src/debruijn/path_extend/pe_resolver.hpp b/src/debruijn/path_extend/pe_resolver.hpp
index 01f3ccc..5c0f976 100644
--- a/src/debruijn/path_extend/pe_resolver.hpp
+++ b/src/debruijn/path_extend/pe_resolver.hpp
@@ -56,10 +56,13 @@ public:
         }
     }
 
-    void RemoveSimilarPaths(PathContainer& paths, size_t max_overlap, bool del_only_equal, bool del_subpaths, bool del_begins, bool del_all, bool add_overlap_begins) const {
+    void RemoveSimilarPaths(PathContainer& paths, size_t min_edge_len, size_t max_path_diff, bool del_only_equal, bool del_subpaths, bool del_begins, bool del_all, bool add_overlap_begins) const {
+        DEBUG("== Removing similar paths ==");
+        DEBUG("Min edge len " << min_edge_len << ", max path diff " << max_path_diff)
+        DEBUG("Only equal " << del_only_equal << ", subpaths " << del_subpaths << ", starts " << del_begins << ", all " << del_all << ", add starts " << add_overlap_begins);
         std::vector<EdgeId> edges = GetSortedEdges();
-        for (size_t edgeId = 0; edgeId < edges.size(); ++edgeId) {
-            EdgeId edge = edges.at(edgeId);
+        for (size_t edgeIndex = 0; edgeIndex < edges.size(); ++edgeIndex) {
+            EdgeId edge = edges.at(edgeIndex);
             BidirectionalPathSet cov_paths = coverage_map_.GetCoveringPaths(edge);
             std::vector<BidirectionalPath*> cov_vect(cov_paths.begin(), cov_paths.end());
             std::sort(cov_vect.begin(), cov_vect.end(), PathIdCompare);
@@ -79,36 +82,48 @@ public:
                         if (path2->IsOverlap()) {
                             path1->SetOverlap(true);
                         }
+                        DEBUG("Removing path " << path2->GetId() << " because of path " << path1->GetId());
+                        path2->Print();
+                        path1->Print();
                         path2->Clear();
                         cov_paths = coverage_map_.GetCoveringPaths(edge);
                         continue;
                     }
-                    if (g_.length(edge) <= max_overlap || path1->IsOverlap() || path2->IsOverlap() || del_only_equal) {
+                    if (g_.length(edge) <= min_edge_len || path1->IsOverlap() || path2->IsOverlap() || del_only_equal) {
                         continue;
                     }
-                    CompareAndCut(paths, edge, path1, path2, (int) max_overlap,
+                    CompareAndCut(paths, edge, path1, path2, max_path_diff,
                                   del_subpaths, del_begins, del_all, add_overlap_begins);
                     cov_paths = coverage_map_.GetCoveringPaths(edge);
                 }
             }
         }
+        DEBUG("== End removing similar paths ==");
     }
 
 private:
     
-    void CompareAndCut(PathContainer& paths, EdgeId edge, BidirectionalPath* path1, BidirectionalPath* path2, size_t max_overlap, bool del_subpaths, bool del_begins,
+    void SubscribeCoverageMap(BidirectionalPath* path) const {
+        path->Subscribe(&coverage_map_);
+        for (size_t i = 0; i < path->Size(); ++i) {
+            coverage_map_.BackEdgeAdded(path->At(i), path, path->GapAt(i));
+        }
+    }
+
+    void CompareAndCut(PathContainer& paths, EdgeId edge, BidirectionalPath* path1, BidirectionalPath* path2,
+                       size_t max_path_diff,
+                       bool del_subpaths, bool del_begins,
                        bool del_all, bool add_overlap_begins) const {
         vector<size_t> positions1 = path1->FindAll(edge);
         vector<size_t> positions2 = path2->FindAll(edge);
         size_t i1 = 0;
         size_t i2 = 0;
-
         bool renewed = false;
         while (i1 < positions1.size()) {
             while (i2 < positions2.size()) {
                 DEBUG("CompareAndCutFromPos paths " << g_.int_id(edge));
                 CompareAndCutFromPos(paths, path1, (int) positions1[i1], path2,
-                                     (int) positions2[i2], (int) max_overlap,
+                                     (int) positions2[i2], max_path_diff,
                                      del_subpaths, del_begins, del_all, add_overlap_begins);
 
                 if (positions1[i1] >= path1->Size() || path1->At(positions1[i1]) != edge || positions2[i2] >= path2->Size() || path2->At(positions2[i2]) != edge) {
@@ -140,96 +155,144 @@ private:
     }
 
     void CompareAndCutFromPos(PathContainer& paths, BidirectionalPath* path1, int pos1,
-                       BidirectionalPath* path2, int pos2, int max_overlap,
-                       bool delete_subpaths, bool delete_begins,
-                       bool delete_all, bool add_overlap_begins) const {
+                              BidirectionalPath* path2, int pos2,
+                              size_t max_path_diff,
+                              bool delete_subpaths, bool delete_begins,
+                              bool delete_all, bool add_overlap_begins) const {
         int last2 = pos2;
         int last1 = pos1;
         if (last1 >= (int) path1->Size() || last2 >= (int) path2->Size()) {
             return;
         }
         vector<int> other_path_end;
-        pair<int, int> posRes = ComparePaths(last1, last2, *path1, *path2,
-                                             max_overlap);
+        pair<int, int> posRes = ComparePaths(last1, last2, *path1, *path2, max_path_diff);
         last1 = posRes.first;
         last2 = posRes.second;
         BidirectionalPath* conj1 = path1->GetConjPath();
         BidirectionalPath* conj2 = path2->GetConjPath();
         size_t first1 = conj1->Size() - pos1 - 1;
         size_t first2 = conj2->Size() - pos2 - 1;
-        posRes = ComparePaths(first1, first2, *conj1, *conj2, max_overlap);
+        posRes = ComparePaths(first1, first2, *conj1, *conj2, max_path_diff);
         first2 = conj2->Size() - posRes.second - 1;
         first1 = conj1->Size() - posRes.first - 1;
-        if ((int)path2->LengthAt(last2) - (int)g_.length(path2->At(last2)) < (int)max_overlap) {
+        if ((int)path2->LengthAt(last2) - (int)g_.length(path2->At(last2)) < (int) max_path_diff) {
             last2 = (int)path2->Size() - 1;
         }
-        if ((int)path2->Length() - (int)path2->LengthAt(first2) < (int)max_overlap) {
+        if ((int)path2->Length() - (int)path2->LengthAt(first2) < (int) max_path_diff) {
             first2 = 0;
         }
-        if ((int)path1->LengthAt(last1) - (int)g_.length(path1->At(last1)) < (int)max_overlap) {
+        if ((int)path1->LengthAt(last1) - (int)g_.length(path1->At(last1)) < (int) max_path_diff) {
             last1 = (int)path1->Size() - 1;
         }
-        if ((int)path1->Length() - (int)path1->LengthAt(first1) < (int)max_overlap) {
+        if ((int)path1->Length() - (int)path1->LengthAt(first1) < (int) max_path_diff) {
             first1 = 0;
         }
 
-        if (!CutOverlaps(paths, path1, first1, last1, path1->Size(), path2,
+        CutOverlaps(paths, path1, first1, last1, path1->Size(), path2,
                          first2, last2, path2->Size(), delete_subpaths,
-                         delete_begins, delete_all, add_overlap_begins)) {
-            size_t common_length = path1->LengthAt(first1)
-                    - path1->LengthAt(last1) + g_.length(path1->At(last1));
-            if (common_length > cfg::get().max_repeat_length) {
-                DEBUG("Similar paths were not deleted " << common_length
-                      << " before common 1 " << (path1->Length() - path1->LengthAt(first1))
-                      <<" after common 1 " << (path1->LengthAt(last1) - g_.length(path1->At(last1)))
-                      << " before common 2 " << (path2->Length() - path2->LengthAt(first2))
-                      << " after common 2 " << (path2->LengthAt(last2) - g_.length(path2->At(last2))));
-                path1->Print();
-                path2->Print();
-            }
-        }
+                         delete_begins, delete_all, add_overlap_begins);
     }
 
     void AddOverlap(PathContainer& paths, BidirectionalPath* path1, size_t first1, size_t last1) const {
         BidirectionalPath* overlap = new BidirectionalPath(path1->SubPath(first1, last1 + 1));
         BidirectionalPath* conj_overlap = new BidirectionalPath(overlap->Conjugate());
+        SubscribeCoverageMap(overlap);
+        SubscribeCoverageMap(conj_overlap);
         paths.AddPair(overlap, conj_overlap);
     }
 
     bool CutOverlaps(PathContainer& paths, BidirectionalPath* path1, size_t first1, size_t last1, size_t size1, BidirectionalPath* path2, size_t first2,
                      size_t last2, size_t size2, bool del_subpaths, bool del_begins, bool del_all, bool add_overlap_begins) const {
-        if (first1 == 0 && last1 == size1 - 1 && del_subpaths && !path1->HasOverlapedBegin() && !path1->HasOverlapedEnd()) {
+        if (first1 == 0 && last1 == size1 - 1 && del_subpaths) {
+            DEBUG("Removing path " << path1->GetId() << " because of path " << path2->GetId());
+            path1->Print();
+            path2->Print();
             path1->Clear();
-        } else if (first2 == 0 && last2 == size2 - 1 && del_subpaths && !path2->HasOverlapedBegin() && !path2->HasOverlapedEnd()) {
+        } else if (first2 == 0 && last2 == size2 - 1 && del_subpaths) {
+            DEBUG("Removing path " << path2->GetId() << " because of path " << path1->GetId());
+            path2->Print();
+            path1->Print();
             path2->Clear();
         } else if (first2 == 0 && first1 == 0 && del_begins) {
-            if (add_overlap_begins && !path1->HasOverlapedBegin() && !path2->HasOverlapedBegin()) {
+            DEBUG("Path " << path1->GetId() << ", len " << path1->Length() << " and path " << path2->GetId() << ", len " << path2->Length() <<  " have similar starts");
+            DEBUG("Path 1: " << last1 << " edges of length " << path1->Length() - path1->LengthAt(min(last1 + 1, path1->Size() - 1)));
+            DEBUG("Path 2: " << last2 << " edges of length " << path2->Length() - path2->LengthAt(min(last2 + 1, path2->Size() - 1)));
+            DEBUG("Path 1 has overlap start " << path1->HasOverlapedBegin() << ", path 2 has overlap start " <<  path2->HasOverlapedBegin());
+
+            if (add_overlap_begins) {
                 AddOverlap(paths, path1, first1, last1);
+                DEBUG("Detaching overlap " << path2->GetId() << " and " << path1->GetId());
+                path2->Print();
+                path1->Print();
                 path1->GetConjPath()->PopBack(last1 + 1);
                 path2->GetConjPath()->PopBack(last2 + 1);
-            } else if (path1->Length() < path2->Length() && !path1->HasOverlapedBegin()) {
+            } else if (path1->Length() < path2->Length()) {
+                DEBUG("Detaching overlap from " << path1->GetId() << " because of "<< path2->GetId());
+                path1->Print();
+                path2->Print();
                 path1->GetConjPath()->PopBack(last1 + 1);
-            } else if (!path2->HasOverlapedBegin()) {
+            } else {
+                DEBUG("Detaching overlap from " << path2->GetId() << " because of "<< path1->GetId());
+                path2->Print();
+                path1->Print();
                 path2->GetConjPath()->PopBack(last2 + 1);
             }
         } else if ((last1 == size1 - 1 && last2 == size2 - 1) && del_begins) {
-            if (add_overlap_begins && !path1->HasOverlapedEnd() && !path2->HasOverlapedEnd()){
+            DEBUG("Path " << path1->GetId() << ", len " << path1->Length() << " and path " << path2->GetId() << ", len " << path2->Length() << " have similar ends");
+            DEBUG("Path 1: " << path1->Size() - first1 << " edges of length " << path1->LengthAt(first1));
+            DEBUG("Path 2: " << path2->Size() - first2 << " edges of length " << path2->LengthAt(first2));
+            DEBUG("Path 1 has overlap end " << path1->HasOverlapedEnd() << ", path 2 has overlap end " <<  path2->HasOverlapedEnd());
+
+            if (add_overlap_begins){
                 AddOverlap(paths, path1, first1, last1);
+                DEBUG("Detaching overlap " << path2->GetId() << " and " << path1->GetId());
+                path2->Print();
+                path1->Print();
                 path1->PopBack(last1 + 1 - first1);
                 path2->PopBack(last2 + 1 - first2);
             }
-            if (path1->Length() < path2->Length() && !path1->HasOverlapedEnd()) {
+            if (path1->Length() < path2->Length()) {
+                DEBUG("Detaching overlap from " << path1->GetId() << " because of "<< path2->GetId());
+                path1->Print();
+                path2->Print();
                 path1->PopBack(last1 + 1 - first1);
-            } else if (!path2->HasOverlapedEnd()) {
+            } else {
+                DEBUG("Detaching overlap from " << path2->GetId() << " because of "<< path1->GetId());
+                path2->Print();
+                path1->Print();
                 path2->PopBack(last2 + 1 - first2);
             }
-        } else if (first2 == 0 && del_all && !path2->HasOverlapedBegin()) {
+        } else if (first2 == 0 && del_all) {
+            DEBUG("Detaching overlap from " << path2->GetConjPath()->GetId() << " because of "<< path1->GetId());
+            DEBUG("Does it have overlap in the beginning: " << path2->HasOverlapedBegin());
+            path2->Print();
+            DEBUG(" >>>> ")
+            path1->Print();
+            DEBUG(" ==== ");
             path2->GetConjPath()->PopBack(last2 + 1);
-        } else if (last2 == size2 - 1 && del_all && !path2->HasOverlapedEnd()) {
+        } else if (last2 == size2 - 1 && del_all) {
+            DEBUG("Detaching overlap from " << path2->GetId() << " because of "<< path1->GetId());
+            DEBUG("Does it have overlap in the end: " << path2->HasOverlapedEnd());
+            path2->Print();
+            DEBUG(" >>>> ")
+            path1->Print();
+            DEBUG(" ==== ");
             path2->PopBack(last1 + 1 - first1);
-        } else if (first1 == 0 && del_all && !path1->HasOverlapedBegin()) {
+        } else if (first1 == 0 && del_all) {
+            DEBUG("Detaching overlap from " << path1->GetConjPath()->GetId() << " because of "<< path2->GetId());
+            DEBUG("Does it have overlap in the end: " << path1->HasOverlapedBegin());
+            path1->Print();
+            DEBUG(" >>>> ")
+            path2->Print();
+            DEBUG(" ==== ");
             path1->GetConjPath()->PopBack(last1 + 1);
-        } else if (last1 == size1 - 1 && del_all && !path1->HasOverlapedEnd()) {
+        } else if (last1 == size1 - 1 && del_all) {
+            DEBUG("Detaching overlap from " << path1->GetId() << " because of "<< path2->GetId());
+            DEBUG("Does it have overlap in the end: " << path1->HasOverlapedBegin());
+            path1->Print();
+            DEBUG(" >>>> ")
+            path2->Print();
+            DEBUG(" ==== ");
             path1->PopBack(last1 + 1 - first1);
         } else {
             return false;
@@ -265,9 +328,15 @@ private:
                        BidirectionalPath* path2, size_t overlap_size) const {
         BidirectionalPath* conj2 = path2->GetConjPath();
         if (path1->IsOverlap() && overlap_size == path1->Size()) {
+            DEBUG("Detaching overlap from " << path2->GetConjPath()->GetId() << " because of "<< path1->GetId());
+            path2->Print();
+            path1->Print();
             conj2->PopBack(overlap_size);
             path2->SetOverlapedBeginTo(path1);
         } else if (path2->IsOverlap() && path2->Size() == overlap_size) {
+            DEBUG("Detaching overlap from " << path1->GetId() << " because of "<< path2->GetId());
+            path1->Print();
+            path2->Print();
             path1->PopBack(overlap_size);
             path1->SetOverlapedEndTo(path2);
         } else if (overlap_size < path2->Size()
@@ -275,17 +344,21 @@ private:
             BidirectionalPath* overlap = new BidirectionalPath(g_, path1->Back());
             BidirectionalPath* conj_overlap = new BidirectionalPath(
                     g_, g_.conjugate(path1->Back()));
+            SubscribeCoverageMap(overlap);
+            SubscribeCoverageMap(conj_overlap);
             paths.AddPair(overlap, conj_overlap);
+            DEBUG("Detaching overlap " << path1->GetId() << " and " << conj2->GetId());
+            path1->Print();
+            conj2->Print();
             path1->PopBack();
             conj2->PopBack();
+
             for (size_t i = 1; i < overlap_size; ++i) {
                 conj_overlap->PushBack(g_.conjugate(path1->Back()));
                 path1->PopBack();
                 conj2->PopBack();
             }
-            coverage_map_.Subscribe(overlap);
             overlap->SetOverlap(true);
-            coverage_map_.Subscribe(conj_overlap);
             path1->SetOverlapedEndTo(overlap);
             path2->SetOverlapedBeginTo(overlap);
         }
@@ -294,8 +367,7 @@ private:
     void FindAndRemovePathOverlap(PathContainer& all_paths,
                                   BidirectionalPath* path1) const {
         int last = (int) path1->Size() - 1;
-        if (last <= 0 or coverage_map_.GetCoverage(path1->At(last)) <= 1
-                or HasAlreadyOverlapedEnd(path1)) {
+        if (last <= 0 or coverage_map_.GetCoverage(path1->At(last)) <= 1) {
             return;
         }
         BidirectionalPathSet paths =
@@ -304,7 +376,7 @@ private:
         size_t overlap_size = 0;
         for (auto path_iter = paths.begin(); path_iter != paths.end();
                 ++path_iter) {
-            if (IsSamePath(*path_iter, path1) || HasAlreadyOverlapedBegin(*path_iter)) {
+            if (IsSamePath(*path_iter, path1)) {
                 continue;
             }
             size_t over_size = path1->OverlapEndSize(*path_iter);
@@ -330,10 +402,10 @@ private:
                 : g_(g) {
         }
         bool operator()(const EdgeId& e1, const EdgeId& e2) const {
-            if (g_.length(e1) < g_.length(e2)) {
+            if (g_.length(e1) > g_.length(e2)) {
                 return true;
             }
-            if (g_.length(e2) < g_.length(e1)) {
+            if (g_.length(e2) > g_.length(e1)) {
                 return false;
             }
             return e1.int_id() < e2.int_id();
@@ -362,11 +434,12 @@ public:
 		std::set<EdgeId> included;
 		PathContainer edges;
 		for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
-			if (g_.int_id(*iter) <= 0 or InCycle(*iter, g_))
+			if (g_.int_id(*iter) <= 0 or InTwoEdgeCycle(*iter, g_))
 				continue;
             if (included.count(*iter) == 0) {
-				edges.AddPair(new BidirectionalPath(g_, *iter),
-                              new BidirectionalPath(g_, g_.conjugate(*iter)));
+                BidirectionalPath * first = new BidirectionalPath(g_, *iter);
+                BidirectionalPath * second = new BidirectionalPath(g_, g_.conjugate(*iter));
+				edges.AddPair(first,second);
 				included.insert(*iter);
 				included.insert(g_.conjugate(*iter));
 			}
@@ -381,7 +454,7 @@ public:
     }
 
     void removeOverlaps(PathContainer& paths, GraphCoverageMap& coverage_map,
-                        size_t max_overlap, bool cut_overlaps,  bool add_overlaps_begin) {
+                        size_t min_edge_len, size_t max_path_diff, bool cut_overlaps,  bool add_overlaps_begin) {
         if (!cut_overlaps) {
             return;
         }
@@ -390,15 +463,16 @@ public:
             remover.CutPseudoSelfConjugatePaths(paths);
         //writer.WritePathsToFASTA(paths, output_dir + "/before.fasta");
         //DEBUG("Removing subpaths");
-        remover.RemoveSimilarPaths(paths, max_overlap, false, true, true, false, add_overlaps_begin);
+        //delete not only eq,
+        remover.RemoveSimilarPaths(paths, min_edge_len, max_path_diff, false, true, false, false, add_overlaps_begin);
         //writer.WritePathsToFASTA(paths, output_dir + "/remove_similar.fasta");
         //DEBUG("Remove overlaps")
         remover.RemoveOverlaps(paths);
         //writer.WritePathsToFASTA(paths, output_dir + "/after_remove_overlaps.fasta");
-        remover.RemoveSimilarPaths(paths, max_overlap, true, false, false, false, add_overlaps_begin);
+        remover.RemoveSimilarPaths(paths, min_edge_len, max_path_diff, true, false, false, false, add_overlaps_begin);
         //writer.WritePathsToFASTA(paths, output_dir + "/remove_equal.fasta");
         //DEBUG("remove similar path. Max difference " << max_overlap);
-        remover.RemoveSimilarPaths(paths, max_overlap, false, true, true, true, add_overlaps_begin);
+        remover.RemoveSimilarPaths(paths, min_edge_len, max_path_diff, false, true, true, true, add_overlaps_begin);
         DEBUG("end removing");
     }
 
@@ -414,7 +488,13 @@ public:
         std::set<EdgeId> included;
         for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
             if (included.count(*iter) == 0 && !coverageMap.IsCovered(*iter)) {
-                paths.AddPair(new BidirectionalPath(g_, *iter), new BidirectionalPath(g_, g_.conjugate(*iter)));
+                BidirectionalPath* path = new BidirectionalPath(g_, *iter);
+                BidirectionalPath* conj = new BidirectionalPath(g_, g_.conjugate(*iter));
+                path->Subscribe(&coverageMap);
+                conj->Subscribe(&coverageMap);
+                coverageMap.BackEdgeAdded(path->At(0), path, path->GapAt(0));
+                coverageMap.BackEdgeAdded(conj->At(0), conj, conj->GapAt(0));
+                paths.AddPair(path, conj);
                 included.insert(*iter);
                 included.insert(g_.conjugate(*iter));
             }
diff --git a/src/debruijn/path_extend/pe_utils.hpp b/src/debruijn/path_extend/pe_utils.hpp
index e96cd07..eaf2606 100644
--- a/src/debruijn/path_extend/pe_utils.hpp
+++ b/src/debruijn/path_extend/pe_utils.hpp
@@ -20,7 +20,9 @@
 using namespace debruijn_graph;
 
 namespace path_extend {
-inline bool InCycle(EdgeId e, const Graph& g) {
+
+//Checks whether we are in a cycle of length 2, used only for seed selection.
+inline bool InTwoEdgeCycle(EdgeId e, const Graph &g) {
     auto v = g.EdgeEnd(e);
     if (g.OutgoingEdgeCount(v) >= 1) {
         auto edges = g.OutgoingEdges(v);
@@ -62,7 +64,7 @@ protected:
 
     MapDataT * empty_;
 
-    virtual void EdgeAdded(EdgeId e, BidirectionalPath * path, int /*gap*/) {
+    virtual void EdgeAdded(EdgeId e, BidirectionalPath * path, Gap /*gap*/) {
         auto iter = edgeCoverage_.find(e);
         if (iter == edgeCoverage_.end()) {
             edgeCoverage_.insert(std::make_pair(e, new MapDataT()));
@@ -125,11 +127,11 @@ public:
 		}
 	}
 
-    virtual void FrontEdgeAdded(EdgeId e, BidirectionalPath * path, int gap) {
+    virtual void FrontEdgeAdded(EdgeId e, BidirectionalPath * path, Gap gap) {
         EdgeAdded(e, path, gap);
     }
 
-    virtual void BackEdgeAdded(EdgeId e, BidirectionalPath * path, int gap) {
+    virtual void BackEdgeAdded(EdgeId e, BidirectionalPath * path, Gap gap) {
         EdgeAdded(e, path, gap);
     }
 
@@ -418,7 +420,7 @@ private:
             ++i;
 
             while(i < path.Size() and path.GapAt(i) <= min_gap_) {
-                p->PushBack(path[i], path.GapAt(i));
+                p->PushBack(path[i], path.GapAt(i), path.TrashPreviousAt(i), path.TrashCurrentAt(i));
                 ++i;
             }
             if (i < path.Size()) {
diff --git a/src/debruijn/path_extend/scaffolder2015/connection_condition2015.cpp b/src/debruijn/path_extend/scaffolder2015/connection_condition2015.cpp
new file mode 100644
index 0000000..1e411c5
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/connection_condition2015.cpp
@@ -0,0 +1,111 @@
+
+
+#include "connection_condition2015.hpp"
+namespace path_extend {
+
+    PairedLibConnectionCondition::PairedLibConnectionCondition(const debruijn_graph::Graph &graph,
+                                 shared_ptr <PairedInfoLibrary> lib,
+                                 size_t lib_index,
+                                 size_t min_read_count) :
+            graph_(graph),
+            lib_(lib),
+            lib_index_(lib_index),
+            min_read_count_(min_read_count),
+//TODO reconsider condition
+            left_dist_delta_(5 * (int) lib_->GetIsVar()),
+            right_dist_delta_(5 * (int) lib_->GetISMax()) {
+    }
+
+    size_t PairedLibConnectionCondition::GetLibIndex() const {
+        return lib_index_;
+    }
+
+    set <debruijn_graph::EdgeId> PairedLibConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e) const {
+        set <debruijn_graph::EdgeId> all_edges;
+        int e_length = (int) graph_.length(e);
+        lib_->FindJumpEdges(e, all_edges, e_length - left_dist_delta_, e_length + right_dist_delta_);
+
+        set <debruijn_graph::EdgeId> result;
+        for (auto edge : all_edges) {
+            if (edge != e && edge != graph_.conjugate(e) &&
+                math::ge(GetWeight(e, edge), (double) min_read_count_)) {
+                result.insert(edge);
+            }
+        }
+        return result;
+    }
+
+    double PairedLibConnectionCondition::GetWeight(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const {
+        int e_length = (int) graph_.length(e1);
+        double res = lib_->CountPairedInfo(e1, e2, e_length - left_dist_delta_, e_length + right_dist_delta_);
+        VERIFY(res == lib_->CountPairedInfo(graph_.conjugate(e2), graph_.conjugate(e1),
+                                            (int) graph_.length(e2) - left_dist_delta_,
+                                            (int) graph_.length(e2) + right_dist_delta_));
+
+        return res;
+    }
+
+//TODO: We use same part of index twice, is it necessary?
+    int PairedLibConnectionCondition::GetMedianGap(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const {
+        std::vector<int> distances;
+        std::vector<double> weights;
+        int e_length = (int) graph_.length(e1);
+        lib_->CountDistances(e1, e2, distances, weights);
+        std::vector<pair<int, double> >h(distances.size());
+        for (size_t i = 0; i< distances.size(); i++) {
+            if (distances[i] >= e_length - left_dist_delta_ && distances[i] <= e_length + right_dist_delta_)
+                h.push_back(std::make_pair(distances[i], weights[i]));
+        }
+//TODO: is it really necessary?
+        std::sort(h.begin(), h.end());
+        double sum = 0.0;
+        double sum2 = 0.0;
+        for (size_t j = 0; j< h.size(); ++j) {
+            sum += h[j].second;
+        }
+        size_t i = 0;
+        for (; i < h.size(); ++i) {
+            sum2 += h[i].second;
+            if (sum2 * 2 > sum)
+                break;
+        }
+        if (i >= h.size()) {
+            WARN("Count median error");
+            i = h.size() - 1;
+        }
+        return (int) round(h[i].first - e_length);
+    }
+
+    AssemblyGraphConnectionCondition::AssemblyGraphConnectionCondition(const debruijn_graph::Graph &g, size_t max_connection_length) :
+            g_(g),
+            max_connection_length_(max_connection_length) {
+    }
+
+    set <debruijn_graph::EdgeId> AssemblyGraphConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e) const {
+        set <debruijn_graph::EdgeId> result;
+
+        for (auto connected: g_.OutgoingEdges(g_.EdgeEnd(e))) {
+            result.insert(connected);
+        }
+//TODO: optimization possible. Precompute all pairs of interesting connected vertex.
+        DijkstraHelper<debruijn_graph::Graph>::BoundedDijkstra dijkstra(
+                DijkstraHelper<debruijn_graph::Graph>::CreateBoundedDijkstra(g_, max_connection_length_));
+        dijkstra.Run(g_.EdgeEnd(e));
+        for (auto v: dijkstra.ReachedVertices()) {
+            for (auto connected: g_.OutgoingEdges(v)) {
+                result.insert(connected);
+            }
+        }
+
+        return result;
+    }
+
+    double AssemblyGraphConnectionCondition::GetWeight(debruijn_graph::EdgeId, debruijn_graph::EdgeId) const {
+        return 1.0;
+    }
+
+    size_t AssemblyGraphConnectionCondition::GetLibIndex() const {
+        return (size_t) - 1;
+    }
+
+}
diff --git a/src/debruijn/path_extend/scaffolder2015/connection_condition2015.hpp b/src/debruijn/path_extend/scaffolder2015/connection_condition2015.hpp
new file mode 100644
index 0000000..a9842b4
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/connection_condition2015.hpp
@@ -0,0 +1,69 @@
+
+#ifndef CONNECTION_CONDITION2015_HPP
+#define CONNECTION_CONDITION2015_HPP
+#include "genome_consistance_checker.hpp"
+#include "logger/logger.hpp"
+#include "path_extend/paired_library.hpp"
+#include <map>
+#include <set>
+
+namespace path_extend {
+
+/* Connection conditions are used by both the scaffolder's extension chooser and the scaffold graph */
+
+    class ConnectionCondition {
+    public:
+// Outputs the edges e is connected with.
+//TODO  performance issue: think about inside filtering. Return only unique connected edges?
+        virtual set <debruijn_graph::EdgeId> ConnectedWith(debruijn_graph::EdgeId e) const = 0;
+// Outputs the weight of the pair e1 and e2
+        virtual double GetWeight(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const = 0;
+        virtual size_t GetLibIndex() const = 0;
+        virtual ~ConnectionCondition() {
+        }
+    };
+/* Main (mate pair library) connection condition.
+ *
+ */
+    class PairedLibConnectionCondition : public ConnectionCondition {
+    private:
+        const debruijn_graph::Graph &graph_;
+        shared_ptr <PairedInfoLibrary> lib_;
+        size_t lib_index_;
+//Minimal number of mate pairs to call connection sound
+        size_t min_read_count_;
+    public:
+//Only paired info with gap between e1 and e2 between -left_dist_delta_ and right_dist_delta_ taken in account
+        int left_dist_delta_;
+        int right_dist_delta_;
+
+        PairedLibConnectionCondition(const debruijn_graph::Graph &graph,
+                                     shared_ptr <PairedInfoLibrary> lib,
+                                     size_t lib_index,
+                                     size_t min_read_count);
+        size_t GetLibIndex() const override;
+        set <debruijn_graph::EdgeId> ConnectedWith(debruijn_graph::EdgeId e) const override;
+        double GetWeight(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const override;
+//Returns median gap size
+        int GetMedianGap (debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const;
+    };
+
+/*  Condition used to find connected in graph edges.
+ *
+ */
+    class AssemblyGraphConnectionCondition : public ConnectionCondition {
+    private:
+        const debruijn_graph::Graph &g_;
+//Maximal gap to the connection.
+        size_t max_connection_length_;
+
+    public:
+        AssemblyGraphConnectionCondition(const debruijn_graph::Graph &g, size_t max_connection_length);
+
+        set <debruijn_graph::EdgeId> ConnectedWith(debruijn_graph::EdgeId e) const override;
+        double GetWeight(debruijn_graph::EdgeId, debruijn_graph::EdgeId) const override;
+        size_t GetLibIndex() const override;
+    };
+}
+
+#endif //PROJECT_CONNECTION_CONDITION2015_HPP
diff --git a/src/debruijn/path_extend/scaffolder2015/extension_chooser2015.cpp b/src/debruijn/path_extend/scaffolder2015/extension_chooser2015.cpp
new file mode 100644
index 0000000..ca45f49
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/extension_chooser2015.cpp
@@ -0,0 +1,81 @@
+//
+// Created by lab42 on 8/26/15.
+//
+
+#include "extension_chooser2015.hpp"
+
+namespace path_extend {
+using namespace std;
+
+std::pair<EdgeId, int> ExtensionChooser2015::FindLastUniqueInPath(const BidirectionalPath& path) const {
+    for (int i =  (int)path.Size() - 1; i >= 0; --i) {
+        if (unique_edges_->IsUnique(path.At(i))) {
+            return std::make_pair(path.At(i), i);
+        }
+    }
+    return std::make_pair(EdgeId(0), -1);
+}
+
+ExtensionChooser::EdgeContainer ExtensionChooser2015::FindNextUniqueEdge(const EdgeId from) const {
+    VERIFY(unique_edges_->IsUnique(from));
+    EdgeContainer result;
+    set<EdgeId> candidate_edges = paired_connection_condition_.ConnectedWith(from);
+    vector<pair<double, pair<EdgeId, int >>> to_sort;
+    for (EdgeId e : candidate_edges) {
+        if (!unique_edges_->IsUnique(e)) {
+            continue;
+        }
+        double sum = paired_connection_condition_.GetWeight(from, e);
+        DEBUG("edge " << g_.int_id(e) << " weight " << sum);
+        if (sum < absolute_weight_threshold_) {
+            DEBUG("Edge " << g_.int_id(e)  << " weight " << sum << " failed absolute weight threshold " << absolute_weight_threshold_);
+            continue;
+        }
+        int gap = paired_connection_condition_.GetMedianGap(from, e);
+
+        auto connected_with = graph_connection_condition_.ConnectedWith(from);
+        if (connected_with.find(e) != connected_with.end()) {
+            sum *= graph_connection_bonus_;
+        }
+        to_sort.push_back(make_pair(sum, make_pair(e, gap)));
+    }
+//descending order, reverse iterators;
+    sort(to_sort.rbegin(), to_sort.rend());
+    for(size_t j = 0; j < to_sort.size(); j++) {
+        if (j == 0 || to_sort[j].first* relative_weight_threshold_ > to_sort[j - 1].first) {
+            result.push_back(EdgeWithDistance(to_sort[j].second.first, to_sort[j].second.second));
+            DEBUG("Edge " << g_.int_id(to_sort[j].second.first) << " gap " << to_sort[j].second.second << " weight "<< to_sort[j].first <<  " passed absolute weight threshold " << absolute_weight_threshold_);
+        } else {
+            DEBUG ("Edge " << g_.int_id(to_sort[j].second.first) << " weight " << to_sort[j].first << " failed relative weight threshold " << relative_weight_threshold_);
+            DEBUG("other removed");
+            break;
+        }
+    }
+    return result;
+}
+
+ExtensionChooser::EdgeContainer ExtensionChooser2015::Filter(const BidirectionalPath& path, const ExtensionChooser::EdgeContainer& /*edges*/) const {
+//    set<EdgeId> candidates = FindCandidates(path);
+    pair<EdgeId, int> last_unique = FindLastUniqueInPath(path);
+    EdgeContainer result;
+
+    if (last_unique.second < 0) {
+// No unique edge found
+        return result;
+    }
+
+    result = FindNextUniqueEdge(last_unique.first);
+//Backward check. We connect edges iff they are the best continuation of each other.
+    if (result.size() == 1) {
+        DEBUG("For edge " << g_.int_id(last_unique.first) << " unique next edge "<< result[0].e_ <<" found, doing backwards check ");
+        EdgeContainer backwards_check = FindNextUniqueEdge(g_.conjugate(result[0].e_));
+        if ((backwards_check.size() != 1) || (g_.conjugate(backwards_check[0].e_) != last_unique.first)) {
+            result.clear();
+        }
+//We should reduce the gap size by the length of the edges after the last unique one. NOTE(review): this line also runs after result.clear() above, accessing result[0] on an empty container — likely should be in an else branch.
+        result[0].d_ -= int (path.LengthAt(last_unique.second) - g_.length(last_unique.first));
+    }
+    return result;
+}
+
+}
diff --git a/src/debruijn/path_extend/scaffolder2015/extension_chooser2015.hpp b/src/debruijn/path_extend/scaffolder2015/extension_chooser2015.hpp
new file mode 100644
index 0000000..64c9080
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/extension_chooser2015.hpp
@@ -0,0 +1,49 @@
+//
+// Created by lab42 on 8/26/15.
+//
+#pragma once
+
+#include "path_extend/extension_chooser.hpp"
+#include "connection_condition2015.hpp"
+#include "genome_consistance_checker.hpp"
+#include "logger/logger.hpp"
+#include <map>
+#include <set>
+namespace path_extend {
+class ExtensionChooser2015: public ScaffoldingExtensionChooser {
+private:
+    shared_ptr<ScaffoldingUniqueEdgeStorage> unique_edges_;
+// for possible connections e1 and e2 if weight(e1) > relative_weight_threshold_ * weight(e2) then e2 will be ignored
+    double relative_weight_threshold_;
+    PairedLibConnectionCondition paired_connection_condition_;
+    AssemblyGraphConnectionCondition graph_connection_condition_;
+// weight < absolute_weight_threshold_ will be ignored
+    size_t absolute_weight_threshold_;
+// multiplicator for the pairs which are connected in graph.
+    double graph_connection_bonus_;
+
+protected:
+//If path contains no unique edges return -1
+    pair<EdgeId, int> FindLastUniqueInPath(const BidirectionalPath& path) const;
+//Find all possible next unique edges confirmed with mate-pair information. (absolute/relative)_weight_threshold_ used for filtering
+    EdgeContainer FindNextUniqueEdge(const EdgeId from) const;
+        DECL_LOGGER("ExtensionChooser2015")
+public:
+    ExtensionChooser2015(const Graph& g, shared_ptr<WeightCounter> wc, double is_scatter_coeff,
+                         shared_ptr<ScaffoldingUniqueEdgeStorage> unique_edges ,double relative_threshold, size_t lib_index):
+            ScaffoldingExtensionChooser(g, wc, is_scatter_coeff), unique_edges_(unique_edges), relative_weight_threshold_(relative_threshold), paired_connection_condition_(g,
+            wc->get_libptr(), lib_index,
+//TODO: constants are subject to reconsider
+            0), graph_connection_condition_(g, 2*unique_edges_->GetMinLength()), absolute_weight_threshold_(2), graph_connection_bonus_(2) {
+        INFO("ExtensionChooser2015 created");
+    }
+/* @param edges is not actually used; kept for interface compatibility
+ * @returns possible next edge if there is unique one, else returns empty container
+ *
+ */
+
+    EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer& edges) const override;
+};
+
+
+}
diff --git a/src/debruijn/path_extend/scaffolder2015/scaff_supplementary.cpp b/src/debruijn/path_extend/scaffolder2015/scaff_supplementary.cpp
new file mode 100644
index 0000000..afb3779
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/scaff_supplementary.cpp
@@ -0,0 +1,66 @@
+#include "scaff_supplementary.hpp"
+#include <algorithm>
+
+using namespace std;
+namespace path_extend {
+
+
+void ScaffoldingUniqueEdgeAnalyzer::SetCoverageBasedCutoff() {
+    vector <pair<double, size_t>> coverages;
+    map <EdgeId, size_t> long_component;
+    size_t total_len = 0, short_len = 0, cur_len = 0;
+
+    for (auto iter = gp_.g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
+        if (gp_.g.length(*iter) > length_cutoff_) {
+            coverages.push_back(make_pair(gp_.g.coverage(*iter), gp_.g.length(*iter)));
+            total_len += gp_.g.length(*iter);
+            long_component[*iter] = 0;
+        } else {
+            short_len += gp_.g.length(*iter);
+        }
+    }
+    if (total_len == 0) {
+        WARN("not enough edges longer than "<< length_cutoff_);
+        return;
+    }
+    sort(coverages.begin(), coverages.end());
+    size_t i = 0;
+    while (cur_len < total_len / 2 && i < coverages.size()) {
+        cur_len += coverages[i].second;
+        i++;
+    }
+    median_coverage_ = coverages[i].first;
+}
+
+
+void ScaffoldingUniqueEdgeAnalyzer::FillUniqueEdgeStorage(ScaffoldingUniqueEdgeStorage &storage_) {
+    storage_.unique_edges_.clear();
+    size_t total_len = 0;
+    size_t unique_len = 0;
+    size_t unique_num = 0;
+    storage_.SetMinLength(length_cutoff_);
+    for (auto iter = gp_.g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
+        size_t tlen = gp_.g.length(*iter);
+        total_len += tlen;
+        if (gp_.g.length(*iter) >= length_cutoff_ && gp_.g.coverage(*iter) > median_coverage_ * (1 - relative_coverage_variation_)
+                && gp_.g.coverage(*iter) < median_coverage_ * (1 + relative_coverage_variation_) ) {
+            storage_.unique_edges_.insert(*iter);
+            unique_len += tlen;
+            unique_num ++;
+        }
+    }
+    for (auto iter = storage_.begin(); iter != storage_.end(); ++iter) {
+        DEBUG (gp_.g.int_id(*iter) << " " << gp_.g.coverage(*iter) << " " << gp_.g.length(*iter) );
+    }
+    INFO ("With length cutoff: " << length_cutoff_ <<", median long edge coverage: " << median_coverage_ << ", and maximal unique coverage: " <<
+                                                                                                            relative_coverage_variation_);
+    INFO("Unique edges quantity: " << unique_num << ", unique edges length " << unique_len <<", total edges length" << total_len);
+    if (unique_len * 2 < total_len) {
+        WARN("Less than half of genome in unique edges!");
+    }
+
+}
+
+
+
+}
diff --git a/src/debruijn/path_extend/scaffolder2015/scaff_supplementary.hpp b/src/debruijn/path_extend/scaffolder2015/scaff_supplementary.hpp
new file mode 100644
index 0000000..aef1431
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/scaff_supplementary.hpp
@@ -0,0 +1,75 @@
+#pragma once
+#include "graph_pack.hpp"
+#include "logger/logger.hpp"
+
+namespace path_extend {
+    typedef debruijn_graph::EdgeId EdgeId;
+
+/* Storage of presumably unique, relatively long edges. Filled by ScaffoldingUniqueEdgeAnalyzer
+ *
+ */
+    class ScaffoldingUniqueEdgeStorage {
+        friend class ScaffoldingUniqueEdgeAnalyzer;
+    private:
+        set <EdgeId> unique_edges_;
+        size_t min_unique_length_;
+    public:
+        ScaffoldingUniqueEdgeStorage(): unique_edges_(){
+            DEBUG("storage created, empty");
+        }
+
+        bool IsUnique(EdgeId e) const {
+            return (unique_edges_.find(e) != unique_edges_.end());
+        }
+
+        decltype(unique_edges_.begin()) begin() const {
+            return unique_edges_.begin();
+        }
+
+        decltype(unique_edges_.end()) end() const {
+            return unique_edges_.end();
+        }
+
+        size_t size() const {
+            return unique_edges_.size();
+        }
+        size_t GetMinLength() const {
+            return min_unique_length_;
+        }
+        void SetMinLength(size_t min_length)  {
+            min_unique_length_ = min_length;
+        }
+
+        const set<EdgeId>& GetSet() const {
+            return unique_edges_;
+        }
+   protected:
+        DECL_LOGGER("ScaffoldingUniqueEdgeStorage")
+
+    };
+
+/* Auxiliary class required to fill in the unique edge storage.
+ *
+ */
+    class ScaffoldingUniqueEdgeAnalyzer {
+
+    ;
+    private:
+        const debruijn_graph::conj_graph_pack &gp_;
+        size_t length_cutoff_;
+        double median_coverage_;
+        double relative_coverage_variation_;
+    protected:
+        DECL_LOGGER("ScaffoldingUniqueEdgeAnalyzer")
+
+
+        void SetCoverageBasedCutoff();
+    public:
+        ScaffoldingUniqueEdgeAnalyzer(const debruijn_graph::conj_graph_pack &gp, size_t apriori_length_cutoff, double max_relative_coverage):gp_(gp), length_cutoff_(apriori_length_cutoff), relative_coverage_variation_(max_relative_coverage){
+            SetCoverageBasedCutoff();
+        }
+        void FillUniqueEdgeStorage(ScaffoldingUniqueEdgeStorage &storage_);
+    };
+}
+
+
diff --git a/src/debruijn/path_extend/scaffolder2015/scaffold_graph.cpp b/src/debruijn/path_extend/scaffolder2015/scaffold_graph.cpp
new file mode 100644
index 0000000..0dfd8b8
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/scaffold_graph.cpp
@@ -0,0 +1,275 @@
+#include "scaffold_graph.hpp"
+
+
+namespace path_extend {
+namespace scaffold_graph {
+
+std::atomic<ScaffoldGraph::ScaffoldEdgeIdT> ScaffoldGraph::ScaffoldEdge::scaffold_edge_id_{0};
+
+void ScaffoldGraph::AddEdgeSimple(const ScaffoldGraph::ScaffoldEdge &e, size_t conjugate_id) {
+    edges_.emplace(e.getId(), e);
+    outgoing_edges_.emplace(e.getStart(), e.getId());
+    incoming_edges_.emplace(e.getEnd(), e.getId());
+    conjugate_[e.getId()] = conjugate_id;
+}
+
+void ScaffoldGraph::DeleteOutgoing(const ScaffoldGraph::ScaffoldEdge &e) {
+    auto e_range = outgoing_edges_.equal_range(e.getStart());
+    for (auto edge_id = e_range.first; edge_id != e_range.second; ++edge_id) {
+        if (edges_.at(edge_id->second) == e) {
+            outgoing_edges_.erase(edge_id);
+        }
+    }
+}
+
+void ScaffoldGraph::DeleteIncoming(const ScaffoldGraph::ScaffoldEdge &e) {
+    auto e_range = incoming_edges_.equal_range(e.getEnd());
+    for (auto edge_id = e_range.first; edge_id != e_range.second; ++edge_id) {
+        if (edges_.at(edge_id->second) == e) {
+            incoming_edges_.erase(edge_id);
+        }
+    }
+}
+
+void ScaffoldGraph::DeleteAllOutgoingEdgesSimple(ScaffoldGraph::ScaffoldVertex v) {
+    auto e_range = outgoing_edges_.equal_range(v);
+    for (auto edge_id = e_range.first; edge_id != e_range.second; ++edge_id) {
+        DeleteIncoming(edges_.at(edge_id->second));
+    }
+    outgoing_edges_.erase(v);
+}
+
+void ScaffoldGraph::DeleteEdgeFromStorage(const ScaffoldGraph::ScaffoldEdge &e) {
+    VERIFY(!Exists(e));
+
+    size_t conjugate_id = conjugate_[e.getId()];
+    edges_.erase(e.getId());
+    edges_.erase(conjugate_id);
+    conjugate_.erase(e.getId());
+    conjugate_.erase(conjugate_id);
+}
+
+void ScaffoldGraph::DeleteAllIncomingEdgesSimple(ScaffoldGraph::ScaffoldVertex v) {
+    auto e_range = incoming_edges_.equal_range(v);
+    for (auto edge_id = e_range.first; edge_id != e_range.second; ++edge_id) {
+        DeleteOutgoing(edges_.at(edge_id->second));
+    }
+    incoming_edges_.erase(v);
+}
+
+bool ScaffoldGraph::Exists(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    return vertices_.count(assembly_graph_edge) != 0;
+}
+
+bool ScaffoldGraph::Exists(const ScaffoldGraph::ScaffoldEdge &e) const {
+    auto e_range = outgoing_edges_.equal_range(e.getStart());
+    for (auto edge_id = e_range.first; edge_id != e_range.second; ++edge_id) {
+        if (edges_.at(edge_id->second) == e) {
+            return true;
+        }
+    }
+    return false;
+}
+
+ScaffoldGraph::ScaffoldVertex ScaffoldGraph::conjugate(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    return assembly_graph_.conjugate(assembly_graph_edge);
+}
+
+ScaffoldGraph::ScaffoldEdge ScaffoldGraph::conjugate(const ScaffoldGraph::ScaffoldEdge &e) const {
+    auto iter = conjugate_.find(e.getId());
+    if (iter != conjugate_.end()) {
+        return edges_.at(iter->second);
+    }
+    return ScaffoldEdge(conjugate(e.getEnd()), conjugate(e.getStart()), e.getColor(), e.getWeight());
+}
+
+bool ScaffoldGraph::AddVertex(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) {
+    if (!Exists(assembly_graph_edge)) {
+        VERIFY(!Exists(conjugate(assembly_graph_edge)));
+        vertices_.insert(assembly_graph_edge);
+        vertices_.insert(conjugate(assembly_graph_edge));
+        return true;
+    }
+    return false;
+}
+
+void ScaffoldGraph::AddVertices(const set<ScaffoldGraph::ScaffoldVertex> &vertices) {
+    for (auto v : vertices) {
+        AddVertex(v);
+    }
+}
+
+bool ScaffoldGraph::AddEdge(ScaffoldGraph::ScaffoldVertex v1, ScaffoldGraph::ScaffoldVertex v2, size_t lib_id, double weight) {
+    VERIFY(Exists(v1));
+    VERIFY(Exists(v2));
+
+    ScaffoldEdge e(v1, v2, lib_id, weight);
+    if (Exists(e)) {
+        VERIFY(Exists(conjugate(e)));
+        return false;
+    }
+
+    auto conj = conjugate(e);
+    AddEdgeSimple(e, conj.getId());
+    AddEdgeSimple(conj, e.getId());
+    return true;
+}
+
+void ScaffoldGraph::Print(ostream &os) const {
+    for (auto v: vertices_) {
+        os << "Vertex " << int_id(v) << " ~ " << int_id(conjugate(v))
+            << ": len = " << assembly_graph_.length(v) << ", cov = " << assembly_graph_.coverage(v) << endl;
+    }
+    for (auto e_iter = ebegin(); e_iter != eend(); ++e_iter) {
+        os << "Edge " << e_iter->getId() << " ~ " << conjugate(*e_iter).getId() <<
+            ": " << int_id(e_iter->getStart()) << " -> " << int_id(e_iter->getEnd()) <<
+            ", lib index = " << e_iter->getColor() << ", weight " << e_iter->getWeight() << endl;
+    }
+}
+
+ScaffoldGraph::ScaffoldEdge ScaffoldGraph::UniqueIncoming(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    VERIFY(HasUniqueIncoming(assembly_graph_edge));
+    return edges_.at(incoming_edges_.find(assembly_graph_edge)->second);
+}
+
+ScaffoldGraph::ScaffoldEdge ScaffoldGraph::UniqueOutgoing(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    VERIFY(HasUniqueOutgoing(assembly_graph_edge));
+    return edges_.at(outgoing_edges_.find(assembly_graph_edge)->second);
+}
+
+bool ScaffoldGraph::HasUniqueIncoming(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    return IncomingEdgeCount(assembly_graph_edge) == 1;
+}
+
+bool ScaffoldGraph::HasUniqueOutgoing(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    return OutgoingEdgeCount(assembly_graph_edge) == 1;
+}
+
+size_t ScaffoldGraph::IncomingEdgeCount(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    return incoming_edges_.count(assembly_graph_edge);
+}
+
+size_t ScaffoldGraph::OutgoingEdgeCount(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    return outgoing_edges_.count(assembly_graph_edge);
+}
+
+vector<ScaffoldGraph::ScaffoldEdge> ScaffoldGraph::IncomingEdges(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    vector<ScaffoldEdge> result;
+    auto e_range = incoming_edges_.equal_range(assembly_graph_edge);
+    for (auto edge_id = e_range.first; edge_id != e_range.second; ++edge_id) {
+        result.push_back(edges_.at(edge_id->second));
+    }
+    return result;
+}
+
+vector<ScaffoldGraph::ScaffoldEdge> ScaffoldGraph::OutgoingEdges(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    vector<ScaffoldEdge> result;
+    auto e_range = outgoing_edges_.equal_range(assembly_graph_edge);
+    for (auto edge_id = e_range.first; edge_id != e_range.second; ++edge_id) {
+        result.push_back(edges_.at(edge_id->second));
+    }
+    return result;
+}
+
+const debruijn_graph::Graph &ScaffoldGraph::AssemblyGraph() const {
+    return assembly_graph_;
+}
+
+size_t ScaffoldGraph::EdgeCount() const {
+    return edges_.size();
+}
+
+size_t ScaffoldGraph::VertexCount() const {
+    return vertices_.size();
+}
+
+ScaffoldGraph::ScaffoldVertex ScaffoldGraph::EdgeEnd(ScaffoldEdge e) const {
+    return e.getEnd();
+}
+
+ScaffoldGraph::ScaffoldVertex ScaffoldGraph::EdgeStart(ScaffoldEdge e) const {
+    return e.getStart();
+}
+
+size_t ScaffoldGraph::int_id(ScaffoldGraph::ScaffoldEdge e) const {
+    return e.getId();
+}
+
+size_t ScaffoldGraph::int_id(ScaffoldGraph::ScaffoldVertex v) const {
+    return assembly_graph_.int_id(v);
+}
+
+ScaffoldGraph::ConstScaffoldEdgeIterator ScaffoldGraph::eend() const {
+    return ConstScaffoldEdgeIterator(edges_.cend());
+}
+
+ScaffoldGraph::ConstScaffoldEdgeIterator ScaffoldGraph::ebegin() const {
+    return ConstScaffoldEdgeIterator(edges_.cbegin());
+}
+
+ScaffoldGraph::VertexStorage::const_iterator ScaffoldGraph::vend() const {
+    return vertices_.cend();
+}
+
+ScaffoldGraph::VertexStorage::const_iterator ScaffoldGraph::vbegin() const {
+    return vertices_.cbegin();
+}
+
+adt::iterator_range<ScaffoldGraph::VertexStorage::const_iterator> ScaffoldGraph::vertices() const {
+    return adt::make_range(vbegin(), vend());
+}
+
+adt::iterator_range<ScaffoldGraph::ConstScaffoldEdgeIterator> ScaffoldGraph::edges() const {
+    return adt::make_range(ebegin(), eend());
+}
+
+bool ScaffoldGraph::IsVertexIsolated(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
+    bool
+        result = incoming_edges_.count(assembly_graph_edge) == 0 && outgoing_edges_.count(assembly_graph_edge) == 0;
+    VERIFY((incoming_edges_.count(conjugate(assembly_graph_edge)) == 0
+        && incoming_edges_.count(assembly_graph_edge) == 0) == result);
+    return result;
+}
+
+bool ScaffoldGraph::RemoveVertex(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) {
+    if (Exists(assembly_graph_edge)) {
+        VERIFY(Exists(conjugate(assembly_graph_edge)));
+
+        DeleteAllOutgoingEdgesSimple(assembly_graph_edge);
+        DeleteAllIncomingEdgesSimple(assembly_graph_edge);
+        DeleteAllOutgoingEdgesSimple(conjugate(assembly_graph_edge));
+        DeleteAllIncomingEdgesSimple(conjugate(assembly_graph_edge));
+
+        VERIFY(incoming_edges_.count(assembly_graph_edge) == 0);
+        VERIFY(outgoing_edges_.count(assembly_graph_edge) == 0);
+        VERIFY(incoming_edges_.count(conjugate(assembly_graph_edge)) == 0);
+        VERIFY(outgoing_edges_.count(conjugate(assembly_graph_edge)) == 0);
+
+        vertices_.erase(assembly_graph_edge);
+        vertices_.erase(conjugate(assembly_graph_edge));
+
+        return true;
+    }
+    return false;
+}
+
+bool ScaffoldGraph::RemoveEdge(const ScaffoldGraph::ScaffoldEdge &e) {
+    if (Exists(e)) {
+        VERIFY(Exists(conjugate(e)));
+        DeleteOutgoing(e);
+        DeleteIncoming(e);
+        DeleteOutgoing(conjugate(e));
+        DeleteIncoming(conjugate(e));
+        DeleteEdgeFromStorage(e);
+
+        return true;
+    }
+    return false;
+}
+
+bool ScaffoldGraph::AddEdge(const ScaffoldGraph::ScaffoldEdge &e) {
+    return AddEdge(e.getStart(), e.getEnd(), e.getColor(), e.getWeight());
+}
+
+} //scaffold_graph
+} //path_extend
\ No newline at end of file
diff --git a/src/debruijn/path_extend/scaffolder2015/scaffold_graph.hpp b/src/debruijn/path_extend/scaffolder2015/scaffold_graph.hpp
new file mode 100644
index 0000000..033efea
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/scaffold_graph.hpp
@@ -0,0 +1,233 @@
+//
+// Created by andrey on 17.09.15.
+//
+#pragma once
+
+#include "logger/logger.hpp"
+#include "debruijn_graph.hpp"
+#include "path_extend/paired_library.hpp"
+#include "connection_condition2015.hpp"
+
+#include <standard_base.hpp>
+#include <adt/iterator_range.hpp>
+
+namespace path_extend {
+namespace scaffold_graph {
+
+//do NOT add "using namespace debruijn_graph" in order not to confuse between EdgeId typedefs
+
+class ScaffoldGraph {
+
+public:
+    //EdgeId in de Bruijn graph is vertex in scaffolding graph
+    typedef debruijn_graph::EdgeId ScaffoldVertex;
+
+    //Unique edge id
+    typedef size_t ScaffoldEdgeIdT;
+
+    //Scaffold edge information class
+    struct ScaffoldEdge {
+    private:
+        //unique id
+        ScaffoldEdgeIdT id_;
+        //id counter
+        static std::atomic<ScaffoldEdgeIdT> scaffold_edge_id_;
+
+        ScaffoldVertex start_;
+        ScaffoldVertex end_;
+        //color = lib#
+        size_t color_;
+        //read pair weight or anything else
+        double weight_;
+
+    public:
+
+        ScaffoldEdge(ScaffoldVertex start, ScaffoldVertex end, size_t lib_id = (size_t) -1, double weight = 0) :
+            id_(scaffold_edge_id_++),
+            start_(start), end_(end),
+            color_(lib_id),
+            weight_(weight) {
+        }
+
+        ScaffoldEdgeIdT getId() const {
+            return id_;
+        }
+
+
+        size_t getColor() const {
+            return color_;
+        }
+
+        double getWeight() const {
+            return weight_;
+        }
+
+        const ScaffoldVertex getStart() const {
+            return start_;
+        }
+
+        const ScaffoldVertex getEnd() const {
+            return end_;
+        }
+
+        bool operator==(const ScaffoldEdge &e) const {
+            return color_ == e.color_ && weight_ == e.weight_ && start_ == e.start_ && end_ == e.end_;
+        }
+
+        bool operator==(const ScaffoldEdge &e) {
+            return color_ == e.color_ && weight_ == e.weight_ && start_ == e.start_ && end_ == e.end_;
+        }
+    };
+
+    //typedef for possibility to use in templated graph visualizers
+    typedef ScaffoldVertex VertexId;
+    typedef ScaffoldEdge EdgeId;
+
+    //All vertices are stored in set
+    typedef std::set<ScaffoldVertex> VertexStorage;
+    //Edges are stored in map: Id -> Edge Information
+    typedef std::unordered_map<ScaffoldEdgeIdT, ScaffoldEdge> EdgeStorage;
+    //Adjacency list contains vertex and edge id (instead of whole edge information)
+    typedef std::unordered_multimap<ScaffoldVertex, ScaffoldEdgeIdT> AdjacencyStorage;
+
+    struct ConstScaffoldEdgeIterator: public boost::iterator_facade<ConstScaffoldEdgeIterator,
+                                                                    const ScaffoldEdge,
+                                                                    boost::forward_traversal_tag> {
+    private:
+        EdgeStorage::const_iterator iter_;
+
+    public:
+        ConstScaffoldEdgeIterator(EdgeStorage::const_iterator iter) : iter_(iter) {
+        }
+
+    private:
+        friend class boost::iterator_core_access;
+
+        void increment() {
+            ++iter_;
+        }
+
+        bool equal(const ConstScaffoldEdgeIterator &other) const {
+            return iter_ == other.iter_;
+        }
+
+        ScaffoldEdge dereference() const {
+            return iter_->second;
+        }
+    };
+
+private:
+    const debruijn_graph::Graph &assembly_graph_;
+
+    VertexStorage vertices_;
+
+    EdgeStorage edges_;
+
+    //Map for storing conjugate scaffolding edges
+    std::unordered_map<ScaffoldEdgeIdT, ScaffoldEdgeIdT> conjugate_;
+
+    AdjacencyStorage outgoing_edges_;
+
+    AdjacencyStorage incoming_edges_;
+
+    //Add edge without any checks and conjugate
+    void AddEdgeSimple(const ScaffoldEdge &e, size_t conjugate_id);
+
+    //Delete outgoing edge from adjacency list without checks
+    //and removing conjugate and respective incoming edge
+    void DeleteOutgoing(const ScaffoldEdge &e);
+
+    //Delete incoming edge from adjacency list without checks
+    //and removing conjugate and respective outgoing edge
+    void DeleteIncoming(const ScaffoldEdge &e);
+
+    //Delete all edge info from storage
+    void DeleteEdgeFromStorage(const ScaffoldEdge &e);
+
+    //Delete all edges outgoing from v from adjacency lists
+    void DeleteAllOutgoingEdgesSimple(ScaffoldVertex v);
+
+    //Delete all edges incoming to v from adjacency lists
+    void DeleteAllIncomingEdgesSimple(ScaffoldVertex v);
+
+public:
+    ScaffoldGraph(const debruijn_graph::Graph &g) : assembly_graph_(g) {
+    }
+
+    bool Exists(ScaffoldVertex assembly_graph_edge) const;
+
+    bool Exists(const ScaffoldEdge &e) const;
+
+    ScaffoldVertex conjugate(ScaffoldVertex assembly_graph_edge) const;
+
+    //Return a structure that is equal to the conjugate of e (not exactly the same structure as in the graph)
+    ScaffoldEdge conjugate(const ScaffoldEdge &e) const;
+
+    //Add isolated vertex to the graph if it does not exist
+    bool AddVertex(ScaffoldVertex assembly_graph_edge);
+
+    void AddVertices(const set<ScaffoldVertex> &vertices);
+
+    //Add edge (and conjugate) if not exists
+    //v1 and v2 must exist
+    bool AddEdge(ScaffoldVertex v1, ScaffoldVertex v2, size_t lib_id, double weight);
+
+    bool AddEdge(const ScaffoldEdge &e);
+
+    //Remove edge from edge container and all adjacency lists
+    bool RemoveEdge(const ScaffoldEdge &e);
+
+    //Remove vertex and all adjacent edges
+    bool RemoveVertex(ScaffoldVertex assembly_graph_edge);
+
+    bool IsVertexIsolated(ScaffoldVertex assembly_graph_edge) const;
+
+    VertexStorage::const_iterator vbegin() const;
+
+    VertexStorage::const_iterator vend() const;
+
+    adt::iterator_range<VertexStorage::const_iterator> vertices() const;
+
+    ConstScaffoldEdgeIterator ebegin() const;
+
+    ConstScaffoldEdgeIterator eend() const;
+
+    adt::iterator_range<ScaffoldGraph::ConstScaffoldEdgeIterator> edges() const;
+
+    size_t int_id(ScaffoldVertex v) const;
+
+    size_t int_id(ScaffoldEdge e) const;
+
+    ScaffoldVertex EdgeStart(ScaffoldEdge e) const;
+
+    ScaffoldVertex EdgeEnd(ScaffoldEdge e) const;
+
+    size_t VertexCount() const;
+
+    size_t EdgeCount() const;
+
+    const debruijn_graph::Graph & AssemblyGraph() const;
+
+    vector<ScaffoldEdge> OutgoingEdges(ScaffoldVertex assembly_graph_edge) const;
+
+    vector<ScaffoldEdge> IncomingEdges(ScaffoldVertex assembly_graph_edge) const;
+
+    size_t OutgoingEdgeCount(ScaffoldVertex assembly_graph_edge) const;
+
+    size_t IncomingEdgeCount(ScaffoldVertex assembly_graph_edge) const;
+
+    bool HasUniqueOutgoing(ScaffoldVertex assembly_graph_edge) const;
+
+    bool HasUniqueIncoming(ScaffoldVertex assembly_graph_edge) const;
+
+    ScaffoldEdge UniqueOutgoing(ScaffoldVertex assembly_graph_edge) const;
+
+    ScaffoldEdge UniqueIncoming(ScaffoldVertex assembly_graph_edge) const;
+
+    void Print(ostream &os) const;
+
+};
+
+} //scaffold_graph
+} //path_extend
+
diff --git a/src/debruijn/path_extend/scaffolder2015/scaffold_graph_constructor.cpp b/src/debruijn/path_extend/scaffolder2015/scaffold_graph_constructor.cpp
new file mode 100644
index 0000000..4cc41aa
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/scaffold_graph_constructor.cpp
@@ -0,0 +1,73 @@
+//
+// Created by andrey on 04.12.15.
+//
+
+#include "scaffold_graph_constructor.hpp"
+
+namespace path_extend {
+namespace scaffold_graph {
+
+
+bool LengthEdgeCondition::IsSuitable(debruijn_graph::EdgeId e) const {
+    return graph_.length(e) >= min_length_;
+}
+
+void BaseScaffoldGraphConstructor::ConstructFromEdgeConditions(const EdgeCondition &edge_condition,
+                                                           vector<shared_ptr<ConnectionCondition>> &connection_conditions,
+                                                           bool use_terminal_vertices_only) {
+    for (auto e = graph_->AssemblyGraph().ConstEdgeBegin(); !e.IsEnd(); ++e) {
+        if (edge_condition.IsSuitable(*e)) {
+            graph_->AddVertex(*e);
+        }
+    }
+    ConstructFromConditions(connection_conditions, use_terminal_vertices_only);
+}
+
+void BaseScaffoldGraphConstructor::ConstructFromSet(const set<EdgeId> edge_set,
+                                                vector<shared_ptr<ConnectionCondition>> &connection_conditions,
+                                                bool use_terminal_vertices_only) {
+    graph_->AddVertices(edge_set);
+    ConstructFromConditions(connection_conditions, use_terminal_vertices_only);
+}
+
+void BaseScaffoldGraphConstructor::ConstructFromConditions(vector<shared_ptr<ConnectionCondition>> &connection_conditions,
+                                                       bool use_terminal_vertices_only) {
+    for (auto condition : connection_conditions) {
+        ConstructFromSingleCondition(condition, use_terminal_vertices_only);
+    }
+}
+
+void BaseScaffoldGraphConstructor::ConstructFromSingleCondition(const shared_ptr<ConnectionCondition> condition,
+                                                            bool use_terminal_vertices_only) {
+    for (auto v : graph_->vertices()) {
+        TRACE("Vertex " << graph_->int_id(v));
+
+        if (use_terminal_vertices_only && graph_->OutgoingEdgeCount(v) > 0)
+            continue;
+
+        auto connected_with = condition->ConnectedWith(v);
+        for (auto connected : connected_with) {
+            TRACE("Connected with " << graph_->int_id(connected));
+            if (graph_->Exists(connected)) {
+                if (use_terminal_vertices_only && graph_->IncomingEdgeCount(connected) > 0)
+                    continue;
+                graph_->AddEdge(v, connected, condition->GetLibIndex(), condition->GetWeight(v, connected));
+            }
+        }
+    }
+}
+
+
+shared_ptr<ScaffoldGraph> SimpleScaffoldGraphConstructor::Construct() {
+    ConstructFromSet(edge_set_, connection_conditions_);
+    return graph_;
+}
+
+shared_ptr<ScaffoldGraph> DefaultScaffoldGraphConstructor::Construct() {
+    ConstructFromSet(edge_set_, connection_conditions_);
+    ConstructFromEdgeConditions(edge_condition_, connection_conditions_);
+    return graph_;
+}
+
+} //scaffold_graph
+} //path_extend
\ No newline at end of file
diff --git a/src/debruijn/path_extend/scaffolder2015/scaffold_graph_constructor.hpp b/src/debruijn/path_extend/scaffolder2015/scaffold_graph_constructor.hpp
new file mode 100644
index 0000000..bbf45f4
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/scaffold_graph_constructor.hpp
@@ -0,0 +1,101 @@
+//
+// Created by andrey on 04.12.15.
+//
+
+#pragma once
+
+#include "scaffold_graph.hpp"
+
+
+namespace path_extend {
+namespace scaffold_graph {
+
+//De Bruijn graph edge condition interface
+class EdgeCondition {
+public:
+    virtual bool IsSuitable(debruijn_graph::EdgeId e) const = 0;
+
+    virtual ~EdgeCondition() { }
+
+};
+
+//Edge length condition
+class LengthEdgeCondition: public EdgeCondition {
+    const debruijn_graph::Graph &graph_;
+
+    size_t min_length_;
+
+public:
+    LengthEdgeCondition(const debruijn_graph::Graph &graph, size_t min_len) : graph_(graph), min_length_(min_len) {
+    }
+
+    bool IsSuitable(debruijn_graph::EdgeId e) const;
+};
+
+//Interface
+class ScaffoldGraphConstructor {
+
+public:
+    virtual shared_ptr<ScaffoldGraph> Construct() = 0;
+};
+
+//Basic scaffold graph constructor functions
+class BaseScaffoldGraphConstructor: public ScaffoldGraphConstructor {
+protected:
+    shared_ptr<ScaffoldGraph> graph_;
+
+    BaseScaffoldGraphConstructor(const debruijn_graph::Graph& assembly_graph) {
+        graph_ = make_shared<ScaffoldGraph>(assembly_graph);
+    }
+
+    void ConstructFromSingleCondition(const shared_ptr<ConnectionCondition> condition,
+                                      bool use_terminal_vertices_only);
+
+    void ConstructFromConditions(vector<shared_ptr<ConnectionCondition>> &connection_conditions,
+                                 bool use_terminal_vertices_only = false);
+
+    void ConstructFromSet(const set<EdgeId> edge_set,
+                          vector<shared_ptr<ConnectionCondition>> &connection_conditions,
+                          bool use_terminal_vertices_only = false);
+
+    void ConstructFromEdgeConditions(const EdgeCondition& edge_condition,
+                                     vector<shared_ptr<ConnectionCondition>> &connection_conditions,
+                                     bool use_terminal_vertices_only = false);
+};
+
+
+class SimpleScaffoldGraphConstructor: public BaseScaffoldGraphConstructor {
+protected:
+    const set<EdgeId>& edge_set_;
+    vector<shared_ptr<ConnectionCondition>>& connection_conditions_;
+
+public:
+    SimpleScaffoldGraphConstructor(const debruijn_graph::Graph& assembly_graph,
+                                    const set<EdgeId>& edge_set,
+                                    vector<shared_ptr<ConnectionCondition>> &connection_conditions):
+        BaseScaffoldGraphConstructor(assembly_graph),
+        edge_set_(edge_set), connection_conditions_(connection_conditions) {}
+
+    shared_ptr<ScaffoldGraph> Construct() override;
+};
+
+class DefaultScaffoldGraphConstructor: public SimpleScaffoldGraphConstructor {
+protected:
+    const EdgeCondition& edge_condition_;
+
+public:
+    DefaultScaffoldGraphConstructor(const debruijn_graph::Graph& assembly_graph,
+                                    const set<EdgeId>& edge_set,
+                                    vector<shared_ptr<ConnectionCondition>> &connection_conditions,
+                                    const EdgeCondition& edge_condition):
+        SimpleScaffoldGraphConstructor(assembly_graph, edge_set, connection_conditions),
+        edge_condition_(edge_condition)
+    {}
+
+    shared_ptr<ScaffoldGraph> Construct() override;
+};
+
+
+} //scaffold_graph
+} //path_extend
+
diff --git a/src/debruijn/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp b/src/debruijn/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp
new file mode 100644
index 0000000..ed9f254
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp
@@ -0,0 +1,71 @@
+//
+// Created by andrey on 21.09.15.
+//
+
+#include "scaffold_graph_visualizer.hpp"
+
+namespace path_extend{ namespace scaffold_graph {
+
+const map<size_t, string> ScaffoldEdgeColorer::color_map =
+        {{(size_t) -1, "black"},
+         {0, "red"},
+         {1, "blue"},
+         {2, "green"},
+         {3, "magenta"},
+         {4, "orange"},
+         {5, "cyan"}};
+
+const string ScaffoldEdgeColorer::default_color = "black";
+
+string ScaffoldGraphLabeler::label(EdgeId e) const {
+    return "ID: " + ToString(e.getId()) +
+        "\\n Weight: " + ToString(e.getWeight()) +
+        "\\n Lib#: " + ToString(e.getColor());
+}
+
+string ScaffoldGraphLabeler::label(VertexId v) const {
+    return "ID: " + ToString(graph_.int_id(v)) +
+        "\\n Len: " + ToString(graph_.AssemblyGraph().length(v)) +
+        "\\n Cov: " + ToString(graph_.AssemblyGraph().coverage(v));
+}
+
+void ScaffoldGraphVisualizer::Visualize(GraphPrinter<ScaffoldGraph> &printer) {
+    printer.open();
+    printer.AddVertices(graph_.vbegin(), graph_.vend());
+    for (auto e : graph_.edges()) {
+        printer.AddEdge(e);
+    }
+    printer.close();
+}
+
+void ScaffoldGraphVisualizer::Visualize(ostream &os, CompositeGraphColorer<ScaffoldGraph>& colorer) {
+    ScaffoldGraphLabeler labeler(graph_);
+    EmptyGraphLinker<ScaffoldGraph> linker;
+
+    if (paired_) {
+        PairedGraphPrinter <ScaffoldGraph> printer(graph_, os, labeler, colorer, linker);
+        Visualize(printer);
+    } else {
+        SingleGraphPrinter <ScaffoldGraph> printer(graph_, os, labeler, colorer, linker);
+        Visualize(printer);
+    }
+}
+
+string ScaffoldEdgeColorer::GetValue(ScaffoldGraph::EdgeId e) const {
+    auto it = color_map.find(e.getColor());
+    if (it != color_map.end()) {
+        return it->second;
+    }
+    return default_color;
+}
+
+string ScaffoldVertexSetColorer::GetValue(ScaffoldGraph::VertexId v) const {
+    if (vertex_set_.count(v) > 0)
+        return "white";
+    return "yellow";
+}
+} //scaffold_graph
+} //path_extend
+
+
+
diff --git a/src/debruijn/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp b/src/debruijn/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp
new file mode 100644
index 0000000..cd42022
--- /dev/null
+++ b/src/debruijn/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp
@@ -0,0 +1,73 @@
+//
+// Created by andrey on 21.09.15.
+//
+
+#ifndef PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
+#define PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
+
+#include "graphio.hpp"
+#include "scaffold_graph.hpp"
+
+namespace path_extend { namespace scaffold_graph {
+
+using namespace omnigraph::visualization;
+
+
+class ScaffoldGraphLabeler : public GraphLabeler<ScaffoldGraph> {
+
+private:
+    const ScaffoldGraph &graph_;
+
+public:
+    ScaffoldGraphLabeler(const ScaffoldGraph &graph) : graph_(graph) {
+    }
+
+    string label(VertexId v) const;
+
+    string label(EdgeId e) const;
+};
+
+
+class ScaffoldEdgeColorer : public ElementColorer<ScaffoldGraph::EdgeId> {
+private:
+    static const map<size_t, string> color_map;
+
+    static const string default_color;
+
+public:
+    string GetValue(ScaffoldGraph::EdgeId e) const;
+};
+
+
+class ScaffoldVertexSetColorer : public ElementColorer<ScaffoldGraph::VertexId> {
+ private:
+  set<ScaffoldGraph::VertexId> vertex_set_;
+
+ public:
+  ScaffoldVertexSetColorer(const set<ScaffoldGraph::VertexId>& vertex_set): vertex_set_(vertex_set) {
+  }
+
+    string GetValue(ScaffoldGraph::VertexId v) const;
+};
+
+class ScaffoldGraphVisualizer {
+
+    const ScaffoldGraph &graph_;
+    const bool paired_;
+
+private:
+    void Visualize(GraphPrinter<ScaffoldGraph> &printer);
+
+public:
+    ScaffoldGraphVisualizer(const ScaffoldGraph &graph, bool paired = true) :
+            graph_(graph), paired_(paired) {
+    }
+
+    void Visualize(ostream &os, CompositeGraphColorer<ScaffoldGraph>& colorer);
+};
+
+} //scaffold_graph
+} //path_extend
+
+
+#endif //PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
diff --git a/src/debruijn/path_extend/split_graph_pair_info.hpp b/src/debruijn/path_extend/split_graph_pair_info.hpp
index 3941652..2a739b7 100644
--- a/src/debruijn/path_extend/split_graph_pair_info.hpp
+++ b/src/debruijn/path_extend/split_graph_pair_info.hpp
@@ -80,7 +80,7 @@ struct PairInfo {
     double distance_;
     size_t count_;
 
-    PairInfo() 
+    PairInfo()
             : weight_(0.), distance_(0.), count_(0) {}
 
     PairInfo(double weight, double distance, size_t count = 0)
@@ -266,40 +266,46 @@ public:
 
     }
 
-    ~SplitGraphPairInfo() {}
-
-    virtual void StartProcessLibrary(size_t threads_count) {
+    void StartProcessLibrary(size_t threads_count) override {
         baskets_buffer_.clear();
-        for (size_t i = 0; i < threads_count; ++i) {
-            baskets_buffer_.push_back(
-                    new BasketsPairInfoIndex(gp_, basket_size_));
-        }
+        for (size_t i = 0; i < threads_count; ++i)
+            baskets_buffer_.emplace_back(gp_, basket_size_);
     }
 
-    virtual void ProcessPairedRead(size_t thread_index,
-                                   const MappingPath<EdgeId>& read1,
-                                   const MappingPath<EdgeId>& read2,
-                                   size_t dist) {
-        ProcessPairedRead(*baskets_buffer_[thread_index], read1, read2, dist);
+    void ProcessPairedRead(size_t thread_index,
+                           const io::PairedRead& r,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
+        ProcessPairedRead(baskets_buffer_[thread_index], r.first().size(), r.second().size(),
+                          read1, read2, r.distance());
     }
 
-    virtual void ProcessSingleRead(size_t, const MappingPath<EdgeId>&) {
+    void ProcessPairedRead(size_t thread_index,
+                           const io::PairedReadSeq& r,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
+        ProcessPairedRead(baskets_buffer_[thread_index], r.first().size(), r.second().size(),
+                          read1, read2, r.distance());
+    }
+
+    void ProcessSingleRead(size_t, const io::SingleRead&, const MappingPath<EdgeId>&) override {
         //only paired reads are interesting
     }
 
-    virtual void MergeBuffer(size_t thread_index) {
-        basket_index_.AddAll(*baskets_buffer_[thread_index]);
-        baskets_buffer_[thread_index]->Clear();
+    void ProcessSingleRead(size_t, const io::SingleReadSeq&, const MappingPath<EdgeId>&) override {
+        //only paired reads are interesting
+    }
+    void MergeBuffer(size_t thread_index) override {
+        basket_index_.AddAll(baskets_buffer_[thread_index]);
+        baskets_buffer_[thread_index].Clear();
     }
 
-    virtual void StopProcessLibrary() {
-        for (size_t i = 0; i < baskets_buffer_.size(); ++i) {
+    void StopProcessLibrary() override {
+        for (size_t i = 0; i < baskets_buffer_.size(); ++i)
             MergeBuffer(i);
-        }
+
         FindThreshold();
-        for (size_t i = 0; i < baskets_buffer_.size(); ++i) {
-            delete baskets_buffer_[i];
-        }
+
         baskets_buffer_.clear();
     }
 
@@ -309,22 +315,23 @@ public:
 
 private:
     void FindThreshold() {
-        std::ofstream ideal;
         size_t min_long_edge = basket_size_;
         const Graph& g = gp_.g;
         vector<double> good_pi;
         vector<double> bad_pi;
         double insert_size_min = (double) is_ - 2. * (double) is_var_;
         double insert_size_max = (double) is_ + 2. * (double) is_var_;
-        for (auto edge = g.ConstEdgeBegin(); !edge.IsEnd(); ++edge) {
-            if (g.length(*edge) > min_long_edge) {
-                if (g.int_id(*edge) <= 0)
+        for (auto e = g.ConstEdgeBegin(); !e.IsEnd(); ++e) {
+            EdgeId edge = *e;
+
+            if (g.length(edge) > min_long_edge) {
+                if (g.int_id(edge) <= 0)
                     continue;
 
-                EdgePairInfo& edge_pi = basket_index_.GetEdgePairInfo(*edge);
+                EdgePairInfo& edge_pi = basket_index_.GetEdgePairInfo(edge);
                 if (edge_pi.size() == 0)
                     continue;
-                size_t count_backets = LastBasketIndex(*edge, (int) insert_size_max,
+                size_t count_backets = LastBasketIndex(edge, (int) insert_size_max,
                                                        edge_pi);
                 for (size_t index = 0; index <= count_backets; ++index) {
                     map<Basket, PairInfo>& basket_info = edge_pi.GetInfo(index);
@@ -332,12 +339,10 @@ private:
                                                           (int) insert_size_min,
                                                           (int) insert_size_max,
                                                           edge_pi);
-                    for (auto iter = basket_info.begin();
-                            iter != basket_info.end(); ++iter) {
+                    for (auto iter = basket_info.begin(); iter != basket_info.end(); ++iter) {
                         PairInfo& pi = iter->second;
-                        if (iter->first.edgeId() == *edge
-                                && pair_baskets.find(iter->first.index())
-                                        != pair_baskets.end()) {
+                        if (iter->first.edgeId() == edge &&
+                            pair_baskets.find(iter->first.index()) != pair_baskets.end()) {
                             good_pi.push_back(GetNormalizedWeight(pi));
                         } else {
                             bad_pi.push_back(GetNormalizedWeight(pi));
@@ -386,8 +391,8 @@ private:
                 / ideal_pi_counter_.IdealPairedInfo(basket_size_, basket_size_,
                                                     (int) pi.distance_);
     }
-
-    void ProcessPairedRead(BasketsPairInfoIndex& basket_index,
+    
+    void InnerProcess(BasketsPairInfoIndex& basket_index,
                            const MappingPath<EdgeId>& path1,
                            const MappingPath<EdgeId>& path2,
                            size_t read_distance) {
@@ -416,6 +421,17 @@ private:
         }
     }
 
+    void ProcessPairedRead(BasketsPairInfoIndex& basket_index,
+                           size_t r1_length,
+                           size_t r2_length,
+                           const MappingPath<EdgeId>& path1,
+                           const MappingPath<EdgeId>& path2,
+                           size_t read_distance) {
+        InnerProcess(basket_index, path1, path2, read_distance);
+        InnerProcess(basket_index, ConjugateMapping(gp_.g, path2, r2_length),
+                     ConjugateMapping(gp_.g, path1, r1_length), read_distance);
+    }
+
     const conj_graph_pack& gp_;
     size_t is_;
     size_t is_var_;
@@ -423,7 +439,7 @@ private:
     size_t is_max_;
     size_t basket_size_;
     BasketsPairInfoIndex basket_index_;
-    vector<BasketsPairInfoIndex*> baskets_buffer_;
+    vector<BasketsPairInfoIndex> baskets_buffer_;
     double threshold_;
     IdealPairInfoCounter ideal_pi_counter_;
 };
diff --git a/src/debruijn/path_extend/weight_counter.hpp b/src/debruijn/path_extend/weight_counter.hpp
index 8788ef6..b2d8ef6 100644
--- a/src/debruijn/path_extend/weight_counter.hpp
+++ b/src/debruijn/path_extend/weight_counter.hpp
@@ -55,8 +55,6 @@ struct EdgeWithPairedInfo {
 			e_(e_), pi_(pi) {
 
 	}
-protected:
-    DECL_LOGGER("WeightCounter");
 };
 
 struct EdgeWithDistance {
@@ -75,385 +73,283 @@ struct EdgeWithDistance {
 	    }
 	};
 
-	static DistanceComparator comparator;
-protected:
-    DECL_LOGGER("WeightCounter");
+	//static DistanceComparator comparator;
 };
 
-class ExtentionAnalyzer {
+class IdealInfoProvider {
+public:
+    virtual ~IdealInfoProvider() {}
 
-protected:
-	const Graph& g_;
-	PairedInfoLibrary& lib_;
+	virtual std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const = 0;
+};
 
+class BasicIdealInfoProvider : public IdealInfoProvider {
+	const shared_ptr<PairedInfoLibrary> lib_;
 public:
-	ExtentionAnalyzer(const Graph& g, PairedInfoLibrary& l) :
-			g_(g), lib_(l) {
-	}
-
-	PairedInfoLibrary& getLib() {
-		return lib_;
-	}
+    BasicIdealInfoProvider(const shared_ptr<PairedInfoLibrary>& lib) : lib_(lib) {
+    }
 
-	void FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate,
-                          std::vector<EdgeWithPairedInfo>& edges) {
-        edges.clear();
+	std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const override {
+        std::vector<EdgeWithPairedInfo> covered;
         for (int i = (int) path.Size() - 1; i >= 0; --i) {
-            double w = lib_.IdealPairedInfo(path[i], candidate,
+            double w = lib_->IdealPairedInfo(path[i], candidate,
                                             (int) path.LengthAt(i));
+            //FIXME think if we need extremely low ideal weights
             if (math::gr(w, 0.)) {
-                edges.push_back(EdgeWithPairedInfo(i, w));
+                covered.push_back(EdgeWithPairedInfo(i, w));
             }
         }
+        return covered;
     }
-
-	void FindForwardEdges(const BidirectionalPath& /*path*/, EdgeId candidate,
-			std::vector<EdgeWithDistance>& edges) {
-		edges.clear();
-		edges.push_back(EdgeWithDistance(candidate, 0));
-
-		size_t i = 0;
-		while (i < edges.size()) {
-			size_t currentDistance = edges[i].d_ + g_.length(edges[i].e_);
-			auto nextEdges = g_.OutgoingEdges(g_.EdgeEnd(edges[i].e_));
-
-			if (edges[i].d_ < (int) lib_.GetISMax()) {
-				for (auto edge = nextEdges.begin(); edge != nextEdges.end();
-						++edge) {
-					edges.push_back(EdgeWithDistance(*edge, currentDistance));
-				}
-			}
-			++i;
-		}
-	}
-protected:
-    DECL_LOGGER("WeightCounter");
 };
 
 class WeightCounter {
 
 protected:
 	const Graph& g_;
-	PairedInfoLibraries libs_;
-	std::vector<shared_ptr<ExtentionAnalyzer> > analyzers_;
-	double avrageLibWeight_;
-
-	double threshold_;
-	bool normalizeWeight_;
-
-	std::map<size_t, double> excluded_edges_;
+	const shared_ptr<PairedInfoLibrary> lib_;
+	bool normalize_weight_;
+    shared_ptr<IdealInfoProvider> ideal_provider_;
 
 public:
 
-	WeightCounter(const Graph& g, PairedInfoLibraries& libs, double threshold = 0.0) :
-			g_(g), libs_(libs), threshold_(threshold), normalizeWeight_(true), excluded_edges_() {
-	    InitAnalyzers();
+	WeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib, 
+                  bool normalize_weight = true, 
+                  shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
+			g_(g), lib_(lib), normalize_weight_(normalize_weight), ideal_provider_(ideal_provider) {
+       if (!ideal_provider_) {
+           ideal_provider_ = make_shared<BasicIdealInfoProvider>(lib);
+       }
 	}
 
-    WeightCounter(const Graph& g, shared_ptr<PairedInfoLibrary> lib, double threshold = 0.0) :
-            g_(g), libs_(), threshold_(threshold), normalizeWeight_(true), excluded_edges_() {
-        libs_.push_back(lib);
-        InitAnalyzers();
-    }
+	virtual std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
+                                    int gap = 0) const = 0;
 
+	virtual double CountWeight(const BidirectionalPath& path, EdgeId e,
+			const std::set<size_t>& excluded_edges = std::set<size_t>(), int gapLength = 0) const = 0;
 
-	virtual ~WeightCounter() {
-		/*for (auto iter = analyzers_.begin(); iter != analyzers_.end(); ++iter) {
-			delete *iter;
-		}*/
-		analyzers_.clear();
+	const PairedInfoLibrary& lib() const {
+		return *lib_;
 	}
 
-	virtual bool PairInfoExist(EdgeId first, EdgeId second, int distance) = 0;
+    const shared_ptr<PairedInfoLibrary> get_libptr() const {
+        return lib_;
+    };
 
-	virtual double CountWeight(BidirectionalPath& path, EdgeId e,
-			int gapLength = 0) = 0;
+private:
+    DECL_LOGGER("WeightCounter");
+};
 
-	virtual void GetDistances(EdgeId e1, EdgeId e2, std::vector<int>& dist,
-			std::vector<double>& w) = 0;
+class ReadCountWeightCounter: public WeightCounter {
 
-	virtual double CountIdealInfo(EdgeId e1, EdgeId e2, size_t dist) = 0;
+	std::vector<EdgeWithPairedInfo> CountLib(const BidirectionalPath& path, EdgeId e,
+			int add_gap = 0) const {
+        std::vector<EdgeWithPairedInfo> answer;
 
-	virtual double CountIdealInfo(const BidirectionalPath& p, EdgeId e,
-			size_t gap) = 0;
+		for (const EdgeWithPairedInfo& e_w_pi : ideal_provider_->FindCoveredEdges(path, e)) {
+			double w = lib_->CountPairedInfo(path[e_w_pi.e_], e,
+					(int) path.LengthAt(e_w_pi.e_) + add_gap);
 
-	virtual bool IsExtensionPossible(BidirectionalPath& path, EdgeId e) {
-		return IsExtensionPossible(CountWeight(path, e)) ? true : false;
-	}
+			if (normalize_weight_) {
+				w /= e_w_pi.pi_;
+			}
+			answer.push_back(EdgeWithPairedInfo(e_w_pi.e_, w));
+		}
 
-	virtual bool IsExtensionPossible(double weight) const {
-		return math::ge(weight, threshold_) ? true : false;
+		return answer;
 	}
 
-	std::map<size_t, double>& GetExcludedEdges() {
-		return excluded_edges_;
-	}
+public:
 
-	double getThreshold() const {
-		return threshold_;
+	ReadCountWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
+                            bool normalize_weight = true, 
+                            shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
+			WeightCounter(g, lib, normalize_weight, ideal_provider) {
 	}
 
-	bool isNormalizeWeight() const {
-		return normalizeWeight_;
-	}
+	double CountWeight(const BidirectionalPath& path, EdgeId e, 
+                        const std::set<size_t>& excluded_edges, int gap) const override {
+		double weight = 0.0;
 
-	void setNormalizeWeight(bool normalizeWeight) {
-		this->normalizeWeight_ = normalizeWeight;
-	}
+        for (const auto& e_w_pi : CountLib(path, e, gap)) {
+		    if (!excluded_edges.count(e_w_pi.e_)) {
+			    weight += e_w_pi.pi_;
+		    }
+        }
 
-	void setThreshold(double threshold) {
-		this->threshold_ = threshold;
+		return weight;
 	}
 
-	PairedInfoLibraries& getLibs() {
-		return libs_;
+	std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
+                                    int gap = 0) const override {
+        std::set<size_t> answer;
+        for (const auto& e_w_pi : CountLib(path, e, gap)) {
+            if (math::gr(e_w_pi.pi_, 0.)) {
+                answer.insert(e_w_pi.e_);
+            }
+        }
+        
+        return answer;
 	}
-protected:
-    DECL_LOGGER("WeightCounter");
 
-private:
-    void InitAnalyzers() {
-        avrageLibWeight_ = 0.0;
-        analyzers_.reserve(libs_.size());
-        for (auto iter = libs_.begin(); iter != libs_.end(); ++iter) {
-            analyzers_.push_back(std::make_shared<ExtentionAnalyzer>(g_, **iter));
-            avrageLibWeight_ += (*iter)->GetCoverageCoeff();
-        }
-        avrageLibWeight_ /= (double) max(libs_.size(), (size_t) 1);
-    }
 };
 
-class ReadCountWeightCounter: public WeightCounter {
+class PathCoverWeightCounter: public WeightCounter {
+    double single_threshold_;
 
-protected:
+	double TotalIdealNonExcluded(const std::vector<EdgeWithPairedInfo>& ideally_covered_edges, 
+                        const std::set<size_t>& excluded_edges) const {
+		double ideal_total = 0.0;
 
-	double CountSingleLib(int libIndex, BidirectionalPath& path, EdgeId e,
-			int additionalGapLength = 0.0) {
+		for (const EdgeWithPairedInfo& e_w_pi : ideally_covered_edges) {
+            if (!excluded_edges.count(e_w_pi.e_))
+			    ideal_total += e_w_pi.pi_;
+		}
 
-		double weight = 0.0;
+		return ideal_total;
+	}
 
-		std::vector<EdgeWithPairedInfo> coveredEdges;
-		analyzers_[libIndex]->FindCoveredEdges(path, e, coveredEdges);
+	std::vector<EdgeWithPairedInfo> CountLib(const BidirectionalPath& path, EdgeId e,
+			const std::vector<EdgeWithPairedInfo>& ideally_covered_edges, int add_gap = 0) const {
+        std::vector<EdgeWithPairedInfo> answer;
 
-		for (auto iter = coveredEdges.begin(); iter != coveredEdges.end();
-				++iter) {
-			if (excluded_edges_.find((int) iter->e_) != excluded_edges_.end()) {
-				continue;
-			}
-			double w = libs_[libIndex]->CountPairedInfo(path[iter->e_], e,
-					(int) path.LengthAt(iter->e_) + additionalGapLength);
+		for (const EdgeWithPairedInfo& e_w_pi : ideally_covered_edges) {
+			double ideal_weight = e_w_pi.pi_;
+
+			double weight = lib_->CountPairedInfo(
+					path[e_w_pi.e_], e,
+					(int) path.LengthAt(e_w_pi.e_) + add_gap);
 
-			if (normalizeWeight_) {
-				w /= iter->pi_;
+			if (normalize_weight_) {
+				weight /= ideal_weight;
 			}
-			weight += w;
 
+			if (math::ge(weight, single_threshold_)) {
+				answer.push_back(EdgeWithPairedInfo(e_w_pi.e_, ideal_weight));
+			}
 		}
 
-		return weight;
+        return answer;
 	}
 
 public:
 
-	ReadCountWeightCounter(const Graph& g_, PairedInfoLibraries& libs,
-			double threshold_ = 0.0) :
-			WeightCounter(g_, libs, threshold_) {
-	}
-
-    ReadCountWeightCounter(const Graph& g_, shared_ptr<PairedInfoLibrary> lib,
-            double threshold_ = 0.0) :
-            WeightCounter(g_, lib, threshold_) {
-    }
-
-	virtual void GetDistances(EdgeId e1, EdgeId e2, std::vector<int>& dist,
-			std::vector<double>& w) {
-		for (size_t i = 0; i < libs_.size(); ++i) {
-			libs_[i]->CountDistances(e1, e2, dist, w);
-		}
-	}
-
-	virtual double CountIdealInfo(EdgeId e1, EdgeId e2, size_t dist) {
-		double res = 0.0;
-		for (size_t i = 0; i < libs_.size(); ++i) {
-			res += libs_[i]->IdealPairedInfo(e1, e2, (int) dist);
-		}
-		return res;
-	}
-
-	virtual double CountIdealInfo(const BidirectionalPath& p, EdgeId e,
-			size_t gap) {
-		double w = 0.0;
-		for (int i = (int) p.Size() - 1; i >= 0; --i) {
-			w += CountIdealInfo(p[i], e, gap + p.LengthAt(i));
-		}
-		return w;
+	PathCoverWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
+                            bool normalize_weight = true, 
+                            double single_threshold = -1.,
+                            shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
+			WeightCounter(g, lib, normalize_weight, ideal_provider), single_threshold_(single_threshold) {
+        if (math::ls(single_threshold_, 0.)) {
+            single_threshold_ = lib_->GetSingleThreshold();
+        }
 	}
 
-	virtual double CountWeight(BidirectionalPath& path, EdgeId e,
-			int gapLength = 0) {
-		double weight = 0.0;
-		std::vector<EdgeWithDistance> edges;
+	double CountWeight(const BidirectionalPath& path, EdgeId e,
+			const std::set<size_t>& excluded_edges, int gap) const override {
+        double lib_weight = 0.;
+        const auto ideal_coverage = ideal_provider_->FindCoveredEdges(path, e);
 
-		for (size_t i = 0; i < libs_.size(); ++i) {
-			weight += CountSingleLib((int) i, path, e, gapLength);
-		}
+        for (const auto& e_w_pi : CountLib(path, e, ideal_coverage, gap)) {
+            if (!excluded_edges.count(e_w_pi.e_)) {
+                lib_weight += e_w_pi.pi_;
+            }
+        }
 
-		return weight;
+        double total_ideal_coverage = TotalIdealNonExcluded(ideal_coverage, excluded_edges);
+		return math::eq(total_ideal_coverage, 0.) ? 0. : lib_weight / total_ideal_coverage;
 	}
 
-	virtual bool PairInfoExist(EdgeId first, EdgeId second, int distance) {
-		for (size_t libIndex = 0; libIndex < libs_.size(); ++libIndex) {
-			double w = libs_[libIndex]->CountPairedInfo(first, second,
-					distance);
-			double w_ideal = libs_[libIndex]->IdealPairedInfo(first, second,
-					distance);
-			if (w_ideal == 0) {
-				continue;
-			}
-			if (normalizeWeight_) {
-				w /= w_ideal;
-			}
-			if (w > 0) {
-				return true;
-			}
-		}
-		return false;
+	std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
+                                    int gap = 0) const override {
+        std::set<size_t> answer;
+        for (const auto& e_w_pi : CountLib(path, e, ideal_provider_->FindCoveredEdges(path, e), gap)) {
+            if (math::gr(e_w_pi.pi_, 0.)) {
+                answer.insert(e_w_pi.e_);
+            }
+        }
+		return answer;
 	}
-
 };
 
-class PathCoverWeightCounter: public WeightCounter {
-	double single_threshold_;
-    double correction_coeff_;
-protected:
-
-	double CountSingleLib(int libIndex, BidirectionalPath& path, EdgeId e,
-			int additionalGapLength = 0.0) {
-		PairedInfoLibrary& pairedInfoLibrary = *libs_[libIndex];
-		double weight = 0.0;
-		double idealWeight = 0.0;
-
-		std::vector<EdgeWithPairedInfo> coveredEdges;
-		analyzers_[libIndex]->FindCoveredEdges(path, e, coveredEdges);
-		for (auto iter = coveredEdges.begin(); iter != coveredEdges.end();
-				++iter) {
-			double ideal_weight = iter->pi_;
-			if (excluded_edges_.find(iter->e_) != excluded_edges_.end()) {
-				if (!math::gr(excluded_edges_[iter->e_], 0.0) or !math::gr(ideal_weight, 0.0)) {
-				    continue;
-				} else {
-					ideal_weight = excluded_edges_[iter->e_];
-				}
-			}
-			double threshold =
-					pairedInfoLibrary.GetSingleThreshold() >= 0.0 ?
-							pairedInfoLibrary.GetSingleThreshold() :
-							single_threshold_;
-
-            threshold *= correction_coeff_;
-			TRACE("Threshold: " << threshold);
-
-			double singleWeight = libs_[libIndex]->CountPairedInfo(
-					path[iter->e_], e,
-					(int) path.LengthAt(iter->e_) + additionalGapLength);
-			/*DEBUG("weight edge " << iter->e_ <<
-			      " weight " << singleWeight
-			      << " norm " <<singleWeight / ideal_weight
-			      <<" threshold " << threshold
-			      <<" used " << math::ge(singleWeight, threshold));*/
-
-			if (normalizeWeight_) {
-				singleWeight /= ideal_weight;
-			}
-			if (math::ge(singleWeight, threshold)) {
-				weight += ideal_weight;
-			}
-			idealWeight += ideal_weight;
-		}
-
-		return math::gr(idealWeight, 0.0) ? weight / idealWeight : 0.0;
-	}
+class CoverageAwareIdealInfoProvider : public BasicIdealInfoProvider {
+    static constexpr double MAGIC_COEFF = 2.;
+	const Graph& g_;
+    size_t read_length_; 
+    size_t estimation_edge_length_;
 
 public:
+    //works for single lib only!!!
+    double EstimatePathCoverage(const BidirectionalPath& path) const  {
+        double answer = -1.0;
+        for (int i = (int) path.Size() - 1; i >= 0; --i) {
+            EdgeId e = path.At(i);
+            if (g_.length(e) > estimation_edge_length_) {
+                if (answer < 0 || g_.coverage(e) < answer) {
+                    answer = g_.coverage(e);
+                }
+            }
+        }
+        return answer;
+    }
 
-	PathCoverWeightCounter(const Graph& g, PairedInfoLibraries& libs,
-			double threshold_ = 0.0, double single_threshold = 0.0,
-			double correction_coeff = 1.0) :
-			WeightCounter(g, libs, threshold_), 
-            single_threshold_(single_threshold),
-			correction_coeff_(correction_coeff) {
+    CoverageAwareIdealInfoProvider(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
+                                    size_t read_length, size_t estimation_edge_length) : 
+                BasicIdealInfoProvider(lib), g_(g), read_length_(read_length), 
+                estimation_edge_length_(estimation_edge_length) {
+        VERIFY(read_length_ > g_.k());
+    }
 
-	}
+	std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const override {
+		double estimated_coverage = EstimatePathCoverage(path);
+        VERIFY(math::gr(estimated_coverage, 0.));
 
-	PathCoverWeightCounter(const Graph& g, shared_ptr<PairedInfoLibrary> lib,
-                           double threshold = 0.0, double single_threshold = 0.0,
-                           double correction_coeff = 1.0)
-            : WeightCounter(g, lib, threshold),
-              single_threshold_(single_threshold),
-              correction_coeff_(correction_coeff) {
+	    double correction_coeff = estimated_coverage / ((double(read_length_) - double(g_.k())) * MAGIC_COEFF);
 
+        std::vector<EdgeWithPairedInfo> answer = BasicIdealInfoProvider::FindCoveredEdges(path, candidate);
+        for (auto& e_w_pi : answer) {
+            e_w_pi.pi_ *= correction_coeff;
+        }
+        return answer;
     }
+};
 
+//FIXME optimize number of calls of EstimatePathCoverage(path)
+class MetagenomicWeightCounter: public WeightCounter {
+    static const size_t LENGTH_BOUND = 500;
+    shared_ptr<CoverageAwareIdealInfoProvider> cov_info_provider_;
+    shared_ptr<WeightCounter> normalizing_wc_;
+    shared_ptr<WeightCounter> raw_wc_;
 
-	virtual void GetDistances(EdgeId e1, EdgeId e2, std::vector<int>& dist,
-			std::vector<double>& w) {
-		for (size_t i = 0; i < libs_.size(); ++i) {
-			libs_[i]->CountDistances(e1, e2, dist, w);
-		}
-	}
+public:
 
-	virtual double CountIdealInfo(EdgeId e1, EdgeId e2, size_t dist) {
-		double res = 0.0;
-		for (size_t i = 0; i < libs_.size(); ++i) {
-			res += libs_[i]->IdealPairedInfo(e1, e2, (int) dist);
-		}
-		return res;
+	MetagenomicWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
+                             size_t read_length, double normalized_threshold, double raw_threshold, 
+                             size_t estimation_edge_length = LENGTH_BOUND) :
+			WeightCounter(g, lib) {
+        cov_info_provider_ = make_shared<CoverageAwareIdealInfoProvider>(g, lib, read_length, estimation_edge_length);
+        normalizing_wc_ = make_shared<PathCoverWeightCounter>(g, lib, true, normalized_threshold, cov_info_provider_);
+        raw_wc_ = make_shared<PathCoverWeightCounter>(g, lib, false, raw_threshold);
 	}
 
-	virtual double CountIdealInfo(const BidirectionalPath& p, EdgeId e,
-			size_t gap) {
-		double w = 0.0;
-		for (int i = (int) p.Size() - 1; i >= 0; --i) {
-			w += g_.length(p[i]) ?
-					CountIdealInfo(p[i], e, gap + p.LengthAt(i)) > 0 : 0;
-		}
-		return w;
-	}
-
-	virtual double CountWeight(BidirectionalPath& path, EdgeId e,
-			int gapLength = 0) {
-		double weight = 0.0;
-		for (size_t i = 0; i < libs_.size(); ++i) {
-			weight += CountSingleLib((int) i, path, e, gapLength);
-		}
-
-		return weight / (double) max(libs_.size(), (size_t) 1);
+	double CountWeight(const BidirectionalPath& path, EdgeId e,
+			const std::set<size_t>& excluded_edges, int gap = 0) const override {
+        if (math::gr(cov_info_provider_->EstimatePathCoverage(path), 0.)) {
+            return normalizing_wc_->CountWeight(path, e, excluded_edges, gap);
+        } else {
+            return raw_wc_->CountWeight(path, e, excluded_edges, gap);
+        }
 	}
 
-	virtual bool PairInfoExist(EdgeId first, EdgeId second, int distance) {
-		for (size_t libIndex = 0; libIndex < libs_.size(); ++libIndex) {
-			double w = libs_[libIndex]->CountPairedInfo(first, second,
-					distance);
-			double w_ideal = libs_[libIndex]->IdealPairedInfo(first, second,
-					distance);
-			if (w_ideal == 0.0) {
-				continue;
-			}
-			if (normalizeWeight_) {
-				w /= w_ideal;
-			}
-			double threshold =
-					libs_[libIndex]->GetSingleThreshold() >= 0.0 ?
-							libs_[libIndex]->GetSingleThreshold() :
-							single_threshold_;
-			if (w > threshold) {
-				return true;
-			}
-		}
-		return false;
+	std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
+                                    int gap = 0) const override {
+        if (math::gr(cov_info_provider_->EstimatePathCoverage(path), 0.)) {
+            return normalizing_wc_->PairInfoExist(path, e, gap);
+        } else {
+            return raw_wc_->PairInfoExist(path, e, gap);
+        }
 	}
-
 };
+
 struct PathsPairIndexInfo {
     PathsPairIndexInfo(size_t edge1_, size_t edge2_, double w_, double dist_)
             : edge1(edge1_),
@@ -467,6 +363,7 @@ struct PathsPairIndexInfo {
     double w;
     double dist;
 };
+
 class PathsWeightCounter {
 public:
     PathsWeightCounter(const Graph& g, shared_ptr<PairedInfoLibrary> lib, size_t min_read_count);
@@ -481,8 +378,8 @@ public:
                          size_t to1, EdgeId edge, size_t gap) const;
     void SetCommonWeightFrom(size_t iedge, double weight);
     void ClearCommonWeight();
-    void FindJumpCandidates(EdgeId e, int min_dist, int max_dist, size_t min_len, set<EdgeId>& result);
-    void FindJumpEdges(EdgeId e, set<EdgeId>& candidates, int min_dist, int max_dist, vector<EdgeWithDistance>& result);
+    void FindJumpCandidates(EdgeId e, int min_dist, int max_dist, size_t min_len, set<EdgeId>& result) const;
+    void FindJumpEdges(EdgeId e, set<EdgeId>& candidates, int min_dist, int max_dist, vector<EdgeWithDistance>& result) const;
     const shared_ptr<PairedInfoLibrary> GetLib() const {
         return lib_;
     }
@@ -503,7 +400,6 @@ private:
     shared_ptr<PairedInfoLibrary> lib_;
     std::map<size_t, double> common_w_;
     size_t min_read_count_;
-protected:
     DECL_LOGGER("WeightCounter");
 };
 inline PathsWeightCounter::PathsWeightCounter(const Graph& g, shared_ptr<PairedInfoLibrary>lib, size_t min_read_count):g_(g), lib_(lib), min_read_count_(min_read_count){
@@ -597,11 +493,11 @@ inline map<size_t, double> PathsWeightCounter::FindPairInfoFromPath(
     FindPairInfo(path1, from1, to1, path2, from2, to2, pi, ideal_pi);
     return pi;
 }
-inline void PathsWeightCounter::FindJumpCandidates(EdgeId e, int min_dist, int max_dist, size_t min_len, set<EdgeId>& result) {
+inline void PathsWeightCounter::FindJumpCandidates(EdgeId e, int min_dist, int max_dist, size_t min_len, set<EdgeId>& result) const {
     result.clear();
     lib_->FindJumpEdges(e, result, min_dist, max_dist, min_len);
 }
-inline void PathsWeightCounter::FindJumpEdges(EdgeId e, set<EdgeId>& edges, int min_dist, int max_dist, vector<EdgeWithDistance>& result) {
+inline void PathsWeightCounter::FindJumpEdges(EdgeId e, set<EdgeId>& edges, int min_dist, int max_dist, vector<EdgeWithDistance>& result) const {
     result.clear();
 
     for (auto e2 = edges.begin(); e2 != edges.end(); ++e2) {
@@ -640,7 +536,7 @@ inline double PathsWeightCounter::IdealPI(EdgeId e1, EdgeId e2, int dist) const
 }
 
 inline bool PathsWeightCounter::HasPI(EdgeId e1, EdgeId e2, size_t dist_min, size_t dist_max) const {
-    return lib_->CountPairedInfo(e1, e2, dist_min, dist_max) > min_read_count_;
+    return lib_->CountPairedInfo(e1, e2, (int) dist_min, (int) dist_max) > min_read_count_;
 }
 };
 
diff --git a/src/debruijn/path_utils.hpp b/src/debruijn/path_utils.hpp
index 3085eb5..4f40077 100644
--- a/src/debruijn/path_utils.hpp
+++ b/src/debruijn/path_utils.hpp
@@ -24,23 +24,19 @@ namespace debruijn_graph {
         typename Graph::EdgeId e2,
         size_t min_dist,
         size_t max_dist,
-        PathProcessor<Graph>& path_processor) 
+		const PathProcessor<Graph>& path_processor)
   {
       typedef typename Graph::EdgeId EdgeId;
       typedef vector<EdgeId> Path;
 
-      PathStorageCallback<Graph> callback(g);
       //PathProcessor<Graph> path_processor(g,
                                           //min_dist - g.length(e1),
                                           //max_dist - g.length(e1),
           //g.EdgeEnd(e1), g.EdgeStart(e2), callback);
 
-      path_processor.SetMinLens({min_dist - g.length(e1)});
-      path_processor.SetMaxLen(max_dist - g.length(e1));
-      path_processor.SetEndPoints({g.EdgeStart(e2)});
-      path_processor.SetCallback(&callback);
-      path_processor.ResetCallCount();
-      int error_code = path_processor.Process();
+      PathStorageCallback<Graph> callback(g);
+      int error_code = path_processor.Process(g.EdgeStart(e2), min_dist - g.length(e1),
+                                              max_dist - g.length(e1), callback);
       vector<Path> paths = callback.paths();
 
       vector<EdgeId> result;
@@ -79,12 +75,11 @@ namespace debruijn_graph {
         typename Graph::EdgeId& e2, size_t min_dist,
         size_t max_dist) {
       PathStorageCallback<Graph> callback(g);
-      PathProcessor<Graph> path_processor(g,
+      ProcessPaths(g,
           min_dist,
           max_dist, //0, *cfg::get().ds.IS - K + size_t(*cfg::get().ds.is_var),
           g.EdgeEnd(e1), g.EdgeStart(e2),
           callback);
-      path_processor.Process();
       auto paths = callback.paths();
       return paths;
     }
diff --git a/src/debruijn/positions.hpp b/src/debruijn/positions.hpp
index b75ee40..bb89632 100644
--- a/src/debruijn/positions.hpp
+++ b/src/debruijn/positions.hpp
@@ -90,6 +90,8 @@ void FillPos(gp_t& gp, const Sequence& s, string name) {
     pos_filler.Process(s, name);
 }
 
+
+
 //inline
 //void CollectPositions(conj_graph_pack &gp) {
 //    gp.edge_pos.clear();
diff --git a/src/debruijn/read_converter.hpp b/src/debruijn/read_converter.hpp
index 6c9f185..23e3ca0 100644
--- a/src/debruijn/read_converter.hpp
+++ b/src/debruijn/read_converter.hpp
@@ -26,7 +26,7 @@ namespace debruijn_graph {
 class ReadConverter {
 
 private:
-    const static size_t current_binary_format_version = 8;
+    const static size_t current_binary_format_version = 10;
 
     void convert_reads_to_binary() {
         if (path::FileExists(cfg::get().temp_bin_reads_info)) {
@@ -55,6 +55,7 @@ private:
 
                 auto &dataset = cfg::get_writable().ds.reads;
                 for (size_t i = 0; i < dataset.lib_count(); ++i) {
+                    info >> dataset[i].data().binary_coverted;
                     info >> dataset[i].data().read_length;
                     info >> dataset[i].data().total_nucls;
 
@@ -78,33 +79,47 @@ private:
 
         INFO("Converting reads to binary format (takes a while)");
         for (size_t i = 0; i < dataset.lib_count(); ++i) {
-            INFO("Paired reads for library #" << i);
-            dataset[i].data().thread_num = cfg::get().max_threads;
-            dataset[i].data().paired_read_prefix = cfg::get().paired_read_prefix + "_" + ToString(i);
-
-            io::PairedStreamPtr paired_reader = paired_easy_reader(dataset[i], false, 0, false, false);
-            io::BinaryWriter paired_converter(dataset[i].data().paired_read_prefix, cfg::get().max_threads, cfg::get().buffer_size);
-            io::ReadStreamStat paired_stat = paired_converter.ToBinary(*paired_reader, dataset[i].orientation());
-            paired_stat.read_count_ *= 2;
-            total_stat.merge(paired_stat);
-
-            INFO("Single reads for library #" << i);
-            dataset[i].data().single_read_prefix = cfg::get().single_read_prefix + "_" + ToString(i);
-            io::SingleStreamPtr single_reader = single_easy_reader(dataset[i], false, false);
-            io::BinaryWriter single_converter(dataset[i].data().single_read_prefix, cfg::get().max_threads, cfg::get().buffer_size);
-            io::ReadStreamStat single_stat = single_converter.ToBinary(*single_reader);
-            total_stat.merge(single_stat);
-
-            paired_stat.merge(single_stat);
-            dataset[i].data().read_length = paired_stat.max_len_;
-            dataset[i].data().total_nucls = paired_stat.total_len_;
+            if (cfg::get().bwa.enabled && dataset[i].is_bwa_alignable()) {
+                INFO("Library #" << i << " will be used by BWA only and thus will not be converted");
+                continue;
+            }
+            else if (dataset[i].is_binary_covertable()) {
+                INFO("Paired reads for library #" << i);
+                dataset[i].data().thread_num = cfg::get().max_threads;
+                dataset[i].data().paired_read_prefix = cfg::get().paired_read_prefix + "_" + ToString(i);
+
+                io::PairedStreamPtr paired_reader = paired_easy_reader(dataset[i], false, 0, false, false);
+                io::BinaryWriter paired_converter
+                    (dataset[i].data().paired_read_prefix, cfg::get().max_threads, cfg::get().buffer_size);
+                io::ReadStreamStat paired_stat = paired_converter.ToBinary(*paired_reader, dataset[i].orientation());
+                paired_stat.read_count_ *= 2;
+                total_stat.merge(paired_stat);
+
+                INFO("Single reads for library #" << i);
+                dataset[i].data().single_read_prefix = cfg::get().single_read_prefix + "_" + ToString(i);
+                io::SingleStreamPtr single_reader = single_easy_reader(dataset[i], false, false);
+                io::BinaryWriter single_converter
+                    (dataset[i].data().single_read_prefix, cfg::get().max_threads, cfg::get().buffer_size);
+                io::ReadStreamStat single_stat = single_converter.ToBinary(*single_reader);
+                total_stat.merge(single_stat);
+
+                paired_stat.merge(single_stat);
+                dataset[i].data().read_length = paired_stat.max_len_;
+                dataset[i].data().total_nucls = paired_stat.total_len_;
+                dataset[i].data().binary_coverted = true;
+            }
+            else {
+                INFO("Library #" << i << " doesn't need to be converted");
+            }
         }
         info.open(cfg::get().temp_bin_reads_info.c_str(), std::ios_base::out);
         info << current_binary_format_version << " " << cfg::get().max_threads << " " << cfg::get().ds.reads.lib_count() << " " <<
                 total_stat.read_count_ << " " << total_stat.max_len_ << " " << total_stat.total_len_ << "\n";
 
         for (size_t i = 0; i < dataset.lib_count(); ++i) {
-            info << dataset[i].data().read_length << " " << dataset[i].data().total_nucls << "\n";
+            info << dataset[i].data().binary_coverted
+                << " " << dataset[i].data().read_length
+                << " " << dataset[i].data().total_nucls << "\n";
         }
         info.close();
     }
@@ -126,6 +141,7 @@ io::BinaryPairedStreams raw_paired_binary_readers(const io::SequencingLibrary<de
                                                                    bool followed_by_rc,
                                                                    size_t insert_size = 0) {
     convert_if_needed();
+    VERIFY_MSG(lib.data().binary_coverted, "Lib was not converted to binary, cannot produce binary stream");
 
     io::ReadStreamList<io::PairedReadSeq> paired_streams;
     for (size_t i = 0; i < lib.data().thread_num; ++i) {
@@ -139,6 +155,7 @@ io::BinarySingleStreams raw_single_binary_readers(const io::SequencingLibrary<de
                                                                    bool followed_by_rc,
                                                                    bool including_paired_reads) {
     convert_if_needed();
+    VERIFY_MSG(lib.data().binary_coverted, "Lib was not converted to binary, cannot produce binary stream");
 
     io::BinarySingleStreams single_streams;
     for (size_t i = 0; i < lib.data().thread_num; ++i) {
diff --git a/src/debruijn/repeat.hpp b/src/debruijn/repeat.hpp
deleted file mode 100644
index f5b5966..0000000
--- a/src/debruijn/repeat.hpp
+++ /dev/null
@@ -1,371 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#ifndef REPEAT_HPP
-#define REPET_HPP
-
-namespace debruijn_graph {
-
-	typedef enum { TOPOLOGY, LENGTH, PAIREDINFO } kind_of_repeat;
-
-	bool if_unique_val(double val, const vector<double>& ar, double epsilon=0.000001) {
-		unsigned counter = 0;
-		for (auto it = ar.begin(); it != ar.end(); ++it) {
-			if (fabs(val - *it)<=epsilon) counter+=1;
-		}
-		return (counter==1);
-	}
-
-	double get_max(const vector<vector<double>>& vec, int& max_id_i, int& max_id_j, double epsilon=0.000001) {
-		double max_val = 0;
-		max_id_i = -1;
-		max_id_j = -1;
-		for (unsigned i = 0; i < vec.size(); ++i) {
-			for (unsigned j = 0; j < vec[i].size(); ++j) {
-				if (vec[i][j] - max_val > epsilon ){
-					max_val = vec[i][j];
-					max_id_i = i;
-					max_id_j = j;
-				}
-			}
-		}
-		return max_val;
-	}
-	
-	void hide(vector<vector<double>>& vec, int i, double default_val = -1) {
-		for (unsigned j = 0; j < vec[i].size(); ++j) vec[i][j] = default_val;
-	}
-
-	void hide(vector<vector<double>>& vec, int i, int j, double default_val = -1) {
-		for (unsigned k = 0; k < vec[i].size(); ++k) vec[i][k] = default_val;
-		for (unsigned k = 0; k < vec.size(); ++k) vec[k][j] = default_val;
-	}
-
-	template <class graph_pack>
-	class Repeat {
-		
-		typedef typename Graph::EdgeId EdgeId;
-		typedef typename Graph::VertexId VertexId;
-
-		const graph_pack &gp_;
-		vector<EdgeId> incoming_edges_;
-		vector<EdgeId> outgoing_edges_;
-		vector<EdgeId> component_;
-		const double repeat_length_upper_threshold_;
-		const map<EdgeId, kind_of_repeat> edge_to_kind_;
-		FILE* file;
-
-
-
-		template <class EdgesPositionHandlerT> 
-		bool match( const vector<EdgeId> &path, const unsigned current_id, const unsigned current_start,
-			EdgesPositionHandlerT& ref_pos) {
-			if (path.size() == current_id ){
-				return true;
-			}
-			EdgeId edge = path[current_id];
-			auto pos_it = ref_pos.edges_positions().find(edge);
-			if ( current_id == path.size() - 1 && pos_it->second.size() > 1 ){
-				return false;
-			}
-			bool matched = false;
-			for (size_t i = 0; i < pos_it->second.size(); ++i) {
-				auto start = pos_it->second[i].start();
-				if ( fabs(start - current_start) < 2 ) {
-					auto end = pos_it->second[i].end();
-					matched = match( path, current_id + 1, end + 1, ref_pos);
-				}
-			}
-			return matched;
-		}
-
-		bool MatchReference( const vector<EdgeId>& path) {
-			auto ref_pos = gp_.edge_pos;
-			EdgeId edge = path[0];
-			auto pos_it = ref_pos.edges_positions().find(edge);
-			if ( pos_it->second.size() == 1 ){
-				auto next_start = pos_it->second[0].end();
-				return match( path, 1, next_start + 1, ref_pos );
-			}
-			return false;
-		}
-
-		bool IfContainsLinearlyDependentRows(const vector<vector<double>>& transition_probabilities ) const {
-			for (unsigned i = 0; i < transition_probabilities.size() - 1; ++i) {
-				bool dependent = true;
-				for (unsigned k = i+1; k < transition_probabilities.size(); ++k) 
-					for (unsigned j = 0; j < transition_probabilities.size(); ++j) {
-						if (fabs(transition_probabilities[i][j] - transition_probabilities[k][j]) > 0.001) { 
-							dependent = false;
-							break;
-						}
-					}
-				if (dependent) return true;
-			}
-			return false;
-		}
-	
-		void ChoosePairsGreedy(vector<vector<double>>& transition_probabilities, vector<pair<EdgeId,EdgeId>>& pairs_of_edges, double match_quality_threshold) {
-			if (IfContainsLinearlyDependentRows (transition_probabilities)) 
-				return;
-			unsigned counter(0);
-			while (counter < transition_probabilities.size()) {
-				int max_id_i(0), max_id_j(0);
-				double max_val = get_max(transition_probabilities, max_id_i, max_id_j);
-				if (max_id_i != -1) {
-					if (!if_unique_val(max_val, transition_probabilities[max_id_i])) {
-						hide(transition_probabilities, max_id_i);
-					}
-					else {
-						hide(transition_probabilities, max_id_i, max_id_j);
-						if (max_val > match_quality_threshold) {
-							pairs_of_edges.push_back(make_pair(incoming_edges_[max_id_i], outgoing_edges_[max_id_j]));
-							fprintf(file,"pair: %d %d %5.4f\n", max_id_i, max_id_j, max_val);
-						}
-					}
-				}
-				counter += 1;
-			}
-		}
-
-		//TODO: Dima: make gp_.edge_pos.IsConsistentWithGenome() a const method
-		void ChooseConsistentPairs( const vector< vector <double> >& transition_probabilities ) {
-			int i = 0, j = 0;
-			for (auto in_edge = incoming_edges_.begin(); in_edge != incoming_edges_.end(); ++in_edge, ++i) {
-				for ( auto out_edge = outgoing_edges_.begin(); out_edge != outgoing_edges_.end(); ++out_edge, ++j ) {
-					vector<EdgeId> path;
-					path.push_back(*in_edge);
-					path.insert(path.begin()+1, component_.begin(), component_.end());
-					path.push_back(*out_edge);
-					DEBUG("check if consistent with genome..");
-					/*if (gp_.edge_pos.IsConsistentWithGenome(path)) {
-						DEBUG("pair: " << transition_probabilities[i][j] << "\n");
-					}*/
-				}
-			}
-		}
-
-		void bfs ( const EdgeId& edge,  set<EdgeId>& visited_edges, int& curLen, int& maxPathLen) {
-			visited_edges.insert(edge);
-			auto incoming_edges = gp_.g.IncomingEdges(gp_.g.EdgeStart(edge));
-			for ( auto e = incoming_edges.begin(); e != incoming_edges.end(); ++e) {
-				if ( find(component_.begin(), component_.end(), *e) != component_.end() && visited_edges.find(*e) == visited_edges.end() ){
-					curLen += gp_.g.length(*e);
-					if (curLen > maxPathLen) maxPathLen = curLen;
-					bfs(*e, visited_edges, curLen, maxPathLen);
-				}
-			}
-			auto outgoing_edges_ = gp_.g.OutgoingEdges(gp_.g.EdgeEnd(edge));
-			for ( auto e = outgoing_edges_.begin(); e != outgoing_edges_.end(); ++e) {
-				if ( find(component_.begin(), component_.end(), *e) != component_.end() && visited_edges.find(*e) == visited_edges.end() ){
-					curLen += gp_.g.length(*e);
-					if (curLen > maxPathLen) maxPathLen = curLen;
-					bfs(*e, visited_edges, curLen, maxPathLen);
-				}
-			}
-		}		
-	
-		public:
-		explicit Repeat(const graph_pack& gp, const vector<EdgeId>& incoming_edges, const vector<EdgeId>& outgoing_edges, const vector<EdgeId>& component, double repeat_length_upper_threshold,
-				const map<EdgeId, kind_of_repeat>& edge_to_kind, FILE* out_file) : gp_(gp),
-										incoming_edges_(incoming_edges),
-										outgoing_edges_(outgoing_edges),
-										component_(component), 
-										repeat_length_upper_threshold_(repeat_length_upper_threshold),
-										edge_to_kind_(edge_to_kind),
-										file(out_file) {}
-
-			      
-		template<class EdgeQualityLabeler>
-		bool IfContainsOnlyGenomicEdges( const EdgeQualityLabeler& quality_labeler ) const {
-			for (auto iter = component_.begin(); iter != component_.end(); ++iter) {
-		;		if (quality_labeler.quality(*iter) < 0.5) {
-					return false;
-				}
-			}		
-			for ( auto e = incoming_edges_.begin(); e != incoming_edges_.end(); ++e ) {
-				if (quality_labeler.quality(*e) < 0.5) {
-					return false;
-				}
-			}
-			for ( auto e = outgoing_edges_.begin(); e != outgoing_edges_.end(); ++e ) {
-				if (quality_labeler.quality(*e) < 0.5) {
-					return false;
-				}
-			}
-			return true;
-		}
-			
-		template<class EdgeQualityLabeler>
-		bool IfRepeatByQuality( const EdgeQualityLabeler& quality_labeler ) const {
-
-			for ( auto e = component_.begin(); e != component_.end(); ++e ) {
-				if ( quality_labeler.quality(*e) <= 1.5 ) return false;
-			}
-			for ( auto e = incoming_edges_.begin(); e != incoming_edges_.end(); ++e ) {
-				 if ( quality_labeler.quality(*e) > 1.5 ) return false;
-			}
-			for ( auto e = outgoing_edges_.begin(); e != outgoing_edges_.end(); ++e ) {
-				 if ( quality_labeler.quality(*e) > 1.5 ) return false;
-			}
-			return true;
-		}
-
-		template <class DetailedCoverage, class EdgeQualityLabeler>
-		void GetComponentInfo(const DetailedCoverage& coverage, const vector< vector <double> >& transition_probabilities, const EdgeQualityLabeler& quality_labeler ) const  {
-			fprintf(file,"Component: \n");
-			for ( auto iter = component_.begin(); iter != component_.end(); ++iter ) {
-				fprintf(file,"%lu edge length: %lu average edge coverage %5.4f quality: %5.2f",gp_.g.int_id(*iter), gp_.g.length(*iter), gp_.g.coverage(*iter),
-					quality_labeler.quality(*iter));
-				auto repeat_type = edge_to_kind_.find(*iter);
-				VERIFY(repeat_type != edge_to_kind_.end()); 
-				if ( repeat_type->second == TOPOLOGY){
-					fprintf(file,"TOPOLOGY\n");
-				}
-				else if  (repeat_type->second == LENGTH ){
-					fprintf(file,"LENGTH\n");
-				}
-				else if (repeat_type->second == PAIREDINFO ){
-					fprintf(file,"PAIREDINFO\n");
-				}
-			 }
-			fprintf(file, "incoming edges:\n");
-			for ( auto iter = incoming_edges_.begin(); iter != incoming_edges_.end(); ++iter ) {
-			 	fprintf(file, "%lu edge length: %lu outgoing edge coverage: %5.4f average edge coverage %5.4f quality: %5.2f\n", gp_.g.int_id(*iter), gp_.g.length(*iter), 
-					coverage.GetOutCov(*iter), gp_.g.coverage(*iter), quality_labeler.quality(*iter));
-			}
-			fprintf(file,"outgoing edges: \n");
-			for ( auto iter = outgoing_edges_.begin(); iter != outgoing_edges_.end(); ++iter ) {
-			 	fprintf(file, "%lu edge length: %lu incoming edge coverage: %5.4f average edge coverage %5.4f quality: %5.2f\n", gp_.g.int_id(*iter), gp_.g.length(*iter), 
-					coverage.GetInCov(*iter), gp_.g.coverage(*iter), quality_labeler.quality(*iter));
-			}
-			bool correct_component = IfRepeatByQuality( quality_labeler  );
-			if (!correct_component) {
-				fprintf(file,"repeat is detected incorrectly\n");
-			}
-			if (transition_probabilities.size() > 0) {
-			 	fprintf(file,"transition probabilities:\n");
-				for (auto vec = transition_probabilities.begin(); vec != transition_probabilities.end(); ++vec ) {
-					for ( auto prob = vec->begin(); prob != vec->end(); ++prob ) {
-						fprintf(file,"%5.4f ",*prob);
-					}
-					fprintf(file,"\n");
-				}
-			}
-				
-		}
-
-		bool IfContainsOnlyShortEdges() const {
-			for ( auto it = component_.begin(); it != component_.end(); ++it ) {
-				auto repeat_type = edge_to_kind_.find(*it);
-				VERIFY(repeat_type != edge_to_kind_.end()); 
-				if (gp_.g.length(*it) >= repeat_length_upper_threshold_ || repeat_type->second == TOPOLOGY )
-					return false;
-			}
-			return true;
-		}
-
-		void CountDistance( const EdgeId& edge_in, const EdgeId& edge_out, int& distance ) const {
-			// gets a repetitive component and calculates the length of the longest path in it
-			if ( gp_.g.EdgeEnd(edge_in) == gp_.g.EdgeStart(edge_out) ) return;
-			for ( auto edge = component_.begin(); edge != component_.end(); ++edge ){
-				if ( gp_.g.EdgeEnd(edge_in) == gp_.g.EdgeStart(*edge) ) {
-					distance += gp_.g.length(*edge);
-					CountDistance( *edge, edge_out, distance);
-					return;
-				}
-			}
-		}
-
-		// the length of the longest path in kmers
-		int GetLongestPathLength() const {
-			set<EdgeId> visited_edges;
-			vector<vector<EdgeId>> paths;
-			int maxPathLen = gp_.g.length(component_[0]);
-			for ( auto edge = component_.begin(); edge != component_.end(); ++edge ){
-				if (visited_edges.find(*edge) != visited_edges.end()) continue;
-				int curLen = gp_.g.length(*edge);
-				visited_edges.insert(*edge);
-				 bfs(*edge, visited_edges, curLen, maxPathLen);
-			}
-			return maxPathLen;
-		}
-
-		void dfs( const VertexId& vStart, const VertexId& vEnd, set<EdgeId>& visited,  vector<EdgeId>& path) const {
-			for ( auto edge = component_.begin(); edge != component_.end(); ++edge ){
-				if (visited.find(*edge) != visited.end()){
-					continue;
-				}
-				if ( vStart == gp_.g.EdgeStart(*edge) ){
-					if ( vEnd == gp_.g.EdgeEnd(*edge) ){
-						 path.push_back(*edge);
-						 visited.insert(*edge);
-						 return;
-					}
-					visited.insert(*edge);
-					dfs( gp_.g.EdgeEnd(*edge), vEnd, visited, path );
-					path.insert(path.begin(), *edge);
-					return;
-				}
-			}
-			return;
-		}
-
-		void SetPaths(const vector<pair<EdgeId,EdgeId>>& pairs_of_edges, vector<vector<EdgeId>>& resolved_paths) {
-			for (unsigned i = 0; i < pairs_of_edges.size(); ++i) {
-				EdgeId in_edge = pairs_of_edges[i].first;
-				EdgeId out_edge = pairs_of_edges[i].second;
-				vector<EdgeId> path;
-				set<EdgeId> visited;
-				dfs(gp_.g.EdgeEnd(in_edge), gp_.g.EdgeStart(out_edge), visited, path);
-				path.insert(path.begin(),in_edge);
-				path.push_back(out_edge);
-				if (MatchReference(path)) { fprintf(file,"match!\n"); }
-				else {fprintf(file,"does not match!\n");}
-				resolved_paths.push_back(path);
-			}
-
-		}
-
-		template <class DetailedCoverage, class EdgeQualityLabeler, class KmerIndex>
-		bool Resolve( BucketMapper<Graph, KmerIndex> &bm, const DetailedCoverage& coverage,
-							const EdgeQualityLabeler& quality_labeler,
-							vector<vector<EdgeId>>& resolved_paths) { 
-			vector<pair<EdgeId,EdgeId>> pairs_of_edges;
-			vector< vector <double> > transition_probabilities ;
-			for ( unsigned i = 0; i < incoming_edges_.size(); i++) {
-				transition_probabilities.push_back(vector<double>(outgoing_edges_.size(),1));
-			}
-			int in_edge_counter(0);
-			for ( auto in_edge = incoming_edges_.begin(); in_edge != incoming_edges_.end(); ++in_edge, ++in_edge_counter ) {
-				double in_cov = coverage.GetOutCov(*in_edge);
-				int in_bucket = bm.GetCoverageBucket(in_cov);
-				int out_edge_counter(0);
-				for ( auto out_edge = outgoing_edges_.begin(); out_edge != outgoing_edges_.end(); ++out_edge, ++out_edge_counter ) {
-					double out_cov = coverage.GetInCov(*out_edge);
-					int out_bucket = bm.GetCoverageBucket(out_cov);
-					int distance(0);
-					CountDistance(*in_edge, *out_edge, distance);
-					double shift = 25;
-					double probability = bm.GetProbabilityFromBucketToBucketForDistance (in_bucket, out_bucket, distance, shift) ;
-					transition_probabilities[in_edge_counter][out_edge_counter] = probability;
-				}
-			}
-			GetComponentInfo(coverage, transition_probabilities, quality_labeler );
-			double match_quality_threshold = 0.1;
-			ChoosePairsGreedy(transition_probabilities, pairs_of_edges, match_quality_threshold);
-			if (pairs_of_edges.size() > 0) {
-				SetPaths(pairs_of_edges, resolved_paths);
-				return true;
-			}
-			return false;
-		}
-	};
-
-}
-
-#endif
diff --git a/src/debruijn/repeat_resolving.cpp b/src/debruijn/repeat_resolving.cpp
index 996ed71..3beebd0 100644
--- a/src/debruijn/repeat_resolving.cpp
+++ b/src/debruijn/repeat_resolving.cpp
@@ -48,10 +48,15 @@ void RepeatResolution::run(conj_graph_pack &gp, const char*) {
         stats::PrepareForDrawing(gp);
     }
 
-    VERIFY(cfg::get().pe_params.param_set.remove_overlaps);
+    omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
+    printer(ipp_before_repeat_resolution);
+
+    //todo awful hack to get around PE using cfg::get everywhere...
+    auto tmp_params_storage = cfg::get().pe_params;
     if (preliminary_) {
-        INFO("Overlap removal disabled for first-stage rr")
-        cfg::get_writable().pe_params.param_set.remove_overlaps = false;
+        INFO("Setting up preliminary path extend settings")
+        cfg::get_writable().pe_params = cfg::get().prelim_pe_params;
     }
 
     OutputContigs(gp.g, cfg::get().output_dir + "before_rr");
@@ -74,9 +79,9 @@ void RepeatResolution::run(conj_graph_pack &gp, const char*) {
         INFO("Unsupported repeat resolver");
         OutputContigs(gp.g, cfg::get().output_dir + "final_contigs");
     }
-
     if (preliminary_) {
-        cfg::get_writable().pe_params.param_set.remove_overlaps = true;
+        INFO("Restoring initial path extend settings")
+        cfg::get_writable().pe_params = tmp_params_storage;
     }
 }
 
diff --git a/src/debruijn/second_phase_setup.cpp b/src/debruijn/second_phase_setup.cpp
index 79b53e6..2c2e062 100644
--- a/src/debruijn/second_phase_setup.cpp
+++ b/src/debruijn/second_phase_setup.cpp
@@ -30,7 +30,7 @@ void SecondPhaseSetup::run(conj_graph_pack &gp, const char*) {
 	INFO("Preparing second phase");
 	gp.ClearRRIndices();
 
-	std::string old_pe_contigs_filename = cfg::get().output_dir + "scaffolds.fasta";
+	std::string old_pe_contigs_filename = cfg::get().output_dir + "final_contigs.fasta";
 	std::string new_pe_contigs_filename = cfg::get().output_dir + "first_pe_contigs.fasta";
 
     VERIFY(path::check_existence(old_pe_contigs_filename));
@@ -38,9 +38,6 @@ void SecondPhaseSetup::run(conj_graph_pack &gp, const char*) {
 	int code = rename(old_pe_contigs_filename.c_str(), new_pe_contigs_filename.c_str());
     VERIFY(code == 0);
 
-	io::ReadStreamList<io::SingleRead> additional_contigs;
-	additional_contigs.push_back(io::EasyStream(cfg::get().output_dir + "first_pe_contigs.fasta", true));
-
 	io::SequencingLibrary<debruijn_graph::debruijn_config::DataSetData> untrusted_contigs;
 	untrusted_contigs.push_back_single(new_pe_contigs_filename);
 	untrusted_contigs.set_orientation(io::LibraryOrientation::Undefined);
diff --git a/src/debruijn/sequence_mapper.hpp b/src/debruijn/sequence_mapper.hpp
index 37bf55a..ef9c1c4 100644
--- a/src/debruijn/sequence_mapper.hpp
+++ b/src/debruijn/sequence_mapper.hpp
@@ -20,6 +20,22 @@
 namespace debruijn_graph {
 
 template<class Graph>
+MappingPath<typename Graph::EdgeId> ConjugateMapping(const Graph& g, 
+                                              const MappingPath<typename Graph::EdgeId>& mp, 
+                                              size_t sequence_length) {
+    MappingPath<typename Graph::EdgeId> answer;
+    for (size_t i = mp.size(); i > 0; --i) {
+        auto p = mp[i-1];
+        auto e = p.first;
+        MappingRange mr = p.second;
+        answer.push_back(g.conjugate(e), 
+                        MappingRange(mr.initial_range.Invert(sequence_length - g.k()), 
+                        mr.mapped_range.Invert(g.length(e))));
+    }
+    return answer;
+}
+
+template<class Graph>
 class SequenceMapper {
 public:
     typedef typename Graph::EdgeId EdgeId;
@@ -42,6 +58,7 @@ public:
   
     MappingPath<EdgeId> MapRead(const io::SingleRead &read) const {
 //      VERIFY(read.IsValid());
+        DEBUG(read.name() << " is mapping");
         string s = read.GetSequenceString();
         size_t l = 0, r = 0;
         MappingPath<EdgeId> result;
@@ -59,12 +76,191 @@ public:
         if (r > l) {
             result.join(MapSequence(Sequence(s.substr(l, r - l))), int(l));
         }
+        DEBUG(read.name() << " is mapped");
+        DEBUG("Number of edges is " << result.size());
+
       return result;
     }
 
     virtual size_t KmerSize() const = 0;
 };
 
+template<class Graph>
+class MappingPathFixer {
+public:
+
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+
+    MappingPathFixer(const Graph& graph)
+            : g_(graph) {
+    }
+
+    bool CheckContiguous(const vector<typename Graph::EdgeId>& path) const {
+        for (size_t i = 1; i < path.size(); ++i) {
+            if (g_.EdgeEnd(path[i - 1]) != g_.EdgeStart(path[i]))
+                return false;
+        }
+        return true;
+    }
+
+    Path<EdgeId> TryFixPath(const Path<EdgeId>& path, size_t length_bound = 70) const {
+        return Path<EdgeId>(TryFixPath(path.sequence(), length_bound), path.start_pos(), path.end_pos());
+    }
+
+    vector<EdgeId> TryFixPath(const vector<EdgeId>& edges, size_t length_bound = 70) const {
+        vector<EdgeId> answer;
+        if (edges.empty()) {
+            //          WARN("Mapping path was empty");
+            return vector<EdgeId>();
+        }
+        answer.push_back(edges[0]);
+        for (size_t i = 1; i < edges.size(); ++i) {
+            if (g_.EdgeEnd(edges[i - 1]) != g_.EdgeStart(edges[i])) {
+                vector<EdgeId> closure = TryCloseGap(g_.EdgeEnd(edges[i - 1]),
+                                                     g_.EdgeStart(edges[i]),
+                                                     length_bound);
+                answer.insert(answer.end(), closure.begin(), closure.end());
+            }
+            answer.push_back(edges[i]);
+        }
+        return answer;
+    }
+
+    vector<EdgeId> DeleteSameEdges(const vector<EdgeId>& path) const {
+        vector<EdgeId> result;
+        if (path.empty()) {
+            return result;
+        }
+        result.push_back(path[0]);
+        for (size_t i = 1; i < path.size(); ++i) {
+            if (path[i] != result[result.size() - 1]) {
+                result.push_back(path[i]);
+            }
+        }
+        return result;
+    }
+
+private:
+    vector<EdgeId> TryCloseGap(VertexId v1, VertexId v2, size_t length_bound) const {
+        if (v1 == v2)
+            return vector<EdgeId>();
+        TRACE("Trying to close gap between v1=" << g_.int_id(v1) << " and v2=" << g_.int_id(v2));
+        PathStorageCallback<Graph> path_store(g_);
+
+        TRACE("Path storage callback created");
+        //todo reduce value after investigation
+        ProcessPaths(g_, 0, length_bound, v1, v2, path_store);
+
+        TRACE("Paths processed");
+        if (path_store.size() == 0) {
+            TRACE("Failed to find closing path");
+            //          TRACE("Failed to close gap between v1=" << graph_.int_id(v1)
+            //                          << " (conjugate "
+            //                          << graph_.int_id(g_.conjugate(v1))
+            //                          << ") and v2=" << g_.int_id(v2)
+            //                          << " (conjugate "
+            //                          << g_.int_id(g_.conjugate(v2)) << ")");
+            //          return boost::none;
+            return vector<EdgeId>();
+        } else if (path_store.size() == 1) {
+            TRACE("Unique closing path found");
+        } else {
+            TRACE("Several closing paths found, first chosen");
+        }
+        TRACE("Taking answer    ");
+        vector<EdgeId> answer = path_store.paths().front();
+        TRACE("Gap closed");
+        TRACE( "Cumulative closure length is " << CumulativeLength(g_, answer));
+        return answer;
+    }
+    const Graph& g_;
+};
+
+template<class Graph>
+class ReadPathFinder {
+private:
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    const Graph& g_;
+    typedef MappingPathFixer<Graph> GraphMappingPathFixer;
+    const GraphMappingPathFixer path_fixer_;
+public:
+    ReadPathFinder (const Graph& g) :
+        g_(g), path_fixer_(g)
+    {   }
+
+    vector<EdgeId> FindReadPath(const MappingPath<EdgeId>& mapping_path) const {
+          if (!IsMappingPathValid(mapping_path)) {
+              TRACE("read unmapped");
+              return vector<EdgeId>();
+          }
+          vector<EdgeId> corrected_path = path_fixer_.DeleteSameEdges(
+                  mapping_path.simple_path());
+          PrintPathInfo(corrected_path);
+          if(corrected_path.size() != mapping_path.simple_path().size()) {
+              DEBUG("Some edges were deleted");
+          }
+          vector<EdgeId> fixed_path = path_fixer_.TryFixPath(corrected_path);
+          if (!path_fixer_.CheckContiguous(fixed_path)) {
+              TRACE("read unmapped");
+              std::stringstream debug_stream;
+              for (size_t i = 0; i < fixed_path.size(); ++i) {
+                  debug_stream << g_.int_id(fixed_path[i]) << " ";
+              }
+              TRACE(debug_stream.str());
+              return vector<EdgeId>();
+          } else {
+              DEBUG("Path fix works");
+          }
+          return fixed_path;
+      }
+
+    vector<vector<EdgeId>> FindReadPathWithGaps(const MappingPath<EdgeId>& mapping_path) const {
+          if (!IsMappingPathValid(mapping_path)) {
+              TRACE("read unmapped");
+              return vector<vector<EdgeId>>();
+          }
+          vector<EdgeId> corrected_path = path_fixer_.DeleteSameEdges(
+                  mapping_path.simple_path());
+          PrintPathInfo(corrected_path);
+          if(corrected_path.size() != mapping_path.simple_path().size()) {
+              DEBUG("Some edges were deleted");
+          }
+          vector<EdgeId> fixed_path = path_fixer_.TryFixPath(corrected_path);
+          return SplitUnfixedPoints(fixed_path);
+      }
+
+private:
+
+      vector<vector<EdgeId>> SplitUnfixedPoints(vector<EdgeId>& path) const {
+          vector<vector<EdgeId>> result;
+          size_t prev_start = 0;
+          for (size_t i = 1; i < path.size(); ++i) {
+              if (g_.EdgeEnd(path[i - 1]) != g_.EdgeStart(path[i])) {
+                      result.push_back(vector<EdgeId>(path.begin() + prev_start, path.begin() + i));
+                      prev_start = i;
+              }
+          }
+          result.push_back(vector<EdgeId>(path.begin() + prev_start, path.end()));
+          return result;
+      }
+
+      bool IsTip(VertexId v) const {
+          return g_.IncomingEdgeCount(v) + g_.OutgoingEdgeCount(v) == 1;
+      }
+
+      bool IsMappingPathValid(const MappingPath<EdgeId>& path) const {
+          return path.size() != 0;
+      }
+
+      void PrintPathInfo(vector<EdgeId>& corrected_path) const {
+          for(size_t i = 0; i < corrected_path.size(); ++i) {
+              DEBUG(i + 1 << "-th edge is " << corrected_path[i].int_id());
+          }
+      }
+};
+
 template<class Graph, class Index>
 class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
 
@@ -72,7 +268,6 @@ class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
 
  public:
   typedef std::vector<MappingRange> RangeMappings;
-  typedef MappingPathFixer<Graph> GraphMappingPathFixer;
 
  private:
   const Index& index_;
@@ -81,7 +276,6 @@ class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
   typedef typename Index::KMer Kmer;
   typedef KmerMapper<Graph, Kmer> KmerSubs;
   const KmerSubs& kmer_mapper_;
-  const GraphMappingPathFixer path_fixer_;
   size_t k_;
   bool optimization_on_;
   //	mutable size_t mapped_;
@@ -182,7 +376,7 @@ class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
                             const Index& index,
                             const KmerSubs& kmer_mapper,
 			    bool optimization_on = true) :
-      SequenceMapper<Graph>(g), index_(index), kmer_mapper_(kmer_mapper), path_fixer_(g), k_(g.k()+1),
+      SequenceMapper<Graph>(g), index_(index), kmer_mapper_(kmer_mapper), k_(g.k()+1),
 	optimization_on_(optimization_on) { }
 
   ~NewExtendedSequenceMapper() {
@@ -225,31 +419,7 @@ class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
       return k_;
   }
 
-  vector<EdgeId> FindReadPath(const MappingPath<EdgeId>& mapping_path) const {
-        if (!IsMappingPathValid(mapping_path)) {
-            TRACE("read unmapped");
-            return vector<EdgeId>();
-        }
-        vector<EdgeId> corrected_path = path_fixer_.DeleteSameEdges(
-                mapping_path.simple_path());
-        vector<EdgeId> fixed_path = path_fixer_.TryFixPath(corrected_path);
-        if (!path_fixer_.CheckContiguous(fixed_path)) {
-            TRACE("read unmapped");
-            std::stringstream debug_stream;
-            for (size_t i = 0; i < fixed_path.size(); ++i) {
-                debug_stream << g_.int_id(fixed_path[i]) << " ";
-            }
-            TRACE(debug_stream.str());
-            return vector<EdgeId>();
-        }
-        return fixed_path;
-    }
-
-private:
-    bool IsMappingPathValid(const MappingPath<EdgeId>& path) const {
-        return path.size() != 0;
-    }
-    DECL_LOGGER("NewExtendedSequenceMapper");
+  DECL_LOGGER("NewExtendedSequenceMapper");
 };
 
 
@@ -258,5 +428,4 @@ std::shared_ptr<NewExtendedSequenceMapper<typename gp_t::graph_t, typename gp_t:
   return std::make_shared<NewExtendedSequenceMapper<typename gp_t::graph_t, typename gp_t::index_t> >(gp.g, gp.index, gp.kmer_mapper);
 }
 
-
 }
diff --git a/src/debruijn/sequence_mapper_notifier.hpp b/src/debruijn/sequence_mapper_notifier.hpp
index 1400387..13040ea 100644
--- a/src/debruijn/sequence_mapper_notifier.hpp
+++ b/src/debruijn/sequence_mapper_notifier.hpp
@@ -13,6 +13,8 @@
 #include "io/paired_read.hpp"
 #include "graph_pack.hpp"
 
+#include "io/paired_read.hpp"
+
 #include <vector>
 #include <cstdlib>
 
@@ -22,8 +24,13 @@ class SequenceMapperListener {
 public:
     virtual void StartProcessLibrary(size_t threads_count) = 0;
     virtual void StopProcessLibrary() = 0;
-    virtual void ProcessPairedRead(size_t thread_index, const MappingPath<EdgeId>& read1, const MappingPath<EdgeId>& read2, size_t dist) = 0;
-    virtual void ProcessSingleRead(size_t thread_index, const MappingPath<EdgeId>& read) = 0;
+
+    //TODO: think about read ierarchy
+    virtual void ProcessPairedRead(size_t thread_index, const io::PairedRead& pr, const MappingPath<EdgeId>& read1, const MappingPath<EdgeId>& read2) = 0;
+    virtual void ProcessPairedRead(size_t thread_index, const io::PairedReadSeq& pr, const MappingPath<EdgeId>& read1, const MappingPath<EdgeId>& read2) = 0;
+    virtual void ProcessSingleRead(size_t thread_index, const io::SingleRead& r, const MappingPath<EdgeId>& read) = 0;
+    virtual void ProcessSingleRead(size_t thread_index, const io::SingleReadSeq& r, const MappingPath<EdgeId>& read) = 0;
+
     virtual void MergeBuffer(size_t thread_index) = 0;
     virtual ~SequenceMapperListener() {}
 };
@@ -32,8 +39,8 @@ class SequenceMapperNotifier {
 public:
     typedef SequenceMapper<conj_graph_pack::graph_t> SequenceMapperT;
 
-    SequenceMapperNotifier(const conj_graph_pack& gp, bool send_true_distance = true)
-            : gp_(gp), send_true_distance_(send_true_distance) { }
+    SequenceMapperNotifier(const conj_graph_pack& gp)
+            : gp_(gp) { }
 
     void Subscribe(size_t lib_index, SequenceMapperListener* listener) {
         while ((int)lib_index >= (int)listeners_.size() - 1) {
@@ -110,7 +117,7 @@ private:
             listener->MergeBuffer(ithread);
     }
     const conj_graph_pack& gp_;
-    bool send_true_distance_;
+
     std::vector<std::vector<SequenceMapperListener*> > listeners_;  //first vector's size = count libs
 };
 
@@ -126,11 +133,9 @@ inline void SequenceMapperNotifier::NotifyProcessRead(const io::PairedReadSeq& r
     MappingPath<EdgeId> path2 = mapper.MapSequence(read2);
     for (const auto& listener : listeners_[ilib]) {
         TRACE("Dist: " << r.second().size() << " - " << r.insert_size() << " = " << r.second().size() - r.insert_size());
-        // FIXME: Cleanup this trash.
-        size_t distance = (send_true_distance_ ? r.distance() : r.second().size() - r.insert_size());
-        listener->ProcessPairedRead(ithread, path1, path2, distance);
-        listener->ProcessSingleRead(ithread, path1);
-        listener->ProcessSingleRead(ithread, path2);
+        listener->ProcessPairedRead(ithread, r, path1, path2);
+        listener->ProcessSingleRead(ithread, r.first(), path1);
+        listener->ProcessSingleRead(ithread, r.second(), path2);
     }
 }
 
@@ -143,11 +148,9 @@ inline void SequenceMapperNotifier::NotifyProcessRead(const io::PairedRead& r,
     MappingPath<EdgeId> path2 = mapper.MapRead(r.second());
     for (const auto& listener : listeners_[ilib]) {
         TRACE("Dist: " << r.second().size() << " - " << r.insert_size() << " = " << r.second().size() - r.insert_size());
-        // FIXME: Cleanup this trash.
-        size_t distance = (send_true_distance_ ? r.distance() : r.second().size() - r.insert_size());
-        listener->ProcessPairedRead(ithread, path1, path2, distance);
-        listener->ProcessSingleRead(ithread, path1);
-        listener->ProcessSingleRead(ithread, path2);
+        listener->ProcessPairedRead(ithread, r, path1, path2);
+        listener->ProcessSingleRead(ithread, r.first(), path1);
+        listener->ProcessSingleRead(ithread, r.second(), path2);
     }
 }
 
@@ -159,7 +162,7 @@ inline void SequenceMapperNotifier::NotifyProcessRead(const io::SingleReadSeq& r
     const Sequence& read = r.sequence();
     MappingPath<EdgeId> path = mapper.MapSequence(read);
     for (const auto& listener : listeners_[ilib])
-        listener->ProcessSingleRead(ithread, path);
+        listener->ProcessSingleRead(ithread, r, path);
 }
 
 template<>
@@ -169,7 +172,7 @@ inline void SequenceMapperNotifier::NotifyProcessRead(const io::SingleRead& r,
                                                       size_t ithread) const {
     MappingPath<EdgeId> path = mapper.MapRead(r);
     for (const auto& listener : listeners_[ilib])
-        listener->ProcessSingleRead(ithread, path);
+        listener->ProcessSingleRead(ithread, r, path);
 }
 
 } /*debruijn_graph*/
diff --git a/src/debruijn/simplification.cpp b/src/debruijn/simplification.cpp
index cfcc1fa..0604af9 100644
--- a/src/debruijn/simplification.cpp
+++ b/src/debruijn/simplification.cpp
@@ -6,7 +6,8 @@
 //***************************************************************************
 
 #include "standard.hpp"
-#include "graph_simplification.hpp"
+#include "simplification/simplification_settings.hpp"
+#include "simplification/graph_simplification.hpp"
 #include "omni/visualization/graph_labeler.hpp"
 #include "io/single_read.hpp"
 #include "positions.hpp"
@@ -15,6 +16,344 @@
 
 namespace debruijn_graph {
 
+using namespace debruijn::simplification;
+
+class GraphSimplifier {
+    typedef std::function<void(EdgeId)> HandlerF;
+    typedef omnigraph::PersistentEdgeRemovingAlgorithm<Graph,
+            omnigraph::ParallelInterestingElementFinder<Graph, EdgeId>,
+            LengthComparator<Graph>> TipClipperT;
+    typedef omnigraph::PersistentEdgeRemovingAlgorithm<Graph,
+            omnigraph::ParallelInterestingElementFinder<Graph, EdgeId>,
+            CoverageComparator<Graph>> ECRemoverT;
+
+    typedef std::vector<std::pair<AlgoPtr<Graph>, std::string>> AlgoStorageT;
+
+    conj_graph_pack& gp_;
+    Graph& g_;
+    SimplifInfoContainer info_container_;
+    const debruijn_config::simplification simplif_cfg_;
+
+    CountingCallback<Graph> cnt_callback_;
+    HandlerF removal_handler_;
+    stats::detail_info_printer& printer_;
+
+//    bool FastModeAvailable(const SimplifInfoContainer& info, double activation_cov_threshold) {
+//        const auto& cfg = cfg::get();
+//
+//        //todo fix logic
+//        //also handles meta case for now
+//        if (cfg.ds.single_cell) {
+//            return !cfg::get().main_iteration;
+//        }
+//
+//        if (math::eq(info.detected_mean_coverage(), 0.) &&
+//            !cfg.kcm.use_coverage_threshold) {
+//            WARN("Mean coverage wasn't reliably estimated");
+//            return false;
+//        }
+//
+//        //todo review logic
+//        if (math::ls(info.detected_mean_coverage(), activation_cov_threshold) &&
+//            !(cfg.kcm.use_coverage_threshold &&
+//              math::ge(cfg.kcm.coverage_threshold, activation_cov_threshold))) {
+//            INFO("Estimated mean coverage " << info.detected_mean_coverage() <<
+//                 " is less than fast mode activation coverage " << activation_cov_threshold);
+//            return false;
+//        }
+//
+//        return true;
+//    }
+
+    bool PerformInitCleaning() {
+        if (simplif_cfg_.init_clean.early_it_only && info_container_.main_iteration()) {
+            INFO("Most init cleaning disabled on main iteration");
+            return false;
+        }
+
+        if (math::ge(simplif_cfg_.init_clean.activation_cov, 0.)
+                && math::ls(info_container_.detected_mean_coverage(), simplif_cfg_.init_clean.activation_cov)) {
+            INFO("Most init cleaning disabled since detected mean " << info_container_.detected_mean_coverage()
+                 << " was less than activation coverage " << simplif_cfg_.init_clean.activation_cov);
+            return false;
+        }
+
+        return true;
+    }
+
+    void InitialCleaning() {
+        INFO("PROCEDURE == InitialCleaning");
+
+        AlgoStorageT algos;
+
+        PushValid(
+                SelfConjugateEdgeRemoverInstance(g_,
+                                                 simplif_cfg_.init_clean.self_conj_condition,
+                                                 info_container_, removal_handler_),
+                "Self conjugate edge remover",
+                algos);
+
+        if (PerformInitCleaning()) {
+            PushValid(
+                    IsolatedEdgeRemoverInstance(g_,
+                                        simplif_cfg_.init_clean.ier,
+                                        info_container_, removal_handler_),
+                    "Initial isolated edge remover",
+                    algos);
+
+            PushValid(
+                    TipClipperInstance(g_,
+                               debruijn_config::simplification::tip_clipper(simplif_cfg_.init_clean.tip_condition),
+                               info_container_,
+                               removal_handler_),
+                    "Initial tip clipper",
+                    algos);
+
+            PushValid(
+                    ECRemoverInstance(g_,
+                              debruijn_config::simplification::erroneous_connections_remover(simplif_cfg_.init_clean.ec_condition),
+                              info_container_,
+                              removal_handler_),
+                    "Initial ec remover",
+                    algos);
+
+            PushValid(
+                    LowFlankDisconnectorInstance(g_, gp_.flanking_cov,
+                                                 simplif_cfg_.init_clean.disconnect_flank_cov, info_container_,
+                                                 removal_handler_),
+                    "Disconnecting edges with low flanking coverage",
+                    algos);
+        }
+
+        RunAlgos(algos);
+    }
+
+    bool AllTopology() {
+        bool res = TopologyRemoveErroneousEdges(gp_.g, simplif_cfg_.tec,
+                                                removal_handler_);
+        cnt_callback_.Report();
+        res |= TopologyReliabilityRemoveErroneousEdges(gp_.g, simplif_cfg_.trec,
+                                                       removal_handler_);
+        cnt_callback_.Report();
+        res |= RemoveThorns(gp_.g, simplif_cfg_.isec, removal_handler_);
+        cnt_callback_.Report();
+        res |= MultiplicityCountingRemoveErroneousEdges(gp_.g, simplif_cfg_.tec,
+                                                        removal_handler_);
+        cnt_callback_.Report();
+        return res;
+    }
+
+    bool FinalRemoveErroneousEdges() {
+
+    //    gp.ClearQuality();
+    //    gp.FillQuality();
+    //    auto colorer = debruijn_graph::DefaultGPColorer(gp);
+    //    omnigraph::DefaultLabeler<typename gp_t::graph_t> labeler(gp.g, gp.edge_pos);
+    //    QualityEdgeLocalityPrintingRH<Graph> qual_removal_handler(gp.g, gp.edge_qual, labeler, colorer,
+    //                                   cfg::get().output_dir + "pictures/colored_edges_deleted/");
+    //
+    //    //positive quality edges removed (folder colored_edges_deleted)
+    //    std::function<void(EdgeId)> qual_removal_handler_f = boost::bind(
+    //            //            &QualityLoggingRemovalHandler<Graph>::HandleDelete,
+    //            &QualityEdgeLocalityPrintingRH<Graph>::HandleDelete,
+    //            boost::ref(qual_removal_handler), _1);
+    //
+    //    std::function<void(set<EdgeId>)> set_removal_handler_f = boost::bind(
+    //                &omnigraph::simplification::SingleEdgeAdapter<set<EdgeId>>, _1, qual_removal_handler_f);
+    //
+
+        std::function<void(set<EdgeId>)> set_removal_handler_f(0);
+        if (removal_handler_) {
+            set_removal_handler_f = std::bind(
+                &omnigraph::simplification::SingleEdgeAdapter<set<EdgeId>>, std::placeholders::_1, removal_handler_);
+        }
+
+        bool changed = RemoveRelativelyLowCoverageComponents(gp_.g, gp_.flanking_cov,
+                                              simplif_cfg_.rcc, info_container_, set_removal_handler_f);
+
+        cnt_callback_.Report();
+
+        changed |= DisconnectRelativelyLowCoverageEdges(gp_.g, gp_.flanking_cov, simplif_cfg_.relative_ed);
+
+        if (simplif_cfg_.topology_simplif_enabled && info_container_.main_iteration()) {
+            changed |= AllTopology();
+            changed |= MaxFlowRemoveErroneousEdges(gp_.g, simplif_cfg_.mfec,
+                                                   removal_handler_);
+            cnt_callback_.Report();
+        }
+        return changed;
+    }
+
+    void PostSimplification() {
+        INFO("PROCEDURE == Post simplification");
+        size_t iteration = 0;
+
+        AlgoStorageT algos;
+
+        PushValid(
+                TipClipperInstance(g_, simplif_cfg_.tc,
+                                   info_container_, removal_handler_),
+                "Tip clipper",
+                algos);
+
+        PushValid(
+                TipClipperInstance(g_, simplif_cfg_.final_tc,
+                                   info_container_, removal_handler_),
+                "Final tip clipper",
+                algos);
+
+        PushValid(
+                BRInstance(g_, simplif_cfg_.br,
+                                   info_container_, removal_handler_),
+                "Bulge remover",
+                algos);
+
+        PushValid(
+                BRInstance(g_, simplif_cfg_.final_br,
+                                   info_container_, removal_handler_),
+                "Final bulge remover",
+                algos);
+
+        if (simplif_cfg_.topology_simplif_enabled) {
+            PushValid(
+                    TopologyTipClipperInstance(g_, simplif_cfg_.ttc,
+                                                      info_container_, removal_handler_),
+                    "Topology tip clipper",
+                    algos);
+        }
+
+        //FIXME need better configuration
+        if (cfg::get().ds.meta) {
+            PushValid(
+                    BRInstance(g_, simplif_cfg_.second_final_br,
+                                       info_container_, removal_handler_),
+                    "Yet another final bulge remover",
+                    algos);
+        }
+
+        bool enable_flag = true;
+        while (enable_flag) {
+            enable_flag = false;
+
+            INFO("Iteration " << iteration);
+
+            enable_flag |= FinalRemoveErroneousEdges();
+            cnt_callback_.Report();
+
+            enable_flag |=  ClipComplexTips(gp_.g, simplif_cfg_.complex_tc, removal_handler_);
+            cnt_callback_.Report();
+
+            enable_flag |= RemoveComplexBulges(gp_.g, simplif_cfg_.cbr, iteration);
+            cnt_callback_.Report();
+
+            enable_flag |= RunAlgos(algos);
+
+            iteration++;
+
+            //    printer(ipp_before_final_err_con_removal);
+            //        printer(ipp_final_tip_clipping, str(format("_%d") % iteration));
+            //        printer(ipp_final_err_con_removal, str(format("_%d") % iteration));
+            //        printer(ipp_final_bulge_removal, str(format("_%d") % iteration));
+        }
+
+        //fixme move to AllTopology?
+        if (simplif_cfg_.topology_simplif_enabled) {
+            RemoveHiddenEC(gp_.g, gp_.flanking_cov, simplif_cfg_.her, info_container_, removal_handler_);
+
+            cnt_callback_.Report();
+        }
+
+        INFO("Disrupting self-conjugate edges");
+        SelfConjugateDisruptor<Graph>(gp_.g, removal_handler_).Run();
+        cnt_callback_.Report();
+    }
+
+    //inline
+    //void IdealSimplification(Graph& graph,
+    //                         std::function<double(EdgeId)> quality_handler_f) {
+    //    for (auto iterator = graph.SmartEdgeBegin(); !iterator.IsEnd();
+    //         ++iterator) {
+    //        if (math::eq(quality_handler_f(*iterator), 0.))
+    //            graph.DeleteEdge(*iterator);
+    //    }
+    //    CompressAllVertices(graph);
+    //}
+
+//    std::shared_ptr<Predicate<EdgeId>> ParseCondition(const string& condition) const {
+//        ConditionParser<Graph> parser(g_, condition, info_container_);
+//        return parser();
+//    }
+
+    void PushValid(const AlgoPtr<Graph>& algo_ptr, std::string comment, AlgoStorageT& algos) const {
+        if (algo_ptr) {
+            algos.push_back(std::make_pair(algo_ptr, comment));
+        }
+    }
+
+    bool RunAlgos(AlgoStorageT& algos, bool force_primary_launch = false) {
+        bool changed = false;
+        for (auto algo_comment : algos) {
+             INFO("Running " << algo_comment.second);
+             changed |= algo_comment.first->Run(force_primary_launch);
+             cnt_callback_.Report();
+         }
+        return changed;
+    }
+
+public:
+    GraphSimplifier(conj_graph_pack &gp, const SimplifInfoContainer& info_container,
+                    const debruijn_config::simplification& simplif_cfg,
+                    const std::function<void(EdgeId)>& removal_handler,
+                    stats::detail_info_printer& printer)
+            : gp_(gp),
+              g_(gp_.g),
+              info_container_(info_container),
+              simplif_cfg_(simplif_cfg),
+              removal_handler_(AddCountingCallback(cnt_callback_, removal_handler)),
+              printer_(printer) {
+
+    }
+
+    void SimplifyGraph() {
+        printer_(ipp_before_simplification);
+        INFO("Graph simplification started");
+
+        InitialCleaning();
+
+        AlgoStorageT algos;
+
+        PushValid(
+                TipClipperInstance(g_, simplif_cfg_.tc, info_container_, removal_handler_, simplif_cfg_.cycle_iter_count),
+                "Tip clipper",
+                algos);
+        PushValid(
+                BRInstance(g_, simplif_cfg_.br, info_container_, removal_handler_, simplif_cfg_.cycle_iter_count),
+                "Bulge remover",
+                algos);
+        PushValid(
+                ECRemoverInstance(g_, simplif_cfg_.ec, info_container_, removal_handler_, simplif_cfg_.cycle_iter_count),
+                "Low coverage edge remover",
+                algos);
+
+        size_t iteration = 0;
+        bool graph_changed = true;
+        //cannot stop simply if nothing changed, since threshold change on every iteration
+        while (iteration < simplif_cfg_.cycle_iter_count || graph_changed) {
+            INFO("PROCEDURE == Simplification cycle, iteration " << iteration + 1);
+            graph_changed = RunAlgos(algos);
+            ++iteration;
+        }
+
+        printer_(ipp_before_post_simplification);
+
+        if (simplif_cfg_.post_simplif_enabled) {
+            PostSimplification();
+        } else {
+            INFO("PostSimplification disabled");
+        }
+    }
+};
+
 void Simplification::run(conj_graph_pack &gp, const char*) {
     using namespace omnigraph;
 
@@ -38,49 +377,49 @@ void Simplification::run(conj_graph_pack &gp, const char*) {
 //            boost::ref(qual_removal_handler), _1);
 
 
-    debruijn::simplification::SimplifInfoContainer info_container;
-    info_container
-        //0 if model didn't converge
-        .set_detected_mean_coverage(gp.ginfo.estimated_mean())
-        .set_read_length(cfg::get().ds.RL())
-        .set_chunk_cnt(cfg::get().max_threads);
+    SimplifInfoContainer info_container;
+    info_container.set_read_length(cfg::get().ds.RL())
+        .set_main_iteration(cfg::get().main_iteration)
+        .set_chunk_cnt(5 * cfg::get().max_threads);
 
     if (!cfg::get().ds.meta) {
-        info_container.set_detected_coverage_bound(gp.ginfo.ec_bound());
+        //0 if model didn't converge
+        //todo take max with trusted_bound
+        info_container.set_detected_mean_coverage(gp.ginfo.estimated_mean())
+                .set_detected_coverage_bound(gp.ginfo.ec_bound());
     }
 
-    debruijn_config::simplification& simplif_cfg =
-            preliminary_ ? cfg::get_writable().preliminary_simp : cfg::get_writable().simp;
-
-    //todo fix logic
-    simplif_cfg.fast_features &= debruijn::simplification::FastModeAvailable(info_container, simplif_cfg.fast_activation_cov);
-
-//    debruijn::simplification::SimplifyGraph(gp, info_container, simplif_cfg, 0/*removal_handler_f*/, printer);
-    debruijn::simplification::GraphSimplifier simplifier(gp, info_container, simplif_cfg, 0/*removal_handler_f*/, printer);
+    debruijn::simplification::GraphSimplifier simplifier(gp, info_container,
+                                                                 preliminary_ ? cfg::get().preliminary_simp : cfg::get().simp,
+                                                                 nullptr/*removal_handler_f*/,
+                                                                 printer);
     simplifier.SimplifyGraph();
-
-    AvgCovereageCounter<Graph> cov_counter(gp.g);
-    cfg::get_writable().ds.set_avg_coverage(cov_counter.Count());
 }
 
 void SimplificationCleanup::run(conj_graph_pack &gp, const char*) {
-    omnigraph::DefaultLabeler<Graph> labeler/*tot_lab*/(gp.g, gp.edge_pos);
-    stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
-
-    printer(ipp_removing_isolated_edges);
+    SimplifInfoContainer info_container;
+    info_container
+        .set_read_length(cfg::get().ds.RL())
+        .set_main_iteration(cfg::get().main_iteration)
+        .set_chunk_cnt(5 * cfg::get().max_threads);
 
-    debruijn::simplification::RemoveIsolatedEdges(gp.g, cfg::get().simp.ier, cfg::get().ds.RL(), std::function<void(EdgeId)>(0), cfg::get().max_threads);
+    IsolatedEdgeRemoverInstance(gp.g, cfg::get().simp.ier, info_container, (HandlerF<Graph>)nullptr)->Run();
 
     double low_threshold = gp.ginfo.trusted_bound();
     if (math::gr(low_threshold, 0.0)) {
         INFO("Removing all the edges having coverage " << low_threshold << " and less");
-        omnigraph::EdgeRemovingAlgorithm<Graph> removing_algo(gp.g,
-                                                              std::make_shared<func::AlwaysTrue<EdgeId>>(), 0);
-
-        removing_algo.Run(CoverageComparator<Graph>(gp.g),
-                              std::make_shared<CoverageUpperBound<Graph>>(gp.g, low_threshold));
+        ParallelEdgeRemovingAlgorithm<Graph, CoverageComparator<Graph>>
+                cov_cleaner(gp.g,
+                            CoverageUpperBound<Graph>(gp.g, low_threshold),
+                            info_container.chunk_cnt(),
+                            (HandlerF<Graph>)nullptr,
+                            /*canonical_only*/true,
+                            CoverageComparator<Graph>(gp.g));
+        cov_cleaner.Run();
     }
 
+    omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
     printer(ipp_final_simplified);
 
     DEBUG("Graph simplification finished");
@@ -91,7 +430,7 @@ void SimplificationCleanup::run(conj_graph_pack &gp, const char*) {
     INFO("Average coverage = " << cfg::get().ds.avg_coverage());
     if (!cfg::get().ds.single_cell) {
         if (cfg::get().ds.avg_coverage() < gp.ginfo.ec_bound())
-            WARN("The determined errorneous connection coverage threshold may be determined improperly\n");
+            WARN("The determined erroneous connection coverage threshold may be determined improperly\n");
     }
 }
 
diff --git a/src/debruijn/simplification/graph_simplification.hpp b/src/debruijn/simplification/graph_simplification.hpp
new file mode 100644
index 0000000..4a35755
--- /dev/null
+++ b/src/debruijn/simplification/graph_simplification.hpp
@@ -0,0 +1,825 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+/*
+ * graph_simplification.hpp
+ *
+ *  Created on: Aug 12, 2011
+ *      Author: sergey
+ */
+
+#pragma once
+
+#include "standard_base.hpp"
+#include "config_struct.hpp"
+#include "debruijn_graph.hpp"
+#include "stats/debruijn_stats.hpp"
+
+#include "omni/visualization/graph_colorer.hpp"
+#include "omni/omni_utils.hpp"
+#include "omni/omni_tools.hpp"
+#include "omni/tip_clipper.hpp"
+#include "omni/complex_tip_clipper.hpp"
+#include "omni/bulge_remover.hpp"
+#include "omni/complex_bulge_remover.hpp"
+#include "omni/erroneous_connection_remover.hpp"
+#include "omni/relative_coverage_remover.hpp"
+#include "omni/mf_ec_remover.hpp"
+#include "omni/parallel_processing.hpp"
+#include "utils.hpp"
+#include "simplification/simplification_settings.hpp"
+#include "simplification/single_cell_simplification.hpp"
+#include "detail_coverage.hpp"
+#include "graph_read_correction.hpp"
+#include "detail_coverage.hpp"
+
+#include "stats/chimera_stats.hpp"
+#include "moleculo.hpp"
+
+namespace debruijn {
+
+namespace simplification {
+
+//todo remove this line
+using namespace debruijn_graph;
+
+template<class Graph>
+using AlgoPtr = std::shared_ptr<omnigraph::PersistentAlgorithmBase<Graph>>;
+
+template<class Graph>
+using EdgeConditionT = pred::TypedPredicate<typename Graph::EdgeId>;
+
+template<class Graph>
+class ConditionParser {
+private:
+    typedef typename Graph::EdgeId EdgeId;
+
+    const Graph& g_;
+    string next_token_;
+    string input_;
+    const SimplifInfoContainer settings_;
+    size_t curr_iteration_;
+    size_t iteration_cnt_;
+    std::queue<string> tokenized_input_;
+
+    size_t max_length_bound_;
+    double max_coverage_bound_;
+
+    string ReadNext() {
+        if (!tokenized_input_.empty()) {
+            next_token_ = tokenized_input_.front();
+            tokenized_input_.pop();
+        } else {
+            next_token_ = "";
+        }
+        return next_token_;
+    }
+
+    template<typename T>
+    bool RelaxMax(T& cur_max, T t) {
+        if (t > cur_max) {
+            cur_max = t;
+            return true;
+        }
+        return false;
+    }
+
+    template<typename T>
+    bool RelaxMin(T& cur_min, T t) {
+        if (t < cur_min) {
+            cur_min = t;
+            return true;
+        }
+        return false;
+    }
+
+    double GetCoverageBound() {
+        if (next_token_ == "auto") {
+            return settings_.detected_coverage_bound();
+        } else {
+            return boost::lexical_cast<double>(next_token_);
+        }
+    }
+
+    pred::TypedPredicate<EdgeId> ParseCondition(size_t& min_length_bound,
+                                               double& min_coverage_bound) {
+        if (next_token_ == "tc_lb") {
+            double length_coeff = boost::lexical_cast<double>(ReadNext());
+
+            DEBUG("Creating tip length bound. Coeff " << length_coeff);
+            size_t length_bound = LengthThresholdFinder::MaxTipLength(
+                settings_.read_length(), g_.k(), length_coeff);
+
+            DEBUG("Length bound " << length_bound);
+
+            RelaxMin(min_length_bound, length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+        } else if (next_token_ == "to_ec_lb") {
+            double length_coeff = boost::lexical_cast<double>(ReadNext());
+
+            DEBUG( "Creating length bound for erroneous connections originated from tip merging. Coeff " << length_coeff);
+            size_t length_bound =
+                    LengthThresholdFinder::MaxTipOriginatedECLength(
+                        settings_.read_length(), g_.k(), length_coeff);
+
+            DEBUG("Length bound " << length_bound);
+
+            RelaxMin(min_length_bound, length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+        } else if (next_token_ == "ec_lb") {
+            size_t length_coeff = boost::lexical_cast<size_t>(ReadNext());
+
+            DEBUG("Creating ec length bound. Coeff " << length_coeff);
+            size_t length_bound =
+                    LengthThresholdFinder::MaxErroneousConnectionLength(
+                        g_.k(), length_coeff);
+
+            DEBUG("Length bound " << length_bound);
+
+            RelaxMin(min_length_bound, length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+        } else if (next_token_ == "lb") {
+            size_t length_bound = boost::lexical_cast<size_t>(ReadNext());
+
+            DEBUG("Creating length bound. Value " << length_bound);
+
+            RelaxMin(min_length_bound, length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+        } else if (next_token_ == "cb") {
+            ReadNext();
+            double cov_bound = GetCoverageBound();
+            DEBUG("Creating coverage upper bound " << cov_bound);
+            RelaxMin(min_coverage_bound, cov_bound);
+            return CoverageUpperBound<Graph>(g_, cov_bound);
+        } else if (next_token_ == "icb") {
+            VERIFY(iteration_cnt_ != -1ul && curr_iteration_ != -1ul);
+            ReadNext();
+            double cov_bound = GetCoverageBound();
+            cov_bound = cov_bound / (double) iteration_cnt_ * (double) (curr_iteration_ + 1);
+            DEBUG("Creating iterative coverage upper bound " << cov_bound);
+            RelaxMin(min_coverage_bound, cov_bound);
+            return CoverageUpperBound<Graph>(g_, cov_bound);
+        } else if (next_token_ == "rctc") {
+            ReadNext();
+            DEBUG("Creating relative cov tip cond " << next_token_);
+            return RelativeCoverageTipCondition<Graph>(g_, boost::lexical_cast<double>(next_token_));
+        } else if (next_token_ == "disabled") {
+            DEBUG("Creating disabling condition");
+            return pred::AlwaysFalse<EdgeId>();
+        } else if (next_token_ == "mmm") {
+            ReadNext();
+            DEBUG("Creating max mismatches cond " << next_token_);
+            return MismatchTipCondition<Graph>(g_, lexical_cast<size_t>(next_token_));
+        } else {
+            VERIFY(false);
+            return pred::AlwaysTrue<EdgeId>();
+        }
+    }
+
+    pred::TypedPredicate<EdgeId> ParseConjunction(size_t& min_length_bound,
+                                                   double& min_coverage_bound) {
+        pred::TypedPredicate<EdgeId> answer = pred::AlwaysTrue<EdgeId>();
+        VERIFY(next_token_ == "{");
+        ReadNext();
+        while (next_token_ != "}") {
+            answer = pred::And(answer,
+                              ParseCondition(min_length_bound, min_coverage_bound));
+            ReadNext();
+        }
+        return answer;
+    }
+
+public:
+
+    ConditionParser(const Graph& g, string input, const SimplifInfoContainer& settings,
+                    size_t curr_iteration = -1ul, size_t iteration_cnt = -1ul)
+            : g_(g),
+              input_(input),
+              settings_(settings),
+              curr_iteration_(curr_iteration),
+              iteration_cnt_(iteration_cnt),
+              max_length_bound_(0),
+              max_coverage_bound_(0.) {
+        DEBUG("Creating parser for string " << input);
+        using namespace boost;
+        vector<string> tmp_tokenized_input;
+        boost::split(tmp_tokenized_input, input_, boost::is_any_of(" ,;"), boost::token_compress_on);
+        for (auto it = tmp_tokenized_input.begin();
+             it != tmp_tokenized_input.end(); ++it) {
+            tokenized_input_.push(*it);
+        }
+        ReadNext();
+    }
+
+    pred::TypedPredicate<EdgeId> operator()() {
+        DEBUG("Parsing");
+        pred::TypedPredicate<EdgeId> answer = pred::AlwaysFalse<EdgeId>();
+        VERIFY_MSG(next_token_ == "{", "Expected \"{\", but next token was " << next_token_);
+        while (next_token_ == "{") {
+            size_t min_length_bound = numeric_limits<size_t>::max();
+            double min_coverage_bound = numeric_limits<double>::max();
+            answer = pred::Or(answer,
+                             ParseConjunction(min_length_bound, min_coverage_bound));
+            RelaxMax(max_length_bound_, min_length_bound);
+            RelaxMax(max_coverage_bound_, min_coverage_bound);
+            ReadNext();
+        }
+        return answer;
+    }
+
+    size_t max_length_bound() const {
+        return max_length_bound_;
+    }
+
+    double max_coverage_bound() const {
+        return max_coverage_bound_;
+    }
+
+private:
+    DECL_LOGGER("ConditionParser");
+};
+
+//todo move to visualization
+template<class graph_pack>
+shared_ptr<omnigraph::visualization::GraphColorer<typename graph_pack::graph_t>> DefaultGPColorer(
+        const graph_pack& gp) {
+    auto mapper = MapperInstance(gp);
+    auto path1 = mapper->MapSequence(gp.genome).path();
+    auto path2 = mapper->MapSequence(!gp.genome).path();
+    return omnigraph::visualization::DefaultColorer(gp.g, path1, path2);
+}
+
+template<class Graph>
+class EditDistanceTrackingCallback {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::EdgeData EdgeData;
+    const Graph& g_;
+
+public:
+    EditDistanceTrackingCallback(const Graph& g)
+            : g_(g) {
+    }
+
+    bool operator()(EdgeId edge, const vector<EdgeId>& path) const {
+        vector<Sequence> path_sequences;
+        for (auto it = path.begin(); it != path.end(); ++it) {
+            path_sequences.push_back(g_.EdgeNucls(*it));
+        }
+        Sequence path_sequence(
+            MergeOverlappingSequences(path_sequences, g_.k()));
+        size_t dist = EditDistance(g_.EdgeNucls(edge), path_sequence);
+        TRACE( "Bulge sequences with distance " << dist << " were " << g_.EdgeNucls(edge) << " and " << path_sequence);
+        return true;
+    }
+
+private:
+    DECL_LOGGER("EditDistanceTrackingCallback")
+    ;
+};
+
+//template<class Graph, class SmartEdgeIt>
+//bool ClipTips(
+//    Graph& g,
+//    SmartEdgeIt& it,
+//    const debruijn_config::simplification::tip_clipper& tc_config,
+//    const SimplifInfoContainer& info,
+//    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+//
+//    INFO("Clipping tips");
+//
+//    string condition_str = tc_config.condition;
+//
+//    ConditionParser<Graph> parser(g, condition_str, info);
+//    auto condition = parser();
+//
+//    omnigraph::EdgeRemovingAlgorithm<Graph> tc(g,
+//                                               omnigraph::AddTipCondition(g, condition),
+//                                               removal_handler, true);
+//
+//    TRACE("Tip length bound " << parser.max_length_bound());
+//    return tc.RunFromIterator(it,
+//                      make_shared<LengthUpperBound<Graph>>(g, parser.max_length_bound()));
+//}
+
+//template<class Graph>
+//bool ClipTips(
+//    Graph& g,
+//    const debruijn_config::simplification::tip_clipper& tc_config,
+//    const SimplifInfoContainer& info,
+//    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+//
+//    auto it = g.SmartEdgeBegin(LengthComparator<Graph>(g), true);
+//    return ClipTips(g, it, tc_config, info, removal_handler);
+//}
+
+//enabling tip projection, todo optimize if hotspot
+template<class gp_t>
+HandlerF<typename gp_t::graph_t> WrapWithProjectionCallback(
+    gp_t& gp,
+    HandlerF<typename gp_t::graph_t> removal_handler) {
+    typedef typename gp_t::graph_t Graph;
+    typedef typename Graph::EdgeId EdgeId;
+    TipsProjector<gp_t> tip_projector(gp);
+
+    HandlerF<Graph> projecting_callback = std::bind(&TipsProjector<gp_t>::ProjectTip,
+                                             tip_projector, std::placeholders::_1);
+
+    return func::Composition<EdgeId>(std::ref(removal_handler), projecting_callback);
+}
+
+//Persistent algorithm removing low-coverage (erroneous connection) edges
+//over several iterations. Before each iteration both predicates are
+//re-parsed from condition_str_ with the current iteration counters, so
+//the effective coverage threshold can change across iterations.
+template<class Graph, class InterestingEdgeFinder>
+class LowCoverageEdgeRemovingAlgorithm : public PersistentEdgeRemovingAlgorithm<Graph,
+                                                                                InterestingEdgeFinder, CoverageComparator<Graph>> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentEdgeRemovingAlgorithm<Graph, InterestingEdgeFinder, CoverageComparator<Graph>> base;
+    SimplifInfoContainer simplif_info_;
+    //textual condition, re-parsed each iteration by ConditionParser
+    std::string condition_str_;
+    pred::TypedPredicate<EdgeId> remove_condition_;
+    pred::TypedPredicate<EdgeId> proceed_condition_;
+
+protected:
+
+    //Refreshes remove/proceed predicates for iteration it_cnt.
+    void PrepareIteration(size_t it_cnt, size_t total_it_estimate) override {
+        TRACE("Preparing iteration " << it_cnt << " out of total estimate " << total_it_estimate);
+        ConditionParser<Graph> parser(this->g(), condition_str_,
+                                      simplif_info_, it_cnt, total_it_estimate);
+        remove_condition_ = omnigraph::AddAlternativesPresenceCondition(this->g(), parser());
+        TRACE("Updated remove condition");
+        proceed_condition_ = CoverageUpperBound<Graph>(this->g(), parser.max_coverage_bound());
+        TRACE("Updated proceed condition up to coverage " << parser.max_coverage_bound());
+    }
+
+    //Edges are visited in CoverageComparator order, so processing can stop
+    //once an edge exceeds the parsed coverage bound.
+    bool Proceed(EdgeId e) const override {
+        return proceed_condition_(e);
+    }
+
+    bool ShouldRemove(EdgeId e) const override {
+        return remove_condition_(e);
+    }
+
+public:
+    LowCoverageEdgeRemovingAlgorithm(Graph& g,
+                                    const InterestingEdgeFinder& interest_edge_finder,
+                                    const SimplifInfoContainer& simplif_info,
+                                    const std::string& condition_str,
+                                    std::function<void(EdgeId)> removal_handler = nullptr,
+                                    bool canonical_only = false,
+                                    bool track_changes = true,
+                                    size_t total_iteration_estimate = -1ul)
+            : base(g, interest_edge_finder,
+                   removal_handler,
+                   canonical_only,
+                   CoverageComparator<Graph>(g),
+                   track_changes,
+                   total_iteration_estimate),
+            simplif_info_(simplif_info),
+            condition_str_(condition_str),
+            //predicates are placeholders until the first PrepareIteration call
+            remove_condition_(pred::AlwaysFalse<EdgeId>()),
+            proceed_condition_(pred::AlwaysTrue<EdgeId>()) {}
+private:
+    DECL_LOGGER("LowCoverageEdgeRemovingAlgorithm");
+};
+
+//Translates the bulge remover configuration into an AlternativesAnalyzer.
+//The length bound is derived from k and the configured coefficients.
+template<class Graph>
+AlternativesAnalyzer<Graph> ParseBRConfig(const Graph& g,
+                                          const debruijn_config::simplification::bulge_remover& config) {
+    const size_t length_bound = LengthThresholdFinder::MaxBulgeLength(
+            g.k(), config.max_bulge_length_coefficient,
+            config.max_additive_length_coefficient);
+
+    DEBUG("Length bound " << length_bound);
+
+    return AlternativesAnalyzer<Graph>(g,
+                                       config.max_coverage,
+                                       length_bound,
+                                       config.max_relative_coverage,
+                                       config.max_delta,
+                                       config.max_relative_delta,
+                                       config.max_number_edges);
+}
+
+//Builds an algorithm removing self-conjugate edges that also satisfy the
+//parsed textual condition. Only canonical edges are iterated.
+template<class Graph>
+AlgoPtr<Graph> SelfConjugateEdgeRemoverInstance(Graph &g, const string& condition_str,
+                const SimplifInfoContainer& info,
+                HandlerF<Graph> removal_handler = 0) {
+    ConditionParser<Graph> parser(g, condition_str, info);
+    auto removal_condition = pred::And(SelfConjugateCondition<Graph>(g), parser());
+    return std::make_shared<ParallelEdgeRemovingAlgorithm<Graph>>(
+            g, removal_condition, info.chunk_cnt(), removal_handler,
+            /*canonical_only*/true);
+}
+
+//Removes graph components whose coverage is low relative to the flanking
+//coverage of surrounding edges. No-op when rcc_config.enabled is false.
+//Returns the result of RelativeCoverageComponentRemover::Run().
+template<class Graph>
+bool RemoveRelativelyLowCoverageComponents(
+        Graph &g,
+        const FlankingCoverage<Graph>& flanking_cov,
+        const debruijn_config::simplification::relative_coverage_comp_remover& rcc_config,
+        const SimplifInfoContainer& info,
+        typename ComponentRemover<Graph>::HandlerF removal_handler = 0) {
+    if (rcc_config.enabled) {
+        INFO("Removing relatively low covered connections");
+        size_t connecting_path_length_bound = LengthThresholdFinder::MaxErroneousConnectionLength(
+            g.k(), rcc_config.max_ec_length_coefficient);
+
+        //component pictures disabled; empty dir suppresses drawing
+        std::string pics_dir = "";//cfg::get().output_dir + "rel_cov_components/"
+
+        //negative max_coverage_coeff disables the absolute coverage cap
+        double max_coverage = math::ge(rcc_config.max_coverage_coeff, 0.) 
+                                ? info.detected_coverage_bound() * rcc_config.max_coverage_coeff 
+                                : std::numeric_limits<double>::max();
+
+        omnigraph::simplification::relative_coverage::
+            RelativeCoverageComponentRemover<Graph> rel_rem(
+                g,
+                std::bind(&FlankingCoverage<Graph>::LocalCoverage,
+                          std::cref(flanking_cov), std::placeholders::_1, std::placeholders::_2),
+                rcc_config.coverage_gap, size_t(double(info.read_length()) * rcc_config.length_coeff),
+                size_t(double(info.read_length()) * rcc_config.tip_allowing_length_coeff),
+                connecting_path_length_bound,
+                max_coverage,
+                removal_handler, rcc_config.vertex_count_limit, pics_dir);
+        return rel_rem.Run();
+    } else {
+        INFO("Removal of relatively low covered connections disabled");
+        return false;
+    }
+}
+
+//Disconnects (rather than deletes) edges whose coverage is relatively low
+//compared to the local flanking coverage. No-op when disabled in config.
+//Returns the result of RelativeCoverageDisconnector::Run().
+template<class Graph>
+bool DisconnectRelativelyLowCoverageEdges(Graph &g,
+        const FlankingCoverage<Graph>& flanking_cov,
+        const debruijn_config::simplification::relative_coverage_edge_disconnector& rced_config) {
+    if (rced_config.enabled) {
+        INFO("Disconnecting edges with relatively low coverage");
+        omnigraph::simplification::relative_coverage::RelativeCoverageDisconnector<
+                Graph> disconnector(g, std::bind(&FlankingCoverage<Graph>::LocalCoverage,
+                                std::cref(flanking_cov), std::placeholders::_1,
+                                std::placeholders::_2), rced_config.diff_mult);
+        return disconnector.Run();
+    } else {
+        INFO("Disconnection of relatively low covered edges disabled");
+        return false;
+    }
+}
+
+//Runs complex bulge removal when enabled; returns the remover's result,
+//or false immediately when disabled in the configuration.
+template<class Graph>
+bool RemoveComplexBulges(
+    Graph& g,
+    debruijn_config::simplification::complex_bulge_remover cbr_config,
+    size_t /*iteration*/ = 0) {
+    if (!cbr_config.enabled)
+        return false;
+    INFO("Removing complex bulges");
+    //length bound scales with k by the configured relative factor
+    size_t length_bound = (size_t) ((double) g.k() * cbr_config.max_relative_length);
+    omnigraph::complex_br::ComplexBulgeRemover<Graph> remover(
+        g, length_bound, cbr_config.max_length_difference);
+    return remover.Run();
+}
+
+//template<class Graph>
+//bool RemoveIsolatedEdges(Graph &g, size_t max_length, double max_coverage, size_t max_length_any_cov,
+//                 std::function<void(typename Graph::EdgeId)> removal_handler = 0, size_t chunk_cnt = 1) {
+//    typedef typename Graph::EdgeId EdgeId;
+//
+//    //todo add info that some other edges might be removed =)
+//    INFO("Removing isolated edges");
+//    INFO("All edges shorter than " << max_length_any_cov << " will be removed");
+//    INFO("Also edges shorter than " << max_length << " and coverage smaller than " << max_coverage << " will be removed");
+//    //todo add warn on max_length_any_cov > max_length
+//
+//    auto condition = func::And<EdgeId>(
+//            make_shared<IsolatedEdgeCondition<Graph>>(g),
+//            func::Or<EdgeId>(
+//                make_shared<LengthUpperBound<Graph>>(g, max_length_any_cov),
+//                func::And<EdgeId>(
+//                    make_shared<LengthUpperBound<Graph>>(g, max_length),
+//                    make_shared<CoverageUpperBound<Graph>>(g, max_coverage)
+//                )));
+//
+//    if (chunk_cnt == 1) {
+//        omnigraph::EdgeRemovingAlgorithm<Graph> removing_algo(g, condition, removal_handler);
+//
+//        return removing_algo.Run(LengthComparator<Graph>(g),
+//                                         make_shared<LengthUpperBound<Graph>>(g, std::max(max_length, max_length_any_cov)));
+//    } else {
+//        SemiParallelAlgorithmRunner<Graph, EdgeId> runner(g);
+//        SemiParallelEdgeRemovingAlgorithm<Graph> removing_algo(g, condition, removal_handler);
+//
+//        return RunEdgeAlgorithm(g, runner, removing_algo, chunk_cnt);
+//    }
+//}
+
+//Clips complex tips (tip-like subgraphs handled by ComplexTipClipper).
+//Returns false without touching the graph when disabled; otherwise runs
+//the clipper and returns true.
+template<class Graph>
+bool ClipComplexTips(Graph& g, debruijn_config::simplification::complex_tip_clipper ctc_conf, HandlerF<Graph> removal_handler = 0) {
+    if(!ctc_conf.enabled) {
+        INFO("Complex tip clipping disabled");
+    	return false;
+    }
+
+    //the clipper reports removed edges as sets; adapt the per-edge handler
+    std::function<void(set<EdgeId>)> set_removal_handler_f(0);
+    if (removal_handler) {
+        set_removal_handler_f = std::bind(
+            &omnigraph::simplification::SingleEdgeAdapter<set<EdgeId>>, std::placeholders::_1, removal_handler);
+    }
+
+    INFO("Complex tip clipping");
+    size_t max_edge_length = g.k() * 2;
+    ComplexTipClipper<Graph> tip_clipper(g, max_edge_length, "", set_removal_handler_f);
+    tip_clipper.Run();
+    return true;
+}
+
+//Builds an algorithm removing isolated edges. An isolated edge is removed
+//when it is shorter than max(read_length, ier.max_length_any_cov)
+//regardless of coverage, or shorter than ier.max_length with coverage
+//below ier.max_coverage. Returns nullptr when disabled.
+template<class Graph>
+AlgoPtr<Graph> IsolatedEdgeRemoverInstance(Graph &g,
+                                           debruijn_config::simplification::isolated_edges_remover ier,
+                                           const SimplifInfoContainer& info,
+                                           HandlerF<Graph> removal_handler = 0) {
+    if (!ier.enabled) {
+        return nullptr;
+    }
+
+    //todo document behavior
+    size_t max_length_any_cov = std::max(info.read_length(), ier.max_length_any_cov);
+
+//    INFO("Removing isolated edges");
+//    INFO("All edges shorter than " << max_length_any_cov << " will be removed");
+//    INFO("Also edges shorter than " << ier.max_length << " and coverage smaller than " << ier.max_coverage << " will be removed");
+    //todo add warn on max_length_any_cov > max_length
+
+    auto condition = pred::And(IsolatedEdgeCondition<Graph>(g),
+                              pred::Or(LengthUpperBound<Graph>(g, max_length_any_cov),
+                                      pred::And(LengthUpperBound<Graph>(g, ier.max_length),
+                                               CoverageUpperBound<Graph>(g, ier.max_coverage))));
+
+    return std::make_shared<ParallelEdgeRemovingAlgorithm<Graph>>(g,
+                                                                  condition,
+                                                                  info.chunk_cnt(),
+                                                                  removal_handler,
+                                                                  /*canonical_only*/true);
+}
+
+//Cheap necessary condition used to pre-filter bulge candidates;
+//bounds come from the parsed bulge remover configuration.
+template<class Graph>
+pred::TypedPredicate<typename Graph::EdgeId> NecessaryBulgeCondition(const Graph& g,
+                                                                    const debruijn_config::simplification::bulge_remover& br_config,
+                                                                    const SimplifInfoContainer&) {
+    const auto analyzer = ParseBRConfig(g, br_config);
+    return omnigraph::NecessaryBulgeCondition(g,
+                                              analyzer.max_length(),
+                                              analyzer.max_coverage());
+}
+
+//Cheap necessary condition used to pre-filter tip candidates.
+//parser() is invoked purely for its side effect of computing the length
+//and coverage bounds queried below; binding its returned predicate to an
+//unused local (as before) only produced an unused-variable warning.
+template<class Graph>
+pred::TypedPredicate<typename Graph::EdgeId> NecessaryTipCondition(const Graph& g,
+                                                                  const debruijn_config::simplification::tip_clipper& tc_config,
+                                                                  const SimplifInfoContainer& info) {
+    ConditionParser<Graph> parser(g, tc_config.condition, info);
+    parser();
+    return omnigraph::NecessaryTipCondition(g, parser.max_length_bound(),
+                                            parser.max_coverage_bound());
+}
+
+//Cheap necessary condition used to pre-filter erroneous connection
+//candidates for the given iteration. parser() is invoked purely for its
+//side effect of computing the length and coverage bounds queried below;
+//binding its result to an unused local (as before) only caused a warning.
+template<class Graph>
+pred::TypedPredicate<typename Graph::EdgeId> NecessaryECCondition(const Graph& g,
+                                                                 const debruijn_config::simplification::erroneous_connections_remover& ec_config,
+                                                                 const SimplifInfoContainer& info, size_t current_iteration = 0, size_t iteration_cnt = 1) {
+    ConditionParser<Graph> parser(g, ec_config.condition, info, current_iteration, iteration_cnt);
+    parser();
+    return omnigraph::NecessaryECCondition(g, parser.max_length_bound(),
+                                           parser.max_coverage_bound());
+}
+
+//Builds the iterative erroneous connection remover. Interesting edges are
+//pre-filtered with the necessary EC condition for the last iteration
+//(presumably the most permissive bound, so no later candidate is missed
+//-- confirm against ConditionParser's iteration scaling). Returns nullptr
+//when no condition is configured.
+template<class Graph>
+AlgoPtr<Graph> ECRemoverInstance(Graph& g,
+                                 const debruijn_config::simplification::erroneous_connections_remover& ec_config,
+                                 const SimplifInfoContainer& info,
+                                 HandlerF<Graph> removal_handler,
+                                 size_t iteration_cnt = 1) {
+    if (ec_config.condition.empty())
+        return nullptr;
+
+    typedef omnigraph::ParallelInterestingElementFinder<Graph> InterestingFinderT;
+    InterestingFinderT interesting_finder(g,
+                                          NecessaryECCondition(g, ec_config, info, iteration_cnt - 1, iteration_cnt),
+                                          info.chunk_cnt());
+    return make_shared<LowCoverageEdgeRemovingAlgorithm<Graph, InterestingFinderT>>(
+            g, interesting_finder, info, ec_config.condition, removal_handler,
+            /*canonical only*/ true, /*track changes*/ true, iteration_cnt);
+}
+
+//Builds a tip clipper from an explicit edge condition. The condition is
+//wrapped with AddTipCondition so only genuine tips are removed; edges are
+//processed in length order over canonical edges only.
+template<class Graph>
+AlgoPtr<Graph> TipClipperInstance(Graph& g,
+                                  const EdgeConditionT<Graph>& condition,
+                                  const SimplifInfoContainer& info,
+                                  HandlerF<Graph> removal_handler,
+                                  bool track_changes = true,
+                                  size_t /*iteration_cnt*/ = 1) {
+    return make_shared<ParallelEdgeRemovingAlgorithm<Graph, LengthComparator<Graph>>>(g,
+                                                                        AddTipCondition(g, condition),
+                                                                        info.chunk_cnt(),
+                                                                        removal_handler,
+                                                                        /*canonical_only*/true,
+                                                                        LengthComparator<Graph>(g),
+                                                                        track_changes);
+}
+
+//Config-driven overload: parses the textual tip condition and delegates to
+//the condition-based TipClipperInstance. Returns nullptr when the
+//condition string is empty.
+template<class Graph>
+AlgoPtr<Graph> TipClipperInstance(Graph& g,
+                                           const debruijn_config::simplification::tip_clipper& tc_config,
+                                           const SimplifInfoContainer& info,
+                                           HandlerF<Graph> removal_handler,
+                                           size_t iteration_cnt = 1) {
+    if (tc_config.condition.empty())
+        return nullptr;
+
+    ConditionParser<Graph> parser(g, tc_config.condition, info);
+    auto condition = parser();
+    return TipClipperInstance(g, condition, info, removal_handler, /*track changes*/true, iteration_cnt);
+}
+
+//Builds a tip clipper whose condition combines a length upper bound with a
+//topology-based uniqueness/plausibility check, instead of coverage.
+//Change tracking is disabled for this variant.
+template<class Graph>
+AlgoPtr<Graph> TopologyTipClipperInstance(
+    Graph &g,
+    const debruijn_config::simplification::topology_tip_clipper& ttc_config,
+    const SimplifInfoContainer& info,
+    HandlerF<Graph> removal_handler) {
+
+    auto condition
+            = pred::And(LengthUpperBound<Graph>(g,
+                                               LengthThresholdFinder::MaxTipLength(info.read_length(), g.k(), ttc_config.length_coeff)),
+                       DefaultUniquenessPlausabilityCondition<Graph>(g,
+                                                                     ttc_config.uniqueness_length, ttc_config.plausibility_length));
+
+    return TipClipperInstance(g,
+                              condition, info, removal_handler, /*track changes*/false);
+}
+
+//Builds a (parallel or sequential) bulge remover from the configuration.
+//Returns nullptr when disabled, or when the remover is restricted to the
+//main iteration and this is not it. Candidates are pre-filtered with the
+//cheap necessary bulge condition.
+template<class Graph>
+AlgoPtr<Graph> BRInstance(Graph& g,
+                          const debruijn_config::simplification::bulge_remover& br_config,
+                          const SimplifInfoContainer& info,
+                          HandlerF<Graph> removal_handler,
+                          size_t /*iteration_cnt*/ = 1) {
+    typedef ParallelInterestingElementFinder<Graph, 
+                                    typename Graph::EdgeId> InterestingEdgeFinder;
+    if (!br_config.enabled || (br_config.main_iteration_only && !info.main_iteration())) {
+        return nullptr;
+    }
+
+    auto alternatives_analyzer = ParseBRConfig(g, br_config);
+
+     
+    InterestingEdgeFinder interesting_edge_finder(g,
+                                                  NecessaryBulgeCondition(g,
+                                                                          alternatives_analyzer.max_length(),
+                                                                          alternatives_analyzer.max_coverage()), 
+                                                  info.chunk_cnt());
+    if (br_config.parallel) {
+        INFO("Creating parallel br instance");
+        return make_shared<ParallelBulgeRemover<Graph, InterestingEdgeFinder>>(g,
+                interesting_edge_finder,
+                br_config.buff_size,
+                br_config.buff_cov_diff,
+                br_config.buff_cov_rel_diff,
+                alternatives_analyzer,
+                nullptr,
+                removal_handler,
+                /*track_changes*/true);
+    } else {
+        INFO("Creating br instance");
+        return make_shared<BulgeRemover<Graph, InterestingEdgeFinder>>(g,
+                interesting_edge_finder,
+                alternatives_analyzer,
+                nullptr,
+                removal_handler,
+                /*track_changes*/true);
+    }
+}
+
+//todo make this all work for end of the edges also? switch to canonical iteration?
+//todo rename, since checking topology also
+//Edge condition: holds for non-unit-length edges that have alternative
+//outgoing edges at their start and whose starting flanking coverage does
+//not exceed max_coverage_.
+template<class Graph>
+class FlankingCovBound : public EdgeCondition<Graph> {
+    typedef EdgeCondition<Graph> base;
+    typedef typename Graph::EdgeId EdgeId;
+    const FlankingCoverage<Graph>& flanking_cov_;
+    double max_coverage_;
+public:
+    FlankingCovBound(const Graph& g,
+                     const FlankingCoverage<Graph>& flanking_cov,
+                     double max_coverage)
+        : base(g),
+          flanking_cov_(flanking_cov),
+          max_coverage_(max_coverage) {
+    }
+
+    bool Check(EdgeId e) const override {
+        return this->g().length(e) > 1 
+                    && this->g().OutgoingEdgeCount(this->g().EdgeStart(e)) > 1 
+                    && math::le(flanking_cov_.CoverageOfStart(e), max_coverage_);
+    }
+
+};
+
+//Persistent algorithm that disconnects (rather than removes) edges
+//satisfying the given predicate. Candidates are found in parallel; the
+//predicate is re-checked in Process before disconnecting, since the graph
+//may have changed since the candidate was collected.
+template<class Graph, class Comparator = std::less<typename Graph::EdgeId>>
+class ParallelDisconnectionAlgorithm : public PersistentProcessingAlgorithm<Graph,
+                                                typename Graph::EdgeId,
+                                                ParallelInterestingElementFinder<Graph>, Comparator> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentProcessingAlgorithm<Graph, EdgeId,
+            ParallelInterestingElementFinder<Graph>, Comparator> base;
+    pred::TypedPredicate<EdgeId> condition_;
+    omnigraph::simplification::relative_coverage::EdgeDisconnector<Graph> disconnector_;
+
+public:
+    ParallelDisconnectionAlgorithm(Graph& g,
+                                    pred::TypedPredicate<EdgeId> condition,
+                                    size_t chunk_cnt,
+                                    HandlerF<Graph> removal_handler,
+                                    const Comparator& comp = Comparator(),
+                                    bool track_changes = true)
+            : base(g,
+                   ParallelInterestingElementFinder<Graph>(g, condition, chunk_cnt),
+                           /*canonical_only*/false, comp, track_changes),
+                   condition_(condition),
+                   disconnector_(g, removal_handler) {
+    }
+
+    //Disconnects e when the condition still holds; returns whether changed.
+    bool Process(EdgeId e) override {
+        if (condition_(e)) {
+            disconnector_(e);
+            return true;
+        }
+        return false;
+    }
+
+};
+
+//Creates an algorithm disconnecting edges whose flanking coverage at the
+//start vertex is at most cov_bound. A negative cov_bound disables the
+//feature and nullptr is returned.
+template<class Graph>
+AlgoPtr<Graph> LowFlankDisconnectorInstance(Graph& g,
+                                           const FlankingCoverage<Graph>& flanking_cov,
+                                           double cov_bound,
+                                           const SimplifInfoContainer& info,
+                                           HandlerF<Graph> removal_handler) {
+    if (math::ls(cov_bound, 0.)) {
+        INFO("Flanking coverage based disconnection disabled");
+        return nullptr;
+    }
+
+    FlankingCovBound<Graph> bound_condition(g, flanking_cov, cov_bound);
+    return make_shared<ParallelDisconnectionAlgorithm<Graph>>(g, bound_condition,
+                                                              info.chunk_cnt(),
+                                                              removal_handler);
+}
+
+////todo add chunk_cnt
+//template<class Graph>
+//bool ClipTips(
+//    Graph& g,
+//    const std::string& condition,
+//    const SimplifInfoContainer& info,
+//    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+//
+//    if (condition != "") {
+//        ConditionParser<Graph> parser(g, condition, info);
+//        auto condition = parser();
+//        ParallelEdgeRemovingAlgorithm<Graph, LengthComparator<Graph>> algo(g,
+//                                                                           AddTipCondition(g, condition),
+//                                            info.chunk_cnt(),
+//                                            removal_handler,
+//                                            /*canonical_only*/true,
+//                                            LengthComparator<Graph>(g));
+//        return algo.Run();
+//    } else {
+//        return false;
+//    }
+//}
+
+//template<class Graph>
+//bool RemoveLowCoverageEdges(
+//    Graph& g,
+//    const std::string& condition,
+//    const SimplifInfoContainer& info,
+//    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+//
+//    if (condition != "") {
+//        ConditionParser<Graph> parser(g, condition, info);
+//         auto condition = parser();
+//         blahblahblah
+//         ParallelEdgeRemovingAlgorithm<Graph, CoverageComparator<Graph>> algo(g,
+//                                             condition,
+//                                             info.chunk_cnt(),
+//                                             removal_handler,
+//                                             /*canonical_only*/true,
+//                                             CoverageComparator<Graph>(g));
+//        return algo.Run();
+//    } else {
+//        return false;
+//    }
+//}
+
+}
+}
diff --git a/src/debruijn/simplification/parallel_simplification_algorithms.hpp b/src/debruijn/simplification/parallel_simplification_algorithms.hpp
index 7229325..113cd02 100644
--- a/src/debruijn/simplification/parallel_simplification_algorithms.hpp
+++ b/src/debruijn/simplification/parallel_simplification_algorithms.hpp
@@ -19,6 +19,19 @@ namespace debruijn {
 
 namespace simplification {
 
+//    bool EnableParallel() {
+//        if (simplif_cfg_.presimp.parallel) {
+//            INFO("Trying to enable parallel presimplification.");
+//            if (gp_.g.AllHandlersThreadSafe()) {
+//                return true;
+//            } else {
+//                WARN("Not all handlers are threadsafe, switching to non-parallel presimplif");
+//                //gp.g.PrintHandlersNames();
+//            }
+//        }
+//        return false;
+//    }
+
 template<class Graph>
 class ParallelTipClippingFunctor {
     typedef typename Graph::EdgeId EdgeId;
@@ -258,7 +271,7 @@ public:
     }
 
     void PutMarks() {
-        auto chunk_iterators = ParallelIterationHelper<Graph>(g_).VertexChunks(chunk_cnt_);
+        auto chunk_iterators = IterationHelper<Graph, VertexId>(g_).Chunks(chunk_cnt_);
 
         #pragma omp parallel for schedule(guided)
         for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
@@ -267,7 +280,7 @@ public:
     }
 
     void ClearMarks() {
-        auto chunk_iterators = ParallelIterationHelper<Graph>(g_).EdgeChunks(chunk_cnt_);
+        auto chunk_iterators = IterationHelper<Graph, EdgeId>(g_).Chunks(chunk_cnt_);
 
         #pragma omp parallel for schedule(guided)
         for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
@@ -289,7 +302,7 @@ class ParallelLowCoverageFunctor {
 
     Graph& g_;
     typename Graph::HelperT helper_;
-    shared_ptr<func::Predicate<EdgeId>> ec_condition_;
+    pred::TypedPredicate<EdgeId> ec_condition_;
     HandlerF handler_f_;
 
     omnigraph::GraphElementMarker<EdgeId> edge_marker_;
@@ -313,17 +326,13 @@ public:
     ParallelLowCoverageFunctor(Graph& g, size_t max_length, double max_coverage, HandlerF handler_f = 0)
             : g_(g),
               helper_(g_.GetConstructionHelper()),
-              ec_condition_(
-                      func::And<EdgeId>(
-                              func::And<EdgeId>(make_shared<omnigraph::LengthUpperBound<Graph>>(g, max_length),
-                                                make_shared<omnigraph::CoverageUpperBound<Graph>>(g, max_coverage)),
-                              make_shared<omnigraph::AlternativesPresenceCondition<Graph>>(g))),
-              handler_f_(handler_f) {
-
-    }
+              ec_condition_(pred::And(pred::And(omnigraph::LengthUpperBound<Graph>(g, max_length),
+                                              omnigraph::CoverageUpperBound<Graph>(g, max_coverage)),
+                                     omnigraph::AlternativesPresenceCondition<Graph>(g))),
+                            handler_f_(handler_f) {}
 
     bool IsOfInterest(EdgeId e) const {
-        return !edge_marker_.is_marked(e) && ec_condition_->Check(e);
+        return !edge_marker_.is_marked(e) && ec_condition_(e);
     }
 
     void PrepareForProcessing(size_t /*interesting_cnt*/) {
@@ -592,6 +601,324 @@ public:
 
 };
 
+
+//todo add conjugate filtration
+//Runs an algorithm over graph elements partitioned into chunks, with the
+//chunks processed in parallel. Algo must provide bool Process(ElementType).
+template<class Graph, class ElementType>
+class AlgorithmRunner {
+    const Graph& g_;
+
+    //Applies algo.Process to every element of [begin, end); true if any
+    //call reported a change.
+    template<class Algo, class It>
+    bool ProcessBucket(Algo& algo, It begin, It end) {
+        bool changed = false;
+        for (auto it = begin; it != end; ++it) {
+            changed |= algo.Process(*it);
+        }
+        return changed;
+    }
+
+public:
+
+    const Graph& g() const {
+        return g_;
+    }
+
+    AlgorithmRunner(Graph& g)
+            : g_(g) {
+
+    }
+
+    //chunk_iterators of size k delimit k - 1 buckets, processed in parallel.
+    //Returns true if any Process call reported a change.
+    template<class Algo, class ItVec>
+    bool RunFromChunkIterators(Algo& algo, const ItVec& chunk_iterators) {
+        //fixed missing space in the log message ("3chunks" -> "3 chunks")
+        DEBUG("Running from " << chunk_iterators.size() - 1 << " chunks");
+        VERIFY(chunk_iterators.size() > 1);
+        bool changed = false;
+        #pragma omp parallel for schedule(guided) reduction(|:changed)
+        for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
+            changed |= ProcessBucket(algo, chunk_iterators[i], chunk_iterators[i + 1]);
+        }
+        DEBUG("Finished");
+        return changed;
+    }
+private:
+    DECL_LOGGER("AlgorithmRunner")
+    ;
+};
+
+//Two-phase parallel runner: first collects elements of interest into
+//per-chunk buckets (parallel scan with Algo::IsOfInterest), then processes
+//all buckets in parallel, passing each element a globally unique index
+//derived from cumulative bucket sizes.
+template<class Graph, class ElementType>
+class TwoStepAlgorithmRunner {
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+
+    const Graph& g_;
+    const bool filter_conjugate_;
+    //one bucket of candidates per chunk, filled during the scan phase
+    std::vector<std::vector<ElementType>> elements_of_interest_;
+
+    //Processes a bucket; idx_offset is the element's global index base.
+    template<class Algo>
+    bool ProcessBucket(Algo& algo, const std::vector<ElementType>& bucket, size_t idx_offset) const {
+        bool changed = false;
+        for (ElementType el : bucket) {
+            changed |= algo.Process(el, idx_offset++);
+        }
+        return changed;
+    }
+
+    //Phase two: sizes the algorithm, then processes buckets in parallel.
+    template<class Algo>
+    bool Process(Algo& algo) const {
+        std::vector<size_t> cumulative_bucket_sizes;
+        cumulative_bucket_sizes.push_back(0);
+        for (const auto& bucket : elements_of_interest_) {
+            cumulative_bucket_sizes.push_back(cumulative_bucket_sizes.back() + bucket.size());
+        }
+        DEBUG("Preparing for processing");
+        algo.PrepareForProcessing(cumulative_bucket_sizes.back());
+        bool changed = false;
+        DEBUG("Processing buckets");
+        #pragma omp parallel for schedule(guided) reduction(|:changed)
+        for (size_t i = 0; i < elements_of_interest_.size(); ++i) {
+            changed |= ProcessBucket(algo, elements_of_interest_[i], cumulative_bucket_sizes[i]);
+        }
+        return changed;
+    }
+
+    //Records el in its chunk's bucket when interesting; when conjugate
+    //filtering is on, only the id-smaller element of each pair is kept.
+    template<class Algo>
+    void CountElement(Algo& algo, ElementType el, size_t bucket) {
+        if (filter_conjugate_ && g_.conjugate(el) < el)
+            return;
+        if (algo.IsOfInterest(el)) {
+            TRACE("Element " << g_.str(el) << " is of interest");
+            elements_of_interest_[bucket].push_back(el);
+        } else {
+            TRACE("Element " << g_.str(el) << " is not interesting");
+        }
+    }
+
+    template<class Algo, class It>
+    void CountAll(Algo& algo, It begin, It end, size_t bucket) {
+        for (auto it = begin; !(it == end); ++it) {
+            CountElement(algo, *it, bucket);
+        }
+    }
+
+public:
+
+    const Graph& g() const {
+        return g_;
+    }
+
+    //conjugate elements are filtered based on ids
+    //should be used only if both conjugate elements are simultaneously either interesting or not
+    //fixme filter_conjugate is redundant
+    TwoStepAlgorithmRunner(Graph& g, bool filter_conjugate)
+            : g_(g),
+              filter_conjugate_(filter_conjugate) {
+
+    }
+
+    //chunk_iterators of size k delimit k - 1 buckets; returns true if any
+    //Process call reported a change.
+    template<class Algo, class ItVec>
+    bool RunFromChunkIterators(Algo& algo, const ItVec& chunk_iterators) {
+        DEBUG("Started running from " << chunk_iterators.size() - 1 << " chunks");
+        VERIFY(algo.ShouldFilterConjugate() == filter_conjugate_);
+        VERIFY(chunk_iterators.size() > 1);
+        elements_of_interest_.clear();
+        elements_of_interest_.resize(chunk_iterators.size() - 1);
+        DEBUG("Searching elements of interest");
+        #pragma omp parallel for schedule(guided)
+        for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
+            CountAll(algo, chunk_iterators[i], chunk_iterators[i + 1], i);
+        }
+        DEBUG("Processing");
+        return Process(algo);
+    }
+
+//    template<class Algo, class It>
+//    void RunFromIterator(Algo& algo, It begin, It end) {
+//        RunFromChunkIterators(algo, std::vector<It> { begin, end });
+//    }
+private:
+    DECL_LOGGER("TwoStepAlgorithmRunner")
+    ;
+};
+
+//Semi-parallel runner: elements of interest are collected (from chunked
+//iterators) into a smart ordered set, then processed sequentially so the
+//algorithm may safely modify the graph during processing.
+template<class Graph, class ElementType>
+class SemiParallelAlgorithmRunner {
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+
+    const Graph& g_;
+
+public:
+
+    const Graph& g() const {
+        return g_;
+    }
+
+    SemiParallelAlgorithmRunner(Graph& g)
+            : g_(g) {
+
+    }
+
+    //Returns true if any Process call reported a change.
+    template<class Algo, class ItVec, class Comparator = std::less<ElementType>>
+    bool RunFromChunkIterators(Algo& algo, const ItVec& chunk_iterators,
+            const Comparator& comp = Comparator()) {
+        VERIFY(chunk_iterators.size() > 1);
+        SmartSetIterator<Graph, ElementType, Comparator> it(g_, false, comp);
+
+        FillInterestingFromChunkIterators(chunk_iterators, it,
+                                          std::bind(&Algo::IsOfInterest, std::ref(algo), std::placeholders::_1));
+
+        //sequential processing phase
+        bool changed = false;
+        for (; !it.IsEnd(); ++it) {
+            changed |= algo.Process(*it);
+        }
+        return changed;
+    }
+
+private:
+    DECL_LOGGER("SemiParallelAlgorithmRunner")
+    ;
+};
+
+//todo generalize to use for other algorithms if needed
+//Algorithm plug-in for SemiParallelAlgorithmRunner that deletes edges
+//satisfying the given predicate.
+template<class Graph>
+class SemiParallelEdgeRemovingAlgorithm {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    Graph& g_;
+    pred::TypedPredicate<EdgeId> condition_;
+    EdgeRemover<Graph> edge_remover_;
+
+public:
+    SemiParallelEdgeRemovingAlgorithm(Graph& g,
+                                      pred::TypedPredicate<EdgeId> condition,
+                                      std::function<void(EdgeId)> removal_handler = 0) :
+            g_(g), condition_(condition), edge_remover_(g, removal_handler) {
+    }
+
+    bool IsOfInterest(EdgeId e) const {
+        //pred::TypedPredicate is a callable wrapper, not a (smart) pointer:
+        //invoke it directly. The previous condition_->Check(e) was left over
+        //from the shared_ptr<func::Predicate> interface and does not compile
+        //(cf. ec_condition_(e) in ParallelLowCoverageFunctor).
+        return condition_(e);
+    }
+
+    bool Process(EdgeId e) {
+        edge_remover_.DeleteEdge(e);
+        return true;
+    }
+};
+
+//Compresses the graph in parallel and then cleans up the isolated
+//vertices that compression leaves behind. When loop_post_compression is
+//set, a sequential compression pass follows to handle loops -- presumably
+//not covered by the parallel pass.
+template<class Graph>
+void ParallelCompress(Graph& g, size_t chunk_cnt, bool loop_post_compression = true) {
+    INFO("Parallel compression");
+    debruijn::simplification::ParallelCompressor<Graph> compressor(g);
+    TwoStepAlgorithmRunner<Graph, typename Graph::VertexId> runner(g, /*filter_conjugate*/false);
+    RunVertexAlgorithm(g, runner, compressor, chunk_cnt);
+
+    //compression can strand vertices; remove them right away
+    CleanGraph(g, chunk_cnt);
+
+    if (loop_post_compression) {
+        INFO("Launching post-compression to compress loops");
+        CompressAllVertices(g, chunk_cnt);
+    }
+}
+
+template<class Graph>
+bool ParallelClipTips(Graph& g,
+              const string& tip_condition,
+              const SimplifInfoContainer& info,
+              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+    INFO("Parallel tip clipping");
+
+    string condition_str = tip_condition;
+
+    ConditionParser<Graph> parser(g, condition_str, info);
+
+    parser();
+
+    debruijn::simplification::ParallelTipClippingFunctor<Graph> tip_clipper(g,
+        parser.max_length_bound(), parser.max_coverage_bound(), removal_handler);
+
+    AlgorithmRunner<Graph, typename Graph::VertexId> runner(g);
+
+    RunVertexAlgorithm(g, runner, tip_clipper, info.chunk_cnt());
+
+    ParallelCompress(g, info.chunk_cnt());
+    //Cleaner is launched inside ParallelCompression
+    //CleanGraph(g, info.chunk_cnt());
+
+    return true;
+}
+
+//template<class Graph>
+//bool ParallelRemoveBulges(Graph& g,
+//              const debruijn_config::simplification::bulge_remover& br_config,
+//              size_t /*read_length*/,
+//              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+//    INFO("Parallel bulge remover");
+//
+//    size_t max_length = LengthThresholdFinder::MaxBulgeLength(
+//        g.k(), br_config.max_bulge_length_coefficient,
+//        br_config.max_additive_length_coefficient);
+//
+//    DEBUG("Max bulge length " << max_length);
+//
+//    debruijn::simplification::ParallelSimpleBRFunctor<Graph> bulge_remover(g,
+//                            max_length,
+//                            br_config.max_coverage,
+//                            br_config.max_relative_coverage,
+//                            br_config.max_delta,
+//                            br_config.max_relative_delta,
+//                            removal_handler);
+//    for (VertexId v : g) {
+//        bulge_remover(v);
+//    }
+//
+//    Compress(g);
+//    return true;
+//}
+
+template<class Graph>
+bool ParallelEC(Graph& g,
+              const string& ec_condition,
+              const SimplifInfoContainer& info,
+              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+    INFO("Parallel ec remover");
+
+    ConditionParser<Graph> parser(g, ec_condition, info);
+
+    auto condition = parser();
+
+    size_t max_length = parser.max_length_bound();
+    double max_coverage = parser.max_coverage_bound();
+
+    debruijn::simplification::CriticalEdgeMarker<Graph> critical_marker(g, info.chunk_cnt());
+    critical_marker.PutMarks();
+
+    debruijn::simplification::ParallelLowCoverageFunctor<Graph> ec_remover(g,
+                            max_length,
+                            max_coverage,
+                            removal_handler);
+
+    TwoStepAlgorithmRunner<Graph, typename Graph::EdgeId> runner(g, true);
+
+    RunEdgeAlgorithm(g, runner, ec_remover, info.chunk_cnt());
+
+    critical_marker.ClearMarks();
+
+    ParallelCompress(g, info.chunk_cnt());
+    //called in parallel compress
+    //CleanGraph(g, info.chunk_cnt());
+    return true;
+}
+
+template<class Graph, class AlgoRunner, class Algo>
+bool RunVertexAlgorithm(Graph& g, AlgoRunner& runner, Algo& algo, size_t chunk_cnt) {
+    return runner.RunFromChunkIterators(algo, IterationHelper<Graph, VertexId>(g).Chunks(chunk_cnt));
+}
+
+template<class Graph, class AlgoRunner, class Algo>
+bool RunEdgeAlgorithm(Graph& g, AlgoRunner& runner, Algo& algo, size_t chunk_cnt) {
+    return runner.RunFromChunkIterators(algo, IterationHelper<Graph, EdgeId>(g).Chunks(chunk_cnt));
+}
+
 }
 
 }
diff --git a/src/debruijn/simplification/simplification_settings.hpp b/src/debruijn/simplification/simplification_settings.hpp
index 1b55370..6a51247 100644
--- a/src/debruijn/simplification/simplification_settings.hpp
+++ b/src/debruijn/simplification/simplification_settings.hpp
@@ -38,8 +38,7 @@ class SimplifInfoContainer {
     size_t read_length_;
     double detected_mean_coverage_;
     double detected_coverage_bound_;
-    size_t iteration_count_;
-    size_t iteration_;
+    bool main_iteration_;
     size_t chunk_cnt_;
 
 public: 
@@ -47,8 +46,7 @@ public:
         read_length_(-1ul),
         detected_mean_coverage_(-1.0),
         detected_coverage_bound_(-1.0),
-        iteration_count_(-1ul),
-        iteration_(-1ul),
+        main_iteration_(false),
         chunk_cnt_(-1ul) {
     }
 
@@ -67,16 +65,10 @@ public:
         return detected_coverage_bound_;
     }
 
-    size_t iteration_count() const {
-        VERIFY(iteration_count_ != -1ul);
-        return iteration_count_;
+    bool main_iteration() const {
+        return main_iteration_;
     }
 
-    size_t iteration() const {
-        VERIFY(iteration_ != -1ul);
-        return iteration_;
-    }
-    
     size_t chunk_cnt() const {
         VERIFY(chunk_cnt_ != -1ul);
         return chunk_cnt_;
@@ -97,13 +89,8 @@ public:
         return *this;
     }
 
-    SimplifInfoContainer& set_iteration_count(size_t iteration_count) {
-        iteration_count_ = iteration_count;
-        return *this;
-    }
-
-    SimplifInfoContainer& set_iteration(size_t iteration) {
-        iteration_ = iteration;
+    SimplifInfoContainer& set_main_iteration(bool main_iteration) {
+        main_iteration_ = main_iteration;
         return *this;
     }
 
@@ -113,193 +100,6 @@ public:
     }
 };
 
-template<class Graph>
-class ConditionParser {
-private:
-    typedef typename Graph::EdgeId EdgeId;
-
-    const Graph& g_;
-    string next_token_;
-    string input_;
-    const SimplifInfoContainer settings_;
-    queue<string> tokenized_input_;
-
-    size_t max_length_bound_;
-    double max_coverage_bound_;
-
-    string ReadNext() {
-        if (!tokenized_input_.empty()) {
-            next_token_ = tokenized_input_.front();
-            tokenized_input_.pop();
-        } else {
-            next_token_ = "";
-        }
-        return next_token_;
-    }
-
-    template<typename T>
-    bool RelaxMax(T& cur_max, T t) {
-        if (t > cur_max) {
-            cur_max = t;
-            return true;
-        }
-        return false;
-    }
-
-    template<typename T>
-    bool RelaxMin(T& cur_min, T t) {
-        if (t < cur_min) {
-            cur_min = t;
-            return true;
-        }
-        return false;
-    }
-
-    double GetCoverageBound() {
-        if (next_token_ == "auto") {
-            return settings_.detected_coverage_bound();
-        } else {
-            return lexical_cast<double>(next_token_);
-        }
-    }
-
-    shared_ptr<Predicate<EdgeId>> ParseCondition(size_t& min_length_bound,
-                                                 double& min_coverage_bound) {
-        if (next_token_ == "tc_lb") {
-            double length_coeff = lexical_cast<double>(ReadNext());
-
-            DEBUG("Creating tip length bound. Coeff " << length_coeff);
-            size_t length_bound = LengthThresholdFinder::MaxTipLength(
-                settings_.read_length(), g_.k(), length_coeff);
-
-            DEBUG("Length bound" << length_bound);
-
-            RelaxMin(min_length_bound, length_bound);
-            return make_shared<LengthUpperBound<Graph>>(g_, length_bound);
-        } else if (next_token_ == "to_ec_lb") {
-            double length_coeff = lexical_cast<double>(ReadNext());
-
-            DEBUG( "Creating length bound for erroneous connections originated from tip merging. Coeff " << length_coeff);
-            size_t length_bound =
-                    LengthThresholdFinder::MaxTipOriginatedECLength(
-                        settings_.read_length(), g_.k(), length_coeff);
-
-            DEBUG("Length bound" << length_bound);
-
-            RelaxMin(min_length_bound, length_bound);
-            return make_shared<LengthUpperBound<Graph>>(g_, length_bound);
-        } else if (next_token_ == "ec_lb") {
-            size_t length_coeff = lexical_cast<size_t>(ReadNext());
-
-            DEBUG("Creating ec length bound. Coeff " << length_coeff);
-            size_t length_bound =
-                    LengthThresholdFinder::MaxErroneousConnectionLength(
-                        g_.k(), length_coeff);
-
-            RelaxMin(min_length_bound, length_bound);
-            return make_shared<LengthUpperBound<Graph>>(g_, length_bound);
-        } else if (next_token_ == "lb") {
-            size_t length_bound = lexical_cast<size_t>(ReadNext());
-
-            DEBUG("Creating length bound. Value " << length_bound);
-
-            RelaxMin(min_length_bound, length_bound);
-            return make_shared<LengthUpperBound<Graph>>(g_, length_bound);
-        } else if (next_token_ == "cb") {
-            ReadNext();
-            double cov_bound = GetCoverageBound();
-            DEBUG("Creating coverage upper bound " << cov_bound);
-            RelaxMin(min_coverage_bound, cov_bound);
-            return make_shared<CoverageUpperBound<Graph>>(g_, cov_bound);
-        } else if (next_token_ == "icb") {
-            ReadNext();
-            double cov_bound = GetCoverageBound();
-            cov_bound = cov_bound / (double) settings_.iteration_count() * (double) (settings_.iteration() + 1);
-            DEBUG("Creating iterative coverage upper bound " << cov_bound);
-            RelaxMin(min_coverage_bound, cov_bound);
-            return make_shared<CoverageUpperBound<Graph>>(g_, cov_bound);
-        } else if (next_token_ == "rctc") {
-            ReadNext();
-            DEBUG("Creating relative cov tip cond " << next_token_);
-            return make_shared<RelativeCoverageTipCondition<Graph>>(
-                g_, lexical_cast<double>(next_token_));
-        } else if (next_token_ == "disabled") {
-            DEBUG("Creating disabling condition");
-            return make_shared<func::AlwaysFalse<EdgeId>>();
-        } else if (next_token_ == "mmm") {
-            ReadNext();
-            DEBUG("Creating max mismatches cond " << next_token_);
-            return make_shared < MismatchTipCondition<Graph>> (g_, lexical_cast < size_t > (next_token_));
-        } else {
-            VERIFY(false);
-            return make_shared<func::AlwaysTrue<EdgeId>>();
-        }
-    }
-
-    shared_ptr<Predicate<EdgeId>> ParseConjunction(size_t& min_length_bound,
-                                                   double& min_coverage_bound) {
-        shared_ptr<Predicate<EdgeId>> answer =
-                make_shared<AlwaysTrue<EdgeId>>();
-        VERIFY(next_token_ == "{");
-        ReadNext();
-        while (next_token_ != "}") {
-            answer = make_shared<AndOperator<EdgeId>>(
-                answer,
-                ParseCondition(min_length_bound, min_coverage_bound));
-            ReadNext();
-        }
-        return answer;
-    }
-
-public:
-
-    ConditionParser(const Graph& g, string input, const SimplifInfoContainer& settings)
-            : g_(g),
-              input_(input),
-              settings_(settings),
-              max_length_bound_(0),
-              max_coverage_bound_(0.) {
-        DEBUG("Creating parser for string " << input);
-        using namespace boost;
-        vector<string> tmp_tokenized_input;
-        split(tmp_tokenized_input, input_, is_any_of(" ,;"), token_compress_on);
-        for (auto it = tmp_tokenized_input.begin();
-             it != tmp_tokenized_input.end(); ++it) {
-            tokenized_input_.push(*it);
-        }
-        ReadNext();
-    }
-
-    shared_ptr<Predicate<EdgeId>> operator()() {
-        DEBUG("Parsing");
-        shared_ptr<Predicate<EdgeId>> answer = make_shared<NotOperator<EdgeId>>(
-            make_shared<AlwaysTrue<EdgeId>>());
-        VERIFY_MSG(next_token_ == "{", "Expected \"{\", but next token was " << next_token_);
-        while (next_token_ == "{") {
-            size_t min_length_bound = numeric_limits<size_t>::max();
-            double min_coverage_bound = numeric_limits<double>::max();
-            answer = make_shared<OrOperator<EdgeId>>(
-                answer,
-                ParseConjunction(min_length_bound, min_coverage_bound));
-            RelaxMax(max_length_bound_, min_length_bound);
-            RelaxMax(max_coverage_bound_, min_coverage_bound);
-            ReadNext();
-        }
-        return answer;
-    }
-
-    size_t max_length_bound() const {
-        return max_length_bound_;
-    }
-
-    double max_coverage_bound() const {
-        return max_coverage_bound_;
-    }
-
-private:
-    DECL_LOGGER("ConditionParser");
-};
-
 }
 
 }
diff --git a/src/debruijn/simplification/single_cell_simplification.hpp b/src/debruijn/simplification/single_cell_simplification.hpp
new file mode 100644
index 0000000..fd9a893
--- /dev/null
+++ b/src/debruijn/simplification/single_cell_simplification.hpp
@@ -0,0 +1,110 @@
+#pragma once
+
+#include "config_struct.hpp"
+#include "omni/erroneous_connection_remover.hpp"
+#include "omni/mf_ec_remover.hpp"
+#include "simplification_settings.hpp"
+#include "detail_coverage.hpp"
+
+namespace debruijn {
+namespace simplification {
+
+template<class Graph>
+bool TopologyRemoveErroneousEdges(
+    Graph &g,
+    const debruijn_graph::debruijn_config::simplification::topology_based_ec_remover& tec_config,
+    std::function<void(typename Graph::EdgeId)> removal_handler) {
+    INFO("Removing connections based on topology");
+    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
+        g.k(), tec_config.max_ec_length_coefficient);
+
+    pred::TypedPredicate<typename Graph::EdgeId>
+            condition(DefaultUniquenessPlausabilityCondition<Graph>(g, tec_config.uniqueness_length, tec_config.plausibility_length));
+
+    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
+}
+
+template<class Graph>
+bool MultiplicityCountingRemoveErroneousEdges(
+    Graph &g,
+    const debruijn_graph::debruijn_config::simplification::topology_based_ec_remover& tec_config,
+    std::function<void(typename Graph::EdgeId)> removal_handler) {
+    INFO("Removing connections based on topological multiplicity counting");
+    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
+        g.k(), tec_config.max_ec_length_coefficient);
+
+    pred::TypedPredicate<typename Graph::EdgeId>
+            condition(MultiplicityCountingCondition<Graph>(g, tec_config.uniqueness_length,
+                                          /*plausibility*/ MakePathLengthLowerBound(g,
+                                                                                    PlausiblePathFinder<Graph>(g, 2 * tec_config.plausibility_length), tec_config.plausibility_length)));
+
+    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
+}
+
+template<class Graph>
+bool RemoveThorns(
+    Graph &g,
+    const debruijn_graph::debruijn_config::simplification::interstrand_ec_remover& isec_config,
+    std::function<void(typename Graph::EdgeId)> removal_handler) {
+    INFO("Removing interstrand connections");
+    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
+        g.k(), isec_config.max_ec_length_coefficient);
+
+    auto condition
+            = pred::And(LengthUpperBound<Graph>(g, max_length),
+                       ThornCondition<Graph>(g, isec_config.uniqueness_length, isec_config.span_distance));
+
+    return omnigraph::RemoveErroneousEdgesInCoverageOrder(g, condition, numeric_limits<double>::max(), removal_handler);
+}
+
+template<class Graph>
+bool TopologyReliabilityRemoveErroneousEdges(
+    Graph &g,
+    const debruijn_graph::debruijn_config::simplification::tr_based_ec_remover& trec_config,
+    std::function<void(typename Graph::EdgeId)> removal_handler) {
+    INFO("Removing connections based on topology and reliable coverage");
+    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
+        g.k(), trec_config.max_ec_length_coefficient);
+
+    auto condition
+            = pred::And(CoverageUpperBound<Graph>(g, trec_config.unreliable_coverage),
+                       PredicateUniquenessPlausabilityCondition<Graph>(g,
+                                                                       /*uniqueness*/MakePathLengthLowerBound(g, UniquePathFinder<Graph>(g), trec_config.uniqueness_length),
+                                                                       /*plausibility*/pred::AlwaysTrue<typename Graph::EdgeId>()));
+
+    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
+}
+
+template<class Graph>
+bool MaxFlowRemoveErroneousEdges(
+    Graph &g,
+    const debruijn_graph::debruijn_config::simplification::max_flow_ec_remover& mfec_config,
+    omnigraph::HandlerF<Graph> removal_handler = 0) {
+    if (!mfec_config.enabled)
+        return false;
+    INFO("Removing connections based on max flow strategy");
+    size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
+        g.k(), (size_t) mfec_config.max_ec_length_coefficient);
+    omnigraph::MaxFlowECRemover<Graph> erroneous_edge_remover(
+        g, max_length, mfec_config.uniqueness_length,
+        mfec_config.plausibility_length, removal_handler);
+    return erroneous_edge_remover.Process();
+}
+
+template<class Graph>
+bool RemoveHiddenEC(Graph& g,
+                    const debruijn_graph::FlankingCoverage<Graph>& flanking_cov,
+                    const debruijn_graph::debruijn_config::simplification::hidden_ec_remover& her_config,
+                    const SimplifInfoContainer& info,
+                    omnigraph::HandlerF<Graph> removal_handler) {
+    if (her_config.enabled) {
+        INFO("Removing hidden erroneous connections");
+        return HiddenECRemover<Graph>(g, her_config.uniqueness_length, flanking_cov,
+                               her_config.unreliability_threshold, info.detected_coverage_bound(),
+                               her_config.relative_threshold, removal_handler).Run();
+    }
+    return false;
+}
+
+}
+}
diff --git a/src/debruijn/split_path_constructor.hpp b/src/debruijn/split_path_constructor.hpp
index 4f080e3..822a90b 100644
--- a/src/debruijn/split_path_constructor.hpp
+++ b/src/debruijn/split_path_constructor.hpp
@@ -18,6 +18,7 @@
 #include "logger/logger.hpp"
 #include "de/paired_info.hpp"
 #include "omni/path_processor.hpp"
+#include "../include/de/paired_info.hpp"
 
 namespace debruijn_graph {
 
@@ -61,21 +62,24 @@ class SplitPathConstructor {
   public:
     SplitPathConstructor(const Graph &graph): graph_(graph) {}
 
-    vector<PathInfo> ConvertPIToSplitPaths(const std::vector<PairInfo>& pair_infos, double is, double is_var) const {
+    vector<PathInfo> ConvertPIToSplitPaths(EdgeId cur_edge, const omnigraph::de::PairedInfoIndexT<Graph> &pi, double is, double is_var) const {
+        vector<PairInfo> pair_infos; //TODO: this is an adaptor for the old implementation
+        for (auto i : pi.Get(cur_edge))
+            for (auto j : i.second)
+                pair_infos.emplace_back(cur_edge, i.first, j);
+
         vector<PathInfo> result;
-        if (pair_infos.size() == 0)
+        if (pair_infos.empty())
             return result;
 
-        EdgeId cur_edge = pair_infos[0].first;
         vector<bool> pair_info_used(pair_infos.size());
         TRACE("Preparing path_processor for this base edge");
         size_t path_upper_bound = PairInfoPathLengthUpperBound(graph_.k(), (size_t) is, is_var);
 
-        PathStorageCallback<Graph> callback(graph_);
+        //FIXME is path_upper_bound enough?
         PathProcessor<Graph> path_processor(graph_,
-                                            path_upper_bound,
-                                            path_upper_bound,
-                                            graph_.EdgeEnd(cur_edge), graph_.EdgeStart(cur_edge), callback);
+                                            graph_.EdgeEnd(cur_edge),
+                                            path_upper_bound);
 
         TRACE("Path_processor is done");
 
@@ -88,6 +92,7 @@ class SplitPathConstructor {
             DEBUG("SPC: pi " << cur_info);
             vector<EdgeId> common_part = GetCommonPathsEnd(graph_, cur_edge, cur_info.second,
                                                            (size_t) (cur_info.d() - cur_info.var()),
+                    //FIXME is it a bug?!
                                                            (size_t) (cur_info.d() - cur_info.var()),
                                                            path_processor);
             DEBUG("Found common part of size " << common_part.size());
diff --git a/src/debruijn/stage.hpp b/src/debruijn/stage.hpp
index 9ed2fce..a9c07ac 100644
--- a/src/debruijn/stage.hpp
+++ b/src/debruijn/stage.hpp
@@ -122,13 +122,17 @@ class StageManager {
     StageManager(SavesPolicy policy = SavesPolicy())
             : saves_policy_(policy) {}
 
-    void add(AssemblyStage *stage) {
+    StageManager& add(AssemblyStage *stage) {
         stages_.push_back(std::unique_ptr<AssemblyStage>(stage));
         stages_.back()->parent_ = this;
+
+        return *this;
     }
-    void add(std::initializer_list<AssemblyStage*> stages) {
+    StageManager& add(std::initializer_list<AssemblyStage*> stages) {
         for (auto it = stages.begin(), et = stages.end(); it != et; ++it)
             add(*it);
+
+        return *this;
     }
 
     void run(debruijn_graph::conj_graph_pack& g,
diff --git a/src/debruijn/stats/debruijn_stats.hpp b/src/debruijn/stats/debruijn_stats.hpp
index a0c20aa..73d04c9 100644
--- a/src/debruijn/stats/debruijn_stats.hpp
+++ b/src/debruijn/stats/debruijn_stats.hpp
@@ -54,8 +54,8 @@ FindGenomeMappingPath(const Sequence& genome, const graph_pack& gp) {
 template <class graph_pack>
 shared_ptr<omnigraph::visualization::GraphColorer<Graph>> DefaultColorer(const graph_pack& gp) {
     return omnigraph::visualization::DefaultColorer(gp.g, 
-        FindGenomeMappingPath(gp.genome, gp.g, gp.index, gp.kmer_mapper).path(),
-        FindGenomeMappingPath(!gp.genome, gp.g, gp.index, gp.kmer_mapper).path());
+        FindGenomeMappingPath(gp.genome.GetSequence(), gp.g, gp.index, gp.kmer_mapper).path(),
+        FindGenomeMappingPath(!gp.genome.GetSequence(), gp.g, gp.index, gp.kmer_mapper).path());
 }
 
 template <class graph_pack>
@@ -78,8 +78,8 @@ class GenomeMappingStat: public AbstractStatCounter {
     Sequence genome_;
     size_t k_;
   public:
-    GenomeMappingStat(const Graph &graph, const Index &index,	Sequence genome, size_t k) :
-            graph_(graph), index_(index), genome_(genome), k_(k) {}
+    GenomeMappingStat(const Graph &graph, const Index &index, GenomeStorage genome, size_t k) :
+            graph_(graph), index_(index), genome_(genome.GetSequence()), k_(k) {}
 
     virtual ~GenomeMappingStat() {}
 
@@ -151,9 +151,9 @@ void CountStats(const graph_pack& gp) {
     typedef typename Graph::EdgeId EdgeId;
     INFO("Counting stats");
     StatList stats;
-    Path<EdgeId> path1 = FindGenomeMappingPath(gp.genome, gp.g, gp.index,
+    Path<EdgeId> path1 = FindGenomeMappingPath(gp.genome.GetSequence(), gp.g, gp.index,
                                       gp.kmer_mapper).path();
-    Path<EdgeId> path2 = FindGenomeMappingPath(!gp.genome, gp.g, gp.index,
+    Path<EdgeId> path2 = FindGenomeMappingPath(!gp.genome.GetSequence(), gp.g, gp.index,
                                       gp.kmer_mapper).path();
     stats.AddStat(new VertexEdgeStat<Graph>(gp.g));
     stats.AddStat(new BlackEdgesStat<Graph>(gp.g, path1, path2));
@@ -226,7 +226,7 @@ optional<runtime_k::RtSeq> FindCloseKP1mer(const conj_graph_pack &gp,
         for (int dir = -1; dir <= 1; dir += 2) {
             size_t pos = (gp.genome.size() - k + genome_pos + dir * diff) % (gp.genome.size() - k);
             runtime_k::RtSeq kp1mer = gp.kmer_mapper.Substitute(
-                runtime_k::RtSeq (k + 1, gp.genome, pos));
+                runtime_k::RtSeq (k + 1, gp.genome.GetSequence(), pos));
             if (gp.index.contains(kp1mer))
                 return optional<runtime_k::RtSeq>(kp1mer);
         }
@@ -240,136 +240,137 @@ void PrepareForDrawing(conj_graph_pack &gp) {
     CollectContigPositions(gp);
 }
 
-inline
-void ProduceDetailedInfo(conj_graph_pack &gp,
-                         const omnigraph::GraphLabeler<Graph>& labeler, const string& run_folder,
-                         const string &pos_name,
-                         info_printer_pos pos) {
-    string base_folder = path::append_path(run_folder, "pictures/");
-    make_dir(base_folder);
-    string folder = path::append_path(base_folder, pos_name + "/");
-
-    auto it = cfg::get().info_printers.find(pos);
-    VERIFY(it != cfg::get().info_printers.end());
-
-    const debruijn_config::info_printer & config = it->second;
-
-    if (!(config.print_stats || config.write_error_loc ||
-        config.write_full_graph ||
-        config.write_full_nc_graph ||
-        config.write_components ||
-        !config.components_for_kmer.empty() ||
-        config.write_components_along_genome ||
-        config.write_components_along_contigs || config.save_full_graph ||
-        !config.components_for_genome_pos.empty())) {
-
-        return;
-    } 
 
-    make_dir(folder);
-    PrepareForDrawing(gp);
-
-    auto path1 = FindGenomeMappingPath(gp.genome, gp.g, gp.index,
-                                      gp.kmer_mapper).path();
-
-    auto colorer = DefaultColorer(gp);
-
-    if (config.print_stats) {
-        INFO("Printing statistics for " << details::info_printer_pos_name(pos));
-        CountStats(gp);
-    }
-
-    if (config.write_error_loc) {
-        make_dir(folder + "error_loc/");
-        WriteErrorLoc(gp.g, folder + "error_loc/", colorer, labeler);
+struct detail_info_printer {
+    detail_info_printer(conj_graph_pack &gp,
+                        const omnigraph::GraphLabeler<Graph>& labeler, 
+                        const string& folder)
+            :  gp_(gp),
+               labeler_(labeler),
+               folder_(folder) {
     }
 
-    if (config.write_full_graph) {
-        WriteComponent(GraphComponent<Graph>(gp.g, gp.g.begin(), gp.g.end()), folder + "full_graph.dot", colorer, labeler);
-    }
+    void operator() (info_printer_pos pos,
+                    const string& folder_suffix = "") {
+        string pos_name = details::info_printer_pos_name(pos);
 
-    if (config.write_full_nc_graph) {
-        WriteSimpleComponent(GraphComponent<Graph>(gp.g, gp.g.begin(), gp.g.end()), folder + "nc_full_graph.dot", colorer, labeler);
+        ProduceDetailedInfo(pos_name + folder_suffix, pos);
     }
 
-    if (config.write_components) {
-        make_dir(folder + "components/");
-        omnigraph::visualization::WriteComponents(gp.g, folder + "components/", omnigraph::ReliableSplitter<Graph>(gp.g), colorer, labeler);
-    }
+  private:
 
-    if (!config.components_for_kmer.empty()) {
-        string kmer_folder = path::append_path(base_folder, "kmer_loc/");
-        make_dir(kmer_folder);
-        auto kmer = runtime_k::RtSeq(gp.k_value + 1, config.components_for_kmer.substr(0, gp.k_value + 1).c_str());
-        string file_name = path::append_path(kmer_folder, pos_name + ".dot");
-        WriteKmerComponent(gp, kmer, file_name, colorer,labeler);
-    }
+    void ProduceDetailedInfo(const string &pos_name,
+                             info_printer_pos pos) {
+        static size_t call_cnt = 0;
+
+        auto it = cfg::get().info_printers.find(pos);
+        VERIFY(it != cfg::get().info_printers.end());
+    
+        const debruijn_config::info_printer & config = it->second;
+    
+        if (config.basic_stats) {
+            VertexEdgeStat<conj_graph_pack::graph_t> stats(gp_.g);
+            INFO("Number of vertices : " << stats.vertices() << ", number of edges : "
+                  << stats.edges() << ", sum length of edges : " << stats.edge_length());
+        }
 
-    if (config.write_components_along_genome) {
-        make_dir(folder + "along_genome/");
-        omnigraph::visualization::WriteComponentsAlongPath(gp.g, path1.sequence(), folder + "along_genome/", colorer, labeler);
-    }
+        if (config.save_full_graph) {
+            string saves_folder = path::append_path(path::append_path(folder_, "saves/"),
+                                              ToString(call_cnt++, 2) + "_" + pos_name + "/");
+            path::make_dirs(saves_folder);
+            graphio::ConjugateDataPrinter<conj_graph_pack::graph_t> printer(gp_.g);
+            graphio::PrintBasicGraph(saves_folder + "graph", printer);
+        }
 
-    if (config.write_components_along_contigs) {
-        make_dir(folder + "along_contigs/");
-        NewExtendedSequenceMapper<Graph, Index> mapper(gp.g, gp.index, gp.kmer_mapper);
-        WriteGraphComponentsAlongContigs(gp.g, mapper, folder + "along_contigs/", colorer, labeler);
-    }
+        if (config.extended_stats) {
+            VERIFY(cfg::get().developer_mode);
+            CountStats(gp_);
+        }
 
-    if (config.save_full_graph) {
-        make_dir(folder + "full_graph_save/");
-        graphio::PrintGraphPack(folder + "full_graph_save/graph", gp);
-    }
+        if (!(config.write_error_loc ||
+            config.write_full_graph ||
+            config.write_full_nc_graph ||
+            config.write_components ||
+            !config.components_for_kmer.empty() ||
+            config.write_components_along_genome ||
+            config.write_components_along_contigs ||
+            !config.components_for_genome_pos.empty())) {
+            return;
+        } 
+
+        VERIFY(cfg::get().developer_mode);
+        string pics_folder = path::append_path(path::append_path(folder_, "pictures/"),
+                                          ToString(call_cnt++, 2) + "_" + pos_name + "/");
+        path::make_dirs(pics_folder);
+        PrepareForDrawing(gp_);
+    
+        auto path1 = FindGenomeMappingPath(gp_.genome.GetSequence(), gp_.g, gp_.index,
+                                          gp_.kmer_mapper).path();
+    
+        auto colorer = DefaultColorer(gp_);
+    
+        if (config.write_error_loc) {
+            make_dir(pics_folder + "error_loc/");
+            WriteErrorLoc(gp_.g, pics_folder + "error_loc/", colorer, labeler_);
+        }
+    
+        if (config.write_full_graph) {
+            WriteComponent(GraphComponent<Graph>(gp_.g, gp_.g.begin(), gp_.g.end()), pics_folder + "full_graph.dot", colorer, labeler_);
+        }
+    
+        if (config.write_full_nc_graph) {
+            WriteSimpleComponent(GraphComponent<Graph>(gp_.g, gp_.g.begin(), gp_.g.end()), pics_folder + "nc_full_graph.dot", colorer, labeler_);
+        }
+    
+        if (config.write_components) {
+            make_dir(pics_folder + "components/");
+            omnigraph::visualization::WriteComponents(gp_.g, pics_folder + "components/", omnigraph::ReliableSplitter<Graph>(gp_.g), colorer, labeler_);
+        }
+    
+        if (!config.components_for_kmer.empty()) {
+            string kmer_folder = path::append_path(pics_folder, "kmer_loc/");
+            make_dir(kmer_folder);
+            auto kmer = runtime_k::RtSeq(gp_.k_value + 1, config.components_for_kmer.substr(0, gp_.k_value + 1).c_str());
+            string file_name = path::append_path(kmer_folder, pos_name + ".dot");
+            WriteKmerComponent(gp_, kmer, file_name, colorer, labeler_);
+        }
+    
+        if (config.write_components_along_genome) {
+            make_dir(pics_folder + "along_genome/");
+            omnigraph::visualization::WriteComponentsAlongPath(gp_.g, path1.sequence(), pics_folder + "along_genome/", colorer, labeler_);
+        }
+    
+        if (config.write_components_along_contigs) {
+            make_dir(pics_folder + "along_contigs/");
+            NewExtendedSequenceMapper<Graph, Index> mapper(gp_.g, gp_.index, gp_.kmer_mapper);
+            WriteGraphComponentsAlongContigs(gp_.g, mapper, pics_folder + "along_contigs/", colorer, labeler_);
+        }
 
-    if (!config.components_for_genome_pos.empty()) {
-        string pos_loc_folder = path::append_path(base_folder, "pos_loc/");
-        make_dir(pos_loc_folder);
-        vector<string> positions;
-        boost::split(positions, config.components_for_genome_pos,
-                     boost::is_any_of(" ,"), boost::token_compress_on);
-        for (auto it = positions.begin(); it != positions.end(); ++it) {
-            optional < runtime_k::RtSeq > close_kp1mer = FindCloseKP1mer(gp,
-                                                                         boost::lexical_cast<int>(*it), gp.k_value);
-            if (close_kp1mer) {
-                string locality_folder = path::append_path(pos_loc_folder, *it + "/");
-                make_dir(locality_folder);
-                WriteKmerComponent(gp, *close_kp1mer, path::append_path(locality_folder, pos_name + ".dot"), colorer, labeler);
-            } else {
-                WARN(
-                    "Failed to find genome kp1mer close to the one at position "
-                    << *it << " in the graph. Which is " << runtime_k::RtSeq (gp.k_value + 1, gp.genome, boost::lexical_cast<int>(*it)));
+        if (!config.components_for_genome_pos.empty()) {
+            string pos_loc_folder = path::append_path(pics_folder, "pos_loc/");
+            make_dir(pos_loc_folder);
+            vector<string> positions;
+            boost::split(positions, config.components_for_genome_pos,
+                         boost::is_any_of(" ,"), boost::token_compress_on);
+            for (auto it = positions.begin(); it != positions.end(); ++it) {
+                boost::optional<runtime_k::RtSeq> close_kp1mer = FindCloseKP1mer(gp_,
+                                                                             boost::lexical_cast<int>(*it), gp_.k_value);
+                if (close_kp1mer) {
+                    string locality_folder = path::append_path(pos_loc_folder, *it + "/");
+                    make_dir(locality_folder);
+                    WriteKmerComponent(gp_, *close_kp1mer, path::append_path(locality_folder, pos_name + ".dot"), colorer, labeler_);
+                } else {
+                    WARN(
+                        "Failed to find genome kp1mer close to the one at position "
+                        << *it << " in the graph. Which is " << runtime_k::RtSeq (gp_.k_value + 1, gp_.genome.GetSequence(), boost::lexical_cast<int>(*it)));
+                }
             }
         }
     }
-}
-
-struct detail_info_printer {
-    detail_info_printer(conj_graph_pack &gp,
-                        const omnigraph::GraphLabeler<Graph>& labeler, const string& folder)
-            :  folder_(folder),
-               func_(bind(&ProduceDetailedInfo, boost::ref(gp),
-                     boost::ref(labeler), _3, _2, _1)),
-               graph_(gp.g), cnt(0) {
-    }
 
-    void operator()(info_printer_pos pos,
-                    string const& folder_suffix = "") {
-        cnt++;
-        string pos_name = details::info_printer_pos_name(pos);
-        VertexEdgeStat<conj_graph_pack::graph_t> stats(graph_);
-        TRACE("Number of vertices : " << stats.vertices() << ", number of edges : " << stats.edges() << ", sum length of edges : " << stats.edge_length());
-        func_(pos,
-              ToString(cnt, 2) + "_" + pos_name + folder_suffix,
-              folder_
-            //                (path::append_path(folder_, (pos_name + folder_suffix)) + "/")
-              );
-    }
-
-  private:
+    conj_graph_pack& gp_;
+    const omnigraph::GraphLabeler<Graph>& labeler_;
     string folder_;
-    std::function<void(info_printer_pos, string const&, string const&)> func_;
-    const conj_graph_pack::graph_t &graph_;
-    size_t cnt;
 };
 
 inline
diff --git a/src/dipspades/consensus_contigs_constructor/contig_correctors/close_gaps_corrector.hpp b/src/dipspades/consensus_contigs_constructor/contig_correctors/close_gaps_corrector.hpp
index 1d8186e..23b1be4 100644
--- a/src/dipspades/consensus_contigs_constructor/contig_correctors/close_gaps_corrector.hpp
+++ b/src/dipspades/consensus_contigs_constructor/contig_correctors/close_gaps_corrector.hpp
@@ -32,7 +32,7 @@ class CloseGapsCorrector : public AbstractContigCorrector{
 				VertexId end = g_.EdgeStart(path[i + 1]);
 				auto dijkstra = DijkstraHelper<Graph>::CreateTargeredBoundedDijkstra(g_,
 						end, dsp_cfg::get().pbr.max_bulge_nucls_len); //DijkstraHelper<Graph>::CreateBoundedDijkstra(g_, dsp_cfg::get().pbr.max_bulge_nucls_len);
-				dijkstra.run(start);
+				dijkstra.Run(start);
 				if(dijkstra.DistanceCounted(end)){
 					vector<EdgeId> add_path = dijkstra.GetShortestPathTo(end);
 					for(auto e = add_path.begin(); e != add_path.end(); e++)
@@ -42,7 +42,7 @@ class CloseGapsCorrector : public AbstractContigCorrector{
 				else{
 					// second attempt
 					VertexId prev_start = g_.EdgeStart(cur_edge);
-					dijkstra.run(prev_start);
+					dijkstra.Run(prev_start);
 	               if(dijkstra.DistanceCounted(end)){
 	                    vector<EdgeId> add_path = dijkstra.GetShortestPathTo(end);
 						new_path.erase(new_path.begin() + new_path.size() - 1);
diff --git a/src/dipspades/consensus_contigs_constructor/contig_correctors/overlap_searcher.hpp b/src/dipspades/consensus_contigs_constructor/contig_correctors/overlap_searcher.hpp
index c7c5366..7c5d6f1 100644
--- a/src/dipspades/consensus_contigs_constructor/contig_correctors/overlap_searcher.hpp
+++ b/src/dipspades/consensus_contigs_constructor/contig_correctors/overlap_searcher.hpp
@@ -242,10 +242,10 @@ class OverlapCorrector : public LoopBulgeDeletionCorrector{
 
 					auto path_searcher = DijkstraHelper<Graph>::CreateBackwardBoundedDijkstra(g_,
 							dsp_cfg::get().pbr.max_bulge_nucls_len);
-					path_searcher.run(start1);
+					path_searcher.Run(start1);
 					auto reached_vert1 = path_searcher.ReachedVertices();
 
-					path_searcher.run(start2);
+					path_searcher.Run(start2);
 					auto reached_vert2 = path_searcher.ReachedVertices();
 
 					for(size_t i = 0; i < first_start; i++){
@@ -282,10 +282,10 @@ class OverlapCorrector : public LoopBulgeDeletionCorrector{
 
 					auto path_searcher = DijkstraHelper<Graph>::CreateBackwardBoundedDijkstra(g_,
 							dsp_cfg::get().pbr.max_bulge_nucls_len);
-					path_searcher.run(end1);
+					path_searcher.Run(end1);
 					auto reached_vert1 = path_searcher.ReachedVertices();
 
-					path_searcher.run(end2);
+					path_searcher.Run(end2);
 					auto reached_vert2 = path_searcher.ReachedVertices();
 
 					for(size_t i = last_end; i < last_path.size(); i++){
diff --git a/src/dipspades/consensus_contigs_constructor/contig_correctors/redundant_contig_remover.hpp b/src/dipspades/consensus_contigs_constructor/contig_correctors/redundant_contig_remover.hpp
index 2b5cec8..7a71a10 100644
--- a/src/dipspades/consensus_contigs_constructor/contig_correctors/redundant_contig_remover.hpp
+++ b/src/dipspades/consensus_contigs_constructor/contig_correctors/redundant_contig_remover.hpp
@@ -263,12 +263,12 @@ public:
 				// find vertex v, such that paths starts are reachable from v
 				auto path_searcher1 = DijkstraHelper<Graph>::CreateBackwardBoundedDijkstra(g_,
 						max_tail_length_);
-				path_searcher1.run(start1);
+				path_searcher1.Run(start1);
 				auto reached_vert1 = path_searcher1.ReachedVertices();
 
 				auto path_searcher2 = DijkstraHelper<Graph>::CreateBackwardBoundedDijkstra(g_,
 						max_tail_length_);
-				path_searcher2.run(start2);
+				path_searcher2.Run(start2);
 				auto reached_vert2 = path_searcher2.ReachedVertices();
 
 				for(size_t i = 0; i < pos1[0]; i++)
@@ -353,12 +353,12 @@ public:
 					// find vertex v, such that paths ends are reachable from v
 					auto path_searcher1 = DijkstraHelper<Graph>::CreateBackwardBoundedDijkstra(g_,
 							max_tail_length_);
-					path_searcher1.run(end1);
+					path_searcher1.Run(end1);
 					auto reached_vert1 = path_searcher1.ReachedVertices();
 
 					auto path_searcher2 = DijkstraHelper<Graph>::CreateBackwardBoundedDijkstra(g_,
 							max_tail_length_);
-					path_searcher2.run(end2);
+					path_searcher2.Run(end2);
 					auto reached_vert2 = path_searcher2.ReachedVertices();
 
 					for(size_t i = ConvInd(pos1[pos1.size() - 1], path1.size()); i < path1.size(); i++)
diff --git a/src/dipspades/dipspades.hpp b/src/dipspades/dipspades.hpp
index cb2e0a8..8300731 100644
--- a/src/dipspades/dipspades.hpp
+++ b/src/dipspades/dipspades.hpp
@@ -5,15 +5,17 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "../debruijn/stage.hpp"
+
+#include "io/splitting_wrapper.hpp"
+#include "graph_construction.hpp"
+#include "stage.hpp"
+
+#include "dipspades_config.hpp"
 
 #include "polymorphic_bulge_remover/polymorphic_bulge_remover.hpp"
 #include "consensus_contigs_constructor/consensus_contigs_constructor.hpp"
 #include "haplotype_assembly/haplotype_assembler.hpp"
 #include "kmer_gluing/equal_sequence_gluer.hpp"
-#include "../debruijn/graph_construction.hpp"
-#include "io/splitting_wrapper.hpp"
-#include "dipspades_config.hpp"
 
 using namespace debruijn_graph;
 using namespace spades;
@@ -215,7 +217,7 @@ public:
 			return;
 		INFO("Diploid graph construction");
 		conj_graph_pack double_graph_pack(graph_pack.k_value, dsp_cfg::get().io.tmp_dir,
-										  dsp_cfg::get().io.num_libraries);
+										  dsp_cfg::get().io.num_libraries, "");
 		construct_graph_from_contigs(double_graph_pack);
 		HaplotypeAssembler(graph_pack, double_graph_pack, storage().default_storage,
 						   storage().composite_storage, storage().redundancy_map).Run();
@@ -238,7 +240,7 @@ void run_dipspades() {
     		dsp_cfg::get().bp.K,
     		dsp_cfg::get().io.tmp_dir,
     		dsp_cfg::get().io.num_libraries,
-            Sequence(""), // reference genome
+            "", // reference genome
             1); // flanking range
 
     conj_gp.kmer_mapper.Attach();
diff --git a/src/dipspades/dipspades_config.cpp b/src/dipspades/dipspades_config.cpp
index f08e317..96ad099 100644
--- a/src/dipspades/dipspades_config.cpp
+++ b/src/dipspades/dipspades_config.cpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #include "dipspades_config.hpp"
-#include "../include/config_common.hpp"
+#include "config_common.hpp"
 #include "utils/files_utils.hpp"
 
 using namespace dipspades;
diff --git a/src/dipspades/dipspades_config.hpp b/src/dipspades/dipspades_config.hpp
index 14defe9..aab9b2d 100644
--- a/src/dipspades/dipspades_config.hpp
+++ b/src/dipspades/dipspades_config.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "../include/config_singl.hpp"
+#include "config_singl.hpp"
 #include <boost/property_tree/ptree_fwd.hpp>
 
 struct dipspades_config {
diff --git a/src/dipspades/main.cpp b/src/dipspades/main.cpp
index 02c5944..1a5b79f 100644
--- a/src/dipspades/main.cpp
+++ b/src/dipspades/main.cpp
@@ -9,16 +9,15 @@
  * Assembler Main
  */
 #include "standard.hpp"
-#include "../include/logger/log_writers.hpp"
+#include "logger/log_writers.hpp"
 
-#include "../include/segfault_handler.hpp"
-#include "../include/stacktrace.hpp"
-#include "../include/memory_limit.hpp"
-#include "../include/copy_file.hpp"
-#include "../include/perfcounter.hpp"
-#include "../include/runtime_k.hpp"
+#include "segfault_handler.hpp"
+#include "stacktrace.hpp"
+#include "memory_limit.hpp"
+#include "copy_file.hpp"
+#include "perfcounter.hpp"
+#include "runtime_k.hpp"
 
-//#include "config_struct.hpp"
 
 #include "graph_pack.hpp"
 #include "construction.hpp"
@@ -31,14 +30,15 @@
 #include <sys/stat.h>
 #include <unistd.h>
 
-#include "../include/segfault_handler.hpp"
+#include "segfault_handler.hpp"
+#include "version.hpp"
 
 void make_dirs(){
-	  make_dir(dsp_cfg::get().io.output_base);
-	  make_dir(dsp_cfg::get().io.output_root);
-	  make_dir(dsp_cfg::get().io.output_dir);
-	  make_dir(dsp_cfg::get().io.output_saves);
-	  make_dir(dsp_cfg::get().io.tmp_dir);
+    make_dir(dsp_cfg::get().io.output_base);
+    make_dir(dsp_cfg::get().io.output_root);
+    make_dir(dsp_cfg::get().io.output_dir);
+    make_dir(dsp_cfg::get().io.output_saves);
+    make_dir(dsp_cfg::get().io.tmp_dir);
 }
 
 void copy_configs(string cfg_filename, string to) {
@@ -86,7 +86,7 @@ int main(int /*argc*/, char** argv) {
     load_config          (cfg_filename);
     make_dirs();
     if(dsp_cfg::get().rp.developer_mode)
-    	copy_configs(cfg_filename, path::append_path(dsp_cfg::get().io.output_dir, "configs"));
+        copy_configs(cfg_filename, path::append_path(dsp_cfg::get().io.output_dir, "configs"));
     create_console_logger(cfg_filename);
 
     VERIFY(dsp_cfg::get().bp.K >= runtime_k::MIN_K && dsp_cfg::get().bp.K < runtime_k::MAX_K);
@@ -97,7 +97,6 @@ int main(int /*argc*/, char** argv) {
     INFO("Starting dipSPAdes, built from " SPADES_GIT_REFSPEC ", git revision " SPADES_GIT_SHA1);
     INFO("Assembling dataset (" << dsp_cfg::get().io.dataset_name << ") with K=" << dsp_cfg::get().bp.K);
     dipspades::run_dipspades();
-
 //    link_output("latest_success");
   } catch (std::bad_alloc const& e) {
     std::cerr << "Not enough memory to run SPAdes. " << e.what() << std::endl;
diff --git a/src/dipspades/polymorphic_bulge_remover/bulge_paths_searcher.hpp b/src/dipspades/polymorphic_bulge_remover/bulge_paths_searcher.hpp
index e686eb5..94295a0 100644
--- a/src/dipspades/polymorphic_bulge_remover/bulge_paths_searcher.hpp
+++ b/src/dipspades/polymorphic_bulge_remover/bulge_paths_searcher.hpp
@@ -38,7 +38,7 @@ public:
 	vector<VertexId> VerticesReachedFrom(VertexId start_vertex) {
 		auto bounded_dijkstra = DijkstraHelper<Graph>::CreateBoundedDijkstra(this->graph_,
 				this->search_depth_, this->max_neigh_number_);
-		bounded_dijkstra.run(start_vertex);
+		bounded_dijkstra.Run(start_vertex);
 		TRACE("Reached vertices size - " << bounded_dijkstra.ReachedVertices());
 		return bounded_dijkstra.ReachedVertices();
 	}
@@ -46,7 +46,7 @@ public:
 	vector<vector<EdgeId> > GetAllPathsTo(VertexId start_vertex, VertexId end_vertex) {
 		auto bounded_dijkstra = DijkstraHelper<Graph>::CreateBoundedDijkstra(this->graph_,
 				this->search_depth_, this->max_neigh_number_);
-		bounded_dijkstra.run(start_vertex);
+		bounded_dijkstra.Run(start_vertex);
 
 		vector<vector<EdgeId> > alternative_paths;
 		auto shortest_path = bounded_dijkstra.GetShortestPathTo(end_vertex);
@@ -82,15 +82,14 @@ public:
 	vector<VertexId> VerticesReachedFrom(VertexId start_vertex) {
 		auto bounded_dijkstra = DijkstraHelper<Graph>::CreateBoundedDijkstra(this->graph_,
 				this->search_depth_);
-		bounded_dijkstra.run(start_vertex);
+		bounded_dijkstra.Run(start_vertex);
 		return bounded_dijkstra.ReachedVertices();
 	}
 
 	vector<vector<EdgeId> > GetAllPathsTo(VertexId start_vertex, VertexId end_vertex) {
 		PathStorageCallback<Graph> callback(this->graph_);
-		PathProcessor<Graph> path_processor(this->graph_, 0, this->search_depth_,
+		ProcessPaths(this->graph_, 0, this->search_depth_,
 				start_vertex, end_vertex, callback);
-		path_processor.Process();
 		return callback.paths();
 	}
 };
diff --git a/src/hammer/CMakeLists.txt b/src/hammer/CMakeLists.txt
index 70be48e..7b02ce6 100644
--- a/src/hammer/CMakeLists.txt
+++ b/src/hammer/CMakeLists.txt
@@ -22,7 +22,7 @@ add_executable(hammer
 #  add_subdirectory(quake_count)
 #  add_subdirectory(gen_test_data)
 
-target_link_libraries(hammer input mph_index BamTools yaml-cpp format input ${COMMON_LIBRARIES})
+target_link_libraries(hammer input cityhash BamTools yaml-cpp format input ${COMMON_LIBRARIES})
 
 if (SPADES_STATIC_BUILD)
   set_target_properties(hammer PROPERTIES LINK_SEARCH_END_STATIC 1)
diff --git a/src/hammer/config_struct_hammer.cpp b/src/hammer/config_struct_hammer.cpp
index 341b98c..d9df167 100644
--- a/src/hammer/config_struct_hammer.cpp
+++ b/src/hammer/config_struct_hammer.cpp
@@ -39,7 +39,8 @@ void load(hammer_config& cfg, boost::property_tree::ptree const& pt) {
   load(cfg.count_numfiles, pt, "count_numfiles");
   load(cfg.count_merge_nthreads, pt, "count_merge_nthreads");
   load(cfg.count_split_buffer, pt, "count_split_buffer");
-
+  load(cfg.count_filter_singletons, pt, "count_filter_singletons");
+  
   load(cfg.hamming_do, pt, "hamming_do");
   load(cfg.hamming_blocksize_quadratic_threshold, pt, "hamming_blocksize_quadratic_threshold");
 
@@ -67,6 +68,7 @@ void load(hammer_config& cfg, boost::property_tree::ptree const& pt) {
   load(cfg.correct_use_threshold, pt, "correct_use_threshold");
   load(cfg.correct_readbuffer, pt, "correct_readbuffer");
   load(cfg.correct_discard_bad, pt, "correct_discard_bad");
+  load(cfg.correct_stats, pt, "correct_stats");
 
   std::string fname;
   load(fname, pt, "dataset");
diff --git a/src/hammer/config_struct_hammer.hpp b/src/hammer/config_struct_hammer.hpp
index e60899a..f22670a 100644
--- a/src/hammer/config_struct_hammer.hpp
+++ b/src/hammer/config_struct_hammer.hpp
@@ -47,6 +47,7 @@ struct hammer_config {
   unsigned count_numfiles;
   unsigned count_merge_nthreads;
   size_t count_split_buffer;
+  bool count_filter_singletons;
 
   bool hamming_do;
   unsigned hamming_blocksize_quadratic_threshold;
@@ -75,6 +76,7 @@ struct hammer_config {
   double correct_threshold;
   unsigned correct_readbuffer;
   unsigned correct_nthreads;
+  bool correct_stats;  
 };
 
 
diff --git a/src/hammer/expander.cpp b/src/hammer/expander.cpp
index e4d6c07..b6a7a6f 100644
--- a/src/hammer/expander.cpp
+++ b/src/hammer/expander.cpp
@@ -33,13 +33,15 @@ bool Expander::operator()(const Read &r) {
   ValidKMerGenerator<hammer::K> gen(cr);
   while (gen.HasMore()) {
     hammer::KMer kmer = gen.kmer();
-    size_t idx = data_.seq_idx(kmer);
-    size_t read_pos = gen.pos() - 1;
+    size_t idx = data_.checking_seq_idx(kmer);
+    if (idx != -1ULL) {
+      size_t read_pos = gen.pos() - 1;
 
-    kmer_indices[read_pos] = idx;
-    if (data_[idx].good()) {
-      for (size_t j = read_pos; j < read_pos + hammer::K; ++j)
-        covered_by_solid[j] = true;
+      kmer_indices[read_pos] = idx;
+      if (data_[idx].good()) {
+        for (size_t j = read_pos; j < read_pos + hammer::K; ++j)
+          covered_by_solid[j] = true;
+      }
     }
     gen.Next();
   }
diff --git a/src/hammer/hamcluster.cpp b/src/hammer/hamcluster.cpp
index d0e6178..4db02ab 100644
--- a/src/hammer/hamcluster.cpp
+++ b/src/hammer/hamcluster.cpp
@@ -99,7 +99,7 @@ static void processBlockQuadratic(ConcurrentDSU  &uf,
     for (size_t j = i + 1; j < block_size; j++) {
       size_t y = block[j];
       hammer::KMer kmery = data.kmer(y);
-      if (uf.find_set(x) != uf.find_set(y) &&
+      if (!uf.same(x, y) &&
           canMerge(uf, x, y) &&
           hamdistKMer(kmerx, kmery, tau) <= tau) {
         uf.unite(x, y);
@@ -202,3 +202,87 @@ void KMerHamClusterer::cluster(const std::string &prefix,
     INFO("Merge done, saw " << big_blocks2 << " big blocks out of " << nblocks << " processed.");
   }
 }
+
+enum {
+  UNLOCKED = 0,
+  PARTIALLY_LOCKED = 1,
+  FULLY_LOCKED = 3
+};
+
+static bool canMerge2(const ConcurrentDSU &uf, size_t kidx, size_t cidx) {
+    // If either of indices is fully locked - bail out
+    uint64_t kaux = uf.root_aux(kidx), caux = uf.root_aux(cidx);
+    if (kaux == FULLY_LOCKED || caux == FULLY_LOCKED)
+        return false;
+
+    // Otherwise there is a possibility to merge stuff.
+    if (0 && (kaux == PARTIALLY_LOCKED || caux == PARTIALLY_LOCKED)) {
+        // We cannot merge two partially locked clusters.
+        return kaux != caux;
+    }
+
+    return true;
+}
+
+static void ClusterChunk(size_t start_idx, size_t end_idx, const KMerData &data, ConcurrentDSU &uf) {
+    unsigned nthreads = cfg::get().general_max_nthreads;
+
+    // INFO("Cluster: " << start_idx << ":" << end_idx);
+#   pragma omp parallel num_threads(nthreads)
+    {
+#       pragma omp for
+        for (size_t idx = start_idx; idx < end_idx; ++idx) {
+            hammer::KMer kmer = data.kmer(idx);
+
+            if (kmer.GetHash() > (!kmer).GetHash())
+                continue;
+
+            size_t kidx = data.seq_idx(kmer);
+            size_t rckidx = -1ULL;
+            // INFO("" << kmer << ":" << kidx);
+
+            for (size_t k = 0; k < hammer::K; ++k) {
+                hammer::KMer candidate = kmer;
+                char c = candidate[k];
+                for (char nc = 0; nc < 4; ++nc) {
+                    if (nc == c)
+                        continue;
+                    candidate.set(k, nc);
+                    size_t cidx = data.checking_seq_idx(candidate);
+                    // INFO("" << candidate << ":" << cidx);
+                    if (cidx != -1ULL && canMerge2(uf, kidx, cidx)) {
+                        uf.unite(kidx, cidx);
+
+                        size_t rccidx = data.seq_idx(!candidate);
+                        if (rckidx == -1ULL)
+                            rckidx = data.seq_idx(!kmer);
+                        uf.unite(rckidx, rccidx);
+                    }
+                }
+            }
+        }
+#       pragma omp barrier
+        //INFO("Lock: " << start_idx << ":" << end_idx);
+#       pragma omp for
+        for (size_t idx = start_idx; idx < end_idx; ++idx) {
+            if (uf.set_size(idx) < 2500)
+                continue;
+
+            if (uf.root_aux(idx) != FULLY_LOCKED)
+                uf.set_root_aux(idx, FULLY_LOCKED);
+        }
+    }
+}
+
+void TauOneKMerHamClusterer::cluster(const std::string &, const KMerData &data, ConcurrentDSU &uf) {
+    size_t start_idx = 0;
+    while (start_idx < data.size()) {
+        size_t end_idx = start_idx + 64*1024;
+        if (end_idx > data.size())
+            end_idx = data.size();
+
+        ClusterChunk(start_idx, end_idx, data, uf);
+
+        start_idx = end_idx;
+    }
+}
diff --git a/src/hammer/hamcluster.hpp b/src/hammer/hamcluster.hpp
index 951e7f1..d2041af 100644
--- a/src/hammer/hamcluster.hpp
+++ b/src/hammer/hamcluster.hpp
@@ -149,4 +149,13 @@ class KMerHamClusterer {
   DECL_LOGGER("Hamming Clustering");
 };
 
+class TauOneKMerHamClusterer {
+ public:
+  TauOneKMerHamClusterer() {} 
+  void cluster(const std::string &prefix, const KMerData &data, ConcurrentDSU &uf);
+ private:
+  DECL_LOGGER("tau = 1 Hamming Clustering");
+};
+
+
 #endif // HAMMER_SUBKMER_SORTER_HPP
diff --git a/src/hammer/hammer_tools.cpp b/src/hammer/hammer_tools.cpp
index 0dff190..2e5e268 100644
--- a/src/hammer/hammer_tools.cpp
+++ b/src/hammer/hammer_tools.cpp
@@ -83,7 +83,7 @@ void CorrectReadsBatch(std::vector<bool> &res,
   bool correct_threshold = cfg::get().correct_use_threshold;
   bool discard_bad = cfg::get().correct_discard_bad;
 
-  ReadCorrector corrector(data);
+  ReadCorrector corrector(data, cfg::get().correct_stats);
 # pragma omp parallel for shared(reads, res, data) num_threads(correct_nthreads)
   for (size_t i = 0; i < buf_size; ++i) {
     if (reads[i].size() >= K) {
@@ -222,7 +222,7 @@ size_t CorrectAllReads() {
       std::string usuffix =  std::to_string(ilib) + "_" +
                              std::to_string(iread) + ".cor.fastq";
 
-      std::string unpaired = getLargestPrefix(I->first, I->second) + "_unpaired";
+      std::string unpaired = getLargestPrefix(I->first, I->second) + "_unpaired.fastq";
 
       std::string outcorl = getReadsFilename(cfg::get().output_dir, I->first,  Globals::iteration_no, usuffix);
       std::string outcorr = getReadsFilename(cfg::get().output_dir, I->second, Globals::iteration_no, usuffix);
diff --git a/src/hammer/kmer_data.cpp b/src/hammer/kmer_data.cpp
index 650a267..6375e5e 100644
--- a/src/hammer/kmer_data.cpp
+++ b/src/hammer/kmer_data.cpp
@@ -11,6 +11,7 @@
 
 #include "io/mmapped_writer.hpp"
 #include "io/ireadstream.hpp"
+#include "io/kmer_iterator.hpp"
 #include "config_struct_hammer.hpp"
 
 #include "file_limit.hpp"
@@ -205,7 +206,9 @@ static inline void Merge(KMerStat &lhs, const KMerStat &rhs) {
 
 static void PushKMer(KMerData &data,
                      KMer kmer, const unsigned char *q, double prob) {
-  size_t idx = data.seq_idx(kmer);
+  size_t idx = data.checking_seq_idx(kmer);
+  if (idx == -1ULL)
+      return;
   KMerStat &kmc = data[idx];
   kmc.lock();
   Merge(kmc,
@@ -222,7 +225,9 @@ static void PushKMerRC(KMerData &data,
   for (unsigned i = 0; i < K; ++i)
     rcq[K - i - 1] = q[i];
 
-  size_t idx = data.seq_idx(kmer);
+  size_t idx = data.checking_seq_idx(kmer);
+  if (idx == -1ULL)
+      return;
   KMerStat &kmc = data[idx];
   kmc.lock();
   Merge(kmc,
@@ -263,27 +268,268 @@ class KMerDataFiller {
   }
 };
 
+class KMerMultiplicityCounter {
+    KMerData &data_;
+    uint64_t *cnt_;
+
+    void IncCount(const hammer::KMer &k) {
+        size_t idx = data_.seq_idx(k);
+        size_t block = idx * 2 / (8 * sizeof(uint64_t)), pos = (idx * 2) % (8 * sizeof(uint64_t));
+        size_t mask = 3ull << pos;
+
+        if (__sync_fetch_and_or(cnt_ + block, 1ull << pos) & mask)
+            __sync_fetch_and_or(cnt_ + block, 2ull << pos);
+    }
+
+  public:
+    KMerMultiplicityCounter(KMerData &data)
+            : data_(data) {
+        size_t blocks =  (2 * data.size()) / (8 * sizeof(uint64_t)) + 1;
+        cnt_ = new uint64_t[blocks];
+        memset(cnt_, 0, blocks * sizeof(uint64_t));
+    }
+    ~KMerMultiplicityCounter() { delete[] cnt_; }
+
+
+    bool operator()(const Read &r) {
+        int trim_quality = cfg::get().input_trim_quality;
+
+        // FIXME: Get rid of this
+        Read cr = r;
+        size_t sz = cr.trimNsAndBadQuality(trim_quality);
+
+        if (sz < hammer::K)
+            return false;
+
+        ValidKMerGenerator<hammer::K> gen(cr);
+        while (gen.HasMore()) {
+            KMer kmer = gen.kmer();
+
+            IncCount(kmer);
+            IncCount(!kmer);
+
+            gen.Next();
+        }
+
+        return false;
+    }
+
+    size_t count(size_t idx) const {
+        size_t block = idx * 2 / (8 * sizeof(uint64_t)), pos = idx * 2 % (8 * sizeof(uint64_t));
+        return (cnt_[block] >> pos) & 3;
+    }
+};
+
+class NonSingletonKMerSplitter : public KMerSplitter<hammer::KMer> {
+    typedef std::vector<std::vector<KMer> > KMerBuffer;
+
+    std::pair<size_t, size_t>
+    FillBufferFromStream(io::raw_kmer_iterator<hammer::KMer> &it,
+                         KMerBuffer &entry,
+                         size_t cell_size, size_t num_files) {
+        size_t processed = 0, non_singleton = 0 ;
+        for ( ; it.good(); ++it) {
+            hammer::KMer seq(hammer::K, *it);
+
+            size_t kidx = data_.seq_idx(seq);
+            size_t cnt = counter_.count(kidx);
+
+            processed += 1;
+
+            if (cnt == 1)
+                continue;
+
+            non_singleton += 1;
+
+            size_t idx = this->GetFileNumForSeq(seq, (unsigned)num_files);
+            entry[idx].push_back(seq);
+
+
+            if (entry[idx].size() > cell_size)
+                break;
+        }
+        return std::make_pair(processed, non_singleton);
+    }
+
+    void DumpBuffers(size_t num_files, size_t nthreads,
+                     std::vector<KMerBuffer> &buffers,
+                     const path::files_t &ostreams) const {
+#       pragma omp parallel for num_threads(nthreads)
+        for (unsigned k = 0; k < num_files; ++k) {
+            size_t sz = 0;
+            for (size_t i = 0; i < nthreads; ++i)
+                sz += buffers[i][k].size();
+
+            if (!sz)
+                continue;
+
+            std::vector<KMer> SortBuffer;
+            SortBuffer.reserve(sz);
+            for (size_t i = 0; i < nthreads; ++i) {
+                KMerBuffer &entry = buffers[i];
+                SortBuffer.insert(SortBuffer.end(), entry[k].begin(), entry[k].end());
+            }
+            libcxx::sort(SortBuffer.begin(), SortBuffer.end(), KMerComparator());
+            auto it = std::unique(SortBuffer.begin(), SortBuffer.end());
+
+#           pragma omp critical
+            {
+                FILE *f = fopen(ostreams[k].c_str(), "ab");
+                VERIFY_MSG(f, "Cannot open temporary file to write");
+                fwrite(SortBuffer.data(), sizeof(KMer), it - SortBuffer.begin(), f);
+                fclose(f);
+            }
+        }
+
+        for (unsigned i = 0; i < nthreads; ++i) {
+            for (unsigned j = 0; j < num_files; ++j) {
+                buffers[i][j].clear();
+            }
+        }
+    }
+
+  public:
+    NonSingletonKMerSplitter(std::string &work_dir,
+                             const std::string &final_kmers,
+                             const KMerData &data,
+                             const KMerMultiplicityCounter &counter)
+            : KMerSplitter<hammer::KMer>(work_dir, hammer::K), final_kmers_(final_kmers), data_(data), counter_(counter){}
+
+    virtual path::files_t Split(size_t num_files) {
+        unsigned nthreads = std::min(cfg::get().count_merge_nthreads, cfg::get().general_max_nthreads);
+
+        INFO("Splitting kmer instances into " << num_files << " buckets. This might take a while.");
+
+        // Determine the set of output files
+        path::files_t out;
+        for (unsigned i = 0; i < num_files; ++i)
+            out.push_back(GetRawKMersFname(i));
+
+        size_t file_limit = num_files + 2*nthreads;
+        size_t res = limit_file(file_limit);
+        if (res < file_limit) {
+            WARN("Failed to setup necessary limit for number of open files. The process might crash later on.");
+            WARN("Do 'ulimit -n " << file_limit << "' in the console to overcome the limit");
+        }
+
+        size_t reads_buffer_size = cfg::get().count_split_buffer;
+        if (reads_buffer_size == 0) {
+            reads_buffer_size = 536870912ull;
+            size_t mem_limit =  (size_t)((double)(get_free_memory()) / (nthreads * 3));
+            INFO("Memory available for splitting buffers: " << (double)mem_limit / 1024.0 / 1024.0 / 1024.0 << " Gb");
+            reads_buffer_size = std::min(reads_buffer_size, mem_limit);
+        }
+        size_t cell_size = reads_buffer_size / (num_files * sizeof(KMer));
+        // Set sane minimum cell size
+        if (cell_size < 16384)
+            cell_size = 16384;
+
+        INFO("Using cell size of " << cell_size);
+        std::vector<KMerBuffer> tmp_entries(nthreads);
+        for (unsigned i = 0; i < nthreads; ++i) {
+            KMerBuffer &entry = tmp_entries[i];
+            entry.resize(num_files);
+            for (unsigned j = 0; j < num_files; ++j) {
+                entry[j].reserve((size_t)(1.1 * (double)cell_size));
+            }
+        }
+
+        size_t n = 15;
+        size_t total_kmers = 0, non_singletons = 0;
+        auto kmers = io::make_kmer_iterator<hammer::KMer>(final_kmers_, hammer::K, nthreads);
+        while (std::any_of(kmers.begin(), kmers.end(),
+                           [](const io::raw_kmer_iterator<hammer::KMer> &it) { return it.good(); })) {
+#           pragma omp parallel for num_threads(nthreads) reduction(+ : total_kmers) reduction(+ : non_singletons)
+            for (size_t i = 0; i < kmers.size(); ++i) {
+                size_t kc, nsc;
+                std::tie(kc, nsc) = FillBufferFromStream(kmers[i], tmp_entries[i], cell_size, num_files);
+                total_kmers += kc;
+                non_singletons += nsc;
+            }
+
+            DumpBuffers(num_files, nthreads, tmp_entries, out);
+            if (total_kmers >> n) {
+                INFO("Processed " << total_kmers << " kmers");
+                n += 1;
+            }
+        }
+        INFO("Processed " << total_kmers << " kmers");
+
+        INFO("Total " << non_singletons << " non-singleton k-mers written");
+
+        unlink(final_kmers_.c_str());
+
+        return out;
+    }
+
+  private:
+    const std::string final_kmers_;
+    const KMerData &data_;
+    const KMerMultiplicityCounter &counter_;
+};
+
 void KMerDataCounter::BuildKMerIndex(KMerData &data) {
   // Build the index
   std::string workdir = cfg::get().input_working_dir;
   HammerKMerSplitter splitter(workdir);
   KMerDiskCounter<hammer::KMer> counter(workdir, splitter);
+
   size_t kmers = KMerIndexBuilder<HammerKMerIndex>(workdir, num_files_, omp_get_max_threads()).BuildIndex(data.index_, counter, /* save final */ true);
+  std::string final_kmers = counter.GetFinalKMersFname();
+  // Optionally perform a filtering step
+  if (cfg::get().count_filter_singletons) {
+      INFO("Filtering singleton k-mers");
+      data.kmers_.set_size(kmers);
+      KMerMultiplicityCounter mcounter(data);
+
+      const auto& dataset = cfg::get().dataset;
+      for (auto I = dataset.reads_begin(), E = dataset.reads_end(); I != E; ++I) {
+          INFO("Processing " << *I);
+          ireadstream irs(*I, cfg::get().input_qvoffset);
+          hammer::ReadProcessor rp(omp_get_max_threads());
+          rp.Run(irs, mcounter);
+          VERIFY_MSG(rp.read() == rp.processed(), "Queue unbalanced");
+      }
+
+      size_t singletons = 0;
+      for (size_t idx = 0; idx < data.size(); ++idx) {
+          size_t cnt = mcounter.count(idx);
+          VERIFY(cnt);
+          singletons += cnt == 1;
+      }
+      INFO("There are " << data.size() << " kmers in total. "
+           "Among them " << data.size() - singletons << " (" <<  100.0 * (double)(data.size() - singletons) / (double)data.size() << "%) are non-singletons.");
+
+      NonSingletonKMerSplitter nssplitter(workdir, final_kmers, data, mcounter);
+      KMerDiskCounter<hammer::KMer> nscounter(workdir, nssplitter);
+      HammerKMerIndex reduced_index;
+      kmers = KMerIndexBuilder<HammerKMerIndex>(workdir, num_files_, omp_get_max_threads()).BuildIndex(reduced_index, nscounter, /* save final */ true);
+      data.index_.swap(reduced_index);
+      final_kmers = nscounter.GetFinalKMersFname();
+  }
 
   // Check, whether we'll ever have enough memory for running BH and bail out earlier
-  if (1.25 * (double)kmers * (sizeof(KMerStat) + sizeof(hammer::KMer)) > (double) get_memory_limit())
-      FATAL_ERROR("The reads contain too many k-mers to fit into available memory limit. Increase memory limit and restart");
+  double needed = 1.25 * (double)kmers * (sizeof(KMerStat) + sizeof(hammer::KMer));
+  if (needed > (double) get_memory_limit())
+      FATAL_ERROR("The reads contain too many k-mers to fit into available memory. You need approx. "
+                  << needed / 1024.0 / 1024.0 / 1024.0
+                  << "GB of free RAM to assemble your dataset");
 
   {
-    MMappedFileRecordArrayIterator<hammer::KMer::DataType> kmer_it(counter.GetFinalKMersFname(), hammer::KMer::GetDataSize(hammer::K));
-
     INFO("Arranging kmers in hash map order");
     data.kmers_.set_size(kmers);
     data.kmers_.set_data(new hammer::KMer::DataType[kmers * hammer::KMer::GetDataSize(hammer::K)]);
 
-    for (; kmer_it.good(); ++kmer_it) {
-      size_t kidx = data.index_.seq_idx(hammer::KMer(hammer::K, *kmer_it));
-      memcpy(data.kmers_[kidx].data(), *kmer_it, hammer::KMer::TotalBytes);
+    unsigned nthreads = std::min(cfg::get().count_merge_nthreads, cfg::get().general_max_nthreads);
+    auto kmers_its = io::make_kmer_iterator<hammer::KMer>(final_kmers, hammer::K, 16*nthreads);
+
+#   pragma omp parallel for num_threads(nthreads) schedule(guided)
+    for (size_t i = 0; i < kmers_its.size(); ++i) {
+        auto &kmer_it = kmers_its[i];
+        for (; kmer_it.good(); ++kmer_it) {
+            size_t kidx = data.index_.seq_idx(hammer::KMer(hammer::K, *kmer_it));
+            memcpy(data.kmers_[kidx].data(), *kmer_it, hammer::KMer::TotalBytes);
+        }
     }
 
     unlink(counter.GetFinalKMersFname().c_str());
@@ -310,6 +556,7 @@ void KMerDataCounter::FillKMerData(KMerData &data) {
   size_t singletons = 0;
   for (size_t i = 0; i < data.size(); ++i) {
     VERIFY(data[i].count());
+
     // Make sure all the kmers are marked as 'Bad' in the beginning
     data[i].mark_bad();
 
diff --git a/src/hammer/main.cpp b/src/hammer/main.cpp
index fe4af02..3574989 100644
--- a/src/hammer/main.cpp
+++ b/src/hammer/main.cpp
@@ -30,6 +30,8 @@
 #include "logger/logger.hpp"
 #include "logger/log_writers.hpp"
 
+#include "version.hpp"
+
 #include <yaml-cpp/yaml.h>
 
 #include <algorithm>
@@ -118,7 +120,7 @@ int main(int argc, char * argv[]) {
     hammer::InitializeSubKMerPositions();
 
     INFO("Size of aux. kmer data " << sizeof(KMerStat) << " bytes");
-    
+
     int max_iterations = cfg::get().general_max_iterations;
 
     // now we can begin the iterations
@@ -151,10 +153,15 @@ int main(int argc, char * argv[]) {
       std::vector<std::vector<size_t> > classes;
       if (cfg::get().hamming_do || do_everything) {
         ConcurrentDSU uf(Globals::kmer_data->size());
-        KMerHamClusterer clusterer(cfg::get().general_tau);
+        std::string ham_prefix = hammer::getFilename(cfg::get().input_working_dir, Globals::iteration_no, "kmers.hamcls");
         INFO("Clustering Hamming graph.");
-        clusterer.cluster(hammer::getFilename(cfg::get().input_working_dir, Globals::iteration_no, "kmers.hamcls"),
-                          *Globals::kmer_data, uf);
+        if (cfg::get().general_tau > 1) {
+          KMerHamClusterer(cfg::get().general_tau).cluster(ham_prefix, *Globals::kmer_data, uf);
+        } else {
+          TauOneKMerHamClusterer().cluster(ham_prefix, *Globals::kmer_data, uf);
+        }
+
+        INFO("Extracting clusters");
         size_t num_classes = uf.extract_to_file(hammer::getFilename(cfg::get().input_working_dir, Globals::iteration_no, "kmers.hamming"));
 
 #if 0
diff --git a/src/hammer/misc/memusg b/src/hammer/misc/memusg
index 9346ebe..db3f1ab 100755
--- a/src/hammer/misc/memusg
+++ b/src/hammer/misc/memusg
@@ -13,7 +13,7 @@ set -um
 
 pgid=`ps -o pgid= $$`
 # make sure we're in a separate process group
-if [ x$pgid = x$(ps -o pgid= $(ps -o ppid= $$)) ]; then
+if [ x$pgid == x$(ps -o pgid= $(ps -o ppid= $$)) ]; then
     cmd=
     set -- "$0" "$@"
     for a; do cmd+="'${a//"'"/"'\\''"}' "; done
diff --git a/src/hammer/read_corrector.cpp b/src/hammer/read_corrector.cpp
index e286c71..f90571f 100644
--- a/src/hammer/read_corrector.cpp
+++ b/src/hammer/read_corrector.cpp
@@ -69,7 +69,7 @@ std::string ReadCorrector::CorrectReadRight(const std::string &seq, const std::s
     std::priority_queue<state> corrections, candidates;
     positions_t cpos{{(uint16_t)-1, (uint16_t)-1U, (uint16_t)-1U, (uint16_t)-1U}};
 
-    const size_t size_thr = 100 * log2(read_size - right_pos) + 1;
+    const size_t size_thr = size_t(100 * log2(read_size - right_pos)) + 1;
     const double penalty_thr = -(double)(read_size - right_pos) * 15.0 / 100;
     const size_t pos_thr = 8;
 
@@ -158,7 +158,7 @@ std::string ReadCorrector::CorrectReadRight(const std::string &seq, const std::s
 }
 
 bool ReadCorrector::CorrectOneRead(Read & r,
-                                   bool correct_threshold, bool discard_singletons, bool discard_bad) {
+                                   bool, bool, bool) {
     std::string seq = r.getSequenceString();
     const std::string &qual = r.getQualityString();
 
@@ -172,19 +172,21 @@ bool ReadCorrector::CorrectOneRead(Read & r,
     while (gen.HasMore()) {
         size_t read_pos = gen.pos() - 1;
         hammer::KMer kmer = gen.kmer();
-        const KMerStat &kmer_data = data_[kmer];
-
-        if (kmer_data.good()) {
-            if (read_pos != right_pos - K + 2) {
-                left_pos = read_pos;
-                right_pos = left_pos + K - 1;
-            } else
-                right_pos += 1;
-
-            if (right_pos - left_pos + 1 > solid_len) {
-                lleft_pos = left_pos;
-                lright_pos = right_pos;
-                solid_len = right_pos - left_pos + 1;
+        size_t idx = data_.checking_seq_idx(kmer);
+        if (idx != -1ULL) {
+            const KMerStat &kmer_data = data_[idx];
+            if (kmer_data.good()) {
+                if (read_pos != right_pos - K + 2) {
+                    left_pos = read_pos;
+                    right_pos = left_pos + K - 1;
+                } else
+                    right_pos += 1;
+
+                if (right_pos - left_pos + 1 > solid_len) {
+                    lleft_pos = left_pos;
+                    lright_pos = right_pos;
+                    solid_len = right_pos - left_pos + 1;
+                }
             }
         }
 
@@ -215,6 +217,15 @@ bool ReadCorrector::CorrectOneRead(Read & r,
 
 #           pragma omp atomic
             changed_nucleotides_ += corrected;
+            if (correct_stats_) {
+                std::string name = r.getName();
+                name += " BH:changed:" + std::to_string(corrected);
+                r.setName(name.data());
+            }
+        } else if (correct_stats_) {
+            std::string name = r.getName();
+            name += " BH:failed";
+            r.setName(name.data());
         }
 
         if (seq.size() != read_size) {
@@ -224,6 +235,10 @@ bool ReadCorrector::CorrectOneRead(Read & r,
 
         r.setSequence(newseq.data(), /* preserve_trimming */ true);
         return true;
+    } else if (solid_len == read_size && correct_stats_) {
+        std::string name = r.getName();
+        name += " BH:ok";
+        r.setName(name.data());
     }
 
     return solid_len == read_size;
diff --git a/src/hammer/read_corrector.hpp b/src/hammer/read_corrector.hpp
index 23e7e8e..fb44c1b 100644
--- a/src/hammer/read_corrector.hpp
+++ b/src/hammer/read_corrector.hpp
@@ -22,10 +22,13 @@ class ReadCorrector {
   size_t changed_nucleotides_;
   size_t uncorrected_nucleotides_;
   size_t total_nucleotides_;
+  bool   correct_stats_;
 
  public:
-  ReadCorrector(const KMerData& data)
-      : data_(data), changed_reads_(0), changed_nucleotides_(0), uncorrected_nucleotides_(0), total_nucleotides_(0) {}
+    ReadCorrector(const KMerData& data, bool correct_stats = false)
+            : data_(data),
+              changed_reads_(0), changed_nucleotides_(0), uncorrected_nucleotides_(0), total_nucleotides_(0),
+              correct_stats_(correct_stats) {}
 
   size_t changed_reads() const {
     return changed_reads_;
diff --git a/src/include/adt/concurrent_dsu.hpp b/src/include/adt/concurrent_dsu.hpp
index 4a13374..ad4cb38 100644
--- a/src/include/adt/concurrent_dsu.hpp
+++ b/src/include/adt/concurrent_dsu.hpp
@@ -19,151 +19,215 @@
 #include <algorithm>
 #include <vector>
 #include <unordered_map>
+#include <atomic>
+
+// Silence bogus gcc warnings
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
 
 class ConcurrentDSU {
-  union atomic_set_t {
-    uint64_t raw;
-    struct {
-      uint64_t next  : 40;
-      uint32_t size  : 23;
-      uint32_t dirty : 1;
-    };
+  struct atomic_set_t {
+      uint64_t data  : 61;
+      uint64_t aux   : 2;
+      bool     root  : 1;
   } __attribute__ ((packed));
 
- public:
-  ConcurrentDSU(size_t size)
-     : size(size) {
-      if (size > ((1ULL << 40) - 1)) {
-          std::cerr << "Error, size greater than 2^40 -1";
-          exit(-1);
-      }
+  static_assert(sizeof(atomic_set_t) == 8, "Unexpected size of atomic_set_t");
 
-      data = new atomic_set_t[size];
-      for (size_t i = 0; i < size; i++) {
-          data[i].next = i & ((1ULL << 40) - 1);
-          data[i].size = 1;
-          data[i].dirty = 0;
-      }
-  }
+  public:
+    ConcurrentDSU(size_t size)
+        : data_(size) {
 
-  ~ConcurrentDSU() {
-    delete[] data;
+      for (size_t i = 0; i < size; i++)
+          data_[i] = { .data = 1, .aux = 0, .root = true };
   }
 
+  ~ConcurrentDSU() { }
+
   void unite(size_t x, size_t y) {
-    while (true) {
-      x = find_set(x);
-      y = find_set(y);
-      if (x == y)
-        return;
-
-      unsigned x_size = data[x].size;
-      unsigned y_size = data[y].size;
-      if (x_size > y_size || (x_size == y_size && x > y)) {
-        std::swap(x, y);
-        std::swap(x_size, y_size);
+      uint64_t x_size, y_size;
+      uint64_t x_aux, y_aux;
+
+      // Step one: update the links
+      while (true) {
+          x = find_set(x);
+          y = find_set(y);
+          if (x == y)
+              return;
+
+          atomic_set_t x_entry = data_[x], y_entry = data_[y];
+          // If someone already changed roots => retry
+          if (!x_entry.root || !y_entry.root)
+              continue;
+
+          // We need to link the smallest subtree to the largest
+          x_size = x_entry.data, y_size = y_entry.data;
+          x_aux = x_entry.aux, y_aux = y_entry.aux;
+          if (x_size > y_size || (x_size == y_size && x > y)) {
+              std::swap(x, y);
+              std::swap(x_size, y_size);
+              std::swap(x_aux, y_aux);
+              std::swap(x_entry, y_entry);
+          }
+
+          // Link 'x' to 'y'. If someone already changed 'x' => try again.
+          atomic_set_t new_x_entry =  { .data = y, .aux = x_aux, .root = false };
+          if (!data_[x].compare_exchange_strong(x_entry, new_x_entry))
+              continue;
+
+          break;
       }
 
-      if (lock(y)) {
-        if (update_root(x, x_size, y, x_size)) {
-          while (data[x].dirty) {
-            // SPIN LOCK
-          }
-          while (true) {
-            atomic_set_t old = data[y];
-            atomic_set_t nnew = old;
-            nnew.size = (uint32_t) (nnew.size + data[x].size) & ((1U << 23) - 1);
-            if (__sync_bool_compare_and_swap(&data[y].raw, old.raw, nnew.raw)) {
-              break;
-            }
-          }
-        }
-        unlock(y);
+      // Step two: update the size.  We already linked 'x' to 'y'. Therefore we
+      // need to add 'x_size' to whichever value is currently inside 'y'.
+      while (true) {
+          y = find_set(y);
+          atomic_set_t y_entry = data_[y];
+          // If someone already changed the roots => retry
+          if (!y_entry.root)
+              continue;
+
+          // Update the size. If someone already changed 'y' => try again.
+          atomic_set_t new_y_entry = { .data = x_size + y_entry.data, .aux = y_aux, .root = true };
+          if (!data_[y].compare_exchange_strong(y_entry, new_y_entry))
+              continue;
+
+          break;
       }
-    }
   }
 
   size_t set_size(size_t i) const {
-    size_t el = find_set(i);
-    return data[el].size;
+      while (true) {
+          size_t el = find_set(i);
+          atomic_set_t entry = data_[el];
+          if (!entry.root)
+              continue;
+
+          return entry.data;
+      }
   }
 
   size_t find_set(size_t x) const {
-    size_t r = x;
-
-    // The version with full path compression
-
-    // Find the root
-    while (r != data[r].next)
-        r = data[r].next;
-
-    // Update the stuff
-    unsigned r_size = data[r].size;
-    unsigned x_size = data[x].size;
-    while (x_size < r_size || (r_size == x_size && x < r)) {
-        size_t next = data[x].next;
-        atomic_set_t old = data[x];
-        atomic_set_t nnew = old;
-        nnew.next = r & ((1ULL << 40) - 1);
-        __sync_bool_compare_and_swap(&data[x].raw, old.raw, nnew.raw);
-
-        x = next;
-        x_size = data[x].size;
-    }
+      // Step one: find the root
+      size_t r = x;
+      atomic_set_t r_entry = data_[r];
+      while (!r_entry.root) {
+          r = r_entry.data;
+          r_entry = data_[r];
+      }
 
-    return r;
-
- #if 0
-    // The version with path halving
-    while (x != data[x].next) {
-      size_t next = data[x].next;
-      atomic_set_t old = data[x];
-      atomic_set_t nnew = old;
-      nnew.next = data[next].next;
-      __sync_bool_compare_and_swap(&data[x].raw, old.raw, nnew.raw);
-      x = data[next].next;
-    }
-    return x;
-#endif
+      // Step two: traverse the path from 'x' to root trying to update the links
+      // Note that the links might change, therefore we stop as soon as we'll
+      // end at 'some' root.
+      while (x != r) {
+          atomic_set_t x_entry = data_[x];
+          if (x_entry.root)
+              break;
+
+          // Try to update parent (may fail, it's ok)
+          atomic_set_t new_x_entry = { .data = r, .aux = x_entry.aux, .root = false };
+          data_[x].compare_exchange_weak(x_entry, new_x_entry);
+          x = x_entry.data;
+      }
+
+      return x;
+  }
+
+  bool same(size_t x, size_t y) const {
+      while (true) {
+          x = find_set(x);
+          y = find_set(y);
+          if (x == y)
+              return true;
+          if (data_[x].load().root)
+              return false;
+      }
   }
 
   size_t num_sets() const {
     size_t count = 0;
-    for (size_t i = 0; i < size; i++) {
-      if (data[i].next == i)
-        count++;
+    for (const auto& entry : data_) {
+        count += entry.load(std::memory_order_relaxed).root;
     }
+
     return count;
   }
 
+  bool is_root(size_t x) const {
+    return data_[x].load(std::memory_order_relaxed).root;
+  }
+
+  uint64_t aux(size_t x) const {
+    return data_[x].load(std::memory_order_relaxed).aux;
+  }
+
+  uint64_t root_aux(size_t x) const {
+      while (true) {
+          x = find_set(x);
+          atomic_set_t entry = data_[x];
+
+          if (!entry.root)
+              continue;
+
+          return entry.aux;
+      }
+  }
+
+  void set_aux(size_t x, uint64_t data) {
+      while (true) {
+        atomic_set_t x_entry = data_[x];
+        atomic_set_t new_x_entry = { .data = x_entry.data, .aux = data, .root = x_entry.root };
+        if (!data_[x].compare_exchange_strong(x_entry, new_x_entry))
+            continue;
+
+        break;
+      }
+  }
+
+  void set_root_aux(size_t x, uint64_t data) {
+      while (true) {
+          x = find_set(x);
+          atomic_set_t x_entry = data_[x];
+          if (!x_entry.root)
+              continue;
+
+          atomic_set_t new_x_entry = { .data = x_entry.data, .aux = data, .root = true };
+          if (!data_[x].compare_exchange_strong(x_entry, new_x_entry))
+              continue;
+
+          break;
+      }
+  }
+
   size_t extract_to_file(const std::string& Prefix) {
     // First, touch all the sets to make them directly connect to the root
 #   pragma omp parallel for
-    for (size_t x = 0; x < size; ++x)
+    for (size_t x = 0; x < data_.size(); ++x)
         (void) find_set(x);
 
     std::unordered_map<size_t, size_t> sizes;
 
 #if 0
     for (size_t x = 0; x < size; ++x) {
-        if (data[x].next != x) {
-            size_t t = data[x].next;
-            VERIFY(data[t].next == t)
+        if (data_[x].parent != x) {
+            size_t t = data_[x].parent;
+            VERIFY(data_[t].parent == t)
         }
     }
 #endif
 
     // Insert all the root elements into the map
-    for (size_t x = 0; x < size; ++x) {
-        if (data[x].next == x)
+    sizes.reserve(num_sets());
+    for (size_t x = 0; x < data_.size(); ++x) {
+        if (is_root(x))
             sizes[x] = 0;
     }
 
     // Now, calculate the counts. We can do this in parallel, because we know no
     // insertion can occur.
 #   pragma omp parallel for
-    for (size_t x = 0; x < size; ++x) {
-        size_t& entry = sizes[data[x].next];
+    for (size_t x = 0; x < data_.size(); ++x) {
+        size_t& entry = sizes[parent(x)];
 #       pragma omp atomic
         entry += 1;
     }
@@ -171,8 +235,8 @@ class ConcurrentDSU {
     // Now we know the sizes of each cluster. Go over again and calculate the
     // file-relative (cumulative) offsets.
     size_t off = 0;
-    for (size_t x = 0; x < size; ++x) {
-        if (data[x].next == x) {
+    for (size_t x = 0; x < data_.size(); ++x) {
+        if (is_root(x)) {
             size_t& entry = sizes[x];
             size_t noff = off + entry;
             entry = off;
@@ -182,8 +246,8 @@ class ConcurrentDSU {
 
     // Write down the entries
     std::vector<size_t> out(off);
-    for (size_t x = 0; x < size; ++x) {
-        size_t& entry = sizes[data[x].next];
+    for (size_t x = 0; x < data_.size(); ++x) {
+        size_t& entry = sizes[parent(x)];
         out[entry++] = x;
     }
     std::ofstream os(Prefix, std::ios::binary | std::ios::out);
@@ -194,8 +258,8 @@ class ConcurrentDSU {
     MMappedRecordWriter<size_t> index(Prefix + ".idx");
     index.reserve(sizes.size());
     size_t *idx = index.data();
-    for (size_t x = 0, i = 0, sz = 0; x < size; ++x) {
-        if (data[x].next == x) {
+    for (size_t x = 0, i = 0, sz = 0; x < data_.size(); ++x) {
+        if (is_root(x)) {
             idx[i++] = sizes[x] - sz;
             sz = sizes[x];
         }
@@ -205,8 +269,8 @@ class ConcurrentDSU {
   }
 
   void get_sets(std::vector<std::vector<size_t> > &otherWay) {
-    otherWay.resize(size);
-    for (size_t i = 0; i < size; i++) {
+    otherWay.resize(data_.size());
+    for (size_t i = 0; i < data_.size(); i++) {
       size_t set = find_set(i);
       otherWay[set].push_back(i);
     }
@@ -215,44 +279,18 @@ class ConcurrentDSU {
   }
 
 private:
-  bool lock(size_t y) {
-    while (true) {
-      atomic_set_t old = data[y];
-      if (old.next != y) {
-        return false;
-      }
-      old.dirty = 0;
-      atomic_set_t nnew = old;
-      nnew.dirty = 1;
-      if (__sync_bool_compare_and_swap(&data[y].raw, old.raw, nnew.raw)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  void unlock(size_t y) {
-    data[y].dirty = 0;
+  size_t parent(size_t x) const {
+     atomic_set_t val = data_[x];
+     return (val.root ? x : val.data);
   }
 
   static bool zero_size(const std::vector<size_t> & v) {
     return v.size() == 0;
   }
 
-  bool update_root(size_t x, uint32_t oldrank,
-                   size_t y, uint32_t newrank) {
-    atomic_set_t old = data[x];
-    if (old.next != x || old.size != oldrank) {
-      return false;
-    }
-    atomic_set_t nnew = old;
-    nnew.next = y & ((1ULL << 40) - 1);
-    nnew.size = newrank & ((1U << 23) - 1);
-    return __sync_bool_compare_and_swap(&data[x].raw, old.raw, nnew.raw);
-  }
-
-  mutable atomic_set_t *data;
-  size_t size;
+  mutable std::vector<std::atomic<atomic_set_t> > data_;
 };
 
+#pragma GCC diagnostic pop
+
 #endif /* CONCURRENTDSU_HPP_ */
diff --git a/src/include/adt/flat_map.hpp b/src/include/adt/flat_map.hpp
new file mode 100644
index 0000000..a12e1a8
--- /dev/null
+++ b/src/include/adt/flat_map.hpp
@@ -0,0 +1,320 @@
+#ifndef __ADT_FLAT_MAP_HPP__
+#define __ADT_FLAT_MAP_HPP__
+
+#pragma once
+
+#include <vector>
+#include <algorithm>
+#include <functional>
+
+namespace adt {
+
+template<typename K, typename V, typename Comp = std::less<K>, typename Allocator = std::allocator<std::pair<K, V> > >
+struct flat_map {
+    typedef K key_type;
+    typedef V mapped_type;
+    typedef std::pair<K, V> value_type;
+    typedef Comp key_compare;
+    struct value_compare : std::binary_function<value_type, value_type, bool> {
+        bool operator()(const value_type & lhs, const value_type & rhs) const {
+            return key_compare()(lhs.first, rhs.first);
+        }
+    };
+    typedef Allocator allocator_type;
+    typedef V& reference;
+    typedef const V& const_reference;
+    typedef typename std::allocator_traits<allocator_type>::pointer pointer;
+    typedef typename std::allocator_traits<allocator_type>::const_pointer const_pointer;
+    typedef std::vector<value_type, allocator_type> container_type;
+    typedef typename container_type::iterator iterator;
+    typedef typename container_type::const_iterator const_iterator;
+    typedef typename container_type::reverse_iterator reverse_iterator;
+    typedef typename container_type::const_reverse_iterator const_reverse_iterator;
+    typedef typename container_type::difference_type difference_type;
+    typedef typename container_type::size_type size_type;
+
+    flat_map() = default;
+    template<typename It>
+    flat_map(It begin, It end) { insert(begin, end); }
+    flat_map(std::initializer_list<value_type> init)
+            : flat_map(init.begin(), init.end()) {}
+
+    iterator                begin()              {    return data_.begin();    }
+    iterator                end()                {    return data_.end();      }
+    const_iterator          begin()     const    {    return data_.begin();    }
+    const_iterator          end()       const    {    return data_.end();      }
+    const_iterator          cbegin()    const    {    return data_.cbegin();   }
+    const_iterator          cend()      const    {    return data_.cend();     }
+    reverse_iterator        rbegin()             {    return data_.rbegin();   }
+    reverse_iterator        rend()               {    return data_.rend();     }
+    const_reverse_iterator  rbegin()    const    {    return data_.rbegin();   }
+    const_reverse_iterator  rend()      const    {    return data_.rend();     }
+    const_reverse_iterator  crbegin()   const    {    return data_.crbegin();  }
+    const_reverse_iterator  crend()     const    {    return data_.crend();    }
+
+    bool empty() const { return data_.empty(); }
+    size_type size() const { return data_.size(); }
+    size_type max_size() const { return data_.max_size(); }
+    size_type capacity() const { return data_.capacity(); }
+    void reserve(size_type size) {data_.reserve(size); }
+    void shrink_to_fit() { data_.shrink_to_fit(); }
+    size_type bytes_used() const { return capacity() * sizeof(value_type) + sizeof(data_); }
+
+    mapped_type & operator[](const key_type &key) {
+        KeyOrValueCompare comp;
+        auto lower = lower_bound(key);
+        if (lower == end() || comp(key, *lower))
+            return data_.emplace(lower, key, mapped_type())->second;
+        else
+            return lower->second;
+    }
+    mapped_type & operator[](key_type &&key) {
+        KeyOrValueCompare comp;
+        auto lower = lower_bound(key);
+        if (lower == end() || comp(key, *lower))
+            return data_.emplace(lower, std::move(key), mapped_type())->second;
+        else
+            return lower->second;
+    }
+
+    std::pair<iterator, bool> insert(value_type &&value) {
+        return emplace(std::move(value));
+    }
+    std::pair<iterator, bool> insert(const value_type &value) {
+        return emplace(value);
+    }
+    iterator insert(const_iterator hint, value_type &&value) {
+        return emplace_hint(hint, std::move(value));
+    }
+    iterator insert(const_iterator hint, const value_type &value) {
+        return emplace_hint(hint, value);
+    }
+
+    template<typename It>
+    void insert(It begin, It end) {
+        // If we need to increase the capacity, utilize this fact and emplace
+        // the stuff.
+        for (; begin != end && size() == capacity(); ++begin) {
+            emplace(*begin);
+        }
+        if (begin == end)
+            return;
+        
+        // If we don't need to increase capacity, then we can use a more efficient
+        // insert method where everything is just put in the same vector
+        // and then merge in place.
+        size_type size_before = data_.size();
+        try {
+            for (size_t i = capacity(); i > size_before && begin != end; --i, ++begin) {
+                data_.emplace_back(*begin);
+            }
+        } catch(...) {
+            // If emplace_back throws an exception, the easiest way to make sure
+            // that our invariants are still in place is to resize to the state
+            // we were in before
+            for (size_t i = data_.size(); i > size_before; --i) {
+                data_.pop_back();
+            }
+            throw;
+        }
+
+        value_compare comp;
+        auto mid = data_.begin() + size_before;
+        std::stable_sort(mid, data_.end(), comp);
+        std::inplace_merge(data_.begin(), mid, data_.end(), comp);
+        data_.erase(std::unique(data_.begin(), data_.end(), std::not2(comp)), data_.end());
+
+        // Make sure that we inserted at least one element before
+        // recursing. Otherwise we'd recurse too often if we were to insert the
+        // same element many times
+        if (data_.size() == size_before) {
+            for (; begin != end; ++begin) {
+                if (emplace(*begin).second) {
+                    ++begin;
+                    break;
+                }
+            }
+        }
+
+        // Insert the remaining elements that didn't fit by calling this function recursively.
+        return insert(begin, end);
+    }
+    void insert(std::initializer_list<value_type> il) {
+        insert(il.begin(), il.end());
+    }
+    iterator erase(iterator it) {
+        return data_.erase(it);
+    }
+    iterator erase(const_iterator it) {
+        return erase(iterator_const_cast(it));
+    }
+    size_type erase(const key_type &key) {
+        auto found = find(key);
+        if (found == end())
+            return 0;
+        erase(found);
+        return 1;
+    }
+    iterator erase(const_iterator first, const_iterator last) {
+        return data_.erase(iterator_const_cast(first), iterator_const_cast(last));
+    }
+    void swap(flat_map & other) {
+        data_.swap(other.data);
+    }
+    void clear() {
+        data_.clear();
+    }
+    template<typename First, typename... Args>
+    std::pair<iterator, bool> emplace(First &&first, Args &&... args) {
+        KeyOrValueCompare comp;
+        auto lower_bound = std::lower_bound(data_.begin(), data_.end(), first, comp);
+        if (lower_bound == data_.end() || comp(first, *lower_bound))
+            return { data_.emplace(lower_bound, std::forward<First>(first), std::forward<Args>(args)...), true };
+        else
+            return { lower_bound, false };
+    }
+    std::pair<iterator, bool> emplace() {
+        return emplace(value_type());
+    }
+    template<typename First, typename... Args>
+    iterator emplace_hint(const_iterator hint, First &&first, Args &&... args) {
+        KeyOrValueCompare comp;
+        if (hint == cend() || comp(first, *hint)) {
+            if (hint == cbegin() || comp(*(hint - 1), first))
+                return data_.emplace(iterator_const_cast(hint), std::forward<First>(first), std::forward<Args>(args)...);
+            else
+                return emplace(std::forward<First>(first), std::forward<Args>(args)...).first;
+        } else if (!comp(*hint, first)) {
+            return begin() + (hint - cbegin());
+        } else {
+            return emplace(std::forward<First>(first), std::forward<Args>(args)...).first;
+        }
+    }
+    iterator emplace_hint(const_iterator hint) {
+        return emplace_hint(hint, value_type());
+    }
+
+    key_compare key_comp() const {
+        return key_compare();
+    }
+    value_compare value_comp() const {
+        return value_compare();
+    }
+
+    template<typename T>
+    iterator find(const T &key) {
+        return binary_find(begin(), end(), key, KeyOrValueCompare());
+    }
+    template<typename T>
+    const_iterator find(const T &key) const {
+        return binary_find(begin(), end(), key, KeyOrValueCompare());
+    }
+    template<typename T>
+    size_type count(const T &key) const {
+        return std::binary_search(begin(), end(), key, KeyOrValueCompare()) ? 1 : 0;
+    }
+    template<typename T>
+    iterator lower_bound(const T &key) {
+        return std::lower_bound(begin(), end(), key, KeyOrValueCompare());
+    }
+    template<typename T>
+    const_iterator lower_bound(const T & key) const {
+        return std::lower_bound(begin(), end(), key, KeyOrValueCompare());
+    }
+    template<typename T>
+    iterator upper_bound(const T & key) {
+        return std::upper_bound(begin(), end(), key, KeyOrValueCompare());
+    }
+    template<typename T>
+    const_iterator upper_bound(const T &key) const {
+        return std::upper_bound(begin(), end(), key, KeyOrValueCompare());
+    }
+    template<typename T>
+    std::pair<iterator, iterator> equal_range(const T &key) {
+        return std::equal_range(begin(), end(), key, KeyOrValueCompare());
+    }
+    template<typename T>
+    std::pair<const_iterator, const_iterator> equal_range(const T &key) const {
+        return std::equal_range(begin(), end(), key, KeyOrValueCompare());
+    }
+    allocator_type get_allocator() const {
+        return data_.get_allocator();
+    }
+
+    bool operator==(const flat_map &other) const {
+        return data_ == other.data_;
+    }
+    bool operator!=(const flat_map &other) const {
+        return !(*this == other);
+    }
+    bool operator<(const flat_map &other) const {
+        return data_ < other.data_;
+    }
+    bool operator>(const flat_map &other) const {
+        return other < *this;
+    }
+    bool operator<=(const flat_map &other) const {
+        return !(other < *this);
+    }
+    bool operator>=(const flat_map &other) const {
+        return !(*this < other);
+    }
+
+  private:
+    container_type data_;
+
+    iterator iterator_const_cast(const_iterator it) {
+        return begin() + (it - cbegin());
+    }
+
+    struct KeyOrValueCompare {
+        bool operator()(const key_type &lhs, const key_type &rhs) const {
+            return key_compare()(lhs, rhs);
+        }
+        bool operator()(const key_type &lhs, const value_type &rhs) const {
+            return key_compare()(lhs, rhs.first);
+        }
+        template<typename T>
+        bool operator()(const key_type &lhs, const T &rhs) const {
+            return key_compare()(lhs, rhs);
+        }
+        template<typename T>
+        bool operator()(const T &lhs, const key_type &rhs) const {
+            return key_compare()(lhs, rhs);
+        }
+        bool operator()(const value_type &lhs, const key_type &rhs) const {
+            return key_compare()(lhs.first, rhs);
+        }
+        bool operator()(const value_type &lhs, const value_type &rhs) const {
+            return key_compare()(lhs.first, rhs.first);
+        }
+        template<typename T>
+        bool operator()(const value_type &lhs, const T &rhs) const {
+            return key_compare()(lhs.first, rhs);
+        }
+        template<typename T>
+        bool operator()(const T &lhs, const value_type &rhs) const {
+            return key_compare()(lhs, rhs.first);
+        }
+    };
+
+    // like std::binary_search, but returns the iterator to the element
+    // if it was found, and returns end otherwise
+    template<typename It, typename T, typename Compare>
+    static It binary_find(It begin, It end, const T & value, const Compare & cmp) {
+        auto lower_bound = std::lower_bound(begin, end, value, cmp);
+        if (lower_bound == end || cmp(value, *lower_bound))
+            return end;
+        else
+            return lower_bound;
+    }
+};
+
+template<typename K, typename V, typename C, typename A>
+void swap(flat_map<K, V, C, A> & lhs, flat_map<K, V, C, A> & rhs) {
+    lhs.swap(rhs);
+}
+
+}
+
+#endif
diff --git a/src/include/adt/flat_set.hpp b/src/include/adt/flat_set.hpp
new file mode 100644
index 0000000..b4ee8e0
--- /dev/null
+++ b/src/include/adt/flat_set.hpp
@@ -0,0 +1,230 @@
+#ifndef __ADT_FLAT_SET_HPP__
+#define __ADT_FLAT_SET_HPP__
+
+#pragma once
+
+#include <vector>
+#include <algorithm>
+#include <type_traits>
+#include <functional>
+
+namespace adt {
+
+template<typename T, typename Comp = std::less<T>, template<typename, typename...> class Container = std::vector >
+struct flat_set {
+    typedef T key_type;
+    typedef T value_type;
+    typedef Comp key_compare;
+    typedef Comp value_compare;
+    typedef value_type& reference;
+    typedef const value_type& const_reference;
+    typedef Container<value_type> container_type;
+    typedef typename container_type::pointer pointer;
+    typedef typename container_type::const_pointer const_pointer;
+    typedef typename container_type::iterator iterator;
+    typedef typename container_type::const_iterator const_iterator;
+    typedef typename container_type::reverse_iterator reverse_iterator;
+    typedef typename container_type::const_reverse_iterator const_reverse_iterator;
+    typedef typename container_type::difference_type difference_type;
+    typedef typename container_type::size_type size_type;
+
+    flat_set() = default;
+    template<typename It>
+    flat_set(It begin, It end) {
+        insert(begin, end);
+    }
+    flat_set(std::initializer_list<value_type> init)
+            : flat_set(init.begin(), init.end()) { }
+
+    iterator                begin()         { return data_.begin();   }
+    iterator                end()           { return data_.end();     }
+    const_iterator          begin()   const { return data_.begin();   }
+    const_iterator          end()     const { return data_.end();     }
+    const_iterator          cbegin()  const { return data_.cbegin();  }
+    const_iterator          cend()    const { return data_.cend();    }
+    reverse_iterator        rbegin()        { return data_.rbegin();  }
+    reverse_iterator        rend()          { return data_.rend();    }
+    const_reverse_iterator  rbegin()  const { return data_.rbegin();  }
+    const_reverse_iterator  rend()    const { return data_.rend();    }
+    const_reverse_iterator  crbegin() const { return data_.crbegin(); }
+    const_reverse_iterator  crend()   const { return data_.crend();   }
+
+    bool empty() const { return data_.empty(); }
+    size_type size() const { return data_.size(); }
+    size_type max_size() const { return data_.max_size(); }
+    size_type capacity() const { return data_.capacity(); }
+    void reserve(size_type size) { data_.reserve(size); }
+    void shrink_to_fit() { data_.shrink_to_fit(); }
+    size_type bytes_used() const { return capacity() * sizeof(value_type) + sizeof(data_); }
+
+    std::pair<iterator, bool> insert(value_type && value) { return emplace(std::move(value)); }
+    std::pair<iterator, bool> insert(const value_type & value) { return emplace(value); }
+    iterator insert(const_iterator hint, value_type && value) { return emplace_hint(hint, std::move(value)); }
+    iterator insert(const_iterator hint, const value_type & value) { return emplace_hint(hint, value); }
+    void insert(std::initializer_list<value_type> il) { insert(il.begin(), il.end()); }
+
+    template<typename It>
+    void insert(It begin, It end) {
+        // If we need to increase the capacity, utilize this fact and emplace
+        // the stuff.
+        for (; begin != end && size() == capacity(); ++begin) {
+            emplace(*begin);
+        }
+        if (begin == end)
+            return;
+        // If we don't need to increase capacity, then we can use a more efficient
+        // insert method where everything is just put in the same vector
+        // and then merge in place.
+        size_type size_before = data_.size();
+        try {
+            for (size_t i = capacity(); i > size_before && begin != end; --i, ++begin) {
+                data_.emplace_back(*begin);
+            }
+        } catch(...) {
+            // If emplace_back throws an exception, the easiest way to make sure
+            // that our invariants are still in place is to resize to the state
+            // we were in before
+            for (size_t i = data_.size(); i > size_before; --i) {
+                data_.pop_back();
+            }
+            throw;
+        }
+        value_compare comp;
+        auto mid = data_.begin() + size_before;
+        std::stable_sort(mid, data_.end(), comp);
+        std::inplace_merge(data_.begin(), mid, data_.end(), comp);
+        data_.erase(std::unique(data_.begin(), data_.end(), std::not2(comp)), data_.end());
+        // Make sure that we inserted at least one element before
+        // recursing. Otherwise we'd recurse too often if we were to insert the
+        // same element many times
+        if (data_.size() == size_before) {
+            for (; begin != end; ++begin) {
+                if (emplace(*begin).second) {
+                    ++begin;
+                    break;
+                }
+            }
+        }
+
+        // insert the remaining elements that didn't fit by calling this function recursively
+        // this will recurse log(n) times where n is std::distance(begin, end)
+        return insert(begin, end);
+    }
+    iterator erase(iterator it) { return data_.erase(it); }
+    iterator erase(const_iterator it) { return erase(iterator_const_cast(it)); }
+    size_type erase(const value_type &val) {
+        auto found = find(val);
+        if (found == end()) return 0;
+        erase(found);
+        return 1;
+    }
+    iterator erase(const_iterator first, const_iterator last) {
+        return data_.erase(iterator_const_cast(first), iterator_const_cast(last));
+    }
+
+    void swap(flat_set & other) { data_.swap(other.data_); }  // fixed: member is data_, not data (did not compile)
+    void clear() { data_.clear(); }
+
+    template<typename First, typename... Args>
+    std::pair<iterator, bool> emplace(First && first, Args &&... args) {
+        Comp comp;
+        auto lower_bound = std::lower_bound(data_.begin(), data_.end(), first, comp);
+        if (lower_bound == data_.end() || comp(first, *lower_bound))
+            return { data_.emplace(lower_bound, std::forward<First>(first), std::forward<Args>(args)...), true };
+        else
+            return { lower_bound, false };
+    }
+    std::pair<iterator, bool> emplace() { return emplace(value_type()); }
+    template<typename First, typename... Args>
+    iterator emplace_hint(const_iterator hint, First && first, Args &&... args) {
+        Comp comp;
+        if (hint == cend() || comp(first, *hint)) {
+            if (hint == cbegin() || comp(*(hint - 1), first))
+                return data_.emplace(iterator_const_cast(hint), std::forward<First>(first), std::forward<Args>(args)...);
+            else
+                return emplace(std::forward<First>(first), std::forward<Args>(args)...).first;
+        } else if (!comp(*hint, first)) {
+            return begin() + (hint - cbegin());
+        }
+
+        return emplace(std::forward<First>(first), std::forward<Args>(args)...).first;
+    }
+    iterator emplace_hint(const_iterator hint) { return emplace_hint(hint, value_type()); }
+
+    key_compare key_comp() const { return key_compare(); }
+    value_compare value_comp() const { return value_compare(); }
+
+    iterator find(const value_type &key) {
+        return binary_find(begin(), end(), key, Comp());
+    }
+    const_iterator find(const value_type &key) const {
+        return binary_find(begin(), end(), key, Comp());
+    }
+    size_type count(const value_type &key) const {
+        return std::binary_search(begin(), end(), key, Comp()) ? 1 : 0;
+    }
+    iterator lower_bound(const value_type &key) {
+        return std::lower_bound(begin(), end(), key, Comp());
+    }
+    const_iterator lower_bound(const value_type &key) const {
+        return std::lower_bound(begin(), end(), key, Comp());
+    }
+    iterator upper_bound(const value_type &key) {
+        return std::upper_bound(begin(), end(), key, Comp());
+    }
+    const_iterator upper_bound(const value_type &key) const {
+        return std::upper_bound(begin(), end(), key, Comp());
+    }
+    std::pair<iterator, iterator> equal_range(const value_type &key) {
+        return std::equal_range(begin(), end(), key, Comp());
+    }
+    std::pair<const_iterator, const_iterator> equal_range(const value_type &key) const {
+        return std::equal_range(begin(), end(), key, Comp());
+    }
+
+    bool operator==(const flat_set &other) const {
+        return data_ == other.data_;
+    }
+    bool operator!=(const flat_set &other) const {
+        return !(*this == other);
+    }
+    bool operator<(const flat_set &other) const {
+        return data_ < other.data_;
+    }
+    bool operator>(const flat_set &other) const {
+        return other < *this;
+    }
+    bool operator<=(const flat_set &other) const {
+        return !(other < *this);
+    }
+    bool operator>=(const flat_set &other) const {
+        return !(*this < other);
+    }
+
+  private:
+    container_type data_;
+
+    iterator iterator_const_cast(const_iterator it) {
+        return begin() + (it - cbegin());
+    }
+
+    // like std::binary_search, but returns the iterator to the element
+    // if it was found, and returns end otherwise
+    template<typename It, typename Compare>
+    static It binary_find(It begin, It end, const value_type &value, const Compare &cmp) {
+        auto lower_bound = std::lower_bound(begin, end, value, cmp);
+        if (lower_bound == end || cmp(value, *lower_bound))
+            return end;
+        else
+            return lower_bound;
+    }
+};
+
+template<typename V, typename C, template<typename, typename...> class Container>
+void swap(flat_set<V, C, Container> & lhs, flat_set<V, C, Container> & rhs) {  // non-member swap so ADL/std::swap picks the O(1) member version
+    lhs.swap(rhs);
+}
+
+}
+
+#endif // __ADT_FLAT_SET_HPP__
diff --git a/src/include/adt/function_traits.hpp b/src/include/adt/function_traits.hpp
new file mode 100644
index 0000000..ebb946e
--- /dev/null
+++ b/src/include/adt/function_traits.hpp
@@ -0,0 +1,70 @@
+#ifndef __ADT_FUNCTION_TRAITS__
+#define __ADT_FUNCTION_TRAITS__
+
+#pragma once
+
+#include <functional>
+
+namespace adt {
+
+template<class F>
+struct function_traits;
+
+// function pointer
+template<class R, class... Args>
+struct function_traits<R(*)(Args...)> : public function_traits<R(Args...)> {};
+
+// member function pointer
+template<class C, class R, class... Args>
+struct function_traits<R(C::*)(Args...)> : public function_traits<R(C&, Args...)> {};
+
+// const member function pointer
+template<class C, class R, class... Args>
+struct function_traits<R(C::*)(Args...) const> : public function_traits<R(C&, Args...)> {};
+
+// member object pointer
+template<class C, class R>
+struct function_traits<R(C::*)> : public function_traits<R(C&)> {};
+
+template<class R, class... Args>
+struct function_traits<R(Args...)> {
+  using return_type = R;
+
+  static constexpr std::size_t arity = sizeof...(Args);
+
+  template <std::size_t N>
+  struct arg {
+    static_assert(N < arity, "invalid argument index");
+    using type = typename std::tuple_element<N, std::tuple<Args...>>::type;
+  };
+};
+
+template<class F>
+struct function_traits<F&> : public function_traits<F> {};
+
+template<class F>
+struct function_traits<F&&> : public function_traits<F> {};
+
+// functors & default implementation
+template<class F>
+struct function_traits {
+ private:
+  using call_type = function_traits<decltype(&F::operator())>;
+
+ public:
+  using return_type = typename call_type::return_type;
+
+  // Remember to drop the implicit object argument added by &F::operator()
+  static constexpr std::size_t arity = call_type::arity - 1;
+
+  template <std::size_t N>
+  struct arg {
+    static_assert(N < arity, "invalid argument index");
+    // Remember to skip the implicit object argument (hence N+1)
+    using type = typename call_type::template arg<N+1>::type;
+  };
+};
+
+} // namespace adt
+
+#endif // __ADT_FUNCTION_TRAITS__
diff --git a/src/include/adt/iterator_range.hpp b/src/include/adt/iterator_range.hpp
index 8e31d90..e76c61b 100644
--- a/src/include/adt/iterator_range.hpp
+++ b/src/include/adt/iterator_range.hpp
@@ -8,12 +8,19 @@
 #define __ITERATOR_RANGE_H__
 
 #include <utility>
+#include <iterator>
+
+namespace adt {
 
 template <typename IteratorT>
 class iterator_range {
   IteratorT begin_iterator, end_iterator;
 
 public:
+  template <typename Container>
+  iterator_range(Container &&c)
+  //TODO: Consider ADL/non-member begin/end calls.
+      : begin_iterator(c.begin()), end_iterator(c.end()) {}
   iterator_range(IteratorT begin_iterator, IteratorT end_iterator)
       : begin_iterator(std::move(begin_iterator)),
         end_iterator(std::move(end_iterator)) {}
@@ -30,4 +37,10 @@ template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
   return iterator_range<T>(std::move(p.first), std::move(p.second));
 }
 
+template<typename T>
+iterator_range<decltype(begin(std::declval<T>()))> drop_begin(T &&t, int n) {  // view of t with the first n elements skipped
+  return make_range(std::next(begin(t), n), end(t));  // UB if t has fewer than n elements (std::next contract)
+}
+}
+
 #endif
diff --git a/src/include/adt/queue_iterator.hpp b/src/include/adt/queue_iterator.hpp
index f3aac60..a125b65 100644
--- a/src/include/adt/queue_iterator.hpp
+++ b/src/include/adt/queue_iterator.hpp
@@ -49,6 +49,10 @@ public:
 		return res;
 	}
 
+	void clear() {
+	    storage_.clear();
+	}
+
 	bool empty() const {
 		return storage_.empty();
 	}
@@ -94,10 +98,20 @@ public:
 		queue_.erase(to_remove);
 	}
 
+	void clear() {
+	    queue_.clear();
+	    current_actual_ = false;  // invalidate the cached current element
+	    current_deleted_ = false;  // reset deletion flag so operator* refetches from the queue
+	}
+
 	bool IsEnd() const {
 		return queue_.empty();
 	}
 
+	size_t size() const {
+		return queue_.size();
+	}
+
 	const T& operator*() {
 		VERIFY(!queue_.empty());
 		if(!current_actual_ || current_deleted_) {
diff --git a/src/include/adt/small_pod_vector.hpp b/src/include/adt/small_pod_vector.hpp
new file mode 100644
index 0000000..d261174
--- /dev/null
+++ b/src/include/adt/small_pod_vector.hpp
@@ -0,0 +1,379 @@
+#ifndef __ADT_SMALL_POD_VECTOR__
+#define __ADT_SMALL_POD_VECTOR__
+
+#pragma once
+
+#include <llvm/PointerIntPair.h>
+
+#include <vector>
+#include <type_traits>
+
+namespace adt {
+
+#define LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
+#define UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
+
+template<class T>
+class SmallPODVector {
+  template <typename PT1, typename PT2> class PointerUnionTraits {
+  public:
+    static inline void *getAsVoidPointer(void *P) { return P; }
+    static inline void *getFromVoidPointer(void *P) { return P; }
+    enum {
+      PT1BitsAv = (int)(llvm::PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
+      PT2BitsAv = (int)(llvm::PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
+      NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
+    };
+  };
+
+  static const unsigned SmallSizeIntBits = 3;
+  static const unsigned MaxSmall = (1 << SmallSizeIntBits) - 1;
+
+  typedef typename std::vector<T> vector_type;
+
+  typedef llvm::PointerIntPair<void *, SmallSizeIntBits, size_t,
+                               PointerUnionTraits<T*, vector_type*> > container_type;
+
+  typedef SmallPODVector<T> self;
+  container_type data_;
+
+public:
+  typedef size_t size_type;
+  typedef ptrdiff_t difference_type;
+  typedef T value_type;
+  typedef T* iterator;
+  typedef const T* const_iterator;
+
+  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+  typedef std::reverse_iterator<iterator> reverse_iterator;
+
+  typedef T& reference;
+  typedef const T& const_reference;
+  typedef T* pointer;
+  typedef const T* const_pointer;
+
+// workaround missing "is_trivially_copyable" in g++ < 5.0
+#if __GNUG__ && __GNUC__ < 5
+#define IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T)
+#else
+#define IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable<T>::value
+#endif
+
+  static_assert(IS_TRIVIALLY_COPYABLE(value_type), "Value type for SmallPODVector should be trivially copyable");
+
+#undef IS_TRIVIALLY_COPYABLE
+
+private:
+  vector_type* vector() const {
+    return (data_.getInt() == 0 ? static_cast<vector_type*>(data_.getPointer()) : nullptr);
+  }
+
+  void impl_resize(size_type N) {
+    void *data = data_.getPointer(), *new_data = data;
+    size_t sz = data_.getInt(), new_sz = N;
+
+    if (UNLIKELY(sz == 0 && data != nullptr)) { // vector case
+      vector_type *v = static_cast<vector_type*>(data);
+      if (N > MaxSmall) {
+        v->resize(N);
+        new_data = v;
+        new_sz = 0;
+      } else { // We have to turn vector into array
+        if (N) {
+          new_data = malloc(N * sizeof(T));
+          new_sz = N;
+          memcpy(new_data, v->data(), N * sizeof(T));
+        } else {
+          new_data = nullptr;
+          new_sz = 0;
+        }
+        delete v;
+      }
+    } else if (UNLIKELY(N > MaxSmall)) {
+      // Ok, we have to grow too much - allocate new vector
+      vector_type *new_vector = new vector_type((T*)data, (T*)data + sz);
+      new_vector->resize(N);
+      if (data)
+        free(data);
+      new_data = new_vector;
+      new_sz = 0;
+    } else {
+      // Otherwise, simply change the size of the allocated space
+      if (N) {
+        new_data = realloc(data, N * sizeof(T));
+        new_sz = N;
+      } else {
+        free(data);
+        new_data = nullptr;
+        new_sz = 0;
+      }
+    }
+
+    data_.setPointer(new_data);
+    data_.setInt(new_sz);
+  }
+
+public:
+  SmallPODVector<T>() = default;
+  SmallPODVector<T>(size_type size, const T &value = T()) {
+    this->assign(size, value);
+  }
+
+  SmallPODVector<T>(const self &that) {
+    assign(that.begin(), that.end());
+  }
+
+  const self& operator=(const self& that) {
+    // Avoid self-assignment.
+    if (this == &that) return *this;
+    assign(that.begin(), that.end());
+    return *this;
+  }
+
+  SmallPODVector<T>(self &&that) {
+    data_ = that.data_;
+    that.data_.setPointer(nullptr);
+    that.data_.setInt(0);
+  }
+
+  const self& operator=(self&& that) {  // fixed: rvalue ref must be non-const — we null out that.data_ below
+    // Avoid self-assignment.
+    if (this == &that) return *this;
+
+    this->impl_resize(0);
+    data_ = that.data_;
+    that.data_.setPointer(nullptr);
+    that.data_.setInt(0);
+
+    return *this;
+  }
+
+  ~SmallPODVector<T>() {
+    this->impl_resize(0);
+  }
+
+  __attribute__((always_inline))
+  bool empty() const {
+    return data_.getInt() == 0 && data_.getPointer() == nullptr;
+  }
+
+  __attribute__((always_inline))
+  size_type size() const {
+    const auto v = vector();
+    if (UNLIKELY(v != nullptr))
+      return v->size();
+
+    return data_.getInt();
+  }
+
+  __attribute__((always_inline))
+  pointer data() {
+    const auto v = vector();
+    if (UNLIKELY(v != nullptr))
+      return v->data();
+
+    return pointer(data_.getPointer());
+  }
+
+  __attribute__((always_inline))
+  const_pointer cdata() const {
+    const auto v = vector();
+    if (UNLIKELY(v != nullptr))
+      return v->data();
+
+    return const_pointer(data_.getPointer());
+  }
+
+  size_type max_size() const { return size_type(-1) / sizeof(T); }
+  size_t capacity() const {
+    const auto v = vector();
+    if (UNLIKELY(v != nullptr))
+      return v->capacity();
+
+    return data_.getInt();
+  }
+
+  // forward iterator creation methods.
+  __attribute__((always_inline))
+  iterator begin() {
+    return (iterator)(data());
+  }
+  __attribute__((always_inline))
+  const_iterator begin() const {
+    return (const_iterator)(cdata());
+  }
+  __attribute__((always_inline))
+  const_iterator cbegin() const {
+    return (const_iterator)(cdata());
+  }
+  __attribute__((always_inline))
+  iterator end() {
+    return (iterator)(data() + size());
+  }
+  __attribute__((always_inline))
+  const_iterator end() const {
+    return (const_iterator)(cdata() + size());
+  }
+  __attribute__((always_inline))
+  const_iterator cend() const {
+    return (const_iterator)(cdata() + size());
+  }
+
+  // reverse iterator creation methods.
+  reverse_iterator rbegin()            { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
+  reverse_iterator rend()              { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const  { return const_reverse_iterator(begin());}
+
+  __attribute__((always_inline))
+  reference operator[](size_type idx) {
+    assert(idx < size());
+    return begin()[idx];
+  }
+  __attribute__((always_inline))
+  const_reference operator[](size_type idx) const {
+    assert(idx < size());
+    return begin()[idx];
+  }
+
+  reference front() {
+    assert(!empty());
+    return begin()[0];
+  }
+  const_reference front() const {
+    assert(!empty());
+    return begin()[0];
+  }
+
+  reference back() {
+    assert(!empty());
+    return end()[-1];
+  }
+  const_reference back() const {
+    assert(!empty());
+    return end()[-1];
+  }
+
+  void push_back(const T &value) {
+    const auto v = vector();
+    if (UNLIKELY(v != nullptr)) {
+      v->push_back(value);
+      return;
+    }
+
+    this->impl_resize(this->size() + 1);
+    memcpy(this->end() - 1, &value, sizeof(T));
+  }
+
+  void pop_back() {
+    // This will reallocate to array, if necessary.
+    this->impl_resize(this->size() - 1);
+  }
+
+  T pop_back_val() {
+    T res = ::std::move(this->back());
+    this->pop_back();
+    return res;
+  }
+
+  void clear() {
+    this->impl_resize(0);
+  }
+
+  void resize(size_type count) {
+    size_type old_sz = this->size(); this->impl_resize(count);
+    if (count > old_sz) std::uninitialized_fill(this->begin() + old_sz, this->end(), T());  // fixed: fill the grown tail; begin()+count == end() filled nothing
+  }
+
+  void resize(size_type count, const T &value) {
+    size_type old_sz = this->size(); this->impl_resize(count);
+    if (count > old_sz) std::uninitialized_fill(this->begin() + old_sz, this->end(), value);  // fixed: new elements previously never received 'value'
+  }
+
+  void reserve(size_type count) {
+    if (auto v = vector()) {
+      v->reserve(count);
+    }
+  }
+
+  void assign(size_type count, const T &value) {
+    this->impl_resize(count);
+    std::uninitialized_fill(this->begin(), this->end(), value);
+  }
+
+  template<class InputIt>
+  void assign(InputIt first, InputIt last) {
+    this->impl_resize(last - first);
+    std::uninitialized_copy(first, last, this->begin());
+  }
+
+  iterator erase(const_iterator pos) {
+    size_type idx = pos - this->begin();
+    std::copy(iterator(pos + 1), this->end(), iterator(pos));
+    this->impl_resize(this->size() - 1); // This might invalidate iterators
+
+    return this->begin() + idx;
+  }
+
+  iterator erase(const_iterator first, const_iterator last) {
+    difference_type idx = first - this->begin();
+    std::copy(iterator(last), this->end(), iterator(first));
+    this->impl_resize(this->size() - (last - first)); // This might invalidate iterators
+
+    return this->begin() + idx;
+  }
+
+  iterator insert(const_iterator pos, const T &value) {
+    if (pos == this->end()) {
+      this->push_back(value);
+      return this->end() - 1;
+    }
+
+    difference_type idx = pos - this->begin();
+    size_type sz = this->size();
+
+    this->impl_resize(sz + 1); // This might invalidate iterators
+
+    iterator it = this->begin() + idx;
+    std::copy_backward(it, this->end() - 1, this->end());
+
+    // If we just moved the element we're inserting, be sure to update the
+    // reference.
+    const T *vptr = &value;
+    if (it <= vptr && vptr < this->end())
+      ++vptr;
+
+    *it = *vptr;
+
+    return it;
+  }
+
+  template <typename... ArgTypes> void emplace_back(ArgTypes &&... args) {
+    value_type tmp(std::forward<ArgTypes>(args)...);
+    push_back(std::move(tmp));
+  }
+
+  template <typename... ArgTypes> iterator emplace(const_iterator pos, ArgTypes &&... args) {
+    value_type tmp(std::forward<ArgTypes>(args)...);
+    return insert(pos, std::move(tmp));
+  }
+
+  bool operator==(const self &rhs) const {
+    if (this->size() != rhs.size()) return false;
+    return std::equal(this->begin(), this->end(), rhs.begin());
+  }
+  bool operator!=(const self &rhs) const {
+    return !(*this == rhs);
+  }
+  bool operator<(const self &rhs) const {
+    return std::lexicographical_compare(this->begin(), this->end(),
+                                        rhs.begin(), rhs.end());
+  }
+};
+
+#undef LIKELY
+#undef UNLIKELY
+
+}
+
+#endif // __ADT_SMALL_POD_VECTOR__
diff --git a/src/include/de/conj_iterator.hpp b/src/include/de/conj_iterator.hpp
new file mode 100644
index 0000000..50c49a5
--- /dev/null
+++ b/src/include/de/conj_iterator.hpp
@@ -0,0 +1,140 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include <boost/iterator/iterator_facade.hpp>
+
+namespace omnigraph {
+
+namespace de {
+
+/**
+ * @brief Proxy for containers which are essentially split into two: the straight and the conjugate one
+ *        (any of which can be empty).
+ * @param C the underlying container type
+ */
+template<typename C>
+class ConjProxy {
+public:
+    typedef C Container;
+
+    /**
+     * @brief Iterator for this split container.
+     *        It automatically switches to the conjugate half after finishing the straight one.
+     */
+    class Iterator :
+            public boost::iterator_facade<Iterator, typename Container::const_reference, boost::bidirectional_traversal_tag> {
+    public:
+        typedef typename Container::const_iterator InnerIterator;
+
+        Iterator(InnerIterator start_iter, InnerIterator stop_iter, InnerIterator jump_iter, bool conj)
+                : iter_(start_iter), stop_iter_(stop_iter), jump_iter_(jump_iter), conj_(conj) { }
+
+    private:
+        friend class boost::iterator_core_access;
+
+        /**
+         * @brief Increments the iterator.
+         * @detail The underlying iterator is incremented; when it reaches the `stop` position,
+         *         it jumps to the `jump` position which is on the conjugate half.
+         */
+        void increment() {
+            ++iter_;
+            if (!conj_ && iter_ == stop_iter_) {
+                conj_ = true;
+                iter_ = jump_iter_;
+            }
+        }
+
+        void decrement() {
+            if (conj_ && iter_ == jump_iter_) {
+                conj_ = false;
+                iter_ = stop_iter_;
+            }
+            --iter_;
+        }
+
+        bool equal(const Iterator &other) const {
+            return conj_ == other.conj_ && iter_ == other.iter_;
+        }
+
+        typename C::const_reference dereference() const {
+            return *iter_;
+        }
+
+    public:
+        /**
+         * @brief Returns the container const_iterator to the current element.
+         */
+        InnerIterator Iter() const {
+            return iter_;
+        }
+
+        /**
+         * @brief Returns if the iterator is on the conjugate half.
+         */
+        bool Conj() const {
+            return conj_;
+        }
+
+    private:
+        InnerIterator iter_, //the current position
+                stop_iter_,  //when to stop and jump (typically `end` of the straight half)
+                jump_iter_;  //where to jump (typically `begin` of the conjugate half)
+        bool conj_;
+    };
+
+    ConjProxy(const Container &cont, const Container &conj_cont) :
+            cont_(cont),
+            conj_cont_(conj_cont) { }
+
+    /**
+     * @brief Iteration always starts from the beginning of the leftmost non-empty half.
+     *        If there is no such one, it essentially equals to `end`.
+     */
+    Iterator begin() const {
+        auto conj = cont_.empty();
+        auto start = conj ? conj_cont_.begin() : cont_.begin();
+        return Iterator(start, cont_.end(), conj_cont_.begin(), conj);
+    }
+
+    /**
+     * @brief Raw iterator should end right after the jumping, i.e. on the beginning
+     *        of the conjugate half.
+     */
+    Iterator conj_begin() const {
+        return Iterator(conj_cont_.begin(), cont_.end(), conj_cont_.begin(), true);
+    }
+
+    /**
+     * @brief Full iterator ends on the end of the conjugate half.
+     */
+    Iterator end() const {
+        return Iterator(conj_cont_.end(), cont_.end(), conj_cont_.begin(), true);
+    }
+
+    /**
+     * @brief Returns the total size of both halves.
+     */
+    size_t size() const {
+        return cont_.size() + conj_cont_.size();
+    }
+
+    /**
+     * @brief Returns if both halves are empty.
+     */
+    bool empty() const {
+        return cont_.empty() && conj_cont_.empty();
+    }
+
+private:
+    const Container &cont_, &conj_cont_;
+};
+
+}
+
+}
diff --git a/src/include/de/data_divider.hpp b/src/include/de/data_divider.hpp
index 907d13d..bd33b93 100644
--- a/src/include/de/data_divider.hpp
+++ b/src/include/de/data_divider.hpp
@@ -23,7 +23,7 @@
 #include <utility>
 #include <cstdlib>
 #include <cstdio>
-#include "paired_info.hpp"
+#include "index_point.hpp"
 #include "omni/omni_utils.hpp"
 
 namespace omnigraph {
diff --git a/src/include/de/distance_estimation.hpp b/src/include/de/distance_estimation.hpp
index 87fc014..00b7190 100644
--- a/src/include/de/distance_estimation.hpp
+++ b/src/include/de/distance_estimation.hpp
@@ -59,14 +59,15 @@ class GraphDistanceFinder {
 
     DistancesLengthsCallback<Graph> callback(graph_);
 
-    PathProcessor<Graph> paths_proc(graph_, path_lower_bounds, path_upper_bound,
-                                    graph_.EdgeEnd(e1), end_points, callback);
+    PathProcessor<Graph> paths_proc(graph_, graph_.EdgeEnd(e1), path_upper_bound);
 
-    paths_proc.Process();
+    for (size_t i = 0; i < end_points.size(); ++i) {
+        //FIXME should max dist also depend on the point?
+        paths_proc.Process(end_points[i], path_lower_bounds[i], path_upper_bound, callback);
+    }
 
     vector<GraphLengths> result;
 
-
     size_t i = 0;
     for (auto& entry : second_edges) {
       GraphLengths lengths = callback.distances(i++);
@@ -98,7 +99,7 @@ class AbstractDistanceEstimator {
  protected:
   typedef UnclusteredPairedInfoIndexT<Graph> InPairedIndex;
   typedef PairedInfoIndexT<Graph> OutPairedIndex;
-  typedef typename InPairedIndex::Histogram InHistogram;
+  typedef typename InPairedIndex::FullHistProxy InHistogram;
   typedef typename OutPairedIndex::Histogram OutHistogram;
 
  public:
@@ -147,9 +148,7 @@ class AbstractDistanceEstimator {
   }
 
   void AddToResult(const OutHistogram& clustered, EdgePair ep, PairedInfoBuffer<Graph>& result) const {
-    for (auto it = clustered.begin(); it != clustered.end(); ++it) {
-      result.AddPairInfo(ep, *it);
-    }
+      result.AddMany(ep.first, ep.second, clustered);
   }
 
 private:
@@ -170,10 +169,10 @@ class DistanceEstimator: public AbstractDistanceEstimator<Graph> {
   typedef pair<EdgeId, EdgeId> EdgePair;
 
  protected:
-  typedef typename AbstractDistanceEstimator<Graph>::InPairedIndex InPairedIndex;
-  typedef typename AbstractDistanceEstimator<Graph>::OutPairedIndex OutPairedIndex;
-  typedef typename InPairedIndex::Histogram InHistogram;
-  typedef typename OutPairedIndex::Histogram OutHistogram;
+  typedef typename base::InPairedIndex InPairedIndex;
+  typedef typename base::OutPairedIndex OutPairedIndex;
+  typedef typename base::InHistogram InHistogram;
+  typedef typename base::OutHistogram OutHistogram;
 
  public:
   DistanceEstimator(const Graph& graph,
@@ -198,17 +197,15 @@ class DistanceEstimator: public AbstractDistanceEstimator<Graph> {
       edges.push_back(*it);
 
     DEBUG("Processing");
-    std::vector<PairedInfoBuffer<Graph> > buffer(nthreads);
+    PairedInfoBuffersT<Graph> buffer(this->graph(), nthreads);
 #   pragma omp parallel for num_threads(nthreads) schedule(guided, 10)
     for (size_t i = 0; i < edges.size(); ++i) {
       EdgeId edge = edges[i];
-      const auto& inner_map = index.GetEdgeInfo(edge, 0);
-      ProcessEdge(edge, inner_map, buffer[omp_get_thread_num()]);
+      ProcessEdge(edge, index, buffer[omp_get_thread_num()]);
     }
 
-    INFO("Merging maps");
     for (size_t i = 0; i < nthreads; ++i) {
-      result.AddAll(buffer[i]);
+      result.Merge(buffer[i]);
       buffer[i].Clear();
     }
   }
@@ -216,19 +213,6 @@ class DistanceEstimator: public AbstractDistanceEstimator<Graph> {
  protected:
   const size_t max_distance_;
 
-  OutHistogram ConjugateInfos(EdgePair ep, const OutHistogram& histogram) const {
-    OutHistogram answer;
-    const Graph& g = this->graph();
-    for (auto point : histogram)
-      answer.insert(ConjugatePoint(g.length(ep.first), g.length(ep.second), point));
-
-    return answer;
-  }
-
-  EdgePair ConjugatePair(EdgePair ep) const {
-    return std::make_pair(this->graph().conjugate(ep.second), this->graph().conjugate(ep.first));
-  }
-
   virtual EstimHist EstimateEdgePairDistances(EdgePair ep,
                                               const InHistogram& histogram,
                                               const GraphLengths& raw_forward) const {
@@ -236,7 +220,7 @@ class DistanceEstimator: public AbstractDistanceEstimator<Graph> {
     using namespace math;
     EdgeId e1 = ep.first, e2 = ep.second;
     size_t first_len  = this->graph().length(e1), second_len = this->graph().length(e2);
-    int maxD = rounded_d(*histogram.rbegin()), minD = rounded_d(*histogram.begin());
+    int minD = rounded_d(histogram.min()), maxD = rounded_d(histogram.max());
 
     TRACE("Bounds are " << minD << " " << maxD);
     EstimHist result;
@@ -287,28 +271,28 @@ class DistanceEstimator: public AbstractDistanceEstimator<Graph> {
 
  private:
   virtual void ProcessEdge(EdgeId e1,
-                           const typename InPairedIndex::InnerMap& inner_map,
+                           const InPairedIndex& pi,
                            PairedInfoBuffer<Graph>& result) const {
     typename base::LengthMap second_edges;
-    for (auto I = inner_map.begin(), E = inner_map.end(); I != E; ++I)
-      second_edges[I->first];
+    auto inner_map = pi.RawGet(e1);
+    for (auto i : inner_map)
+        second_edges[i.first];
 
     this->FillGraphDistancesLengths(e1, second_edges);
 
     for (const auto& entry: second_edges) {
       EdgeId e2 = entry.first;
       EdgePair ep(e1, e2);
-      if (ep > ConjugatePair(ep))
-          continue;
+
+      VERIFY(ep <= pi.ConjugatePair(ep));
 
       const GraphLengths& forward = entry.second;
       TRACE("Edge pair is " << this->graph().int_id(ep.first)
             << " " << this->graph().int_id(ep.second));
-      const InHistogram& hist = inner_map.find(e2)->second;
+      auto hist = pi.Get(e1, e2);
       const EstimHist& estimated = this->EstimateEdgePairDistances(ep, hist, forward);
       OutHistogram res = this->ClusterResult(ep, estimated);
       this->AddToResult(res, ep, result);
-      this->AddToResult(ConjugateInfos(ep, res), ConjugatePair(ep), result);
     }
   }
 
diff --git a/src/include/de/extensive_distance_estimation.hpp b/src/include/de/extensive_distance_estimation.hpp
index 0a786cb..7c6573e 100644
--- a/src/include/de/extensive_distance_estimation.hpp
+++ b/src/include/de/extensive_distance_estimation.hpp
@@ -28,8 +28,10 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
   typedef WeightedDistanceEstimator<Graph> base;
   typedef typename base::InPairedIndex InPairedIndex;
   typedef typename base::OutPairedIndex OutPairedIndex;
-  typedef typename InPairedIndex::Histogram InHistogram;
-  typedef typename OutPairedIndex::Histogram OutHistogram;
+  typedef typename base::InHistogram InHistogram;
+  typedef typename base::OutHistogram OutHistogram;
+
+  typedef typename InPairedIndex::Histogram TempHistogram;
 
  public:
   ExtensiveDistanceEstimator(const Graph &graph,
@@ -47,11 +49,11 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
   typedef vector<pair<int, double> > EstimHist;
   typedef vector<size_t> GraphLengths;
 
-  void ExtendInfoLeft(EdgeId e1, EdgeId e2, InHistogram& data, size_t max_shift) const {
+  void ExtendInfoLeft(EdgeId e1, EdgeId e2, TempHistogram& data, size_t max_shift) const {
     ExtendLeftDFS(e1, e2, data, 0, max_shift);
   }
 
-  void ExtendInfoRight(EdgeId e1, EdgeId e2, InHistogram& data, size_t max_shift) const {
+  void ExtendInfoRight(EdgeId e1, EdgeId e2, TempHistogram& data, size_t max_shift) const {
     ExtendRightDFS(e1, e2, data, 0, max_shift);
   }
 
@@ -60,11 +62,12 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
   typedef pair<EdgeId, EdgeId> EdgePair;
 
   virtual void ProcessEdge(EdgeId e1,
-                           const typename InPairedIndex::InnerMap& inner_map,
-                           PairedInfoBuffer<Graph>& result) const {
+                           const InPairedIndex& pi,
+                           PairedInfoBuffer<Graph>& result) const override {
+    auto inner_map = pi.RawGet(e1);
     typename base::LengthMap second_edges;
-    for (auto I = inner_map.begin(), E = inner_map.end(); I != E; ++I)
-      second_edges[I->first];
+    for (auto i : inner_map)
+      second_edges[i.first];
 
     this->FillGraphDistancesLengths(e1, second_edges);
 
@@ -72,11 +75,8 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
       EdgeId e2 = entry.first;
       EdgePair ep(e1, e2);
 
-      if (ep > this->ConjugatePair(ep))
-          continue;
-
       const GraphLengths& forward = entry.second;
-      InHistogram hist = inner_map.find(e2)->second;
+      TempHistogram hist = pi.Get(e1, e2).Unwrap();
       DEBUG("Extending paired information");
       double weight_0 = WeightSum(hist);
       DEBUG("Extend left");
@@ -87,14 +87,13 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
       const EstimHist& estimated = this->EstimateEdgePairDistances(ep, hist, forward);
       OutHistogram res = this->ClusterResult(ep, estimated);
       this->AddToResult(res, ep, result);
-      this->AddToResult(this->ConjugateInfos(ep, res), this->ConjugatePair(ep), result);
     }
   }
 
   double WeightSum(const InHistogram& hist) const {
     double answer = 0.;
-    for (auto iter = hist.begin(); iter != hist.end(); ++iter) {
-      answer += iter->weight;
+    for (const auto& p : hist) {
+      answer += p.weight;
     }
     return answer;
   }
@@ -104,23 +103,22 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
       return true;
 
     auto prev = hist.begin()->d;
-    for (auto it = hist.begin(); it != hist.end(); ++it) {
-      if (math::gr(prev, it->d))
+    for (auto p : hist) {
+      if (math::gr(prev, p.d))
         return false;
 
-      prev = it->d;
+      prev = p.d;
     }
     return true;
   }
 
-  void MergeInto(const InHistogram& what, InHistogram& where, int shift) const {
+  void MergeInto(const TempHistogram& what, TempHistogram& where, int shift) const {
     // assuming they are sorted already
     if (what.size() == 0)
       return;
 
     if (where.size() == 0) {
-      for (auto iter = what.begin(); iter != what.end(); ++iter) {
-        Point to_be_added = *iter;
+      for (auto to_be_added : what) {
         to_be_added.d += shift;
         where.insert(to_be_added);
       }
@@ -133,14 +131,12 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
     // straightforwardly.
     if (math::ls(where.rbegin()->d, what.begin()->d + shift) ||
         math::gr(where.begin()->d, what.rbegin()->d + shift)) {
-      for (auto iter = what.begin(); iter != what.end(); ++iter) {
-        Point to_be_added = *iter;
+      for (auto to_be_added : what) {
         to_be_added.d += shift;
         where.insert(to_be_added);
       }
     } else {
-      for (auto iter = what.begin(); iter != what.end(); ++iter) {
-        Point to_be_added(*iter);
+      for (auto to_be_added : what) {
         to_be_added.d += shift;
         auto low_bound = std::lower_bound(where.begin(), where.end(), to_be_added);
         if (to_be_added == *low_bound) {
@@ -154,21 +150,18 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
     VERIFY(IsSorted(where));
   }
 
-  InHistogram FilterPositive(const InHistogram& hist, size_t first_len, size_t second_len) const {
+  TempHistogram FilterPositive(const typename InPairedIndex::FullHistProxy& hist, size_t first_len, size_t second_len) const {
     // assuming it is sorted
-    if (hist.size() == 0)
-      return hist;
-
-    InHistogram answer;
-    for (auto iterator = hist.begin(); iterator != hist.end(); ++iterator) {
-      if (math::ge(2. * iterator->d + (double) second_len, (double) first_len))
-        answer.insert(*iterator);
+    TempHistogram answer;
+    for (auto point : hist) {
+      if (math::ge(2. * point.d + (double) second_len, (double) first_len))
+        answer.insert(point);
     }
     return answer;
   }
 
   // left edge being extended to the left, shift is negative always
-  void ExtendLeftDFS(EdgeId current, const EdgeId& last, InHistogram& data, int shift, size_t max_shift) const {
+  void ExtendLeftDFS(EdgeId current, const EdgeId& last, TempHistogram& data, int shift, size_t max_shift) const {
     VertexId start = this->graph().EdgeStart(current);
     if (current == last)
       return;
@@ -176,7 +169,7 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
       return;
 
     for (EdgeId next : this->graph().IncomingEdges(start)) {
-      auto hist = this->index().GetEdgePairInfo(next, last);
+      auto hist = this->index().Get(next, last);
       if (-shift < (int) max_shift)
         ExtendLeftDFS(next, last, data, shift - (int) this->graph().length(next), max_shift);
       auto filtered_infos = FilterPositive(hist, this->graph().length(next), this->graph().length(last));
@@ -186,7 +179,7 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
   }
 
   // right edge being extended to the right, shift is negative always
-  void ExtendRightDFS(const EdgeId& first, EdgeId current, InHistogram& data, int shift, size_t max_shift) const {
+  void ExtendRightDFS(const EdgeId& first, EdgeId current, TempHistogram& data, int shift, size_t max_shift) const {
     VertexId end = this->graph().EdgeEnd(current);
     if (current == first)
       return;
@@ -194,7 +187,7 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
       return;
 
     for (EdgeId next : this->graph().OutgoingEdges(end)) {
-      auto hist = this->index().GetEdgePairInfo(first, next);
+      auto hist = this->index().Get(first, next);
       if (-shift < (int) max_shift)
         ExtendRightDFS(first, next, data, shift - (int) this->graph().length(current), max_shift);
 
@@ -204,7 +197,7 @@ class ExtensiveDistanceEstimator: public WeightedDistanceEstimator<Graph> {
     }
   }
 
-  virtual const string Name() const {
+  const string Name() const override {
     static const string my_name = "EXTENSIVE";
     return my_name;
   }
diff --git a/src/include/de/index_point.hpp b/src/include/de/index_point.hpp
new file mode 100644
index 0000000..c5dba96
--- /dev/null
+++ b/src/include/de/index_point.hpp
@@ -0,0 +1,455 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include <btree/btree_set.h>
+#include "adt/flat_set.hpp"
+#include "adt/small_pod_vector.hpp"
+
+namespace omnigraph {
+
+namespace de {
+
+// Define several storage-only POD types which can be
+// implicitly converted to / from double.
+
+class DEDistance {
+public:
+    DEDistance() = default;
+    DEDistance(int d)
+            : d_((float)d) {}
+    DEDistance(double d)
+            : d_((float)d) {}
+    DEDistance(size_t d)
+            : d_((float)d) {}
+    operator float() const { return d_; }
+    DEDistance operator+= (double d) {
+        d_ += (float)d;
+        return *this;
+    }
+    DEDistance operator*= (double d) {
+        d_ *= (float)d;
+        return *this;
+    }
+private:
+    float d_;
+};
+
+class DEWeight {
+public:
+    DEWeight() = default;
+    DEWeight(double d)
+            : d_((float)d) {}
+    operator float() const { return d_; }
+    DEWeight operator+= (double d) {
+        d_ += (float)d;
+        return *this;
+    }
+    DEWeight operator*= (double d) {
+        d_ *= (float)d;
+        return *this;
+    }
+private:
+    float d_;
+};
+
+struct __attribute__((aligned(8))) RawPoint {
+    DEDistance d;
+    mutable DEWeight weight;
+
+    RawPoint()
+            : RawPoint(0, 0) { }
+    
+
+    RawPoint(DEDistance distance, DEWeight weight)
+            : d(distance), weight(weight) {}
+
+    RawPoint(DEDistance distance, DEWeight weight, DEDistance)
+            : d(distance), weight(weight) {}
+
+
+    const RawPoint operator+=(const RawPoint &rhs) const {
+        weight += rhs.weight;
+        return *this;
+    }
+
+    std::string str() const {
+        stringstream ss;
+        ss << "Point: " << " distance = " << this->d
+           << ", weight = " << this->weight;
+        return ss.str();
+    }
+
+    bool operator<(const RawPoint& rhs) const {
+        return math::ls(this->d, rhs.d);
+    }
+
+    bool operator==(const RawPoint& rhs) const {
+        return math::eq(this->d, rhs.d);
+    }
+
+    bool operator!=(const RawPoint& rhs) const {
+        return !(operator==(rhs));
+    }
+
+    RawPoint operator-() const {
+        return RawPoint(-d, weight);
+    }
+
+    RawPoint operator+(const RawPoint &rhs) const {
+        return RawPoint(d, rhs.weight + weight);
+    }
+
+    DEWeight variation() const {
+        return 0;
+    }
+
+    RawPoint Conjugate(size_t l1, size_t l2) const
+    {
+        return RawPoint(d + DEDistance(l2) - DEDistance(l1), weight);
+    }
+};
+
+struct Point : public RawPoint {
+    DEDistance var;
+
+    Point()
+            : Point(0, 0, 0) { }
+
+    Point(DEDistance distance, DEWeight weight, DEDistance variance)
+            : RawPoint(distance, weight), var(variance) {}
+
+    Point(const RawPoint &rhs)
+            : RawPoint(rhs), var(0.0) {}
+
+    bool operator<(const Point& rhs) const {
+        return math::ls(this->d, rhs.d);
+    }
+
+    bool operator==(const Point& rhs) const {
+        return math::eq(this->d, rhs.d);
+    }
+
+    bool operator!=(const Point& rhs) const {
+        return !(operator==(rhs));
+    }
+
+    Point operator-() const {
+        return Point(-d, weight, var);
+    }
+
+    Point operator+(const Point &rhs) const {
+        auto weight_rhs = rhs.weight;
+        // counting new bounds in the case, when we are merging pair infos with var != 0
+        auto left_bound = std::min(d - var, rhs.d - rhs.var);
+        auto right_bound = std::max(d + var, rhs.d + rhs.var);
+        auto new_dist = (left_bound + right_bound) * 0.5f;
+        auto new_weight = weight + weight_rhs;
+        auto new_variance = (right_bound - left_bound) * 0.5f;
+
+        return Point(new_dist, new_weight, new_variance);
+    }
+
+    DEDistance variation() const {
+        return var;
+    }
+
+    Point Conjugate(size_t l1, size_t l2) const
+    {
+        return Point(d + DEDistance(l2) - DEDistance(l1), weight, var);
+    }
+};
+
+inline int rounded_d(const RawPoint& p) {
+    return math::round_to_zero(p.d);
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Point &point) {
+    return os << point.str();
+}
+
+inline std::ostream& operator<<(std::ostream& os, const RawPoint &point) {
+    return os << point.str();
+}
+
+template<class Point>
+class Histogram {
+    typedef Histogram<Point> self_type;
+    typedef typename std::less<Point> key_compare;
+    typedef typename std::allocator<Point> allocator_type;
+    typedef typename adt::flat_set<Point, key_compare, adt::SmallPODVector> Tree;
+
+  public:
+    typedef typename Tree::key_type key_type;
+    typedef typename Tree::value_type value_type;
+    typedef typename Tree::pointer pointer;
+    typedef typename Tree::const_pointer const_pointer;
+    typedef typename Tree::reference reference;
+    typedef typename Tree::const_reference const_reference;
+    typedef typename Tree::size_type size_type;
+    typedef typename Tree::difference_type difference_type;
+    typedef typename Tree::iterator iterator;
+    typedef typename Tree::const_iterator const_iterator;
+    typedef typename Tree::reverse_iterator reverse_iterator;
+    typedef typename Tree::const_reverse_iterator const_reverse_iterator;
+
+    enum {
+        kValueSize = sizeof(Point)
+    };
+    
+ public:
+    // Default constructor.
+    Histogram() = default;
+
+    // Copy constructor.
+    Histogram(const self_type &x)
+            : tree_(x.tree_) {}
+
+    template <class InputIterator>
+    Histogram(InputIterator b, InputIterator e) {
+        insert(b, e);
+    }
+
+    // Iterator routines.
+    iterator begin() { return tree_.begin(); }
+    const_iterator begin() const { return tree_.begin(); }
+    iterator end() { return tree_.end(); }
+    const_iterator end() const { return tree_.end(); }
+    reverse_iterator rbegin() { return tree_.rbegin(); }
+    const_reverse_iterator rbegin() const { return tree_.rbegin(); }
+    reverse_iterator rend() { return tree_.rend(); }
+    const_reverse_iterator rend() const { return tree_.rend(); }
+
+    // Lookup routines.
+    iterator lower_bound(const key_type &key) { return tree_.lower_bound(key); }
+    const_iterator lower_bound(const key_type &key) const { return tree_.lower_bound(key); }
+    iterator upper_bound(const key_type &key) { return tree_.upper_bound(key); }
+    const_iterator upper_bound(const key_type &key) const { return tree_.upper_bound(key); }
+    std::pair<iterator,iterator> equal_range(const key_type &key) { return tree_.equal_range(key); }
+    std::pair<const_iterator,const_iterator> equal_range(const key_type &key) const { return tree_.equal_range(key); }
+
+    // Utility routines.
+    void clear() { tree_.clear(); }
+    void swap(self_type &x) { tree_.swap(x.tree_); }
+
+    // Size routines.
+    size_type size() const { return tree_.size(); }
+    size_type max_size() const { return tree_.max_size(); }
+    bool empty() const { return tree_.empty(); }
+    size_type bytes_used() const { return tree_.bytes_used(); }
+
+    // Lookup routines.
+    iterator find(const key_type &key) { return tree_.find(key); }
+    const_iterator find(const key_type &key) const { return tree_.find(key); }
+    size_type count(const key_type &key) const { return tree_.count(key); }
+
+    // Insertion routines.
+    std::pair<iterator,bool> insert(const value_type &x) { return tree_.insert(x); }
+    iterator insert(iterator position, const value_type &x) { return tree_.insert(position, x); }
+    template <typename InputIterator>
+    void insert(InputIterator b, InputIterator e) { tree_.insert(b, e); }
+
+    // Deletion routines.
+    size_type erase(const key_type &key) { return tree_.erase(key); }
+    // Erase the specified iterator from the btree. The iterator must be valid
+    // (i.e. not equal to end()).  Return an iterator pointing to the node after
+    // the one that was erased (or end() if none exists).
+    iterator erase(const iterator &iter) { return tree_.erase(iter); }
+    void erase(const iterator &first, const iterator &last) { tree_.erase(first, last); }
+
+    bool operator==(const self_type& x) const {
+        if (size() != x.size())
+            return false;
+
+        for (const_iterator i = begin(), xi = x.begin(); i != end(); ++i, ++xi)
+            if (*i != *xi)
+                return false;
+
+        return true;
+    }
+
+    bool operator!=(const self_type& other) const {
+        return !operator==(other);
+    }
+
+  protected:
+    Tree tree_;
+
+  private:
+    // This is template voodoo which creates function overload depending on
+    // whether Point has const operator+= or not.
+    template<class>
+    struct true_helper : std::true_type {};
+    template<class T = Point>
+    static auto test_can_merge(int) -> true_helper<decltype(std::declval<const T>().operator+=(std::declval<const T>()))>;
+    template<class>
+    static auto test_can_merge(long) -> std::false_type;
+    template<class T = Point>
+    struct can_merge : decltype(test_can_merge<T>(0)) {};
+
+  public:
+    // This function overload is enabled only when Point has const operator+= (e.g. RawPoint)
+    // and therefore we can update it inplace.
+    template<class OtherHist, class U = Point>
+    typename std::enable_if<can_merge<U>::value, size_t>::type
+    merge(const OtherHist &other) {
+        size_t added = 0;
+        for (const auto& new_point : other) {
+            // First, try to insert a point
+            const auto& result = insert(new_point);
+            if (!result.second) {
+                // We already having something there. Try to merge stuff in.
+                *result.first += new_point;
+            } else
+                added += 1;
+        }
+
+        return added;
+    }
+
+    // Otherwise this overload is used, which removes the point from set,
+    // updates it and re-inserts back.
+    template<class OtherHist, class U = Point>
+    typename std::enable_if<!can_merge<U>::value, size_t>::type
+    merge(const OtherHist &other) {
+        size_t added = 0;
+        for (const auto& new_point : other) {
+            // First, try to insert a point
+            auto result = insert(new_point);
+            if (!result.second) {
+                Point updated = *result.first + new_point;
+                auto after_removed = erase(result.first);
+                insert(after_removed, updated);
+            } else
+                added += 1;
+        }
+
+        return added;
+    }
+};
+
+template <typename T>
+inline std::ostream& operator<<(std::ostream &os, const Histogram<T> &b) {
+    os << b;
+    return os;
+}
+
+typedef Histogram<RawPoint> RawHistogram;
+typedef Histogram<Point> HistogramWithWeight;
+
+inline bool ClustersIntersect(Point p1, Point p2) {
+    return math::le(p1.d, p2.d + p1.var + p2.var) &&
+           math::le(p2.d, p1.d + p1.var + p2.var);
+}
+
+// tuple of a pair of edges @first, @second, and a @point
+template<typename EdgeId>
+struct PairInfo {
+    EdgeId first;
+    EdgeId second;
+    Point point;
+
+    PairInfo()
+            : first(), second(), point() {}
+
+
+    PairInfo(const PairInfo& pair_info)
+            : first(pair_info.first), second(pair_info.second), point(pair_info.point) {}
+
+    PairInfo(EdgeId first, EdgeId second, DEDistance d, DEWeight weight, DEDistance var)
+            : first(first), second(second), point(d, weight, var) {}
+
+    PairInfo(EdgeId first, EdgeId second, Point point)
+            : first(first), second(second), point(point) {}
+
+    // Two paired infos are considered equal
+    // if they coincide in all parameters except for weight and variance.
+    bool operator==(const PairInfo& rhs) const {
+        const PairInfo &lhs = *this;
+        return lhs.first == rhs.first && lhs.second == rhs.second && lhs.point == rhs.point;
+    }
+
+    bool operator!=(const PairInfo& rhs) const {
+        return !(*this == rhs);
+    }
+
+    bool operator<(const PairInfo<EdgeId>& rhs) const {
+        const PairInfo<EdgeId>& lhs = *this;
+        return lhs.first == rhs.first ?
+               (lhs.second == rhs.second ? lhs.point < rhs.point : lhs.second < rhs.second)
+               : lhs.first < rhs.first;
+    }
+
+    double d() const      { return point.d;      }
+    double weight() const { return point.weight; }
+    double var() const    { return point.var;    }
+};
+
+template<typename EdgeId>
+ostream& operator<<(ostream& os, const PairInfo<EdgeId>& info) {
+    return os << "PairInfo: first = " << info.first << ", second = " << info.second
+           << "Point : " << info.point;
+}
+
+template<typename EdgeId>
+const PairInfo<EdgeId> MinPairInfo(EdgeId id) {
+    return PairInfo<EdgeId>(id, EdgeId(typename EdgeId::pointer_type(1)),
+                            -10000000000, 0., 0.);
+}
+
+template<typename EdgeId>
+const PairInfo<EdgeId> MaxPairInfo(EdgeId id) {
+    return PairInfo<EdgeId>(id, EdgeId(typename EdgeId::pointer_type(-1)),
+                            10000000000, 0., 0.);
+}
+
+template<typename EdgeId>
+const PairInfo<EdgeId> MinPairInfo(EdgeId e1, EdgeId e2) {
+    PairInfo<EdgeId> info = MinPairInfo(e1);
+    info.second = e2;
+    return info;
+}
+
+template<typename EdgeId>
+const PairInfo<EdgeId> MaxPairInfo(EdgeId e1, EdgeId e2) {
+    PairInfo<EdgeId> info = MaxPairInfo(e1);
+    info.second = e2;
+    return info;
+}
+
+/**
+ * Method returns approximate distance between occurrences of edges in genome rounded to the nearest
+ * integer. In case of a tie closest to 0 value is chosen thus one can assume that distance
+ * is rounded the same way as opposite one.
+ * todo check that written here is true
+ */
+template<typename EdgeId>
+inline int rounded_d(PairInfo<EdgeId> const& pi) {
+    return math::round_to_zero(pi.d());
+}
+
+template<typename EdgeId>
+inline PairInfo<EdgeId> BackwardInfo(const PairInfo<EdgeId>& pi) {
+    return PairInfo<EdgeId>(pi.second, pi.first, -pi.point);
+}
+
+template<typename EdgeId>
+inline bool IsSymmetric(PairInfo<EdgeId> const& pi) {
+    return pi.first == pi.second && math::eq(pi.d(), 0.);
+}
+
+}
+
+}
+
+namespace std {
+template<>
+class numeric_limits<omnigraph::de::DEDistance> : public numeric_limits<float> {};
+template<>
+class numeric_limits<omnigraph::de::DEWeight> : public numeric_limits<float> {};
+}
diff --git a/src/include/de/insert_size_refiner.hpp b/src/include/de/insert_size_refiner.hpp
index f08f3f3..a6630b5 100644
--- a/src/include/de/insert_size_refiner.hpp
+++ b/src/include/de/insert_size_refiner.hpp
@@ -13,7 +13,9 @@
 
 namespace omnigraph {
 
-inline double get_median(const std::map<int, size_t> &hist) {
+typedef std::map<int, size_t> HistType;
+
+inline double get_median(const HistType &hist) {
   double S = 0;
   for (auto iter = hist.begin(); iter != hist.end(); ++iter)
     S += (double) iter->second;
@@ -29,7 +31,7 @@ inline double get_median(const std::map<int, size_t> &hist) {
   return -1;
 }
 
-inline double get_mad(const std::map<int, size_t> &hist, double median) { // median absolute deviation
+inline double get_mad(const HistType &hist, double median) { // median absolute deviation
   std::map<int, size_t> hist2;
   for (auto iter = hist.begin(); iter != hist.end(); ++iter) {
       int x = abs(iter->first - math::round_to_zero(median));
@@ -38,7 +40,7 @@ inline double get_mad(const std::map<int, size_t> &hist, double median) { // med
   return get_median(hist2);
 }
 
-inline void hist_crop(const map<int, size_t> &hist, double low, double high, map<int, size_t>& res) {
+inline void hist_crop(const HistType &hist, double low, double high, HistType& res) {
   for (auto iter = hist.begin(); iter != hist.end(); ++iter) {
     if (iter->first >= low && iter->first <= high) {
       DEBUG("Cropped histogram " <<  iter->first << " " << iter->second);
@@ -49,7 +51,7 @@ inline void hist_crop(const map<int, size_t> &hist, double low, double high, map
 
 inline
 std::pair<double, double> GetISInterval(double quantile,
-                                        const std::map<int, size_t> &is_hist) {
+                                        const HistType &is_hist) {
   // First, obtain the sum of the values
   double S = 0;
   for (auto iter : is_hist)
@@ -73,5 +75,92 @@ std::pair<double, double> GetISInterval(double quantile,
   return std::make_pair(is_min, is_max);
 }
 
+inline void find_median(const HistType& hist, double& median, double& mad, HistType&cropped_hist) {
+    DEBUG("Counting median and MAD");
+    median = get_median(hist);
+    mad = get_mad(hist, median);
+    double low = median - 5. * 1.4826 * mad;
+    double high = median + 5. * 1.4826 * mad;
+    omnigraph::hist_crop(hist, low, high, cropped_hist);
+    median = get_median(cropped_hist);
+    mad = get_mad(cropped_hist, median);
+}
+
+//Moved from insert size counter.
+//TODO: Please explain constants like 1.4826.
+inline void find_mean(const HistType& hist, double& mean, double& delta, std::map<size_t, size_t>& percentiles) {
+    double median = get_median(hist);
+    double mad = get_mad(hist, median);
+    double low = median - 5. * 1.4826 * mad;
+    double high = median + 5. * 1.4826 * mad;
+
+    DEBUG("Median IS: " << median);
+    DEBUG("MAD: " << mad);
+    DEBUG("Thresholds set to: [" << low << ", " << high << "]");
+
+    size_t n = 0;
+    double sum = 0.;
+    double sum2 = 0.;
+    DEBUG("Counting average");
+    for (auto iter = hist.begin(); iter != hist.end(); ++iter) {
+        if (iter->first < low || iter->first > high) {
+            continue;
+        }
+        n += iter->second;
+        sum += (double) iter->second * 1. * (double) iter->first;
+        sum2 += (double)iter->second * 1. * (double)iter->first * (double)iter->first;
+    }
+    mean = sum / (double) n;
+    delta = sqrt(sum2 / (double) n - mean * mean);
+
+    low = mean - 5 * delta;
+    high = mean + 5 * delta;
+
+    DEBUG("Mean IS: " << mean);
+    DEBUG("sd: " << delta);
+    DEBUG("Thresholds set to: [" << low << ", " << high << "]");
+
+    n = 0;
+    sum = 0.;
+    sum2 = 0.;
+    for (auto iter = hist.begin(); iter != hist.end(); ++iter) {
+        if (iter->first < low || iter->first > high) {
+            continue;
+        }
+        n += iter->second;
+        sum += (double) iter->second * 1. * (double) iter->first;
+        sum2 += (double) iter->second * 1. * (double) iter->first * (double) iter->first;
+    }
+    mean = sum / (double) n;
+    delta = sqrt(sum2 / (double) n - mean * mean);
+
+    DEBUG("Mean IS: " << mean);
+    DEBUG("sd: " << delta);
+
+    size_t m = 0;
+
+    DEBUG("Counting percentiles");
+    //todo optimize
+    size_t q[19];
+    for (size_t i = 1; i < 20; ++i) {
+        q[i - 1] = 5 * i;
+    }
+    for (auto iter = hist.begin(); iter != hist.end(); ++iter) {
+        if (iter->first < low || iter->first > high) {
+            continue;
+        }
+        size_t mm = m + iter->second;
+        for (size_t i = 0; i < utils::array_size(q); i++) {
+            size_t scaled_q_i((size_t) ((double) q[i] / 100. * (double) n));
+            if (m < scaled_q_i && mm >= scaled_q_i) {
+                percentiles[q[i]] = (size_t) iter->first;
+            }
+        }
+        m = mm;
+    }
+}
+
+
+
 
 }
diff --git a/src/include/de/pair_info_filters.hpp b/src/include/de/pair_info_filters.hpp
index 52e7df0..fc1567c 100644
--- a/src/include/de/pair_info_filters.hpp
+++ b/src/include/de/pair_info_filters.hpp
@@ -8,109 +8,12 @@
 #ifndef PAIR_INFO_FILTERS_HPP_
 #define PAIR_INFO_FILTERS_HPP_
 
+#include "paired_info_helpers.hpp"
+
 namespace omnigraph {
 
 namespace de {
 
-/*
-template<class Graph>
-class AbstractPairInfoFilter {
-
- private:
-  typedef typename Graph::VertexId VertexId;
-  typedef typename Graph::EdgeId EdgeId;
-  typedef PairInfo<EdgeId> PairInfoT;
-
- protected:
-  virtual bool Check(const PairInfoT&) const {
-    return true;
-  }
-
-  virtual bool Check(EdgeId, EdgeId, const Point&) const {
-    return true;
-  }
-
-  const Graph& graph_;
-
- public:
-  AbstractPairInfoFilter(const Graph& graph) :
-          graph_(graph) {}
-
-  void Filter(PairedInfoIndexT<Graph>& index) const {
-    TRACE("index size: " << index.size());
-    for (auto it = index.begin(); it != index.end(); ++it) {
-      // This is dirty hack, but it's safe here
-      Histogram& infos = const_cast<Histogram&>(*it);
-      const EdgeId& e1 = it.first();
-      const EdgeId& e2 = it.second();
-
-      for (auto p_iter = infos.begin(); p_iter != infos.end(); ) {
-        const Point& point = *p_iter;
-        if (!Check(e1, e2, point))
-            p_iter = infos.erase(p_iter);
-        else
-            ++p_iter;
-      }
-    }
-
-    INFO("Pruning the index");
-    index.Prune();
-  }
-
-  virtual ~AbstractPairInfoFilter() {}
-};
-
-template<class Graph>
-class PairInfoWeightFilter: public AbstractPairInfoFilter<Graph> {
-
- private:
-  typedef typename Graph::EdgeId EdgeId;
-  typedef PairInfo<EdgeId> PairInfoT;
-  double weight_threshold_;
-
- public:
-  PairInfoWeightFilter(const Graph& graph, double weight_threshold) :
-    AbstractPairInfoFilter<Graph>(graph), weight_threshold_(weight_threshold) {
-  }
-
- protected:
-  virtual bool Check(EdgeId, EdgeId, const Point& p) const {
-    return math::ge(p.weight, weight_threshold_);
-  }
-
-  virtual bool Check(const PairInfoT& info) const {
-    return math::ge(info.weight(), weight_threshold_);
-  }
-};
-
-
-template<class Graph>
-class PairInfoWeightFilterWithCoverage: public AbstractPairInfoFilter<Graph> {
-
- private:
-  typedef typename Graph::EdgeId EdgeId;
-  typedef PairInfo<EdgeId> PairInfoT;
-  double weight_threshold_;
-
- public:
-  PairInfoWeightFilterWithCoverage(const Graph& graph, double weight_threshold) :
-          AbstractPairInfoFilter<Graph>(graph), weight_threshold_(weight_threshold)
-    {}
-
- protected:
-  virtual bool Check(EdgeId e1, EdgeId e2, const Point& p) const {
-    double info_weight = p.weight;
-    return math::ge(info_weight, weight_threshold_) ||
-           math::ge(info_weight, 0.1 * this->graph_.coverage(e1)) ||
-           math::ge(info_weight, 0.1 * this->graph_.coverage(e2));
-  }
-
-  virtual bool Check(const PairInfoT& info) const {
-    return Check(info.first, info.second, info.point);
-  }
-};
-*/
-
 template<class Graph>
 class AbstractPairInfoChecker{
 private:
@@ -192,7 +95,7 @@ class AmbiguousPairInfoChecker : public AbstractPairInfoChecker<Graph> {
   }
 
   bool IsPairInfoGood(EdgeId edge1, EdgeId edge2){
-	  return index_.GetEdgePairInfo(edge1, edge2).size() <= 1;
+	  return index_.Get(edge1, edge2).size() <= 1;
   }
 
   bool EdgesAreFromSimpleBulgeWithAmbPI(const PairInfoT& info){
@@ -239,9 +142,8 @@ class AmbiguousPairInfoChecker : public AbstractPairInfoChecker<Graph> {
   }
 
   double GetPairInfoWeight(EdgeId edge1, EdgeId edge2){
-	  if(index_.GetEdgePairInfo(edge1, edge2).size() == 1)
-		  return index_.GetEdgePairInfo(edge1, edge2).begin()->weight;
-	  return 0;
+      auto hist = index_.Get(edge1, edge2);
+      return (hist.size() == 1) ? float(hist.begin()->weight) : 0.0f;
   }
 
   bool InnerCheck(const PairInfoT& info){
@@ -324,37 +226,42 @@ private:
 template<class Graph>
 class PairInfoFilter{
 private:
-  typedef typename Graph::VertexId VertexId;
-  typedef typename Graph::EdgeId EdgeId;
-  typedef PairInfo<EdgeId> PairInfoT;
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PairInfo<EdgeId> PairInfoT;
 
 protected:
-  AbstractPairInfoChecker<Graph> &pair_info_checker_;
+    AbstractPairInfoChecker<Graph> &pair_info_checker_;
 
 public:
-  PairInfoFilter(AbstractPairInfoChecker<Graph> &pair_info_checker) :
-	pair_info_checker_(pair_info_checker){
-  }
+    PairInfoFilter(AbstractPairInfoChecker<Graph> &pair_info_checker) :
+            pair_info_checker_(pair_info_checker)
+    {}
 
-  void Filter(PairedInfoIndexT<Graph>& index){
-    TRACE("Index size: " << index.size());
-    for (auto it = index.begin(); it != index.end(); ++it) {
-      auto points = *it;
-      auto e1 = it.first();
-      auto e2 = it.second();
-      if (pair_info_checker_.Check(e1, e2)) {
-        for (auto p_iter = points.begin(); p_iter != points.end(); ) {
-          const Point& point = *p_iter++;
-          if (!pair_info_checker_.Check(PairInfoT(e1, e2, point))) {
-            index.DeletePairInfo(e1, e2, point);
-            index.DeletePairInfo(e2, e1, -point);
-          }
+    void Filter(PairedInfoIndexT<Graph>& index) {
+        INFO("Start filtering; index size: " << index.size());
+        //We can't filter while traversing, because Remove may invalidate iterators
+        //So let's save edge pairs first
+        using EdgePair = std::pair<EdgeId, EdgeId>;
+        std::vector<EdgePair> pairs;
+        for (auto i = pair_begin(index); i != pair_end(index); ++i)
+            if (pair_info_checker_.Check(i.first(), i.second()))
+                pairs.push_back({i.first(), i.second()});
+
+        //TODO: implement fast removing of the whole set of points
+        for (const auto& pair : pairs) {
+            //Same thing with invalidation
+            HistogramWithWeight hist;
+            for (auto point : index[pair])
+                if (!pair_info_checker_.Check(PairInfoT(pair.first, pair.second, point)))
+                    hist.insert(point);
+            //index.RemoveMany(pair_hist.first.first, pair_hist.first.second, pair_hist.second);
+            for (const auto& point : hist)
+                index.Remove(pair.first, pair.second, point);
         }
-      }
+
+        INFO("Done filtering");
     }
-    INFO("Pruning the index");
-    index.Prune();
-  }
 };
 
 }
diff --git a/src/include/de/paired_info.hpp b/src/include/de/paired_info.hpp
index 12d4169..20c78a8 100644
--- a/src/include/de/paired_info.hpp
+++ b/src/include/de/paired_info.hpp
@@ -7,880 +7,857 @@
 
 #pragma once
 
-#include "xmath.h"
-#include "omni/omni_utils.hpp"
-#include "sequence/sequence.hpp"
+#include "conj_iterator.hpp"
+#include "index_point.hpp"
 
-#include <boost/iterator/iterator_facade.hpp>
+#include <adt/iterator_range.hpp>
 
-#include <btree/btree_set.h>
 #include <btree/safe_btree_map.h>
 #include <sparsehash/sparse_hash_map>
 
-#include <cmath>
-#include <map>
-#include <limits>
-
-//#define MERGE_DATA_RELATIVE_DIFFERENCE 0.3
 
 namespace omnigraph {
 
 namespace de {
 
-// Define several storage-only types which can be implicitly converted to / from
-// double.
-
-class DEDistance {
-  public:
-    DEDistance() = default;
-    DEDistance(int d)
-            : d_((float)d) {}
-    DEDistance(double d)
-            : d_((float)d) {}
-    DEDistance(size_t d)
-            : d_((float)d) {}
-    operator float() const { return d_; }
-    DEDistance operator+= (double d) {
-        d_ += (float)d;
-        return *this;
-    }
-    DEDistance operator*= (double d) {
-        d_ *= (float)d;
-        return *this;
-    }
-  private:
-    float d_;
-};
-
-class DEWeight {
-  public:
-    DEWeight() = default;
-    DEWeight(double d)
-            : d_((float)d) {}
-    operator float() const { return d_; }
-    DEWeight operator+= (double d) {
-        d_ += (float)d;
-        return *this;
-    }
-    DEWeight operator*= (double d) {
-        d_ *= (float)d;
-        return *this;
-    }
-  private:
-    float d_;
-};
-
 /**
- * PairInfo class represents basic data unit for paired information: edges first and second appear
- * in genome at distance d_ and this information has weight weight_.
+ * @brief Paired reads info storage. Arranged as a map of map of info points.
+ * @param G graph type
+ * @param H map-like container type (parameterized by key and value type)
  */
-struct Point {
-  public:
-    DEDistance d;
-    DEWeight   weight;
-    DEWeight   var;
-
-    Point()
-            : d(0.0), weight(0.0), var(0.0) {}
-
-    Point(DEDistance distance, DEWeight weight, DEWeight variance)
-            : d(distance), weight(weight), var(variance) {}
-
-    Point(const Point& rhs)
-            : d(rhs.d), weight(rhs.weight), var(rhs.var) {}
+template<typename G, typename H, template<typename, typename> class Container>
+class PairedIndex {
 
-    std::string str() const {
-        stringstream ss;
-        ss << "Point: " << " distance = " << this->d
-           << ", weight = " << this->weight
-           << ", variance = " << this->var;
-        return ss.str();
-    }
-
-    Point& operator=(const Point& rhs) {
-        using namespace math;
-        update_value_if_needed<DEDistance>(d, rhs.d);
-        update_value_if_needed<DEWeight>(weight, rhs.weight);
-        update_value_if_needed<DEWeight>(var, rhs.var);
-        return *this;
-    }
+public:
+    typedef G Graph;
+    typedef H Histogram;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef std::pair<EdgeId, EdgeId> EdgePair;
+    typedef typename Histogram::value_type Point;
+
+    typedef Container<EdgeId, Histogram> InnerMap;
+    typedef Container<EdgeId, InnerMap> StorageMap;
+
+    //--Data access types--
+
+    typedef typename StorageMap::const_iterator ImplIterator;
+
+    /**
+     * @brief Smart proxy set representing a composite histogram of points between two edges.
+     * @param full When true, represents the whole histogram (consisting both of directly added points
+     *             and "restored" conjugates).
+     *             When false, proxifies only the added points.
+     * @details You can work with the proxy just like with any constant set.
+     *         The only major difference is that it returns all consisting points by value,
+     *         because some of them don't exist in the underlying sets and are
+     *         restored from the conjugate info on-the-fly.
+     */
+    template<bool full = true>
+    class HistProxy {
+
+    public:
+        /**
+         * @brief Iterator over a proxy set of points.
+         * @warning Generally, the proxy is unordered even if the set is ordered.
+         *          If you require that, convert it into a flat histogram with Unwrap().
+         * @param full When true, traverses both straight and conjugate points,
+         *             and automatically recalculates the distance for latter.
+         *             When false, traverses only the added points and skips the rest.
+         */
+        class Iterator: public boost::iterator_facade<Iterator, Point, boost::bidirectional_traversal_tag, Point> {
+
+            typedef typename ConjProxy<Histogram>::Iterator InnerIterator;
+
+        public:
+            Iterator(InnerIterator iter, float offset)
+                    : iter_(iter)
+                    , offset_(offset)
+            {}
+
+        private:
+            friend class boost::iterator_core_access;
+
+            Point dereference() const {
+                Point result = *iter_;
+                if (iter_.Conj())
+                    result.d += offset_;
+                return result;
+            }
 
-    bool operator<(const Point& rhs) const {
-        return math::ls(this->d, rhs.d);
-    }
+            void increment() {
+                ++iter_;
+            }
 
-    bool operator==(const Point& rhs) const {
-        return math::eq(this->d, rhs.d);
-    }
+            void decrement() {
+                --iter_;
+            }
 
-    bool operator!=(const Point& rhs) const {
-        return !(operator==(rhs));
-    }
+            inline bool equal(const Iterator &other) const {
+                return iter_ == other.iter_;
+            }
 
-    Point operator-() const {
-        return Point(-d, weight, var);
-    }
+            InnerIterator iter_; //current position
+            float offset_;       //offset to be added for conjugate distance
+        };
+
+        HistProxy(const Histogram& hist, const Histogram& conj_hist, float offset = 0)
+            : hist_(hist, conj_hist)
+            , offset_(offset)
+        {}
+
+        /**
+         * @brief Returns an empty proxy (effectively a Null object pattern).
+         */
+        static const Histogram& empty_hist() {
+            static Histogram res;
+            return res;
+        }
 
-    Point operator+(const Point &rhs) const {
-        auto weight_rhs = rhs.weight;
-        // counting new bounds in the case, when we are merging pair infos with var != 0
-        auto left_bound = std::min(d - var, rhs.d - rhs.var);
-        auto right_bound = std::max(d + var, rhs.d + rhs.var);
-        auto new_dist = (left_bound + right_bound) * 0.5f;
-        auto new_weight = weight + weight_rhs;
-        auto new_variance = (right_bound - left_bound) * 0.5f;
+        /**
+         * @brief Returns a wrapper for an ordinary histogram (for implicit conversions)
+         */
+        HistProxy(const Histogram& hist, float offset = 0)
+                : hist_(hist, HistProxy::empty_hist())
+                , offset_(offset)
+        {}
 
-        return Point(new_dist, new_weight, new_variance);
-    }
+        Iterator begin() const {
+            return Iterator(hist_.begin(), offset_);
+        }
 
-  DEWeight variation() const {
-    return var;
-  }
-};
+        Iterator end() const {
+            //auto i = full ? hist_.end() : hist_.conj_begin();
+            //return Iterator(i, offset_);
+            return Iterator(hist_.end(), offset_);
+        }
 
-struct RawPoint {
-  public:
-    DEDistance d;
-    DEWeight   weight;
+        /**
+         * @brief Finds the point with the minimal distance.
+         * @todo Simplify
+         */
+        Point min() const {
+            //Our histograms are ordered, so the minimum is `begin` of either
+            //straight or conjugate half, but we should beware of emptiness.
+            VERIFY(!empty());
+            auto i1 = begin();
+            if (full) {
+                auto i2 = Iterator(hist_.conj_begin(), offset_);
+                if (i1 == i2 || i2 == end())
+                    return *i1;
+                return std::min(*i1, *i2);
+            } else {
+                return *i1;
+            }
+        }
 
-    RawPoint()
-            : d(0.0), weight(0.0) {}
+        /**
+         * @brief Finds the point with the maximal distance.
+         * @todo Simplify
+         */
+        Point max() const {
+            //Our histograms are ordered, so the maximum is `rbegin` of either
+            //straight or conjugate half, but we should beware of emptiness.
+            VERIFY(!empty());
+            auto i1 = end();
+            if (full) {
+                auto i2 = Iterator(hist_.conj_begin(), offset_);
+                if (i1 == i2 || i2 == begin())
+                    return *--i1;
+                return std::max(*--i1, *--i2);
+            } else {
+                return *--i1;
+            }
+        }
 
-    RawPoint(DEDistance distance, DEWeight weight)
-            : d(distance), weight(weight) {}
+        /**
+         * @brief Returns the copy of all points in a simple histogram.
+         */
+        Histogram Unwrap() const {
+            return Histogram(begin(), end());
+        }
 
-    RawPoint(const Point& rhs)
-            : d(rhs.d), weight(rhs.weight) {}
+        size_t size() const {
+            return hist_.size();
+        }
 
-    operator Point() const {
-      return Point(d, weight, 0);
-    }
+        bool empty() const {
+            return hist_.empty();
+        }
 
-    std::string str() const {
-        stringstream ss;
-        ss << "Point: " << " distance = " << this->d
-           << ", weight = " << this->weight;
-        return ss.str();
-    }
+    private:
+        const ConjProxy<Histogram> hist_;
+        float offset_;
+    };
 
-    RawPoint& operator=(const RawPoint& rhs) {
-        using namespace math;
-        update_value_if_needed<DEDistance>(d, rhs.d);
-        update_value_if_needed<DEWeight>(weight, rhs.weight);
-        return *this;
-    }
+    /**
+     * @brief Type synonym for full histogram proxies (with added and conjugated points)
+     */
+    typedef HistProxy<true> FullHistProxy;
+    /**
+     * @brief Type synonym for raw histogram proxies (only with directly added points)
+     */
+    typedef HistProxy<false> RawHistProxy;
+
+    typedef typename HistProxy<true>::Iterator HistIterator;
+    typedef typename HistProxy<false>::Iterator RawHistIterator;
+
+    //---- Traversing edge neighbours ----
+
+    template<bool full = true>
+    using EdgeHist = std::pair<EdgeId, HistProxy<full>>;
+
+    /**
+     * @brief A proxy map representing neighbourhood of an edge,
+     *        where `Key` is the graph edge ID and `Value` is the proxy histogram.
+     * @param full When true, represents all neighbours (consisting both of directly added data
+     *             and "restored" conjugates).
+     *             When false, proxifies only the added edges.
+     * @details You can work with the proxy just like with any constant map.
+     *         The only major difference is that it returns all consisting pairs by value,
+     *         because some of them don't exist in the underlying sets and are
+     *         restored from the conjugate info on-the-fly.
+     */
+    template<bool full = true>
+    class EdgeProxy {
+    public:
+
+        /**
+         * @brief Iterator over a proxy map.
+         * @param full When true, traverses both straight and conjugate pairs,
+         *             and automatically recalculates the distance for latter.
+         *             When false, traverses only the added points and skips the rest.
+         */
+        class Iterator: public boost::iterator_facade<Iterator, EdgeHist<full>, boost::forward_traversal_tag, EdgeHist<full>> {
+
+            typedef typename ConjProxy<InnerMap>::Iterator InnerIterator;
+
+            void Skip() {
+                if (full) { //For a full iterator, skip possibly repeated edges
+                    while (!iter_.Conj() &&
+                            index_.GetImpl(index_.graph().conjugate(edge_), index_.graph().conjugate(iter_->first)).size())
+                        ++iter_;
+
+                } else { //For a raw iterator, skip conjugate pairs
+                    while (!iter_.Conj() && iter_->first < edge_)
+                        ++iter_;
+                }
+            }
 
-    bool operator<(const RawPoint& rhs) const {
-        return math::ls(this->d, rhs.d);
-    }
+        public:
+            Iterator(const PairedIndex &index, InnerIterator iter, EdgeId edge)
+                    : index_ (index)
+                    , iter_(iter)
+                    , edge_(edge)
+            {
+                Skip();
+            }
 
-    bool operator==(const RawPoint& rhs) const {
-        return math::eq(this->d, rhs.d);
-    }
+            void increment() {
+                ++iter_;
+                Skip();
+            }
 
-    bool operator!=(const RawPoint& rhs) const {
-        return !(operator==(rhs));
-    }
+            void operator=(const Iterator &other) {
+                //TODO: is this risky without an assertion?
+                //We shouldn't reassign iterators from one index onto another
+                iter_ = other.iter_;
+                edge_ = other.edge_;
+            }
 
-    RawPoint operator-() const {
-        return RawPoint(-d, weight);
-    }
+        private:
+            friend class boost::iterator_core_access;
 
-    RawPoint operator+(const RawPoint &rhs) const {
-        return RawPoint(d, rhs.weight + weight);
-    }
+            bool equal(const Iterator &other) const {
+                return iter_ == other.iter_;
+            }
 
-    DEWeight variation() const {
-      return 0;
-    }
-};
+            EdgeHist<full> dereference() const {
+                EdgeId e2 = iter_->first;
+                if (full) {
+                    float offset = index_.CalcOffset(edge_, e2);
+                    EdgePair conj = index_.ConjugatePair(edge_, e2);
+                    if (iter_.Conj()) {
+                        return std::make_pair(conj.first,
+                            HistProxy<full>(index_.GetImpl(edge_, conj.first),
+                                            index_.GetImpl(e2, conj.second),
+                                            offset));
+                    } else {
+                        return std::make_pair(e2, HistProxy<full>(iter_->second, index_.GetImpl(conj), offset));
+                    }
+                } else {
+                    return std::make_pair(e2, HistProxy<full>(iter_->second));
+                }
+            }
 
-inline int rounded_d(Point p) {
-    return math::round_to_zero(p.d);
-}
+        private:
+            const PairedIndex &index_;
+            InnerIterator iter_;
+            EdgeId edge_;
+        };
 
-inline std::ostream& operator<<(std::ostream& os, const Point &point) {
-    return os << point.str();
-}
+        EdgeProxy(const PairedIndex &index, const InnerMap& map, const InnerMap& conj_map, EdgeId edge)
+            : index_(index), map_(map, conj_map), edge_(edge)
+        {}
 
-inline std::ostream& operator<<(std::ostream& os, const RawPoint &point) {
-    return os << point.str();
-}
+        Iterator begin() const {
+            return Iterator(index_, map_.begin(), edge_);
+        }
 
-//typedef std::set<Point> Histogram;
-typedef btree::btree_set<Point, std::less<Point>, std::allocator<Point>, 1024> HistogramWithWeight;
-typedef btree::btree_set<RawPoint, std::less<RawPoint>, std::allocator<RawPoint>, 1024> RawHistogram;
+        Iterator end() const {
+            auto i = full ? map_.end() : map_.conj_begin();
+            return Iterator(index_, i, edge_);
+        }
 
-inline bool ClustersIntersect(Point p1, Point p2) {
-  return math::le(p1.d, p2.d + p1.var + p2.var) &&
-         math::le(p2.d, p1.d + p1.var + p2.var);
-}
+        HistProxy<full> operator[](EdgeId e2) const {
+            //TODO: optimize
+            EdgeId e1 = edge_;
+            auto offset = index_.CalcOffset(e1, e2);
+            if (full) {
+                const auto& hist = index_.GetImpl(edge_, e2);
+                const auto& conj_hist = index_.GetImpl(index_.ConjugatePair(edge_, e2));
+                return HistProxy<full>(hist, conj_hist, offset);
+            } else {
+                if (index_.SwapConj(e1, e2))
+                    return HistProxy<full>(HistProxy<full>::empty_hist(), index_.GetImpl(e1, e2), offset);
+                else
+                    return HistProxy<full>(index_.GetImpl(e1, e2));
+            }
+        }
 
-inline Point ConjugatePoint(size_t l1, size_t l2, const Point& point) {
-    return Point(point.d + DEDistance(l2) - DEDistance(l1), point.weight, point.var);
-}
+        inline bool empty() const {
+            return map_.empty();
+        }
 
-inline RawPoint ConjugatePoint(size_t l1, size_t l2, const RawPoint& point) {
-    return RawPoint(point.d + DEDistance(l2) - DEDistance(l1), point.weight);
-}
+    private:
+        const PairedIndex& index_;
+        const ConjProxy<InnerMap> map_;
+        EdgeId edge_;
+    };
 
-// tuple of a pair of edges @first, @second, and a @point
-template<typename EdgeId>
-struct PairInfo {
-    EdgeId first;
-    EdgeId second;
-    Point point;
+    /*template<> HistProxy<true> EdgeProxy<true>::operator[](EdgeId e2) const {
+        return index_.Get(edge_, e2);
+    }
 
-    PairInfo()
-            : first(), second(), point() {}
+    template<> HistProxy<false> EdgeProxy<false>::operator[](EdgeId e2) const {
+        return index_.RawGet(edge_, e2);
+    }*/
 
+    typedef typename EdgeProxy<true>::Iterator EdgeIterator;
+    typedef typename EdgeProxy<false>::Iterator RawEdgeIterator;
 
-    PairInfo(const PairInfo& pair_info)
-            : first(pair_info.first), second(pair_info.second), point(pair_info.point)
-    {}
+    //--Constructor--
 
-    PairInfo(EdgeId first, EdgeId second, DEDistance d, DEWeight weight, DEWeight var)
-            : first(first), second(second), point(d, weight, var)
+    PairedIndex(const Graph &graph)
+        : size_(0), graph_(graph)
     {}
 
-    PairInfo(EdgeId first, EdgeId second, Point point)
-            : first(first), second(second), point(point) {}
-
-    // Two paired infos are considered equal
-    // if they coincide in all parameters except for weight and variance.
-    bool operator==(const PairInfo& rhs) const {
-        const PairInfo &lhs = *this;
-        return lhs.first == rhs.first && lhs.second == rhs.second && lhs.point == rhs.point;
+    //--Inserting--
+public:
+    /**
+     * @brief Returns a conjugate pair for two edges.
+     */
+    inline EdgePair ConjugatePair(EdgeId e1, EdgeId e2) const {
+        return std::make_pair(graph_.conjugate(e2), graph_.conjugate(e1));
+    }
+    /**
+     * @brief Returns a conjugate pair for a pair of edges.
+     */
+    inline EdgePair ConjugatePair(EdgePair ep) const {
+        return ConjugatePair(ep.first, ep.second);
+    }
+
+    bool SwapConj(EdgeId &e1, EdgeId &e2) const {
+        EdgePair ep = {e1, e2}, ep_conj = ConjugatePair(ep);
+        if (ep > ep_conj) {
+            e1 = ep_conj.first;
+            e2 = ep_conj.second;
+            return true;
+        }
+        return false;
     }
 
-    bool operator!=(const PairInfo& rhs) const {
-        return !(*this == rhs);
+private:
+    bool SwapConj(EdgeId &e1, EdgeId &e2, Point &p) const {
+        if (SwapConj(e1, e2)) {
+            p.d += CalcOffset(e1, e2);
+            return true;
+        }
+        return false;
     }
 
-    bool operator<(const PairInfo<EdgeId>& rhs) const {
-        const PairInfo<EdgeId>& lhs = *this;
-        return (lhs.first == rhs.first ?
-                (lhs.second == rhs.second ? lhs.point < rhs.point : lhs.second < rhs.second)
-                : lhs.first  < rhs.first);
+    float CalcOffset(EdgeId e1, EdgeId e2) const {
+        return float(graph_.length(e1)) - float(graph_.length(e2));
     }
 
-    double d() const      { return point.d;      }
-    double weight() const { return point.weight; }
-    double var() const    { return point.var;    }
-};
-
-template<typename EdgeId>
-ostream& operator<<(ostream& os, const PairInfo<EdgeId>& info) {
-  return os << "PairInfo: first = " << info.first << ", second = " << info.second
-            << "Point : " << info.point;
-}
-
-template<typename EdgeId>
-const PairInfo<EdgeId> MinPairInfo(EdgeId id) {
-  return PairInfo<EdgeId>(id, EdgeId(typename EdgeId::pointer_type(1)),
-      -10000000000, 0., 0.);
-}
-
-template<typename EdgeId>
-const PairInfo<EdgeId> MaxPairInfo(EdgeId id) {
-  return PairInfo<EdgeId>(id, EdgeId(typename EdgeId::pointer_type(-1)),
-       10000000000, 0., 0.);
-}
-
-template<typename EdgeId>
-const PairInfo<EdgeId> MinPairInfo(EdgeId e1, EdgeId e2) {
-  PairInfo<EdgeId> info = MinPairInfo(e1);
-  info.second = e2;
-  return info;
-}
-
-template<typename EdgeId>
-const PairInfo<EdgeId> MaxPairInfo(EdgeId e1, EdgeId e2) {
-  PairInfo<EdgeId> info = MaxPairInfo(e1);
-  info.second = e2;
-  return info;
-}
-
-/**
- * Method returns approximate distance between occurrences of edges in genome rounded to the nearest
- * integer. In case of a tie closest to 0 value is chosen thus one can assume that distance
- * is rounded the same way as opposite one.
- * todo check that written here is true
- */
-template<typename EdgeId>
-inline int rounded_d(PairInfo<EdgeId> const& pi) {
-  return math::round_to_zero(pi.d());
-}
+public:
+    /**
+     * @brief Adds a point between two edges to the index,
+     *        merging weights if there's already one with the same distance.
+     */
+    void Add(EdgeId e1, EdgeId e2, Point point) {
+        SwapConj(e1, e2, point);
+        InsertOrMerge(e1, e2, point);
+    }
+
+    /**
+     * @brief Adds a whole set of points between two edges to the index.
+     */
+    template<typename TH>
+    void AddMany(EdgeId e1, EdgeId e2, const TH& hist) {
+        float offset = SwapConj(e1, e2) ? CalcOffset(e1, e2) : 0.0f;
+        for (auto point : hist) {
+            point.d += offset;
+            InsertOrMerge(e1, e2, point);
+        }
+    }
 
-template<typename EdgeId>
-PairInfo<EdgeId> BackwardInfo(const PairInfo<EdgeId>& pi) {
-  return PairInfo<EdgeId>(pi.second, pi.first, -pi.point);
-}
+private:
 
-template<typename EdgeId>
-inline bool IsSymmetric(PairInfo<EdgeId> const& pi) {
-  return pi.first == pi.second && math::eq(pi.d(), 0.);
-}
-
-// new map { EdgeId -> (EdgeId -> (d, weight, var)) }
-template<class Graph,
-         class HistogramType = HistogramWithWeight,
-         class InnerMapType = btree::safe_btree_map<typename Graph::EdgeId, HistogramType>,
-         class IndexDataType = btree::safe_btree_map<typename Graph::EdgeId, InnerMapType> >
-class PairedInfoStorage {
-    bool conj_symmetry_;
- public:
-    typedef typename Graph::EdgeId EdgeId;
-    typedef HistogramType Histogram;
-    typedef typename HistogramType::const_iterator HistIterator;
-    typedef InnerMapType InnerMap;
-    typedef typename IndexDataType::const_iterator DataIterator;
-    typedef typename HistogramType::value_type Point;
+    void InsertOrMerge(EdgeId e1, EdgeId e2,
+                       const Point &sp) {
+        auto& straight = storage_[e1][e2];
+        auto si = straight.find(sp);
+        auto rp = -sp;
+        if (si != straight.end()) {
+            MergeData(straight, si, sp);
+            if (!IsSymmetric(e1, e2, sp)) {
+                auto& reversed = storage_[e2][e1];
+                auto ri = reversed.find(rp);
+                MergeData(reversed, ri, rp);
+            }
+        } else {
+            InsertPoint(straight, sp);
+            if (!IsSymmetric(e1, e2, sp)) {
+                auto &reversed = storage_[e2][e1];
+                InsertPoint(reversed, rp);
+            }
+        }
+    }
 
-    PairedInfoStorage(bool conj_symmetry = false)
-            : conj_symmetry_(conj_symmetry), size_(0) {}
+    //Would be faster, but unstable for hash_map due to the iterator invalidation
+    /*void InsertOrMerge(Histogram& straight, Histogram& reversed,
+                       const Point &sp) {
+        auto si = straight.find(sp);
+        auto rp = -sp;
+        if (si != straight.end()) {
+            MergeData(straight, si, sp);
+            auto ri = reversed.find(rp);
+            MergeData(reversed, ri, rp);
+        }
+        else {
+            InsertPoint(reversed, rp);
+            InsertPoint(straight, sp);
+            //if (!IsSymmetric(e1, e2, point)) TODO
 
-    DataIterator data_begin() const {
-        return index_.begin();
-    }
+        }
+    }*/
 
-    DataIterator data_end() const {
-        return index_.end();
+    static bool IsSymmetric(EdgeId e1, EdgeId e2, Point point) {
+        return (e1 == e2) && math::eq(point.d, 0.f);
     }
 
-    bool conj_symmetry() const {
-        return conj_symmetry_;
+    // modifying the histogram
+    inline void InsertPoint(Histogram& histogram, Point point) {
+        histogram.insert(point);
+        ++size_;
     }
 
-    // adding pair infos
-    void AddPairInfo(const std::pair<EdgeId, EdgeId>& edge_pair,
-                     Point point_to_add,
-                     bool add_reversed = true) {
-        AddPairInfo(edge_pair.first, edge_pair.second, point_to_add, add_reversed);
+    void MergeData(Histogram& hist, typename Histogram::iterator to_update, const Point& to_merge) {
+        //We can't just modify the existing point, because if variation is non-zero,
+        //resulting distance will differ
+        auto to_add = *to_update + to_merge;
+        auto after_removed = hist.erase(to_update);
+        hist.insert(after_removed, to_add);
     }
 
-    void AddPairInfo(EdgeId e1, EdgeId e2,
-                     Point point_to_add,
-                     bool add_reversed = true) {
-        Histogram& histogram = index_[e1][e2];
-        HistIterator iterator_to_point = histogram.find(point_to_add);
-
-        if (iterator_to_point != histogram.end())
-            MergeData(e1, e2, *iterator_to_point, point_to_add, add_reversed);
-        else
-            InsertPoint(e1, e2, histogram, point_to_add, add_reversed);
+public:
+    /**
+     * @brief Adds a lot of info from another index, using fast merging strategy.
+     *        Should be used instead of point-by-point index merge.
+     */
+    template<class Index>
+    void Merge(const Index& index_to_add) {
+        auto& base_index = storage_;
+        for (auto AddI = index_to_add.data_begin(); AddI != index_to_add.data_end(); ++AddI) {
+            EdgeId e1_to_add = AddI->first;
+            const auto& map_to_add = AddI->second;
+            InnerMap& map_already_exists = base_index[e1_to_add];
+            MergeInnerMaps(map_to_add, map_already_exists);
+        }
     }
 
-    void DeletePairInfo(EdgeId e1, EdgeId e2,
-                        Point point_to_remove) {
-        Histogram& histogram = index_[e1][e2];
-        histogram.erase(point_to_remove);
+private:
+    template<class OtherMap>
+    void MergeInnerMaps(const OtherMap& map_to_add,
+                        InnerMap& map) {
+        for (const auto& to_add : map_to_add) {
+            Histogram &hist_exists = map[to_add.first];
+            size_ += hist_exists.merge(to_add.second);
+        }
     }
 
-    // erasing specific entry from the index
-    size_t RemovePairInfo(EdgeId e1, EdgeId e2, const Point& point_to_remove) {
-        auto iter = index_.find(e1);
-        if (iter != index_.end()) {
-            InnerMap& map = iter->second;
-            auto iter2 = map.find(e2);
-            if (iter2 != map.end()) {
-                Histogram& hist = iter2->second;
-                size_t success = hist.erase(point_to_remove);
-                if (success == 1)
+public:
+    //--Data deleting methods--
+
+    /**
+     * @brief Removes the specific entry from the index.
+     * @warning Don't use it on unclustered index, because hashmaps require set_deleted_item
+     * @return The number of deleted entries (0 if there wasn't such entry)
+     */
+    size_t Remove(EdgeId e1, EdgeId e2, Point point) {
+        auto res = RemoveImpl(e1, e2, point);
+        auto conj = ConjugatePair(e1, e2);
+        point.d += CalcOffset(e2, e1);
+        res += RemoveImpl(conj.first, conj.second, point);
+        return res;
+    }
+
+    /**
+     * @brief Removes the whole histogram from the index.
+     * @warning Don't use it on unclustered index, because hashmaps require set_deleted_item
+     * @return The number of deleted entries
+     */
+    size_t Remove(EdgeId e1, EdgeId e2) {
+        SwapConj(e1, e2);
+        auto res = RemoveAll(e1, e2);
+        if (e1 != e2)
+            res += RemoveAll(e2, e1);
+        return res;
+    }
+
+private:
+
+    size_t RemoveImpl(EdgeId e1, EdgeId e2, Point point) {
+        auto res = RemoveSingle(e1, e2, point);
+        if (!IsSymmetric(e1, e2, point))
+            res += RemoveSingle(e2, e1, -point);
+        return res;
+    }
+
+    //TODO: remove duplicated code
+    size_t RemoveSingle(EdgeId e1, EdgeId e2, Point point) {
+        auto i1 = storage_.find(e1);
+        if (i1 != storage_.end()) {
+            auto& map = i1->second;
+            auto i2 = map.find(e2);
+            if (i2 != map.end()) {
+                Histogram& hist = i2->second;
+                if (hist.erase(point)) {
                     --size_;
-                if (hist.empty())
-                    map.erase(iter2);
-                if (map.empty())
-                    index_.erase(iter);
-
-                return success;
+                    if (hist.empty()) {
+                        map.erase(e2);
+                        if (map.empty())
+                            storage_.erase(e1);
+                    }
+                    return 1;
+                }
+                return 0;
             }
         }
         return 0;
     }
 
-    void RemovePairInfo(const PairInfo<EdgeId>& info) {
-        this->RemovePairInfo(info.first, info.second, info.point);
-    }
-
-    // removing all points from @e1, @e2 histogram,
-    // returns 1 if operation was successful, 0 if not
-    size_t RemoveEdgePairInfo(EdgeId e1, EdgeId e2) {
-        auto iter = index_.find(e1);
-        if (iter != index_.end()) {
-            InnerMap& map = iter->second;
-            auto iter2 = map.find(e2);
-            if (iter2 != map.end()) {
-                Histogram& hist = iter2->second;
+    size_t RemoveAll(EdgeId e1, EdgeId e2) {
+        auto i1 = storage_.find(e1);
+        if (i1 != storage_.end()) {
+            auto& map = i1->second;
+            auto i2 = map.find(e2);
+            if (i2 != map.end()) {
+                Histogram& hist = i2->second;
                 size_t size_decrease = hist.size();
-                map.erase(iter2);
+                map.erase(i2);
                 size_ -= size_decrease;
                 if (map.empty())
-                    index_.erase(iter);
-
-                return 1;
+                    storage_.erase(i1);
+                return size_decrease;
             }
         }
         return 0;
     }
 
-    // removes all points, which refer to this edge
-    // also removes all backward information
-    void RemoveEdgeInfo(EdgeId edge) {
-        InnerMap& inner_map = index_[edge];
+public:
+
+    /**
+     * @brief Removes all neighbourhood of an edge (all edges referring to it, and their histograms)
+     * @warning Currently doesn't check the conjugate info (should it?), so it may actually
+     *          skip some data.
+     * @return The number of deleted entries
+     */
+    size_t Remove(EdgeId edge) {
+        InnerMap &inner_map = storage_[edge];
         for (auto iter = inner_map.begin(); iter != inner_map.end(); ++iter) {
             EdgeId e2 = iter->first;
-            if (edge != e2)
-                this->RemoveEdgePairInfo(e2, edge);
+            if (edge != e2) {
+                this->Remove(e2, edge);
+            }
         }
         size_t size_of_removed = inner_map.size();
-        index_.erase(edge);
+        storage_.erase(edge);
         size_ -= size_of_removed;
+        return size_of_removed;
     }
 
-    void Clear() {
-        index_.clear();
-        size_ = 0;
-    }
+    // --Accessing--
 
-    size_t Size() const {
-        return size_;
+    /**
+     * @brief Underlying raw implementation data (for custom iterator helpers).
+     */
+    ImplIterator data_begin() const {
+        return storage_.begin();
     }
 
-    // Usual implementation, the same as in the old paired index
-    std::vector<PairInfo<EdgeId> > GetEdgeInfo(EdgeId edge) const {
-        typename IndexDataType::const_iterator iter = index_.find(edge);
-        TRACE("Getting edge info");
-        if (iter == index_.end())
-            return std::vector<PairInfo<EdgeId> >();
-
-        std::vector<PairInfo<EdgeId> > result;
-        result.reserve(iter->second.size());
+    /**
+     * @brief Underlying raw implementation data (for custom iterator helpers).
+     */
+    ImplIterator data_end() const {
+        return storage_.end();
+    }
 
-        for (const auto& entry : iter->second)
-          for (const auto& point : entry.second)
-            result.push_back({edge, entry.first, point});
+    adt::iterator_range<ImplIterator> data() const {
+        return adt::make_range(data_begin(), data_end());
+    }    
 
-        return result;
+    /**
+     * @brief Returns a full proxy map to the neighbourhood of some edge.
+     */
+    EdgeProxy<> Get(EdgeId id) const {
+        return EdgeProxy<>(*this, GetImpl(id), GetImpl(graph_.conjugate(id)), id);
     }
 
-    // faster implementation, but less resolver-friendly
-    // returns InnerMap instead of vector<>,
-    // one can iterate it using FastIterator class
-    const InnerMap GetEdgeInfo(EdgeId edge, int) const {
-        typename IndexDataType::const_iterator iter = index_.find(edge);
-        if (iter == index_.end())
-            return InnerMap();
-        else
-            return iter->second;
+    /**
+     * @brief Returns a raw proxy map to neighboring edges
+     * @details You should use it when you don't care for backward
+     *         and conjugate info, or don't want to process them twice.
+     */
+    EdgeProxy<false> RawGet(EdgeId id) const {
+        return EdgeProxy<false>(*this, GetImpl(id), empty_map_, id);
     }
 
-    const Histogram GetEdgePairInfo(EdgeId e1, EdgeId e2) const {
-        typename IndexDataType::const_iterator iter = index_.find(e1);
-        if (iter == index_.end())
-            return Histogram();
-        else {
-            const InnerMap& inner_map = iter->second;
-            typename InnerMap::const_iterator iter2 = inner_map.find(e2);
-            if (iter2 == inner_map.end())
-                return Histogram();
-            else
-                return iter2->second;
-        }
+    /**
+     * @brief Operator alias of Get(id).
+     */
+    EdgeProxy<> operator[](EdgeId id) const {
+        return Get(id);
     }
 
-    void Prune() {
-        for (auto iter = index_.begin(); iter != index_.end(); ) {
-            // First, remove all the empty Histograms
-            InnerMap& inner_map = iter->second;
-            for (auto it = inner_map.begin(); it != inner_map.end(); ) {
-                if (it->second.empty())
-                    inner_map.erase(it++);
-                else
-                    ++it;
-            }
-
-            // Now, pretty much the same, but the outer stuff
-            if (inner_map.empty())
-                index_.erase(iter++);
-            else
-                ++iter;
-        }
+private:
+    //When there is no such edge, returns a fake empty map for safety
+    const InnerMap& GetImpl(EdgeId e1) const {
+        auto i = storage_.find(e1);
+        if (i == storage_.end())
+            return empty_map_;
+        return i->second;
     }
 
-    // here we trying to insert PairInfo,
-    // if there is no existing PairInfo with the same key
-    // very complicated implementation, but it seems to be faster.
-    template<class Storage>
-    void AddAll(const Storage& index_to_add) {
-        IndexDataType& base_index = this->index_;
-        for (auto AddI = index_to_add.data_begin(), E = index_to_add.data_end(); AddI != E; ++AddI) {
-            EdgeId e1_to_add = AddI->first;
-            const auto& map_to_add = AddI->second;
-            InnerMap& map_already_exists = base_index[e1_to_add];
-            MergeInnerMaps(map_to_add, map_already_exists);
+    //When there is no such histogram, returns a fake empty histogram for safety
+    const Histogram& GetImpl(EdgeId e1, EdgeId e2) const {
+        auto i = storage_.find(e1);
+        if (i != storage_.end()) {
+            auto j = i->second.find(e2);
+            if (j != i->second.end())
+                return j->second;
         }
+        return HistProxy<true>::empty_hist();
     }
 
-    bool contains(EdgeId edge) const { return index_.count(edge); }
-
-    size_t size() const { return size_; }
-
-  private:
-    bool IsSymmetric(EdgeId e1, EdgeId e2,
-                     Point point) const {
-        return (e1 == e2) && math::eq(point.d, 0.f);
+    inline const Histogram& GetImpl(EdgePair e) const {
+        return GetImpl(e.first, e.second);
     }
 
-    // modifying the histogram
-    void InsertPoint(EdgeId e1, EdgeId e2,
-                     Histogram& histogram,
-                     Point new_point,
-                     bool add_reversed) {
-        // first backwards
-        if (add_reversed && !IsSymmetric(e1, e2, new_point)) {
-            index_[e2][e1].insert(-new_point);
-            ++size_;
-        }
+public:
 
-        histogram.insert(new_point);
-        ++size_;
+    /**
+     * @brief Returns a full histogram proxy for all points between two edges.
+     */
+    HistProxy<> Get(EdgeId e1, EdgeId e2) const {
+        auto offset = CalcOffset(e1, e2);
+        return HistProxy<>(GetImpl(e1, e2), GetImpl(ConjugatePair(e1, e2)), offset);
     }
 
-    void UpdateSinglePoint(Histogram &hist, typename Histogram::iterator point_to_update, Point new_point) {
-        typename Histogram::iterator after_removed = hist.erase(point_to_update);
-        hist.insert(after_removed, new_point);
+    /**
+     * @brief Operator alias of Get(e1, e2).
+     */
+    inline HistProxy<> operator[](EdgePair p) const {
+        return Get(p.first, p.second);
     }
 
-    void MergeData(EdgeId e1, EdgeId e2,
-                   Point point_to_update, Point point_to_add,
-                   bool add_reversed) {
-        if (add_reversed) {
-            Histogram& histogram = index_[e2][e1];
-            UpdateSinglePoint(histogram, histogram.find(-point_to_update), -(point_to_update + point_to_add));
-        }
-
-        Histogram& histogram = index_[e1][e2];
-        UpdateSinglePoint(histogram, histogram.find(point_to_update), point_to_update + point_to_add);
+    /**
+     * @brief Returns a raw histogram proxy for only straight points between two edges.
+     */
+    HistProxy<false> RawGet(EdgeId e1, EdgeId e2) const {
+        if (SwapConj(e1, e2))
+            return HistProxy<false>(HistProxy<false>::empty_hist(), GetImpl(e1, e2), CalcOffset(e2, e1));
+        else
+            return HistProxy<false>(GetImpl(e1, e2), HistProxy<false>::empty_hist(), 0);
     }
 
-    void MergeData(Histogram& hist, typename Histogram::iterator to_update,
-                   Point point_to_add) {
-        UpdateSinglePoint(hist, to_update, *to_update + point_to_add);
+    /**
+     * @brief Checks if an edge (or its conjugated twin) is contained in the index.
+     */
+    bool contains(EdgeId edge) const {
+        return storage_.count(edge) + storage_.count(graph_.conjugate(edge)) > 0;
     }
 
-    template<class OtherMap>
-    void MergeInnerMaps(const OtherMap& map_to_add,
-                        InnerMap& map) {
-        typedef typename Histogram::iterator hist_iterator;
-        for (auto I = map_to_add.begin(), E = map_to_add.end(); I != E; ++I) {
-            Histogram &hist_exists = map[I->first];
-            const auto& hist_to_add = I->second;
-
-            for (auto p_it = hist_to_add.begin(), E = hist_to_add.end(); p_it != E; ++p_it) {
-              Point new_point = *p_it;
-              const pair<hist_iterator, bool>& result = hist_exists.insert(new_point);
-              if (!result.second) { // in this case we need to merge two points
-                MergeData(hist_exists, result.first, new_point);
-              } else
-                ++size_;
-            }
-        }
+    /**
+     * @brief Checks if there is a histogram for two edges (or their conjugate pair).
+     */
+    bool contains(EdgeId e1, EdgeId e2) const {
+        auto conj = ConjugatePair(e1, e2);
+        auto i1 = storage_.find(e1);
+        if (i1 != storage_.end() && i1->second.count(e2))
+            return true;
+        auto i2 = storage_.find(conj.first);
+        if (i2 != storage_.end() && i2->second.count(conj.second))
+            return true;
+        return false;
     }
 
-  protected:
-    IndexDataType index_;
-    size_t size_;
-};
-
-template<class Graph>
-using PairedInfoBuffer = PairedInfoStorage<Graph,
-                                           RawHistogram,
-                                           std::unordered_map<typename Graph::EdgeId, RawHistogram>,
-                                           std::unordered_map<typename Graph::EdgeId,
-                                                              std::unordered_map<typename Graph::EdgeId, RawHistogram> > >;
-
-template<class Graph>
-class PairedInfoIndexT: public PairedInfoStorage<Graph> {
-  typedef PairedInfoStorage<Graph> base;
-
-  public:
-    typedef typename base::Histogram Histogram;
-    typedef typename base::DataIterator DataIterator;
-    typedef typename base::InnerMap InnerMap;
-    typedef typename Graph::EdgeId EdgeId;
+    // --Miscellaneous--
 
-    PairedInfoIndexT(const Graph& graph)
-        : base(true), graph_(graph) {}
-
-    ~PairedInfoIndexT() {
-        TRACE("~PairedInfoIndexT ok");
-    }
+    /**
+     * Returns the graph the index is based on. Needed for custom iterators.
+     */
+    const Graph &graph() const { return graph_; }
 
+    /**
+     * @brief Inits the index with graph data. Used in clustered indexes.
+     */
     void Init() {
         for (auto it = graph_.ConstEdgeBegin(); !it.IsEnd(); ++it)
-          this->AddPairInfo(*it, *it, { });
-    }
-
-    // method adds paired info to the conjugate edges
-    void AddConjPairInfo(EdgeId e1, EdgeId e2,
-                         Point point_to_add,
-                         bool add_reversed = 1) {
-        this->AddPairInfo(graph_.conjugate(e2),
-                          graph_.conjugate(e1),
-                          ConjugatePoint(graph_.length(e1), graph_.length(e2), point_to_add),
-                          add_reversed);
-    }
-
-    // prints the contents of index
-    void PrintAll() const {
-        size_t size = 0;
-        for (auto I = this->begin(), E = this->end(); I != E; ++I) {
-            EdgeId e1 = I.first(); EdgeId e2 = I.second();
-            const auto& histogram = *I;
-            size += histogram.size();
-            INFO("Histogram for edges "
-                 << this->g().int_id(e1) << " "
-                 << this->g().int_id(e2));
-            for (const auto& point : histogram) {
-                INFO("    Entry " << point.str());
-            }
-        }
-        VERIFY_MSG(this->size() == size, "Size " << size << " must have been equal to " << this->size());
-    }
-
-    class EdgePairIterator :
-        public boost::iterator_facade<EdgePairIterator,
-                                      const Histogram,
-                                      boost::forward_traversal_tag,
-                                      const Histogram& > {
-
-     public:
-        EdgePairIterator(DataIterator cedge, DataIterator eedge)
-                : cedge_(cedge), eedge_(eedge), sedge_() {
-            if (cedge_ == eedge_)
-                return;
-
-            sedge_ = cedge_->second.begin();
-            skip_empty();
-        }
-
-        EdgeId first() const { return cedge_->first; }
-        EdgeId second() const { return sedge_->first; }
-
-        friend ostream& operator<<(ostream& os, const EdgePairIterator& iter) {
-            return os << iter.first() << " " << iter.second();
-        }
-
-      private:
-        typedef typename InnerMap::const_iterator InnerIterator;
-
-        friend class boost::iterator_core_access;
-
-        void skip_empty() {
-            while (sedge_ == cedge_->second.end()) {
-                ++cedge_;
-                if (cedge_ == eedge_)
-                    break;
-                sedge_ = cedge_->second.begin();
-            }
-        }
-
-        void increment() {
-            ++sedge_;
-            skip_empty();
-        }
-
-        bool equal(const EdgePairIterator &other) const {
-            return other.cedge_ == cedge_ && (cedge_ == eedge_ || other.sedge_ == sedge_);
-        }
-
-        const Histogram& dereference() const {
-            return sedge_->second;
-        }
-
-        DataIterator cedge_, eedge_;
-        InnerIterator sedge_;
-    };
-
-    class EdgeIterator :
-            public boost::iterator_facade<EdgeIterator,
-                                          const std::pair<EdgeId, Point>,
-                                          boost::forward_traversal_tag,
-                                          const std::pair<EdgeId, Point> > {
-        typedef typename Histogram::const_iterator histogram_iterator;
-        typedef typename InnerMap::const_iterator InnerIterator;
-
-      public:
-        EdgeIterator(InnerIterator cedge, InnerIterator eedge)
-                : cedge_(cedge), eedge_(eedge), point_() {
-            if (cedge_ == eedge_)
-                return;
-
-            point_ = cedge_->second.begin();
-            skip_empty();
-        }
-
-      private:
-        friend class boost::iterator_core_access;
-
-        void skip_empty() {
-            while (point_ == cedge_->second.end()) {
-                ++cedge_;
-                if (cedge_ == eedge_)
-                    break;
-                point_ = cedge_->second.begin();
-            }
-        }
-
-        void increment() {
-            ++point_;
-            skip_empty();
-        }
-
-        bool equal(const EdgeIterator &other) const {
-            return other.cedge_ == cedge_ && (cedge_ == eedge_ || other.point_ == point_);
-        }
-
-        const std::pair<EdgeId, Point> dereference() const {
-            return std::make_pair(cedge_->first, *point_);
-        }
-
-        InnerIterator cedge_, eedge_;
-        histogram_iterator point_;
-    };
-
-    EdgePairIterator begin() const {
-        return EdgePairIterator(this->index_.begin(), this->index_.end());
+            Add(*it, *it, Point());
     }
 
-    EdgePairIterator end() const {
-        return EdgePairIterator(this->index_.end(), this->index_.end());
-    }
-
-    EdgeIterator edge_begin(EdgeId edge) const {
-        VERIFY(this->contains(edge));
-        return edge_begin(this->index_.find(edge));
+    /**
+     * @brief Clears the whole index. Used in merging.
+     */
+    void Clear() {
+        storage_.clear();
+        size_ = 0;
     }
 
-    EdgeIterator edge_end(EdgeId edge) const {
-        VERIFY(this->contains(edge));
-        return edge_end(this->index_.find(edge));
-    }
+    /**
+     * @brief Returns the physical index size (total count of all edge pairs)
+     * @warning (not really total, doesn't include the conjugate info)
+     */
+    size_t size() const { return size_; }
 
- private:
-    EdgeIterator edge_begin(DataIterator entry) const {
-      return EdgeIterator(entry->second.begin(), entry->second.end());
-    }
+private:
+    size_t size_;
+    const Graph& graph_;
+    StorageMap storage_;
+    InnerMap empty_map_; //null object
+};
 
-    EdgeIterator edge_end(DataIterator entry) const {
-      return EdgeIterator(entry->second.end(), entry->second.end());
-    }
+//Aliases for common graphs
+template<typename K, typename V>
+using safe_btree_map = btree::safe_btree_map<K, V>; //Two-parameters wrapper
+template<typename Graph>
+using PairedInfoIndexT = PairedIndex<Graph, HistogramWithWeight, safe_btree_map>;
 
-    const Graph& graph_;
+template<typename K, typename V>
+using sparse_hash_map = google::sparse_hash_map<K, V>; //Two-parameters wrapper
+template<typename Graph>
+using UnclusteredPairedInfoIndexT = PairedIndex<Graph, RawHistogram, sparse_hash_map>;
 
-    DECL_LOGGER("PairedInfoIndexT");
-};
+/**
+ * @brief A collection of paired indexes which can be manipulated as one.
+ *        Used as a convenient wrapper in parallel index processing.
+ */
+template<class Index>
+class PairedIndices {
+    typedef std::vector<Index> Storage;
+    Storage data_;
 
-template<class Graph,
-         class IndexT = PairedInfoIndexT<Graph> >
-struct PairedInfoIndicesT {
-    std::vector<IndexT> data_;
+public:
+    PairedIndices() {}
 
-    PairedInfoIndicesT(const Graph& graph, size_t lib_num) {
+    PairedIndices(const typename Index::Graph& graph, size_t lib_num) {
+        data_.reserve(lib_num);
         for (size_t i = 0; i < lib_num; ++i)
             data_.emplace_back(graph);
     }
 
+    /**
+     * @brief Inits all indexes.
+     */
     void Init() { for (auto& it : data_) it.Init(); }
 
+    /**
+     * @brief Clears all indexes.
+     */
     void Clear() { for (auto& it : data_) it.Clear(); }
 
-    IndexT& operator[](size_t i) { return data_[i]; }
+    Index& operator[](size_t i) { return data_[i]; }
 
-    const IndexT& operator[](size_t i) const { return data_[i]; }
+    const Index& operator[](size_t i) const { return data_[i]; }
 
     size_t size() const { return data_.size(); }
+
+    typename Storage::iterator begin() { return data_.begin(); }
+    typename Storage::iterator end() { return data_.end(); }
+
+    typename Storage::const_iterator begin() const { return data_.begin(); }
+    typename Storage::const_iterator end() const { return data_.end(); }
 };
 
 template<class Graph>
-//using UnclusteredPairedInfoIndexT = PairedInfoStorage<Graph, RawHistogram>;
-using UnclusteredPairedInfoIndexT = PairedInfoStorage<Graph,
-                                                      RawHistogram,
-                                                      google::sparse_hash_map<typename Graph::EdgeId, RawHistogram>,
-                                                      google::sparse_hash_map<typename Graph::EdgeId,
-                                                                              google::sparse_hash_map<typename Graph::EdgeId, RawHistogram> > >;
+using PairedInfoIndicesT = PairedIndices<PairedInfoIndexT<Graph>>;
 
 template<class Graph>
-using UnclusteredPairedInfoIndicesT = std::vector<UnclusteredPairedInfoIndexT<Graph> >;
+using UnclusteredPairedInfoIndicesT = PairedIndices<UnclusteredPairedInfoIndexT<Graph>>;
 
-//New metric weight normalizer
+template<typename K, typename V>
+using unordered_map = std::unordered_map<K, V>; //Two-parameters wrapper
 template<class Graph>
-class PairedInfoWeightNormalizer {
-  typedef typename Graph::EdgeId EdgeId;
-  const Graph& g_;
-  const size_t insert_size_;
-  //todo use this param!
-  const double is_var_;
-  const size_t read_length_;
-  const size_t k_;
-  const double avg_coverage_;
-public:
+using PairedInfoBuffer = PairedIndex<Graph, RawHistogram, unordered_map>;
 
-  //Delta better to be around 5-10% of insert size
-  PairedInfoWeightNormalizer(const Graph& g, size_t insert_size,
-      double is_var, size_t read_length, size_t k, double avg_coverage) :
-      g_(g), insert_size_(insert_size), is_var_(is_var), read_length_(
-          read_length), k_(k), avg_coverage_(avg_coverage) {
-  }
-
-  const PairInfo<EdgeId> NormalizeWeightWithCoverage(const PairInfo<EdgeId>& pair_info) {
-      PairInfo<EdgeId> new_info = pair_info;
-      new_info.weight() *= g_.length(pair_info.first) * g_.length(pair_info.second) * 1.
-                        / (g_.coverage(pair_info.first) * g_.coverage(pair_info.second));
-      return new_info;
-  }
-
-  const Point NormalizeWeight(EdgeId e1, EdgeId e2, Point point) const {
-    double w = 0.;
-    if (math::eq(point.d, 0.f) && e1 == e2) {
-      w = 0. + (double) g_.length(e1) - (double) insert_size_ + 2. * (double) read_length_ + 1. - (double) k_;
-    } else {
-      if (math::ls(point.d, 0.f)) {
-        using std::swap;
-        swap(e1, e2);
-      }
-      int gap_len = abs(rounded_d(point)) - (int) g_.length(e1);
-      int right = std::min((int) insert_size_, gap_len + (int) g_.length(e2) + (int) read_length_);
-      int left = std::max(gap_len, (int) insert_size_ - (int) read_length_ - (int) g_.length(e1));
-      w = 0. + (double) (right - left + 1 - (int) k_);
-    }
-
-    double result_weight = point.weight;
-    if (math::gr(w, /*-10.*/0.)) {
-      result_weight /= w; //(w + 10);
-    } else
-      result_weight = 0.;
-
-    double cov_norm_coeff = avg_coverage_ / (2. * (double) (read_length_ - k_));
-    result_weight /= cov_norm_coeff;
-
-    Point result(point);
-    result.weight = result_weight;
-    return result;
-  }
-};
-
-};
+template<class Graph>
+using PairedInfoBuffersT = PairedIndices<PairedInfoBuffer<Graph>>;
+
+/*
+//Debug
+template<typename T>
+std::ostream& operator<<(std::ostream& str, const PairedInfoBuffer<T>& pi) {
+    str << "--- PI of size " << pi.size() << "---\n";
+
+    for (auto i = pi.data_begin(); i != pi.data_end(); ++i) {
+        auto e1 = i->first;
+        str << e1 << " has: \n";
+
+        for (auto j = i->second.begin(); j != i->second.end(); ++j) {
+            str << "- " << j->first << ": ";
+            for (auto p : j->second)
+                str << p << ", ";
+            str << std::endl;
+        }
+    }
 
+    str << "-------\n";
+    return str;
 }
 
-namespace std {
+//Debug
+template<typename T>
+std::ostream& operator<<(std::ostream& str, const PairedInfoIndexT<T>& pi) {
+    str << "--- PI of size " << pi.size() << "---\n";
+
+    for (auto i = pi.data_begin(); i != pi.data_end(); ++i) {
+        auto e1 = i->first;
+        str << e1 << " has: \n";
 
-template<>
-class numeric_limits<omnigraph::de::DEWeight> : public numeric_limits<float> {};
+        for (auto j = i->second.begin(); j != i->second.end(); ++j) {
+            str << "- " << j->first << ": ";
+            for (auto p : j->second)
+                str << p << ", ";
+            str << std::endl;
+        }
+    }
 
-template<>
-class numeric_limits<omnigraph::de::DEDistance> : public numeric_limits<float> {};
+    str << "-------\n";
+    return str;
+}
+*/
 
-};
+}
+
+}
diff --git a/src/include/de/paired_info_helpers.hpp b/src/include/de/paired_info_helpers.hpp
new file mode 100644
index 0000000..9223ad8
--- /dev/null
+++ b/src/include/de/paired_info_helpers.hpp
@@ -0,0 +1,149 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "paired_info.hpp"
+#include "boost/optional.hpp"
+
+namespace omnigraph {
+
+namespace de {
+
+template<typename Index, bool full>
+class EdgePairIterator :
+        public boost::iterator_facade<EdgePairIterator<Index, full>,
+                typename Index::FullHistProxy,
+                boost::forward_traversal_tag,
+                typename Index::FullHistProxy>
+{
+    typedef typename ConjProxy<typename Index::StorageMap>::Iterator OuterIterator;
+    typedef boost::optional<typename Index::InnerMap::const_iterator> InnerIterator;
+
+protected:
+    //They're not intended to be constructed explicitly, only via begin/end.
+    EdgePairIterator(const Index& index, OuterIterator i)
+        : index_(index), i_(i)
+    {
+        StartOver();
+    }
+
+    bool FakePair() {
+        auto ep = std::make_pair(i_->first, (*j_)->first);
+        return ep > index_.ConjugatePair(ep);
+    }
+
+    inline void Skip() { //For a raw iterator, skip conjugate pairs
+        while (!full && j_ && FakePair()) {
+            IncImpl();
+        }
+    }
+
+    void IncImpl() {
+        ++(*j_);
+        if (j_ == i_->second.end()) { //Traversed all neighbours, jump to the next edge
+            ++i_;
+            StartOver();
+        }
+    }
+
+public:
+    void increment() {
+        IncImpl();
+        Skip();
+    }
+
+private:
+    void StartOver() {
+        if (i_.Iter() == index_.data_end()) {
+            j_.reset();
+        } else {
+            j_ = i_->second.begin();
+            Skip();
+        }
+    }
+
+public:
+
+    typename Index::FullHistProxy dereference() const {
+        return index_.Get(first(), second()); //TODO: optimize
+    }
+
+    bool equal(const EdgePairIterator &other) const {
+        return (j_ == other.j_) && (i_.Conj() == other.i_.Conj());
+    }
+
+    typename Index::EdgeId first() const {
+        if (i_.Conj())
+            return index_.graph().conjugate((*j_)->first);
+        return i_->first;
+    }
+
+    typename Index::EdgeId second() const {
+        if (i_.Conj())
+            return index_.graph().conjugate(i_->first);
+        return (*j_)->first;
+    }
+
+    static EdgePairIterator begin(const Index& index) {
+        auto i = OuterIterator(index.data_begin(), index.data_end(), index.data_begin(), !index.size());
+        return EdgePairIterator(index, i);
+    }
+
+    static EdgePairIterator end(const Index& index) {
+        auto stop = full ? index.data_end() : index.data_begin();
+        auto i = OuterIterator(stop, index.data_end(), index.data_begin(), true);
+        return EdgePairIterator(index, i);
+    }
+
+private:
+    const Index &index_;
+    OuterIterator i_;
+    InnerIterator j_;
+};
+
+template<typename Storage>
+inline EdgePairIterator<Storage, true> pair_begin(const Storage &s) {
+    return EdgePairIterator<Storage, true>::begin(s);
+}
+
+template<typename Storage>
+inline EdgePairIterator<Storage, true> pair_end(const Storage &s) {
+    return EdgePairIterator<Storage, true>::end(s);
+}
+
+template<typename Storage>
+inline EdgePairIterator<Storage, false> raw_pair_begin(const Storage &s) {
+    return EdgePairIterator<Storage, false>::begin(s);
+}
+
+template<typename Storage>
+inline EdgePairIterator<Storage, false> raw_pair_end(const Storage &s) {
+    return EdgePairIterator<Storage, false>::end(s);
+}
+
+//Small wrapper for range-based loops
+//Usage: for (auto i in PairsOf(index))
+/*template <typename Storage>
+class PairsOf {
+public:
+    EdgePairIterator<Storage> begin() const{
+        return pair_begin(storage_);
+    }
+
+    EdgePairIterator<Storage> end() const{
+        return pair_begin(storage_);
+    }
+
+    PairsOf(const Storage& storage)
+            : storage_(storage) {}
+private:
+    const Storage& storage_;
+};*/
+
+}
+
+}
diff --git a/src/include/de/smoothing_distance_estimation.hpp b/src/include/de/smoothing_distance_estimation.hpp
index cb36f50..c3c1bf8 100644
--- a/src/include/de/smoothing_distance_estimation.hpp
+++ b/src/include/de/smoothing_distance_estimation.hpp
@@ -26,8 +26,9 @@ protected:
   typedef ExtensiveDistanceEstimator<Graph> base;
   typedef typename base::InPairedIndex InPairedIndex;
   typedef typename base::OutPairedIndex OutPairedIndex;
-  typedef typename InPairedIndex::Histogram InHistogram;
-  typedef typename OutPairedIndex::Histogram OutHistogram;
+  typedef typename base::InHistogram InHistogram;
+  typedef typename base::OutHistogram OutHistogram;
+  typedef typename InPairedIndex::Histogram TempHistogram;
 
  public:
   SmoothingDistanceEstimator(const Graph& graph,
@@ -64,9 +65,9 @@ protected:
   typedef vector<PairInfo<EdgeId> > PairInfos;
   typedef vector<size_t> GraphLengths;
 
-  virtual EstimHist EstimateEdgePairDistances(EdgePair /*ep*/,
-                                              const InHistogram& /*raw_data*/,
-                                              const vector<size_t>& /*forward*/) const {
+  EstimHist EstimateEdgePairDistances(EdgePair /*ep*/,
+                                      const InHistogram& /*raw_data*/,
+                                      const vector<size_t>& /*forward*/) const override {
     VERIFY_MSG(false, "Sorry, the SMOOOOTHING estimator is not available anymore." <<
                "SPAdes is going to terminate");
 
@@ -92,7 +93,7 @@ private:
     size_t first_len = this->graph().length(ep.first);
     size_t second_len = this->graph().length(ep.second);
     TRACE("Lengths are " << first_len << " " << second_len);
-    InHistogram data;
+    TempHistogram data;
     for (auto I = raw_hist.begin(), E = raw_hist.end(); I != E; ++I) {
       Point p = *I;
       if (math::ge(2 * (long) rounded_d(p) + (long) second_len, (long) first_len))
@@ -159,12 +160,13 @@ private:
     return new_result;
     }
 
-  virtual void ProcessEdge(EdgeId e1,
-                           const typename InPairedIndex::InnerMap& inner_map,
-                           PairedInfoBuffer<Graph>& result) const {
+  void ProcessEdge(EdgeId e1,
+                   const InPairedIndex& pi,
+                   PairedInfoBuffer<Graph>& result) const override {
     typename base::LengthMap second_edges;
-    for (auto I = inner_map.begin(), E = inner_map.end(); I != E; ++I)
-      second_edges[I->first];
+    auto inner_map = pi.RawGet(e1);
+    for (auto I : inner_map)
+      second_edges[I.first];
 
     this->FillGraphDistancesLengths(e1, second_edges);
 
@@ -172,13 +174,13 @@ private:
       EdgeId e2 = entry.first;
       EdgePair ep(e1, e2);
 
-      if (ep > this->ConjugatePair(ep))
-          continue;
+      VERIFY(ep <= pi.ConjugatePair(ep));
 
       TRACE("Processing edge pair " << this->graph().int_id(e1)
             << " " << this->graph().int_id(e2));
       const GraphLengths& forward = entry.second;
-      InHistogram hist = inner_map.find(e2)->second;
+
+      TempHistogram hist = pi.Get(e1, e2).Unwrap();
       EstimHist estimated;
       //DEBUG("Extending paired information");
       //DEBUG("Extend left");
@@ -194,7 +196,6 @@ private:
       DEBUG(gap_distances << " distances between gap edge pairs have been found");
       OutHistogram res = this->ClusterResult(ep, estimated);
       this->AddToResult(res, ep, result);
-      this->AddToResult(this->ConjugateInfos(ep, res), this->ConjugatePair(ep), result);
     }
   }
 
@@ -205,7 +206,7 @@ private:
             this->graph().OutgoingEdgeCount(this->graph().EdgeStart(e2)) == 1);
   }
 
-  virtual const string Name() const {
+  const string Name() const override {
     static const string my_name = "SMOOTHING";
     return my_name;
   }
diff --git a/src/include/de/weighted_distance_estimation.hpp b/src/include/de/weighted_distance_estimation.hpp
index 4c034fb..eecb6bc 100644
--- a/src/include/de/weighted_distance_estimation.hpp
+++ b/src/include/de/weighted_distance_estimation.hpp
@@ -23,8 +23,8 @@ class WeightedDistanceEstimator: public DistanceEstimator<Graph> {
   typedef DistanceEstimator<Graph> base;
   typedef typename base::InPairedIndex InPairedIndex;
   typedef typename base::OutPairedIndex OutPairedIndex;
-  typedef typename InPairedIndex::Histogram InHistogram;
-  typedef typename OutPairedIndex::Histogram OutHistogram;
+  typedef typename base::InHistogram InHistogram;
+  typedef typename base::OutHistogram OutHistogram;
 
  public:
   WeightedDistanceEstimator(const Graph &graph,
@@ -47,7 +47,7 @@ class WeightedDistanceEstimator: public DistanceEstimator<Graph> {
 
   virtual EstimHist EstimateEdgePairDistances(EdgePair ep,
                                               const InHistogram& histogram,
-                                              const GraphLengths& raw_forward) const {
+                                              const GraphLengths& raw_forward) const override {
     using std::abs;
     using namespace math;
     TRACE("Estimating with weight function");
@@ -55,11 +55,9 @@ class WeightedDistanceEstimator: public DistanceEstimator<Graph> {
     size_t second_len = this->graph().length(ep.second);
 
     EstimHist result;
-    int maxD = rounded_d(*histogram.rend());
-    int minD = rounded_d(*histogram.rbegin());
+    int maxD = rounded_d(histogram.max()), minD = rounded_d(histogram.min());
     vector<int> forward;
-    for (auto I = raw_forward.begin(), E = raw_forward.end(); I != E; ++I) {
-      int length = (int) *I;
+    for (auto length : raw_forward) {
       if (minD - (int) this->max_distance_ <= length && length <= maxD + (int) this->max_distance_) {
         forward.push_back(length);
       }
@@ -104,7 +102,7 @@ class WeightedDistanceEstimator: public DistanceEstimator<Graph> {
     return result;
   }
 
-  virtual const string Name() const {
+  const string Name() const override {
     static const string my_name = "WEIGHTED";
     return my_name;
   }
diff --git a/src/include/func.hpp b/src/include/func.hpp
index 1b9baf1..6420d70 100644
--- a/src/include/func.hpp
+++ b/src/include/func.hpp
@@ -50,126 +50,20 @@ class NotOperator;
 template<class T>
 class Predicate: public Func<T, bool> {
 public:
+    typedef T checked_type;
+
 	bool Apply(T t) const {
 		return Check(t);
 	}
 
 	virtual bool Check(T t) const = 0;
 
-	virtual ~Predicate() {
-	}
-};
-
-template<class T>
-class AdaptorPredicate: public Predicate<T> {
-    typedef std::function<bool(T)> pred_func_t;
-    pred_func_t pred_f_;
-public:
-    AdaptorPredicate(pred_func_t pred_f) :
-        pred_f_(pred_f) {
-    }
-
-    bool Check(T t) const {
-        return pred_f_(t);
-    }
-};
-
-//template<class T>
-//const shared_ptr<Predicate<T>> operator &&(const shared_ptr<Predicate<T>>& a, const shared_ptr<Predicate<T>>& b) {
-//	return AndOperator<T>(a, b);
-//}
-//
-//template<class T>
-//const shared_ptr<Predicate<T>> operator ||(const shared_ptr<Predicate<T>>& a, const shared_ptr<Predicate<T>>& b) {
-//	return OrOperator<T>(a, b);
-//}
-//
-//template<class T>
-//const shared_ptr<Predicate<T>> operator !(const shared_ptr<Predicate<T>>& a) {
-//	return NotOperator<T>(a);
-//}
-
-template<class T>
-const shared_ptr<Predicate<T>> And(const shared_ptr<Predicate<T>>& a,
-		const shared_ptr<Predicate<T>>& b) {
-	return make_shared<AndOperator<T>>(a, b);
-}
-
-template<class T>
-const shared_ptr<Predicate<T>> Or(const shared_ptr<Predicate<T>>& a,
-		const shared_ptr<Predicate<T>>& b) {
-	return make_shared<OrOperator<T>>(a, b);
-}
-
-template<class T>
-const shared_ptr<Predicate<T>> Not(const shared_ptr<Predicate<T>>& a) {
-	return make_shared<NotOperator<T>>(a);
-}
-
-template<class T>
-class AlwaysTrue: public Predicate<T> {
-public:
-
-	bool Check(T /*t*/) const {
-		return true;
-	}
-
-};
-
-template<class T>
-class AlwaysFalse: public Predicate<T> {
-public:
-
-	bool Check(T /*t*/) const {
-		return false;
-	}
-
-};
-
-template<class T>
-class NotOperator: public Predicate<T> {
-	shared_ptr<Predicate<T>> a_;
-
-public:
-	NotOperator(const shared_ptr<Predicate<T>>& a) :
-			a_(a) {
-	}
-
-	bool Check(T t) const {
-		return !a_->Check(t);
-	}
-};
+    bool operator()(T t) const { return Check(t); }
+    
 
-template<class T>
-class AndOperator: public Predicate<T> {
-	shared_ptr<Predicate<T>> a_;
-	shared_ptr<Predicate<T>> b_;
-
-public:
-	AndOperator(const shared_ptr<Predicate<T>>& a,
-			const shared_ptr<Predicate<T>>& b) :
-			a_(a), b_(b) {
-	}
-
-	bool Check(T t) const {
-		return a_->Check(t) && b_->Check(t);
+	virtual ~Predicate() {
 	}
 };
 
-template<class T>
-class OrOperator: public Predicate<T> {
-	shared_ptr<Predicate<T>> a_;
-	shared_ptr<Predicate<T>> b_;
-
-public:
-	OrOperator(const shared_ptr<Predicate<T>>& a,
-			const shared_ptr<Predicate<T>>& b) :
-			a_(a), b_(b) {
-	}
-
-	bool Check(T t) const {
-		return a_->Check(t) || b_->Check(t);
-	}
-};
 
 }
diff --git a/src/include/graph_print_utils.hpp b/src/include/graph_print_utils.hpp
index f0cdb68..a10fcec 100755
--- a/src/include/graph_print_utils.hpp
+++ b/src/include/graph_print_utils.hpp
@@ -153,7 +153,7 @@ public:
 	void recordVertex(Vertex vertex) {
 		this->recordVertexId(vertex.id_);
 		this->os() << "[";
-		this->recordParameter("label", vertex.label_);
+		this->recordParameterInQuotes("label", vertex.label_);
 		this->os() << ",";
 		this->recordParameter("style", "filled");
 		this->os() << ",";
@@ -289,6 +289,8 @@ public:
 		this->recordParameter("label", constructComplexNodeLabel(vertex));
 		this->os() << ",";
 		this->recordParameter("color", "black");
+		this->os() << ",";
+		this->recordParameter("URL", "/vertex/" + std::to_string(vertex.first.id_) + ".svg");
 		this->os() << "]" << endl;
 	}
 
diff --git a/src/include/io/kmer_iterator.hpp b/src/include/io/kmer_iterator.hpp
new file mode 100644
index 0000000..4ece433
--- /dev/null
+++ b/src/include/io/kmer_iterator.hpp
@@ -0,0 +1,54 @@
+#ifndef __IO_KMER_ITERATOR_HPP__
+#define __IO_KMER_ITERATOR_HPP__
+
+#include "io/mmapped_reader.hpp"
+#include <string>
+
+namespace io {
+
+template<class Seq>
+using raw_kmer_iterator = MMappedFileRecordArrayIterator<typename Seq::DataType>;
+
+template<class Seq>
+raw_kmer_iterator<Seq> make_kmer_iterator(const std::string &FileName,
+                                          unsigned K) {
+  return raw_kmer_iterator<Seq>(FileName, Seq::GetDataSize(K));
+}
+
+template<class Seq>
+std::vector<raw_kmer_iterator<Seq>> make_kmer_iterator(const std::string &FileName,
+                                                       size_t K, size_t amount) {
+  std::vector<raw_kmer_iterator<Seq>> res;  
+  if (amount == 1) {
+      res.emplace_back(FileName, Seq::GetDataSize(K));
+    return res;
+  }
+
+  // Determine the file size
+  struct stat buf;
+  VERIFY_MSG(stat(FileName.c_str(), &buf) != -1,
+             "stat(2) failed. Reason: " << strerror(errno) << ". Error code: " << errno);
+  size_t file_size = buf.st_size;
+
+  // Now start creating the iterators keeping in mind, that offset should be
+  // multiple of page size.
+  size_t chunk = round_up(file_size / amount, getpagesize() * Seq::GetDataSize(K) * sizeof(typename Seq::DataType));
+  size_t offset = 0;
+  if (chunk > file_size)
+    chunk = file_size;
+
+  while (offset < file_size) {
+    res.emplace_back(FileName, Seq::GetDataSize(K),
+                     offset,
+                     offset + chunk > file_size ? file_size - offset : chunk);
+    offset += chunk;
+  }
+  
+  return res;
+}
+
+
+
+};
+
+#endif
diff --git a/src/include/io/library.hpp b/src/include/io/library.hpp
index 101627a..3387d90 100644
--- a/src/include/io/library.hpp
+++ b/src/include/io/library.hpp
@@ -129,6 +129,10 @@ class SequencingLibraryBase {
     return paired_reads_iterator(left_paired_reads_.end(), right_paired_reads_.end());
   }
 
+  adt::iterator_range<paired_reads_iterator> paired_reads() const {
+    return adt::make_range(paired_begin(), paired_end());
+  }  
+    
   single_reads_iterator reads_begin() const {
     // NOTE: We have a contract with single_end here. Single reads always go last!
     single_reads_iterator res(left_paired_reads_.begin(), left_paired_reads_.end());
@@ -142,6 +146,10 @@ class SequencingLibraryBase {
     return single_reads_iterator(single_reads_.end(), single_reads_.end());
   }
 
+  adt::iterator_range<single_reads_iterator> reads() const {
+    return adt::make_range(reads_begin(), reads_end());
+  }  
+    
   single_reads_iterator single_begin() const {
     return single_reads_iterator(single_reads_.begin(), single_reads_.end());
   }
@@ -150,12 +158,28 @@ class SequencingLibraryBase {
     return single_reads_iterator(single_reads_.end(), single_reads_.end());
   }
 
+  adt::iterator_range<single_reads_iterator> single_reads() const {
+    return adt::make_range(single_begin(), single_end());
+  }
+    
   bool is_graph_contructable() const {
     return (type_ == io::LibraryType::PairedEnd ||
             type_ == io::LibraryType::SingleReads ||
             type_ == io::LibraryType::HQMatePairs);
   }
 
+  bool is_bwa_alignable() const {
+    return type_ == io::LibraryType::MatePairs;
+  }
+
+  bool is_mismatch_correctable() const {
+      return is_graph_contructable();
+  }
+
+  bool is_binary_covertable() {
+      return is_graph_contructable() || is_mismatch_correctable() || is_paired();
+  }
+
   bool is_paired() const {
     return (type_ == io::LibraryType::PairedEnd ||
             type_ == io::LibraryType::MatePairs||
@@ -276,11 +300,11 @@ class DataSet {
   iterator end() { return libraries_.end(); }
   const_iterator end() const { return libraries_.end(); }
 
-  iterator_range<iterator> libraries() {
-      return iterator_range<iterator>(library_begin(), library_end());
+  adt::iterator_range<iterator> libraries() {
+    return adt::make_range(library_begin(), library_end());
   }
-  iterator_range<const_iterator> libraries() const {
-      return iterator_range<const_iterator>(library_begin(), library_end());
+  adt::iterator_range<const_iterator> libraries() const {
+    return adt::make_range(library_begin(), library_end());
   }
 
   single_reads_iterator reads_begin() const {
@@ -295,8 +319,8 @@ class DataSet {
   single_reads_iterator reads_end() const {
     return single_reads_iterator(libraries_.back().reads_end(), libraries_.back().reads_end());
   }
-  iterator_range<single_reads_iterator> reads() {
-      return iterator_range<iterator>(reads_begin(), reads_end());
+  adt::iterator_range<single_reads_iterator> reads() {
+    return adt::make_range(reads_begin(), reads_end());
   }
 
   single_reads_iterator single_begin() const {
@@ -311,11 +335,10 @@ class DataSet {
   single_reads_iterator single_end() const {
     return single_reads_iterator(libraries_.back().single_end(), libraries_.back().single_end());
   }
-  iterator_range<single_reads_iterator> single_reads() {
-    return iterator_range<single_reads_iterator>(single_begin(), single_end());
+  adt::iterator_range<single_reads_iterator> single_reads() {
+    return adt::make_range(single_begin(), single_end());
   }
 
-
   paired_reads_iterator paired_begin() const {
     auto it = libraries_.begin();
     paired_reads_iterator res(it->paired_begin(), it->paired_end());
@@ -328,9 +351,10 @@ class DataSet {
   paired_reads_iterator paired_end() const {
     return paired_reads_iterator(libraries_.back().paired_end(), libraries_.back().paired_end());
   }
-  iterator_range<paired_reads_iterator> paired_reads() {
-      return iterator_range<paired_reads_iterator>(paired_begin(), paired_end());
-  }
+
+  adt::iterator_range<paired_reads_iterator> paired_reads() const {
+    return adt::make_range(paired_begin(), paired_end());
+  } 
 
  private:
   LibraryStorage libraries_;
diff --git a/src/include/io/mmapped_reader.hpp b/src/include/io/mmapped_reader.hpp
index e2230ae..f5ee92b 100644
--- a/src/include/io/mmapped_reader.hpp
+++ b/src/include/io/mmapped_reader.hpp
@@ -25,6 +25,7 @@
 #include <cerrno>
 
 #include <string>
+#include <algorithm>
 
 class MMappedReader {
   int StreamFile;
@@ -38,12 +39,19 @@ class MMappedReader {
       munmap(MappedRegion, BlockSize);
 
     BlockOffset += BlockSize;
+
+    if (BlockOffset + BlockSize > FileSize)
+      BlockSize = FileSize - BlockOffset;
+
     // We do not add PROT_WRITE here intentionaly - remapping and write access
     // is pretty error-prone.
-    MappedRegion =
-        (uint8_t*)mmap(NULL, BlockSize,
-                       PROT_READ, MAP_FILE | MAP_PRIVATE,
-                       StreamFile, BlockOffset);
+    if (BlockSize)
+      MappedRegion =
+          (uint8_t*)mmap(NULL, BlockSize,
+                         PROT_READ, MAP_FILE | MAP_PRIVATE,
+                         StreamFile, InitialOffset + BlockOffset);
+    else
+      MappedRegion = NULL;
     VERIFY_MSG((intptr_t)MappedRegion != -1L,
                "mmap(2) failed. Reason: " << strerror(errno) << ". Error code: " << errno);
   }
@@ -56,22 +64,24 @@ class MMappedReader {
  protected:
   uint8_t* MappedRegion;
   size_t FileSize, BlockOffset, BytesRead, BlockSize;
+  off_t InitialOffset;
 
  public:
   MMappedReader()
-      : StreamFile(-1), Unlink(false), FileName(""), MappedRegion(0), FileSize(0), BytesRead(0)
+      : StreamFile(-1), Unlink(false), FileName(""), MappedRegion(0), FileSize(0), BytesRead(0), InitialOffset(0)
     {}
 
   MMappedReader(const std::string &filename, bool unlink = false,
-                size_t blocksize = 64*1024*1024, size_t off = 0, size_t sz = 0)
+                size_t blocksize = 64*1024*1024, off_t off = 0, size_t sz = 0)
       : Unlink(unlink), FileName(filename), BlockSize(blocksize) {
     struct stat buf;
 
-    FileSize = (sz ? sz : (stat(FileName.c_str(), &buf) != 0 ? 0 : buf.st_size));
+    InitialOffset = off;
+    FileSize = (sz ? sz : (stat(FileName.c_str(), &buf) != 0 ? 0 : buf.st_size - InitialOffset));
 
     StreamFile = open(FileName.c_str(), O_RDONLY);
     VERIFY_MSG(StreamFile != -1,
-               "open(2) failed. Reason: " << strerror(errno) << ". Error code: " << errno);
+               "open(2) failed. Reason: " << strerror(errno) << ". Error code: " << errno << ". File: " << FileName);
 
     if (BlockSize != -1ULL) {
       size_t PageSize = getpagesize();
@@ -82,7 +92,7 @@ class MMappedReader {
     if (BlockSize) {
       MappedRegion =
           (uint8_t*)mmap(NULL, BlockSize, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE,
-                         StreamFile, off);
+                         StreamFile, InitialOffset);
       VERIFY_MSG((intptr_t)MappedRegion != -1L,
                  "mmap(2) failed. Reason: " << strerror(errno) << ". Error code: " << errno);
     } else
@@ -101,11 +111,12 @@ class MMappedReader {
     FileName = std::move(other.FileName);
     Unlink = other.Unlink;
     StreamFile = other.StreamFile;
+    InitialOffset = other.InitialOffset;
 
     // Now, zero out inside other, so we won't do crazy thing in dtor
-    StreamFile = -1;
-    Unlink = false;
-    MappedRegion = 0;
+    other.StreamFile = -1;
+    other.Unlink = false;
+    other.MappedRegion = 0;
   }
 
   MMappedReader& operator=(MMappedReader &&other) {
@@ -190,7 +201,7 @@ class MMappedRecordReader : public MMappedReader {
 
   MMappedRecordReader(const std::string &FileName, bool unlink = true,
                       size_t blocksize = 64*1024*1024 / (sizeof(T) * (unsigned)getpagesize()) * (sizeof(T) * (unsigned)getpagesize()),
-                      size_t off = 0, size_t sz = 0):
+                      off_t off = 0, size_t sz = 0):
       MMappedReader(FileName, unlink, blocksize, off, sz) {
     VERIFY(FileSize % sizeof(T) == 0);
   }
@@ -266,7 +277,7 @@ class MMappedRecordArrayReader : public MMappedReader {
   MMappedRecordArrayReader(const std::string &FileName,
                            size_t elcnt = 1,
                            bool unlink = true,
-                           size_t off = 0, size_t sz = 0):
+                           off_t off = 0, size_t sz = 0):
       MMappedReader(FileName, unlink, -1ULL, off, sz), elcnt_(elcnt) {
     VERIFY(FileSize % (sizeof(T) * elcnt_) == 0);
   }
@@ -291,6 +302,10 @@ class MMappedRecordArrayReader : public MMappedReader {
   const_iterator cend() const { return const_iterator(data() + size()*elcnt_, elcnt_); }
 };
 
+static inline size_t round_up(size_t value, size_t boundary) {
+    return (value + boundary - 1) / boundary * boundary;
+}
+
 template<class T>
 class MMappedFileRecordArrayIterator :
         public boost::iterator_facade<MMappedFileRecordArrayIterator<T>,
@@ -299,28 +314,37 @@ class MMappedFileRecordArrayIterator :
                                       const T*> {
   public:
     // Default ctor, used to implement "end" iterator
-    MMappedFileRecordArrayIterator(): value_(NULL), reader_(), elcnt_(0), good_(false) {}
-    MMappedFileRecordArrayIterator(const std::string &FileName, size_t elcnt)
+    MMappedFileRecordArrayIterator(): value_(NULL), array_size_(0), reader_(), good_(false) {}
+    MMappedFileRecordArrayIterator(const std::string &FileName,
+                                   size_t elcnt,
+                                   off_t offset = 0, size_t filesize = 0)
             : value_(NULL),
+              array_size_(sizeof(T) * elcnt),
               reader_(FileName, false,
-                      64*1024*1024 / (sizeof(T) * (unsigned)getpagesize() * elcnt) * (sizeof(T) * (unsigned)getpagesize() * elcnt)),
-              elcnt_(elcnt), good_(true) {
+                      round_up(filesize > 0 ? std::min(size_t(64 * 1024 * 1024), filesize) : 64 * 1024 * 1024, array_size_ * (unsigned)getpagesize()),
+                      offset, filesize),
+              good_(false) {
         increment();
     }
     MMappedFileRecordArrayIterator(MMappedRecordReader<T> &&reader, size_t elcnt)
-            : value_(NULL), reader_(std::move(reader)), elcnt_(elcnt), good_(true) {
+            : value_(NULL), array_size_(sizeof(T) * elcnt), reader_(std::move(reader)), good_(false) {
         increment();
     }
+    MMappedFileRecordArrayIterator(const MMappedFileRecordArrayIterator&) = delete;
+
+    MMappedFileRecordArrayIterator(MMappedFileRecordArrayIterator&& other)
+            : value_(other.value_), array_size_(other.array_size_),
+              reader_(std::move(other.reader_)), good_(other.good_) {}
 
     bool good() const { return good_; }
+    const MMappedRecordReader<T>& reader() const { return reader_; }
 
   private:
     friend class boost::iterator_core_access;
 
     void increment() {
         good_ = reader_.good();
-        if (good_)
-            value_ = (T*)reader_.skip(elcnt_ * sizeof(T));
+        value_ = (good_ ? (T*)reader_.skip(array_size_) : NULL);
     }
     bool equal(const MMappedFileRecordArrayIterator &other) const {
         return value_ == other.value_;
@@ -328,8 +352,8 @@ class MMappedFileRecordArrayIterator :
     const T* dereference() const { return value_; }
 
     T* value_;
+    size_t array_size_;
     MMappedRecordReader<T> reader_;
-    size_t elcnt_;
     bool good_;
 };
 
diff --git a/src/include/io/paired_read.hpp b/src/include/io/paired_read.hpp
index edb3156..fd3fbd0 100644
--- a/src/include/io/paired_read.hpp
+++ b/src/include/io/paired_read.hpp
@@ -14,72 +14,35 @@
 
 namespace io {
 
-/**
- * It includes 2 SingleRead elements and the insert size.
- */
+
 class PairedRead {
  public:
   typedef SingleRead SingleReadT;
   typedef int16_t size_type;
 
-  /*
-   * Default constructor.
-   */
   PairedRead() : first_(), second_(), insert_size_(0) {}
 
-  /*
-   * Conctructor from SingleReads.
-   *
-   * @param first First SingleRead in the pair.
-   * @param second Second SingleRead in the pair.
-   * @param insert_size Insert size of the paired read.
-   */
   PairedRead(const SingleRead& first,
              const SingleRead& second,
              size_t insert_size)
       : first_(first), second_(second), insert_size_(insert_size) {}
 
-  /*
-   * Return first SingleRead in the pair.
-   *
-   * @return First SingleRead.
-   */
   const SingleRead& first() const {
     return first_;
   }
 
-  /*
-   * Return second SingleRead in the pair.
-   *
-   * @return Second SingleRead.
-   */
   const SingleRead& second() const {
     return second_;
   }
 
-  /*
-   * Return insert_size of PairedRead.
-   *
-   * @return Insert size.
-   */
   size_t insert_size() const {
     return insert_size_;
   }
 
-  /*
-   * Return distance between single reads.
-   *
-   * @return Distance.
-   */
   size_t distance() const {
     return insert_size_ - second_.size();
   }
 
-  /*
-   * Return gap between single reads.
-   *
-   * @return Gap.
-   */
   size_t gap() const {
     return insert_size_ - first_.size() - second_.size();
   }
@@ -92,23 +55,10 @@ class PairedRead {
     return first_.size() + second_.size();
   }
 
-  /*
-   * Check whether PairedRead is valid.
-   *
-   * @return true if PairedRead is valid (both SingleReads are
-   * correct), and false otherwise.
-   */
   bool IsValid() const {
     return first_.IsValid() && second_.IsValid();
   }
 
-  /*
-   * Return ith SingleRead of pair (0th or 1st). If index
-   * is not 0 or 1, the assertion happens.
-   *
-   * @param i SingleRead index.
-   * @return SingleRead on ith position of pair.
-   */
   const SingleRead& operator[] (size_t i) const {
     if (i == 0) {
       return first_;
@@ -119,25 +69,10 @@ class PairedRead {
     return first_;
   }
 
-  /*
-   * Return reversed complimentary PairedRead (PairedRead with
-   * reserve complimentary first and second SingleReads
-   * and the same insert size).
-   *
-   * @return Reversed complimentary PairedRead.
-   */
   const PairedRead operator!() const {
     return PairedRead(!second_, !first_, insert_size_);
   }
 
-  /*
-   * Check whether two PairedReads are equal.
-   *
-   * @param pairedread The PairedRead we want to compare ours with.
-   * @return true if these two PairedReads have similar
-   * first and second SingleReads and insert size,
-   * and false otherwise.
-   */
   bool operator==(const PairedRead& pairedread) const {
     return first_ == pairedread.first_ &&
         second_ == pairedread.second_ &&
@@ -148,9 +83,6 @@ class PairedRead {
     first_.BinWrite(file, rc1);
     second_.BinWrite(file, rc2);
 
-    size_type is = (size_type)insert_size_;
-    file.write((const char *) &is, sizeof(is));
-
     return !file.fail();
   }
 
@@ -160,17 +92,8 @@ class PairedRead {
   }
 
  private:
-  /*
-   * @variable First SingleRead in the pair.
-   */
   SingleRead first_;
-  /*
-   * @variable Second SingleRead in the pair.
-   */
   SingleRead second_;
-  /*
-   * @variable Insert size between two SingleReads.
-   */
   size_t insert_size_;
 
 };
@@ -191,30 +114,17 @@ class PairedReadSeq {
  public:
   PairedReadSeq() : first_(), second_(), insert_size_(0) {}
 
-  //    PairedReadSeq(std::istream& file, size_t is): first_(file), second_(file) {
-  //        PairedRead::size_type is_delta;
-  //        file.read((char *) &is_delta, sizeof(is_delta));
-  //
-  //        insert_size_ = is + is_delta;
-  //    }
-
   bool BinRead(std::istream& file, size_t is = 0) {
     first_.BinRead(file);
     second_.BinRead(file);
 
-    PairedRead::size_type is_delta;
-    file.read((char *) &is_delta, sizeof(is_delta));
-
-    insert_size_ = is + is_delta;
+    insert_size_ = is - (size_t) first_.GetLeftOffset() - (size_t) second_.GetRightOffset();
     return !file.fail();
   }
 
-  bool BinWrite(std::ostream& file) const {
-    first_.BinWrite(file);
-    second_.BinWrite(file);
-
-    PairedRead::size_type is = (PairedRead::size_type)insert_size_;
-    file.write((const char *) &is, sizeof(is));
+  bool BinWrite(std::ostream& file, bool rc1 = false, bool rc2 = false) const {
+    first_.BinWrite(file, rc1);
+    second_.BinWrite(file, rc2);
 
     return !file.fail();
   }
diff --git a/src/include/io/read.hpp b/src/include/io/read.hpp
index ebd1221..a7a0dbe 100644
--- a/src/include/io/read.hpp
+++ b/src/include/io/read.hpp
@@ -152,7 +152,9 @@ public:
       qual_[i] = (char)(qual_[i] - offset);
     }
   }
-
+  void setName(const char* s) {
+    name_ = s;
+  }
 
   Read()
       : valid_(false), ltrim_(0), rtrim_(0), initial_size_(0) {
@@ -180,9 +182,6 @@ private:
   int initial_size_;
   friend class ireadstream;
   friend uint32_t TrimBadQuality(Read*, int);
-  void setName(const char* s) {
-    name_ = s;
-  }
   bool updateValid() const {
     if (seq_.size() == 0) {
       return false;
diff --git a/src/corrector/read.hpp b/src/include/io/sam/read.hpp
similarity index 50%
rename from src/corrector/read.hpp
rename to src/include/io/sam/read.hpp
index 07800cc..4617b65 100644
--- a/src/corrector/read.hpp
+++ b/src/include/io/sam/read.hpp
@@ -5,44 +5,22 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-/*
- * read.hpp
- *
- *  Created on: Jun 26, 2014
- *      Author: lab42
- */
-#include "positional_read.hpp"
-
 #include "samtools/bam.h"
 
 #include <string>
 #include <unordered_map>
+#include <samtools/bam.h>
 
 #pragma once
 
-namespace corrector {
+namespace sam_reader {
 
-struct SingleSamRead {
+class SingleSamRead {
+private:
     bam1_t *data_;
-    size_t get_data_len() const {
-        return data_->core.l_qseq;
-    }
-    size_t get_cigar_len() const {
-        return data_->core.n_cigar;
-    }
-    int get_contig_id() const {
-        return data_->core.tid;
-    }
-    void set_data(bam1_t *seq_) {
-        bam_destroy1(data_);
-        data_ = bam_dup1( seq_);
-    }
 
-    std::string get_cigar() const;
-    std::string get_name() const;
-    std::string get_seq() const;
+public:
 
-    int CountPositions(std::unordered_map<size_t, position_description> &ps, const size_t &contig_length) const;
     SingleSamRead() {
         data_ = bam_init1();
     }
@@ -57,19 +35,82 @@ struct SingleSamRead {
         data_ = bam_dup1(c.data_);
         return *this;
     }
+
+    int32_t data_len() const {
+        return data_->core.l_qseq;
+    }
+
+    uint32_t cigar_len() const {
+        return data_->core.n_cigar;
+    }
+
+    int contig_id() const {
+        return data_->core.tid;
+    }
+
+    bool is_aligned() const {
+        return (data_->core.flag & 0x4) == 0;
+    }
+
+    bool is_main_alignment() const {
+        return (data_->core.flag & 0x900) == 0;
+    }
+
+    bool is_properly_aligned() const {
+        return is_aligned() && is_main_alignment() && map_qual() != 0;
+    }
+
+    bool strand() const {
+        return (data_->core.flag & 0x10) == 0;
+    }
+
+    uint32_t map_qual() const {
+        return data_->core.qual;
+    }
+
+    int32_t pos() const {
+        return data_->core.pos;
+    }
+
+    uint32_t* cigar_ptr() const {
+        return bam1_cigar(data_);
+    }
+
+    uint8_t* seq_ptr() const {
+        return bam1_seq(data_);
+    }
+
+    std::string cigar() const;
+    std::string name() const;
+    std::string seq() const;
+
+    void set_data(bam1_t *seq_) {
+        bam_destroy1(data_);
+        data_ = bam_dup1( seq_);
+    }
 };
 
-struct PairedSamRead {
+class PairedSamRead {
+private:
     SingleSamRead r1;
     SingleSamRead r2;
-    PairedSamRead(): r1(), r2(){
+
+public:
+    PairedSamRead(): r1(), r2() {
     }
 
     PairedSamRead(SingleSamRead &a1, SingleSamRead &a2) {
         r1 = a1;
         r2 = a2;
     }
-    int CountPositions(std::unordered_map<size_t, position_description> &ps, const size_t &contig_length) const;
+
+    const SingleSamRead& Left() const {
+        return r1;
+    }
+
+    const SingleSamRead& Right() const {
+        return r2;
+    }
 };
 }
 ;
diff --git a/src/corrector/sam_reader.hpp b/src/include/io/sam/sam_reader.hpp
similarity index 93%
rename from src/corrector/sam_reader.hpp
rename to src/include/io/sam/sam_reader.hpp
index c0d3925..63d262b 100644
--- a/src/corrector/sam_reader.hpp
+++ b/src/include/io/sam/sam_reader.hpp
@@ -14,7 +14,8 @@
 #include <samtools/bam.h>
 
 #include <string>
-namespace corrector {
+
+namespace sam_reader {
 
 class MappedSamStream {
 public:
@@ -30,7 +31,7 @@ public:
     bool eof() const;
     MappedSamStream& operator >>(SingleSamRead& read);
     MappedSamStream& operator >>(PairedSamRead& read);
-const char* get_contig_name(size_t i) const;
+    const char* get_contig_name(int i) const;
     void close();
     void reset();
 
diff --git a/src/include/io/single_read.hpp b/src/include/io/single_read.hpp
index 2891cba..287b733 100644
--- a/src/include/io/single_read.hpp
+++ b/src/include/io/single_read.hpp
@@ -32,95 +32,67 @@ enum OffsetType {
 
 //todo extract code about offset from here
 
-/**
- * It includes 3 strings: with id, sequence and quality of the input read.
- */
+typedef uint16_t SequenceOffsetT;
+
+
 class SingleRead {
  public:
+
   static std::string EmptyQuality(const std::string& seq) {
     return std::string(seq.size(), (char) 33);
   }
 
   static const int BAD_QUALITY_THRESHOLD = 2;
 
-  /*
-   * Default constructor.
-   */
   SingleRead() :
-      name_(""), seq_(""), qual_(""), valid_(false) {
+      name_(""), seq_(""), qual_(""), left_offset_(0), right_offset_(0), valid_(false) {
+      DEBUG(name_ << " created");
   }
 
-  /*
-   * Test constructor.
-   *
-   * @param name The name of the SingleRead (id in input file).
-   * @param seq The sequence of ATGC letters.
-   * @param qual The quality of the SingleRead sequence.
-   */
   SingleRead(const std::string& name, const std::string& seq,
-             const std::string& qual, OffsetType offset) :
-      name_(name), seq_(seq), qual_(qual) {
+             const std::string& qual, OffsetType offset,
+             SequenceOffsetT left_offset = 0, SequenceOffsetT right_offset = 0) :
+      name_(name), seq_(seq), qual_(qual), left_offset_(left_offset), right_offset_(right_offset) {
     Init();
+    DEBUG(name_ << " created");
     for (size_t i = 0; i < qual_.size(); ++i) {
       qual_[i] = (char)(qual_[i] - offset);
     }
   }
 
   SingleRead(const std::string& name, const std::string& seq,
-             const std::string& qual) :
-      name_(name), seq_(seq), qual_(qual) {
-    Init();
+             const std::string& qual,
+             SequenceOffsetT left_offset = 0, SequenceOffsetT right_offset = 0) :
+      name_(name), seq_(seq), qual_(qual), left_offset_(left_offset), right_offset_(right_offset) {
+      DEBUG(name_ << " created");
+      Init();
   }
 
-  SingleRead(const std::string& name, const std::string& seq) :
-      name_(name), seq_(seq), qual_(EmptyQuality(seq_)) {
+  SingleRead(const std::string& name, const std::string& seq,
+             SequenceOffsetT left_offset = 0, SequenceOffsetT right_offset = 0) :
+      name_(name), seq_(seq), qual_(EmptyQuality(seq_)), left_offset_(left_offset), right_offset_(right_offset) {
+      DEBUG(name_ << " created");
     Init();
   }
 
-  /*
-   * Check whether SingleRead is valid.
-   *
-   * @return true if SingleRead is valid (there is no N in sequence
-   * and sequence size is equal to quality size), and false otherwise
-   */
   bool IsValid() const {
     return valid_;
   }
 
-  /*
-   * Return Sequence object, got from sequence string.
-   *
-   * @return SingleRead sequence.
-   */
   Sequence sequence(bool rc = false) const {
     VERIFY(valid_);
     return Sequence(seq_, rc);
   }
 
-  /*
-   * Return Quality object, got from quality string.
-   *
-   * @return SingleRead quality.
-   */
   Quality quality() const {
     VERIFY(valid_);
     return Quality(qual_);
   }
 
-  /*
-   * Return name of single read.
-   *
-   * @return SingleRead name.
-   */
   const std::string& name() const {
     return name_;
   }
 
-  /*
-   * Return size of SingleRead.
-   *
-   * @return The size of SingleRead sequence.
-   */
   size_t size() const {
     return seq_.size();
   }
@@ -129,31 +101,14 @@ class SingleRead {
     return size();
   }
 
-  /*
-   * Return SingleRead sequence string (in readable form with ATGC).
-   *
-   * @return SingleRead sequence string.
-   */
   const std::string& GetSequenceString() const {
     return seq_;
   }
 
-  /*
-   * Return SingleRead quality string (in readable form).
-   *
-   * @return SingleRead quality string.
-   */
   const std::string& GetQualityString() const {
     return qual_;
   }
 
-  /*
-   * Return SingleRead quality string, where every quality value is
-   * increased by PhredOffset (need for normalization of quality values).
-   * Do not modify original quality values.
-   *
-   * @return Modified SingleRead quality string.
-   */
   std::string GetPhredQualityString() const {
     int offset = PhredOffset;
     std::string res = qual_;
@@ -175,12 +130,6 @@ class SingleRead {
     return dignucl(seq_[i]);
   }
 
-  /*
-   * Return reversed complimentary SingleRead (SingleRead with new
-   * name, reversed complimentary sequence, and reversed quality).
-   *
-   * @return Reversed complimentary SingleRead.
-   */
   SingleRead operator!() const {
     std::string new_name;
     if (name_.length() >= 3 && name_.substr(name_.length() - 3) == "_RC") {
@@ -194,20 +143,21 @@ class SingleRead {
     //		} else {
     //			new_name = name_.substr(1, name_.length());
     //		}
-    return SingleRead(new_name, ReverseComplement(seq_), Reverse(qual_));
+    return SingleRead(new_name, ReverseComplement(seq_), Reverse(qual_), right_offset_, left_offset_);
   }
 
   SingleRead SubstrStrict(size_t from, size_t to) const {
     size_t len = to - from;
     //		return SingleRead(name_, seq_.substr(from, len), qual_.substr(from, len));
-    //		TODO make naming nicer
+    //		TODO remove naming?
     std::string new_name;
     if (name_.length() >= 3 && name_.substr(name_.length() - 3) == "_RC") {
       new_name = name_.substr(0, name_.length() - 3) + "_SUBSTR(" + ToString(size() - to) + "," + ToString(size() - from) + ")" + "_RC";
     } else {
       new_name = name_ + "_SUBSTR(" + ToString(from) + "," + ToString(to) + ")";
     }
-    return SingleRead(new_name, seq_.substr(from, len), qual_.substr(from, len));
+    return SingleRead(new_name, seq_.substr(from, len), qual_.substr(from, len),
+                      SequenceOffsetT(from + (size_t) left_offset_), SequenceOffsetT(size() - to + (size_t) right_offset_));
   }
 
   SingleRead Substr(size_t from, size_t to) const {
@@ -221,14 +171,6 @@ class SingleRead {
     return SubstrStrict(from, to);
   }
 
-  /*
-   * Check whether two SingleReads are equal.
-   *
-   * @param singleread The SingleRead we want to compare ours with.
-   *
-   * @return true if these two single reads have similar sequences,
-   * and false otherwise.
-   */
   bool operator==(const SingleRead& singleread) const {
     return seq_ == singleread.seq_;
   }
@@ -237,11 +179,6 @@ class SingleRead {
     name_ = new_name;
   }
 
-  //	void ClearQuality() {
-  //		qual_ = std::string(seq_.size(), (char) 0);
-  //		UpdateValid();
-  //	}
-
   static bool IsValid(const std::string& seq) {
     for (size_t i = 0; i < seq.size(); ++i) {
       if (!is_nucl(seq[i])) {
@@ -251,9 +188,24 @@ class SingleRead {
     return true;
   }
 
+  SequenceOffsetT GetLeftOffset() const {
+      return left_offset_;
+  }
+
+  SequenceOffsetT GetRightOffset() const {
+      return right_offset_;
+  }
 
   bool BinWrite(std::ostream& file, bool rc = false) const {
-    return sequence(rc).BinWrite(file);
+    sequence(rc).BinWrite(file);
+    if (rc) {
+      file.write((const char *) &right_offset_, sizeof(right_offset_));
+      file.write((const char *) &left_offset_, sizeof(left_offset_));
+    } else {
+      file.write((const char *) &left_offset_, sizeof(left_offset_));
+      file.write((const char *) &right_offset_, sizeof(right_offset_));
+    }
+    return !file.fail();
   }
 
 
@@ -278,6 +230,12 @@ class SingleRead {
   /*
    * @variable The flag of SingleRead correctness.
    */
+
+  //Left and right offsets with respect to original sequence
+  SequenceOffsetT left_offset_;
+
+  SequenceOffsetT right_offset_;
+
   bool valid_;
 
   void Init() {
@@ -295,18 +253,34 @@ inline std::ostream& operator<<(std::ostream& os, const SingleRead& read) {
 class SingleReadSeq {
 
  public:
-  SingleReadSeq(const Sequence& s): seq_(s) {
+  SingleReadSeq(const Sequence& s,
+                SequenceOffsetT left_offset = 0, SequenceOffsetT right_offset = 0):
+      seq_(s), left_offset_(left_offset), right_offset_(right_offset) {
   }
 
-  SingleReadSeq() {
+  SingleReadSeq(): seq_(), left_offset_(0), right_offset_(0) {
   }
 
   bool BinRead(std::istream& file) {
-    return seq_.BinRead(file);
+    seq_.BinRead(file);
+    file.read((char*) &left_offset_, sizeof(left_offset_));
+    file.read((char*) &right_offset_, sizeof(right_offset_));
+    return !file.fail();
   }
 
-  bool BinWrite(std::ostream& file) const {
-    return seq_.BinWrite(file);
+  bool BinWrite(std::ostream& file, bool rc = false) const {
+    if (rc)
+      (!seq_).BinWrite(file);
+    else
+      seq_.BinWrite(file);
+    if (rc) {
+      file.write((const char *) &right_offset_, sizeof(right_offset_));
+      file.write((const char *) &left_offset_, sizeof(left_offset_));
+    } else {
+      file.write((const char *) &left_offset_, sizeof(left_offset_));
+      file.write((const char *) &right_offset_, sizeof(right_offset_));
+    }
+    return !file.fail();
   }
 
   //    SingleReadSeq(std::istream& file): seq_(file, true) {
@@ -332,8 +306,21 @@ class SingleReadSeq {
     return SingleReadSeq(!seq_);
   }
 
+  SequenceOffsetT GetLeftOffset() const {
+    return left_offset_;
+  }
+
+  SequenceOffsetT GetRightOffset() const {
+    return right_offset_;
+  }
+
  private:
   Sequence seq_;
+
+  //Left and right offsets with respect to original sequence
+  SequenceOffsetT left_offset_;
+
+  SequenceOffsetT right_offset_;
 };
 
 inline std::ostream& operator<<(std::ostream& os, const SingleReadSeq& read) {
diff --git a/src/include/logger/logger.hpp b/src/include/logger/logger.hpp
index c28907a..56b5cbb 100644
--- a/src/include/logger/logger.hpp
+++ b/src/include/logger/logger.hpp
@@ -8,6 +8,7 @@
 #pragma once
 #include "perfcounter.hpp"
 
+#include <vector>
 #include <unordered_map>
 #include <string>
 #include <sstream>
diff --git a/src/include/mph_index/MurmurHash3.h b/src/include/mph_index/MurmurHash3.h
deleted file mode 100644
index 265d576..0000000
--- a/src/include/mph_index/MurmurHash3.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//-----------------------------------------------------------------------------
-// MurmurHash3 was written by Austin Appleby, and is placed in the public
-// domain. The author hereby disclaims copyright to this source code.
-
-#ifndef _MURMURHASH3_H_
-#define _MURMURHASH3_H_
-
-//-----------------------------------------------------------------------------
-// Platform-specific functions and macros
-
-// Microsoft Visual Studio
-
-#if defined(_MSC_VER)
-
-typedef unsigned char uint8_t;
-typedef unsigned long uint32_t;
-typedef unsigned __int64 uint64_t;
-
-// Other compilers
-
-#else	// defined(_MSC_VER)
-
-#include <stdint.h>
-#include <stdlib.h>
-
-#endif // !defined(_MSC_VER)
-
-//-----------------------------------------------------------------------------
-
-void MurmurHash3_x86_32  ( const void * key, const size_t len, uint32_t seed, void * out );
-
-void MurmurHash3_x86_128 ( const void * key, const size_t len, uint32_t seed, void * out );
-
-void MurmurHash3_x64_128 ( const void * key, const size_t len, uint32_t seed, void * out );
-
-//-----------------------------------------------------------------------------
-
-#endif // _MURMURHASH3_H_
diff --git a/src/include/mph_index/base_hash.hpp b/src/include/mph_index/base_hash.hpp
index d362e5f..f8482ca 100644
--- a/src/include/mph_index/base_hash.hpp
+++ b/src/include/mph_index/base_hash.hpp
@@ -4,9 +4,9 @@
 #include <tuple>
 #include <algorithm>
 #include <cstring>
+#include <city/city.h>
 #include "common.hpp"
 
-
 namespace emphf {
 
     inline uint64_t unaligned_load64(uint8_t const* from)
@@ -225,4 +225,69 @@ namespace emphf {
 
     };
 
+
+    struct city_hasher {
+        typedef uint64_t seed_t;
+        typedef uint64_t hash_t;
+        typedef std::tuple<hash_t, hash_t, hash_t> hash_triple_t;
+
+        city_hasher()
+        {}
+
+        city_hasher(uint64_t seed)
+            : m_seed(seed)
+        {}
+
+        template <typename Rng>
+        static city_hasher generate(Rng& rng) {
+            return city_hasher(rng());
+        }
+
+        hash_triple_t operator()(byte_range_t s) const {
+            city_uint128 ch = CityHash128WithSeed((char*)s.first, s.second - s.first, {m_seed, 0x9e3779b97f4a7c13ULL});
+            hash_triple_t h(ch.first, 0x9e3779b97f4a7c13ULL, ch.second);
+            mix(h);
+            
+            return h;
+        }
+
+        void swap(city_hasher& other) {
+            std::swap(m_seed, other.m_seed);
+        }
+
+        void save(std::ostream& os) const {
+            os.write(reinterpret_cast<char const*>(&m_seed), sizeof(m_seed));
+        }
+
+        void load(std::istream& is) {
+            is.read(reinterpret_cast<char*>(&m_seed), sizeof(m_seed));
+        }
+
+        seed_t seed() const {
+            return m_seed;
+        }
+
+    protected:
+        seed_t m_seed;
+
+        static void mix(hash_triple_t& h) {
+            uint64_t& a = std::get<0>(h);
+            uint64_t& b = std::get<1>(h);
+            uint64_t& c = std::get<2>(h);
+
+            a -= b; a -= c; a ^= (c >> 43);
+            b -= c; b -= a; b ^= (a << 9);
+            c -= a; c -= b; c ^= (b >> 8);
+            a -= b; a -= c; a ^= (c >> 38);
+            b -= c; b -= a; b ^= (a << 23);
+            c -= a; c -= b; c ^= (b >> 5);
+            a -= b; a -= c; a ^= (c >> 35);
+            b -= c; b -= a; b ^= (a << 49);
+            c -= a; c -= b; c ^= (b >> 11);
+            a -= b; a -= c; a ^= (c >> 12);
+            b -= c; b -= a; b ^= (a << 18);
+            c -= a; c -= b; c ^= (b >> 22);
+        }
+    };
+
 }
diff --git a/src/include/mph_index/kmer_index.hpp b/src/include/mph_index/kmer_index.hpp
index e8248da..105443a 100644
--- a/src/include/mph_index/kmer_index.hpp
+++ b/src/include/mph_index/kmer_index.hpp
@@ -14,7 +14,6 @@
 #include "base_hash.hpp"
 #include "hypergraph.hpp"
 #include "hypergraph_sorter_seq.hpp"
-#include "MurmurHash3.h"
 
 #include "openmp_wrapper.h"
 
@@ -131,7 +130,7 @@ class KMerIndex {
   typedef size_t IdxType;
 
  private:
-  using KMerDataIndex = emphf::mphf<emphf::jenkins64_hasher>;
+  using KMerDataIndex = emphf::mphf<emphf::city_hasher>;
   typedef KMerIndex __self;
 
  public:
@@ -207,6 +206,13 @@ class KMerIndex {
     count_size();
   }
 
+  void swap(KMerIndex<traits> &other) {
+    std::swap(index_, other.index_);
+    std::swap(num_buckets_, other.num_buckets_);
+    std::swap(size_, other.size_);
+    std::swap(bucket_starts_, other.bucket_starts_);
+  }
+
  private:
   KMerDataIndex *index_;
 
diff --git a/src/include/omni/action_handlers.hpp b/src/include/omni/action_handlers.hpp
index 16d7ccb..573a44e 100644
--- a/src/include/omni/action_handlers.hpp
+++ b/src/include/omni/action_handlers.hpp
@@ -12,9 +12,11 @@
 
 #include <boost/noncopyable.hpp>
 #include <string>
+#include <vector>
 
 namespace omnigraph {
 
+using std::vector;
 /**
  * ActionHandler is base listening class for graph events. All structures and information storages
  * which are meant to synchronize with graph should use this structure. In order to make handler listen
@@ -28,7 +30,7 @@ namespace omnigraph {
  * consistent. Now high level events are merge, glue and split. This list can be extended in near future.
  */
 template<typename VertexId, typename EdgeId>
-class ActionHandler : boost::noncopyable {
+class ActionHandler : private boost::noncopyable {
     const std::string handler_name_;
   private:
     bool attached_;
diff --git a/src/include/omni/basic_edge_conditions.hpp b/src/include/omni/basic_edge_conditions.hpp
index f9a1284..e19890f 100644
--- a/src/include/omni/basic_edge_conditions.hpp
+++ b/src/include/omni/basic_edge_conditions.hpp
@@ -8,8 +8,8 @@
 #pragma once
 
 #include "func.hpp"
+#include "pred.hpp"
 #include "omni_utils.hpp"
-
 namespace omnigraph {
 
 using namespace func;
@@ -52,6 +52,13 @@ public:
 };
 
 template<class Graph>
+inline bool HasAlternatives(const Graph& g, typename Graph::EdgeId e) {
+    return g.OutgoingEdgeCount(g.EdgeStart(e)) > 1
+                    && g.IncomingEdgeCount(g.EdgeEnd(e)) > 1;
+}
+
+
+template<class Graph>
 class AlternativesPresenceCondition : public EdgeCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
@@ -65,13 +72,18 @@ class AlternativesPresenceCondition : public EdgeCondition<Graph> {
     }
 
     bool Check(EdgeId e) const {
-        return this->g().OutgoingEdgeCount(this->g().EdgeStart(e)) > 1
-                && this->g().IncomingEdgeCount(this->g().EdgeEnd(e)) > 1;
+        return HasAlternatives(this->g(), e);
     }
 
 };
 
 template<class Graph>
+pred::TypedPredicate<typename Graph::EdgeId> AddAlternativesPresenceCondition(const Graph& g,
+                                                                             pred::TypedPredicate<typename Graph::EdgeId> condition) {
+    return pred::And(AlternativesPresenceCondition<Graph>(g), condition);
+}
+
+template<class Graph>
 class CoverageUpperBound : public EdgeCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef EdgeCondition<Graph> base;
@@ -147,10 +159,9 @@ class PathLengthLowerBound : public EdgeCondition<Graph> {
 };
 
 template<class Graph, class PathFinder>
-std::shared_ptr<PathLengthLowerBound<Graph, PathFinder> >
+PathLengthLowerBound<Graph, PathFinder>
 MakePathLengthLowerBound(const Graph& g, const PathFinder& path_finder, size_t min_length) {
-    return std::make_shared<PathLengthLowerBound<Graph, PathFinder>>(g, path_finder,
-                                                                min_length);
+    return PathLengthLowerBound<Graph, PathFinder>(g, path_finder, min_length);
 }
 
 template<class Graph>
@@ -206,18 +217,18 @@ class PredicateUniquenessPlausabilityCondition :
         public UniquenessPlausabilityCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef shared_ptr<Predicate<EdgeId>> EdgePredicate;
+    typedef pred::TypedPredicate<EdgeId> EdgePredicate;
     typedef UniquenessPlausabilityCondition<Graph> base;
 
     EdgePredicate uniqueness_condition_;
     EdgePredicate plausiblity_condition_;
 
     bool CheckUniqueness(EdgeId e, bool) const {
-        return uniqueness_condition_->Check(e);
+        return uniqueness_condition_(e);
     }
 
     bool CheckPlausibility(EdgeId e, bool) const {
-        return plausiblity_condition_->Check(e);
+        return plausiblity_condition_(e);
     }
 
  public:
@@ -237,7 +248,7 @@ class DefaultUniquenessPlausabilityCondition :
         public PredicateUniquenessPlausabilityCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef shared_ptr<Predicate<EdgeId>> EdgePredicate;
+    typedef pred::TypedPredicate<EdgeId> EdgePredicate;
     typedef PredicateUniquenessPlausabilityCondition<Graph> base;
 
  public:
@@ -246,13 +257,10 @@ class DefaultUniquenessPlausabilityCondition :
                                            size_t uniqueness_length,
                                            size_t plausibility_length)
             : base(g,
-                   MakePathLengthLowerBound(g, UniquePathFinder<Graph>(g),
-                                            uniqueness_length),
-                   MakePathLengthLowerBound(
-                           g,
-                           PlausiblePathFinder<Graph>(g,
-                                                      2 * plausibility_length),
-                           plausibility_length)) {
+                   MakePathLengthLowerBound(g,
+                                            UniquePathFinder<Graph>(g), uniqueness_length),
+                   MakePathLengthLowerBound(g,
+                                            PlausiblePathFinder<Graph>(g, 2 * plausibility_length), plausibility_length)) {
     }
 
 };
diff --git a/src/include/omni/bulge_remover.hpp b/src/include/omni/bulge_remover.hpp
index 4339d24..545c70d 100644
--- a/src/include/omni/bulge_remover.hpp
+++ b/src/include/omni/bulge_remover.hpp
@@ -69,20 +69,21 @@ class MostCoveredSimpleAlternativePathChooser: public PathProcessor<Graph>::Call
 	typedef typename Graph::EdgeId EdgeId;
 	typedef typename Graph::VertexId VertexId;
 
-	Graph& g_;
+	const Graph& g_;
 	EdgeId forbidden_edge_;
+
 	double max_coverage_;
 	vector<EdgeId> most_covered_path_;
 
 public:
 
-	MostCoveredSimpleAlternativePathChooser(Graph& g, EdgeId edge) :
+	MostCoveredSimpleAlternativePathChooser(const Graph& g, EdgeId edge) :
 			g_(g), forbidden_edge_(edge), max_coverage_(-1.0) {
 
 	}
 
-	virtual void HandleReversedPath(const vector<EdgeId>& reversed_path) {
-		vector<EdgeId> path = this->ReversePath(reversed_path);
+	void HandleReversedPath(const vector<EdgeId>& reversed_path) override {
+	    vector<EdgeId> path = this->ReversePath(reversed_path);
 		double path_cov = AvgCoverage(g_, path);
 		for (size_t i = 0; i < path.size(); i++) {
 			if (path[i] == forbidden_edge_)
@@ -107,205 +108,674 @@ inline size_t CountMaxDifference(size_t absolute_diff, size_t length, double rel
     return std::max((size_t) std::floor(relative_diff * (double) length), absolute_diff);
 }
 
-/**
- * This class removes simple bulges from given graph with the following algorithm: it iterates through all edges of
- * the graph and for each edge checks if this edge is likely to be a simple bulge
- * if edge is judged to be one it is removed.
- */
 template<class Graph>
-class BulgeRemover: public EdgeProcessingAlgorithm<Graph> {
-    typedef EdgeProcessingAlgorithm<Graph> base;
-	typedef typename Graph::EdgeId EdgeId;
-	typedef typename Graph::VertexId VertexId;
+class BulgeGluer {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef std::function<void(EdgeId edge, const vector<EdgeId>& path)> BulgeCallbackF;
+    Graph& g_;
+    BulgeCallbackF opt_callback_;
+    std::function<void(EdgeId)> removal_handler_;
+
+    void InnerProcessBulge(EdgeId edge, const vector<EdgeId>& path) {
+
+        EnsureEndsPositionAligner aligner(CumulativeLength(g_, path),
+                g_.length(edge));
+        double prefix_length = 0.;
+        vector<size_t> bulge_prefix_lengths;
+
+        for (EdgeId e : path) {
+            prefix_length += (double) g_.length(e);
+            bulge_prefix_lengths.push_back(aligner.GetPosition((size_t) prefix_length));
+        }
 
-	bool PossibleBulgeEdge(EdgeId e) const {
-	  return (graph_.length(e) <= max_length_ && graph_.coverage(e) < max_coverage_ &&
-	          graph_.OutgoingEdgeCount(graph_.EdgeStart(e)) > 1 &&
-	          graph_.IncomingEdgeCount(graph_.EdgeEnd(e)) > 1);
-	}
+        EdgeId edge_to_split = edge;
+        size_t prev_length = 0;
 
-	/**
-	 * Checks if alternative path is simple (doesn't contain conjugate edges, edge e or conjugate(e))
-	 * and its average coverage is greater than max_relative_coverage_ * g.coverage(e)
-	 */
-	bool BulgeCondition(EdgeId e, const vector<EdgeId>& path,
-			double path_coverage) {
-		return math::ge(path_coverage * max_relative_coverage_,
-				graph_.coverage(e)) && SimplePathCondition<Graph>(graph_)(e, path);
-	}
+        TRACE("Process bulge " << path.size() << " edges");
 
-	void ProcessBulge(EdgeId edge, const vector<EdgeId>& path) {
-		if (opt_callback_)
-			opt_callback_(edge, path);
+        //fixme remove after checking results
+        bool flag = false;
+        VERIFY(bulge_prefix_lengths.back() == g_.length(edge));
 
-		if (removal_handler_)
-			removal_handler_(edge);
+        for (size_t i = 0; i < path.size(); ++i) {
+            if (bulge_prefix_lengths[i] > prev_length) {
+                if (bulge_prefix_lengths[i] - prev_length
+                        != g_.length(edge_to_split)) {
 
-		VertexId start = graph_.EdgeStart(edge);
-		VertexId end = graph_.EdgeEnd(edge);
+                    TRACE("SplitEdge " << g_.str(edge_to_split));
+                    TRACE(
+                            "Start: " << g_.str(g_.EdgeStart(edge_to_split)));
+                    TRACE(
+                            "Start: " << g_.str(g_.EdgeEnd(edge_to_split)));
 
-		TRACE("Projecting edge " << graph_.str(edge));
-		InnerProcessBulge(edge, path);
+                    pair<EdgeId, EdgeId> split_result = g_.SplitEdge(
+                            edge_to_split,
+                            bulge_prefix_lengths[i] - prev_length);
 
-		TRACE("Compressing start vertex " << graph_.str(start));
-		graph_.CompressVertex(start);
+                    edge_to_split = split_result.second;
 
-		TRACE("Compressing end vertex " << graph_.str(end));
-		graph_.CompressVertex(end);
-	}
+                    TRACE("GlueEdges " << g_.str(split_result.first));
+                    flag = true;
+                    g_.GlueEdges(split_result.first, path[i]);
 
-	void InnerProcessBulge(EdgeId edge, const vector<EdgeId>& path) {
+                } else {
+                    TRACE("GlueEdges " << g_.str(edge_to_split));
+                    flag = true;
+                    g_.GlueEdges(edge_to_split, path[i]);
+                }
+            }
+            prev_length = bulge_prefix_lengths[i];
+        }
+        VERIFY(flag);
+    }
+
+public:
 
-		EnsureEndsPositionAligner aligner(CumulativeLength(graph_, path),
-				graph_.length(edge));
-		double prefix_length = 0.;
-		vector<size_t> bulge_prefix_lengths;
+    BulgeGluer(Graph& g, BulgeCallbackF opt_callback = 0,
+               std::function<void(EdgeId)> removal_handler = 0) :
+               g_(g),
+               opt_callback_(opt_callback),
+               removal_handler_(removal_handler) {
 
-		for (EdgeId e : path) {
-			prefix_length += (double) graph_.length(e);
-			bulge_prefix_lengths.push_back(aligner.GetPosition((size_t) prefix_length));
-		}
+    }
 
-		EdgeId edge_to_split = edge;
-		size_t prev_length = 0;
+    void operator()(EdgeId edge, const vector<EdgeId>& path) {
+        if (opt_callback_)
+            opt_callback_(edge, path);
 
-		TRACE("Process bulge " << path.size() << " edges");
+        if (removal_handler_)
+            removal_handler_(edge);
 
-		//fixme remove after checking results
-		bool flag = false;
-        VERIFY(bulge_prefix_lengths.back() == graph_.length(edge));
+        VertexId start = g_.EdgeStart(edge);
+        VertexId end = g_.EdgeEnd(edge);
 
-		for (size_t i = 0; i < path.size(); ++i) {
-			if (bulge_prefix_lengths[i] > prev_length) {
-				if (bulge_prefix_lengths[i] - prev_length
-						!= graph_.length(edge_to_split)) {
-
-					TRACE("SplitEdge " << graph_.str(edge_to_split));
-					TRACE(
-							"Start: " << graph_.str(graph_.EdgeStart(edge_to_split)));
-					TRACE(
-							"Start: " << graph_.str(graph_.EdgeEnd(edge_to_split)));
-
-					pair<EdgeId, EdgeId> split_result = graph_.SplitEdge(
-							edge_to_split,
-							bulge_prefix_lengths[i] - prev_length);
-
-					edge_to_split = split_result.second;
-
-					TRACE("GlueEdges " << graph_.str(split_result.first));
-					flag = true;
-					graph_.GlueEdges(split_result.first, path[i]);
-
-				} else {
-					TRACE("GlueEdges " << graph_.str(edge_to_split));
-					flag = true;
-					graph_.GlueEdges(edge_to_split, path[i]);
-				}
-			}
-			prev_length = bulge_prefix_lengths[i];
-		}
-		VERIFY(flag);
-	}
+        TRACE("Projecting edge " << g_.str(edge));
+        InnerProcessBulge(edge, path);
+
+        TRACE("Compressing start vertex " << g_.str(start));
+        g_.CompressVertex(start);
+
+        TRACE("Compressing end vertex " << g_.str(end));
+        g_.CompressVertex(end);
+    }
+
+};
+
+template<class Graph>
+class AlternativesAnalyzer {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    const Graph& g_;
+    double max_coverage_;
+    size_t max_length_;
+    double max_relative_coverage_;
+    size_t max_delta_;
+    double max_relative_delta_;
+    size_t max_edge_cnt_;
+
+    static vector<EdgeId> EmptyPath() {
+        static vector<EdgeId> vec = {};
+        return vec;
+    }
+
+    /**
+     * Checks if alternative path is simple (doesn't contain conjugate edges, edge e or conjugate(e))
+     * and its average coverage * max_relative_coverage_ is greater than g.coverage(e)
+     */
+    bool BulgeCondition(EdgeId e, const vector<EdgeId>& path,
+            double path_coverage) const {
+        return math::ge(path_coverage * max_relative_coverage_,
+                g_.coverage(e)) && SimplePathCondition<Graph>(g_)(e, path);
+    }
+
+public:
+    AlternativesAnalyzer(const Graph& g, double max_coverage, size_t max_length,
+                         double max_relative_coverage, size_t max_delta,
+                         double max_relative_delta, size_t max_edge_cnt) :
+                         g_(g),
+                         max_coverage_(max_coverage),
+                         max_length_(max_length),
+                         max_relative_coverage_(max_relative_coverage),
+                         max_delta_(max_delta),
+                         max_relative_delta_(max_relative_delta),
+                         max_edge_cnt_(max_edge_cnt) {
+        DEBUG("Created alternatives analyzer max_length=" << max_length
+        << " max_coverage=" << max_coverage
+        << " max_relative_coverage=" << max_relative_coverage
+        << " max_delta=" << max_delta
+        << " max_relative_delta=" << max_relative_delta);
+    }
+
+    vector<EdgeId> operator()(EdgeId e) const {
+        if (g_.length(e) > max_length_ || math::gr(g_.coverage(e), max_coverage_)) {
+            return EmptyPath();
+        }
+
+        size_t kplus_one_mer_coverage = (size_t) math::round((double) g_.length(e) * g_.coverage(e));
+        TRACE("Processing edge " << g_.str(e) << " and coverage " << kplus_one_mer_coverage);
+
+        size_t delta = CountMaxDifference(max_delta_, g_.length(e), max_relative_delta_);
+
+        MostCoveredSimpleAlternativePathChooser<Graph> path_chooser(g_, e);
+
+        VertexId start = g_.EdgeStart(e);
+        TRACE("Start " << g_.str(start));
+        VertexId end = g_.EdgeEnd(e);
+        TRACE("End " << g_.str(end));
+
+        ProcessPaths(g_, (g_.length(e) > delta) ? g_.length(e) - delta : 0,
+                g_.length(e) + delta, start, end, path_chooser, max_edge_cnt_);
+
+        const vector<EdgeId>& path = path_chooser.most_covered_path();
+        if (!path.empty()) {
+            VERIFY(g_.EdgeStart(path[0]) == start);
+            VERIFY(g_.EdgeEnd(path.back()) == end);
+        }
+
+        double path_coverage = path_chooser.max_coverage();
+        if (math::gr(path_coverage, 0.)) {
+            TRACE("Best path with coverage " << path_coverage << " is " << PrintPath(g_, path));
+
+            if (BulgeCondition(e, path, path_coverage)) {
+                TRACE("Satisfied condition");
+                return path;
+            } else {
+                TRACE("Didn't satisfy condition");
+                return EmptyPath();
+            }
+        } else {
+            TRACE("Didn't find alternative");
+            return EmptyPath();
+        }
+    }
+
+    double max_coverage() const {
+        return max_coverage_;
+    }
+
+    size_t max_length() const {
+        return max_length_;
+    }
+
+private:
+    DECL_LOGGER("AlternativesAnalyzer");
+};
+
+template<class Graph>
+pred::TypedPredicate<typename Graph::EdgeId>
+NecessaryBulgeCondition(const Graph& g, size_t max_length, double max_coverage) {
+    return AddAlternativesPresenceCondition(g,
+                                            pred::And(LengthUpperBound<Graph>(g, max_length),
+                                                     CoverageUpperBound<Graph>(g, max_coverage)));
+}
+
+/**
+ * This class removes simple bulges from the given graph with the following algorithm: it iterates through all edges of
+ * the graph and for each edge checks whether it is likely to be a simple bulge;
+ * if the edge is judged to be one, it is removed.
+ */
+//template<class Graph>
+//class OldBulgeRemover: public EdgeProcessingAlgorithm<Graph> {
+//    typedef EdgeProcessingAlgorithm<Graph> base;
+//	typedef typename Graph::EdgeId EdgeId;
+//	typedef typename Graph::VertexId VertexId;
+//
+//protected:
+//
+//	/*virtual*/
+//    bool ProcessEdge(EdgeId e) {
+//        TRACE("Considering edge " << this->g().str(e)
+//                      << " of length " << this->g().length(e)
+//                      << " and avg coverage " << this->g().coverage(e));
+//
+//        if (!HasAlternatives(this->g(), e)) {
+//            TRACE("Not possible bulge edge");
+//            return false;
+//        }
+//
+//        for (const auto& analyzer : alternatives_analyzers_) {
+//            vector<EdgeId> alternative = analyzer(e);
+//            if (!alternative.empty()) {
+//                gluer_(e, alternative);
+//                return true;
+//            }
+//        }
+//        return false;
+//    }
+//
+//public:
+//
+//	typedef std::function<void(EdgeId edge, const vector<EdgeId>& path)> BulgeCallbackF;
+//
+////	BulgeRemover(Graph& g,  double max_coverage, size_t max_length,
+////			double max_relative_coverage, size_t max_delta,
+////			double max_relative_delta,
+////			size_t max_edge_cnt,
+////			BulgeCallbackF opt_callback = 0,
+////			std::function<void(EdgeId)> removal_handler = 0) :
+////			base(g, true),
+////			gluer_(g, opt_callback, removal_handler) {
+////                DEBUG("Launching br max_length=" << max_length
+////                << " max_coverage=" << max_coverage
+////                << " max_relative_coverage=" << max_relative_coverage
+////                << " max_delta=" << max_delta
+////                << " max_relative_delta=" << max_relative_delta
+////                << " max_number_edges=" << max_edge_cnt);
+////                alternatives_analyzers_.push_back(
+////                        AlternativesAnalyzer<Graph>(g, max_coverage,
+////                                                    max_length, max_relative_coverage,
+////                                                    max_delta, max_relative_delta, max_edge_cnt));
+////    }
+//
+//	OldBulgeRemover(Graph& g,
+//	        const std::vector<AlternativesAnalyzer<Graph>>& alternatives_analyzers,
+//			BulgeCallbackF opt_callback = 0,
+//			std::function<void(EdgeId)> removal_handler = 0) :
+//			base(g, true),
+//			alternatives_analyzers_(alternatives_analyzers),
+//			gluer_(g, opt_callback, removal_handler) {
+//    }
+//
+//private:
+//	std::vector<AlternativesAnalyzer<Graph>> alternatives_analyzers_;
+//	BulgeGluer<Graph> gluer_;
+//private:
+//	DECL_LOGGER("BulgeRemover")
+//};
+
+template<class Graph>
+inline double AbsoluteMaxCoverage(const std::vector<AlternativesAnalyzer<Graph>>& alternatives_analyzers) {
+    double ans = -1.;
+    for (const auto& analyzer : alternatives_analyzers) {
+        ans = std::max(ans, analyzer.max_coverage());
+    }
+    return ans;
+}
+
+//fixme maybe switch on parallel finder?
+template<class Graph, class InterestingElementFinder>
+class BulgeRemover: public PersistentProcessingAlgorithm<Graph,
+                                                        typename Graph::EdgeId,
+                                                        InterestingElementFinder,
+                                                        CoverageComparator<Graph>> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef PersistentProcessingAlgorithm<Graph, EdgeId,
+            InterestingElementFinder, CoverageComparator<Graph>> base;
 
 protected:
+
     /*virtual*/
-    bool ProcessEdge(EdgeId edge) {
-        if (graph_.conjugate(edge) < edge) {
-            TRACE("Noncanonical edge");
+    bool Process(EdgeId e) {
+        TRACE("Considering edge " << this->g().str(e)
+                      << " of length " << this->g().length(e)
+                      << " and avg coverage " << this->g().coverage(e));
+
+        if (!HasAlternatives(this->g(), e)) {
+            TRACE("Not possible bulge edge");
             return false;
         }
-        TRACE("Considering edge " << graph_.str(edge) << " of length " << graph_.length(edge) << " and avg coverage " << graph_.coverage(edge));
-        TRACE("Is possible bulge " << PossibleBulgeEdge(edge));
 
-        if (!PossibleBulgeEdge(edge)) {
-            return false;
+        vector<EdgeId> alternative = alternatives_analyzer_(e);
+        if (!alternative.empty()) {
+            gluer_(e, alternative);
+            return true;
         }
+        return false;
+    }
+
+public:
+
+    typedef std::function<void(EdgeId edge, const vector<EdgeId>& path)> BulgeCallbackF;
+
+//  BulgeRemover(Graph& g,  double max_coverage, size_t max_length,
+//          double max_relative_coverage, size_t max_delta,
+//          double max_relative_delta,
+//          size_t max_edge_cnt,
+//          BulgeCallbackF opt_callback = 0,
+//          std::function<void(EdgeId)> removal_handler = 0) :
+//          base(g, true),
+//          gluer_(g, opt_callback, removal_handler) {
+//                DEBUG("Launching br max_length=" << max_length
+//                << " max_coverage=" << max_coverage
+//                << " max_relative_coverage=" << max_relative_coverage
+//                << " max_delta=" << max_delta
+//                << " max_relative_delta=" << max_relative_delta
+//                << " max_number_edges=" << max_edge_cnt);
+//                alternatives_analyzers_.push_back(
+//                        AlternativesAnalyzer<Graph>(g, max_coverage,
+//                                                    max_length, max_relative_coverage,
+//                                                    max_delta, max_relative_delta, max_edge_cnt));
+//    }
+
+    BulgeRemover(Graph& g, const InterestingElementFinder& interesting_finder,
+            const AlternativesAnalyzer<Graph>& alternatives_analyzer,
+            BulgeCallbackF opt_callback = 0,
+            std::function<void(EdgeId)> removal_handler = 0,
+            bool track_changes = true) :
+            base(g,
+                 interesting_finder,
+                 /*canonical_only*/true,
+                 CoverageComparator<Graph>(g),
+                 track_changes),
+            alternatives_analyzer_(alternatives_analyzer),
+            gluer_(g, opt_callback, removal_handler) {
+    }
 
-        size_t kplus_one_mer_coverage = (size_t) math::round((double) graph_.length(edge) * graph_.coverage(edge));
-        TRACE("Processing edge " << graph_.str(edge) << " and coverage " << kplus_one_mer_coverage);
+private:
+    AlternativesAnalyzer<Graph> alternatives_analyzer_;
+    BulgeGluer<Graph> gluer_;
+private:
+    DECL_LOGGER("BulgeRemover")
+};
 
-        size_t delta = CountMaxDifference(max_delta_, graph_.length(edge), max_relative_delta_);
+template<class Graph, class InterestingElementFinder>
+class ParallelBulgeRemover : public PersistentAlgorithmBase<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef SmartSetIterator<Graph, EdgeId, CoverageComparator<Graph>> SmartEdgeSet;
 
-        MostCoveredSimpleAlternativePathChooser<Graph> path_chooser(graph_, edge);
+    size_t buff_size_;
+    double buff_cov_diff_;
+    double buff_cov_rel_diff_;
+    AlternativesAnalyzer<Graph> alternatives_analyzer_;
+    BulgeGluer<Graph> gluer_;
+    InterestingElementFinder interesting_edge_finder_;
+    //todo remove
+    bool tracking_;
 
-        VertexId start = graph_.EdgeStart(edge);
-        TRACE("Start " << graph_.str(start));
+    size_t curr_iteration_;
 
-        VertexId end = graph_.EdgeEnd(edge);
-        TRACE("End " << graph_.str(end));
+    SmartEdgeSet it_;
 
-        PathProcessor<Graph> path_finder(graph_, (graph_.length(edge) > delta) ? graph_.length(edge) - delta : 0, graph_.length(edge) + delta, start, end, path_chooser);
+    static vector<EdgeId> EmptyPath() {
+        static vector<EdgeId> vec = {};
+        return vec;
+    }
 
-        path_finder.Process();
+    struct BulgeInfo : private boost::noncopyable {
+        size_t id;
+        EdgeId e;
+        std::vector<EdgeId> alternative;
 
-        const vector<EdgeId>& path = path_chooser.most_covered_path();
+        BulgeInfo() :
+            id(-1ul) {
+        }
 
-        double path_coverage = path_chooser.max_coverage();
-        TRACE("Best path with coverage " << path_coverage << " is " << PrintPath<Graph>(graph_, path));
+        BulgeInfo(size_t id_, EdgeId e_, std::vector<EdgeId> alternative_) :
+            id(id_), e(e_), alternative(std::move(alternative_)) {
+
+        }
 
-        if (BulgeCondition(edge, path, path_coverage)) {
-            TRACE("Satisfied condition");
+        BulgeInfo(BulgeInfo&& that) {
+            *this = std::move(that);
+        }
+
+        BulgeInfo& operator= (BulgeInfo&& that) {
+            id = that.id;
+            e = that.e;
+            alternative = std::move(that.alternative);
+            return *this;
+        }
+
+//        BulgeInfo(size_t id_, EdgeId e_, std::vector<EdgeId>&& alternative_) :
+//            id(id_), e(e_), alternative(std::move(alternative_)) {
+//
+//        }
+//
+        bool operator< (const BulgeInfo& that) const {
+//            VERIFY_MSG(id != that.id, "Ooops " << id);
+            return id < that.id;
+        }
+
+        std::string str(const Graph& g) const {
+            std::stringstream ss;
+            ss << "BulgeInfo " << id
+                    << " e: " << g.str(e)
+                    << " path: " << PrintPath(g, alternative);
+            return ss.str();
+        }
+
+    };
+
+    bool CheckInteracting(const BulgeInfo& info, const std::unordered_set<EdgeId>& involved_edges) const {
+        if (involved_edges.count(info.e))
+            return true;
+        for (EdgeId e : info.alternative)
+            if (involved_edges.count(e))
+                return true;
+        return false;
+    }
+
+    void AccountEdge(EdgeId e, std::unordered_set<EdgeId>& involved_edges) const {
+        TRACE("Pushing edge " << this->g().str(e));
+        involved_edges.insert(e);
+        EdgeId conj = this->g().conjugate(e);
+        TRACE("Pushing edge " << this->g().str(conj));
+        involved_edges.insert(conj);
+    }
+
+    void AccountEdges(const BulgeInfo& info, std::unordered_set<EdgeId>& involved_edges) const {
+        AccountEdge(info.e, involved_edges);
+        for (EdgeId e : info.alternative) {
+            AccountEdge(e, involved_edges);
+        }
+    }
+
+    //false if time to stop
+    bool FillEdgeBuffer(vector<EdgeId>& buffer, pred::TypedPredicate<EdgeId> proceed_condition) {
+        VERIFY(buffer.empty());
+        DEBUG("Filling edge buffer of size " << buff_size_);
+        perf_counter perf;
+        double low_cov = 0.;
+        double cov_diff = 0.;
+        while (!it_.IsEnd() && buffer.size() < buff_size_) {
+            EdgeId e = *it_;
+            TRACE("Current edge " << this->g().str(e));
+            if (!proceed_condition(e)) {
+                TRACE("Stop condition was reached.");
+                //need to release last element of the iterator to make it replaceable by new elements
+                it_.ReleaseCurrent();
+                return false;
+            }
+
+            double cov = this->g().coverage(e);
+            if (buffer.empty()) {
+                low_cov = cov;
+                cov_diff = max(buff_cov_diff_, buff_cov_rel_diff_ * low_cov);
+            } else {
+                if (math::gr(cov, low_cov + cov_diff)) {
+                    //need to release last element of the iterator to make it replaceable by new elements
+                    it_.ReleaseCurrent();
+                    return true;
+                }
+            }
+            TRACE("Potential bulge edge");
+            buffer.push_back(e);
+            ++it_;
+        }
 
-            ProcessBulge(edge, path);
+        DEBUG("Filled in " << perf.time() << " seconds");
+        if (buffer.size() == buff_size_) {
+            TRACE("Buffer filled");
             return true;
         } else {
-            TRACE("Didn't satisfy condition");
+            TRACE("No more edges in iterator");
             return false;
         }
     }
 
+    std::vector<std::vector<BulgeInfo>> FindBulges(const std::vector<EdgeId> edge_buffer) const {
+        DEBUG("Looking for bulges (in parallel). Edge buffer size " << edge_buffer.size());
+        perf_counter perf;
+        std::vector<std::vector<BulgeInfo>> bulge_buffers(omp_get_max_threads());
+        size_t n = edge_buffer.size();
+        //order is in agreement with coverage
+        #pragma omp parallel for schedule(guided)
+        for (size_t i = 0; i < n; ++i) {
+            EdgeId e = edge_buffer[i];
+            auto alternative = alternatives_analyzer_(e);
+            if (!alternative.empty()) {
+                bulge_buffers[omp_get_thread_num()].push_back(BulgeInfo(i, e, std::move(alternative)));
+            }
+        }
+        DEBUG("Bulges found in " << perf.time() << " seconds");
+        return bulge_buffers;
+    }
+
+    std::vector<BulgeInfo> MergeBuffers(std::vector<std::vector<BulgeInfo>>&& buffers) const {
+        DEBUG("Merging bulge buffers");
+        perf_counter perf;
+
+        std::vector<BulgeInfo> merged_bulges;
+        for (auto& bulge_buffer : buffers) {
+            std::copy(std::make_move_iterator(bulge_buffer.begin()),
+                      std::make_move_iterator(bulge_buffer.end()),
+                      std::back_inserter(merged_bulges));
+        }
+
+        DEBUG("Sorting");
+        //order is in agreement with coverage
+        std::sort(merged_bulges.begin(), merged_bulges.end());
+        DEBUG("Total bulges " << merged_bulges.size());
+        DEBUG("Buffers merged in " << perf.time() << " seconds");
+        return merged_bulges;
+    }
+
+    SmartEdgeSet RetainIndependentBulges(std::vector<BulgeInfo>& bulges) const {
+        DEBUG("Looking for independent bulges");
+        size_t total_cnt = bulges.size();
+        perf_counter perf;
+
+        std::vector<BulgeInfo> filtered;
+        filtered.reserve(bulges.size());
+    //fixme switch to involved vertices to bring fully parallel gluing closer
+        std::unordered_set<EdgeId> involved_edges;
+        SmartEdgeSet interacting_edges(this->g(), false, CoverageComparator<Graph>(this->g()));
+
+        for (BulgeInfo& info : bulges) {
+            TRACE("Analyzing interactions of " << info.str(this->g()));
+            if (CheckInteracting(info, involved_edges)) {
+                TRACE("Interacting");
+                interacting_edges.push(info.e);
+            } else {
+                TRACE("Independent");
+                AccountEdges(info, involved_edges);
+                filtered.push_back(std::move(info));
+            }
+        }
+        bulges = std::move(filtered);
+
+        DEBUG("Independent bulges identified in " << perf.time() << " seconds");
+        DEBUG("Independent cnt " << bulges.size());
+        DEBUG("Interacting cnt " << interacting_edges.size());
+        VERIFY(bulges.size() + interacting_edges.size() == total_cnt);
+
+        return interacting_edges;
+    }
+
+    bool ProcessBulges(const std::vector<BulgeInfo>& independent_bulges, SmartEdgeSet&& interacting_edges) {
+        DEBUG("Processing bulges");
+        perf_counter perf;
+
+        bool triggered = false;
+
+        for (const BulgeInfo& info : independent_bulges) {
+            TRACE("Processing bulge " << info.str(this->g()));
+            triggered = true;
+            gluer_(info.e, info.alternative);
+        }
+
+        DEBUG("Independent bulges glued in " << perf.time() << " seconds");
+        perf.reset();
+
+        DEBUG("Processing remaining interacting bulges " << interacting_edges.size());
+        //usual br strategy
+        for (; !interacting_edges.IsEnd(); ++interacting_edges) {
+            EdgeId e = *interacting_edges;
+            TRACE("Processing edge " << this->g().str(e));
+            std::vector<EdgeId> alternative = alternatives_analyzer_(e);
+            if (!alternative.empty()) {
+                gluer_(e, alternative);
+                triggered = true;
+            }
+        }
+        DEBUG("Interacting edges processed in " << perf.time() << " seconds");
+        return triggered;
+    }
+
 public:
 
-	typedef std::function<void(EdgeId edge, const vector<EdgeId>& path)> BulgeCallbackF;
-    
-	BulgeRemover(Graph& graph, size_t max_length, double max_coverage,
-			double max_relative_coverage, size_t max_delta,
-			double max_relative_delta,
-			BulgeCallbackF opt_callback = 0,
-			std::function<void(EdgeId)> removal_handler = 0) :
-			base(graph, true),
-			graph_(graph),
-			max_length_(max_length),
-			max_coverage_(max_coverage),
-			max_relative_coverage_(max_relative_coverage),
-			max_delta_(max_delta),
-			max_relative_delta_(max_relative_delta),
-			opt_callback_(opt_callback),
-			removal_handler_(removal_handler) {
-                DEBUG("Launching br max_length=" << max_length 
-                << " max_coverage=" << max_coverage 
-                << " max_relative_coverage=" << max_relative_coverage
-                << " max_delta=" << max_delta 
-                << " max_relative_delta=" << max_relative_delta);
-	}
+    typedef std::function<void(EdgeId edge, const vector<EdgeId>& path)> BulgeCallbackF;
+
+    ParallelBulgeRemover(Graph& g, const InterestingElementFinder& interesting_edge_finder, 
+                         size_t buff_size, double buff_cov_diff, 
+                         double buff_cov_rel_diff, const AlternativesAnalyzer<Graph>& alternatives_analyzer,
+                         BulgeCallbackF opt_callback = 0,
+                         std::function<void(EdgeId)> removal_handler = 0,
+                         bool track_changes = true) :
+                         PersistentAlgorithmBase<Graph>(g),
+                         buff_size_(buff_size),
+                         buff_cov_diff_(buff_cov_diff),
+                         buff_cov_rel_diff_(buff_cov_rel_diff),
+                         alternatives_analyzer_(alternatives_analyzer),
+                         gluer_(g, opt_callback, removal_handler),
+                         interesting_edge_finder_(interesting_edge_finder),
+                         tracking_(track_changes),
+                         curr_iteration_(0),
+                         it_(g, true, CoverageComparator<Graph>(g), true) {
+        VERIFY(buff_size_ > 0);
+        it_.Detach();
+    }
 
-//  Old version. If it was math::gr then it would be equivalent to new one.
-//	bool RemoveBulges() {
-//		bool changed = false;
-//		CoverageComparator<Graph> comparator(graph_);
-//		for (auto iterator = graph_.SmartEdgeBegin(comparator);
-//				!iterator.IsEnd(); ++iterator) {
-//			EdgeId e = *iterator;
-//			if (math::ge(graph_.coverage(e), max_coverage_))
-//				break;
-//			changed |= ProcessNext(e);
-//		}
-//		return changed;
-//	}
+    bool Run(bool force_primary_launch = false) override {
+        bool primary_launch = force_primary_launch ? true : curr_iteration_ == 0;
+        //todo remove if not needed; 
+        //potentially can vary coverage threshold in coordination with ec threshold
+        auto proceed_condition = pred::AlwaysTrue<EdgeId>();
 
-private:
-	//fixme redundant field
-	Graph& graph_;
-	size_t max_length_;
-	double max_coverage_;
-	double max_relative_coverage_;
-	size_t max_delta_;
-	double max_relative_delta_;
-	BulgeCallbackF opt_callback_;
-	std::function<void(EdgeId)> removal_handler_;
+        if (!it_.IsAttached()) {
+            it_.Attach();
+        }
+        if (primary_launch) {
+            it_.clear();
+            TRACE("Primary launch.");
+            TRACE("Start search for interesting edges");
+            interesting_edge_finder_.Run(it_);
+            TRACE(it_.size() << " interesting edges to process");
+        } else {
+            VERIFY(tracking_);
+            TRACE(it_.size() << " edges to process");
+        }
+
+        bool triggered = false;
+        bool proceed = true;
+        while (proceed) {
+            std::vector<EdgeId> edge_buffer;
+            edge_buffer.reserve(buff_size_);
+            proceed = FillEdgeBuffer(edge_buffer, proceed_condition);
+
+            std::vector<BulgeInfo> bulges = MergeBuffers(FindBulges(edge_buffer));
+
+            auto interacting_edges = RetainIndependentBulges(bulges);
+
+            bool inner_triggered  = ProcessBulges(bulges, std::move(interacting_edges));
+            proceed |= inner_triggered;
+            triggered |= inner_triggered;
+        }
+
+        TRACE("Finished processing. Triggered = " << triggered);
+        if (!tracking_)
+            it_.Detach();
+
+        curr_iteration_++;
+
+        return triggered;
+    }
 
 private:
-	DECL_LOGGER("BulgeRemover")
+    DECL_LOGGER("ParallelBulgeRemover")
 };
 
 }
diff --git a/src/include/omni/complex_bulge_remover.hpp b/src/include/omni/complex_bulge_remover.hpp
index 741dd64..1704f73 100644
--- a/src/include/omni/complex_bulge_remover.hpp
+++ b/src/include/omni/complex_bulge_remover.hpp
@@ -103,9 +103,7 @@ public:
 		for (VertexId end_v : end_vertices_) {
 			PathStorageCallback<Graph> path_storage(g_);
 			Range r = vertex_depth_.find(end_v)->second;
-			PathProcessor<Graph> best_path_finder(g_, r.start_pos, r.end_pos,
-					start_vertex_, end_v, path_storage);
-			best_path_finder.Process();
+			ProcessPaths(g_, r.start_pos, r.end_pos, start_vertex_, end_v, path_storage);
 			answer += path_storage.size();
 		}
 		return answer;
@@ -230,6 +228,7 @@ public:
 	}
 
 	virtual void HandleSplit(EdgeId old_edge, EdgeId new_edge_1, EdgeId /*new_edge_2*/) {
+	    VERIFY(old_edge != g_.conjugate(old_edge));
 		VertexId start = g_.EdgeStart(old_edge);
 		VertexId end = g_.EdgeEnd(old_edge);
 		if (contains(start)) {
@@ -330,6 +329,7 @@ public:
 
 	virtual void HandleSplit(EdgeId old_edge, EdgeId new_edge_1,
 			EdgeId new_edge_2) {
+	    VERIFY(old_edge != br_comp_.g().conjugate(old_edge));
 		if (Contains(old_edge)) {
 			edges_.erase(old_edge);
 			vertices_.insert(br_comp_.g().EdgeEnd(new_edge_1));
@@ -476,6 +476,7 @@ public:
 
 	virtual void HandleSplit(EdgeId old_edge, EdgeId new_edge_1,
 			EdgeId /*new_edge_2*/) {
+	    VERIFY(old_edge != comp_.g().conjugate(old_edge));
 		if (comp_.contains(old_edge)) {
 			CountAndSetVertexColor(comp_.g().EdgeEnd(new_edge_1));
 		}
@@ -1109,7 +1110,7 @@ public:
 
 	bool Run() {
         size_t cnt = 0;
-		INFO("Complex bulge remover started");
+		DEBUG("Complex bulge remover started");
 		if (!pics_folder_.empty()) {
 //			remove_dir(pics_folder_);
 			make_dir(pics_folder_);
@@ -1146,8 +1147,8 @@ public:
 				g_.CompressVertex(v);
 			}
 		}
-		INFO("Complex bulge remover finished");
-        INFO("Bulges processed " << cnt);
+		DEBUG("Complex bulge remover finished");
+        DEBUG("Bulges processed " << cnt);
 		return something_done_flag;
 	}
 
diff --git a/src/include/omni/complex_tip_clipper.hpp b/src/include/omni/complex_tip_clipper.hpp
index 58dd811..5b4f92a 100644
--- a/src/include/omni/complex_tip_clipper.hpp
+++ b/src/include/omni/complex_tip_clipper.hpp
@@ -23,7 +23,7 @@ class ComplexTipClipper {
     Graph& g_;
     size_t max_length_;
     string pics_folder_;
-
+    std::function<void(const set<EdgeId>&)> removal_handler_;
     const size_t edge_length_treshold = 100;
 
     bool CheckEdgeLenghts(const GraphComponent<Graph>& component) const {
@@ -41,7 +41,7 @@ class ComplexTipClipper {
     }
 
     void RemoveComplexTip(GraphComponent<Graph>& component) {
-        ComponentRemover<Graph> remover(g_);
+        ComponentRemover<Graph> remover(g_, removal_handler_);
         remover.DeleteComponent(component.edges().begin(), component.edges().end());
     }
 
@@ -56,8 +56,8 @@ class ComplexTipClipper {
     }
 
 public:
-    ComplexTipClipper(Graph& g, size_t max_length, const string& pics_folder = "") :
-            g_(g), max_length_(max_length), pics_folder_(pics_folder)
+    ComplexTipClipper(Graph& g, size_t max_length, const string& pics_folder = "", std::function<void(const set<EdgeId>&)> removal_handler = 0) :
+            g_(g), max_length_(max_length), pics_folder_(pics_folder), removal_handler_(removal_handler)
     { }
 
     bool Run() {
diff --git a/src/include/omni/coverage.hpp b/src/include/omni/coverage.hpp
index b303867..6d8a872 100644
--- a/src/include/omni/coverage.hpp
+++ b/src/include/omni/coverage.hpp
@@ -15,9 +15,13 @@
 #pragma once
 
 #include "logger/logger.hpp"
-
+#include <iostream>
+#include <vector>
+#include <algorithm>
+#include "../xmath.h"
 namespace omnigraph {
 
+using std::vector;
 //todo save/load absolute coverage
 template<class Graph>
 class CoverageIndex : public GraphActionHandler<Graph> {
@@ -298,15 +302,22 @@ class CoverageIndex : public GraphActionHandler<Graph> {
 //		SetCoverage(newEdge1, coverage1);
 //		SetCoverage(newEdge2, coverage2);
         double avg_cov = coverage(old_edge);
-        SetRawCoverage(new_edge1, max(1, (int) math::round(avg_cov * (double) this->g().length(new_edge1))));
-        SetRawCoverage(new_edge2, max(1, (int) math::round(avg_cov * (double) this->g().length(new_edge2))));
+        if (old_edge == g_.conjugate(old_edge)) {
+            int raw1 = std::max(1, (int) math::round(avg_cov * (double) this->g().length(new_edge1)));
+            SetRawCoverage(new_edge1, raw1);
+            SetRawCoverage(g_.conjugate(new_edge1), raw1);
+            SetRawCoverage(new_edge2, std::max(1, (int) math::round(avg_cov * (double) this->g().length(new_edge2))));
+        } else {
+            SetRawCoverage(new_edge1, std::max(1, (int) math::round(avg_cov * (double) this->g().length(new_edge1))));
+            SetRawCoverage(new_edge2, std::max(1, (int) math::round(avg_cov * (double) this->g().length(new_edge2))));
+        }
     }
 
-    void Save(EdgeId e, ostream& out) const {
+    void Save(EdgeId e, std::ostream& out) const {
         out << fmt::format("{:.6f}", coverage(e));
     }
 
-    void Load(EdgeId e, istream& in) {
+    void Load(EdgeId e, std::istream& in) {
         double cov;
         in >> cov;
         SetAvgCoverage(e, cov);
diff --git a/src/include/omni/dijkstra_tools/dijkstra_algorithm.hpp b/src/include/omni/dijkstra_tools/dijkstra_algorithm.hpp
index dfbd709..e66f0ec 100644
--- a/src/include/omni/dijkstra_tools/dijkstra_algorithm.hpp
+++ b/src/include/omni/dijkstra_tools/dijkstra_algorithm.hpp
@@ -4,13 +4,15 @@
 //* All Rights Reserved
 //* See file LICENSE for details.
 //***************************************************************************
-
 #pragma once
 
+#include "simple_tools.hpp"
 #include "dijkstra_settings.hpp"
-#include "standard_base.hpp"
 
 #include <queue>
+#include <vector>
+#include <set>
+#include <map>
 
 namespace omnigraph {
 
@@ -48,187 +50,193 @@ public:
 
 template<class Graph, class DijkstraSettings, typename distance_t = size_t>
 class Dijkstra {
-	typedef typename Graph::VertexId VertexId;
-	typedef typename Graph::EdgeId EdgeId;
-	typedef distance_t DistanceType;
-
-	typedef std::map<VertexId, distance_t> distances_map;
-	typedef typename distances_map::const_iterator distances_map_ci;
-	typedef typename std::priority_queue<element_t<Graph, distance_t>, vector<element_t<Graph, distance_t> >,
-	                                       ReverseDistanceComparator<element_t<Graph, distance_t> > > queue_t;
-
-	// constructor parameters
-	const Graph& graph_;
-	DijkstraSettings settings_;
-	const size_t max_vertex_number_;
-
-	// changeable parameters
-	bool finished_;
-	size_t vertex_number_;
-	bool vertex_limit_exceeded_;
-
-	// accumulative structures
-	distances_map distances_;
-	set<VertexId> processed_vertices_;
-	std::map<VertexId, pair<VertexId, EdgeId> > prev_vert_map_;
-
-	void initialize(VertexId start, queue_t &queue){
-		vertex_number_ = 0;
-		distances_.clear();
-		processed_vertices_.clear();
-		prev_vert_map_.clear();
-		set_finished(false);
-		init(start);
-		queue.push(element_t<Graph, distance_t>(0, start, VertexId(0), EdgeId(0)));
-		prev_vert_map_[start] = std::pair<VertexId, EdgeId>(VertexId(0), EdgeId(0));
-	}
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef distance_t DistanceType;
+
+    typedef std::map<VertexId, distance_t> distances_map;
+    typedef typename distances_map::const_iterator distances_map_ci;
+    typedef typename std::priority_queue<element_t<Graph, distance_t>, std::vector<element_t<Graph, distance_t>>,
+            ReverseDistanceComparator<element_t<Graph, distance_t>>> queue_t;
+
+    // constructor parameters
+    const Graph& graph_;
+    DijkstraSettings settings_;
+    const size_t max_vertex_number_;
+
+    // changeable parameters
+    bool finished_;
+    size_t vertex_number_;
+    bool vertex_limit_exceeded_;
+
+    // accumulative structures
+    distances_map distances_;
+    std::set<VertexId> processed_vertices_;
+    std::map<VertexId, pair<VertexId, EdgeId>> prev_vert_map_;
+
+    void Init(VertexId start, queue_t &queue) {
+        vertex_number_ = 0;
+        distances_.clear();
+        processed_vertices_.clear();
+        prev_vert_map_.clear();
+        set_finished(false);
+        settings_.Init(start);
+        queue.push(element_t<Graph, distance_t>(0, start, VertexId(0), EdgeId(0)));
+        prev_vert_map_[start] = std::pair<VertexId, EdgeId>(VertexId(0), EdgeId(0));
+    }
 
-public:
-	Dijkstra(const Graph &graph, DijkstraSettings settings, size_t max_vertex_number = size_t(-1)) :
-		graph_(graph),
-		settings_(settings),
-		max_vertex_number_(max_vertex_number),
-		finished_(false),
-		vertex_number_(0),
-		vertex_limit_exceeded_(false) { }
-
-	bool finished() const {
-		return finished_;
-	}
-
-	bool DistanceCounted(VertexId vertex) const {
-		return distances_.find(vertex) != distances_.end();
-	}
-
-	void init(VertexId start) {
-		settings_.Init(start);
-	}
-
-	distance_t GetDistance(VertexId vertex) const {
-		VERIFY(DistanceCounted(vertex));
-		return distances_.find(vertex)->second;
-	}
-
-	pair<distances_map_ci, distances_map_ci> GetDistances() const {
-		distances_map_ci begin = distances_.begin();
-		distances_map_ci end = distances_.end();
-		return make_pair(begin, end);
-	}
-
-	void set_finished(bool state) {
-		finished_ = state;
-	}
-
-	bool CheckPutVertex(VertexId vertex, EdgeId edge, distance_t length) const {
-		return settings_.CheckPutVertex(vertex, edge, length);
-	}
-
-	bool CheckProcessVertex(VertexId vertex, distance_t distance) {
+    void set_finished(bool state) {
+        finished_ = state;
+    }
+
+    bool CheckPutVertex(VertexId vertex, EdgeId edge, distance_t length) const {
+        return settings_.CheckPutVertex(vertex, edge, length);
+    }
+
+    bool CheckProcessVertex(VertexId vertex, distance_t distance) {
         ++vertex_number_;
-        if (vertex_number_ > max_vertex_number_){
+        if (vertex_number_ > max_vertex_number_) {
             vertex_limit_exceeded_ = true;
             return false;
         }
         return (vertex_number_ < max_vertex_number_) && settings_.CheckProcessVertex(vertex, distance);
-	}
-
-	distance_t GetLength(EdgeId edge) const {
-		return settings_.GetLength(edge);
-	}
-
-	std::vector<EdgeId> GetShortestPathTo(VertexId vertex){
-		std::vector<EdgeId> path;
-		if(prev_vert_map_.find(vertex) == prev_vert_map_.end())
-			return path;
-
-		VertexId curr_vertex = vertex;
-		VertexId prev_vertex = prev_vert_map_[vertex].first;
-		EdgeId edge = prev_vert_map_[curr_vertex].second;
-
-		while(prev_vertex != VertexId(0)){
-			if(graph_.EdgeStart(edge) == prev_vertex)
-				path.insert(path.begin(), edge);
-			else
-				path.push_back(edge);
-			curr_vertex = prev_vertex;
-			prev_vertex = prev_vert_map_[curr_vertex].first;
-			edge = prev_vert_map_[curr_vertex].second;
-		}
-		return path;
-	}
-
-	void AddNeighboursToQueue(VertexId cur_vertex, distance_t cur_dist, queue_t& queue) {
-		auto neigh_iterator = settings_.GetIterator(cur_vertex);
-		while(neigh_iterator.HasNext()){
-			TRACE("Checking new neighbour of vertex " << graph_.str(cur_vertex) << " started");
-			auto cur_pair = neigh_iterator.Next();
-			if (!DistanceCounted(cur_pair.vertex)) {
-				TRACE("Adding new entry to queue");
-				distance_t new_dist = GetLength(cur_pair.edge) + cur_dist;
-				TRACE("Entry: vertex " << graph_.str(cur_vertex) << " distance " << new_dist);
-				if (CheckPutVertex(cur_pair.vertex, cur_pair.edge, new_dist)) {
-					TRACE("CheckPutVertex returned true and new entry is added");
-					queue.push(element_t<Graph, distance_t>(new_dist, cur_pair.vertex,
-							cur_vertex, cur_pair.edge));
-				}
-			}
-			TRACE("Checking new neighbour of vertex " << graph_.str(cur_vertex) << " finished");
-		}
-		TRACE("All neighbours of vertex " << graph_.str(cur_vertex) << " processed");
-	}
-
-	void run(VertexId start) {
-		TRACE("Starting dijkstra run from vertex " << graph_.str(start));
-		queue_t queue;
-		initialize(start, queue);
-		TRACE("Priority queue initialized. Starting search");
-
-		while (!queue.empty() && !finished()) {
-			TRACE("Dijkstra iteration started");
-			const element_t<Graph, distance_t>& next = queue.top();
-			distance_t distance = next.distance;
-			VertexId vertex = next.curr_vertex;
-
-			prev_vert_map_[vertex] = std::pair<VertexId, EdgeId>(next.prev_vertex, next.edge_between);
-			queue.pop();
-			TRACE("Vertex " << graph_.str(vertex) << " with distance " << distance << " fetched from queue");
-
-			if (DistanceCounted(vertex)){
-				TRACE("Distance to vertex " << graph_.str(vertex) << " already counted. Proceeding to next queue entry.");
-				continue;
-			}
-			distances_.insert(make_pair(vertex, distance));
-
-		    TRACE("Vertex " << graph_.str(vertex) << " is found to be at distance "
-		              << distance << " from vertex " << graph_.str(start));
-			if (!CheckProcessVertex(vertex, distance)){
-				TRACE("Check for processing vertex failed. Proceeding to the next queue entry.");
-				continue;
-			}
-			processed_vertices_.insert(vertex);
-			AddNeighboursToQueue(vertex, distance, queue);
-		}
-		set_finished(true);
-		TRACE("Finished dijkstra run from vertex " << graph_.str(start));
-  }
+    }
 
-  vector<VertexId> ReachedVertices() const {
-    vector<VertexId> result;
-    for (auto it = distances_.begin(); it != distances_.end(); ++it) {
-      result.push_back(it->first);
+    distance_t GetLength(EdgeId edge) const {
+        return settings_.GetLength(edge);
     }
-    return result;
-  }
 
-  const set<VertexId>& ProcessedVertices() {
-    return processed_vertices_;
-  }
+    void AddNeighboursToQueue(VertexId cur_vertex, distance_t cur_dist, queue_t& queue) {
+        auto neigh_iterator = settings_.GetIterator(cur_vertex);
+        while (neigh_iterator.HasNext()) {
+            TRACE("Checking new neighbour of vertex " << graph_.str(cur_vertex) << " started");
+            auto cur_pair = neigh_iterator.Next();
+            if (!DistanceCounted(cur_pair.vertex)) {
+                TRACE("Adding new entry to queue");
+                distance_t new_dist = GetLength(cur_pair.edge) + cur_dist;
+                TRACE("Entry: vertex " << graph_.str(cur_vertex) << " distance " << new_dist);
+                if (CheckPutVertex(cur_pair.vertex, cur_pair.edge, new_dist)) {
+                    TRACE("CheckPutVertex returned true and new entry is added");
+                    queue.push(element_t<Graph, distance_t>(new_dist, cur_pair.vertex,
+                                    cur_vertex, cur_pair.edge));
+                }
+            }
+            TRACE("Checking new neighbour of vertex " << graph_.str(cur_vertex) << " finished");
+        }
+        TRACE("All neighbours of vertex " << graph_.str(cur_vertex) << " processed");
+    }
+
+public:
+    Dijkstra(const Graph &graph, DijkstraSettings settings, size_t max_vertex_number = size_t(-1)) :
+        graph_(graph),
+        settings_(settings),
+        max_vertex_number_(max_vertex_number),
+        finished_(false),
+        vertex_number_(0),
+        vertex_limit_exceeded_(false) {}
+
+    Dijkstra(Dijkstra&& /*other*/) = default; 
+
+    Dijkstra& operator=(Dijkstra&& /*other*/) = default;
+
+    Dijkstra(const Dijkstra& /*other*/) = delete; 
+
+    Dijkstra& operator=(const Dijkstra& /*other*/) = delete;
+
+    bool finished() const {
+        return finished_;
+    }
+
+    bool DistanceCounted(VertexId vertex) const {
+        return distances_.find(vertex) != distances_.end();
+    }
+
+    distance_t GetDistance(VertexId vertex) const {
+        VERIFY(DistanceCounted(vertex));
+        return distances_.find(vertex)->second;
+    }
+
+    std::pair<distances_map_ci, distances_map_ci> GetDistances() const {
+        distances_map_ci begin = distances_.begin();
+        distances_map_ci end = distances_.end();
+        return make_pair(begin, end);
+    }
+
+    void Run(VertexId start) {
+        TRACE("Starting dijkstra run from vertex " << graph_.str(start));
+        queue_t queue;
+        Init(start, queue);
+        TRACE("Priority queue initialized. Starting search");
+
+        while (!queue.empty() && !finished()) {
+            TRACE("Dijkstra iteration started");
+            const element_t<Graph, distance_t>& next = queue.top();
+            distance_t distance = next.distance;
+            VertexId vertex = next.curr_vertex;
+
+            prev_vert_map_[vertex] = std::pair<VertexId, EdgeId>(next.prev_vertex, next.edge_between);
+            queue.pop();
+            TRACE("Vertex " << graph_.str(vertex) << " with distance " << distance << " fetched from queue");
+
+            if (DistanceCounted(vertex)) {
+                TRACE("Distance to vertex " << graph_.str(vertex) << " already counted. Proceeding to next queue entry.");
+                continue;
+            }
+            distances_.insert(make_pair(vertex, distance));
+
+            TRACE("Vertex " << graph_.str(vertex) << " is found to be at distance "
+                    << distance << " from vertex " << graph_.str(start));
+            if (!CheckProcessVertex(vertex, distance)) {
+                TRACE("Check for processing vertex failed. Proceeding to the next queue entry.");
+                continue;
+            }
+            processed_vertices_.insert(vertex);
+            AddNeighboursToQueue(vertex, distance, queue);
+        }
+        set_finished(true);
+        TRACE("Finished dijkstra run from vertex " << graph_.str(start));
+    }
+
+    std::vector<EdgeId> GetShortestPathTo(VertexId vertex) {
+        std::vector<EdgeId> path;
+        if (prev_vert_map_.find(vertex) == prev_vert_map_.end())
+            return path;
+
+        VertexId curr_vertex = vertex;
+        VertexId prev_vertex = get(prev_vert_map_, vertex).first;
+        EdgeId edge = get(prev_vert_map_, curr_vertex).second;
+
+        while (prev_vertex != VertexId(0)) {
+            if (graph_.EdgeStart(edge) == prev_vertex)
+                path.insert(path.begin(), edge);
+            else
+                path.push_back(edge);
+            curr_vertex = prev_vertex;
+            const auto& prev_v_e = get(prev_vert_map_, curr_vertex);
+            prev_vertex = prev_v_e.first;
+            edge = prev_v_e.second;
+        }
+        return path;
+    }
+
+    vector<VertexId> ReachedVertices() const {
+        vector<VertexId> result;
+        for (auto it = distances_.begin(); it != distances_.end(); ++it) {
+            result.push_back(it->first);
+        }
+        return result;
+    }
+
+    const set<VertexId>& ProcessedVertices() const {
+        return processed_vertices_;
+    }
+
+    bool VertexLimitExceeded() const {
+        return vertex_limit_exceeded_;
+    }
 
-  bool VertexLimitExceeded(){
-	  return vertex_limit_exceeded_;
-  }
 private:
-  DECL_LOGGER("Dijkstra");
+    DECL_LOGGER("Dijkstra");
 };
 
 template<class Graph>
@@ -239,7 +247,7 @@ class DistanceCounter {
 		  LengthCalculator<Graph>,
   	  	  VertexProcessChecker<Graph>,
   	  	  VertexPutChecker<Graph>,
-  	  	  ForwardNeighbourIteratorFactory<Graph> >  BaseDijkstraSettings;
+  	  	  ForwardNeighbourIteratorFactory<Graph>>  BaseDijkstraSettings;
 
 public:
   DistanceCounter(const Graph& graph) :
diff --git a/src/include/omni/dijkstra_tools/dijkstra_helper.hpp b/src/include/omni/dijkstra_tools/dijkstra_helper.hpp
index 0540762..01505b7 100644
--- a/src/include/omni/dijkstra_tools/dijkstra_helper.hpp
+++ b/src/include/omni/dijkstra_tools/dijkstra_helper.hpp
@@ -40,12 +40,12 @@ public:
 
 	typedef Dijkstra<Graph, BoundedDijkstraSettings> BoundedDijkstra;
 
-	static BoundedDijkstra CreateBoundedDijkstra(const Graph &graph, size_t bound,
-			size_t max_vertex_number = size_t(-1)){
+	static BoundedDijkstra CreateBoundedDijkstra(const Graph &graph, size_t length_bound,
+			size_t max_vertex_number = -1ul){
 		return BoundedDijkstra(graph, BoundedDijkstraSettings(
         				LengthCalculator<Graph>(graph),
-        				BoundProcessChecker<Graph>(bound),
-        				BoundPutChecker<Graph>(bound),
+        				BoundProcessChecker<Graph>(length_bound),
+        				BoundPutChecker<Graph>(length_bound),
         				ForwardNeighbourIteratorFactory<Graph>(graph)),
 				max_vertex_number);
 	}
@@ -132,34 +132,32 @@ public:
 	static CountingDijkstra CreateCountingDijkstra(const Graph &graph, size_t max_size,
 			size_t edge_length_bound, size_t max_vertex_number = size_t(-1)){
 		return CountingDijkstra(graph, UnorientCountingDijkstraSettings(graph,
-    					shared_ptr<LengthCalculator<Graph> >(),
-    					shared_ptr<VertexProcessChecker<Graph> >(),
-    					shared_ptr<VertexPutChecker<Graph> >(),
     					UnorientedNeighbourIteratorFactory<Graph>(graph),
     					max_size, edge_length_bound), max_vertex_number);
 	}
 
-	//------------------------------
-	// targeted bounded dijkstra
-	//------------------------------
-
-	typedef ComposedDijkstraSettings<Graph,
-			LengthCalculator<Graph>,
-			BoundedVertexTargetedProcessChecker<Graph>,
-			BoundPutChecker<Graph>,
-			ForwardNeighbourIteratorFactory<Graph> > TargeredBoundedDijkstraSettings;
 
-	typedef Dijkstra<Graph, TargeredBoundedDijkstraSettings> TargeredBoundedDijkstra;
+    //------------------------------
+    // targeted bounded dijkstra
+    //------------------------------
 
-	static TargeredBoundedDijkstra CreateTargeredBoundedDijkstra(const Graph &graph,
-			VertexId target_vertex, size_t bound, size_t max_vertex_number = size_t(-1)){
-		return TargeredBoundedDijkstra(graph,
-				TargeredBoundedDijkstraSettings(LengthCalculator<Graph>(graph),
-						BoundedVertexTargetedProcessChecker<Graph>(target_vertex, bound),
-						BoundPutChecker<Graph>(bound),
-						ForwardNeighbourIteratorFactory<Graph>(graph)),
-				max_vertex_number);
-	}
+    typedef ComposedDijkstraSettings<Graph,
+            LengthCalculator<Graph>,
+            BoundedVertexTargetedProcessChecker<Graph>,
+            BoundPutChecker<Graph>,
+            ForwardNeighbourIteratorFactory<Graph> > TargeredBoundedDijkstraSettings;
+
+    typedef Dijkstra<Graph, TargeredBoundedDijkstraSettings> TargeredBoundedDijkstra;
+
+    static TargeredBoundedDijkstra CreateTargeredBoundedDijkstra(const Graph &graph,
+            VertexId target_vertex, size_t bound, size_t max_vertex_number = size_t(-1)){
+        return TargeredBoundedDijkstra(graph,
+                TargeredBoundedDijkstraSettings(LengthCalculator<Graph>(graph),
+                        BoundedVertexTargetedProcessChecker<Graph>(target_vertex, bound),
+                        BoundPutChecker<Graph>(bound),
+                        ForwardNeighbourIteratorFactory<Graph>(graph)),
+                max_vertex_number);
+    }
 };
 
 }
diff --git a/src/include/omni/dijkstra_tools/dijkstra_settings.hpp b/src/include/omni/dijkstra_tools/dijkstra_settings.hpp
index a581c74..38897b9 100644
--- a/src/include/omni/dijkstra_tools/dijkstra_settings.hpp
+++ b/src/include/omni/dijkstra_tools/dijkstra_settings.hpp
@@ -70,9 +70,6 @@ class CountingDijkstraSettings {
     typedef typename Graph::EdgeId EdgeId;
 
     const Graph &graph_;
-    shared_ptr<LengthCalculator<Graph> > len_calc_;
-    shared_ptr<VertexProcessChecker<Graph> > vert_proc_checker_;
-    shared_ptr<VertexPutChecker<Graph> > vert_put_checker_;
 
     NeighbourIteratorFactory neigh_iter_factory_;
     static const distance_t inf = 100000000;
@@ -82,15 +79,9 @@ class CountingDijkstraSettings {
 
 public:
     CountingDijkstraSettings(const Graph &graph,
-    	    shared_ptr<LengthCalculator<Graph> > len_calc,
-    	    shared_ptr<VertexProcessChecker<Graph> > vert_proc_checker,
-    	    shared_ptr<VertexPutChecker<Graph> > vert_put_checker,
     		NeighbourIteratorFactory neigh_iter_factory,
     		size_t max_size, size_t edge_length_bound) :
        	graph_(graph),
-       	len_calc_(len_calc),
-       	vert_proc_checker_(vert_proc_checker),
-       	vert_put_checker_(vert_put_checker),
        	neigh_iter_factory_(neigh_iter_factory),
         max_size_(max_size),
         edge_length_bound_(edge_length_bound),
@@ -101,22 +92,16 @@ public:
     }
 
 	distance_t GetLength(EdgeId edge) const{
-		if(len_calc_)
-			return len_calc_->GetLength(edge);
 		if (graph_.length(edge) <= edge_length_bound_)
 			return graph_.length(edge);
         return inf;
 	}
 
-	bool CheckProcessVertex(VertexId vertex, distance_t distance){
-		if(vert_proc_checker_)
-			return vert_proc_checker_->Check(vertex, distance);
+	bool CheckProcessVertex(VertexId , distance_t ){
 		return current_ < max_size_;
 	}
 
-	bool CheckPutVertex(VertexId vertex, EdgeId edge, distance_t length) const{
-		if(vert_put_checker_)
-			return vert_put_checker_->Check(vertex, edge, length);
+	bool CheckPutVertex(VertexId , EdgeId edge, distance_t ) const{
         if (current_ < max_size_)
             ++current_;
         if (current_ < max_size_ && GetLength(edge) < inf)
diff --git a/src/include/omni/dijkstra_tools/vertex_process_checker.hpp b/src/include/omni/dijkstra_tools/vertex_process_checker.hpp
index 462adcc..4cddc98 100644
--- a/src/include/omni/dijkstra_tools/vertex_process_checker.hpp
+++ b/src/include/omni/dijkstra_tools/vertex_process_checker.hpp
@@ -14,21 +14,22 @@ class VertexProcessChecker {
 	typedef typename Graph::VertexId VertexId;
 	typedef typename Graph::EdgeId EdgeId;
 public:
-	VertexProcessChecker() { }
+	VertexProcessChecker() {}
 	virtual bool Check(VertexId, distance_t) { return true; }
-	virtual ~VertexProcessChecker() { }
+	virtual ~VertexProcessChecker() {}
 };
 
 template<class Graph, typename distance_t = size_t>
 class BoundProcessChecker : public VertexProcessChecker<Graph, distance_t> {
 	typedef typename Graph::VertexId VertexId;
 	typedef typename Graph::EdgeId EdgeId;
-    const distance_t bound_;
+    const distance_t distance_bound_;
 public:
-    BoundProcessChecker(distance_t bound) : VertexProcessChecker<Graph, distance_t>(),
-    	bound_(bound) { }
-    bool Check(VertexId, distance_t distance) {
-    	return distance <= bound_;
+    BoundProcessChecker(distance_t distance_bound) :
+		distance_bound_(distance_bound) {}
+
+    bool Check(VertexId, distance_t distance) override {
+    	return distance <= distance_bound_;
     }
 };
 
@@ -37,33 +38,35 @@ class ZeroLengthProcessChecker : public VertexProcessChecker<Graph, distance_t>
 	typedef typename Graph::VertexId VertexId;
 	typedef typename Graph::EdgeId EdgeId;
 public:
-	ZeroLengthProcessChecker() : VertexProcessChecker<Graph, distance_t>() { }
-    bool Check(VertexId, distance_t distance) {
+	ZeroLengthProcessChecker() {}
+
+    bool Check(VertexId, distance_t distance) override {
     	return distance == 0;
     }
 };
 
 template<class Graph, typename distance_t = size_t>
-class BoundedVertexTargetedProcessChecker : public VertexProcessChecker<Graph, distance_t> {
-	typedef typename Graph::VertexId VertexId;
-	typedef typename Graph::EdgeId EdgeId;
-	VertexId target_vertex_;
-	size_t bound_;
+class BoundedVertexTargetedProcessChecker : public BoundProcessChecker<Graph, distance_t> {
+    typedef BoundProcessChecker<Graph, distance_t> base;
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
 
-	bool target_reached_;
+    VertexId target_vertex_;
+    bool target_reached_;
 public:
-	BoundedVertexTargetedProcessChecker(VertexId target_vertex, size_t bound) :
-		VertexProcessChecker<Graph, distance_t>(),
-		target_vertex_(target_vertex),
-		bound_(bound),
-		target_reached_(false) { }
+    BoundedVertexTargetedProcessChecker(VertexId target_vertex, size_t bound) :
+        base(bound),
+        target_vertex_(target_vertex),
+        target_reached_(false) { }
 
-    bool Check(VertexId vertex, distance_t distance) {
-    	if(vertex == target_vertex_)
-    		target_reached_ = true;
-    	if(!target_reached_)
-    		return distance <= bound_;
-    	return false;
+    bool Check(VertexId vertex, distance_t distance) override {
+        if (vertex == target_vertex_)
+            target_reached_ = true;
+        if (target_reached_)
+            return false;
+        else
+            return base::Check(vertex, distance);
     }
 };
+
 }
diff --git a/src/include/omni/edges_position_handler.hpp b/src/include/omni/edges_position_handler.hpp
index 69ae308..df98943 100644
--- a/src/include/omni/edges_position_handler.hpp
+++ b/src/include/omni/edges_position_handler.hpp
@@ -167,6 +167,10 @@ public:
 	}
 
 	virtual void HandleSplit(EdgeId oldEdge, EdgeId newEdge1, EdgeId newEdge2) {
+	    if (oldEdge == this->g().conjugate(oldEdge)) {
+	        WARN("EdgesPositionHandler does not support self-conjugate splits");
+	        return;
+	    }
 		if (edges_positions_.count(oldEdge) != 0) {
 			auto contig_map = edges_positions_[oldEdge];
 			AddAndShiftEdgePositions(newEdge1, contig_map, 0);
diff --git a/src/include/omni/erroneous_connection_remover.hpp b/src/include/omni/erroneous_connection_remover.hpp
index 2dbeac6..28a4dc1 100644
--- a/src/include/omni/erroneous_connection_remover.hpp
+++ b/src/include/omni/erroneous_connection_remover.hpp
@@ -25,39 +25,36 @@
 namespace omnigraph {
 
 template<class Graph>
-shared_ptr<func::Predicate<typename Graph::EdgeId>> AddAlternativesPresenceCondition(const Graph& g,
-                                                                  shared_ptr<func::Predicate<typename Graph::EdgeId>> condition) {
-    return func::And<typename Graph::EdgeId>(
-            make_shared<AlternativesPresenceCondition<Graph>>(g),
-            condition);
+pred::TypedPredicate<typename Graph::EdgeId>
+NecessaryECCondition(const Graph& g, size_t max_length, double max_coverage) {
+    return AddAlternativesPresenceCondition(g, pred::And(LengthUpperBound<Graph>(g, max_length),
+                                                        CoverageUpperBound<Graph>(g, max_coverage)));
 }
 
 template<class Graph>
 bool RemoveErroneousEdgesInCoverageOrder(Graph &g,
-                                         shared_ptr<func::Predicate<typename Graph::EdgeId>> removal_condition,
+                                         pred::TypedPredicate<typename Graph::EdgeId> removal_condition,
                                          double max_coverage,
                                          std::function<void(typename Graph::EdgeId)> removal_handler) {
-
     omnigraph::EdgeRemovingAlgorithm<Graph> erroneous_edge_remover(g,
                                                                    AddAlternativesPresenceCondition(g, removal_condition),
                                                                    removal_handler);
 
     return erroneous_edge_remover.Run(CoverageComparator<Graph>(g),
-                                          make_shared<CoverageUpperBound<Graph>>(g, max_coverage));
+                                      CoverageUpperBound<Graph>(g, max_coverage));
 }
 
 template<class Graph>
 bool RemoveErroneousEdgesInLengthOrder(Graph &g,
-                                       shared_ptr<func::Predicate<typename Graph::EdgeId>> removal_condition,
+                                       pred::TypedPredicate<typename Graph::EdgeId> removal_condition,
                                        size_t max_length,
                                        std::function<void(typename Graph::EdgeId)> removal_handler) {
-
     omnigraph::EdgeRemovingAlgorithm<Graph> erroneous_edge_remover(g,
                                                                    AddAlternativesPresenceCondition(g, removal_condition),
                                                                    removal_handler);
 
     return erroneous_edge_remover.Run(LengthComparator<Graph>(g),
-                                          make_shared<LengthUpperBound<Graph>>(g, max_length));
+                                      LengthUpperBound<Graph>(g, max_length));
 }
 
 template<class Graph>
@@ -163,7 +160,7 @@ class ThornCondition : public EdgeCondition<Graph> {
             return false;
 
         auto dij = DijkstraHelper<Graph>::CreateBoundedDijkstra(this->g(), dijkstra_depth_);
-        dij.run(this->g().EdgeStart(e));
+        dij.Run(this->g().EdgeStart(e));
         vector<VertexId> reached = dij.ReachedVertices();
         for (auto it = reached.begin(); it != reached.end(); ++it) {
             if (*it != this->g().EdgeEnd(e)
@@ -218,11 +215,10 @@ class ThornCondition : public EdgeCondition<Graph> {
 };
 
 template<class Graph>
-class MultiplicityCountingCondition : public UniquenessPlausabilityCondition<
-        Graph> {
+class MultiplicityCountingCondition : public UniquenessPlausabilityCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef shared_ptr<Predicate<EdgeId>> EdgePredicate;
+    typedef pred::TypedPredicate<EdgeId> EdgePredicate;
     typedef UniquenessPlausabilityCondition<Graph> base;
 
     MultiplicityCounter<Graph> multiplicity_counter_;
@@ -239,7 +235,7 @@ public:
     }
 
     bool CheckPlausibility(EdgeId e, bool) const {
-        return plausiblity_condition_->Check(e);
+        return plausiblity_condition_(e);
     }
 
     MultiplicityCountingCondition(const Graph& g, size_t uniqueness_length,
@@ -270,10 +266,10 @@ private:
 	double relative_threshold_;
 	const AbstractFlankingCoverage<Graph> &flanking_coverage_;
 	EdgeRemover<Graph> edge_remover_;
-	shared_ptr<MultiplicityCountingCondition<Graph>> condition_;
+	MultiplicityCountingCondition<Graph> condition_;
 private:
 	void RemoveHiddenEC(EdgeId edge) {
-		if (this->g().length(edge) <= this->g().k())
+		if (this->g().length(edge) <= this->g().k() || (edge == this->g().conjugate(edge) && this->g().length(edge) <= 2 * this->g().k()))
 			edge_remover_.DeleteEdge(edge);
 		else {
 			auto split_result = this->g().SplitEdge(edge, this->g().k());
@@ -282,9 +278,9 @@ private:
 	}
 
 	void RemoveHiddenECWithNoCompression(EdgeId edge) {
-		if (this->g().length(edge) <= this->g().k())
+		if (this->g().length(edge) <= this->g().k() || (edge == this->g().conjugate(edge) && this->g().length(edge) <= 2 * this->g().k())) {
 			edge_remover_.DeleteEdgeWithNoCompression(edge);
-		else {
+		} else {
 			auto split_result = this->g().SplitEdge(edge, this->g().k());
 			edge_remover_.DeleteEdgeWithNoCompression(split_result.first);
 		}
@@ -322,7 +318,7 @@ private:
 			return false;
 		}
 		vector<EdgeId> edges(this->g().out_begin(v), this->g().out_end(v));
-		return (edges.size() == 2 && this->g().conjugate(edges[0]) == edges[1] && condition_->CheckUniqueness(this->g().GetUniqueIncomingEdge(v), false)) || this->g().length(this->g().GetUniqueIncomingEdge(v)) >= uniqueness_length_;
+		return (edges.size() == 2 && this->g().conjugate(edges[0]) == edges[1] && condition_.CheckUniqueness(this->g().GetUniqueIncomingEdge(v), false)) || this->g().length(this->g().GetUniqueIncomingEdge(v)) >= uniqueness_length_;
 	}
 
 	bool ProcessEdge(EdgeId e) {
@@ -344,13 +340,42 @@ public:
               unreliability_threshold_(unreliability_threshold * ec_threshold), ec_threshold_(ec_threshold),
               relative_threshold_(relative_threshold), flanking_coverage_(flanking_coverage),
               edge_remover_(g, removal_handler),
-              condition_(new MultiplicityCountingCondition<Graph>(g, uniqueness_length,
-                              make_shared<func::AlwaysTrue<EdgeId>>())) {
-
+              condition_(g, uniqueness_length, pred::AlwaysTrue<EdgeId>()) {
     }
 
 private:
 	DECL_LOGGER("HiddenECRemover");
 };
 
+template<class Graph>
+class SelfConjugateDisruptor: public EdgeProcessingAlgorithm<Graph> {
+    typedef EdgeProcessingAlgorithm<Graph> base;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    EdgeRemover<Graph> edge_remover_;
+protected:
+
+    bool ProcessEdge(EdgeId e) override {
+        if (e == this->g().conjugate(e)) {
+            TRACE("Disrupting self-conjugate edge " << this->g().str(e));
+            EdgeId to_del = e;
+            size_t len = this->g().length(e);
+            if (len > 1) {
+                to_del = this->g().SplitEdge(e, len / 2).second;
+            }
+            edge_remover_.DeleteEdge(to_del);
+            return true;
+        }
+        return false;
+    }
+
+public:
+    SelfConjugateDisruptor(Graph& g,
+                           std::function<void(EdgeId)> removal_handler = 0)
+            : base(g, true), edge_remover_(g, removal_handler) {
+    }
+
+private:
+    DECL_LOGGER("SelfConjugateDisruptor");
+};
 }
diff --git a/src/include/omni/graph_component.hpp b/src/include/omni/graph_component.hpp
index 37ea6a8..613b6e4 100644
--- a/src/include/omni/graph_component.hpp
+++ b/src/include/omni/graph_component.hpp
@@ -102,8 +102,10 @@ public:
 	}
 
 	//Full graph component
-	GraphComponent(const Graph &g, const string &name = "") : graph_(g), name_(name) {
-		Fill(g.begin(), g.end());
+	GraphComponent(const Graph &g, bool fill = true, const string &name = "") : graph_(g), name_(name) {
+		if(fill) {
+	        Fill(g.begin(), g.end());
+		}
 	}
 
 	//may be used for conjugate closure
diff --git a/src/include/omni/graph_core.hpp b/src/include/omni/graph_core.hpp
index b7be217..dbf7149 100644
--- a/src/include/omni/graph_core.hpp
+++ b/src/include/omni/graph_core.hpp
@@ -11,9 +11,12 @@
 #include "verify.hpp"
 #include "logger/logger.hpp"
 #include "order_and_law.hpp"
+#include <boost/iterator/iterator_facade.hpp>
+#include "../simple_tools.hpp"
 
 namespace omnigraph {
 
+using std::vector;
 template<class DataMaster>
 class GraphCore;
 
@@ -203,7 +206,6 @@ private:
     }
 
     void AddOutgoingEdge(EdgeId e) {
-        VERIFY(this != 0);
         outgoing_edges_.insert(std::upper_bound(outgoing_edges_.begin(), outgoing_edges_.end(), e), e);
         //outgoing_edges_.push_back(e);
     }
@@ -284,6 +286,10 @@ private:
        delete conjugate.get();
    }
 
+   bool AdditionalCompressCondition(VertexId v) const {
+       return !(EdgeEnd(GetUniqueOutgoingEdge(v)) == conjugate(v) && EdgeStart(GetUniqueIncomingEdge(v)) == conjugate(v));
+   }
+
 protected:
 
    VertexId CreateVertex(const VertexData& data1, const VertexData& data2, restricted::IdDistributor& id_distributor) {
@@ -372,6 +378,7 @@ protected:
     }
 
     void HiddenDeleteEdge(EdgeId edge) {
+        DEBUG("Hidden delete edge " << edge.int_id());
         EdgeId rcEdge = conjugate(edge);
         VertexId rcStart = conjugate(edge->end());
         VertexId start = conjugate(rcEdge->end());
@@ -383,6 +390,13 @@ protected:
         delete edge.get();
     }
 
+    void HiddenDeletePath(const std::vector<EdgeId>& edgesToDelete, const std::vector<VertexId>& verticesToDelete) {
+        for (auto it = edgesToDelete.begin(); it != edgesToDelete.end(); ++it)
+            HiddenDeleteEdge(*it);
+        for (auto it = verticesToDelete.begin(); it != verticesToDelete.end(); ++it)
+            HiddenDeleteVertex(*it);
+    }
+
 public:
 
     GraphCore(const DataMaster& master) : master_(master) {
@@ -502,13 +516,6 @@ public:
 
     //////////////////////shortcut methods
 
-    void HiddenDeletePath(const std::vector<EdgeId>& edgesToDelete, const std::vector<VertexId>& verticesToDelete) {
-        for (auto it = edgesToDelete.begin(); it != edgesToDelete.end(); ++it)
-            HiddenDeleteEdge(*it);
-        for (auto it = verticesToDelete.begin(); it != verticesToDelete.end(); ++it)
-            HiddenDeleteVertex(*it);
-    }
-
     std::vector<EdgeId> IncidentEdges(VertexId v) const {
         vector<EdgeId> answer;
         push_back_all(answer, IncomingEdges(v));
@@ -541,10 +548,6 @@ public:
     bool IsDeadStart(VertexId v) const {
         return IncomingEdgeCount(v) == 0;
     }
-
-    bool AdditionalCompressCondition(VertexId v) const {
-        return !(EdgeEnd(GetUniqueOutgoingEdge(v)) == conjugate(v) && EdgeStart(GetUniqueIncomingEdge(v)) == conjugate(v));
-    }
     
     bool CanCompressVertex(VertexId v) const {
         //      TRACE("Compress vertex check: ");
diff --git a/src/include/omni/graph_iterators.hpp b/src/include/omni/graph_iterators.hpp
index ae4b821..dab55f6 100644
--- a/src/include/omni/graph_iterators.hpp
+++ b/src/include/omni/graph_iterators.hpp
@@ -9,8 +9,9 @@
 
 #include "adt/queue_iterator.hpp"
 #include "io/read_processor.hpp"
+#include "pred.hpp"
 #include "action_handlers.hpp"
-
+#include "simple_tools.hpp"
 #include <boost/iterator/iterator_facade.hpp>
 
 namespace omnigraph {
@@ -26,11 +27,14 @@ class SmartIterator : public GraphActionHandler<Graph> {
     DynamicQueueIterator<ElementId, Comparator> inner_it_;
     bool add_new_;
     bool canonical_only_;
+    //todo think of checking it in HandleAdd
+    pred::TypedPredicate<ElementId> add_condition_;
 
 protected:
 
     void push(const ElementId& el) {
-        if (!canonical_only_ || el <= this->g().conjugate(el)) {
+        if ((!canonical_only_ || el <= this->g().conjugate(el)) &&
+            add_condition_(el)) {
             inner_it_.push(el);
         }
     }
@@ -48,19 +52,22 @@ protected:
         }
     }
 
-    SmartIterator(const Graph &g, const string &name, bool add_new,
-                  const Comparator& comparator, bool canonical_only)
+    void clear() {
+        inner_it_.clear();
+    }
+
+    SmartIterator(const Graph &g, const std::string &name, bool add_new,
+                  const Comparator& comparator, bool canonical_only,
+                  pred::TypedPredicate<ElementId> add_condition = pred::AlwaysTrue<ElementId>())
             : base(g, name),
               inner_it_(comparator),
               add_new_(add_new),
-              canonical_only_(canonical_only) {
+              canonical_only_(canonical_only),
+              add_condition_(add_condition) {
     }
 
 public:
 
-    virtual ~SmartIterator() {
-    }
-
     bool canonical_only() const {
         return canonical_only_;
     }
@@ -69,6 +76,10 @@ public:
         return inner_it_.IsEnd();
     }
 
+    size_t size() const {
+		return inner_it_.size();
+	}
+
     ElementId operator*() {
         return *inner_it_;
     }
@@ -106,16 +117,20 @@ class SmartSetIterator : public SmartIterator<Graph, ElementId, Comparator> {
 
 public:
     SmartSetIterator(const Graph &g,
+                     bool add_new = false,
                      const Comparator& comparator = Comparator(),
-                     bool canonical_only = false)
-            : base(g, "SmartSet " + ToString(this), false, comparator, canonical_only) {
+                     bool canonical_only = false,
+                     pred::TypedPredicate<ElementId> add_condition = pred::AlwaysTrue<ElementId>())
+            : base(g, "SmartSet " + ToString(this), add_new, comparator, canonical_only, add_condition) {
     }
 
     template<class Iterator>
     SmartSetIterator(const Graph &g, Iterator begin, Iterator end,
+                     bool add_new = false,
                      const Comparator& comparator = Comparator(),
-                     bool canonical_only = false)
-            : SmartSetIterator(g, comparator, canonical_only) {
+                     bool canonical_only = false,
+                     pred::TypedPredicate<ElementId> add_condition = pred::AlwaysTrue<ElementId>())
+            : SmartSetIterator(g, add_new, comparator, canonical_only, add_condition) {
         insert(begin, end);
     }
 
@@ -124,6 +139,13 @@ public:
         base::insert(begin, end);
     }
 
+    void push(const ElementId& el) {
+        base::push(el);
+    }
+
+    void clear() {
+        base::clear();
+    }
 };
 
 /**
@@ -273,7 +295,6 @@ class SmartEdgeIterator : public SmartIterator<Graph, typename Graph::EdgeId, Co
     }
 
   public:
-    //todo think of some parallel simplif problem O_o
     SmartEdgeIterator(const Graph &g, Comparator comparator = Comparator(),
                       bool canonical_only = false)
             : SmartIterator<Graph, EdgeId, Comparator>(
@@ -329,22 +350,35 @@ class ParallelEdgeProcessor {
 };
 
 //todo move out
-template<class Graph>
-class ParallelIterationHelper {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    typedef typename Graph::VertexIt const_vertex_iterator;
+template<class Graph, class ElementId>
+class IterationHelper {
+};
 
+template<class Graph>
+class IterationHelper<Graph, typename Graph::VertexId> {
     const Graph& g_;
 public:
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::VertexIt const_vertex_iterator;
 
-    ParallelIterationHelper(const Graph& g)
+    IterationHelper(const Graph& g)
             : g_(g) {
+    }
+
+    const_vertex_iterator begin() const {
+        return g_.begin();
+    }
 
+    const_vertex_iterator end() const {
+        return g_.end();
     }
 
-    std::vector<const_vertex_iterator> VertexChunks(size_t chunk_cnt) const {
+    std::vector<const_vertex_iterator> Chunks(size_t chunk_cnt) const {
         VERIFY(chunk_cnt > 0);
+        if (chunk_cnt == 1) {
+            return {begin(), end()};
+        }
+
         //trying to split vertices into equal chunks, leftovers put into first chunk
         vector<const_vertex_iterator> answer;
         size_t vertex_cnt = g_.size();
@@ -371,9 +405,38 @@ public:
         return answer;
     }
 
-    std::vector<omnigraph::GraphEdgeIterator<Graph>> EdgeChunks(size_t chunk_cnt) const {
+};
+
+//todo move out
+template<class Graph>
+class IterationHelper<Graph, typename Graph::EdgeId> {
+    typedef typename Graph::VertexId VertexId;
+
+    const Graph& g_;
+public:
+    typedef typename Graph::EdgeId EdgeId;
+    typedef GraphEdgeIterator<Graph> const_edge_iterator;
+
+    IterationHelper(const Graph& g)
+            : g_(g) {
+    }
+
+    const_edge_iterator begin() const {
+        return const_edge_iterator(g_, g_.begin());
+    }
+
+    const_edge_iterator end() const {
+        return const_edge_iterator(g_, g_.end());
+    }
+
+    std::vector<omnigraph::GraphEdgeIterator<Graph>> Chunks(size_t chunk_cnt) const {
+        if (chunk_cnt == 1) {
+            return {begin(), end()};
+        }
+
         vector<omnigraph::GraphEdgeIterator<Graph>> answer;
-        for (const_vertex_iterator v_it : VertexChunks(chunk_cnt)) {
+
+        for (auto v_it : IterationHelper<Graph, VertexId>(g_).Chunks(chunk_cnt)) {
             answer.push_back(omnigraph::GraphEdgeIterator<Graph>(g_, v_it));
         }
         return answer;
diff --git a/src/include/omni/graph_processing_algorithm.hpp b/src/include/omni/graph_processing_algorithm.hpp
index bc6cef9..4c2c52c 100644
--- a/src/include/omni/graph_processing_algorithm.hpp
+++ b/src/include/omni/graph_processing_algorithm.hpp
@@ -7,17 +7,21 @@
 
 #pragma once
 
-#include "logger/logger.hpp"
-#include "func.hpp"
+#include "graph_iterators.hpp"
 #include "graph_component.hpp"
 #include "coverage.hpp"
+#include "pred.hpp"
+#include "logger/logger.hpp"
 
 namespace omnigraph {
 
 template<class Graph>
+using HandlerF = std::function<void(typename Graph::EdgeId)>;
+
+template<class Graph>
 class EdgeProcessingAlgorithm {
     typedef typename Graph::EdgeId EdgeId;
-    typedef std::shared_ptr<func::Predicate<EdgeId>> ProceedConditionT;
+    typedef pred::TypedPredicate<EdgeId> ProceedConditionT;
 
     Graph& g_;
     bool conjugate_symmetry_;
@@ -34,7 +38,8 @@ class EdgeProcessingAlgorithm {
     virtual bool ProcessEdge(EdgeId e) = 0;
 
  public:
-    EdgeProcessingAlgorithm(Graph& g, bool conjugate_symmetry = false)
+    EdgeProcessingAlgorithm(Graph& g,
+                             bool conjugate_symmetry = false)
             : g_(g), conjugate_symmetry_(conjugate_symmetry) {
 
     }
@@ -46,41 +51,62 @@ class EdgeProcessingAlgorithm {
 //        return conjugate_symmetry_;
 //    }
 
-    template<class SmartEdgeIt>
-    bool RunFromIterator(SmartEdgeIt& it,
-                 ProceedConditionT proceed_condition = std::make_shared<func::AlwaysTrue<EdgeId>>()) {
-        VERIFY(!it.canonical_only() || conjugate_symmetry_);
-        TRACE("Start processing");
+    template<class Comparator = std::less<EdgeId>>
+    bool Run(const Comparator& comp = Comparator(), ProceedConditionT proceed_condition = pred::AlwaysTrue<EdgeId>()) {
         bool triggered = false;
-        for (; !it.IsEnd(); ++it) {
+        for (auto it = g_.SmartEdgeBegin(comp, conjugate_symmetry_); !it.IsEnd(); ++it) {
             EdgeId e = *it;
             TRACE("Current edge " << g_.str(e));
-            if (!proceed_condition->Check(e)) {
+            if (!proceed_condition(e)) {
                 TRACE("Stop condition was reached.");
-                //need to release last element of the iterator to make it replacable by new elements
-                it.ReleaseCurrent();
                 break;
             }
 
             TRACE("Processing edge " << this->g().str(e));
             triggered |= ProcessEdge(e);
-        }
-        TRACE("Finished processing. Triggered = " << triggered);
+        };
         return triggered;
     }
 
-    template<class Comparator = std::less<EdgeId>>
-    bool Run(const Comparator& comp = Comparator(),
-                 ProceedConditionT proceed_condition = make_shared<func::AlwaysTrue<EdgeId>>()) {
-        auto it = g_.SmartEdgeBegin(comp, conjugate_symmetry_);
-        return RunFromIterator(it, proceed_condition);
-    }
-
  private:
     DECL_LOGGER("EdgeProcessingAlgorithm");
 };
 
 template<class Graph>
+class CountingCallback {
+    typedef typename Graph::EdgeId EdgeId;
+    bool report_on_destruction_;
+    std::atomic<size_t> cnt_;
+
+public:
+    CountingCallback(bool report_on_destruction = false) :
+            report_on_destruction_(report_on_destruction), cnt_(0) {
+    }
+
+    ~CountingCallback() {
+        if (report_on_destruction_)
+            Report();
+    }
+
+    void HandleDelete(EdgeId /*e*/) {
+        cnt_++;
+    }
+
+    void Report() {
+        TRACE(cnt_ << " edges were removed.")
+        cnt_ = 0;
+    }
+
+private:
+    DECL_LOGGER("CountingCallback");
+};
+
+template<class Graph>
+std::function<void(typename Graph::EdgeId)> AddCountingCallback(CountingCallback<Graph>& cnt_callback, std::function<void(typename Graph::EdgeId)> handler) {
+    std::function<void(typename Graph::EdgeId)> cnt_handler = std::bind(&CountingCallback<Graph>::HandleDelete, std::ref(cnt_callback), std::placeholders::_1);
+    return func::Composition<typename Graph::EdgeId>(handler, cnt_handler);
+}
+template<class Graph>
 void RemoveIsolatedOrCompress(Graph& g, typename Graph::VertexId v) {
     if (g.IsDeadStart(v) && g.IsDeadEnd(v)) {
         g.DeleteVertex(v);
@@ -104,7 +130,6 @@ class EdgeRemover {
               removal_handler_(removal_handler) {
     }
 
-    //todo how is it even compiling with const?!!!
     void DeleteEdge(EdgeId e) {
         VertexId start = g_.EdgeStart(e);
         VertexId end = g_.EdgeEnd(e);
@@ -143,13 +168,13 @@ class EdgeRemovingAlgorithm : public EdgeProcessingAlgorithm<Graph> {
     typedef EdgeProcessingAlgorithm<Graph> base;
     typedef typename Graph::EdgeId EdgeId;
 
-    shared_ptr<func::Predicate<EdgeId>> remove_condition_;
+    pred::TypedPredicate<EdgeId> remove_condition_;
     EdgeRemover<Graph> edge_remover_;
 
  protected:
     bool ProcessEdge(EdgeId e) {
         TRACE("Checking edge " << this->g().str(e) << " for the removal condition");
-        if (remove_condition_->Check(e)) {
+        if (remove_condition_(e)) {
             TRACE("Check passed, removing");
             edge_remover_.DeleteEdge(e);
             return true;
@@ -159,16 +184,13 @@ class EdgeRemovingAlgorithm : public EdgeProcessingAlgorithm<Graph> {
     }
 
  public:
-    EdgeRemovingAlgorithm(
-            Graph& g,
-            shared_ptr<func::Predicate<EdgeId>> remove_condition,
-            std::function<void (EdgeId)> removal_handler = boost::none,
-            bool conjugate_symmetry = false)
+    EdgeRemovingAlgorithm(Graph& g,
+                          pred::TypedPredicate<EdgeId> remove_condition,
+                          std::function<void (EdgeId)> removal_handler = boost::none,
+                          bool conjugate_symmetry = false)
             : base(g, conjugate_symmetry),
               remove_condition_(remove_condition),
-              edge_remover_(g, removal_handler) {
-
-    }
+              edge_remover_(g, removal_handler) {}
 
  private:
     DECL_LOGGER("EdgeRemovingAlgorithm");
diff --git a/src/include/omni/mapping_path.hpp b/src/include/omni/mapping_path.hpp
index 0a80650..ce73466 100644
--- a/src/include/omni/mapping_path.hpp
+++ b/src/include/omni/mapping_path.hpp
@@ -5,8 +5,9 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#ifndef __OMNI_MAPPING_PATH_HPP__
-#define __OMNI_MAPPING_PATH_HPP__
+#pragma once
+
+#include "range.hpp"
 
 namespace omnigraph {
 
@@ -43,82 +44,10 @@ class Path {
     iterator end() const { return sequence_.end(); }
 };
 
-struct Range {
-private:
-	bool inside(size_t left, size_t right, size_t point) const {
-		return left <= point && point <= right;
-	}
-
-public:
-    //inclusive
-    size_t start_pos;
-    //exclusive
-    size_t end_pos;
-
-    size_t size() const {
-        VERIFY(end_pos >= start_pos);
-        return end_pos - start_pos;
-    }
-
-    void shift(int shift) {
-        VERIFY(shift > 0 || size_t(-shift) <= start_pos);
-        start_pos += shift;
-        end_pos += shift;
-    }
-
-    Range(): start_pos(0), end_pos(0) {
-        VERIFY(end_pos >= start_pos);
-    }
-
-    Range(size_t start_pos, size_t end_pos)
-            : start_pos(start_pos),
-              end_pos(end_pos) {
-        VERIFY(end_pos >= start_pos);
-    }
-
-    bool operator<(const Range &other) const {
-      if (start_pos != other.start_pos)
-        return start_pos < other.start_pos;
-      return end_pos < other.end_pos;
-    }
-
-    bool contains(const Range& that) const {
-        return start_pos <= that.start_pos && end_pos >= that.end_pos;
-    }
-
-    Range Merge(const Range &other) const {
-    	return Range(this->start_pos, other.end_pos);
-    }
-
-    bool empty() const {
-    	return start_pos == end_pos;
-    }
-
-    bool Intersect(const Range &other) const {
-    	return inside(start_pos, end_pos, other.start_pos) || inside(start_pos, end_pos, other.end_pos) ||
-    			inside(other.start_pos, other.end_pos, start_pos);
-    }
-
-    bool IntersectLeftOf(const Range &other) const {
-    	return inside(start_pos, end_pos, other.start_pos) && inside(other.start_pos, other.end_pos, end_pos);
-    }
-
-    bool operator==(const Range &that) const {
-    	return start_pos == that.start_pos || end_pos == that.end_pos;
-    }
-
-    bool operator!=(const Range &that) const {
-    	return !(*this == that);
-    }
-};
-
-inline std::ostream& operator<<(std::ostream& os, const Range& range) {
-    os << "[" << (range.start_pos + 1) << " - " << range.end_pos << "]";
-    return os;
-}
-
 struct MappingRange {
+// on genome/contig/whatever
     Range initial_range;
+//on edge
     Range mapped_range;
 
     MappingRange() {
@@ -184,6 +113,11 @@ struct MappingRange {
     		return this->initial_range < other.initial_range;
     	return this->mapped_range < other.mapped_range;
     }
+    MappingRange operator = (const MappingRange & other) {
+        initial_range = other.initial_range;
+        mapped_range = other.mapped_range;
+        return *this;
+    }
 
     bool Intersect(const MappingRange &other) {
     	return initial_range.Intersect(other.initial_range) && mapped_range.Intersect(other.mapped_range);
@@ -218,7 +152,7 @@ inline std::ostream& operator<<(std::ostream& os, const MappingRange& map_range)
     return os;
 }
 
-    template<typename ElementId>
+template<typename ElementId>
 class MappingPath {
  public:
     MappingPath() {}
@@ -279,6 +213,7 @@ class MappingPath {
     std::vector<ElementId> edges_;
     std::vector<MappingRange> range_mappings_;
 };
+
 template <typename ElementId>
 inline std::ostream& operator<<(std::ostream& os, const MappingPath<ElementId>& mp) {
     os << "MappingPath ( ";
@@ -289,7 +224,4 @@ inline std::ostream& operator<<(std::ostream& os, const MappingPath<ElementId>&
     return os;
 }
 
-
 }
-
-#endif
diff --git a/src/include/omni/observable_graph.hpp b/src/include/omni/observable_graph.hpp
index e21dcf6..1189362 100644
--- a/src/include/omni/observable_graph.hpp
+++ b/src/include/omni/observable_graph.hpp
@@ -49,9 +49,9 @@ public:
         return HelperT(*this);
     }
 
-   const Applier& GetHandlerApplier() const {
-       return *applier_;
-   }
+    const Applier& GetHandlerApplier() const {
+        return *applier_;
+    }
 
     void AddActionHandler(Handler* action_handler) const;
 
@@ -79,31 +79,31 @@ public:
 
     bool VerifyAllDetached();
 
-   //smart iterators
-   template<typename Comparator>
-   SmartVertexIterator<ObservableGraph, Comparator> SmartVertexBegin(
-           const Comparator& comparator, bool canonical_only = false) const {
-       return SmartVertexIterator<ObservableGraph, Comparator>(*this,
-                                                               comparator, canonical_only);
-   }
+    //smart iterators
+    template<typename Comparator>
+    SmartVertexIterator<ObservableGraph, Comparator> SmartVertexBegin(
+            const Comparator& comparator, bool canonical_only = false) const {
+        return SmartVertexIterator<ObservableGraph, Comparator>(*this,
+                                                                comparator, canonical_only);
+    }
 
-   SmartVertexIterator<ObservableGraph> SmartVertexBegin(bool canonical_only = false) const {
-       return SmartVertexIterator<ObservableGraph>(*this, std::less<VertexId>(), canonical_only);
-   }
+    SmartVertexIterator<ObservableGraph> SmartVertexBegin(bool canonical_only = false) const {
+        return SmartVertexIterator<ObservableGraph>(*this, std::less<VertexId>(), canonical_only);
+    }
 
-   template<typename Comparator>
-   SmartEdgeIterator<ObservableGraph, Comparator> SmartEdgeBegin(
-           const Comparator& comparator, bool canonical_only = false) const {
-       return SmartEdgeIterator<ObservableGraph, Comparator>(*this, comparator, canonical_only);
-   }
+    template<typename Comparator>
+    SmartEdgeIterator<ObservableGraph, Comparator> SmartEdgeBegin(
+            const Comparator& comparator, bool canonical_only = false) const {
+        return SmartEdgeIterator<ObservableGraph, Comparator>(*this, comparator, canonical_only);
+    }
 
-   SmartEdgeIterator<ObservableGraph> SmartEdgeBegin(bool canonical_only = false) const {
-       return SmartEdgeIterator<ObservableGraph>(*this, std::less<EdgeId>(), canonical_only);
-   }
+    SmartEdgeIterator<ObservableGraph> SmartEdgeBegin(bool canonical_only = false) const {
+        return SmartEdgeIterator<ObservableGraph>(*this, std::less<EdgeId>(), canonical_only);
+    }
 
-   ConstEdgeIterator<ObservableGraph> ConstEdgeBegin(bool canonical_only = false) const {
-       return ConstEdgeIterator<ObservableGraph>(*this, canonical_only);
-   }
+    ConstEdgeIterator<ObservableGraph> ConstEdgeBegin(bool canonical_only = false) const {
+        return ConstEdgeIterator<ObservableGraph>(*this, canonical_only);
+    }
 
     void FireDeletePath(const std::vector<EdgeId>& edges_to_delete, const std::vector<VertexId>& vertices_to_delete) const;
 
@@ -126,6 +126,7 @@ public:
     void ForceDeleteVertex(VertexId v);
     
     using base::GetGraphIdDistributor;
+    using base::conjugate;
 
     EdgeId AddEdge(const EdgeData &data) {
         return AddEdge(data, GetGraphIdDistributor());
@@ -236,34 +237,34 @@ void ObservableGraph<DataMaster>::CompressVertex(VertexId v) {
 template<class DataMaster>
 typename ObservableGraph<DataMaster>::EdgeId ObservableGraph<DataMaster>::UnsafeCompressVertex(VertexId v) {
     VERIFY(base::CanCompressVertex(v));
-    vector<EdgeId> edges_to_merge;
+    std::vector<EdgeId> edges_to_merge;
     edges_to_merge.push_back(base::GetUniqueIncomingEdge(v));
     edges_to_merge.push_back(base::GetUniqueOutgoingEdge(v));
     return MergePath(edges_to_merge);
 }
 
 template<class DataMaster>
-vector<typename ObservableGraph<DataMaster>::EdgeId> ObservableGraph<DataMaster>::EdgesToDelete(const vector<EdgeId>& path) const {
-    set<EdgeId> edgesToDelete;
+std::vector<typename ObservableGraph<DataMaster>::EdgeId> ObservableGraph<DataMaster>::EdgesToDelete(const std::vector<EdgeId>& path) const {
+    std::set<EdgeId> edgesToDelete;
     edgesToDelete.insert(path[0]);
     for (size_t i = 0; i + 1 < path.size(); i++) {
         EdgeId e = path[i + 1];
         if (edgesToDelete.find(base::conjugate(e)) == edgesToDelete.end())
             edgesToDelete.insert(e);
     }
-    return vector<EdgeId>(edgesToDelete.begin(), edgesToDelete.end());
+    return std::vector<EdgeId>(edgesToDelete.begin(), edgesToDelete.end());
 }
 
 template<class DataMaster>
 vector<typename ObservableGraph<DataMaster>::VertexId> ObservableGraph<DataMaster>::VerticesToDelete(const vector<EdgeId>& path) const {
-    set<VertexId> verticesToDelete;
+    std::set<VertexId> verticesToDelete;
     for (size_t i = 0; i + 1 < path.size(); i++) {
         EdgeId e = path[i + 1];
         VertexId v = base::EdgeStart(e);
         if (verticesToDelete.find(base::conjugate(v)) == verticesToDelete.end())
             verticesToDelete.insert(v);
     }
-    return vector < VertexId > (verticesToDelete.begin(), verticesToDelete.end());
+    return vector<VertexId>(verticesToDelete.begin(), verticesToDelete.end());
 }
 
 template<class DataMaster>
@@ -309,7 +310,7 @@ bool ObservableGraph<DataMaster>::AllHandlersThreadSafe() const {
 template<class DataMaster>
 void ObservableGraph<DataMaster>::PrintHandlersNames() const {
     for (Handler* handler : action_handler_list_) {
-        cout << handler->name() << " attached=" << handler->IsAttached() << endl;
+        std::cout << handler->name() << " attached=" << handler->IsAttached() << std::endl;
     }
 }
 
@@ -457,11 +458,14 @@ typename ObservableGraph<DataMaster>::EdgeId ObservableGraph<DataMaster>::MergeP
 
 template<class DataMaster>
 std::pair<typename ObservableGraph<DataMaster>::EdgeId, typename ObservableGraph<DataMaster>::EdgeId> ObservableGraph<DataMaster>::SplitEdge(EdgeId edge, size_t position) {
-    VERIFY_MSG(position > 0 && position < base::length(edge), "Edge length is " << base::length(edge) << " but split pos was " << position);;
-    pair<VertexData, pair<EdgeData, EdgeData> > newData = base::master().SplitData(base::data(edge), position);
+    bool sc_flag = (edge == conjugate(edge));
+    VERIFY_MSG(position > 0 && position < (sc_flag ? base::length(edge) / 2 + 1 : base::length(edge)),
+            "Edge length is " << base::length(edge) << " but split pos was " << position);
+    std::pair<VertexData, std::pair<EdgeData, EdgeData> > newData = base::master().SplitData(base::data(edge), position, sc_flag);
     VertexId splitVertex = base::HiddenAddVertex(newData.first);
     EdgeId new_edge1 = base::HiddenAddEdge(base::EdgeStart(edge), splitVertex, newData.second.first);
-    EdgeId new_edge2 = base::HiddenAddEdge(splitVertex, base::EdgeEnd(edge), newData.second.second);
+    EdgeId new_edge2 = base::HiddenAddEdge(splitVertex, sc_flag ? conjugate(splitVertex) : base::EdgeEnd(edge), newData.second.second);
+    VERIFY(!sc_flag || new_edge2 == conjugate(new_edge2))
     FireSplit(edge, new_edge1, new_edge2);
     FireDeleteEdge(edge);
     FireAddVertex(splitVertex);
diff --git a/src/include/omni/omni_tools.hpp b/src/include/omni/omni_tools.hpp
index adf630f..47d8a0d 100644
--- a/src/include/omni/omni_tools.hpp
+++ b/src/include/omni/omni_tools.hpp
@@ -22,16 +22,52 @@
 
 namespace omnigraph {
 
+template<class Graph>
+class VertexCondition : public Predicate<typename Graph::VertexId> {
+    typedef typename Graph::VertexId VertexId;
+    const Graph& g_;
+ protected:
+
+    VertexCondition(const Graph& g)
+            : g_(g) {
+    }
+
+    const Graph& g() const {
+        return g_;
+    }
+
+};
+
+template<class Graph>
+class CompressCondition : public VertexCondition<Graph> {
+    typedef typename Graph::VertexId VertexId;
+
+public:
+    CompressCondition(const Graph& g) :
+        VertexCondition<Graph>(g) {
+    }
+
+    bool Check(VertexId v) const override {
+        return this->g().CanCompressVertex(v);
+    }
+};
+
 /**
  * Compressor compresses vertices with unique incoming and unique outgoing edge in linear time while
  * simple one-by-one compressing has square complexity.
  */
 template<class Graph>
-class Compressor {
+class Compressor : public PersistentProcessingAlgorithm<Graph,
+                                        typename Graph::VertexId,
+                                        ParallelInterestingElementFinder<Graph, typename Graph::VertexId>> {
 	typedef typename Graph::EdgeId EdgeId;
 	typedef typename Graph::VertexId VertexId;
+	typedef PersistentProcessingAlgorithm<Graph,
+            VertexId, ParallelInterestingElementFinder<Graph, VertexId>> base;
+	typedef CompressCondition<Graph> ConditionT;
 
 	Graph &graph_;
+	ConditionT compress_condition_;
 	bool safe_merging_;
 
 	bool GoUniqueWayForward(EdgeId &e) {
@@ -78,22 +114,27 @@ class Compressor {
 
 	}
 
-	//todo use graph method!
-	bool CanCompressVertex(VertexId v) const {
-		if (!graph_.CheckUniqueOutgoingEdge(v)
-			|| !graph_.CheckUniqueIncomingEdge(v)) {
-			TRACE(
-					"Vertex "
-							<< graph_.str(v)
-							<< " judged NOT compressible. Proceeding to the next vertex");
-			TRACE("Processing vertex " << graph_.str(v) << " finished");
-			return false;
-		}
-		return true;
-	}
+//	//todo use graph method!
+//	bool CanCompressVertex(VertexId v) const {
+//		if (!graph_.CheckUniqueOutgoingEdge(v)
+//			|| !graph_.CheckUniqueIncomingEdge(v)) {
+//			TRACE(
+//					"Vertex "
+//							<< graph_.str(v)
+//							<< " judged NOT compressible. Proceeding to the next vertex");
+//			TRACE("Processing vertex " << graph_.str(v) << " finished");
+//			return false;
+//		}
+//		return true;
+//	}
 public:
-	Compressor(Graph &graph, bool safe_merging = true) :
+	Compressor(Graph &graph, size_t chunk_cnt = 1, bool safe_merging = true) :
+	        base(graph,
+	             ParallelInterestingElementFinder<Graph, VertexId>(graph,
+	                                                               ConditionT(graph), chunk_cnt),
+	             /*canonical only*/true),
 			graph_(graph),
+			compress_condition_(graph),
 			safe_merging_(safe_merging) {
 	}
 
@@ -104,7 +145,7 @@ public:
 	 */
 	bool CompressVertex(VertexId v) {
 		TRACE("Processing vertex " << graph_.str(v) << " started");
-		if (! CanCompressVertex(v)) {
+		if (! compress_condition_.Check(v)) {
 			return false;
 		}
 		TRACE("Vertex " << graph_.str(v) << " judged compressible");
@@ -112,22 +153,27 @@ public:
 		return true;
 	}
 
-	EdgeId CompressVertexEdgeId(VertexId v){
+	EdgeId CompressVertexEdgeId(VertexId v) {
 		TRACE("Processing vertex " << graph_.str(v) << " started");
-		if (! CanCompressVertex(v)) {
+		if (! compress_condition_.Check(v)) {
 			return EdgeId(0);
 		}
 		TRACE("Vertex " << graph_.str(v) << " judged compressible");
 		return CompressWithoutChecks(v);
-
 	}
 
-	bool IsOfInterest(VertexId v) const {
-	    return CanCompressVertex(v);
-	}
+//	bool IsOfInterest(VertexId v) const {
+//	    return CanCompressVertex(v);
+//	}
 
-	bool Process(VertexId v) {
-	    return CompressVertex(v);
+protected:
+	bool Process(VertexId v) override {
+        if (compress_condition_.Check(v)) {
+            CompressWithoutChecks(v);
+            return true;
+        } else {
+            return false;
+        }
 	}
 
 private:
@@ -139,30 +185,56 @@ private:
  */
 template<class Graph>
 bool CompressAllVertices(Graph& g, bool safe_merging = true, size_t chunk_cnt = 1) {
-    SemiParallelAlgorithmRunner<Graph, typename Graph::VertexId> runner(g);
-    Compressor<Graph> compressor(g, safe_merging);
-    return RunVertexAlgorithm(g, runner, compressor, chunk_cnt);
+    Compressor<Graph> compressor(g, chunk_cnt, safe_merging);
+    return compressor.Run();
 }
 
 template<class Graph>
-class Cleaner {
-	typedef typename Graph::EdgeId EdgeId;
-	typedef typename Graph::VertexId VertexId;
+class IsolatedVertexCondition : public VertexCondition<Graph> {
+    typedef typename Graph::VertexId VertexId;
+
+public:
+    IsolatedVertexCondition(const Graph& g) :
+        VertexCondition<Graph>(g) {
+    }
+
+    bool Check(VertexId v) const override {
+        return this->g().IsDeadStart(v) && this->g().IsDeadEnd(v);
+    }
+};
+
+
+template<class Graph>
+class Cleaner : public PersistentProcessingAlgorithm<Graph,
+                            typename Graph::VertexId,
+                            ParallelInterestingElementFinder<Graph, typename Graph::VertexId>> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef PersistentProcessingAlgorithm<Graph,
+            VertexId, ParallelInterestingElementFinder<Graph, VertexId>> base;
+    typedef IsolatedVertexCondition<Graph> ConditionT;
 
 	Graph& g_;
+	ConditionT isolated_condition_;
 
 public:
-	Cleaner(Graph& g) :
-			g_(g) {
+	Cleaner(Graph& g, size_t chunk_cnt = 1) :
+	        base(g,
+                 ParallelInterestingElementFinder<Graph, VertexId>(g,
+                                                                   ConditionT(g), chunk_cnt),
+                 /*canonical only*/true),
+			g_(g), isolated_condition_(g) {
 	}
 
-    bool IsOfInterest(VertexId v) const {
-        return g_.IsDeadStart(v) && g_.IsDeadEnd(v);
-    }
+protected:
 
     bool Process(VertexId v) {
-        g_.DeleteVertex(v);
-        return true;
+        if (isolated_condition_.Check(v)) {
+            g_.DeleteVertex(v);
+            return true;
+        } else {
+            return false;
+        }
     }
 
 //	void Clean() {
@@ -173,8 +245,6 @@ public:
 //		}
 //	}
 
-private:
-	DECL_LOGGER("Cleaner")
 };
 
 /**
@@ -182,9 +252,8 @@ private:
  */
 template<class Graph>
 bool CleanGraph(Graph& g, size_t chunk_cnt = 1) {
-    SemiParallelAlgorithmRunner<Graph, typename Graph::VertexId> runner(g);
-    Cleaner<Graph> cleaner(g);
-    return RunVertexAlgorithm(g, runner, cleaner, chunk_cnt);
+    Cleaner<Graph> cleaner(g, chunk_cnt);
+    return cleaner.Run();
 }
 
 template<class Graph>
diff --git a/src/include/omni/order_and_law.hpp b/src/include/omni/order_and_law.hpp
index b8e7741..a8c7532 100644
--- a/src/include/omni/order_and_law.hpp
+++ b/src/include/omni/order_and_law.hpp
@@ -13,9 +13,13 @@
 #include <unordered_set>
 #include <unordered_map>
 #include <stacktrace.hpp>
+#include <algorithm>
+#include <map>
+
 #include "openmp_wrapper.h"
 #include "folly/PackedSyncPtr.h"
 
+
 namespace restricted
 {
 
@@ -160,7 +164,7 @@ public:
 
   void Synchronize() const {
     size_t& global_max_id = id_distributor_.max_int_id_;
-    global_max_id = max(cur_id_, global_max_id);
+    global_max_id = std::max(cur_id_, global_max_id);
   }
 
 private:
@@ -551,7 +555,7 @@ private:
 };
 
 template<class T>
-ostream &operator<<(ostream &stream, const pure_pointer<T>& pointer)
+std::ostream &operator<<(std::ostream &stream, const pure_pointer<T>& pointer)
 {
   stream << pointer.int_id();
   return stream;
diff --git a/src/include/omni/parallel_processing.hpp b/src/include/omni/parallel_processing.hpp
index 3c282c3..a38f7d4 100644
--- a/src/include/omni/parallel_processing.hpp
+++ b/src/include/omni/parallel_processing.hpp
@@ -13,226 +13,277 @@
 
 namespace omnigraph {
 
-//todo add conjugate filtration
-template<class Graph, class ElementType>
-class AlgorithmRunner {
-    const Graph& g_;
-
-    template<class Algo, class It>
-    bool ProcessBucket(Algo& algo, It begin, It end) {
-        bool changed = false;
-        for (auto it = begin; it != end; ++it) {
-            changed |= algo.Process(*it);
+template<class ItVec, class SmartIt, class Predicate>
+void FillInterestingFromChunkIterators(const ItVec& chunk_iterators,
+                                       SmartIt& smart_it,
+                                       const Predicate& predicate) {
+    VERIFY(chunk_iterators.size() > 1);
+    typedef typename Predicate::checked_type ElementType;
+    std::vector<std::vector<ElementType>> of_interest(omp_get_max_threads());
+
+    #pragma omp parallel for schedule(guided)
+    for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
+        for (auto it = chunk_iterators[i], end = chunk_iterators[i + 1]; it != end; ++it) {
+            ElementType t = *it;
+            if (predicate(t)) {
+                of_interest[omp_get_thread_num()].push_back(t);
+            }
         }
-        return changed;
     }
 
-public:
-
-    const Graph& g() const {
-        return g_;
+    for (auto& chunk : of_interest) {
+        smart_it.insert(chunk.begin(), chunk.end());
+        chunk.clear();
     }
+}
 
-    AlgorithmRunner(Graph& g)
-            : g_(g) {
+template<class Graph, class ElementId = typename Graph::EdgeId>
+class TrivialInterestingElementFinder {
+public:
 
+    TrivialInterestingElementFinder() {
     }
 
-    template<class Algo, class ItVec>
-    bool RunFromChunkIterators(Algo& algo, const ItVec& chunk_iterators) {
-        DEBUG("Running from " << chunk_iterators.size() - 1 << "chunks");
-        VERIFY(chunk_iterators.size() > 1);
-        bool changed = false;
-        #pragma omp parallel for schedule(guided) reduction(|:changed)
-        for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
-            changed |= ProcessBucket(algo, chunk_iterators[i], chunk_iterators[i + 1]);
-        }
-        DEBUG("Finished");
-        return changed;
+    template<class SmartIt>
+    bool Run(SmartIt& /*it*/) const {
+        return false;
     }
-private:
-    DECL_LOGGER("AlgorithmRunner")
-    ;
 };
 
-template<class Graph, class ElementType>
-class TwoStepAlgorithmRunner {
-    typedef typename Graph::VertexId VertexId;
-    typedef typename Graph::EdgeId EdgeId;
+template<class Graph, class ElementId = typename Graph::EdgeId>
+class SimpleInterestingElementFinder {
+    typedef GraphEdgeIterator<Graph> EdgeIt;
 
     const Graph& g_;
-    const bool filter_conjugate_;
-    std::vector<std::vector<ElementType>> elements_of_interest_;
-
-    template<class Algo>
-    bool ProcessBucket(Algo& algo, const std::vector<ElementType>& bucket, size_t idx_offset) const {
-        bool changed = false;
-        for (ElementType el : bucket) {
-            changed |= algo.Process(el, idx_offset++);
-        }
-        return changed;
-    }
+    pred::TypedPredicate<ElementId> condition_;
+public:
 
-    template<class Algo>
-    bool Process(Algo& algo) const {
-        std::vector<size_t> cumulative_bucket_sizes;
-        cumulative_bucket_sizes.push_back(0);
-        for (const auto& bucket : elements_of_interest_) {
-            cumulative_bucket_sizes.push_back(cumulative_bucket_sizes.back() + bucket.size());
-        }
-        DEBUG("Preparing for processing");
-        algo.PrepareForProcessing(cumulative_bucket_sizes.back());
-        bool changed = false;
-        DEBUG("Processing buckets");
-        #pragma omp parallel for schedule(guided) reduction(|:changed)
-        for (size_t i = 0; i < elements_of_interest_.size(); ++i) {
-            changed |= ProcessBucket(algo, elements_of_interest_[i], cumulative_bucket_sizes[i]);
-        }
-        return changed;
-    }
+    SimpleInterestingElementFinder(const Graph& g,
+                                   pred::TypedPredicate<ElementId> condition = pred::AlwaysTrue<ElementId>())
+            :  g_(g), condition_(condition) {}
 
-    template<class Algo>
-    void CountElement(Algo& algo, ElementType el, size_t bucket) {
-        if (filter_conjugate_ && g_.conjugate(el) < el)
-            return;
-        if (algo.IsOfInterest(el)) {
-            INFO("Element " << g_.str(el) << " is of interest");
-            elements_of_interest_[bucket].push_back(el);
-        } else {
-            INFO("Element " << g_.str(el) << " is not interesting");
+    template<class SmartIt>
+    bool Run(SmartIt& interest) const {
+        for (EdgeIt it = EdgeIt(g_, g_.begin()), end = EdgeIt(g_, g_.end()); it != end; ++it) {
+            if (condition_(*it)) {
+                interest.push(*it);
+            }
         }
+        return false;
     }
+};
 
-    template<class Algo, class It>
-    void CountAll(Algo& algo, It begin, It end, size_t bucket) {
-        for (auto it = begin; !(it == end); ++it) {
-            CountElement(algo, *it, bucket);
-        }
-    }
+template<class Graph, class ElementId = typename Graph::EdgeId>
+class ParallelInterestingElementFinder {
+    typedef GraphEdgeIterator<Graph> EdgeIt;
 
+    const Graph& g_;
+    pred::TypedPredicate<ElementId> condition_;
+    const size_t chunk_cnt_;
 public:
 
-    const Graph& g() const {
-        return g_;
+    ParallelInterestingElementFinder(const Graph& g,
+                                     pred::TypedPredicate<ElementId> condition,
+                                     size_t chunk_cnt)
+            : g_(g), condition_(condition), chunk_cnt_(chunk_cnt) {}
+
+    template<class SmartIt>
+    bool Run(SmartIt& it) const {
+        TRACE("Looking for interesting elements");
+        TRACE("Splitting graph into " << chunk_cnt_ << " chunks");
+        FillInterestingFromChunkIterators(IterationHelper<Graph, ElementId>(g_).Chunks(chunk_cnt_), it, condition_);
+        TRACE("Found " << it.size() << " interesting elements");
+        return false;
     }
+private:
+    DECL_LOGGER("ParallelInterestingElementFinder");
+};
 
-    //conjugate elements are filtered based on ids
-    //should be used only if both conjugate elements are simultaneously either interesting or not
-    //fixme filter_conjugate is redundant
-    TwoStepAlgorithmRunner(Graph& g, bool filter_conjugate)
-            : g_(g),
-              filter_conjugate_(filter_conjugate) {
-
-    }
+template<class Graph>
+class PersistentAlgorithmBase {
+    Graph& g_;
+protected:
 
-    template<class Algo, class ItVec>
-    bool RunFromChunkIterators(Algo& algo, const ItVec& chunk_iterators) {
-        DEBUG("Started running from " << chunk_iterators.size() - 1 << " chunks");
-        VERIFY(algo.ShouldFilterConjugate() == filter_conjugate_);
-        VERIFY(chunk_iterators.size() > 1);
-        elements_of_interest_.clear();
-        elements_of_interest_.resize(chunk_iterators.size() - 1);
-        DEBUG("Searching elements of interest");
-        #pragma omp parallel for schedule(guided)
-        for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
-            CountAll(algo, chunk_iterators[i], chunk_iterators[i + 1], i);
-        }
-        DEBUG("Processing");
-        return Process(algo);
-    }
+    PersistentAlgorithmBase(Graph& g) : g_(g) {}
 
-//    template<class Algo, class It>
-//    void RunFromIterator(Algo& algo, It begin, It end) {
-//        RunFromChunkIterators(algo, std::vector<It> { begin, end });
-//    }
-private:
-    DECL_LOGGER("TwoStepAlgorithmRunner")
-    ;
+    Graph& g() { return g_; }
+    const Graph& g() const { return g_; }
+public:
+    virtual ~PersistentAlgorithmBase() {}
+    virtual bool Run(bool force_primary_launch = false) = 0;
 };
 
-template<class Graph, class ElementType>
-class SemiParallelAlgorithmRunner {
-    typedef typename Graph::VertexId VertexId;
-    typedef typename Graph::EdgeId EdgeId;
+//todo use add_condition in it_
+template<class Graph, class ElementId, class InterestingElementFinder,
+         class Comparator = std::less<ElementId>>
+class PersistentProcessingAlgorithm : public PersistentAlgorithmBase<Graph> {
+    InterestingElementFinder interest_el_finder_;
 
-    const Graph& g_;
+    SmartSetIterator<Graph, ElementId, Comparator> it_;
+    //todo remove
+    bool tracking_;
+    size_t total_iteration_estimate_;
 
-public:
+    size_t curr_iteration_;
 
-    const Graph& g() const {
-        return g_;
-    }
+protected:
+
+    virtual bool Process(ElementId el) = 0;
+    virtual bool Proceed(ElementId /*el*/) const { return true; }
 
-    SemiParallelAlgorithmRunner(Graph& g)
-            : g_(g) {
+    virtual void PrepareIteration(size_t /*it_cnt*/, size_t /*total_it_estimate*/) {}
 
+public:
+
+    PersistentProcessingAlgorithm(Graph& g,
+                                      const InterestingElementFinder& interest_el_finder,
+                                      bool canonical_only = false,
+                                      const Comparator& comp = Comparator(),
+                                      bool track_changes = true,
+                                      size_t total_iteration_estimate = -1ul) :
+                                      PersistentAlgorithmBase<Graph>(g),
+                                      interest_el_finder_(interest_el_finder),
+                                      it_(g, true, comp, canonical_only),
+                                      tracking_(track_changes),
+                                      total_iteration_estimate_(total_iteration_estimate),
+                                      curr_iteration_(0) {
+        it_.Detach();
     }
 
-    template<class Algo, class ItVec>
-    bool RunFromChunkIterators(Algo& algo, const ItVec& chunk_iterators) {
-        VERIFY(chunk_iterators.size() > 1);
-        std::vector<std::vector<ElementType>> of_interest(chunk_iterators.size() - 1);
-
-        #pragma omp parallel for schedule(guided)
-        for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
-            for (auto it = chunk_iterators[i], end = chunk_iterators[i + 1]; it != end; ++it) {
-                ElementType t = *it;
-                if (algo.IsOfInterest(t)) {
-                    of_interest[i].push_back(t);
-                }
-            }
+    bool Run(bool force_primary_launch = false) {
+        bool primary_launch = !tracking_ || (curr_iteration_ == 0) || force_primary_launch ;
+        if (!it_.IsAttached()) {
+            it_.Attach();
+        }
+        if (primary_launch) {
+            it_.clear();
+            TRACE("Primary launch.");
+            TRACE("Start preprocessing");
+            interest_el_finder_.Run(it_);
+            TRACE(it_.size() << " edges to process after preprocessing");
+        } else {
+            TRACE(it_.size() << " edges to process");
+            VERIFY(tracking_);
         }
 
-        auto it = SmartSetIterator<Graph, ElementType>(g_);
-        for (auto& chunk : of_interest) {
-            it.insert(chunk.begin(), chunk.end());
+        if (curr_iteration_ >= total_iteration_estimate_) {
+            PrepareIteration(total_iteration_estimate_ - 1, total_iteration_estimate_);
+        } else {
+            PrepareIteration(curr_iteration_, total_iteration_estimate_);
         }
-        bool changed = false;
-        for (; !it.IsEnd(); ++it) {
-            changed |= algo.Process(*it);
+
+        bool triggered = false;
+        TRACE("Start processing");
+        for (; !it_.IsEnd(); ++it_) {
+            ElementId el = *it_;
+            if (!Proceed(el)) {
+                TRACE("Proceed condition turned false on element " << this->g().str(el));
+                it_.ReleaseCurrent();
+                break;
+            }
+            TRACE("Processing edge " << this->g().str(el));
+            triggered |= Process(el);
         }
-        return changed;
+        TRACE("Finished processing. Triggered = " << triggered);
+        if (!tracking_)
+            it_.Detach();
+
+        curr_iteration_++;
+        return triggered;
     }
 
-private:
-    DECL_LOGGER("SemiParallelAlgorithmRunner")
-    ;
 };
 
-//todo generalize to use for other algorithms if needed
-template<class Graph>
-class SemiParallelEdgeRemovingAlgorithm {
+template<class Graph, class InterestingEdgeFinder,
+         class Comparator = std::less<typename Graph::EdgeId>>
+class PersistentEdgeRemovingAlgorithm : public PersistentProcessingAlgorithm<Graph,
+                                                                            typename Graph::EdgeId,
+                                                                            InterestingEdgeFinder, Comparator> {
     typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    Graph& g_;
-    shared_ptr<func::Predicate<EdgeId>> condition_;
+    typedef PersistentProcessingAlgorithm<Graph, EdgeId, InterestingEdgeFinder, Comparator> base;
     EdgeRemover<Graph> edge_remover_;
-
 public:
-    SemiParallelEdgeRemovingAlgorithm(Graph& g,
-                                      shared_ptr<func::Predicate<EdgeId>> condition,
-                                      std::function<void(EdgeId)> removal_handler = 0) :
-            g_(g), condition_(condition), edge_remover_(g, removal_handler) {
+    PersistentEdgeRemovingAlgorithm(Graph& g,
+                                    const InterestingEdgeFinder& interest_edge_finder,
+                                    std::function<void(EdgeId)> removal_handler = boost::none,
+                                    bool canonical_only = false,
+                                    const Comparator& comp = Comparator(),
+                                    bool track_changes = true,
+                                    size_t total_iteration_estimate = -1ul)
+            : base(g, interest_edge_finder,
+                   canonical_only, comp, track_changes,
+                   total_iteration_estimate),
+                   edge_remover_(g, removal_handler) {
+
+    }
+
+protected:
+
+    virtual bool ShouldRemove(EdgeId e) const = 0;
+
+    bool Process(EdgeId e) override {
+        TRACE("Checking edge " << this->g().str(e) << " for the removal condition");
+        if (ShouldRemove(e)) {
+            TRACE("Check passed, removing");
+            edge_remover_.DeleteEdge(e);
+            return true;
+        }
+        TRACE("Check not passed");
+        return false;
     }
 
-    bool IsOfInterest(EdgeId e) const {
-        return condition_->Check(e);
+};
+
+template<class Graph, class InterestingEdgeFinder,
+         class Comparator = std::less<typename Graph::EdgeId>>
+class ConditionEdgeRemovingAlgorithm : public PersistentEdgeRemovingAlgorithm<Graph,
+                                                                              InterestingEdgeFinder, Comparator> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentEdgeRemovingAlgorithm<Graph, InterestingEdgeFinder, Comparator> base;
+    pred::TypedPredicate<EdgeId> remove_condition_;
+protected:
+
+    bool ShouldRemove(EdgeId e) const override {
+        return remove_condition_(e);
     }
 
-    bool Process(EdgeId e) {
-        edge_remover_.DeleteEdge(e);
-        return true;
+public:
+    ConditionEdgeRemovingAlgorithm(Graph& g,
+                                   const InterestingEdgeFinder& interest_edge_finder,
+                                   pred::TypedPredicate<EdgeId> remove_condition,
+                                   std::function<void(EdgeId)> removal_handler = boost::none,
+                                   bool canonical_only = false,
+                                   const Comparator& comp = Comparator(),
+                                   bool track_changes = true)
+            : base(g, interest_edge_finder,
+                   removal_handler,
+                   canonical_only, comp, track_changes),
+                   remove_condition_(remove_condition) {
+
     }
 };
 
-template<class Graph, class AlgoRunner, class Algo>
-bool RunVertexAlgorithm(Graph& g, AlgoRunner& runner, Algo& algo, size_t chunk_cnt) {
-    return runner.RunFromChunkIterators(algo, ParallelIterationHelper<Graph>(g).VertexChunks(chunk_cnt));
-}
+template<class Graph, class Comparator = std::less<typename Graph::EdgeId>>
+class ParallelEdgeRemovingAlgorithm : public ConditionEdgeRemovingAlgorithm<Graph,
+                                                ParallelInterestingElementFinder<Graph>, Comparator> {
+    typedef ConditionEdgeRemovingAlgorithm<Graph,
+            ParallelInterestingElementFinder<Graph>, Comparator> base;
+    typedef typename Graph::EdgeId EdgeId;
 
-template<class Graph, class AlgoRunner, class Algo>
-bool RunEdgeAlgorithm(Graph& g, AlgoRunner& runner, Algo& algo, size_t chunk_cnt) {
-    return runner.RunFromChunkIterators(algo, ParallelIterationHelper<Graph>(g).EdgeChunks(chunk_cnt));
-}
+public:
+    ParallelEdgeRemovingAlgorithm(Graph& g,
+                                  pred::TypedPredicate<EdgeId> remove_condition,
+                                  size_t chunk_cnt,
+                                  std::function<void(EdgeId)> removal_handler = boost::none,
+                                  bool canonical_only = false,
+                                  const Comparator& comp = Comparator(),
+                                  bool track_changes = true)
+            : base(g,
+                   ParallelInterestingElementFinder<Graph>(g, remove_condition, chunk_cnt),
+                   remove_condition, removal_handler,
+                   canonical_only, comp, track_changes) {
+    }
+
+};
 
 }
diff --git a/src/include/omni/path_processor.hpp b/src/include/omni/path_processor.hpp
index 3954ade..dbeb20d 100644
--- a/src/include/omni/path_processor.hpp
+++ b/src/include/omni/path_processor.hpp
@@ -14,7 +14,7 @@
 namespace omnigraph {
 
 template<class Graph>
-const string PrintPath(Graph& g, const vector<typename Graph::EdgeId>& edges) {
+const string PrintPath(const Graph& g, const vector<typename Graph::EdgeId>& edges) {
 	string delim = "";
 	std::stringstream ss;
 	for (size_t i = 0; i < edges.size(); ++i) {
@@ -32,83 +32,6 @@ class PathProcessor {
     typedef typename Graph::VertexId VertexId;
     typedef vector<EdgeId> Path;
     typedef typename DijkstraHelper<Graph>::BoundedDijkstra DijkstraT;
-
-    void Push(EdgeId e, VertexId start_v) {
-        TRACE("Pushing edge " << g_.str(e));
-        curr_len_ += g_.length(e);
-        reversed_edge_path_.push_back(e);
-        vertex_cnts_.put(start_v);
-    }
-
-    void Pop() {
-        VERIFY(!reversed_edge_path_.empty());
-        EdgeId e = reversed_edge_path_.back();
-        size_t len = g_.length(e);
-        VERIFY(curr_len_ >= len);
-
-        TRACE("Popping edge " << g_.str(e));
-        vertex_cnts_.take(g_.EdgeStart(e));
-        reversed_edge_path_.pop_back();
-        curr_len_ -= len;
-    }
-
-    bool CanGo(EdgeId e, VertexId start_v) {
-//        VertexId v = g_.EdgeStart(e);
-        //if (!dijkstra_.DistanceCounted(v)) {
-        //TRACE("Distance not counted yet");
-        //}
-        //else
-        //TRACE("Shortest distance from this vertex is " << dijkstra_.GetDistance(v)
-        //<< " and sum with current path length " << cur_len
-        //<< " exceeded max length " << max_len_);
-        if (!dijkstra_.DistanceCounted(start_v))
-            return false;
-        if (dijkstra_.GetDistance(start_v) + g_.length(e) + curr_len_ > max_len_)
-            return false;
-        if (vertex_cnts_.mult(start_v) >= MAX_VERTEX_USAGE)
-            return false;
-        return true;
-    }
-
-    //returns true iff limits were exceeded
-    bool Go(VertexId v, const size_t min_len) {
-        TRACE("Got to vertex " << g_.str(v));
-        if (++call_cnt_ >= MAX_CALL_CNT) {
-            TRACE("Maximal count " << MAX_CALL_CNT << " of recursive calls was exceeded!");
-            return true;
-        }
-
-        if (v == start_ && curr_len_ >= min_len) {
-            //TRACE("New path found: " << PrintPath(g_, path_));
-            callback_->HandleReversedPath(reversed_edge_path_);
-        }
-
-        TRACE("Iterating through incoming edges of vertex " << g_.int_id(v))
-        //TODO: doesn`t work with parallel simplification
-        vector<EdgeId> incoming;
-        incoming.reserve(4);
-        std::copy_if(g_.in_begin(v), g_.in_end(v), std::back_inserter(incoming), [&] (EdgeId e) {
-            return dijkstra_.DistanceCounted(g_.EdgeStart(e));
-        });
-
-        std::sort(incoming.begin(), incoming.end(), [&] (EdgeId e1, EdgeId e2) {
-            return dijkstra_.GetDistance(g_.EdgeStart(e1)) < dijkstra_.GetDistance(g_.EdgeStart(e2));
-        });
-
-        for (EdgeId e : incoming) {
-            VertexId start_v = g_.EdgeStart(e);
-            if (CanGo(e, start_v)) {
-                Push(e, start_v);
-                bool exceeded_limits = Go(start_v, min_len);
-                Pop();
-                if (exceeded_limits)
-                    return true;
-            }
-        }
-        //TRACE("Processing vertex " << g_.int_id(v) << " finished");
-        return false;
-    }
-
 public:
     class Callback {
 
@@ -131,44 +54,137 @@ public:
         }
     };
 
-    // constructor for paths between start vertex and a set of @end_points
-    PathProcessor(const Graph& g, const vector<size_t>& min_lens, size_t max_len, VertexId start,
-                  const vector<VertexId>& end_points, Callback& callback)
-            : g_(g),
-              min_lens_(min_lens),
-              max_len_(max_len),
-              start_(start),
-              end_points_(end_points),
-              dijkstra_(DijkstraHelper<Graph>::CreateBoundedDijkstra(g, max_len, MAX_DIJKSTRA_VERTICES)),
-              callback_(&callback),
-              curr_len_(0),
-              call_cnt_(0) {
-        TRACE("Dijkstra launched");
-        dijkstra_.run(start);
-        reversed_edge_path_.reserve(MAX_CALL_CNT);
-        TRACE("Dijkstra finished");
-    }
+private:
+
+    class Traversal {
+        const PathProcessor& outer_;
+        VertexId end_;
+        size_t min_len_;
+        size_t max_len_;
+        Callback& callback_;
+        size_t edge_depth_bound_;
+
+        size_t curr_len_;
+        size_t curr_depth_;
+        size_t call_cnt_;
+        Path reversed_edge_path_;
+        bag<VertexId> vertex_cnts_;
+
+        const Graph& g_;
+        const DijkstraT& dijkstra_;
+
+        void Push(EdgeId e, VertexId start_v) {
+            TRACE("Pushing edge " << g_.str(e));
+            curr_len_ += g_.length(e);
+            curr_depth_++;
+            reversed_edge_path_.push_back(e);
+            vertex_cnts_.put(start_v);
+        }
+
+        void Pop() {
+            VERIFY(!reversed_edge_path_.empty());
+            EdgeId e = reversed_edge_path_.back();
+            size_t len = g_.length(e);
+            VERIFY(curr_len_ >= len);
+
+            TRACE("Popping edge " << g_.str(e));
+            vertex_cnts_.take(g_.EdgeStart(e));
+            reversed_edge_path_.pop_back();
+            curr_len_ -= len;
+            curr_depth_--;
+        }
 
-    // constructor when we have only one @end_point
-    PathProcessor(const Graph& g, size_t min_len, size_t max_len, VertexId start, VertexId end_point, Callback& callback)
-            : g_(g),
-              max_len_(max_len),
+        bool CanGo(EdgeId e, VertexId start_v) {
+            if (!dijkstra_.DistanceCounted(start_v))
+                return false;
+            if (dijkstra_.GetDistance(start_v) + g_.length(e) + curr_len_ > max_len_)
+                return false;
+            if (curr_depth_ >= edge_depth_bound_)
+                return false;
+            if (vertex_cnts_.mult(start_v) >= PathProcessor::MAX_VERTEX_USAGE)
+                return false;
+            return true;
+        }
+
+        bool Go(VertexId v, const size_t min_len) {
+            TRACE("Got to vertex " << g_.str(v));
+            if (++call_cnt_ >= PathProcessor::MAX_CALL_CNT) {
+                TRACE("Maximal count " << MAX_CALL_CNT << " of recursive calls was exceeded!");
+                return true;
+            }
+
+            if (v == outer_.start_ && curr_len_ >= min_len) {
+                //TRACE("New path found: " << PrintPath(g_, path_));
+                callback_.HandleReversedPath(reversed_edge_path_);
+            }
+
+            TRACE("Iterating through incoming edges of vertex " << g_.int_id(v))
+            //TODO: doesn`t work with parallel simplification
+            vector<EdgeId> incoming;
+            incoming.reserve(4);
+            std::copy_if(g_.in_begin(v), g_.in_end(v), std::back_inserter(incoming), [&] (EdgeId e) {
+                return dijkstra_.DistanceCounted(g_.EdgeStart(e));
+            });
+
+            std::sort(incoming.begin(), incoming.end(), [&] (EdgeId e1, EdgeId e2) {
+                return dijkstra_.GetDistance(g_.EdgeStart(e1)) < dijkstra_.GetDistance(g_.EdgeStart(e2));
+            });
+
+            for (EdgeId e : incoming) {
+                VertexId start_v = g_.EdgeStart(e);
+                if (CanGo(e, start_v)) {
+                    Push(e, start_v);
+                    bool exceeded_limits = Go(start_v, min_len);
+                    Pop();
+                    if (exceeded_limits)
+                        return true;
+                }
+            }
+            return false;
+        }
+
+    public:
+        Traversal(const PathProcessor& outer, VertexId end,
+                  size_t min_len, size_t max_len,
+                  Callback& callback, size_t edge_depth_bound) :
+            outer_(outer), end_(end),
+            min_len_(min_len), max_len_(max_len),
+            callback_(callback),
+            edge_depth_bound_(edge_depth_bound),
+            curr_len_(0), curr_depth_(0), call_cnt_(0), 
+            g_(outer.g_),
+            dijkstra_(outer.dijkstra_) {
+            reversed_edge_path_.reserve(PathProcessor::MAX_CALL_CNT);
+            vertex_cnts_.put(end_);
+        }
+
+        //returns true iff limits were exceeded
+        bool Go() {
+            bool code = Go(end_, min_len_);
+            VERIFY(curr_len_ == 0);
+            VERIFY(curr_depth_ == 0);
+            vertex_cnts_.take(end_);
+            VERIFY(vertex_cnts_.size() == 0);
+            return code;
+        }
+    };
+
+    friend class Traversal;
+
+public:
+
+    PathProcessor(const Graph& g, VertexId start, size_t length_bound) :
+    		  g_(g),
               start_(start),
-              dijkstra_(DijkstraHelper<Graph>::CreateBoundedDijkstra(g, max_len, MAX_DIJKSTRA_VERTICES)),
-              callback_(&callback),
-              curr_len_(0),
-              call_cnt_(0) {
+              dijkstra_(DijkstraHelper<Graph>::CreateBoundedDijkstra(g, length_bound, MAX_DIJKSTRA_VERTICES)) {
         TRACE("Dijkstra launched");
-        min_lens_.push_back(min_len);
-        end_points_.push_back(end_point);
-        dijkstra_.run(start);
-        reversed_edge_path_.reserve(MAX_CALL_CNT);
+        dijkstra_.Run(start);
         TRACE("Dijkstra finished");
     }
 
     // dfs from the end vertices
     // 3 two mistakes, 2 bad dijkstra, 1 some bad dfs, 0 = okay
-    int Process() {
+    int Process(VertexId end, size_t min_len, size_t max_len, Callback& callback, size_t edge_depth_bound = -1ul) const {
         TRACE("Process launched");
         int error_code = 0;
 
@@ -177,50 +193,16 @@ public:
             error_code = 2;
         }
 
-        TRACE("Start vertex is " << g_.int_id(start_));
-        for (size_t i = 0; i < end_points_.size(); ++i) {
-            VERIFY(curr_len_ == 0);
-            VERIFY(vertex_cnts_.size() == 0);
-            call_cnt_ = 0;
-            VertexId current_end = end_points_[i];
-            TRACE("Bounds are " << min_lens_[i] << " " << max_len_);
-            TRACE("Current end vertex " << g_.int_id(current_end));
-            vertex_cnts_.put(current_end);
-            error_code |= int(Go(current_end, min_lens_[i]));
-            vertex_cnts_.take(current_end);
-            callback_->Flush();
-        }
-        TRACE("Process finished with error code " << error_code);
-        return error_code;
-    }
-
-    //todo remove setters
-    void SetMinLens(const vector<size_t>& new_min_lens) {
-        min_lens_ = new_min_lens;
-    }
-
-    void SetMinLens(vector<size_t> && new_min_lens) {
-        min_lens_ = new_min_lens;
-    }
+        TRACE("Start vertex is " << g_.str(start_));
+        TRACE("Bounds are " << min_len << " " << max_len);
+        TRACE("End vertex " << g_.str(end));
 
-    void SetMaxLen(size_t new_max_len) {
-        max_len_ = new_max_len;
-    }
-
-    void SetEndPoints(const vector<VertexId>& new_end_points) {
-        end_points_ = new_end_points;
-    }
-
-    void SetEndPoints(vector<VertexId> && new_end_points) {
-        end_points_ = new_end_points;
-    }
-
-    void SetCallback(Callback* new_callback) {
-        callback_ = new_callback;
-    }
+        Traversal traversal(*this, end, min_len, max_len, callback, edge_depth_bound);
+        error_code |= int(traversal.Go());
 
-    void ResetCallCount() {
-        call_cnt_ = 0;
+        callback.Flush();
+        TRACE("Process finished with error code " << error_code);
+        return error_code;
     }
 
 private:
@@ -229,22 +211,21 @@ private:
     static const size_t MAX_VERTEX_USAGE = 5;
 
     const Graph& g_;
-    vector<size_t> min_lens_;
-    size_t max_len_;
     VertexId start_;
-    vector<VertexId> end_points_;
     DijkstraT dijkstra_;
-    Callback* callback_;
-
-    Path reversed_edge_path_;
-    bag<VertexId> vertex_cnts_;
-    size_t curr_len_;
-    size_t call_cnt_;
 
     DECL_LOGGER("PathProcessor")
 };
 
 template<class Graph>
+int ProcessPaths(const Graph& g, size_t min_len, size_t max_len,
+                 typename Graph::VertexId start, typename Graph::VertexId end,
+                 typename PathProcessor<Graph>::Callback& callback, size_t max_edge_cnt = -1ul) {
+    PathProcessor<Graph> processor(g, start, max_len);
+    return processor.Process(end, min_len, max_len, callback, max_edge_cnt);
+}
+
+template<class Graph>
 class CompositeCallback: public PathProcessor<Graph>::Callback {
 	typedef typename Graph::EdgeId EdgeId;
 	typedef vector<EdgeId> Path;
@@ -254,13 +235,13 @@ public:
 		processors_.push_back(&processor);
 	}
 
-	virtual void Flush() {
+	void Flush() override {
 		for (auto it = processors_.begin(); it != processors_.end(); ++it) {
 			(*it)->Flush();
 		}
 	}
 
-	virtual void HandleReversedPath(const Path& path) {
+	void HandleReversedPath(const Path& path) override {
 		for (auto it = processors_.begin(); it != processors_.end(); ++it) {
 			(*it)->HandleReversedPath(path);
 		}
@@ -279,7 +260,7 @@ public:
             g_(g), cnt_(0), comparator_(comparator) {
     }
 
-    virtual void HandleReversedPath(const vector<EdgeId>& path) {
+    void HandleReversedPath(const vector<EdgeId>& path) override {
         cnt_++;
         if(best_path_.size() == 0 || comparator_(path, best_path_))
             best_path_ = path;
@@ -301,9 +282,7 @@ private:
 };
 
 
-
-
-    template<class Graph>
+template<class Graph>
 class PathStorageCallback: public PathProcessor<Graph>::Callback {
 	typedef typename Graph::EdgeId EdgeId;
 	typedef vector<EdgeId> Path;
@@ -313,12 +292,12 @@ public:
 			g_(g) {
 	}
 
-	virtual void Flush() {
+	void Flush() override {
 		all_paths_.push_back(cur_paths_);
 		cur_paths_.clear();
 	}
 
-	virtual void HandleReversedPath(const vector<EdgeId>& path) {
+	void HandleReversedPath(const vector<EdgeId>& path) override {
 		cur_paths_.push_back(this->ReversePath(path));
 	}
 
@@ -346,13 +325,13 @@ public:
 			g_(g), count_(0) {
 	}
 
-	virtual void Flush() {
+	void Flush() override {
 		all_paths_.push_back(cur_paths_);
 		counts_.push_back(count_);
 		cur_paths_.clear();
 	}
 
-	virtual void HandleReversedPath(const Path& path) {
+	void HandleReversedPath(const Path& path) override {
 		if (path.size() > 0) {
 			++count_;
 			cur_paths_.push_back(this->ReversePath(path));
@@ -386,13 +365,13 @@ public:
 			g_(g), count_(0) {
 	}
 
-	virtual void Flush() {
+	void Flush() override {
 		all_vertices_.push_back(vertices_);
 		vertices_.clear();
 		counts_.push_back(count_);
 	}
 
-	virtual void HandleReversedPath(const Path& path) {
+	void HandleReversedPath(const Path& path) override {
 		for (auto it = path.rbegin(); it != path.rend(); ++it) {
 			if (path.size() > 0) {
 				vertices_.insert(g_.EdgeStart(*it));
@@ -428,17 +407,18 @@ public:
 			g_(g) {
 	}
 
-	virtual void Flush() {
+	void Flush() override {
 		all_distances_.push_back(distances_);
 		distances_.clear();
 	}
 
-	virtual void HandleReversedPath(const Path& path) {
+	void HandleReversedPath(const Path& path) override {
 		size_t path_length = PathLength(path);
 		distances_.insert(path_length);
 	}
 
 	vector<size_t> distances(size_t k = 0) const {
+	    VERIFY(k < all_distances_.size());
 		const set<size_t>& tmp = all_distances_[k];
 		return vector<size_t>(tmp.begin(), tmp.end());
 	}
@@ -458,96 +438,4 @@ private:
 	DECL_LOGGER("DistancesLengthsCallback");
 };
 
-template<class Graph>
-class MappingPathFixer {
-public:
-
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-
-    MappingPathFixer(const Graph& graph)
-            : g_(graph) {
-    }
-
-    bool CheckContiguous(const vector<typename Graph::EdgeId>& path) const {
-        for (size_t i = 1; i < path.size(); ++i) {
-            if (g_.EdgeEnd(path[i - 1]) != g_.EdgeStart(path[i]))
-                return false;
-        }
-        return true;
-    }
-
-    Path<EdgeId> TryFixPath(const Path<EdgeId>& path, size_t length_bound = 70) const {
-        return Path<EdgeId>(TryFixPath(path.sequence(), length_bound), path.start_pos(), path.end_pos());
-    }
-
-    vector<EdgeId> TryFixPath(const vector<EdgeId>& edges, size_t length_bound = 70) const {
-        vector<EdgeId> answer;
-        if (edges.empty()) {
-            //          WARN("Mapping path was empty");
-            return vector<EdgeId>();
-        }
-        answer.push_back(edges[0]);
-        for (size_t i = 1; i < edges.size(); ++i) {
-            if (g_.EdgeEnd(edges[i - 1]) != g_.EdgeStart(edges[i])) {
-                vector<EdgeId> closure = TryCloseGap(g_.EdgeEnd(edges[i - 1]),
-                                                     g_.EdgeStart(edges[i]),
-                                                     length_bound);
-                answer.insert(answer.end(), closure.begin(), closure.end());
-            }
-            answer.push_back(edges[i]);
-        }
-        return answer;
-    }
-
-    vector<EdgeId> DeleteSameEdges(const vector<EdgeId>& path) const {
-        vector<EdgeId> result;
-        if (path.empty()) {
-            return result;
-        }
-        result.push_back(path[0]);
-        for (size_t i = 1; i < path.size(); ++i) {
-            if (path[i] != result[result.size() - 1]) {
-                result.push_back(path[i]);
-            }
-        }
-        return result;
-    }
-
-private:
-    vector<EdgeId> TryCloseGap(VertexId v1, VertexId v2, size_t length_bound) const {
-        if (v1 == v2)
-            return vector<EdgeId>();
-        TRACE(
-                "Trying to close gap between v1=" << g_.int_id(v1) << " and v2=" << g_.int_id(v2));
-        PathStorageCallback<Graph> path_store(g_);
-        //todo reduce value after investigation
-        PathProcessor<Graph> path_processor(g_, 0, length_bound, v1, v2, path_store);
-        path_processor.Process();
-
-        if (path_store.size() == 0) {
-            TRACE("Failed to find closing path");
-            //          TRACE("Failed to close gap between v1=" << graph_.int_id(v1)
-            //                          << " (conjugate "
-            //                          << graph_.int_id(g_.conjugate(v1))
-            //                          << ") and v2=" << g_.int_id(v2)
-            //                          << " (conjugate "
-            //                          << g_.int_id(g_.conjugate(v2)) << ")");
-            //          return boost::none;
-            return vector<EdgeId>();
-        } else if (path_store.size() == 1) {
-            TRACE("Unique closing path found");
-        } else {
-            TRACE("Several closing paths found, first chosen");
-        }
-        vector<EdgeId> answer = path_store.paths().front();
-        TRACE("Gap closed");
-        TRACE( "Cumulative closure length is " << CumulativeLength(g_, answer));
-        return answer;
-    }
-    const Graph& g_;
-};
-
-
-
 }
diff --git a/src/include/omni/range.hpp b/src/include/omni/range.hpp
new file mode 100644
index 0000000..a321eb0
--- /dev/null
+++ b/src/include/omni/range.hpp
@@ -0,0 +1,92 @@
+#pragma once
+
+#include "verify.hpp"
+
+namespace omnigraph {
+
+struct Range {
+private:
+    bool inside(size_t left, size_t right, size_t point) const {
+        return left <= point && point <= right;
+    }
+
+public:
+    //inclusive
+    size_t start_pos;
+    //exclusive
+    size_t end_pos;
+
+    size_t size() const {
+        VERIFY(end_pos >= start_pos);
+        return end_pos - start_pos;
+    }
+
+    void shift(int shift) {
+        VERIFY(shift > 0 || size_t(-shift) <= start_pos);
+        start_pos += shift;
+        end_pos += shift;
+    }
+
+    Range(): start_pos(0), end_pos(0) {
+        VERIFY(end_pos >= start_pos);
+    }
+
+    Range(size_t start_pos, size_t end_pos)
+            : start_pos(start_pos),
+              end_pos(end_pos) {
+        VERIFY(end_pos >= start_pos);
+    }
+
+    bool operator<(const Range &other) const {
+      if (start_pos != other.start_pos)
+        return start_pos < other.start_pos;
+      return end_pos < other.end_pos;
+    }
+
+    bool contains(const Range& that) const {
+        return start_pos <= that.start_pos && end_pos >= that.end_pos;
+    }
+
+    Range Merge(const Range &other) const {
+        return Range(this->start_pos, other.end_pos);
+    }
+
+    Range Invert(size_t base_length) const {
+        VERIFY(base_length >= end_pos);
+        return Range(base_length - end_pos, base_length - start_pos);
+    }
+
+    Range& operator=(const Range& other) {
+        start_pos = other.start_pos;
+        end_pos = other.end_pos;
+        return *this;
+    }
+
+    bool empty() const {
+        return start_pos == end_pos;
+    }
+
+    bool Intersect(const Range &other) const {
+        return inside(start_pos, end_pos, other.start_pos) || inside(start_pos, end_pos, other.end_pos) ||
+                inside(other.start_pos, other.end_pos, start_pos);
+    }
+
+    bool IntersectLeftOf(const Range &other) const {
+        return inside(start_pos, end_pos, other.start_pos) && inside(other.start_pos, other.end_pos, end_pos);
+    }
+
+    bool operator==(const Range &that) const {
+        return start_pos == that.start_pos && end_pos == that.end_pos;
+    }
+
+    bool operator!=(const Range &that) const {
+        return !(*this == that);
+    }
+};
+
+inline std::ostream& operator<<(std::ostream& os, const Range& range) {
+    os << "[" << (range.start_pos + 1) << " - " << range.end_pos << "]";
+    return os;
+}
+
+}
diff --git a/src/include/omni/relative_coverage_remover.hpp b/src/include/omni/relative_coverage_remover.hpp
index 976c4d8..6ebd70b 100644
--- a/src/include/omni/relative_coverage_remover.hpp
+++ b/src/include/omni/relative_coverage_remover.hpp
@@ -183,11 +183,11 @@ public:
             : g_(g),
               local_coverage_f_(local_coverage_f),
               min_coverage_gap_(min_coverage_gap) {
-
+        VERIFY(math::gr(min_coverage_gap, 1.));
     }
 
     double LocalCoverage(EdgeId e, VertexId v) const {
-        TRACE("Local coverage of edge " << g_.str(e) << " around vertex " << g_.str(v) << " was " << local_coverage_f_(e, v));
+        DEBUG("Local coverage of edge " << g_.str(e) << " around vertex " << g_.str(v) << " was " << local_coverage_f_(e, v));
         return local_coverage_f_(e, v);
     }
 
@@ -366,47 +366,74 @@ private:
     DECL_LOGGER("RelativelyLowCoveredComponentChecker");
 };
 
+//Removes first (k+1)-mer of graph edge (disconnects the edge from its start vertex)
+template<class Graph>
+class EdgeDisconnector {
+    typedef typename Graph::EdgeId EdgeId;
+    Graph& g_;
+    EdgeRemover<Graph> edge_remover_;
 
+public:
+    EdgeDisconnector(Graph& g,
+                     HandlerF<Graph> removal_handler = nullptr):
+                                 g_(g), edge_remover_(g, removal_handler) {
+    }
 
+    EdgeId operator()(EdgeId e) {
+        VERIFY(g_.length(e) > 1);
+        pair<EdgeId, EdgeId> split_res = g_.SplitEdge(e, 1);
+        edge_remover_.DeleteEdge(split_res.first);
+        return split_res.first;
+    }
+};
+
+//todo make parallel
 template<class Graph>
-class RelativeCoverageDisconnector : public EdgeProcessingAlgorithm<Graph> {
+class RelativeCoverageDisconnector: public EdgeProcessingAlgorithm<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
+    typedef std::function<double(EdgeId, VertexId)> LocalCoverageFT;
     typedef EdgeProcessingAlgorithm<Graph> base;
-    const RelativeCoverageHelper<Graph>& rel_helper_;
-    const double minimum_coverage_diff_mult = 20.0;
+
+    const RelativeCoverageHelper<Graph> rel_helper_;
+    EdgeDisconnector<Graph> disconnector_;
+    size_t cnt_;
 public:
     RelativeCoverageDisconnector(Graph& g,
-                      const RelativeCoverageHelper<Graph>& rel_helper)
-            : base(g, true), rel_helper_(rel_helper) {
+            LocalCoverageFT local_coverage_f, double diff_mult) :
+            base(g, false),
+            rel_helper_(g, local_coverage_f, diff_mult),
+            disconnector_(g),
+            cnt_(0) {
+    }
+
+    ~RelativeCoverageDisconnector() {
+        DEBUG("Disconnected edge cnt " << cnt_);
     }
+
 protected:
     bool ProcessEdge(EdgeId edge) {
-    	DEBUG("Processing edge " << this->g().int_id(edge));
-    	VertexId v = this->g().EdgeEnd(edge);
-    	double coverage_edge_around_v = rel_helper_.LocalCoverage(edge, v);
-    	double max_local_incoming = rel_helper_.MaxLocalCoverage(this->g().IncomingEdges(v), v);
-    	double max_local_outgoing = rel_helper_.MaxLocalCoverage(this->g().OutgoingEdges(v), v);
-    	DEBUG("Edge coverage - " << coverage_edge_around_v << ", max incoming coverage - " << max_local_incoming
-    			<< ", max outgoing coverage - " << max_local_outgoing);
-    	if(min(max_local_incoming, max_local_outgoing) > minimum_coverage_diff_mult * coverage_edge_around_v) {
-    		DEBUG("Disconnecting");
-    		return DisconnectEdge(edge);
-    	}
-    	DEBUG("No need to disconnect");
-    	return false;
+        DEBUG("Processing edge " << this->g().int_id(edge));
+        VertexId v = this->g().EdgeStart(edge);
+        double coverage_edge_around_v = rel_helper_.LocalCoverage(edge, v);
+        DEBUG("Local flanking coverage - " << coverage_edge_around_v);
+        DEBUG("Max local coverage incoming  - " << rel_helper_.MaxLocalCoverage(this->g().IncomingEdges(v), v));
+        DEBUG("Max local coverage outgoing  - " << rel_helper_.MaxLocalCoverage(this->g().OutgoingEdges(v), v));
+        if (this->g().length(edge) > 1 &&
+                rel_helper_.CheckAnyHighlyCovered(this->g().IncomingEdges(v), v, coverage_edge_around_v) &&
+                rel_helper_.CheckAnyHighlyCovered(this->g().OutgoingEdges(v), v, coverage_edge_around_v)) {
+            DEBUG("Disconnecting");
+            disconnector_(edge);
+            cnt_++;
+            return true;
+        } else {
+            DEBUG("No need to disconnect");
+            return false;
+      }
     }
+
 private:
-    bool DisconnectEdge(EdgeId edge) {
-    	size_t len = this->g().length(edge);
-    	if(len > 1) {
-    		pair<EdgeId, EdgeId> split_res = this->g().SplitEdge(edge, len - 1);
-    		EdgeRemover<Graph> edge_remover(this->g());
-    		edge_remover.DeleteEdge(split_res.second);
-    		return true;
-    	}
-    	return false;
-    }
+
     DECL_LOGGER("RelativeCoverageDisconnector");
 };
 
@@ -513,7 +540,7 @@ class RelativeCoverageComponentRemover : public EdgeProcessingAlgorithm<Graph> {
     typedef typename Graph::VertexId VertexId;
     typedef std::function<double(EdgeId, VertexId)> LocalCoverageFT;
     typedef typename ComponentRemover<Graph>::HandlerF HandlerF;
-    typedef std::shared_ptr<func::Predicate<EdgeId>> ProceedConditionT;
+    typedef pred::TypedPredicate<EdgeId> ProceedConditionT;
 
     RelativeCoverageHelper<Graph> rel_helper_;
     size_t length_bound_;
diff --git a/src/include/omni/splitters.hpp b/src/include/omni/splitters.hpp
index 36f3940..2eaaed2 100644
--- a/src/include/omni/splitters.hpp
+++ b/src/include/omni/splitters.hpp
@@ -451,7 +451,7 @@ public:
     GraphComponent<Graph> Find(typename Graph::VertexId v) {
     	auto cd = DijkstraHelper<Graph>::CreateCountingDijkstra(this->graph(), max_size_,
     			edge_length_bound_);
-        cd.run(v);
+        cd.Run(v);
         vector<VertexId> result_vector = cd.ReachedVertices();
         set<VertexId> result(result_vector.begin(), result_vector.end());
         ComponentCloser<Graph> cc(this->graph(), edge_length_bound_);
@@ -592,7 +592,7 @@ public:
 
     GraphComponent<Graph> Find(VertexId v) {
     	auto cd = DijkstraHelper<Graph>::CreateShortEdgeDijkstra(this->graph(), edge_length_bound_);
-        cd.run(v);
+        cd.Run(v);
         set<VertexId> result = cd.ProcessedVertices();
         return GraphComponent<Graph>(this->graph(), result.begin(),
                                      result.end());
@@ -728,8 +728,10 @@ private:
 				break;
 			} else {
 				vertices.insert(next.v_begin(), next.v_end());
-				name += ";";
-				name += next.name();
+                if (next.name() != "") {
+                    name += ";";
+                    name += next.name();
+                }
 			}
 		}
 		return GraphComponent<Graph>(this->graph(), vertices.begin(), vertices.end(), CutName(name, 60));
diff --git a/src/include/omni/tip_clipper.hpp b/src/include/omni/tip_clipper.hpp
index aaf3561..21ab8d4 100644
--- a/src/include/omni/tip_clipper.hpp
+++ b/src/include/omni/tip_clipper.hpp
@@ -18,6 +18,7 @@
 
 #include "omni_utils.hpp"
 #include "xmath.h"
+#include "func.hpp"
 #include "basic_edge_conditions.hpp"
 #include "graph_processing_algorithm.hpp"
 
@@ -61,7 +62,7 @@ public:
 			base(g), max_relative_coverage_(max_relative_coverage) {
 	}
 
-	bool Check(EdgeId e) const {
+	bool Check(EdgeId e) const override {
 		//+1 is a trick to deal with edges of 0 coverage from iterative run
 		double max_coverage = MaxCompetitorCoverage(e) + 1;
 		return math::le(this->g().coverage(e),
@@ -94,7 +95,7 @@ public:
      * @param edge edge vertex to be checked
      * @return true if edge judged to be tip and false otherwise.
      */
-    /*virtual*/ bool Check(EdgeId e) const {
+    bool Check(EdgeId e) const override {
         return (IsTip(this->g().EdgeEnd(e)) || IsTip(this->g().EdgeStart(e)))
                 && (this->g().OutgoingEdgeCount(this->g().EdgeStart(e))
                         + this->g().IncomingEdgeCount(this->g().EdgeEnd(e)) > 2);
@@ -106,11 +107,11 @@ public:
 template<class Graph>
 class MismatchTipCondition : public EdgeCondition<Graph> {
     typedef EdgeCondition<Graph> base;
-
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 
     size_t max_diff_;
+
     size_t Hamming(EdgeId edge1, EdgeId edge2) const {
         size_t len = std::min(this->g().length(edge1), this->g().length(edge2));
         size_t cnt = 0;
@@ -123,53 +124,54 @@ class MismatchTipCondition : public EdgeCondition<Graph> {
         return cnt;
     }
 
-public:
-    static const size_t INF = size_t(-1);
-    MismatchTipCondition(const Graph& g, size_t max_diff) : base(g), max_diff_(max_diff) {
-    }
-
-    /**
-     * This method checks if given edge topologically looks like a tip.
-     * @param edge edge vertex to be checked
-     * @return true if edge judged to be tip and false otherwise.
-     */
-    /*virtual*/ bool Check(EdgeId e) const {
-        if(max_diff_ == INF) {
-            return true;
-        }
-        auto alternatives = this->g().OutgoingEdges(this->g().EdgeStart(e));
-        for(auto it = alternatives.begin(); it != alternatives.end(); ++it) {
-            if(e != *it && this->g().length(e) < this->g().length(*it) && Hamming(e, *it) <= max_diff_) {
+    bool InnerCheck(EdgeId e) const {
+        size_t len = this->g().length(e);
+        for (auto alt : this->g().OutgoingEdges(this->g().EdgeStart(e))) {
+            if (e != alt && len < this->g().length(alt) && Hamming(e, alt) <= max_diff_) {
                 return true;
             }
         }
         return false;
     }
 
+public:
+    MismatchTipCondition(const Graph& g, size_t max_diff) : 
+        base(g), max_diff_(max_diff) {
+    }
+
+    bool Check(EdgeId e) const override {
+        return InnerCheck(e) || InnerCheck(this->g().conjugate(e));
+    }
+
 };
 
 template<class Graph>
-shared_ptr<func::Predicate<typename Graph::EdgeId>> AddTipCondition(const Graph& g,
-                                                                  shared_ptr<func::Predicate<typename Graph::EdgeId>> condition) {
-    return func::And<typename Graph::EdgeId>(
-            make_shared<TipCondition<Graph>>(g),
-            condition);
+pred::TypedPredicate<typename Graph::EdgeId> AddTipCondition(const Graph& g,
+                                                            pred::TypedPredicate<typename Graph::EdgeId> condition) {
+    return pred::And(TipCondition<Graph>(g), condition);
 }
 
 template<class Graph>
-bool ClipTips(
-        Graph& g,
-        size_t max_length,
-        shared_ptr<Predicate<typename Graph::EdgeId>> condition
-            = make_shared<func::AlwaysTrue<typename Graph::EdgeId>>(),
-        std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-
-    omnigraph::EdgeRemovingAlgorithm<Graph> tc(g,
-                                               AddTipCondition(g, condition),
-                                               removal_handler);
-
-    return tc.Run(LengthComparator<Graph>(g),
-                      make_shared<LengthUpperBound<Graph>>(g, max_length));
+pred::TypedPredicate<typename Graph::EdgeId>
+NecessaryTipCondition(const Graph& g, size_t max_length, double max_coverage) {
+    return AddTipCondition(g, pred::And(LengthUpperBound<Graph>(g, max_length),
+                                       CoverageUpperBound<Graph>(g, max_coverage)));
 }
 
+//template<class Graph>
+//bool ClipTips(
+//        Graph& g,
+//        size_t max_length,
+//        shared_ptr<Predicate<typename Graph::EdgeId>> condition
+//            = make_shared<func::AlwaysTrue<typename Graph::EdgeId>>(),
+//        std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+//
+//    omnigraph::EdgeRemovingAlgorithm<Graph> tc(g,
+//                                               AddTipCondition(g, condition),
+//                                               removal_handler);
+//
+//    return tc.Run(LengthComparator<Graph>(g),
+//                      make_shared<LengthUpperBound<Graph>>(g, max_length));
+//}
+
 } // namespace omnigraph
diff --git a/src/include/pred.hpp b/src/include/pred.hpp
new file mode 100644
index 0000000..af85372
--- /dev/null
+++ b/src/include/pred.hpp
@@ -0,0 +1,165 @@
+#ifndef __ADT_PRED_HPP__
+#define __ADT_PRED_HPP__
+
+#pragma once
+
+#include "adt/function_traits.hpp"
+
+#include <memory>
+#include <functional>
+
+namespace pred {
+
+template<typename T>
+class TypedPredicate {
+ public:
+  typedef T checked_type;
+  
+  template<typename P>
+  TypedPredicate(P p)
+      : self_(std::make_shared<TypedPredicateModel<P> >(std::move(p))) {}
+
+  bool operator()(T x) const {
+    return self_->operator()(x);
+  }
+
+ private:
+  struct TypedPredicateConcept {
+    virtual ~TypedPredicateConcept() {};
+    virtual bool operator()(T x) const = 0;
+  };
+
+  template<class P>
+  struct TypedPredicateModel : TypedPredicateConcept {
+    TypedPredicateModel(P p)
+        : data_(std::move(p)) {}
+
+    virtual bool operator()(T x) const override {
+      return data_(x);
+    }
+
+    P data_;
+  };
+
+  std::shared_ptr<const TypedPredicateConcept> self_;
+};
+
+template<typename T>
+class AlwaysTrueOperator {
+ public:
+  typedef T checked_type;
+
+  bool operator()(T) const {
+    return true;
+  }
+};
+
+template<typename T>
+class AlwaysFalseOperator {
+  typedef T checked_type;
+
+ public:
+  bool operator()(T) const {
+    return false;
+  }
+};
+
+template<typename T>
+class AndOperator {
+ public:
+  typedef T checked_type;
+
+  AndOperator(TypedPredicate<T> lhs, TypedPredicate<T> rhs)
+      : lhs_(std::move(lhs)),
+        rhs_(std::move(rhs)) { }
+
+  bool operator()(T x) const {
+    return lhs_(x) && rhs_(x);
+  }
+
+ private:
+  const TypedPredicate<T> lhs_, rhs_;
+};
+
+template<typename T>
+class OrOperator {
+ public:
+  typedef T checked_type;
+
+  OrOperator(TypedPredicate<T> lhs, TypedPredicate<T> rhs)
+      : lhs_(std::move(lhs)), rhs_(std::move(rhs)) { }
+
+  bool operator()(T x) const {
+    return lhs_(x) || rhs_(x);
+  }
+
+ private:
+  const TypedPredicate<T> lhs_, rhs_;
+};
+
+template<typename T>
+class NotOperator {
+ public:
+  typedef T checked_type;
+
+  NotOperator(const TypedPredicate<T> p)
+      : p_(std::move(p)) {}
+
+  bool operator()(T x) const {
+    return !p_(x);
+  }
+
+ private:
+  const TypedPredicate<T> p_;
+};
+
+template<class P,
+         bool = adt::function_traits<P>::arity == 1 &&
+                std::is_same<typename adt::function_traits<P>::return_type, bool>::value>
+struct is_predicate : public std::true_type {};
+
+template<class P>
+struct is_predicate<P, false> : public std::false_type {};
+
+template<class TP1, class TP2,
+         typename _T1 = typename adt::function_traits<TP1>::template arg<0>::type,
+         typename _T2 = typename adt::function_traits<TP2>::template arg<0>::type,
+         typename =
+           typename std::enable_if<std::is_same<_T1, _T2>::value &&
+                                   is_predicate<TP1>::value && is_predicate<TP2>::value
+                                   >::type>
+TypedPredicate<_T1> And(TP1 lhs, TP2 rhs) {
+  return AndOperator<_T1>(lhs, rhs);
+}
+
+template<class TP1, class TP2,
+         typename _T1 = typename adt::function_traits<TP1>::template arg<0>::type,
+         typename _T2 = typename adt::function_traits<TP2>::template arg<0>::type,
+         typename =
+           typename std::enable_if<std::is_same<_T1, _T2>::value &&
+                                   is_predicate<TP1>::value && is_predicate<TP2>::value
+                                   >::type>
+TypedPredicate<_T1> Or(TP1 lhs, TP2 rhs) {
+  return OrOperator<_T1>(lhs, rhs);
+}
+
+template<class TP,
+         typename _T = typename adt::function_traits<TP>::template arg<0>::type,
+         typename =
+           typename std::enable_if<is_predicate<TP>::value>::type>
+TypedPredicate<_T> Not(TP p) {
+  return NotOperator<_T>(p);
+}
+
+template<class T>
+TypedPredicate<T> AlwaysTrue() {
+  return AlwaysTrueOperator<T>();
+}
+template<class T>
+TypedPredicate<T> AlwaysFalse() {
+  return AlwaysFalseOperator<T>();
+}
+
+} // namespace pred
+
+#endif // __ADT_PRED_HPP__
diff --git a/src/include/sequence/rtseq.hpp b/src/include/sequence/rtseq.hpp
index 0f0ef37..2dbe934 100644
--- a/src/include/sequence/rtseq.hpp
+++ b/src/include/sequence/rtseq.hpp
@@ -25,8 +25,6 @@
 #include "seq.hpp"
 #include "simple_seq.hpp"
 
-#include "mph_index/MurmurHash3.h"
-
 #include <cstring>
 #include <iostream>
 
@@ -665,9 +663,7 @@ class RuntimeSeq {
   }
 
   static size_t GetHash(const DataType *data, size_t sz, uint32_t seed = 0) {
-    uint64_t res[2];
-    MurmurHash3_x64_128(data, sz * sizeof(DataType), 0x9E3779B9 ^ seed, res);
-    return res[0] ^ res[1];
+    return CityHash64WithSeed((const char*)data, sz * sizeof(DataType), 0x9E3779B9 ^ seed);
   }
 
   size_t GetHash(unsigned seed = 0) const {
diff --git a/src/include/sequence/seq.hpp b/src/include/sequence/seq.hpp
index 5923267..848430f 100755
--- a/src/include/sequence/seq.hpp
+++ b/src/include/sequence/seq.hpp
@@ -32,11 +32,13 @@
 #include <cstring>
 #include <iostream>
 
+#include <city/city.h>
+
 #include "verify.hpp"
 #include "sequence/nucl.hpp"
 #include "log.hpp"
 #include "seq_common.hpp"
-#include "mph_index/MurmurHash3.h"
+
 
 /**
  * @param T is max number of nucleotides, type for storage
@@ -171,6 +173,10 @@ class Seq {
   template<typename S>
   explicit Seq(const S &s, size_t offset = 0, size_t number_to_read = size_,
                bool raw = false) {
+    if (this->size(s) == 0) {
+        return;
+    }
+    VERIFY(offset < this->size(s));
     VERIFY(is_dignucl(s[offset]) || is_nucl(s[offset]));
     if (!raw)
       VERIFY(offset + number_to_read <= this->size(s));
@@ -348,7 +354,10 @@ class Seq {
   }
  
   bool operator==(const Seq<size_, T>& s) const {
-    return 0 == memcmp(data_.data(), s.data_.data(), sizeof(T) * DataSize);
+    for (size_t i = 0; i < DataSize; ++i)
+      if (data_[i] != s.data_[i])
+        return false;
+    return true;
   }
 
   /**
@@ -449,9 +458,7 @@ class Seq {
   }
 
   static size_t GetHash(const DataType *data, size_t sz = DataSize, uint32_t seed = 0) {
-    uint64_t res[2];
-    MurmurHash3_x64_128(data,  sz * sizeof(DataType), 0x9E3779B9 ^ seed, res);
-    return res[0] ^ res[1];
+    return CityHash64WithSeed((const char*)data, sz * sizeof(DataType), 0x9E3779B9 ^ seed);
   }
 
   size_t GetHash(uint32_t seed = 0) const {
diff --git a/src/include/sequence/simple_seq.hpp b/src/include/sequence/simple_seq.hpp
index 7beefe9..ecd6b9c 100644
--- a/src/include/sequence/simple_seq.hpp
+++ b/src/include/sequence/simple_seq.hpp
@@ -62,11 +62,6 @@ public:
     const static size_t TotalBytes = sizeof(T) * DataSize;
 
 private:
-    /* *
-     * @variable Just some prime number to count the hash function of the kmer
-     * */
-    const static size_t PrimeNum = 239;
-
     // number of nucleotides in the last data_ bucket
     const static size_t NuclsRemain = size_ & (TNucl - 1);
 
@@ -112,33 +107,21 @@ public:
         memcpy(dst, (const void *) data_.data(), TotalBytes);
     }
 
-    size_t GetHash() const {
-        size_t hash = PrimeNum;
-        for (size_t i = 0; i < DataSize; i++) {
-            hash = ((hash << 5) - hash) + data_[i];
-        }
-        return hash;
+    static size_t GetHash(const DataType *data, size_t sz, uint32_t seed = 0) {
+        return CityHash64WithSeed((const char*)data, sz * sizeof(DataType), 0x9E3779B9 ^ seed);
+    }
+
+    size_t GetHash(uint32_t seed = 0) const {
+        return GetHash(data_.data(), DataSize, seed);
     }
 
     struct hash {
-        size_t operator()(const SimpleSeq<size_, T>& seq) const {
-            size_t hash = PrimeNum;
-            for (size_t i = 0; i < seq.DataSize; i++) {
-                hash = ((hash << 5) - hash) + seq.data_[i];
-            }
-            return hash;
+        size_t operator()(const SimpleSeq<size_, T>& seq, uint32_t seed = 0) const {
+            return seq.GetHash(seed);
         }
-    };
 
-    struct multiple_hash {
-        size_t operator()(const SimpleSeq<size_, T>& seq, size_t hash_num,
-                size_t h) const {
-//            WARN("using multiple hash");
-            ++hash_num;
-            for (size_t i = 0; i < seq.DataSize; i++) {
-                h = (h << hash_num) + seq.data_[i];
-            }
-            return h;
+        size_t operator()(const DataType *data, size_t sz, unsigned seed = 0) {
+            return GetHash(data, sz, seed);
         }
     };
 
diff --git a/src/include/simple_tools.hpp b/src/include/simple_tools.hpp
index 5375665..3f8e859 100644
--- a/src/include/simple_tools.hpp
+++ b/src/include/simple_tools.hpp
@@ -165,6 +165,20 @@ std::ostream& operator<< (std::ostream& os, const std::vector<T>& v)
  	return os;
 }
 
+template<class T>
+std::ostream& operator<< (std::ostream& os, const std::set<T>& set)
+{
+	os << "{";
+	bool delim = false;
+	for (const auto& i : set) {
+		if (delim) os << ", ";
+		os << i;
+		delim = true;
+	}
+	os << "}";
+	return os;
+}
+
 }
 
 #endif /* SIMPLE_TOOLS_HPP_ */
diff --git a/src/include/ssw/ssw.h b/src/include/ssw/ssw.h
index 68ab504..34c9e67 100755
--- a/src/include/ssw/ssw.h
+++ b/src/include/ssw/ssw.h
@@ -16,6 +16,11 @@
 #include <string.h>
 #include <emmintrin.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif	// __cplusplus
+
+
 /*!	@typedef	structure of the query profile	*/
 struct _profile;
 typedef struct _profile s_profile;
@@ -23,45 +28,41 @@ typedef struct _profile s_profile;
 /*!	@typedef	structure of the alignment result
 	@field	score1	the best alignment score
 	@field	score2	sub-optimal alignment score
-	@field	ref_begin1	0-based best alignment beginning position on reference;	ref_begin1 = -1 when the best alignment beginning 
+	@field	ref_begin1	0-based best alignment beginning position on reference;	ref_begin1 = -1 when the best alignment beginning
 						position is not available
 	@field	ref_end1	0-based best alignment ending position on reference
-	@field	read_begin1	0-based best alignment beginning position on read; read_begin1 = -1 when the best alignment beginning 
+	@field	read_begin1	0-based best alignment beginning position on read; read_begin1 = -1 when the best alignment beginning
 						position is not available
 	@field	read_end1	0-based best alignment ending position on read
 	@field	read_end2	0-based sub-optimal alignment ending position on read
-	@field	cigar	best alignment cigar; stored the same as that in BAM format, high 28 bits: length, low 4 bits: M/I/D (0/1/2); 
+	@field	cigar	best alignment cigar; stored the same as that in BAM format, high 28 bits: length, low 4 bits: M/I/D (0/1/2);
 					cigar = 0 when the best alignment path is not available
 	@field	cigarLen	length of the cigar string; cigarLen = 0 when the best alignment path is not available
 */
 typedef struct {
-	uint16_t score1;	
-	uint16_t score2;	
-	int32_t ref_begin1;	
-	int32_t ref_end1;	
-	int32_t	read_begin1;	
-	int32_t read_end1;	
+	uint16_t score1;
+	uint16_t score2;
+	int32_t ref_begin1;
+	int32_t ref_end1;
+	int32_t	read_begin1;
+	int32_t read_end1;
 	int32_t ref_end2;
-	uint32_t* cigar;	
-	int32_t cigarLen;	
+	uint32_t* cigar;
+	int32_t cigarLen;
 } s_align;
 
-#ifdef __cplusplus
-extern "C" {
-#endif	// __cplusplus
-
 /*!	@function	Create the query profile using the query sequence.
 	@param	read	pointer to the query sequence; the query sequence needs to be numbers
 	@param	readLen	length of the query sequence
 	@param	mat	pointer to the substitution matrix; mat needs to be corresponding to the read sequence
 	@param	n	the square root of the number of elements in mat (mat has n*n elements)
-	@param	score_size	estimated Smith-Waterman score; if your estimated best alignment score is surely < 255 please set 0; if 
-						your estimated best alignment score >= 255, please set 1; if you don't know, please set 2 
+	@param	score_size	estimated Smith-Waterman score; if your estimated best alignment score is surely < 255 please set 0; if
+						your estimated best alignment score >= 255, please set 1; if you don't know, please set 2
 	@return	pointer to the query profile structure
 	@note	example for parameter read and mat:
 			If the query sequence is: ACGTATC, the sequence that read points to can be: 1234142
 			Then if the penalty for match is 2 and for mismatch is -2, the substitution matrix of parameter mat will be:
-			//A  C  G  T  
+			//A  C  G  T
 			  2 -2 -2 -2 //A
 			 -2  2 -2 -2 //C
 			 -2 -2  2 -2 //G
@@ -71,7 +72,7 @@ extern "C" {
 s_profile* ssw_init (const int8_t* read, const int32_t readLen, const int8_t* mat, const int32_t n, const int8_t score_size);
 
 /*!	@function	Release the memory allocated by function ssw_init.
-	@param	p	pointer to the query profile structure	
+	@param	p	pointer to the query profile structure
 */
 void init_destroy (s_profile* p);
 
@@ -81,39 +82,39 @@ void init_destroy (s_profile* p);
 	@param	ref	pointer to the target sequence; the target sequence needs to be numbers and corresponding to the mat parameter of
 				function ssw_init
 	@param	refLen	length of the target sequence
-	@param	weight_gapO	the absolute value of gap open penalty  
+	@param	weight_gapO	the absolute value of gap open penalty
 	@param	weight_gapE	the absolute value of gap extension penalty
-	@param	flag	bitwise FLAG; (from high to low) bit 5: when setted as 1, function ssw_align will return the best alignment 
-					beginning position; bit 6: when setted as 1, if (ref_end1 - ref_begin1 < filterd && read_end1 - read_begin1 
-					< filterd), (whatever bit 5 is setted) the function will return the best alignment beginning position and 
+	@param	flag	bitwise FLAG; (from high to low) bit 5: when setted as 1, function ssw_align will return the best alignment
+					beginning position; bit 6: when setted as 1, if (ref_end1 - ref_begin1 < filterd && read_end1 - read_begin1
+					< filterd), (whatever bit 5 is setted) the function will return the best alignment beginning position and
 					cigar; bit 7: when setted as 1, if the best alignment score >= filters, (whatever bit 5 is setted) the function
   					will return the best alignment beginning position and cigar; bit 8: when setted as 1, (whatever bit 5, 6 or 7 is
- 					setted) the function will always return the best alignment beginning position and cigar. When flag == 0, only 
+ 					setted) the function will always return the best alignment beginning position and cigar. When flag == 0, only
 					the optimal and sub-optimal scores and the optimal alignment ending position will be returned.
 	@param	filters	score filter: when bit 7 of flag is setted as 1 and bit 8 is setted as 0, filters will be used (Please check the
  					decription of the flag parameter for detailed usage.)
-	@param	filterd	distance filter: when bit 6 of flag is setted as 1 and bit 8 is setted as 0, filterd will be used (Please check 
+	@param	filterd	distance filter: when bit 6 of flag is setted as 1 and bit 8 is setted as 0, filterd will be used (Please check
 					the decription of the flag parameter for detailed usage.)
-	@param	maskLen	The distance between the optimal and suboptimal alignment ending position >= maskLen. We suggest to use 
-					readLen/2, if you don't have special concerns. Note: maskLen has to be >= 15, otherwise this function will NOT 
+	@param	maskLen	The distance between the optimal and suboptimal alignment ending position >= maskLen. We suggest to use
+					readLen/2, if you don't have special concerns. Note: maskLen has to be >= 15, otherwise this function will NOT
 					return the suboptimal alignment information. Detailed description of maskLen: After locating the optimal
-					alignment ending position, the suboptimal alignment score can be heuristically found by checking the second 
-					largest score in the array that contains the maximal score of each column of the SW matrix. In order to avoid 
-					picking the scores that belong to the alignments sharing the partial best alignment, SSW C library masks the 
-					reference loci nearby (mask length = maskLen) the best alignment ending position and locates the second largest 
+					alignment ending position, the suboptimal alignment score can be heuristically found by checking the second
+					largest score in the array that contains the maximal score of each column of the SW matrix. In order to avoid
+					picking the scores that belong to the alignments sharing the partial best alignment, SSW C library masks the
+					reference loci nearby (mask length = maskLen) the best alignment ending position and locates the second largest
 					score from the unmasked elements.
-	@return	pointer to the alignment result structure 
+	@return	pointer to the alignment result structure
 	@note	Whatever the parameter flag is setted, this function will at least return the optimal and sub-optimal alignment score,
 			and the optimal alignment ending positions on target and query sequences. If both bit 6 and 7 of the flag are setted
-			while bit 8 is not, the function will return cigar only when both criteria are fulfilled. All returned positions are 
-			0-based coordinate.  	
+			while bit 8 is not, the function will return cigar only when both criteria are fulfilled. All returned positions are
+			0-based coordinate.
 */
-s_align* ssw_align (const s_profile* prof, 
-					const int8_t* ref, 
-					int32_t refLen, 
-					const uint8_t weight_gapO, 
-					const uint8_t weight_gapE, 
-					const uint8_t flag,	
+s_align* ssw_align (const s_profile* prof,
+					const int8_t* ref,
+					int32_t refLen,
+					const uint8_t weight_gapO,
+					const uint8_t weight_gapE,
+					const uint8_t flag,
 					const uint16_t filters,
 					const int32_t filterd,
 					const int32_t maskLen);
@@ -123,6 +124,63 @@ s_align* ssw_align (const s_profile* prof,
 */
 void align_destroy (s_align* a);
 
+/*!	@function		Produce CIGAR 32-bit unsigned integer from CIGAR operation and CIGAR length
+	@param	length		length of CIGAR
+	@param	op_letter	CIGAR operation character ('M', 'I', etc)
+	@return			32-bit unsigned integer, representing encoded CIGAR operation and length
+*/
+static inline uint32_t to_cigar_int (uint32_t length, char op_letter)
+{
+	uint32_t res;
+	uint8_t op_code;
+
+	switch (op_letter) {
+		case 'M': /* alignment match (can be a sequence match or mismatch */
+		default:
+			op_code = 0;
+			break;
+		case 'I': /* insertion to the reference */
+			op_code = 1;
+			break;
+		case 'D': /* deletion from the reference */
+			op_code = 2;
+			break;
+		case 'N': /* skipped region from the reference */
+			op_code = 3;
+			break;
+		case 'S': /* soft clipping (clipped sequences present in SEQ) */
+			op_code = 4;
+			break;
+		case 'H': /* hard clipping (clipped sequences NOT present in SEQ) */
+			op_code = 5;
+			break;
+		case 'P': /* padding (silent deletion from padded reference) */
+			op_code = 6;
+			break;
+		case '=': /* sequence match */
+			op_code = 7;
+			break;
+		case 'X': /* sequence mismatch */
+			op_code = 8;
+			break;
+	}
+
+	res = (length << 4) | op_code;
+	return res;
+}
+
+/*!	@function		Extract CIGAR operation character from CIGAR 32-bit unsigned integer
+	@param	cigar_int	32-bit unsigned integer, representing encoded CIGAR operation and length
+	@return			CIGAR operation character ('M', 'I', etc)
+*/
+char cigar_int_to_op (uint32_t cigar_int);
+
+/*!	@function		Extract length of a CIGAR operation from CIGAR 32-bit unsigned integer
+	@param	cigar_int	32-bit unsigned integer, representing encoded CIGAR operation and length
+	@return			length of CIGAR operation
+*/
+uint32_t cigar_int_to_len (uint32_t cigar_int);
+
 #ifdef __cplusplus
 }
 #endif	// __cplusplus
diff --git a/src/include/ssw/ssw_cpp.h b/src/include/ssw/ssw_cpp.h
index 6e689d4..cdcf717 100644
--- a/src/include/ssw/ssw_cpp.h
+++ b/src/include/ssw/ssw_cpp.h
@@ -8,7 +8,7 @@
 namespace StripedSmithWaterman {
 
 struct Alignment {
-  uint16_t sw_score;           // The best alignment score 
+  uint16_t sw_score;           // The best alignment score
   uint16_t sw_score_next_best; // The next best alignment score
   int32_t  ref_begin;          // Reference begin position of the best alignment
   int32_t  ref_end;            // Reference end position of the best alignment
@@ -39,12 +39,12 @@ struct Filter {
   //       sw_score; sw_score_next_best; ref_end; query_end; ref_end_next_best.
   // NOTE: Only need score of alignments, please set 'report_begin_position'
   //       and 'report_cigar' false.
-  
-  bool report_begin_position;    // Give ref_begin and query_begin. 
+
+  bool report_begin_position;    // Give ref_begin and query_begin.
                                  //   If it is not set, ref_begin and query_begin are -1.
   bool report_cigar;             // Give cigar_string and cigar.
                                  //   report_begin_position is automatically TRUE.
-  
+
   // When *report_cigar* is true and alignment passes these two filters,
   //   cigar_string and cigar will be given.
   uint16_t score_filter;         // score >= score_filter
@@ -57,6 +57,13 @@ struct Filter {
     , score_filter(0)
     , distance_filter(32767)
   {};
+
+  Filter(const bool& pos, const bool& cigar, const uint16_t& score, const uint16_t& dis)
+    : report_begin_position(pos)
+    , report_cigar(cigar)
+    , score_filter(score)
+    , distance_filter(dis)
+    {};
 };
 
 class Aligner {
@@ -68,7 +75,7 @@ class Aligner {
   //             use the other constructor and pass the corresponding matrix in.
   // =========
   Aligner(void);
-  
+
   // =========
   // @function Construct an Aligner by assigning scores.
   //             The function will build the {A.C,G,T,N} aligner.
@@ -79,22 +86,22 @@ class Aligner {
           const uint8_t& mismatch_penalty,
 	  const uint8_t& gap_opening_penalty,
 	  const uint8_t& gap_extending_penalty);
-  
+
   // =========
   // @function Construct an Aligner by the specific matrixs.
   // =========
-  Aligner(const int8_t* score_matrix, 
+  Aligner(const int8_t* score_matrix,
           const int&    score_matrix_size,
           const int8_t* translation_matrix,
 	  const int&    translation_matrix_size);
-  
+
   ~Aligner(void);
 
   // =========
-  // @function Build the reference sequence and thus make 
+  // @function Build the reference sequence and thus make
   //             Align(const char* query, s_align* alignment) function;
   //             otherwise the reference should be given when aligning.
-  //           [NOTICE] If there exists a sequence, that one will be deleted 
+  //           [NOTICE] If there exists a sequence, that one will be deleted
   //                    and replaced.
   // @param    seq    The reference bases;
   //                  [NOTICE] It is not necessary null terminated.
@@ -115,7 +122,7 @@ class Aligner {
   };
 
   // =========
-  // @function Align the query againt the reference that is set by 
+  // @function Align the query againt the reference that is set by
   //             SetReferenceSequence.
   // @param    query     The query sequence.
   // @param    filter    The filter for the alignment.
@@ -126,7 +133,7 @@ class Aligner {
 
   // =========
   // @function Align the query againt the reference.
-  //           [NOTICE] The reference won't replace the reference 
+  //           [NOTICE] The reference won't replace the reference
   //                      set by SetReferenceSequence.
   // @param    query     The query sequence.
   // @param    ref       The reference sequence.
@@ -136,7 +143,7 @@ class Aligner {
   // @param    alignment The container contains the result.
   // @return   True: succeed; false: fail.
   // =========
-  bool Align(const char* query, const char* ref, const int& ref_len, 
+  bool Align(const char* query, const char* ref, const int& ref_len,
              const Filter& filter, Alignment* alignment) const;
 
   // @function Clear up all containers and thus the aligner is disabled.
@@ -160,24 +167,22 @@ class Aligner {
           const uint8_t& mismatch_penalty,
 	  const uint8_t& gap_opening_penalty,
 	  const uint8_t& gap_extending_penalty);
-  
+
   // =========
   // @function Construct an Aligner by the specific matrixs.
   //           [NOTICE] If the aligner is not cleaned, rebuilding will fail.
   // @return   True: succeed; false: fail.
   // =========
   bool ReBuild(
-          const int8_t* score_matrix, 
+          const int8_t* score_matrix,
           const int&    score_matrix_size,
           const int8_t* translation_matrix,
 	  const int&    translation_matrix_size);
-  
+
  private:
   int8_t* score_matrix_;
   int     score_matrix_size_;
   int8_t* translation_matrix_;
-  bool    default_matrix_;
-  bool    matrix_built_;
 
   uint8_t match_score_;           // default: 2
   uint8_t mismatch_penalty_;      // default: 2
@@ -190,9 +195,10 @@ class Aligner {
   int TranslateBase(const char* bases, const int& length, int8_t* translated) const;
   void SetAllDefault(void);
   void BuildDefaultMatrix(void);
-  
+  void ClearMatrices(void);
+
   Aligner& operator= (const Aligner&);
-  Aligner (const Aligner&); 
+  Aligner (const Aligner&);
 }; // class Aligner
 
 
@@ -201,11 +207,11 @@ class Aligner {
 // ================
 inline void Aligner::CleanReferenceSequence(void) {
   if (reference_length_ == 0) return;
-  
+
   // delete the current buffer
   if (reference_length_ > 1) delete [] translated_reference_;
   else delete translated_reference_;
-  
+
   reference_length_ = 0;
 }
 } // namespace StripedSmithWaterman
diff --git a/src/include/version.hpp.in b/src/include/version.hpp.in
new file mode 100644
index 0000000..9ec658f
--- /dev/null
+++ b/src/include/version.hpp.in
@@ -0,0 +1,7 @@
+#ifndef __SPADES_VERSION_HPP__
+#define __SPADES_VERSION_HPP__
+
+#cmakedefine SPADES_GIT_REFSPEC "${SPADES_GIT_REFSPEC}"
+#cmakedefine SPADES_GIT_SHA1 "${SPADES_GIT_SHA1}"
+
+#endif // __SPADES_VERSION_HPP__
diff --git a/src/io/CMakeLists.txt b/src/io/CMakeLists.txt
index f480272..865dc2c 100644
--- a/src/io/CMakeLists.txt
+++ b/src/io/CMakeLists.txt
@@ -12,7 +12,9 @@ add_library(input STATIC
             path_helper.cpp
             copy_file.cpp
             library.cpp
-            logger_impl.cpp)
+            logger_impl.cpp
+            sam/read.cpp
+            sam/sam_reader.cpp)
 
-target_link_libraries(input BamTools yaml-cpp)
+target_link_libraries(input BamTools samtools yaml-cpp)
 
diff --git a/src/io/library.cpp b/src/io/library.cpp
index d84d8ea..e6b2f67 100644
--- a/src/io/library.cpp
+++ b/src/io/library.cpp
@@ -116,12 +116,12 @@ Node convert<SequencingLibraryBase>::encode(const io::SequencingLibraryBase& rhs
     node["orientation"] = rhs.orientation();
     node["type"] = rhs.type();
 
-    for (auto it = rhs.paired_begin(), et = rhs.paired_end(); et != it; ++it) {
-      node["left reads"].push_back(it->first);
-      node["right reads"].push_back(it->second);
+    for (const auto& read_pair : rhs.paired_reads()) {
+      node["left reads"].push_back(read_pair.first);
+      node["right reads"].push_back(read_pair.second);
     }
-    for (auto it = rhs.single_begin(), et = rhs.single_end(); et != it; ++it)
-      node["single reads"].push_back(*it);
+    for (const auto& reads : rhs.single_reads())
+      node["single reads"].push_back(reads);
 
     return node;
 }
diff --git a/src/io/sam/read.cpp b/src/io/sam/read.cpp
new file mode 100644
index 0000000..de65d03
--- /dev/null
+++ b/src/io/sam/read.cpp
@@ -0,0 +1,42 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include <io/sam/read.hpp>
+
+using namespace std;
+
+namespace sam_reader {
+
+string SingleSamRead::cigar() const {
+    uint32_t *cigar = bam1_cigar(data_);
+    string res;
+    res.reserve(data_->core.n_cigar);
+    for (size_t k = 0; k < data_->core.n_cigar; ++k) {
+        res += std::to_string(bam_cigar_oplen(cigar[k]));
+        res += bam_cigar_opchr(cigar[k]);
+
+    }
+    return res;
+}
+
+string SingleSamRead::name() const {
+    string res(bam1_qname(data_));
+    return res;
+}
+
+string SingleSamRead::seq() const {
+    string res = "";
+    auto b = bam1_seq(data_);
+    for (int k = 0; k < data_->core.l_qseq; ++k) {
+        res += bam_nt16_rev_table[bam1_seqi(b, k)];
+    }
+    return res;
+}
+
+
+}
+;
diff --git a/src/corrector/sam_reader.cpp b/src/io/sam/sam_reader.cpp
similarity index 82%
rename from src/corrector/sam_reader.cpp
rename to src/io/sam/sam_reader.cpp
index 24b4e8d..77e3f4f 100644
--- a/src/corrector/sam_reader.cpp
+++ b/src/io/sam/sam_reader.cpp
@@ -5,17 +5,15 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "read.hpp"
-#include "sam_reader.hpp"
+#include <io/sam/read.hpp>
+#include <io/sam/sam_reader.hpp>
 
-#include <samtools/sam.h>
-#include "samtools/bam.h"
 using namespace std;
 
-namespace corrector {
+namespace sam_reader {
 
 bool MappedSamStream::eof() const {
-    return eof_;
+        return eof_;
 }
 
 bool MappedSamStream::is_open() const {
@@ -39,14 +37,14 @@ MappedSamStream& MappedSamStream::operator >>(PairedSamRead& read) {
     MappedSamStream::operator >>(r2);
 
     read = PairedSamRead(r1, r2);
-    TRACE(r1.get_seq());
-    TRACE(r2.get_seq());
-    TRACE(r1.get_name());
+    TRACE(r1.seq());
+    TRACE(r2.seq());
+    TRACE(r1.name());
     return *this;
 }
 
-const char* MappedSamStream::get_contig_name(size_t i) const {
-    VERIFY(i < (size_t) reader_->header->n_targets);
+const char* MappedSamStream::get_contig_name(int i) const {
+    VERIFY(i < reader_->header->n_targets);
     return (reader_->header->target_name[i]);
 }
 
diff --git a/src/ionhammer/CMakeLists.txt b/src/ionhammer/CMakeLists.txt
index e3ee7a0..8b2f8a8 100644
--- a/src/ionhammer/CMakeLists.txt
+++ b/src/ionhammer/CMakeLists.txt
@@ -20,7 +20,7 @@ add_executable(ionhammer
                seqeval/TreephaserLite.cpp
                main.cpp)
 
-target_link_libraries(ionhammer input mph_index BamTools yaml-cpp input ${COMMON_LIBRARIES})
+target_link_libraries(ionhammer input cityhash BamTools yaml-cpp input ${COMMON_LIBRARIES})
 
 if (SPADES_STATIC_BUILD)
   set_target_properties(ionhammer PROPERTIES LINK_SEARCH_END_STATIC 1)
diff --git a/src/ionhammer/HSeq.hpp b/src/ionhammer/HSeq.hpp
index 5b10f26..567f84f 100644
--- a/src/ionhammer/HSeq.hpp
+++ b/src/ionhammer/HSeq.hpp
@@ -9,7 +9,7 @@
 #define __HAMMER_HSEQ_HPP__
 
 #include "sequence/nucl.hpp"
-#include "mph_index/MurmurHash3.h"
+#include <city/city.h>
 
 #include <array>
 #include <string>
@@ -236,9 +236,7 @@ class HSeq {
   }
 
   static size_t GetHash(const DataType *data, size_t sz = DataSize, uint32_t seed = 0) {
-    uint64_t res[2];
-    MurmurHash3_x64_128(data, sz * sizeof(DataType), 0x9E3779B9 ^ seed, res);
-    return res[0] ^ res[1];
+    return CityHash64WithSeed((const char*)data, sz * sizeof(DataType), 0x9E3779B9 ^ seed);
   }
 
   size_t GetHash(uint32_t seed = 0) const {
diff --git a/src/ionhammer/main.cpp b/src/ionhammer/main.cpp
index f66f999..0048cf8 100644
--- a/src/ionhammer/main.cpp
+++ b/src/ionhammer/main.cpp
@@ -29,6 +29,8 @@
 
 #include "openmp_wrapper.h"
 
+#include "version.hpp"
+
 #include <yaml-cpp/yaml.h>
 #include <fstream>
 #include <iomanip>
diff --git a/src/mph_index/MurmurHash3.cpp b/src/mph_index/MurmurHash3.cpp
deleted file mode 100644
index 965a38f..0000000
--- a/src/mph_index/MurmurHash3.cpp
+++ /dev/null
@@ -1,345 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-//-----------------------------------------------------------------------------
-// MurmurHash3 was written by Austin Appleby, and is placed in the public
-// domain. The author hereby disclaims copyright to this source code.
-
-// Note - The x86 and x64 versions do _not_ produce the same results, as the
-// algorithms are optimized for their respective platforms. You can still
-// compile and run any of them on any platform, but your performance with the
-// non-native version will be less than optimal.
-
-#include "mph_index/MurmurHash3.h"
-
-//-----------------------------------------------------------------------------
-// Platform-specific functions and macros
-
-// Microsoft Visual Studio
-
-#if defined(_MSC_VER)
-
-#define FORCE_INLINE	__forceinline
-
-#include <stdlib.h>
-
-#define ROTL32(x,y)	_rotl(x,y)
-#define ROTL64(x,y)	_rotl64(x,y)
-
-#define BIG_CONSTANT(x) (x)
-
-// Other compilers
-
-#else	// defined(_MSC_VER)
-
-#define	FORCE_INLINE inline __attribute__((always_inline))
-
-inline uint32_t rotl32 ( uint32_t x, int8_t r )
-{
-  return (x << r) | (x >> (32 - r));
-}
-
-inline uint64_t rotl64 ( uint64_t x, int8_t r )
-{
-  return (x << r) | (x >> (64 - r));
-}
-
-#define	ROTL32(x,y)	rotl32(x,y)
-#define ROTL64(x,y)	rotl64(x,y)
-
-#define BIG_CONSTANT(x) (x##LLU)
-
-#endif // !defined(_MSC_VER)
-
-//-----------------------------------------------------------------------------
-// Block read - if your platform needs to do endian-swapping or can only
-// handle aligned reads, do the conversion here
-
-FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
-{
-  return p[i];
-}
-
-FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
-{
-  return p[i];
-}
-
-//-----------------------------------------------------------------------------
-// Finalization mix - force all bits of a hash block to avalanche
-
-FORCE_INLINE uint32_t fmix ( uint32_t h )
-{
-  h ^= h >> 16;
-  h *= 0x85ebca6b;
-  h ^= h >> 13;
-  h *= 0xc2b2ae35;
-  h ^= h >> 16;
-
-  return h;
-}
-
-//----------
-
-FORCE_INLINE uint64_t fmix ( uint64_t k )
-{
-  k ^= k >> 33;
-  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
-  k ^= k >> 33;
-  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
-  k ^= k >> 33;
-
-  return k;
-}
-
-//-----------------------------------------------------------------------------
-
-void MurmurHash3_x86_32 ( const void * key, const size_t len,
-                          uint32_t seed, void * out )
-{
-  const uint8_t * data = (const uint8_t*)key;
-  const size_t nblocks = len / 4;
-
-  uint32_t h1 = seed;
-
-  uint32_t c1 = 0xcc9e2d51;
-  uint32_t c2 = 0x1b873593;
-
-  //----------
-  // body
-
-  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
-
-  for(int i = -(int) nblocks; i; i++)
-  {
-    uint32_t k1 = getblock(blocks,i);
-
-    k1 *= c1;
-    k1 = ROTL32(k1,15);
-    k1 *= c2;
-    
-    h1 ^= k1;
-    h1 = ROTL32(h1,13); 
-    h1 = h1*5+0xe6546b64;
-  }
-
-  //----------
-  // tail
-
-  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
-
-  uint32_t k1 = 0;
-
-  switch(len & 3)
-  {
-  case 3: k1 ^= tail[2] << 16;
-  case 2: k1 ^= tail[1] << 8;
-  case 1: k1 ^= tail[0];
-          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
-  };
-
-  //----------
-  // finalization
-
-  h1 ^= (uint32_t) len;
-
-  h1 = fmix(h1);
-
-  *(uint32_t*)out = h1;
-} 
-
-//-----------------------------------------------------------------------------
-
-void MurmurHash3_x86_128 ( const void * key, const size_t len,
-                           uint32_t seed, void * out )
-{
-  const uint8_t * data = (const uint8_t*)key;
-  const size_t nblocks = len / 16;
-
-  uint32_t h1 = seed;
-  uint32_t h2 = seed;
-  uint32_t h3 = seed;
-  uint32_t h4 = seed;
-
-  uint32_t c1 = 0x239b961b; 
-  uint32_t c2 = 0xab0e9789;
-  uint32_t c3 = 0x38b34ae5; 
-  uint32_t c4 = 0xa1e38b93;
-
-  //----------
-  // body
-
-  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
-
-  for(int i = -(int) nblocks; i; i++)
-  {
-    uint32_t k1 = getblock(blocks,i*4+0);
-    uint32_t k2 = getblock(blocks,i*4+1);
-    uint32_t k3 = getblock(blocks,i*4+2);
-    uint32_t k4 = getblock(blocks,i*4+3);
-
-    k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
-
-    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;
-
-    k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
-
-    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;
-
-    k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
-
-    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;
-
-    k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
-
-    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
-  }
-
-  //----------
-  // tail
-
-  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
-
-  uint32_t k1 = 0;
-  uint32_t k2 = 0;
-  uint32_t k3 = 0;
-  uint32_t k4 = 0;
-
-  switch(len & 15)
-  {
-  case 15: k4 ^= tail[14] << 16;
-  case 14: k4 ^= tail[13] << 8;
-  case 13: k4 ^= tail[12] << 0;
-           k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
-
-  case 12: k3 ^= tail[11] << 24;
-  case 11: k3 ^= tail[10] << 16;
-  case 10: k3 ^= tail[ 9] << 8;
-  case  9: k3 ^= tail[ 8] << 0;
-           k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
-
-  case  8: k2 ^= tail[ 7] << 24;
-  case  7: k2 ^= tail[ 6] << 16;
-  case  6: k2 ^= tail[ 5] << 8;
-  case  5: k2 ^= tail[ 4] << 0;
-           k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
-
-  case  4: k1 ^= tail[ 3] << 24;
-  case  3: k1 ^= tail[ 2] << 16;
-  case  2: k1 ^= tail[ 1] << 8;
-  case  1: k1 ^= tail[ 0] << 0;
-           k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
-  };
-
-  //----------
-  // finalization
-
-  h1 ^= (uint32_t) len;
-  h2 ^= (uint32_t) len; 
-  h3 ^= (uint32_t) len; 
-  h4 ^= (uint32_t) len;
-
-  h1 += h2; h1 += h3; h1 += h4;
-  h2 += h1; h3 += h1; h4 += h1;
-
-  h1 = fmix(h1);
-  h2 = fmix(h2);
-  h3 = fmix(h3);
-  h4 = fmix(h4);
-
-  h1 += h2; h1 += h3; h1 += h4;
-  h2 += h1; h3 += h1; h4 += h1;
-
-  ((uint32_t*)out)[0] = h1;
-  ((uint32_t*)out)[1] = h2;
-  ((uint32_t*)out)[2] = h3;
-  ((uint32_t*)out)[3] = h4;
-}
-
-//-----------------------------------------------------------------------------
-
-void MurmurHash3_x64_128 ( const void * key, const size_t len,
-                           const uint32_t seed, void * out )
-{
-  const uint8_t * data = (const uint8_t*)key;
-  const size_t nblocks = len / 16;
-
-  uint64_t h1 = seed;
-  uint64_t h2 = seed;
-
-  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
-  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
-
-  //----------
-  // body
-
-  const uint64_t * blocks = (const uint64_t *)(data);
-
-  for(size_t i = 0; i < nblocks; i++)
-  {
-    uint64_t k1 = getblock(blocks, (int) i*2+0);
-    uint64_t k2 = getblock(blocks, (int) i*2+1);
-
-    k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
-
-    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;
-
-    k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
-
-    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
-  }
-
-  //----------
-  // tail
-
-  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
-
-  uint64_t k1 = 0;
-  uint64_t k2 = 0;
-
-  switch(len & 15)
-  {
-  case 15: k2 ^= uint64_t(tail[14]) << 48;
-  case 14: k2 ^= uint64_t(tail[13]) << 40;
-  case 13: k2 ^= uint64_t(tail[12]) << 32;
-  case 12: k2 ^= uint64_t(tail[11]) << 24;
-  case 11: k2 ^= uint64_t(tail[10]) << 16;
-  case 10: k2 ^= uint64_t(tail[ 9]) << 8;
-  case  9: k2 ^= uint64_t(tail[ 8]) << 0;
-           k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
-
-  case  8: k1 ^= uint64_t(tail[ 7]) << 56;
-  case  7: k1 ^= uint64_t(tail[ 6]) << 48;
-  case  6: k1 ^= uint64_t(tail[ 5]) << 40;
-  case  5: k1 ^= uint64_t(tail[ 4]) << 32;
-  case  4: k1 ^= uint64_t(tail[ 3]) << 24;
-  case  3: k1 ^= uint64_t(tail[ 2]) << 16;
-  case  2: k1 ^= uint64_t(tail[ 1]) << 8;
-  case  1: k1 ^= uint64_t(tail[ 0]) << 0;
-           k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
-  };
-
-  //----------
-  // finalization
-
-  h1 ^= len; h2 ^= len;
-
-  h1 += h2;
-  h2 += h1;
-
-  h1 = fmix(h1);
-  h2 = fmix(h2);
-
-  h1 += h2;
-  h2 += h1;
-
-  ((uint64_t*)out)[0] = h1;
-  ((uint64_t*)out)[1] = h2;
-}
-
-//-----------------------------------------------------------------------------
-
diff --git a/src/scaffold_correction/scaffold_correction.hpp b/src/scaffold_correction/scaffold_correction.hpp
index fc9b524..0e2309f 100644
--- a/src/scaffold_correction/scaffold_correction.hpp
+++ b/src/scaffold_correction/scaffold_correction.hpp
@@ -96,7 +96,7 @@ namespace scaffold_correction {
             size_t max_path_length = max_insert_ + 2 * max_cut_length_;
             DS ds(DS::LC(graph_, path), DS::VPrC(max_path_length), DS::VPuC(max_path_length), DS::NIF(graph_));
             omnigraph::Dijkstra<Graph, DS> dj(graph_, ds);
-            dj.run(v1);
+            dj.Run(v1);
             if(dj.DistanceCounted(v2) && dj.GetDistance(v2) <= max_insert_) {
                 vector<EdgeId> result = dj.GetShortestPathTo(v2);
                 VERIFY(graph_.EdgeStart(result.front()) == v1);
@@ -319,11 +319,10 @@ namespace spades {
                 cfg::get().pos.max_mapping_gap,
                 cfg::get().pos.max_gap_diff);
         StageManager manager({cfg::get().developer_mode,
-                cfg::get().load_from,
-                cfg::get().output_saves});
-        manager.add(new debruijn_graph::Construction());
-        std::string output_file = cfg::get().output_dir + "corrected_scaffolds.fasta";
-        manager.add(new ScaffoldCorrectionStage(cfg::get().K, output_file, cfg::get().sc_cor));
+                              cfg::get().load_from,
+                              cfg::get().output_saves});
+        manager.add(new debruijn_graph::Construction())
+               .add(new ScaffoldCorrectionStage(cfg::get().K, cfg::get().output_dir + "corrected_scaffolds.fasta", cfg::get().sc_cor));
         INFO("Output directory: " << cfg::get().output_dir);
         conj_gp.kmer_mapper.Attach();
         manager.run(conj_gp, cfg::get().entry_point.c_str());
diff --git a/src/spades_pipeline/__pycache__/corrector_logic.cpython-34.pyc b/src/spades_pipeline/__pycache__/corrector_logic.cpython-34.pyc
new file mode 100644
index 0000000..608dc2e
Binary files /dev/null and b/src/spades_pipeline/__pycache__/corrector_logic.cpython-34.pyc differ
diff --git a/src/spades_pipeline/__pycache__/dipspades_logic.cpython-34.pyc b/src/spades_pipeline/__pycache__/dipspades_logic.cpython-34.pyc
new file mode 100644
index 0000000..d464471
Binary files /dev/null and b/src/spades_pipeline/__pycache__/dipspades_logic.cpython-34.pyc differ
diff --git a/src/spades_pipeline/__pycache__/dipspades_logic.cpython-35.pyc b/src/spades_pipeline/__pycache__/dipspades_logic.cpython-35.pyc
new file mode 100644
index 0000000..366981f
Binary files /dev/null and b/src/spades_pipeline/__pycache__/dipspades_logic.cpython-35.pyc differ
diff --git a/src/spades_pipeline/__pycache__/hammer_logic.cpython-33.pyc b/src/spades_pipeline/__pycache__/hammer_logic.cpython-33.pyc
new file mode 100644
index 0000000..361840c
Binary files /dev/null and b/src/spades_pipeline/__pycache__/hammer_logic.cpython-33.pyc differ
diff --git a/src/spades_pipeline/__pycache__/hammer_logic.cpython-34.pyc b/src/spades_pipeline/__pycache__/hammer_logic.cpython-34.pyc
new file mode 100644
index 0000000..e919345
Binary files /dev/null and b/src/spades_pipeline/__pycache__/hammer_logic.cpython-34.pyc differ
diff --git a/src/spades_pipeline/__pycache__/hammer_logic.cpython-35.pyc b/src/spades_pipeline/__pycache__/hammer_logic.cpython-35.pyc
new file mode 100644
index 0000000..1e89b54
Binary files /dev/null and b/src/spades_pipeline/__pycache__/hammer_logic.cpython-35.pyc differ
diff --git a/src/spades_pipeline/__pycache__/options_storage.cpython-33.pyc b/src/spades_pipeline/__pycache__/options_storage.cpython-33.pyc
new file mode 100644
index 0000000..651b454
Binary files /dev/null and b/src/spades_pipeline/__pycache__/options_storage.cpython-33.pyc differ
diff --git a/src/spades_pipeline/__pycache__/options_storage.cpython-34.pyc b/src/spades_pipeline/__pycache__/options_storage.cpython-34.pyc
new file mode 100644
index 0000000..eb2d269
Binary files /dev/null and b/src/spades_pipeline/__pycache__/options_storage.cpython-34.pyc differ
diff --git a/src/spades_pipeline/__pycache__/options_storage.cpython-35.pyc b/src/spades_pipeline/__pycache__/options_storage.cpython-35.pyc
new file mode 100644
index 0000000..d7c5d9f
Binary files /dev/null and b/src/spades_pipeline/__pycache__/options_storage.cpython-35.pyc differ
diff --git a/src/spades_pipeline/__pycache__/process_cfg.cpython-33.pyc b/src/spades_pipeline/__pycache__/process_cfg.cpython-33.pyc
new file mode 100644
index 0000000..67ddbbf
Binary files /dev/null and b/src/spades_pipeline/__pycache__/process_cfg.cpython-33.pyc differ
diff --git a/src/spades_pipeline/__pycache__/process_cfg.cpython-34.pyc b/src/spades_pipeline/__pycache__/process_cfg.cpython-34.pyc
new file mode 100644
index 0000000..9a0c9a7
Binary files /dev/null and b/src/spades_pipeline/__pycache__/process_cfg.cpython-34.pyc differ
diff --git a/src/spades_pipeline/__pycache__/process_cfg.cpython-35.pyc b/src/spades_pipeline/__pycache__/process_cfg.cpython-35.pyc
new file mode 100644
index 0000000..6c0152a
Binary files /dev/null and b/src/spades_pipeline/__pycache__/process_cfg.cpython-35.pyc differ
diff --git a/src/spades_pipeline/__pycache__/spades_logic.cpython-33.pyc b/src/spades_pipeline/__pycache__/spades_logic.cpython-33.pyc
new file mode 100644
index 0000000..42ef55e
Binary files /dev/null and b/src/spades_pipeline/__pycache__/spades_logic.cpython-33.pyc differ
diff --git a/src/spades_pipeline/__pycache__/spades_logic.cpython-34.pyc b/src/spades_pipeline/__pycache__/spades_logic.cpython-34.pyc
new file mode 100644
index 0000000..f3f7187
Binary files /dev/null and b/src/spades_pipeline/__pycache__/spades_logic.cpython-34.pyc differ
diff --git a/src/spades_pipeline/__pycache__/spades_logic.cpython-35.pyc b/src/spades_pipeline/__pycache__/spades_logic.cpython-35.pyc
new file mode 100644
index 0000000..6406c7e
Binary files /dev/null and b/src/spades_pipeline/__pycache__/spades_logic.cpython-35.pyc differ
diff --git a/src/spades_pipeline/__pycache__/support.cpython-33.pyc b/src/spades_pipeline/__pycache__/support.cpython-33.pyc
new file mode 100644
index 0000000..a4db44e
Binary files /dev/null and b/src/spades_pipeline/__pycache__/support.cpython-33.pyc differ
diff --git a/src/spades_pipeline/__pycache__/support.cpython-34.pyc b/src/spades_pipeline/__pycache__/support.cpython-34.pyc
new file mode 100644
index 0000000..dcae8e7
Binary files /dev/null and b/src/spades_pipeline/__pycache__/support.cpython-34.pyc differ
diff --git a/src/spades_pipeline/__pycache__/support.cpython-35.pyc b/src/spades_pipeline/__pycache__/support.cpython-35.pyc
new file mode 100644
index 0000000..591c0b3
Binary files /dev/null and b/src/spades_pipeline/__pycache__/support.cpython-35.pyc differ
diff --git a/src/spades_pipeline/common/SeqIO.pyc b/src/spades_pipeline/common/SeqIO.pyc
new file mode 100644
index 0000000..ac196f5
Binary files /dev/null and b/src/spades_pipeline/common/SeqIO.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/SeqIO.cpython-33.pyc b/src/spades_pipeline/common/__pycache__/SeqIO.cpython-33.pyc
new file mode 100644
index 0000000..fd33d67
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/SeqIO.cpython-33.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/SeqIO.cpython-34.pyc b/src/spades_pipeline/common/__pycache__/SeqIO.cpython-34.pyc
new file mode 100644
index 0000000..8d02836
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/SeqIO.cpython-34.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/SeqIO.cpython-35.pyc b/src/spades_pipeline/common/__pycache__/SeqIO.cpython-35.pyc
new file mode 100644
index 0000000..548b1e0
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/SeqIO.cpython-35.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/alignment.cpython-33.pyc b/src/spades_pipeline/common/__pycache__/alignment.cpython-33.pyc
new file mode 100644
index 0000000..77ead33
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/alignment.cpython-33.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/alignment.cpython-34.pyc b/src/spades_pipeline/common/__pycache__/alignment.cpython-34.pyc
new file mode 100644
index 0000000..22f3eb5
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/alignment.cpython-34.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/alignment.cpython-35.pyc b/src/spades_pipeline/common/__pycache__/alignment.cpython-35.pyc
new file mode 100644
index 0000000..d661b64
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/alignment.cpython-35.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/parallel_launcher.cpython-34.pyc b/src/spades_pipeline/common/__pycache__/parallel_launcher.cpython-34.pyc
new file mode 100644
index 0000000..99d4ccf
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/parallel_launcher.cpython-34.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/parallel_launcher.cpython-35.pyc b/src/spades_pipeline/common/__pycache__/parallel_launcher.cpython-35.pyc
new file mode 100644
index 0000000..55e0ba0
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/parallel_launcher.cpython-35.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/sam_parser.cpython-33.pyc b/src/spades_pipeline/common/__pycache__/sam_parser.cpython-33.pyc
new file mode 100644
index 0000000..df85ce2
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/sam_parser.cpython-33.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/sam_parser.cpython-34.pyc b/src/spades_pipeline/common/__pycache__/sam_parser.cpython-34.pyc
new file mode 100644
index 0000000..0121736
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/sam_parser.cpython-34.pyc differ
diff --git a/src/spades_pipeline/common/__pycache__/sam_parser.cpython-35.pyc b/src/spades_pipeline/common/__pycache__/sam_parser.cpython-35.pyc
new file mode 100644
index 0000000..577dbb7
Binary files /dev/null and b/src/spades_pipeline/common/__pycache__/sam_parser.cpython-35.pyc differ
diff --git a/src/spades_pipeline/common/alignment.pyc b/src/spades_pipeline/common/alignment.pyc
new file mode 100644
index 0000000..8a14330
Binary files /dev/null and b/src/spades_pipeline/common/alignment.pyc differ
diff --git a/src/spades_pipeline/common/parallel_launcher.pyc b/src/spades_pipeline/common/parallel_launcher.pyc
new file mode 100644
index 0000000..932cddb
Binary files /dev/null and b/src/spades_pipeline/common/parallel_launcher.pyc differ
diff --git a/src/spades_pipeline/common/sam_parser.pyc b/src/spades_pipeline/common/sam_parser.pyc
new file mode 100644
index 0000000..615946e
Binary files /dev/null and b/src/spades_pipeline/common/sam_parser.pyc differ
diff --git a/src/spades_pipeline/easy_align.py b/src/spades_pipeline/easy_align.py
new file mode 100755
index 0000000..114fbb7
--- /dev/null
+++ b/src/spades_pipeline/easy_align.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+import os
+import sys
+
+pipeline_modules_home = 'src/spades_pipeline/'#os.path.dirname(os.path.realpath(__file__)
+sys.path.append(os.path.join(pipeline_modules_home, "common"))
+sys.path.append(os.path.join(pipeline_modules_home, "truspades"))
+
+import logging
+import alignment
+
+#import spades_init
+#spades_init.init()
+
+
+def align(contigs_file, left, right, out_dir, threads):
+    #logging
+    log = logging.getLogger('reference_construction')
+    log.setLevel(logging.INFO)
+    console = logging.StreamHandler(sys.stderr)
+    console.setFormatter(logging.Formatter('%(message)s'))
+    console.setLevel(logging.INFO)
+    log.addHandler(console)
+    #logging
+
+    bwa_command='bin/bwa-spades'
+    index = alignment.index_bwa(bwa_command, log, contigs_file, os.path.join(out_dir, "bwa_index"), "bwtsw")
+    index = os.path.join(out_dir, "bwa_index", "index")
+    sam = alignment.align_bwa_pe_lib(bwa_command, index, left, right, os.path.join(out_dir, "align"), log, threads)
+    #index_bwa(command, log, reference, work_dir, algorithm = "is"):
+    #align_bwa_pe_lib(command, work_dir + "/index", reads_file1, reads_file2, work_dir, log, threads = 1):
+
+if __name__ == '__main__':
+
+    if len(sys.argv) < 5:
+        sys.stderr.write("Usage: %s <contigs> <left_reads> <right_reads> <out_dir> [threads = 8]\n" % sys.argv[0])
+        exit(1)
+
+    threads = 8
+    if len(sys.argv) >= 6:
+        threads = int(sys.argv[5])
+
+    align(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], threads)
diff --git a/src/spades_pipeline/hammer_logic.py b/src/spades_pipeline/hammer_logic.py
index 6dd575f..213bee3 100644
--- a/src/spades_pipeline/hammer_logic.py
+++ b/src/spades_pipeline/hammer_logic.py
@@ -69,6 +69,8 @@ def prepare_config_bh(filename, cfg, log):
     subst_dict["general_hard_memory_limit"] = cfg.max_memory
     if "qvoffset" in cfg.__dict__:
         subst_dict["input_qvoffset"] = cfg.qvoffset
+    if "count_filter_singletons" in cfg.__dict__:
+        subst_dict["count_filter_singletons"] = cfg.count_filter_singletons
     process_cfg.substitute_params(filename, subst_dict, log)
 
 
diff --git a/src/spades_pipeline/options_storage.py b/src/spades_pipeline/options_storage.py
index 015ab50..8b39abf 100644
--- a/src/spades_pipeline/options_storage.py
+++ b/src/spades_pipeline/options_storage.py
@@ -22,7 +22,7 @@ CONTIGS_ALLOWED_READS_EXTENSIONS += [x + '.gz' for x in CONTIGS_ALLOWED_READS_EX
 ALLOWED_READS_EXTENSIONS += [x + '.gz' for x in ALLOWED_READS_EXTENSIONS]
 
 # we support up to MAX_LIBS_NUMBER libs for each type of short-reads libs
-MAX_LIBS_NUMBER = 5
+MAX_LIBS_NUMBER = 9
 OLD_STYLE_READS_OPTIONS = ["--12", "-1", "-2", "-s"]
 SHORT_READS_TYPES = {"pe": "paired-end", "s": "single", "mp": "mate-pairs", "hqmp": "hq-mate-pairs", "nxmate": "nxmate"}
 # other libs types:
@@ -56,6 +56,7 @@ TMP_DIR = "tmp"
 output_dir = None
 single_cell = False
 iontorrent = False
+meta = False
 test_mode = False
 
 # pipeline options
@@ -85,7 +86,6 @@ iterations = None
 bh_heap_check = None
 spades_heap_check = None
 read_buffer_size = None
-meta = False
 ### END OF OPTIONS
 
 # for restarting SPAdes
@@ -122,11 +122,11 @@ dict_of_rel2abs = dict()
 long_options = "12= threads= memory= tmp-dir= iterations= phred-offset= sc iontorrent meta "\
                "only-error-correction only-assembler "\
                "disable-gzip-output disable-gzip-output:false disable-rr disable-rr:false " \
-               "help test debug debug:false reference= config-file= dataset= "\
+               "help version test debug debug:false reference= config-file= dataset= "\
                "bh-heap-check= spades-heap-check= read-buffer-size= help-hidden "\
                "mismatch-correction mismatch-correction:false careful careful:false "\
                "continue restart-from= diploid truseq cov-cutoff= configs-dir= stop-after=".split()
-short_options = "o:1:2:s:k:t:m:i:h"
+short_options = "o:1:2:s:k:t:m:i:hv"
 
 # adding multiple paired-end, mate-pair and other (long reads) libraries support
 reads_options = []
@@ -145,20 +145,31 @@ reads_options = list(map(lambda x: "--" + x.split('=')[0], reads_options))
 reads_options += OLD_STYLE_READS_OPTIONS
 
 
+def version(spades_version, mode=None):
+    sys.stderr.write("SPAdes v" + str(spades_version))
+    if mode is not None:
+        sys.stderr.write(" (" + mode + " mode)")
+    sys.stderr.write("\n")
+    sys.stderr.flush()
+
+
 def usage(spades_version, show_hidden=False, dipspades=False):
     if not dipspades:
-        sys.stderr.write("SPAdes genome assembler v." + str(spades_version) + "\n")
+        sys.stderr.write("SPAdes genome assembler v" + str(spades_version) + "\n\n")
     else:
-        sys.stderr.write("dipSPAdes 1.0: genome assembler designed for diploid genomes with high heterozygosity rate\n\n")
+        sys.stderr.write("dipSPAdes v" + str(spades_version) +
+                         ": genome assembler designed for diploid genomes with high heterozygosity rate\n\n")
     sys.stderr.write("Usage: " + str(sys.argv[0]) + " [options] -o <output_dir>" + "\n")
     sys.stderr.write("" + "\n")
     sys.stderr.write("Basic options:" + "\n")
     sys.stderr.write("-o\t<output_dir>\tdirectory to store all the resulting files (required)" + "\n")
     if not dipspades:
         sys.stderr.write("--sc\t\t\tthis flag is required for MDA (single-cell) data" + "\n")
+        sys.stderr.write("--meta\t\t\tthis flag is required for metagenomic sample data" + "\n")
     sys.stderr.write("--iontorrent\t\tthis flag is required for IonTorrent data" + "\n")
     sys.stderr.write("--test\t\t\truns SPAdes on toy dataset" + "\n")
     sys.stderr.write("-h/--help\t\tprints this usage message" + "\n")
+    sys.stderr.write("-v/--version\t\tprints version" + "\n")
 
     sys.stderr.write("" + "\n")
     if not dipspades:
@@ -171,41 +182,41 @@ def usage(spades_version, show_hidden=False, dipspades=False):
     sys.stderr.write("-2\t<filename>\tfile with reverse paired-end reads" + "\n")
     sys.stderr.write("-s\t<filename>\tfile with unpaired reads" + "\n")
     sys.stderr.write("--pe<#>-12\t<filename>\tfile with interlaced"\
-                         " reads for paired-end library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " reads for paired-end library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--pe<#>-1\t<filename>\tfile with forward reads"\
-                         " for paired-end library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " for paired-end library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--pe<#>-2\t<filename>\tfile with reverse reads"\
-                         " for paired-end library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " for paired-end library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--pe<#>-s\t<filename>\tfile with unpaired reads"\
-                         " for paired-end library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " for paired-end library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--pe<#>-<or>\torientation of reads"\
-                         " for paired-end library number <#> (<#> = 1,2,3,4,5; <or> = fr, rf, ff)" + "\n")
+                         " for paired-end library number <#> (<#> = 1,2,..,9; <or> = fr, rf, ff)" + "\n")
     sys.stderr.write("--s<#>\t\t<filename>\tfile with unpaired reads"\
-                     " for single reads library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                     " for single reads library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--mp<#>-12\t<filename>\tfile with interlaced"\
-                         " reads for mate-pair library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " reads for mate-pair library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--mp<#>-1\t<filename>\tfile with forward reads"\
-                         " for mate-pair library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " for mate-pair library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--mp<#>-2\t<filename>\tfile with reverse reads"\
-                         " for mate-pair library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " for mate-pair library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--mp<#>-s\t<filename>\tfile with unpaired reads"\
-                         " for mate-pair library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " for mate-pair library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--mp<#>-<or>\torientation of reads"\
-                         " for mate-pair library number <#> (<#> = 1,2,3,4,5; <or> = fr, rf, ff)" + "\n")
+                         " for mate-pair library number <#> (<#> = 1,2,..,9; <or> = fr, rf, ff)" + "\n")
     sys.stderr.write("--hqmp<#>-12\t<filename>\tfile with interlaced"\
-                     " reads for high-quality mate-pair library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                     " reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--hqmp<#>-1\t<filename>\tfile with forward reads"\
-                     " for high-quality mate-pair library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                     " for high-quality mate-pair library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--hqmp<#>-2\t<filename>\tfile with reverse reads"\
-                     " for high-quality mate-pair library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                     " for high-quality mate-pair library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--hqmp<#>-s\t<filename>\tfile with unpaired reads"\
-                     " for high-quality mate-pair library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                     " for high-quality mate-pair library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--hqmp<#>-<or>\torientation of reads"\
-                     " for high-quality mate-pair library number <#> (<#> = 1,2,3,4,5; <or> = fr, rf, ff)" + "\n")
+                     " for high-quality mate-pair library number <#> (<#> = 1,2,..,9; <or> = fr, rf, ff)" + "\n")
     sys.stderr.write("--nxmate<#>-1\t<filename>\tfile with forward reads"\
-                         " for Lucigen NxMate library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " for Lucigen NxMate library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--nxmate<#>-2\t<filename>\tfile with reverse reads"\
-                         " for Lucigen NxMate library number <#> (<#> = 1,2,3,4,5)" + "\n")
+                         " for Lucigen NxMate library number <#> (<#> = 1,2,..,9)" + "\n")
     sys.stderr.write("--sanger\t<filename>\tfile with Sanger reads\n")
     sys.stderr.write("--pacbio\t<filename>\tfile with PacBio reads\n")
     sys.stderr.write("--nanopore\t<filename>\tfile with Nanopore reads\n")
@@ -277,8 +288,6 @@ def usage(spades_version, show_hidden=False, dipspades=False):
         sys.stderr.write("--spades-heap-check\t<value>\tsets HEAPCHECK environment variable"\
                              " for SPAdes" + "\n")
         sys.stderr.write("--help-hidden\tprints this usage message with all hidden options" + "\n")
-        if not dipspades:
-            sys.stderr.write("--meta\t\t\tthis flag is required for metagenomic sample data" + "\n")
 
     if show_hidden and dipspades:
         sys.stderr.write("" + "\n")
diff --git a/src/spades_pipeline/run_contig_breaker.py b/src/spades_pipeline/run_contig_breaker.py
index 3099081..2f3e44c 100755
--- a/src/spades_pipeline/run_contig_breaker.py
+++ b/src/spades_pipeline/run_contig_breaker.py
@@ -6,23 +6,40 @@
 # See file LICENSE for details.
 ############################################################################
 
-
+import os
 import sys
+
+pipeline_modules_home = 'src/spades_pipeline/'#os.path.dirname(os.path.realpath(__file__)
+sys.path.append(os.path.join(pipeline_modules_home, "common"))
+sys.path.append(os.path.join(pipeline_modules_home, "truspades"))
+
+#import alignment
+import sam_parser
 import break_by_coverage
-from Bio import SeqIO
-
-if len(sys.argv) < 4:
-    sys.stderr.write("Usage: %s <contigs> <sam_file> <output_filename>\n" % sys.argv[0])
-    exit(1)
-
-contigs_file = sys.argv[1]
-sam_file = sys.argv[2]
-output_file = sys.argv[3]
-
-coverage_breaker = break_by_coverage.ContigBreaker(contigs_file, sam_file, 100, 50)
-contigs = list(SeqIO.parse(open(contigs_file, "rU"), "fasta"))
-output = open(output_file, "w")
-for contig in contigs:
-    for subcontig in coverage_breaker.Break(contig):
-        SeqIO.write(subsubsubcontig, output, "fasta")
-output.close()
+import SeqIO
+
+def break_contigs(contigs_file, sam_file, output_file):
+    contigs = list(SeqIO.parse(open(contigs_file, "rU"), "fasta"))
+    #sam = sam_parser.SamChain([sam_parser.Samfile(sam_file) for sam_file in sam_files])
+    sam = sam_parser.Samfile(sam_file)
+    #last two arguments: K, min0 stretch length to break
+    coverage_breaker = break_by_coverage.ContigBreaker(contigs, sam, 100, 50)
+    coverage_breaker.OutputBroken(output_file)
+    #contigs = list(SeqIO.parse(open(contigs_file, "rU"), "fasta"))
+    #output = open(output_file, "w")
+    #for contig in contigs:
+    #    for subcontig in coverage_breaker.Break(contig):
+    #        SeqIO.write(subcontig, output, "fasta")
+    #output.close()
+
+
+if __name__ == '__main__':
+
+    if len(sys.argv) < 4:
+        sys.stderr.write("Usage: %s <contigs> <sam_file> <output_filename>\n" % sys.argv[0])
+        exit(1)
+    
+    contigs_file = sys.argv[1]
+    sam_file = sys.argv[2]
+    output_file = sys.argv[3]
+    break_contigs(contigs_file, sam_file, output_file);
diff --git a/src/spades_pipeline/spades_logic.py b/src/spades_pipeline/spades_logic.py
index b2c6e96..764b317 100644
--- a/src/spades_pipeline/spades_logic.py
+++ b/src/spades_pipeline/spades_logic.py
@@ -20,7 +20,7 @@ import options_storage
 BASE_STAGE = "construction"
 READS_TYPES_USED_IN_CONSTRUCTION = ["paired-end", "single", "hq-mate-pairs"]
 
-def prepare_config_spades(filename, cfg, log, additional_contigs_fname, K, stage, saves_dir, last_one):
+def prepare_config_spades(filename, cfg, log, additional_contigs_fname, K, stage, saves_dir, last_one, execution_home):
     subst_dict = dict()
 
     subst_dict["K"] = str(K)
@@ -61,6 +61,7 @@ def prepare_config_spades(filename, cfg, log, additional_contigs_fname, K, stage
         else:
             subst_dict["coverage_threshold"] = cfg.cov_cutoff
 
+    subst_dict["path_to_bwa"] =  os.path.join(execution_home, "bwa-spades")
     process_cfg.substitute_params(filename, subst_dict, log)
 
 
@@ -156,7 +157,7 @@ def run_iteration(configs_dir, execution_home, cfg, log, K, prev_K, last_one):
     if "read_buffer_size" in cfg.__dict__:
         construction_cfg_file_name = os.path.join(dst_configs, "construction.info")
         process_cfg.substitute_params(construction_cfg_file_name, {"read_buffer_size": cfg.read_buffer_size}, log)
-    prepare_config_spades(cfg_file_name, cfg, log, additional_contigs_fname, K, stage, saves_dir, last_one)
+    prepare_config_spades(cfg_file_name, cfg, log, additional_contigs_fname, K, stage, saves_dir, last_one, execution_home)
 
     command = [os.path.join(execution_home, "spades"), cfg_file_name]
     support.sys_call(command, log)
@@ -323,6 +324,10 @@ def run_spades(configs_dir, execution_home, cfg, dataset_data, ext_python_module
         if os.path.isfile(os.path.join(latest, "final_contigs.fasta")):
             if not os.path.isfile(cfg.result_contigs) or not options_storage.continue_mode:
                 shutil.copyfile(os.path.join(latest, "final_contigs.fasta"), cfg.result_contigs)
+        if os.path.isfile(os.path.join(latest, "first_pe_contigs.fasta")):
+            result_first_pe_contigs = os.path.join(os.path.dirname(cfg.result_contigs), "first_pe_contigs.fasta")
+            if not os.path.isfile(result_first_pe_contigs) or not options_storage.continue_mode:
+                shutil.copyfile(os.path.join(latest, "first_pe_contigs.fasta"), result_first_pe_contigs)
         if cfg.rr_enable:
             if os.path.isfile(os.path.join(latest, "scaffolds.fasta")):
                 if not os.path.isfile(cfg.result_scaffolds) or not options_storage.continue_mode:
@@ -338,6 +343,7 @@ def run_spades(configs_dir, execution_home, cfg, dataset_data, ext_python_module
                 shutil.copyfile(os.path.join(latest, "scaffolds.paths"), cfg.result_scaffolds_paths)
 
 
+
     if cfg.developer_mode:
         # saves
         saves_link = os.path.join(os.path.dirname(cfg.result_contigs), "saves")
diff --git a/src/spades_pipeline/support.py b/src/spades_pipeline/support.py
index 52d13af..4306267 100644
--- a/src/spades_pipeline/support.py
+++ b/src/spades_pipeline/support.py
@@ -38,7 +38,7 @@ def error(err_str, log=None, dipspades=False, prefix=SPADES_PY_ERROR_MESSAGE):
         binary_name = "dipSPAdes"
     if log:
         log.info("\n\n" + prefix + " " + err_str)
-        log_warnings(log)
+        log_warnings(log, with_error=True)
         log.info("\nIn case you have troubles running " + binary_name + ", you can write to spades.support at bioinf.spbau.ru")
         log.info("Please provide us with params.txt and " + binary_name.lower() + ".log files from the output directory.")
     else:
@@ -145,7 +145,7 @@ def which(program):
     if fpath:
         if is_exe(program):
             return program
-    else:
+    elif "PATH" in os.environ:
         for path in os.environ["PATH"].split(os.pathsep):
             path = path.strip('"')
             exe_file = os.path.join(path, program)
@@ -280,13 +280,20 @@ def save_data_to_file(data, file):
     os.chmod(file, stat.S_IWRITE | stat.S_IREAD | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
 
 
-def get_warnings(log_filename):
+def get_important_messages_from_log(log_filename, warnings=True):
     def already_saved(list_to_check, suffix): # for excluding duplicates (--continue-from may cause them)
         for item in list_to_check:
             if item.endswith(suffix):
                 return True
         return False
 
+    if warnings:
+        spades_py_message = SPADES_PY_WARN_MESSAGE
+        spades_message = SPADES_WARN_MESSAGE
+    else:  # errors
+        spades_py_message = SPADES_PY_ERROR_MESSAGE
+        spades_message = SPADES_ERROR_MESSAGE
+
     ### for capturing correct warnings in case of continue_mode
     if continue_logfile_offset:
         continued_log = open(log_filename, 'r')
@@ -303,23 +310,23 @@ def get_warnings(log_filename):
     else:
         lines_to_check = open(log_filename, 'r').readlines()
 
-    spades_py_warns = []
-    spades_warns = []
-    WARN_SUMMARY_PREFIX = ' * '
+    spades_py_msgs = []
+    spades_msgs = []
+    IMPORTANT_MESSAGE_SUMMARY_PREFIX = ' * '
     for line in lines_to_check:
-        if line.startswith(WARN_SUMMARY_PREFIX):
+        if line.startswith(IMPORTANT_MESSAGE_SUMMARY_PREFIX):
             continue
-        if line.find(SPADES_PY_WARN_MESSAGE) != -1:
-            suffix = line[line.find(SPADES_PY_WARN_MESSAGE) + len(SPADES_PY_WARN_MESSAGE):].strip()
-            line = line.replace(SPADES_PY_WARN_MESSAGE, '').strip()
-            if not already_saved(spades_py_warns, suffix):
-                spades_py_warns.append(WARN_SUMMARY_PREFIX + line)
-        elif line.find(SPADES_WARN_MESSAGE) != -1:
-            suffix = line[line.find(SPADES_WARN_MESSAGE) + len(SPADES_WARN_MESSAGE):].strip()
+        if line.find(spades_py_message) != -1:
+            suffix = line[line.find(spades_py_message) + len(spades_py_message):].strip()
+            line = line.replace(spades_py_message, '').strip()
+            if not already_saved(spades_py_msgs, suffix):
+                spades_py_msgs.append(IMPORTANT_MESSAGE_SUMMARY_PREFIX + line)
+        elif line.find(spades_message) != -1:
+            suffix = line[line.find(spades_message) + len(spades_message):].strip()
             line = line.strip()
-            if not already_saved(spades_warns, suffix):
-                spades_warns.append(WARN_SUMMARY_PREFIX + line)
-    return spades_py_warns, spades_warns
+            if not already_saved(spades_msgs, suffix):
+                spades_msgs.append(IMPORTANT_MESSAGE_SUMMARY_PREFIX + line)
+    return spades_py_msgs, spades_msgs
 
 
 def get_logger_filename(log):
@@ -330,15 +337,18 @@ def get_logger_filename(log):
     return log_file
 
 
-def log_warnings(log):
+def log_warnings(log, with_error=False):
     log_file = get_logger_filename(log)
     if not log_file:
         return False
     for h in log.__dict__['handlers']:
         h.flush()
-    spades_py_warns, spades_warns = get_warnings(log_file)
+    spades_py_warns, spades_warns = get_important_messages_from_log(log_file, warnings=True)
     if spades_py_warns or spades_warns:
-        log.info("\n======= SPAdes pipeline finished WITH WARNINGS!")
+        if with_error:
+            log.info("\n======= SPAdes pipeline finished abnormally and WITH WARNINGS!")
+        else:
+            log.info("\n======= SPAdes pipeline finished WITH WARNINGS!")
         warnings_filename = os.path.join(os.path.dirname(log_file), "warnings.log")
         warnings_handler = logging.FileHandler(warnings_filename, mode='w')
         log.addHandler(warnings_handler)
@@ -354,6 +364,12 @@ def log_warnings(log):
                 log.info(line)
         log.info("======= Warnings saved to " + warnings_filename)
         log.removeHandler(warnings_handler)
+        if with_error:
+            spades_py_errors, spades_errors = get_important_messages_from_log(log_file, warnings=False)
+            log.info("")
+            log.info("=== ERRORs:")
+            for line in (spades_errors + spades_py_errors):
+                log.info(line)
         return True
     return False
 
diff --git a/src/spades_pipeline/truspades/__pycache__/barcode_extraction.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/barcode_extraction.cpython-34.pyc
new file mode 100644
index 0000000..f32ac80
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/barcode_extraction.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/barcode_extraction.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/barcode_extraction.cpython-35.pyc
new file mode 100644
index 0000000..3fe98fd
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/barcode_extraction.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-33.pyc b/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-33.pyc
new file mode 100644
index 0000000..33e1aa5
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-33.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-34.pyc
new file mode 100644
index 0000000..03b5bda
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-35.pyc
new file mode 100644
index 0000000..50306da
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/break_by_coverage.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-33.pyc b/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-33.pyc
new file mode 100644
index 0000000..d0ec4f3
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-33.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-34.pyc
new file mode 100644
index 0000000..40111c6
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-35.pyc
new file mode 100644
index 0000000..cec48d4
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/generate_quality.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/id_generation.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/id_generation.cpython-34.pyc
new file mode 100644
index 0000000..19e8bce
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/id_generation.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/id_generation.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/id_generation.cpython-35.pyc
new file mode 100644
index 0000000..bdff821
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/id_generation.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/launch_options.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/launch_options.cpython-34.pyc
new file mode 100644
index 0000000..e6e7e08
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/launch_options.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/launch_options.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/launch_options.cpython-35.pyc
new file mode 100644
index 0000000..c90fdec
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/launch_options.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-33.pyc b/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-33.pyc
new file mode 100644
index 0000000..b17fe1b
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-33.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-34.pyc
new file mode 100644
index 0000000..266f207
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-35.pyc
new file mode 100644
index 0000000..1f547ea
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/moleculo_filter_contigs.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-33.pyc b/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-33.pyc
new file mode 100644
index 0000000..15cfb62
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-33.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-34.pyc
new file mode 100644
index 0000000..7b19367
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-35.pyc
new file mode 100644
index 0000000..13cb1a8
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/moleculo_postprocessing.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/reference_construction.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/reference_construction.cpython-34.pyc
new file mode 100644
index 0000000..945bd2c
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/reference_construction.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/reference_construction.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/reference_construction.cpython-35.pyc
new file mode 100644
index 0000000..e0182fa
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/reference_construction.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/string_dist_utils.cpython-34.pyc b/src/spades_pipeline/truspades/__pycache__/string_dist_utils.cpython-34.pyc
new file mode 100644
index 0000000..e9131b1
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/string_dist_utils.cpython-34.pyc differ
diff --git a/src/spades_pipeline/truspades/__pycache__/string_dist_utils.cpython-35.pyc b/src/spades_pipeline/truspades/__pycache__/string_dist_utils.cpython-35.pyc
new file mode 100644
index 0000000..9437430
Binary files /dev/null and b/src/spades_pipeline/truspades/__pycache__/string_dist_utils.cpython-35.pyc differ
diff --git a/src/spades_pipeline/truspades/barcode_extraction.py b/src/spades_pipeline/truspades/barcode_extraction.py
index 5060612..15efbce 100644
--- a/src/spades_pipeline/truspades/barcode_extraction.py
+++ b/src/spades_pipeline/truspades/barcode_extraction.py
@@ -6,6 +6,7 @@
 
 import os.path
 import sys
+import logging
 
 from id_generation import generate_ids
 from string_dist_utils import lcs, dist
@@ -94,7 +95,7 @@ def ExtractBarcodes(dirs):
     short_barcodes = generate_barcode_list(list(barcode_dict.keys()))
     return [Barcode(short, barcode_dict[bid]) for bid, short in short_barcodes]
 
-def ReadDataset(file, log):
+def ReadDataset(file, log = logging.getLogger("ReadDataset")):
     log.info("Reading dataset from " + file + "\n")
     if os.path.exists(file) and os.path.isfile(file):
         result = []
@@ -117,6 +118,6 @@ def ReadDataset(file, log):
         sys.exit(1)
 
 def print_dataset(dataset, output_file, log):
-    log.info("Printing dataset to " + output_file + "\n")
+    log.info("Printing dataset to " + output_file)
     open(output_file, "w").write("\n".join([str(line).strip() for line in dataset]) + "\n")
 
diff --git a/src/spades_pipeline/truspades/barcode_extraction.pyc b/src/spades_pipeline/truspades/barcode_extraction.pyc
new file mode 100644
index 0000000..4f05744
Binary files /dev/null and b/src/spades_pipeline/truspades/barcode_extraction.pyc differ
diff --git a/src/spades_pipeline/truspades/break_by_coverage.py b/src/spades_pipeline/truspades/break_by_coverage.py
index 1d8d621..b412bc0 100644
--- a/src/spades_pipeline/truspades/break_by_coverage.py
+++ b/src/spades_pipeline/truspades/break_by_coverage.py
@@ -105,14 +105,14 @@ class ContigBreaker:
 
     def Break(self, contig):
         result = []
-#        print contig.id
-#        print self.sam.gettid(contig.id)
+        #print contig.id
+        #print self.sam.gettid(contig.id)
         for part in self.part_list_[self.sam.gettid(contig.id)]:
             result.append(contig.subseq(part[0], part[1]))
         return result
 
     def OutputBroken(self, output_file):
-        output = open(output_file, "r")
+        output = open(output_file, "w")
         for contig in self.contigs:
             for subcontig in self.Break(contig):
                 SeqIO.write(subcontig, output, "fasta")
@@ -144,7 +144,7 @@ class PatternBreaker:
             l1 = 0
         if l2 == -1:
             l2 = 0
-        l = max(l1, l2) + len(self.pattern)
+        l = max(l1, l2)
         if l > len(seq) - self.max_cut:
             return l
         else:
@@ -178,5 +178,5 @@ class NBreaker:
             result.append(contig.subseq(last_break, len(contig)))
         return result
 
-if __name__ == '__main__':
-    ContigBreaker(sys.argv[1], sys.argv[3], int(sys.argv[4]), int(sys.argv[5])).OutputBroken(sys.argv[2])
+#if __name__ == '__main__':
+#    ContigBreaker(sys.argv[1], sys.argv[3], int(sys.argv[4]), int(sys.argv[5])).OutputBroken(sys.argv[2])
diff --git a/src/spades_pipeline/truspades/break_by_coverage.pyc b/src/spades_pipeline/truspades/break_by_coverage.pyc
new file mode 100644
index 0000000..4eb31b6
Binary files /dev/null and b/src/spades_pipeline/truspades/break_by_coverage.pyc differ
diff --git a/src/spades_pipeline/truspades/generate_quality.pyc b/src/spades_pipeline/truspades/generate_quality.pyc
new file mode 100644
index 0000000..e471030
Binary files /dev/null and b/src/spades_pipeline/truspades/generate_quality.pyc differ
diff --git a/src/spades_pipeline/truspades/id_generation.pyc b/src/spades_pipeline/truspades/id_generation.pyc
new file mode 100644
index 0000000..cd26c8e
Binary files /dev/null and b/src/spades_pipeline/truspades/id_generation.pyc differ
diff --git a/src/spades_pipeline/truspades/launch_options.py b/src/spades_pipeline/truspades/launch_options.py
index 05dc6e6..75a7268 100644
--- a/src/spades_pipeline/truspades/launch_options.py
+++ b/src/spades_pipeline/truspades/launch_options.py
@@ -8,6 +8,7 @@ __author__ = 'anton'
 import getopt
 import os
 import sys
+import options_storage
 
 class Options:
     def set_default_options(self):
@@ -26,23 +27,29 @@ class Options:
         self.possible_modes = ["run_truspades", "generate_dataset", "construct_subreferences"]
         self.test = False
 
-    def __init__(self, argv, bin, home):
+    def __init__(self, argv, bin, home, version):
         if len(argv) == 1:
-            print_usage_and_exit(1)
-        long_params = "test help-hidden construct-dataset reference= reference-index= do= continue threads= help dataset= input-dir= additional-options".split(" ")
-        short_params = "o:t:h"
+            print_usage_and_exit(1, version)
+        long_params = "test help-hidden construct-dataset reference= reference-index= do= continue " \
+                      "threads= help version dataset= input-dir= additional-options=".split(" ")
+        short_params = "o:t:hv"
         self.set_default_options()
         self.bin = bin
         self.home = home
+        self.version = version
         try:
-            options_list, self.spades_options = getopt.gnu_getopt(argv[1:], short_params, long_params)
+            options_list, tmp = getopt.gnu_getopt(argv[1:], short_params, long_params)
+            if len(tmp) != 0:
+                print_usage_and_exit(1, self.version)
         except getopt.GetoptError:
             _, exc, _ = sys.exc_info()
             sys.stderr.write(str(exc) + "\n")
-            print_usage_and_exit(1)
+            print_usage_and_exit(1, self.version)
         for (key, value) in options_list:
+            if key == "--version" or key == "-v":
+                print_version_and_exit(self.version)
             if key == "--help" or key == "-h":
-                print_usage_and_exit(1)
+                print_usage_and_exit(1, self.version)
             elif key == "--test":
                 dir = os.path.abspath("spades_test") + "_truspades"
                 self.output_dir = dir
@@ -73,38 +80,41 @@ class Options:
             elif key == "--threads" or key == "-t":
                 self.threads = int(value)
             elif key == "--help-hidden":
-                print_usage_and_exit(0, True)
+                print_usage_and_exit(0, self.version, show_hidden=True)
         if not self.mode in self.possible_modes:
             sys.stderr.write("Error: --do parameter can only have one of the following values: " + ", ".join(self.possible_modes) + "\n")
-            print_usage_and_exit(1)
+            print_usage_and_exit(1, self.version)
         if None == self.output_dir or os.path.isfile(self.output_dir):
             sys.stderr.write("Error: Please provide output directory\n")
-            print_usage_and_exit(1)
+            print_usage_and_exit(1, self.version)
         if self.continue_launch:
             return
         cnt = len([option for option in [self.dataset_file, self.input_dirs, self.command_list] if option != None])
         if cnt != 1:
             sys.stderr.write("Error: exactly one of dataset-file and input-dir must be specified\n")
-            print_usage_and_exit(1)
+            print_usage_and_exit(1, self.version)
         if self.mode == "construct_subreferences":
             if self.index == "":
                 sys.stderr.write("Error: Please provide reference index for BWA")
-                print_usage_and_exit(1)
+                print_usage_and_exit(1, self.version)
             if self.reference == "":
                 sys.stderr.write("Error: Please provide reference for subreference construction")
-                print_usage_and_exit(1)
+                print_usage_and_exit(1, self.version)
 
 
-def print_usage_and_exit(code, show_hidden = False):
+def print_usage_and_exit(code, version, show_hidden=False):
+    sys.stderr.write("TruSPAdes v" + str(version) +
+                     ": genome assembler designed for short reads produced by Illumina TruSeq Long Read technology\n\n")
     sys.stderr.write("Usage: " + str(sys.argv[0]) + " [options] -o <output_dir>" + "\n")
     sys.stderr.write("" + "\n")
     sys.stderr.write("Basic options:" + "\n")
     sys.stderr.write("-h/--help\t\t\tprints this usage message" + "\n")
+    sys.stderr.write("-v/--version\t\t\tprints version" + "\n")
     sys.stderr.write("--test\t\t\t\trun truSPAdes on toy dataset" + "\n")
     sys.stderr.write("-o\t\t<output_dir>\tdirectory to store all the resulting files (required)" + "\n")
     sys.stderr.write("-t/--threads\t<int>\t\tnumber of threads" + "\n")
     sys.stderr.write("--continue\t\t\tcontinue interrupted launch" + "\n")
-    sys.stderr.write("--construct-dataset\t\tparse dataset from input folder")
+    sys.stderr.write("--construct-dataset\t\tparse dataset from input folder" + "\n")
     sys.stderr.write("" + "\n")
     sys.stderr.write("Input options:" + "\n")
     sys.stderr.write("--input-dir\t<directory>\tdirectory with input data. Note that the directory should contain only files with reads. This option can be used several times to provide several input directories." + "\n")
@@ -119,3 +129,8 @@ def print_usage_and_exit(code, show_hidden = False):
     # sys.stderr.write("--run-truspades\truns truSPAdes on all barcodes" + "\n")
     sys.stderr.flush()
     sys.exit(code)
+
+
+def print_version_and_exit(version):
+    options_storage.version(version, mode="TruSPAdes")
+    sys.exit(0)
diff --git a/src/spades_pipeline/truspades/launch_options.pyc b/src/spades_pipeline/truspades/launch_options.pyc
new file mode 100644
index 0000000..b34aecb
Binary files /dev/null and b/src/spades_pipeline/truspades/launch_options.pyc differ
diff --git a/src/spades_pipeline/truspades/moleculo_filter_contigs.pyc b/src/spades_pipeline/truspades/moleculo_filter_contigs.pyc
new file mode 100644
index 0000000..de429cc
Binary files /dev/null and b/src/spades_pipeline/truspades/moleculo_filter_contigs.pyc differ
diff --git a/src/spades_pipeline/truspades/moleculo_postprocessing.pyc b/src/spades_pipeline/truspades/moleculo_postprocessing.pyc
new file mode 100644
index 0000000..398bece
Binary files /dev/null and b/src/spades_pipeline/truspades/moleculo_postprocessing.pyc differ
diff --git a/src/spades_pipeline/truspades/reference_construction.pyc b/src/spades_pipeline/truspades/reference_construction.pyc
new file mode 100644
index 0000000..11c0a6f
Binary files /dev/null and b/src/spades_pipeline/truspades/reference_construction.pyc differ
diff --git a/src/spades_pipeline/truspades/string_dist_utils.pyc b/src/spades_pipeline/truspades/string_dist_utils.pyc
new file mode 100644
index 0000000..83f1c44
Binary files /dev/null and b/src/spades_pipeline/truspades/string_dist_utils.pyc differ
diff --git a/truspades.py b/truspades.py
index fe2ed12..f93324e 100755
--- a/truspades.py
+++ b/truspades.py
@@ -9,22 +9,13 @@ import logging
 
 import os
 import sys
+import spades_init
+spades_init.init()
+truspades_home = spades_init.spades_home
+spades_home = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+spades_version = spades_init.spades_version
 
-truspades_home = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-bin_home = os.path.join(truspades_home, 'bin')
-python_modules_home = os.path.join(truspades_home, 'src')
-
-if os.path.isfile(os.path.join(truspades_home, 'spades')):
-    install_prefix = os.path.dirname(truspades_home)
-    bin_home = os.path.join(install_prefix, 'bin')
-    truspades_home = os.path.join(install_prefix, 'share', 'spades')
-    python_modules_home = truspades_home
-
-sys.path.append(os.path.join(python_modules_home, "spades_pipeline", "common"))
-sys.path.append(os.path.join(python_modules_home, "spades_pipeline", "truspades"))
-sys.path.append(os.path.join(python_modules_home, "spades_pipeline"))
-
-import SeqIO
+import SeqIO  # TODO: add to ext/scr/python_libs
 import parallel_launcher
 import reference_construction
 import launch_options
@@ -59,9 +50,9 @@ def reads_line(libs):
 def command_line(barcode, output_dir, params, continue_launch):
 #    logfile = os.path.join(output_dir, "logs", barcode.id + ".out")
     if continue_launch and os.path.exists(os.path.join(output_dir, barcode.id,  "params.txt")):
-        result = ["python " + os.path.join(bin_home, "spades.py"), "--truseq", "-o", os.path.join(output_dir, barcode.id), "--continue", " ".join(params)]
+        result = ["python " + os.path.join(spades_home, "spades.py"), "--truseq", "-o", os.path.join(output_dir, barcode.id), "--continue", params]
     else:
-       result = ["python " + os.path.join(bin_home, "spades.py"), "--truseq", "-t", "1", "-o", os.path.join(output_dir, barcode.id), reads_line(barcode.libs), " ".join(params)]
+       result = ["python " + os.path.join(spades_home, "spades.py"), "--truseq", "-t", "1", "-o", os.path.join(output_dir, barcode.id), reads_line(barcode.libs), params]
 #    result = ["./truspades.py", "-o", os.path.join(output_dir, barcode.id), reads_line(barcode.libs), " ".join(params), "\n"]
     return " ".join(result)
 
@@ -149,8 +140,10 @@ def CheckTestSuccess(options, log):
 
 
 def main(argv):
-    options = launch_options.Options(argv, bin_home, truspades_home)
+    options = launch_options.Options(argv, spades_home, truspades_home, spades_version)
     support.ensure_dir_existence(options.output_dir)
+    if options.test:
+        support.recreate_dir(options.output_dir)
     log = create_log(options)
     dataset_file = os.path.join(options.output_dir, "dataset.info")
     if options.continue_launch:
diff --git a/truspades_manual.html b/truspades_manual.html
index d2e6fab..ac2bc51 100644
--- a/truspades_manual.html
+++ b/truspades_manual.html
@@ -21,7 +21,8 @@
 3. <a href="#sec3">Running truSPAdes</a><br>
     3.1. <a href="#sec3.1">TruSPAdes command line options</a><br>
     3.2. <a href="#sec3.2">TruSPAdes output</a><br>
-4. <a href="#sec4">Feedback and bug reports</a><br>
+4. <a href="#sec4">Citation</a><br>
+5. <a href="#sec5">Feedback and bug reports</a><br>
 <br>
 
 <a name="sec1"></a>
@@ -155,6 +156,11 @@ Note that we assume that truSPAdes installation directory is added to the <code>
 </p>
 
 <p>
+    <code>-v</code> (or <code>--version</code>)<br>
+        Prints version.
+</p>
+
+<p>
     <code>--continue</code><br>
         Continues truSPAdes run from the specified output folder.
 </p>
@@ -193,7 +199,12 @@ Note that we assume that truSPAdes installation directory is added to the <code>
 </pre>
 
 <a name="sec4">
-<h2>4. Feedback and bug reports</h2>
+<h2>4. Citation</h2>
+<p>
+    If you use truSPAdes in your research, please include <a href="http://www.nature.com/nmeth/journal/vaop/ncurrent/full/nmeth.3737.html" target="_blank">Bankevich & Pevzner, 2016</a> in your reference list.
+
+<a name="sec5">
+<h2>5. Feedback and bug reports</h2>
 <p>
     Your comments, bug reports, and suggestions are very welcomed. They will help us to further improve truSPAdes.
     

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/spades.git



More information about the debian-med-commit mailing list