[med-svn] [spades] 02/05: New upstream version 3.10.1+dfsg

Michael Crusoe misterc-guest at moszumanska.debian.org
Wed Apr 19 13:24:37 UTC 2017


This is an automated email from the git hooks/post-receive script.

misterc-guest pushed a commit to branch master
in repository spades.

commit 34bcf2cde65b161abf350f7b69c01f97be7aa71d
Author: Michael R. Crusoe <michael.crusoe at gmail.com>
Date:   Wed Apr 19 02:55:40 2017 -0700

    New upstream version 3.10.1+dfsg
---
 LICENSE                                            |    2 +-
 VERSION                                            |    2 +-
 changelog.html                                     |   27 +
 configs/debruijn/config.info                       |   32 +-
 configs/debruijn/distance_estimation.info          |   12 +-
 configs/debruijn/large_genome_mode.info            |   22 +
 configs/debruijn/log.properties                    |    6 +
 configs/debruijn/mda_mode.info                     |   11 +-
 configs/debruijn/meta_mode.info                    |   49 +-
 configs/debruijn/moleculo_mode.info                |    2 +
 configs/debruijn/path_extend/pe_params.info        |  206 --
 configs/debruijn/pe_params.info                    |   56 +-
 configs/debruijn/rna_mode.info                     |   31 +-
 configs/debruijn/simplification.info               |    1 +
 ext/include/btree/btree.h                          |  138 +-
 ext/include/btree/btree_container.h                |   45 +-
 ext/include/btree/btree_map.h                      |    9 +
 ext/include/btree/safe_btree.h                     |   32 +-
 ext/include/btree/safe_btree_map.h                 |    8 +
 ext/include/bwa/bntseq.h                           |   91 +
 ext/include/bwa/bwa.h                              |   62 +
 ext/include/bwa/bwamem.h                           |  184 ++
 ext/include/bwa/bwt.h                              |  130 +
 ext/include/bwa/utils.h                            |  111 +
 ext/include/cuckoo/LICENSE                         |   18 +
 ext/include/cuckoo/city_hasher.hh                  |   44 +
 ext/include/cuckoo/cuckoohash_config.hh            |   36 +
 ext/include/cuckoo/cuckoohash_map.hh               | 2537 ++++++++++++++++++++
 ext/include/cuckoo/cuckoohash_util.hh              |  136 ++
 ext/include/cuckoo/libcuckoo_lazy_array.hh         |  202 ++
 ext/include/llvm/Support/MathExtras.h              |    1 +
 ext/src/CMakeLists.txt                             |    4 +-
 ext/src/getopt_pp/CMakeLists.txt                   |    5 +
 ext/{include => src}/getopt_pp/getopt_pp.cpp       |    2 +-
 ext/src/llvm/CMakeLists.txt                        |    3 +
 manual.html                                        |  105 +-
 metaspades.py                                      |  102 +-
 plasmidspades.py                                   |  102 +-
 rnaspades.py                                       |  102 +-
 rnaspades_manual.html                              |   21 +-
 spades.py                                          |  102 +-
 spades_compile.sh                                  |    2 +-
 src/CMakeLists.txt                                 |    6 +-
 src/cmake/options.cmake                            |    3 +
 src/cmake/pack.cmake                               |    4 +-
 src/common/CMakeLists.txt                          |   22 +
 src/{utils => common}/adt/array_vector.hpp         |    0
 src/{utils => common}/adt/bag.hpp                  |    2 +-
 src/{utils => common}/adt/bf.hpp                   |    0
 src/{utils => common}/adt/chained_iterator.hpp     |    0
 src/{utils => common}/adt/concurrent_dsu.hpp       |    2 +-
 src/{utils => common}/adt/filter_iterator.hpp      |    0
 src/{utils => common}/adt/flat_map.hpp             |    0
 src/{utils => common}/adt/flat_set.hpp             |    0
 src/{utils => common}/adt/hll.hpp                  |    0
 src/{utils => common}/adt/iterator_range.hpp       |    0
 src/{utils => common}/adt/kmer_hash_vector.hpp     |    2 +-
 src/{utils => common}/adt/kmer_vector.hpp          |   17 +-
 src/common/adt/loser_tree.hpp                      |  134 ++
 src/{utils => common}/adt/parallel_seq_vector.hpp  |    8 +-
 .../adt/parallel_unordered_map.hpp                 |    0
 src/{utils => common}/adt/pointer_iterator.hpp     |    0
 src/{utils => common}/adt/queue_iterator.hpp       |    2 +-
 src/{utils => common}/adt/small_pod_vector.hpp     |    0
 .../assembly_graph/CMakeLists.txt                  |    8 +-
 .../components/component_filters.hpp               |    0
 .../components/connected_component.cpp             |    0
 .../components/connected_component.hpp             |    2 +-
 .../assembly_graph/components/graph_component.hpp  |  226 ++
 .../assembly_graph/components/splitters.hpp        |  369 ++-
 .../assembly_graph/core}/action_handlers.hpp       |    4 +-
 .../assembly_graph/core}/basic_graph_stats.hpp     |    2 +-
 .../assembly_graph/core}/construction_helper.hpp   |    2 +-
 .../assembly_graph/core}/coverage.hpp              |   10 +-
 .../assembly_graph/core}/debruijn_data.hpp         |    8 +-
 .../assembly_graph/core}/directions.hpp            |    0
 .../assembly_graph/core}/graph.hpp                 |    0
 .../assembly_graph/core}/graph_core.hpp            |    8 +-
 .../assembly_graph/core}/graph_iterators.hpp       |   14 +-
 .../assembly_graph/core}/observable_graph.hpp      |    2 +-
 .../assembly_graph/core}/order_and_law.hpp         |    4 +-
 .../dijkstra/dijkstra_algorithm.hpp                |    2 +-
 .../assembly_graph}/dijkstra/dijkstra_helper.hpp   |   10 +-
 .../assembly_graph}/dijkstra/dijkstra_settings.hpp |    0
 .../assembly_graph}/dijkstra/length_calculator.hpp |    2 +-
 .../dijkstra/neighbours_iterator.hpp               |    0
 .../dijkstra/vertex_process_checker.hpp            |    0
 .../dijkstra/vertex_put_checker.hpp                |    0
 .../graph_support/basic_edge_conditions.hpp        |  151 ++
 .../graph_support/basic_vertex_conditions.hpp      |   22 +-
 .../assembly_graph/graph_support/chimera_stats.hpp |    0
 .../assembly_graph/graph_support/comparators.hpp   |    0
 .../assembly_graph/graph_support/contig_output.hpp |  181 +-
 .../graph_support/coverage_filling.hpp             |   80 +
 .../graph_support/coverage_uniformity_analyzer.cpp |   70 +
 .../graph_support/coverage_uniformity_analyzer.hpp |   23 +
 .../graph_support/detail_coverage.hpp              |   88 +-
 .../assembly_graph/graph_support/edge_removal.hpp  |  172 ++
 .../graph_support/genomic_quality.hpp              |   37 +-
 .../graph_support/graph_processing_algorithm.hpp   |  146 ++
 .../graph_support/marks_and_locks.hpp              |    0
 .../graph_support/parallel_processing.hpp          |  306 +++
 .../graph_support/scaff_supplementary.cpp          |  270 +++
 .../graph_support/scaff_supplementary.hpp          |  100 +
 .../handlers/edge_labels_handler.hpp               |    2 +-
 .../handlers/edges_position_handler.hpp            |    5 +-
 .../assembly_graph/handlers/id_track_handler.hpp   |    4 +-
 .../assembly_graph/paths/bidirectional_path.cpp    |    2 +-
 .../assembly_graph/paths/bidirectional_path.hpp    |   42 +-
 .../bidirectional_path_output.cpp                  |   68 +
 .../bidirectional_path_output.hpp                  |   60 +
 .../paths/bidirectional_path_io/io_support.cpp     |  186 ++
 .../paths/bidirectional_path_io/io_support.hpp     |  190 ++
 .../assembly_graph/paths/mapping_path.hpp          |   79 +-
 .../assembly_graph/paths/path_finders.hpp          |    2 +-
 .../assembly_graph/paths/path_processor.hpp        |  147 +-
 src/common/assembly_graph/paths/path_utils.hpp     |  130 +
 .../assembly_graph/stats/picture_dump.hpp          |   94 +-
 .../assembly_graph/stats/statistics.hpp            |    2 +-
 src/{modules => common}/empty.cpp                  |    0
 .../pacbio_aligning.hpp => common/func/func.hpp}   |   22 +-
 src/{utils/adt => common/func}/function_traits.hpp |    9 +-
 src/{modules/math => common/func}/pred.hpp         |   54 +-
 src/{modules => common}/io/CMakeLists.txt          |    6 +-
 src/common/io/dataset_support/dataset_readers.hpp  |  121 +
 .../io/dataset_support/read_converter.hpp          |   96 +-
 .../kmers_io => common/io/kmers}/kmer_iterator.hpp |    2 +-
 .../io/kmers}/mmapped_reader.hpp                   |    6 +-
 .../io/kmers}/mmapped_writer.hpp                   |    4 +-
 .../io/reads}/binary_converter.hpp                 |   39 +-
 src/common/io/reads/binary_streams.hpp             |  140 ++
 .../io/reads}/careful_filtering_reader_wrapper.hpp |    2 +-
 .../io/reads}/converting_reader_wrapper.hpp        |    0
 .../io/reads}/delegating_reader_wrapper.hpp        |    0
 .../io/reads}/fasta_fastq_gz_parser.hpp            |   10 +-
 .../reads_io => common/io/reads}/file_reader.hpp   |    4 +-
 .../io/reads}/filtering_reader_wrapper.hpp         |    0
 .../io/reads_io => common/io/reads}/io_helper.hpp  |   10 +-
 .../io/reads_io => common/io/reads}/ireader.hpp    |    2 +-
 .../reads_io => common/io/reads}/ireadstream.hpp   |   16 +-
 .../io/reads}/modifying_reader_wrapper.hpp         |    6 +-
 .../reads_io => common/io/reads}/mpmc_bounded.hpp  |    0
 .../io/reads}/multifile_reader.hpp                 |    0
 .../reads_io => common/io/reads}/orientation.hpp   |    0
 .../io/reads}/osequencestream.hpp                  |   35 +-
 src/{modules => common}/io/reads/paired_read.hpp   |    0
 .../io/reads}/paired_readers.hpp                   |    2 +-
 .../io/reads_io => common/io/reads}/parser.cpp     |   10 +-
 .../io/reads_io => common/io/reads}/parser.hpp     |    2 +-
 .../io/reads}/rc_reader_wrapper.hpp                |    0
 src/{modules => common}/io/reads/read.hpp          |   12 +-
 .../io/reads}/read_processor.hpp                   |    4 +-
 .../io/reads}/read_stream_vector.hpp               |   46 -
 .../io/reads}/sequence_reader.hpp                  |    4 +-
 src/{modules => common}/io/reads/single_read.hpp   |   46 +-
 .../io/reads}/splitting_wrapper.hpp                |   10 +-
 .../reads_io => common/io/reads}/vector_reader.hpp |    2 +-
 .../io/reads}/wrapper_collection.hpp               |    2 +-
 .../io/sam_io => common/io/sam}/bam_parser.hpp     |   10 +-
 .../io/sam_io => common/io/sam}/bam_reader.hpp     |    2 +-
 src/{modules/io/sam_io => common/io/sam}/read.cpp  |    2 +-
 src/{modules/io/sam_io => common/io/sam}/read.hpp  |    0
 .../io/sam_io => common/io/sam}/sam_reader.cpp     |    6 +-
 .../io/sam_io => common/io/sam}/sam_reader.hpp     |    2 +-
 src/{modules => common}/math/smooth.hpp            |   36 +-
 src/{modules => common}/math/xmath.h               |    0
 .../algorithms => common/modules}/CMakeLists.txt   |    6 +-
 src/common/modules/alignment/bwa_index.cpp         |  327 +++
 src/common/modules/alignment/bwa_index.hpp         |   44 +
 .../modules/alignment/bwa_sequence_mapper.hpp      |   35 +
 .../modules/alignment}/edge_index.hpp              |   10 +-
 .../modules/alignment}/edge_index_refiller.cpp     |    8 +-
 .../modules/alignment}/edge_index_refiller.hpp     |    0
 .../modules/alignment}/kmer_map.hpp                |    6 +-
 .../modules/alignment}/kmer_mapper.hpp             |  100 +-
 .../modules/alignment}/kmer_mapper_logger.hpp      |    6 +-
 .../modules/alignment}/long_read_mapper.hpp        |  126 +-
 .../modules/alignment}/long_read_storage.hpp       |   78 +-
 .../modules/alignment}/pacbio/pac_index.hpp        |  268 ++-
 .../alignment}/pacbio/pacbio_read_structures.hpp   |  121 +-
 .../modules/alignment}/sequence_mapper.hpp         |  130 +-
 .../alignment}/sequence_mapper_notifier.hpp        |   42 +-
 .../modules/alignment}/short_read_mapper.hpp       |   39 +-
 .../modules}/genome_consistance_checker.cpp        |   50 +-
 .../modules}/genome_consistance_checker.hpp        |   16 +-
 .../modules}/graph_construction.hpp                |   20 +-
 .../modules}/graph_read_correction.hpp             |    6 +-
 .../modules}/mismatch_shall_not_pass.hpp           |   32 +-
 .../modules}/path_extend/CMakeLists.txt            |    9 +-
 .../modules}/path_extend/extension_chooser.hpp     |  603 +----
 .../modules}/path_extend/ideal_pair_info.hpp       |    0
 .../modules}/path_extend/loop_traverser.hpp        |   76 +-
 .../modules}/path_extend/overlap_analysis.hpp      |    6 +-
 .../modules}/path_extend/paired_library.hpp        |   69 +-
 .../modules}/path_extend/path_extender.hpp         |  309 +--
 .../modules}/path_extend/path_filter.hpp           |   34 +-
 .../modules}/path_extend/path_visualizer.hpp       |   50 +-
 .../modules}/path_extend/pe_config_struct.cpp      |   58 +-
 .../modules}/path_extend/pe_config_struct.hpp      |   85 +-
 .../modules}/path_extend/pe_resolver.hpp           |   78 +-
 .../modules}/path_extend/pe_utils.hpp              |  167 +-
 .../path_extend/pipeline/extenders_logic.cpp       |  423 ++++
 .../path_extend/pipeline/extenders_logic.hpp       |  118 +
 .../path_extend/pipeline/launch_support.cpp        |  128 +
 .../path_extend/pipeline/launch_support.hpp        |  145 ++
 .../modules/path_extend/pipeline/launcher.cpp      |  448 ++++
 .../modules/path_extend/pipeline/launcher.hpp      |  115 +
 .../scaffolder2015/connection_condition2015.cpp    |  260 ++
 .../scaffolder2015/connection_condition2015.hpp    |  143 ++
 .../scaffolder2015/extension_chooser2015.cpp       |   37 +-
 .../scaffolder2015/extension_chooser2015.hpp       |   65 +
 .../path_extend/scaffolder2015/path_polisher.cpp   |  326 +++
 .../path_extend/scaffolder2015/path_polisher.hpp   |   85 +
 .../path_extend/scaffolder2015/scaffold_graph.cpp  |   31 +-
 .../path_extend/scaffolder2015/scaffold_graph.hpp  |   18 +-
 .../scaffolder2015/scaffold_graph_constructor.cpp  |   14 +-
 .../scaffolder2015/scaffold_graph_constructor.hpp  |   27 +-
 .../scaffolder2015/scaffold_graph_visualizer.cpp   |   23 +-
 .../scaffolder2015/scaffold_graph_visualizer.hpp   |   79 +
 .../modules}/path_extend/split_graph_pair_info.hpp |   23 +-
 src/common/modules/path_extend/weight_counter.hpp  |  357 +++
 .../modules}/simplification/bulge_remover.hpp      |  153 +-
 .../modules}/simplification/cleaner.hpp            |   10 +-
 .../simplification/complex_bulge_remover.hpp       |  285 ++-
 .../modules/simplification/complex_tip_clipper.hpp |  178 ++
 .../modules}/simplification/compressor.hpp         |   70 +-
 .../simplification/dominated_set_finder.hpp        |    3 +-
 .../simplification/ec_threshold_finder.hpp         |    6 +-
 .../erroneous_connection_remover.hpp               |  625 +++--
 .../modules}/simplification/mf_ec_remover.hpp      |    0
 .../parallel_simplification_algorithms.hpp         |  146 +-
 .../simplification/relative_coverage_remover.hpp   |  336 +--
 .../modules}/simplification/tip_clipper.hpp        |   39 +-
 .../topological_edge_conditions.hpp}               |  277 ++-
 .../paired_info/concurrent_pair_info_buffer.hpp    |  120 +
 .../paired_info/data_divider.hpp                   |    2 +-
 .../paired_info/distance_estimation.hpp            |   49 +-
 src/{modules => common}/paired_info/histogram.hpp  |   21 +-
 src/common/paired_info/histptr.hpp                 |  156 ++
 .../paired_info/index_point.hpp                    |    0
 .../paired_info/insert_size_refiner.hpp            |    4 +-
 src/{modules => common}/paired_info/is_counter.hpp |   71 +-
 .../paired_info/pair_info_bounds.hpp               |    2 +-
 src/common/paired_info/pair_info_filler.hpp        |  108 +
 .../paired_info/pair_info_filters.hpp              |    0
 .../paired_info/pair_info_improver.hpp             |   10 +-
 .../paired_info/paired_info.hpp                    |  398 ++-
 src/common/paired_info/paired_info_buffer.hpp      |  227 ++
 .../paired_info/paired_info_helpers.hpp            |    0
 .../paired_info/peak_finder.hpp                    |    2 +-
 .../paired_info/smoothing_distance_estimation.hpp  |    6 +-
 .../paired_info/split_path_constructor.hpp         |   18 +-
 .../paired_info/weighted_distance_estimation.hpp   |   38 +-
 src/{modules => common}/paired_info/weights.hpp    |    9 +-
 src/{modules => common}/pipeline/CMakeLists.txt    |    0
 src/{modules => common}/pipeline/config_common.hpp |    6 +-
 src/{modules => common}/pipeline/config_singl.hpp  |    2 +-
 src/{modules => common}/pipeline/config_struct.cpp |   65 +-
 src/{modules => common}/pipeline/config_struct.hpp |   29 +-
 src/{modules => common}/pipeline/genomic_info.hpp  |    0
 .../pipeline/genomic_info_filler.cpp               |    6 +-
 .../pipeline/genomic_info_filler.hpp               |    0
 src/{modules => common}/pipeline/graph_pack.hpp    |   35 +-
 src/{modules => common}/pipeline/graphio.hpp       |   31 +-
 src/{modules => common}/pipeline/library.cpp       |    4 +-
 src/{modules => common}/pipeline/library.hpp       |   76 +-
 src/{modules => common}/pipeline/library.inl       |    0
 src/{modules => common}/pipeline/stage.cpp         |    2 +-
 src/{modules => common}/pipeline/stage.hpp         |    0
 .../sequence/genome_storage.hpp}                   |   38 +-
 .../data_structures => common}/sequence/nucl.hpp   |    2 +-
 .../sequence/quality.hpp                           |    0
 .../data_structures => common}/sequence/rtseq.hpp  |   21 +-
 .../data_structures => common}/sequence/seq.hpp    |    6 +-
 .../sequence/seq_common.hpp}                       |   29 +-
 .../sequence/sequence.hpp                          |    4 +-
 .../sequence/sequence_tools.hpp                    |    4 +-
 .../sequence/simple_seq.hpp                        |    6 +-
 src/{modules => common}/stages/CMakeLists.txt      |    0
 src/{modules => common}/stages/construction.cpp    |    8 +-
 src/{modules => common}/stages/construction.hpp    |    0
 src/{modules => common}/stages/simplification.cpp  |  253 +-
 src/{modules => common}/stages/simplification.hpp  |    0
 .../graph_simplification.hpp                       |  678 ++++++
 .../simplification_pipeline/rna_simplification.hpp |   22 +
 .../simplification_settings.hpp                    |    2 +-
 .../single_cell_simplification.hpp                 |   68 +-
 src/{modules/math => common/utils}/CMakeLists.txt  |   14 +-
 .../utils}/autocompletion.cpp                      |    1 +
 .../utils}/autocompletion.hpp                      |    0
 .../dev_support => common/utils}/copy_file.cpp     |    4 +-
 .../dev_support => common/utils}/copy_file.hpp     |    2 +-
 .../utils/coverage_model}/CMakeLists.txt           |    8 +-
 .../utils/coverage_model}/kmer_coverage_model.cpp  |   96 +-
 .../utils/coverage_model}/kmer_coverage_model.hpp  |   18 +-
 .../dev_support => common/utils}/cpp_utils.hpp     |    0
 .../debruijn_graph/debruijn_graph_constructor.hpp  |   56 +-
 .../utils}/debruijn_graph/early_simplification.hpp |   13 +-
 .../dev_support => common/utils}/file_limit.hpp    |    2 +-
 .../utils}/indices/edge_index_builders.hpp         |    2 +-
 .../utils}/indices/edge_info_updater.hpp           |   51 +-
 .../utils}/indices/edge_multi_index.hpp            |    2 +-
 .../utils}/indices/edge_position_index.hpp         |   92 +-
 .../utils}/indices/editable_index.hpp              |   14 +-
 .../utils}/indices/key_with_hash.hpp               |    4 +-
 .../utils}/indices/kmer_extension_index.hpp        |    6 +-
 .../indices/kmer_extension_index_builder.hpp       |   14 +-
 .../utils}/indices/kmer_splitters.hpp              |   23 +-
 .../utils}/indices/perfect_hash_map.hpp            |   53 +-
 .../utils}/indices/perfect_hash_map_builder.hpp    |   10 +-
 .../utils}/indices/storing_traits.hpp              |   38 +-
 .../utils}/indices/values.hpp                      |    0
 src/{ => common}/utils/levenshtein.hpp             |    2 +-
 src/{modules/dev_support => common/utils}/log.hpp  |    0
 .../utils}/logger/log_writers.hpp                  |    2 +-
 .../dev_support => common/utils}/logger/logger.hpp |    2 +-
 .../utils}/logger/logger_impl.cpp                  |    2 +-
 src/{modules/dev_support => common/utils}/md5.h    |    0
 .../dev_support => common/utils}/memory.hpp        |    0
 .../dev_support => common/utils}/memory_limit.hpp  |    0
 .../utils}/mph_index/CMakeLists.txt                |    0
 .../utils}/mph_index/base_hash.hpp                 |    0
 .../utils}/mph_index/bitpair_vector.cpp            |    0
 .../utils}/mph_index/bitpair_vector.hpp            |    0
 .../utils}/mph_index/common.hpp                    |    0
 .../utils}/mph_index/emphf_config.hpp              |    0
 .../utils}/mph_index/hypergraph.hpp                |    0
 .../utils}/mph_index/hypergraph_sorter_seq.hpp     |    2 +-
 .../utils}/mph_index/kmer_index.hpp                |    0
 .../utils}/mph_index/kmer_index_builder.hpp        |  120 +-
 .../utils}/mph_index/kmer_index_traits.hpp         |    7 +-
 .../utils}/mph_index/mphf.hpp                      |    2 +-
 .../utils}/mph_index/ranked_bitpair_vector.hpp     |    0
 .../dev_support => common/utils}/openmp_wrapper.h  |    0
 .../utils}/parallel_wrapper.hpp                    |    0
 .../dev_support => common/utils}/path_helper.cpp   |    2 +-
 .../dev_support => common/utils}/path_helper.hpp   |    4 +-
 .../dev_support => common/utils}/perfcounter.hpp   |    0
 .../dev_support => common/utils}/range.hpp         |    2 +-
 .../utils}/segfault_handler.hpp                    |    2 +-
 .../dev_support => common/utils}/simple_tools.hpp  |   13 +-
 .../dev_support => common/utils}/stacktrace.hpp    |    0
 .../dev_support => common/utils}/standard_base.hpp |   10 +-
 .../dev_support => common/utils}/verify.hpp        |    2 +-
 .../visualization/graph_colorer.hpp                |  125 +-
 .../visualization/graph_labeler.hpp                |   94 +-
 .../visualization}/graph_print_utils.hpp           |    9 +-
 .../visualization/graph_printer.hpp                |   74 +-
 .../visualization/position_filler.hpp              |   47 +-
 .../visualization/printing_parameter_storage.hpp   |   21 +-
 .../visualization/vertex_linker.hpp                |   19 +-
 .../visualization/visualization.hpp                |    0
 src/common/visualization/visualization_utils.hpp   |  223 ++
 .../visualization/visualizers.hpp                  |   97 +-
 src/modules/CMakeLists.txt                         |   24 -
 .../algorithms/path_extend/next_path_searcher.hpp  | 1031 --------
 .../algorithms/path_extend/path_extend_launch.hpp  | 1257 ----------
 src/modules/algorithms/path_extend/pe_io.hpp       |  290 ---
 .../scaffolder2015/connection_condition2015.cpp    |  144 --
 .../scaffolder2015/connection_condition2015.hpp    |   90 -
 .../scaffolder2015/extension_chooser2015.hpp       |   59 -
 .../scaffolder2015/scaffold_graph_visualizer.hpp   |   73 -
 .../algorithms/path_extend/weight_counter.hpp      |  544 -----
 .../simplification/complex_tip_clipper.hpp         |  158 --
 .../assembly_graph/components/graph_component.hpp  |  198 --
 .../graph_alignment/pacbio/pacbio_gap_closer.hpp   |  396 ---
 .../graph_support/graph_processing_algorithm.hpp   |  262 --
 .../graph_support/parallel_processing.hpp          |  290 ---
 .../graph_support/scaff_supplementary.cpp          |   66 -
 .../graph_support/scaff_supplementary.hpp          |   77 -
 src/modules/assembly_graph/paths/path_utils.hpp    |  128 -
 .../data_structures/sequence/CMakeLists.txt        |   10 -
 .../data_structures/sequence/genome_storage.hpp    |   33 -
 src/modules/dev_support/CMakeLists.txt             |   13 -
 src/modules/dev_support/func.hpp                   |   69 -
 src/modules/io/dataset_support/dataset_readers.hpp |  122 -
 src/modules/io/reads_io/binary_streams.hpp         |  357 ---
 src/modules/io/reads_io/cutting_reader_wrapper.hpp |  135 --
 src/modules/io/reads_io/easy_reader.hpp            |  122 -
 src/modules/io/reads_io/is_corrupting_wrapper.hpp  |   33 -
 src/modules/paired_info/bwa_pair_info_filler.cpp   |  408 ----
 src/modules/paired_info/bwa_pair_info_filler.hpp   |  253 --
 src/modules/paired_info/pair_info_filler.hpp       |  119 -
 .../graph_simplification.hpp                       | 1034 --------
 src/modules/visualization/visualization_utils.hpp  |  210 --
 src/projects/CMakeLists.txt                        |    2 +-
 src/projects/cap/assembly_compare.hpp              |   22 +-
 src/projects/cap/assembly_problem_detection.hpp    |    8 +-
 src/projects/cap/cap_commands.hpp                  |    4 +-
 src/projects/cap/cap_environment.hpp               |    4 +-
 src/projects/cap/cap_environment_manager.hpp       |    2 +-
 src/projects/cap/cap_kmer_index.hpp                |    8 +-
 src/projects/cap/cap_logger.hpp                    |    2 +-
 src/projects/cap/colored_graph_construction.hpp    |   10 +-
 src/projects/cap/coloring.hpp                      |   25 +-
 src/projects/cap/compare_standard.hpp              |   26 +-
 src/projects/cap/comparison_utils.hpp              |   16 +-
 src/projects/cap/coordinates_handler.hpp           |    4 +-
 src/projects/cap/deprecated/tools_deprecated.cpp   |    4 +-
 src/projects/cap/diff_masking.hpp                  |   10 +-
 src/projects/cap/gene_analysis.hpp                 |    4 +-
 src/projects/cap/genome_correction.hpp             |    8 +-
 src/projects/cap/junk_cropping_reader.hpp          |    4 +-
 src/projects/cap/longseq.hpp                       |    6 +-
 src/projects/cap/main.cpp                          |    8 +-
 src/projects/cap/mosaic.hpp                        |   20 +-
 src/projects/cap/repeat_masking.hpp                |    8 +-
 src/projects/cap/serialization.hpp                 |    2 +-
 src/projects/cap/simple_inversion_finder.hpp       |    8 +-
 src/projects/cap/stats.hpp                         |   38 +-
 src/projects/cap/tools.cpp                         |    2 +-
 src/projects/cap/untangling.hpp                    |    4 +-
 src/projects/cap/visualization.hpp                 |   28 +-
 src/projects/cclean/CMakeLists.txt                 |   30 +
 src/projects/cclean/adapter_index.cpp              |   50 +
 src/projects/cclean/adapter_index.hpp              |   61 +
 src/projects/cclean/additional.cpp                 |   69 +
 src/projects/cclean/brute_force_clean.cpp          |   97 +
 src/projects/cclean/brute_force_clean.hpp          |   72 +
 .../cclean/comparator.hpp}                         |   20 +-
 src/projects/cclean/config_struct_cclean.cpp       |   44 +
 src/projects/cclean/config_struct_cclean.hpp       |   42 +
 src/projects/cclean/job_wrappers.cpp               |   97 +
 src/projects/cclean/job_wrappers.hpp               |   73 +
 src/projects/cclean/main.cpp                       |   86 +
 src/projects/cclean/output.cpp                     |   82 +
 src/projects/cclean/output.hpp                     |   49 +
 src/projects/cclean/running_modes.cpp              |  268 +++
 src/projects/cclean/running_modes.hpp              |   93 +
 src/projects/cclean/utils.cpp                      |  136 ++
 src/projects/cclean/utils.hpp                      |   58 +
 .../{hammer => cclean}/valid_kmer_generator.hpp    |   32 +-
 src/projects/corrector/CMakeLists.txt              |    2 +-
 src/projects/corrector/config_struct.cpp           |    2 +-
 src/projects/corrector/contig_processor.cpp        |    8 +-
 src/projects/corrector/contig_processor.hpp        |    6 +-
 src/projects/corrector/dataset_processor.cpp       |   10 +-
 src/projects/corrector/dataset_processor.hpp       |    6 +-
 .../corrector/interesting_pos_processor.cpp        |    2 +-
 src/projects/corrector/main.cpp                    |    4 +-
 src/projects/dipspades/CMakeLists.txt              |    6 +-
 .../consensus_contigs_constructor.hpp              |    4 +-
 .../contig_correctors/close_gaps_corrector.hpp     |    2 +-
 src/projects/dipspades/dipspades.hpp               |    4 +-
 src/projects/dipspades/dipspades_config.cpp        |    2 +-
 .../conservative_regions_searcher.hpp              |    4 +-
 .../dipspades/kmer_gluing/equal_sequence_gluer.hpp |    2 +-
 src/projects/dipspades/main.cpp                    |   10 +-
 .../bulge_paths_searcher.hpp                       |    2 +-
 .../complex_bulge_remover.hpp                      |    2 +-
 .../polymorphic_bulge_remover.hpp                  |    8 +-
 src/projects/dipspades/utils/edge_gluer.hpp        |    2 +-
 src/projects/dipspades/utils/path_routines.hpp     |    5 +-
 src/projects/hammer/CMakeLists.txt                 |    2 +-
 src/projects/hammer/config_struct_hammer.cpp       |    2 +-
 src/projects/hammer/hamcluster.cpp                 |    4 +-
 src/projects/hammer/hamcluster.hpp                 |    6 +-
 src/projects/hammer/hammer_tools.cpp               |    4 +-
 src/projects/hammer/hammer_tools.hpp               |    6 +-
 src/projects/hammer/kmer_cluster.cpp               |    4 +-
 src/projects/hammer/kmer_data.cpp                  |   14 +-
 src/projects/hammer/kmer_data.hpp                  |    4 +-
 src/projects/hammer/kmer_stat.hpp                  |    4 +-
 src/projects/hammer/main.cpp                       |   14 +-
 src/projects/hammer/parallel_radix_sort.hpp        |    2 +-
 src/projects/hammer/quake_correct/bithash.cpp      |    2 +-
 src/projects/hammer/quake_count/quake_count.cpp    |    2 +-
 src/projects/hammer/quake_count/quake_count_17.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_19.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_21.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_25.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_29.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_33.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_37.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_45.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_55.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_65.cpp |    2 +-
 src/projects/hammer/quake_count/quake_count_75.cpp |    2 +-
 .../hammer/quake_count/valid_kmer_generator.hpp    |    2 +-
 src/projects/hammer/quake_enhanced/count.cpp       |    2 +-
 src/projects/hammer/quake_enhanced/count/count.cpp |    2 +-
 .../quake_enhanced/filter_trusted_enh/main.cpp     |    2 +-
 src/projects/hammer/valid_kmer_generator.hpp       |    2 +-
 src/projects/ionhammer/CMakeLists.txt              |    2 +-
 src/projects/ionhammer/HSeq.hpp                    |    2 +-
 src/projects/ionhammer/config_struct.cpp           |    2 +-
 src/projects/ionhammer/err_helper_table.cpp        |    2 +-
 src/projects/ionhammer/err_helper_table.hpp        |    2 +-
 src/projects/ionhammer/expander.cpp                |    2 +-
 src/projects/ionhammer/hamcluster.cpp              |    4 +-
 src/projects/ionhammer/hamcluster.hpp              |    4 +-
 src/projects/ionhammer/kmer_data.cpp               |   10 +-
 src/projects/ionhammer/kmer_data.hpp               |    2 +-
 src/projects/ionhammer/main.cpp                    |   20 +-
 src/projects/ionhammer/read_corrector.hpp          |    2 +-
 src/projects/ionhammer/subcluster.cpp              |    2 +-
 src/projects/mph_test/CMakeLists.txt               |    2 +-
 src/projects/mph_test/main.cpp                     |   27 +-
 src/projects/mts/CMakeLists.txt                    |   57 +
 src/projects/mts/Common.snake                      |   69 +
 src/projects/mts/README                            |   21 +
 src/projects/mts/Snakefile                         |  175 ++
 src/projects/mts/Stats.snake                       |  270 +++
 src/projects/mts/annotation.hpp                    |  310 +++
 src/projects/mts/config.yaml                       |   10 +
 src/projects/mts/contig_abundance.cpp              |  176 ++
 src/projects/mts/contig_abundance.hpp              |  143 ++
 src/projects/mts/contig_abundance_counter.cpp      |  101 +
 src/projects/mts/formats.hpp                       |   29 +
 src/projects/mts/kmc_api/kmc_file.cpp              | 1093 +++++++++
 src/projects/mts/kmc_api/kmc_file.h                |  141 ++
 src/projects/mts/kmc_api/kmer_api.cpp              |   48 +
 src/projects/mts/kmc_api/kmer_api.h                |  596 +++++
 src/projects/mts/kmc_api/kmer_defs.h               |   54 +
 src/projects/mts/kmc_api/mmer.cpp                  |   49 +
 src/projects/mts/kmc_api/mmer.h                    |  182 ++
 src/projects/mts/kmc_api/stdafx.h                  |    4 +
 src/projects/mts/kmer_multiplicity_counter.cpp     |  256 ++
 src/projects/mts/log.properties                    |   10 +
 src/projects/mts/logger.hpp                        |   11 +
 src/projects/mts/mts.py                            |   73 +
 src/projects/mts/prop_binning.cpp                  |  128 +
 src/projects/mts/propagate.cpp                     |  331 +++
 src/projects/mts/propagate.hpp                     |   29 +
 src/projects/mts/read_binning.cpp                  |   90 +
 src/projects/mts/read_binning.hpp                  |   92 +
 .../empty.cpp => projects/mts/scripts/__init__.py} |    0
 src/projects/mts/scripts/calc_kmers_mpl.py         |   38 +
 src/projects/mts/scripts/canopy_launch.sh          |   17 +
 src/projects/mts/scripts/choose_samples.py         |   61 +
 src/projects/mts/scripts/combine_contigs.py        |   28 +
 src/projects/mts/scripts/common.py                 |  121 +
 src/projects/mts/scripts/filter_nucmer.py          |   54 +
 src/projects/mts/scripts/gather_stats.py           |   28 +
 src/projects/mts/scripts/gen_samples.py            |   96 +
 src/projects/mts/scripts/make_input.py             |   53 +
 src/projects/mts/scripts/make_points_matrix.py     |   35 +
 src/projects/mts/scripts/parse_output.py           |   58 +
 src/projects/mts/scripts/pca.R                     |   77 +
 src/projects/mts/scripts/ref_stats.sh              |   63 +
 src/projects/mts/scripts/split_bins.py             |   30 +
 src/projects/mts/stats.cpp                         |  194 ++
 src/projects/mts/test.py                           |  205 ++
 src/projects/mts/visualization.hpp                 |   66 +
 src/projects/online_vis/CMakeLists.txt             |    6 +-
 src/projects/online_vis/debruijn_environment.hpp   |    6 +-
 .../drawing_commands/draw_contig_command.hpp       |    2 +-
 .../drawing_commands/draw_missasemblies.hpp        |    4 +-
 .../drawing_commands/draw_polymorphic_regions.hpp  |   17 +-
 .../drawing_commands/draw_poorly_assembled.hpp     |    6 +-
 .../drawing_commands/draw_position_command.hpp     |    4 +-
 .../drawing_commands/drawing_command.hpp           |    8 +-
 .../drawing_commands/show_position_command.hpp     |    4 +-
 src/projects/online_vis/environment.hpp            |    6 +-
 src/projects/online_vis/main.cpp                   |   10 +-
 src/projects/online_vis/online_visualizer.hpp      |    2 +-
 .../position_commands/fill_position_command.hpp    |    2 +-
 src/projects/online_vis/processing_commands.hpp    |    4 +-
 src/projects/online_vis/standard_vis.hpp           |    2 +-
 .../junction_sequence_command.hpp                  |    4 +-
 .../statistics_commands/print_contigs_stats.hpp    |    2 +-
 src/projects/online_vis/vis_logger.hpp             |    6 +-
 src/projects/scaffold_correction/CMakeLists.txt    |    2 +-
 src/projects/scaffold_correction/main.cpp          |   15 +-
 .../scaffold_correction/scaffold_correction.hpp    |   17 +-
 src/projects/spades/CMakeLists.txt                 |   10 +-
 src/projects/spades/chromosome_removal.cpp         |   45 +-
 src/projects/spades/chromosome_removal.hpp         |    3 +-
 src/projects/spades/contig_output_stage.cpp        |   55 +
 src/projects/spades/contig_output_stage.hpp        |   29 +
 src/projects/spades/distance_estimation.cpp        |    8 +-
 src/projects/spades/gap_closer.cpp                 |  146 +-
 src/projects/spades/gap_closing.hpp                |   74 +
 src/projects/spades/hybrid_aligning.cpp            |  462 ++++
 .../{pacbio_aligning.hpp => hybrid_aligning.hpp}   |    6 +-
 src/projects/spades/hybrid_gap_closer.hpp          |  743 ++++++
 src/projects/spades/launch.hpp                     |   69 +-
 src/projects/spades/main.cpp                       |    8 +-
 src/projects/spades/mismatch_correction.cpp        |    4 +-
 src/projects/spades/pacbio_aligning.cpp            |  185 --
 src/projects/spades/pair_info_count.cpp            |  409 ++--
 src/projects/spades/repeat_resolving.cpp           |   60 +-
 src/projects/spades/repeat_resolving.hpp           |   12 -
 src/projects/spades/second_phase_setup.cpp         |    5 +-
 src/projects/spades/second_phase_setup.hpp         |    7 +-
 src/projects/spades/series_analysis.hpp            |  323 +++
 .../truseq_analysis/AlignmentAnalyserNew.cpp       |    4 +-
 .../truseq_analysis/AlignmentAnalyserNew.hpp       |    2 +-
 src/projects/truseq_analysis/CMakeLists.txt        |    2 +-
 .../truseq_analysis/alignment_analyser.cpp         |    2 +-
 .../truseq_analysis/alignment_analyser.hpp         |    4 +-
 src/projects/truseq_analysis/analysis_pipeline.cpp |    6 +-
 src/projects/truseq_analysis/analysis_pipeline.hpp |    2 +-
 .../truseq_analysis/consistent_mapping.cpp         |    2 +-
 src/projects/truseq_analysis/main.cpp              |    8 +-
 src/spades_pipeline/corrector_logic.py             |    6 +-
 src/spades_pipeline/hammer_logic.py                |    2 +
 src/spades_pipeline/options_storage.py             |   19 +-
 src/spades_pipeline/spades_logic.py                |   16 +-
 src/spades_pipeline/support.py                     |   21 +
 test_dataset_plasmid/pl1.fq.gz                     |  Bin 0 -> 68202 bytes
 test_dataset_plasmid/pl2.fq.gz                     |  Bin 0 -> 68276 bytes
 602 files changed, 25877 insertions(+), 13974 deletions(-)

diff --git a/LICENSE b/LICENSE
index 9decf09..0438b8d 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,5 +1,5 @@
 SPADES: SAINT-PETERSBURG GENOME ASSEMBLER
-Copyright (c) 2015-2016 Saint Petersburg State University
+Copyright (c) 2015-2017 Saint Petersburg State University
 Copyright (c) 2011-2014 Saint Petersburg Academic University
 
 SPAdes is free software; you can redistribute it and/or modify
diff --git a/VERSION b/VERSION
index 6bd1074..f870be2 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-3.9.1
+3.10.1
diff --git a/changelog.html b/changelog.html
index 6731f56..e94a96d 100644
--- a/changelog.html
+++ b/changelog.html
@@ -3,6 +3,33 @@
 
 <h2>SPAdes Genome Assembler changelog</h2>
 
+<h3>SPAdes 3.10.1, 1 March 2017</h3>
+
+<p>FIX: Build for MacOS.</p>
+
+<p>FIX: Minor bugs in hybridSPAdes pipeline.</p>
+
+<p>FIX: <code>--continue</code> option for metaSPAdes.</p>
+
+<p>FIX: <code>--tmp-dir</code> is now works correctly for MismatchCorrector.</p>
+
+<p>FIX: <code>Assertion `overlap <= k_' failed</code> in rnaSPAdes and metaSPAdes.</p>
+
+<p>FIX: <code>Assertion `path.Length() > 0' failed</code> in metaSPAdes.</p>
+
+<h3>SPAdes 3.10.0, 27 January 2017</h3>
+
+<p>NEW: Scaffolding algorithm for mate-pairs and long reads.</p>
+
+<p>NEW: Contigs and graph output in GFA format.
+
+<p>CHANGE: Better running time and RAM consumption for all pipelines.</p>
+
+<p>CHANGE: Improvements in metagenomic pipeline.</p>
+
+<p>CHANGE: Improved isoform detection algorithm in rnaSPAdes.</p>
+
+
 <h3>SPAdes 3.9.1, 4 December 2016</h3>
 
 <p>FIX: macOS Sierra crash.</p>
diff --git a/configs/debruijn/config.info b/configs/debruijn/config.info
index 1620f30..df5179a 100644
--- a/configs/debruijn/config.info
+++ b/configs/debruijn/config.info
@@ -95,6 +95,13 @@ use_scaffolder  true
 
 avoid_rc_connections true
 
+contig_output {
+    contigs_name    final_contigs
+    scaffolds_name  scaffolds
+    ; none  --- do not output broken scaffolds | break_gaps --- break only by N steches | break_all --- break all with overlap < k
+    output_broken_scaffolds     break_gaps
+}
+
 ;position handling
 
 pos
@@ -129,17 +136,19 @@ kmer_coverage_model {
 pacbio_processor
 {
 ;align and traverse.
-	pacbio_k 13
-	additional_debug_info false
-	compression_cutoff 0.6
-	domination_cutoff 1.5
-	path_limit_stretching 1.3
-	path_limit_pressing 0.7
-	ignore_middle_alignment true
-    ;gap_closer
-	long_seq_limit 400
-	pacbio_min_gap_quantity 2
-	contigs_min_gap_quantity 1
+    pacbio_k 13
+    additional_debug_info false
+    compression_cutoff 0.6
+    domination_cutoff 1.5
+    path_limit_stretching 1.3
+    path_limit_pressing 0.7
+    ignore_middle_alignment true
+    max_path_in_dijkstra 15000
+    max_vertex_in_dijkstra 2000
+;gap_closer
+    long_seq_limit 400
+    pacbio_min_gap_quantity 2
+    contigs_min_gap_quantity 1
     max_contigs_gap_length 10000
 }
 
@@ -162,3 +171,4 @@ bwa_aligner
 
 ;flanking coverage range
 flanking_range 55
+series_analysis ""
diff --git a/configs/debruijn/distance_estimation.info b/configs/debruijn/distance_estimation.info
index 3761b05..20954c6 100644
--- a/configs/debruijn/distance_estimation.info
+++ b/configs/debruijn/distance_estimation.info
@@ -2,13 +2,15 @@
 
 de
 {
-    linkage_distance_coeff    0.0
-    max_distance_coeff        2.0
-    max_distance_coeff_scaff  2000.0
-    filter_threshold          2.0
+    linkage_distance_coeff    	0.0
+    max_distance_coeff        	2.0
+    max_distance_coeff_scaff  	2000.0
+    clustered_filter_threshold	2.0
+    raw_filter_threshold	2
+    rounding_coeff              0.5 ; rounding : min(de_max_distance * rounding_coeff, rounding_thr)
+    rounding_threshold          0
 }
 
-
 ade
 {
     ;data dividing
diff --git a/configs/debruijn/large_genome_mode.info b/configs/debruijn/large_genome_mode.info
new file mode 100644
index 0000000..128008e
--- /dev/null
+++ b/configs/debruijn/large_genome_mode.info
@@ -0,0 +1,22 @@
+mode large_genome
+
+
+pe {
+
+debug_output false
+
+params {
+    scaffolding_mode old_pe_2015
+}
+}
+
+
+bwa_aligner
+{
+    bwa_enable true
+    debug false
+    path_to_bwa ./bin/bwa-spades
+    min_contig_len 0
+}
+
+
diff --git a/configs/debruijn/log.properties b/configs/debruijn/log.properties
index cbe4c29..b19eafe 100644
--- a/configs/debruijn/log.properties
+++ b/configs/debruijn/log.properties
@@ -52,3 +52,9 @@ default=INFO
 #ScaffoldingPathExtender=DEBUG
 
 #BWAPairInfo=TRACE
+#LongReadMapper=TRACE
+#GapTrackingListener=TRACE
+#MultiGapJoiner=TRACE
+#HybridGapCloser=TRACE
+#GapJoiner=TRACE
+#CountingCallback=TRACE
diff --git a/configs/debruijn/mda_mode.info b/configs/debruijn/mda_mode.info
index c98df33..11c9815 100644
--- a/configs/debruijn/mda_mode.info
+++ b/configs/debruijn/mda_mode.info
@@ -98,10 +98,19 @@ simp
     }
 }
 
+de
+{
+    raw_filter_threshold	0
+    rounding_threshold          0
+}
+
+
 pe {
 params {
     normalize_weight        true
 
+    scaffolding_mode old
+
     ; extension selection
     extension_options
     {
@@ -114,7 +123,7 @@ params {
 
 long_reads {
     pacbio_reads {
-        unique_edge_priority 10000.0
+        unique_edge_priority 10.0
     }
 }
 }
diff --git a/configs/debruijn/meta_mode.info b/configs/debruijn/meta_mode.info
index 5462e69..69c7bdc 100644
--- a/configs/debruijn/meta_mode.info
+++ b/configs/debruijn/meta_mode.info
@@ -53,8 +53,6 @@ simp
         enabled               true
     }
     
-
-
     ; relative edge disconnector:
     relative_ed
     {
@@ -100,6 +98,15 @@ simp
         max_number_edges        3
     }
 
+    ; hidden ec remover
+    her
+    {
+        enabled                     true
+        uniqueness_length           1500
+        unreliability_threshold     -1.
+        relative_threshold          3.     
+    }
+
     init_clean
     {
        early_it_only   true
@@ -141,31 +148,57 @@ preliminary_simp
 	
 }
 
+; undo single cell config changes, enforce filtering
+de
+{
+    raw_filter_threshold	1
+    rounding_coeff              0.5 ; rounding : min(de_max_distance * rounding_coeff, rounding_thr)
+    rounding_threshold          0
+}
+
 ;NB decsends from sc_pe
 pe {
+
+long_reads {
+    pacbio_reads {
+        filtering   1.9
+        weight_priority    20.0
+        unique_edge_priority 10.0
+        min_significant_overlap 1000
+    }
+}
+
 params {
     remove_overlaps     true
     cut_all_overlaps  true
 
-    ;TODO proper configuration of different extenders is not supported 
-    ;TODO most settings ard hardcoded for now 
+    scaffolding_mode old_pe_2015
 
-    ;normalize_weight        NA
+    normalize_weight     true
+    
+    ; extension selection
     extension_options
     {
-        ;use_default_single_threshold NA
-        ;single_threshold           NA 
+        use_default_single_threshold true
+        single_threshold           0.3
         weight_threshold           0.6
+        priority_coeff             1.5
         max_repeat_length          1000000 
-   }
+    }
     
     use_coordinated_coverage true
 }
+
 }
 
 prelim_pe {
 params {
+    scaffolding_mode old
+
     use_coordinated_coverage false
     remove_overlaps     false
+    scaffolding2015 {
+        min_unique_length 100000000
+    }
 }
 }
diff --git a/configs/debruijn/moleculo_mode.info b/configs/debruijn/moleculo_mode.info
index 40c2a54..a3ad118 100644
--- a/configs/debruijn/moleculo_mode.info
+++ b/configs/debruijn/moleculo_mode.info
@@ -103,6 +103,8 @@ params {
     normalize_weight        true
     cut_all_overlaps  true
 
+    scaffolding_mode old
+
     ; extension selection
     extension_options
     {
diff --git a/configs/debruijn/path_extend/pe_params.info b/configs/debruijn/path_extend/pe_params.info
deleted file mode 100644
index 86f1cd6..0000000
--- a/configs/debruijn/path_extend/pe_params.info
+++ /dev/null
@@ -1,206 +0,0 @@
-default_pe {
-
-; output options
-
-debug_output    true
-
-output {
-    write_overlaped_paths   true
-    write_paths             true
-}
-
-visualize {
-    print_overlaped_paths   true
-    print_paths             true
-}
-
-; none | break_gaps | break_all
-output_broken_scaffolds     break_gaps
-
-params {
-    multi_path_extend   false
-    ; old | 2015 | combined | old_pe_2015
-    scaffolding_mode old_pe_2015
-
-    remove_overlaps     true
-    cut_all_overlaps  false
-
-    split_edge_length    99
-    normalize_weight     false
-    
-    ; extension selection
-    extension_options
-    {
-        use_default_single_threshold false
-        single_threshold           1.75676
-        weight_threshold           0.5
-        priority_coeff             1.5
-        max_repeat_length          8000
-    }    
-
-    mate_pair_options
-    {
-        use_default_single_threshold true
-        single_threshold           30
-        weight_threshold           0.5
-        priority_coeff             1.5
-        max_repeat_length          8000
-    }
-
-    scaffolder {
-        on            true
-        cutoff        2
-        rel_cutoff    0.1
-        sum_threshold 3  
-
-        cluster_info  true
-        cl_threshold  0
-
-        fix_gaps       true
-        use_la_gap_joiner true
-        ;next param should be 0.51 - 1.0 if use_old_score = true and 3.0 otherwise
-        min_gap_score   0.7
-
-        max_must_overlap  -2
-        max_can_overlap   0.5
-        short_overlap     6
-        artificial_gap    10
-        use_old_score   true
-
-        min_overlap_length 10
-        flank_addition_coefficient -5.9
-        flank_multiplication_coefficient 0.97
-    }
-    
-    loop_removal
-    {
-        max_loops       10
-        mp_max_loops    10
-    }
-
-    use_coordinated_coverage false
-    coordinated_coverage
-    {
-       max_edge_length_repeat 300
-       delta                  0.4
-    }
-
-    scaffolding2015 {
-        autodetect      true
-        min_unique_length 10000
-        unique_coverage_variation 0.5
-        ; (median * (1+variation) > unique > median * (1 - variation))
-    }
-
-    scaffold_graph {
-        construct    true
-        output       true
-        always_add   40         ; connection with read count >= always_add are always added to the graph
-        never_add     5         ; connection with read count < never_add are never added to the graph
-        relative_threshold 0.25 ; connection with read count >= max_read_count * relative_threshod are added to the graph if satisfy condition above, max_read_count is calculated amond all alternatives
-        graph_connectivity false
-        max_path_length 10000
-    }
-}
-
-
-long_reads {
-    pacbio_reads {
-        filtering   2.5
-        weight_priority    1.2
-        unique_edge_priority 5.0
-    }
-
-    single_reads {
-        filtering  1.25 
-        weight_priority    5.0
-        unique_edge_priority 1000.0
-    }
-
-    contigs {
-        filtering   0.0
-        weight_priority    1.5
-        unique_edge_priority 2.0
-    }
-
-    meta_untrusted_contigs {
-        filtering   0.0
-        weight_priority    100.0
-        unique_edge_priority 2.0
-    }
-
-}
-}
-
-sc_pe {
-params {
-    normalize_weight        true
-
-    ; extension selection
-    extension_options
-    {
-        use_default_single_threshold false
-        single_threshold           0.001
-        weight_threshold           0.6
-        max_repeat_length          8000
-    }
-
-}
-}
-
-moleculo_pe {
-params {
-    normalize_weight        true
-    cut_all_overlaps  true
-
-    ; extension selection
-    extension_options
-    {
-        use_default_single_threshold false
-        single_threshold           0.001
-        weight_threshold           0.6
-    }
-
-    scaffolder {
-        short_overlap     10
-        use_la_gap_joiner false
-    }
-}
-}
-
-;NB decsends from sc_pe
-meta_pe {
-params {
-    remove_overlaps     true
-    cut_all_overlaps  true
-
-    ;TODO proper configuration of different extenders is not supported 
-    ;TODO most settings ard hardcoded for now 
-
-    ;normalize_weight        NA
-    extension_options
-    {
-        ;use_default_single_threshold NA
-        ;single_threshold           NA 
-        weight_threshold           0.6
-        max_repeat_length          50000 
-   }
-    
-    use_coordinated_coverage true
-}
-}
-
-prelim_pe {
-params {
-    use_coordinated_coverage false
-    remove_overlaps     false
-}
-}
-
-rna_pe {
-
-params {
-    multi_path_extend   true
-    remove_overlaps     false
-}
-}
diff --git a/configs/debruijn/pe_params.info b/configs/debruijn/pe_params.info
index 9c838bd..0d7a172 100644
--- a/configs/debruijn/pe_params.info
+++ b/configs/debruijn/pe_params.info
@@ -14,27 +14,25 @@ visualize {
     print_paths             true
 }
 
-; none | break_gaps | break_all
-output_broken_scaffolds     break_gaps
-
 params {
     multi_path_extend   false
     ; old | 2015 | combined | old_pe_2015
-    scaffolding_mode old
+    scaffolding_mode old_pe_2015
 
     remove_overlaps     true
     cut_all_overlaps  false
 
     split_edge_length    99
-    normalize_weight     false
+    normalize_weight     true
     
     ; extension selection
     extension_options
     {
-        use_default_single_threshold false
-        single_threshold           1.75676
+        use_default_single_threshold true
+        single_threshold           0.1
         weight_threshold           0.5
         priority_coeff             1.5
+        ;TODO remove from here
         max_repeat_length          8000
     }    
 
@@ -44,6 +42,7 @@ params {
         single_threshold           30
         weight_threshold           0.5
         priority_coeff             1.5
+        ;TODO remove from here
         max_repeat_length          8000
     }
 
@@ -72,8 +71,8 @@ params {
         flank_addition_coefficient -5.9
         flank_multiplication_coefficient 0.97
 
-	    var_coeff 3.0
-	    basic_overlap_coeff 2.0
+        var_coeff 3.0
+        basic_overlap_coeff 2.0
     }
 
     path_cleaning
@@ -81,12 +80,6 @@ params {
         enabled false
     }
     
-    loop_removal
-    {
-        max_loops       10
-        mp_max_loops    10
-    }
-
     use_coordinated_coverage false
     coordinated_coverage
     {
@@ -96,11 +89,14 @@ params {
     }
 
     scaffolding2015 {
-        autodetect      true
-        min_unique_length 10000
-        unique_coverage_variation 0.5
         ; (median * (1+variation) > unique > median * (1 - variation))
-	relative_weight_cutoff 2.0
+        relative_weight_cutoff 2.0
+
+        unique_length_upper_bound 2000   ; max(unique_length_upper_bound, max_is(all libs))
+        unique_length_lower_bound 500    ; max(unique_length_lower_bound, unique_length_step)
+        unique_length_step 300
+
+        graph_connectivity_max_edges 200000
     }
 
     scaffold_graph {
@@ -109,9 +105,29 @@ params {
         always_add   40         ; connection with read count >= always_add are always added to the graph
         never_add     5         ; connection with read count < never_add are never added to the graph
         relative_threshold 0.25 ; connection with read count >= max_read_count * relative_threshod are added to the graph if satisfy condition above, max_read_count is calculated amond all alternatives
-        graph_connectivity false
+        use_graph_connectivity false
         max_path_length 10000
     }
+
+    genome_consistency_checker {
+        max_gap 1000
+        relative_max_gap 0.2
+    }
+
+    uniqueness_analyser {
+        enabled        true
+        unique_coverage_variation 0.5
+
+        nonuniform_coverage_variation 50
+        uniformity_fraction_threshold 0.8
+    }
+
+    loop_traversal
+    {
+        min_edge_length         1000
+        max_component_size      10
+        max_path_length         1000
+    }
 }
 
 
diff --git a/configs/debruijn/rna_mode.info b/configs/debruijn/rna_mode.info
index aad8fec..aae3d6f 100644
--- a/configs/debruijn/rna_mode.info
+++ b/configs/debruijn/rna_mode.info
@@ -2,6 +2,12 @@ mode rna
 
 preserve_raw_paired_index true
 
+contig_output {
+    scaffolds_name  transcripts
+    ; none  --- do not output broken scaffolds | break_gaps --- break only by N steches | break_all --- break all with overlap < k
+    output_broken_scaffolds     none
+}
+
 simp
 {
     ; enable advanced ec removal algo
@@ -33,17 +39,18 @@ simp
        ; ec_lb: max_ec_length = k + ec_lb
        ; icb: iterative coverage bound
        ; to_ec_lb: max_ec_length = 2*tip_length(to_ec_lb) - 1
-       ; condition               "{ ec_lb 9, icb 40.0 }"
+       ; nbr: use not bulge erroneous connections remover 
+       ; condition               "{ ec_lb 9, icb 40.0, nbr }"
        condition               "{ ec_lb 30, icb 50 }"
     }
 
     ; relative coverage erroneous connections remover:
     rcec
-        {
+    {
             enabled true
             rcec_lb 30
             rcec_cb 0.5
-        }
+    }
 
     rcc
     {
@@ -86,10 +93,24 @@ simp
 
 }
 
+; disable filtering in rna mode
+de
+{
+    raw_filter_threshold	0
+}
+
 pe {
 params {
-    ;multi_path_extend   true
-    ;remove_overlaps     false
+    multi_path_extend   true
+    remove_overlaps     false
+
+    scaffolding_mode old
+
+    extension_options
+    {
+        use_default_single_threshold true
+        single_threshold           0.05
+    }
 
     scaffolder {
         cutoff        1
diff --git a/configs/debruijn/simplification.info b/configs/debruijn/simplification.info
index 4351abd..3ee8e02 100644
--- a/configs/debruijn/simplification.info
+++ b/configs/debruijn/simplification.info
@@ -73,6 +73,7 @@ simp
     {
         enabled false
         diff_mult  20.
+        edge_sum   10000
     }
 
     ; final tip clipper:
diff --git a/ext/include/btree/btree.h b/ext/include/btree/btree.h
index e14afdb..d7a2cb6 100644
--- a/ext/include/btree/btree.h
+++ b/ext/include/btree/btree.h
@@ -663,7 +663,8 @@ class btree_node {
 
   // Inserts the value x at position i, shifting all existing values and
   // children at positions >= i to the right by 1.
-  void insert_value(int i, const value_type &x);
+  template<typename V>
+  void insert_value(int i, V &&x);
 
   // Removes the value at position i, shifting all existing values and children
   // at positions > i to the left by 1.
@@ -724,6 +725,12 @@ class btree_node {
   void value_init(int i, const value_type &x) {
     new (&fields_.values[i]) mutable_value_type(x);
   }
+  
+  template<class V>
+  void value_init(int i, V&& x) {
+    new (&fields_.values[i]) mutable_value_type(std::forward<V>(x));
+  }
+  
   void value_destroy(int i) {
     fields_.values[i].~mutable_value_type();
   }
@@ -885,11 +892,25 @@ class btree : public Params::key_compare {
   // class optimization] for more details.
   template <typename Base, typename Data>
   struct empty_base_handle : public Base {
-    empty_base_handle(const Base &b, const Data &d)
+    empty_base_handle(const Base &b, Data *d)
         : Base(b),
           data(d) {
     }
-    Data data;
+
+    empty_base_handle(empty_base_handle &&other) noexcept
+      : Base(std::move(other)) {
+      data = other.data;
+      other.data = nullptr;
+    }
+
+    empty_base_handle& operator=(empty_base_handle &&other) noexcept {
+      Base::operator=(std::move(other));
+      data = other.data;
+      other.data = nullptr;
+      return *this;
+    }
+    
+    Data *data;
   };
 
   struct node_stats {
@@ -937,6 +958,9 @@ class btree : public Params::key_compare {
   // Copy constructor.
   btree(const self_type &x);
 
+  // Move constructor.
+  btree(self_type &&x) noexcept;
+
   // Destructor.
   ~btree() {
     clear();
@@ -999,17 +1023,23 @@ class btree : public Params::key_compare {
   }
 
   // Inserts a value into the btree only if it does not already exist. The
-  // boolean return value indicates whether insertion succeeded or failed. The
-  // ValuePointer type is used to avoid instatiating the value unless the key
-  // is being inserted. Value is not dereferenced if the key already exists in
-  // the btree. See btree_map::operator[].
-  template <typename ValuePointer>
-  std::pair<iterator,bool> insert_unique(const key_type &key, ValuePointer value);
+  // boolean return value indicates whether insertion succeeded or failed.
+  std::pair<iterator,bool> insert_unique(const key_type &key, value_type&& value);
+
+  // Inserts a value into the btree only if it does not already exist. The
+  // boolean return value indicates whether insertion succeeded or failed.
+  std::pair<iterator,bool> insert_unique(const key_type &key, const value_type& value);
 
   // Inserts a value into the btree only if it does not already exist. The
   // boolean return value indicates whether insertion succeeded or failed.
   std::pair<iterator,bool> insert_unique(const value_type &v) {
-    return insert_unique(params_type::key(v), &v);
+    return insert_unique(params_type::key(v), v);
+  }
+
+  // Inserts a value into the btree only if it does not already exist. The
+  // boolean return value indicates whether insertion succeeded or failed.
+  std::pair<iterator,bool> insert_unique(value_type &&v) {
+    return insert_unique(params_type::key(v), std::move(v));
   }
 
   // Insert with hint. Check to see if the value should be placed immediately
@@ -1022,12 +1052,9 @@ class btree : public Params::key_compare {
   template <typename InputIterator>
   void insert_unique(InputIterator b, InputIterator e);
 
-  // Inserts a value into the btree. The ValuePointer type is used to avoid
-  // instatiating the value unless the key is being inserted. Value is not
-  // dereferenced if the key already exists in the btree. See
-  // btree_map::operator[].
-  template <typename ValuePointer>
-  iterator insert_multi(const key_type &key, ValuePointer value);
+  // Inserts a value into the btree.
+  iterator insert_multi(const key_type &key, const value_type &value);
+  iterator insert_multi(const key_type &key, value_type &&value);
 
   // Inserts a value into the btree.
   iterator insert_multi(const value_type &v) {
@@ -1112,6 +1139,14 @@ class btree : public Params::key_compare {
     return *this;
   }
 
+  self_type& operator=(self_type&& x) noexcept {
+    key_compare::operator=(std::move(x.key_comp()));
+    root_ = std::move(x.root_);
+    x.root_.data = nullptr;
+
+    return *this;
+  }
+  
   key_compare* mutable_key_comp() {
     return this;
   }
@@ -1305,7 +1340,8 @@ class btree : public Params::key_compare {
 
   // Inserts a value into the btree immediately before iter. Requires that
   // key(v) <= iter.key() and (--iter).key() <= key(v).
-  iterator internal_insert(iterator iter, const value_type &v);
+  template<class V>
+  iterator internal_insert(iterator iter, V &&v);
 
   // Returns an iterator pointing to the first value >= the value "iter" is
   // pointing at. Note that "iter" might be pointing to an invalid location as
@@ -1378,7 +1414,7 @@ class btree : public Params::key_compare {
   }
 
  private:
-  empty_base_handle<internal_allocator_type, node_type*> root_;
+  empty_base_handle<internal_allocator_type, node_type> root_;
 
  private:
   // A never instantiated helper function that returns big_ if we have a
@@ -1419,9 +1455,10 @@ class btree : public Params::key_compare {
 ////
 // btree_node methods
 template <typename P>
-inline void btree_node<P>::insert_value(int i, const value_type &x) {
+template <typename V>
+inline void btree_node<P>::insert_value(int i, V &&x) {
   assert(i <= count());
-  value_init(count(), x);
+  value_init(count(), std::forward<V>(x));
   for (int j = count(); j > i; --j) {
     value_swap(j, this, j - 1);
   }
@@ -1739,9 +1776,16 @@ btree<P>::btree(const self_type &x)
   assign(x);
 }
 
-template <typename P> template <typename ValuePointer>
+template <typename P>
+btree<P>::btree(self_type &&x) noexcept
+  : key_compare(std::move(x.key_comp())),
+    root_(std::move(x.root_)) {
+  x.root_.data = nullptr;
+}
+
+template <typename P>
 std::pair<typename btree<P>::iterator, bool>
-btree<P>::insert_unique(const key_type &key, ValuePointer value) {
+btree<P>::insert_unique(const key_type &key, value_type&& value) {
   if (empty()) {
     *mutable_root() = new_leaf_root_node(1);
   }
@@ -1759,10 +1803,33 @@ btree<P>::insert_unique(const key_type &key, ValuePointer value) {
     }
   }
 
-  return std::make_pair(internal_insert(iter, *value), true);
+  return std::make_pair(internal_insert(iter, std::move(value)), true);
 }
 
 template <typename P>
+std::pair<typename btree<P>::iterator, bool>
+btree<P>::insert_unique(const key_type &key, const value_type& value) {
+  if (empty()) {
+    *mutable_root() = new_leaf_root_node(1);
+  }
+
+  std::pair<iterator, int> res = internal_locate(key, iterator(root(), 0));
+  iterator &iter = res.first;
+  if (res.second == kExactMatch) {
+    // The key already exists in the tree, do nothing.
+    return std::make_pair(internal_last(iter), false);
+  } else if (!res.second) {
+    iterator last = internal_last(iter);
+    if (last.node && !compare_keys(key, last.key())) {
+      // The key already exists in the tree, do nothing.
+      return std::make_pair(last, false);
+    }
+  }
+
+  return std::make_pair(internal_insert(iter, value), true);
+}
+  
+template <typename P>
 inline typename btree<P>::iterator
 btree<P>::insert_unique(iterator position, const value_type &v) {
   if (!empty()) {
@@ -1795,9 +1862,23 @@ void btree<P>::insert_unique(InputIterator b, InputIterator e) {
   }
 }
 
-template <typename P> template <typename ValuePointer>
+template <typename P>
+typename btree<P>::iterator
+btree<P>::insert_multi(const key_type &key, value_type &&value) {
+  if (empty()) {
+    *mutable_root() = new_leaf_root_node(1);
+  }
+
+  iterator iter = internal_upper_bound(key, iterator(root(), 0));
+  if (!iter.node) {
+    iter = end();
+  }
+  return internal_insert(iter, std::move(value));
+}
+
+template <typename P>
 typename btree<P>::iterator
-btree<P>::insert_multi(const key_type &key, ValuePointer value) {
+btree<P>::insert_multi(const key_type &key, const value_type &value) {
   if (empty()) {
     *mutable_root() = new_leaf_root_node(1);
   }
@@ -1806,7 +1887,7 @@ btree<P>::insert_multi(const key_type &key, ValuePointer value) {
   if (!iter.node) {
     iter = end();
   }
-  return internal_insert(iter, *value);
+  return internal_insert(iter, value);
 }
 
 template <typename P>
@@ -2197,8 +2278,9 @@ inline IterType btree<P>::internal_last(IterType iter) {
 }
 
 template <typename P>
+template <typename V>
 inline typename btree<P>::iterator
-btree<P>::internal_insert(iterator iter, const value_type &v) {
+btree<P>::internal_insert(iterator iter, V &&v) {
   if (!iter.node->leaf()) {
     // We can't insert on an internal node. Instead, we'll insert after the
     // previous value which is guaranteed to be on a leaf node.
@@ -2223,7 +2305,7 @@ btree<P>::internal_insert(iterator iter, const value_type &v) {
   } else if (!root()->leaf()) {
     ++*mutable_size();
   }
-  iter.node->insert_value(iter.position, v);
+  iter.node->insert_value(iter.position, std::forward<V>(v));
   return iter;
 }
 
diff --git a/ext/include/btree/btree_container.h b/ext/include/btree/btree_container.h
index fb617ab..7895e67 100644
--- a/ext/include/btree/btree_container.h
+++ b/ext/include/btree/btree_container.h
@@ -56,6 +56,15 @@ class btree_container {
       : tree_(x.tree_) {
   }
 
+  btree_container(self_type &&x)
+      : tree_(std::move(x.tree_)) {
+  }
+
+  self_type& operator=(self_type&& x) noexcept {
+    tree_ = std::move(x.tree_);
+    return *this;
+  }
+  
   // Iterator routines.
   iterator begin() { return tree_.begin(); }
   const_iterator begin() const { return tree_.begin(); }
@@ -169,6 +178,14 @@ class btree_unique_container : public btree_container<Tree> {
       : super_type(x) {
   }
 
+  btree_unique_container(self_type &&x)
+      : super_type(std::move(x)) {
+  }
+
+  self_type& operator=(self_type&& x) noexcept {
+    return static_cast<self_type&>(super_type::operator=(std::move(x)));
+  }
+
   // Range constructor.
   template <class InputIterator>
   btree_unique_container(InputIterator b, InputIterator e,
@@ -200,6 +217,10 @@ class btree_unique_container : public btree_container<Tree> {
   void insert(InputIterator b, InputIterator e) {
     this->tree_.insert_unique(b, e);
   }
+  template<class P>
+  std::pair<iterator,bool> insert(P&& x) {
+    return this->tree_.insert_unique(std::forward<P>(x));
+  }
 
   // Deletion routines.
   int erase(const key_type &key) {
@@ -230,20 +251,6 @@ class btree_map_container : public btree_unique_container<Tree> {
   typedef typename Tree::key_compare key_compare;
   typedef typename Tree::allocator_type allocator_type;
 
- private:
-  // A pointer-like object which only generates its value when
-  // dereferenced. Used by operator[] to avoid constructing an empty data_type
-  // if the key already exists in the map.
-  struct generate_value {
-    generate_value(const key_type &k)
-        : key(k) {
-    }
-    value_type operator*() const {
-      return std::make_pair(key, data_type());
-    }
-    const key_type &key;
-  };
-
  public:
   // Default constructor.
   btree_map_container(const key_compare &comp = key_compare(),
@@ -256,6 +263,14 @@ class btree_map_container : public btree_unique_container<Tree> {
       : super_type(x) {
   }
 
+  btree_map_container(self_type &&x) noexcept
+      : super_type(std::move(x)) {
+  }
+
+  self_type& operator=(self_type&& x) noexcept {
+    return static_cast<self_type&>(super_type::operator=(std::move(x)));
+  }
+
   // Range constructor.
   template <class InputIterator>
   btree_map_container(InputIterator b, InputIterator e,
@@ -266,7 +281,7 @@ class btree_map_container : public btree_unique_container<Tree> {
 
   // Insertion routines.
   data_type& operator[](const key_type &key) {
-    return this->tree_.insert_unique(key, generate_value(key)).first->second;
+    return this->tree_.insert_unique(key, std::make_pair(key, data_type())).first->second;
   }
 };
 
diff --git a/ext/include/btree/btree_map.h b/ext/include/btree/btree_map.h
index b83489f..43b25de 100644
--- a/ext/include/btree/btree_map.h
+++ b/ext/include/btree/btree_map.h
@@ -63,6 +63,15 @@ class btree_map : public btree_map_container<
       : super_type(x) {
   }
 
+  // Move constructor.
+  btree_map(self_type &&x) noexcept
+    : super_type(std::move(x)) {
+  }
+
+  self_type& operator=(self_type&& x) noexcept {
+    return static_cast<self_type&>(super_type::operator=(std::move(x)));
+  }
+
   // Range constructor.
   template <class InputIterator>
   btree_map(InputIterator b, InputIterator e,
diff --git a/ext/include/btree/safe_btree.h b/ext/include/btree/safe_btree.h
index 2d85c70..d1227da 100644
--- a/ext/include/btree/safe_btree.h
+++ b/ext/include/btree/safe_btree.h
@@ -206,6 +206,11 @@ class safe_btree {
         generation_(1) {
   }
 
+  safe_btree(self_type &&x) noexcept
+    : tree_(std::move(x.tree_)),
+      generation_(x.generation_) {
+  }
+  
   iterator begin() {
     return iterator(this, tree_.begin());
   }
@@ -274,9 +279,8 @@ class safe_btree {
   }
 
   // Insertion routines.
-  template <typename ValuePointer>
-  std::pair<iterator, bool> insert_unique(const key_type &key, ValuePointer value) {
-    std::pair<tree_iterator, bool> p = tree_.insert_unique(key, value);
+  std::pair<iterator, bool> insert_unique(const key_type &key, value_type &&value) {
+    std::pair<tree_iterator, bool> p = tree_.insert_unique(key, std::move(value));
     generation_ += p.second;
     return std::make_pair(iterator(this, p.first), p.second);
   }
@@ -285,6 +289,11 @@ class safe_btree {
     generation_ += p.second;
     return std::make_pair(iterator(this, p.first), p.second);
   }
+  std::pair<iterator, bool> insert_unique(value_type &&v) {
+    std::pair<tree_iterator, bool> p = tree_.insert_unique(std::move(v));
+    generation_ += p.second;
+    return std::make_pair(iterator(this, p.first), p.second);
+  }
   iterator insert_unique(iterator position, const value_type &v) {
     tree_iterator tree_pos = position.iter();
     ++generation_;
@@ -300,6 +309,15 @@ class safe_btree {
     ++generation_;
     return iterator(this, tree_.insert_multi(v));
   }
+  iterator insert_multi(value_type &&v) {
+    ++generation_;
+    return iterator(this, tree_.insert_multi(std::move(v)));
+  }
+  iterator insert_multi(iterator position, value_type &&v) {
+    tree_iterator tree_pos = position.iter();
+    ++generation_;
+    return iterator(this, tree_.insert_multi(tree_pos, std::move(v)));
+  }
   iterator insert_multi(iterator position, const value_type &v) {
     tree_iterator tree_pos = position.iter();
     ++generation_;
@@ -321,6 +339,14 @@ class safe_btree {
     return *this;
   }
 
+  self_type& operator=(self_type&& x) noexcept {
+    tree_ = std::move(x.tree_);
+    generation_ = x.generation_;
+    x.generation_ = -1;
+
+    return *this;
+  }
+
   // Deletion routines.
   void erase(const iterator &begin, const iterator &end) {
     tree_.erase(begin.iter(), end.iter());
diff --git a/ext/include/btree/safe_btree_map.h b/ext/include/btree/safe_btree_map.h
index a0668f1..2eac400 100644
--- a/ext/include/btree/safe_btree_map.h
+++ b/ext/include/btree/safe_btree_map.h
@@ -69,6 +69,14 @@ class safe_btree_map : public btree_map_container<
       : super_type(x) {
   }
 
+  safe_btree_map(self_type&& x) noexcept
+    : super_type(std::move(x)) {
+  }
+
+  self_type& operator=(safe_btree_map&& x) noexcept {
+    return static_cast<self_type&>(super_type::operator=(std::move(x)));
+  }
+
   // Range constructor.
   template <class InputIterator>
   safe_btree_map(InputIterator b, InputIterator e,
diff --git a/ext/include/bwa/bntseq.h b/ext/include/bwa/bntseq.h
new file mode 100644
index 0000000..63ad3c3
--- /dev/null
+++ b/ext/include/bwa/bntseq.h
@@ -0,0 +1,91 @@
+/* The MIT License
+
+   Copyright (c) 2008 Genome Research Ltd (GRL).
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+*/
+
+/* Contact: Heng Li <lh3 at sanger.ac.uk> */
+
+#ifndef BWT_BNTSEQ_H
+#define BWT_BNTSEQ_H
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <zlib.h>
+
+#ifndef BWA_UBYTE
+#define BWA_UBYTE
+typedef uint8_t ubyte_t;
+#endif
+
+typedef struct {
+	int64_t offset;
+	int32_t len;
+	int32_t n_ambs;
+	uint32_t gi;
+	int32_t is_alt;
+	char *name, *anno;
+} bntann1_t;
+
+typedef struct {
+	int64_t offset;
+	int32_t len;
+	char amb;
+} bntamb1_t;
+
+typedef struct {
+	int64_t l_pac;
+	int32_t n_seqs;
+	uint32_t seed;
+	bntann1_t *anns; // n_seqs elements
+	int32_t n_holes;
+	bntamb1_t *ambs; // n_holes elements
+	FILE *fp_pac;
+} bntseq_t;
+
+extern unsigned char nst_nt4_table[256];
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+	void bns_dump(const bntseq_t *bns, const char *prefix);
+	bntseq_t *bns_restore(const char *prefix);
+	bntseq_t *bns_restore_core(const char *ann_filename, const char* amb_filename, const char* pac_filename);
+	void bns_destroy(bntseq_t *bns);
+	int64_t bns_fasta2bntseq(gzFile fp_fa, const char *prefix, int for_only);
+	int bns_pos2rid(const bntseq_t *bns, int64_t pos_f);
+	int bns_cnt_ambi(const bntseq_t *bns, int64_t pos_f, int len, int *ref_id);
+	uint8_t *bns_get_seq(int64_t l_pac, const uint8_t *pac, int64_t beg, int64_t end, int64_t *len);
+	uint8_t *bns_fetch_seq(const bntseq_t *bns, const uint8_t *pac, int64_t *beg, int64_t mid, int64_t *end, int *rid);
+	int bns_intv2rid(const bntseq_t *bns, int64_t rb, int64_t re);
+
+#ifdef __cplusplus
+}
+#endif
+
+static inline int64_t bns_depos(const bntseq_t *bns, int64_t pos, int *is_rev)
+{
+	return (*is_rev = (pos >= bns->l_pac))? (bns->l_pac<<1) - 1 - pos : pos;
+}
+
+#endif
diff --git a/ext/include/bwa/bwa.h b/ext/include/bwa/bwa.h
new file mode 100644
index 0000000..8f4e06e
--- /dev/null
+++ b/ext/include/bwa/bwa.h
@@ -0,0 +1,62 @@
+#ifndef BWA_H_
+#define BWA_H_
+
+#include <stdint.h>
+#include "bntseq.h"
+#include "bwt.h"
+
+#define BWA_IDX_BWT 0x1
+#define BWA_IDX_BNS 0x2
+#define BWA_IDX_PAC 0x4
+#define BWA_IDX_ALL 0x7
+
+#define BWA_CTL_SIZE 0x10000
+
+typedef struct bwaidx_s {
+	bwt_t    *bwt; // FM-index
+	bntseq_t *bns; // information on the reference sequences
+	uint8_t  *pac; // the actual 2-bit encoded reference sequences with 'N' converted to a random base
+
+	int    is_shm;
+	int64_t l_mem;
+	uint8_t  *mem;
+} bwaidx_t;
+
+typedef struct {
+	int l_seq, id;
+	char *name, *comment, *seq, *qual, *sam;
+} bseq1_t;
+
+extern int bwa_verbose;
+extern char bwa_rg_id[256];
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	bseq1_t *bseq_read(int chunk_size, int *n_, void *ks1_, void *ks2_);
+	void bseq_classify(int n, bseq1_t *seqs, int m[2], bseq1_t *sep[2]);
+
+	void bwa_fill_scmat(int a, int b, int8_t mat[25]);
+	uint32_t *bwa_gen_cigar(const int8_t mat[25], int q, int r, int w_, int64_t l_pac, const uint8_t *pac, int l_query, uint8_t *query, int64_t rb, int64_t re, int *score, int *n_cigar, int *NM);
+	uint32_t *bwa_gen_cigar2(const int8_t mat[25], int o_del, int e_del, int o_ins, int e_ins, int w_, int64_t l_pac, const uint8_t *pac, int l_query, uint8_t *query, int64_t rb, int64_t re, int *score, int *n_cigar, int *NM);
+
+	char *bwa_idx_infer_prefix(const char *hint);
+	bwt_t *bwa_idx_load_bwt(const char *hint);
+
+	bwaidx_t *bwa_idx_load_from_shm(const char *hint);
+	bwaidx_t *bwa_idx_load_from_disk(const char *hint, int which);
+	bwaidx_t *bwa_idx_load(const char *hint, int which);
+	void bwa_idx_destroy(bwaidx_t *idx);
+	int bwa_idx2mem(bwaidx_t *idx);
+	int bwa_mem2idx(int64_t l_mem, uint8_t *mem, bwaidx_t *idx);
+
+	void bwa_print_sam_hdr(const bntseq_t *bns, const char *hdr_line);
+	char *bwa_set_rg(const char *s);
+	char *bwa_insert_header(const char *s, char *hdr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/ext/include/bwa/bwamem.h b/ext/include/bwa/bwamem.h
new file mode 100644
index 0000000..3ca79ce
--- /dev/null
+++ b/ext/include/bwa/bwamem.h
@@ -0,0 +1,184 @@
+#ifndef BWAMEM_H_
+#define BWAMEM_H_
+
+#include "bwt.h"
+#include "bntseq.h"
+#include "bwa.h"
+
+#define MEM_MAPQ_COEF 30.0
+#define MEM_MAPQ_MAX  60
+
+struct __smem_i;
+typedef struct __smem_i smem_i;
+
+#define MEM_F_PE        0x2
+#define MEM_F_NOPAIRING 0x4
+#define MEM_F_ALL       0x8
+#define MEM_F_NO_MULTI  0x10
+#define MEM_F_NO_RESCUE 0x20
+#define MEM_F_REF_HDR	0x100
+#define MEM_F_SOFTCLIP  0x200
+#define MEM_F_SMARTPE   0x400
+
+typedef struct mem_opt_s {
+	int a, b;               // match score and mismatch penalty
+	int o_del, e_del;
+	int o_ins, e_ins;
+	int pen_unpaired;       // phred-scaled penalty for unpaired reads
+	int pen_clip5,pen_clip3;// clipping penalty. This score is not deducted from the DP score.
+	int w;                  // band width
+	int zdrop;              // Z-dropoff
+
+	uint64_t max_mem_intv;
+
+	int T;                  // output score threshold; only affecting output
+	int flag;               // see MEM_F_* macros
+	int min_seed_len;       // minimum seed length
+	int min_chain_weight;
+	int max_chain_extend;
+	float split_factor;     // split into a seed if MEM is longer than min_seed_len*split_factor
+	int split_width;        // split into a seed if its occurrence is smaller than this value
+	int max_occ;            // skip a seed if its occurrence is larger than this value
+	int max_chain_gap;      // do not chain seed if it is max_chain_gap-bp away from the closest seed
+	int n_threads;          // number of threads
+	int chunk_size;         // process chunk_size-bp sequences in a batch
+	float mask_level;       // regard a hit as redundant if the overlap with another better hit is over mask_level times the min length of the two hits
+	float drop_ratio;       // drop a chain if its seed coverage is below drop_ratio times the seed coverage of a better chain overlapping with the small chain
+	float XA_drop_ratio;    // when counting hits for the XA tag, ignore alignments with score < XA_drop_ratio * max_score; only effective for the XA tag
+	float mask_level_redun;
+	float mapQ_coef_len;
+	int mapQ_coef_fac;
+	int max_ins;            // when estimating insert size distribution, skip pairs with insert longer than this value
+	int max_matesw;         // perform maximally max_matesw rounds of mate-SW for each end
+	int max_XA_hits, max_XA_hits_alt; // if there are max_hits or fewer, output them all
+	int8_t mat[25];         // scoring matrix; mat[0] == 0 if unset
+} mem_opt_t;
+
+typedef struct {
+	int64_t rb, re; // [rb,re): reference sequence in the alignment
+	int qb, qe;     // [qb,qe): query sequence in the alignment
+	int rid;        // reference seq ID
+	int score;      // best local SW score
+	int truesc;     // actual score corresponding to the aligned region; possibly smaller than $score
+	int sub;        // 2nd best SW score
+	int alt_sc;
+	int csub;       // SW score of a tandem hit
+	int sub_n;      // approximate number of suboptimal hits
+	int w;          // actual band width used in extension
+	int seedcov;    // length of regions covered by seeds
+	int secondary;  // index of the parent hit shadowing the current hit; <0 if primary
+	int secondary_all;
+	int seedlen0;   // length of the starting seed
+	int n_comp:30, is_alt:2; // number of sub-alignments chained together
+	float frac_rep;
+	uint64_t hash;
+} mem_alnreg_t;
+
+typedef struct { size_t n, m; mem_alnreg_t *a; } mem_alnreg_v;
+
+typedef struct {
+	int low, high;   // lower and upper bounds within which a read pair is considered to be properly paired
+	int failed;      // non-zero if the orientation is not supported by sufficient data
+	double avg, std; // mean and stddev of the insert size distribution
+} mem_pestat_t;
+
+typedef struct { // This struct is only used for the convenience of API.
+	int64_t pos;     // forward strand 5'-end mapping position
+	int rid;         // reference sequence index in bntseq_t; <0 for unmapped
+	int flag;        // extra flag
+	uint32_t is_rev:1, is_alt:1, mapq:8, NM:22; // is_rev: whether on the reverse strand; mapq: mapping quality; NM: edit distance
+	int n_cigar;     // number of CIGAR operations
+	uint32_t *cigar; // CIGAR in the BAM encoding: opLen<<4|op; op to integer mapping: MIDSH=>01234
+	char *XA;        // alternative mappings
+
+	int score, sub, alt_sc;
+} mem_aln_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	smem_i *smem_itr_init(const bwt_t *bwt);
+	void smem_itr_destroy(smem_i *itr);
+	void smem_set_query(smem_i *itr, int len, const uint8_t *query);
+	void smem_config(smem_i *itr, int min_intv, int max_len, uint64_t max_intv);
+	const bwtintv_v *smem_next(smem_i *itr);
+
+	mem_opt_t *mem_opt_init(void);
+	void mem_fill_scmat(int a, int b, int8_t mat[25]);
+
+	/**
+	 * Align a batch of sequences and generate the alignments in the SAM format
+	 *
+	 * This routine requires $seqs[i].{l_seq,seq,name} and write $seqs[i].sam.
+	 * Note that $seqs[i].sam may consist of several SAM lines if the
+	 * corresponding sequence has multiple primary hits.
+	 *
+	 * In the paired-end mode (i.e. MEM_F_PE is set in $opt->flag), query
+	 * sequences must be interleaved: $n must be an even number and the 2i-th
+	 * sequence and the (2i+1)-th sequence constitute a read pair. In this
+	 * mode, there should be enough (typically >50) unique pairs for the
+	 * routine to infer the orientation and insert size.
+	 *
+	 * @param opt    alignment parameters
+	 * @param bwt    FM-index of the reference sequence
+	 * @param bns    Information of the reference
+	 * @param pac    2-bit encoded reference
+	 * @param n      number of query sequences
+	 * @param seqs   query sequences; $seqs[i].seq/sam to be modified after the call
+	 * @param pes0   insert-size info; if NULL, infer from data; if not NULL, it should be an array with 4 elements,
+	 *               corresponding to each FF, FR, RF and RR orientation. See mem_pestat() for more info.
+	 */
+	void mem_process_seqs(const mem_opt_t *opt, const bwt_t *bwt, const bntseq_t *bns, const uint8_t *pac, int64_t n_processed, int n, bseq1_t *seqs, const mem_pestat_t *pes0);
+
+	/**
+	 * Find the aligned regions for one query sequence
+	 *
+	 * Note that this routine does not generate CIGAR. CIGAR should be
+	 * generated later by mem_reg2aln() below.
+	 *
+	 * @param opt    alignment parameters
+	 * @param bwt    FM-index of the reference sequence
+	 * @param bns    Information of the reference
+	 * @param pac    2-bit encoded reference
+	 * @param l_seq  length of query sequence
+	 * @param seq    query sequence
+	 *
+	 * @return       list of aligned regions.
+	 */
+	mem_alnreg_v mem_align1(const mem_opt_t *opt, const bwt_t *bwt, const bntseq_t *bns, const uint8_t *pac, int l_seq, const char *seq);
+
+	/**
+	 * Generate CIGAR and forward-strand position from alignment region
+	 *
+	 * @param opt    alignment parameters
+	 * @param bns    Information of the reference
+	 * @param pac    2-bit encoded reference
+	 * @param l_seq  length of query sequence
+	 * @param seq    query sequence
+	 * @param ar     one alignment region
+	 *
+	 * @return       CIGAR, strand, mapping quality and forward-strand position
+	 */
+	mem_aln_t mem_reg2aln(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, int l_seq, const char *seq, const mem_alnreg_t *ar);
+	mem_aln_t mem_reg2aln2(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, int l_seq, const char *seq, const mem_alnreg_t *ar, const char *name);
+
+	/**
+	 * Infer the insert size distribution from interleaved alignment regions
+	 *
+	 * This function can be called after mem_align1(), as long as paired-end
+	 * reads are properly interleaved.
+	 *
+	 * @param opt    alignment parameters
+	 * @param l_pac  length of concatenated reference sequence
+	 * @param n      number of query sequences; must be an even number
+	 * @param regs   region array of size $n; 2i-th and (2i+1)-th elements constitute a pair
+	 * @param pes    inferred insert size distribution (output)
+	 */
+	void mem_pestat(const mem_opt_t *opt, int64_t l_pac, int n, const mem_alnreg_v *regs, mem_pestat_t pes[4]);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/ext/include/bwa/bwt.h b/ext/include/bwa/bwt.h
new file mode 100644
index 0000000..c71d6b5
--- /dev/null
+++ b/ext/include/bwa/bwt.h
@@ -0,0 +1,130 @@
+/* The MIT License
+
+   Copyright (c) 2008 Genome Research Ltd (GRL).
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+*/
+
+/* Contact: Heng Li <lh3 at sanger.ac.uk> */
+
+#ifndef BWA_BWT_H
+#define BWA_BWT_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+// requirement: (OCC_INTERVAL%16 == 0); please DO NOT change this line because some part of the code assume OCC_INTERVAL=0x80
+#define OCC_INTV_SHIFT 7
+#define OCC_INTERVAL   (1LL<<OCC_INTV_SHIFT)
+#define OCC_INTV_MASK  (OCC_INTERVAL - 1)
+
+#ifndef BWA_UBYTE
+#define BWA_UBYTE
+typedef unsigned char ubyte_t;
+#endif
+
+typedef uint64_t bwtint_t;
+
+typedef struct {
+	bwtint_t primary; // S^{-1}(0), or the primary index of BWT
+	bwtint_t L2[5]; // C(), cumulative count
+	bwtint_t seq_len; // sequence length
+	bwtint_t bwt_size; // size of bwt, about seq_len/4
+	uint32_t *bwt; // BWT
+	// occurrence array, separated into two parts
+	uint32_t cnt_table[256];
+	// suffix array
+	int sa_intv;
+	bwtint_t n_sa;
+	bwtint_t *sa;
+} bwt_t;
+
+typedef struct {
+	bwtint_t x[3], info;
+} bwtintv_t;
+
+typedef struct { size_t n, m; bwtintv_t *a; } bwtintv_v;
+
+/* For general OCC_INTERVAL, the following is correct:
+#define bwt_bwt(b, k) ((b)->bwt[(k)/OCC_INTERVAL * (OCC_INTERVAL/(sizeof(uint32_t)*8/2) + sizeof(bwtint_t)/4*4) + sizeof(bwtint_t)/4*4 + (k)%OCC_INTERVAL/16])
+#define bwt_occ_intv(b, k) ((b)->bwt + (k)/OCC_INTERVAL * (OCC_INTERVAL/(sizeof(uint32_t)*8/2) + sizeof(bwtint_t)/4*4)
+*/
+
+// The following two lines are ONLY correct when OCC_INTERVAL==0x80
+#define bwt_bwt(b, k) ((b)->bwt[((k)>>7<<4) + sizeof(bwtint_t) + (((k)&0x7f)>>4)])
+#define bwt_occ_intv(b, k) ((b)->bwt + ((k)>>7<<4))
+
+/* retrieve a character from the $-removed BWT string. Note that
+ * bwt_t::bwt is not exactly the BWT string and therefore this macro is
+ * called bwt_B0 instead of bwt_B */
+#define bwt_B0(b, k) (bwt_bwt(b, k)>>((~(k)&0xf)<<1)&3)
+
+#define bwt_set_intv(bwt, c, ik) ((ik).x[0] = (bwt)->L2[(int)(c)]+1, (ik).x[2] = (bwt)->L2[(int)(c)+1]-(bwt)->L2[(int)(c)], (ik).x[1] = (bwt)->L2[3-(c)]+1, (ik).info = 0)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	void bwt_dump_bwt(const char *fn, const bwt_t *bwt);
+	void bwt_dump_sa(const char *fn, const bwt_t *bwt);
+
+	bwt_t *bwt_restore_bwt(const char *fn);
+	void bwt_restore_sa(const char *fn, bwt_t *bwt);
+
+	void bwt_destroy(bwt_t *bwt);
+
+	void bwt_bwtgen(const char *fn_pac, const char *fn_bwt); // from BWT-SW
+	void bwt_bwtgen2(const char *fn_pac, const char *fn_bwt, int block_size); // from BWT-SW
+	void bwt_cal_sa(bwt_t *bwt, int intv);
+
+	void bwt_bwtupdate_core(bwt_t *bwt);
+
+	bwtint_t bwt_occ(const bwt_t *bwt, bwtint_t k, ubyte_t c);
+	void bwt_occ4(const bwt_t *bwt, bwtint_t k, bwtint_t cnt[4]);
+	bwtint_t bwt_sa(const bwt_t *bwt, bwtint_t k);
+
+	// more efficient version of bwt_occ/bwt_occ4 for retrieving two close Occ values
+	void bwt_gen_cnt_table(bwt_t *bwt);
+	void bwt_2occ(const bwt_t *bwt, bwtint_t k, bwtint_t l, ubyte_t c, bwtint_t *ok, bwtint_t *ol);
+	void bwt_2occ4(const bwt_t *bwt, bwtint_t k, bwtint_t l, bwtint_t cntk[4], bwtint_t cntl[4]);
+
+	int bwt_match_exact(const bwt_t *bwt, int len, const ubyte_t *str, bwtint_t *sa_begin, bwtint_t *sa_end);
+	int bwt_match_exact_alt(const bwt_t *bwt, int len, const ubyte_t *str, bwtint_t *k0, bwtint_t *l0);
+
+	/**
+	 * Extend bi-SA-interval _ik_
+	 */
+	void bwt_extend(const bwt_t *bwt, const bwtintv_t *ik, bwtintv_t ok[4], int is_back);
+
+	/**
+	 * Given a query _q_, collect potential SMEMs covering position _x_ and store them in _mem_.
+	 * Return the end of the longest exact match starting from _x_.
+	 */
+	int bwt_smem1(const bwt_t *bwt, int len, const uint8_t *q, int x, int min_intv, bwtintv_v *mem, bwtintv_v *tmpvec[2]);
+	int bwt_smem1a(const bwt_t *bwt, int len, const uint8_t *q, int x, int min_intv, uint64_t max_intv, bwtintv_v *mem, bwtintv_v *tmpvec[2]);
+
+	int bwt_seed_strategy1(const bwt_t *bwt, int len, const uint8_t *q, int x, int min_len, int max_intv, bwtintv_t *mem);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/ext/include/bwa/utils.h b/ext/include/bwa/utils.h
new file mode 100644
index 0000000..11966b8
--- /dev/null
+++ b/ext/include/bwa/utils.h
@@ -0,0 +1,111 @@
+/* The MIT License
+
+   Copyright (c) 2008 Genome Research Ltd (GRL).
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+*/
+
+/* Contact: Heng Li <lh3 at sanger.ac.uk> */
+
+#ifndef LH3_UTILS_H
+#define LH3_UTILS_H
+
+#include <stdint.h>
+#include <stdio.h>
+#include <zlib.h>
+
+#ifdef __GNUC__
+// Tell GCC to validate printf format string and args
+#define ATTRIBUTE(list) __attribute__ (list)
+#else
+#define ATTRIBUTE(list)
+#endif
+
+#define err_fatal_simple(msg) _err_fatal_simple(__func__, msg)
+#define err_fatal_simple_core(msg) _err_fatal_simple_core(__func__, msg)
+
+#define xopen(fn, mode) err_xopen_core(__func__, fn, mode)
+#define xreopen(fn, mode, fp) err_xreopen_core(__func__, fn, mode, fp)
+#define xzopen(fn, mode) err_xzopen_core(__func__, fn, mode)
+
+#define xassert(cond, msg) if ((cond) == 0) _err_fatal_simple_core(__func__, msg)
+
+typedef struct {
+	uint64_t x, y;
+} pair64_t;
+
+typedef struct { size_t n, m; uint64_t *a; } uint64_v;
+typedef struct { size_t n, m; pair64_t *a; } pair64_v;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	void err_fatal(const char *header, const char *fmt, ...) ATTRIBUTE((noreturn));
+	void err_fatal_core(const char *header, const char *fmt, ...) ATTRIBUTE((noreturn));
+	void _err_fatal_simple(const char *func, const char *msg) ATTRIBUTE((noreturn));
+	void _err_fatal_simple_core(const char *func, const char *msg) ATTRIBUTE((noreturn));
+	FILE *err_xopen_core(const char *func, const char *fn, const char *mode);
+	FILE *err_xreopen_core(const char *func, const char *fn, const char *mode, FILE *fp);
+	gzFile err_xzopen_core(const char *func, const char *fn, const char *mode);
+    size_t err_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream);
+	size_t err_fread_noeof(void *ptr, size_t size, size_t nmemb, FILE *stream);
+
+	int err_gzread(gzFile file, void *ptr, unsigned int len);
+	int err_fseek(FILE *stream, long offset, int whence);
+#define err_rewind(FP) err_fseek((FP), 0, SEEK_SET)
+	long err_ftell(FILE *stream);
+	int err_fprintf(FILE *stream, const char *format, ...)
+        ATTRIBUTE((format(printf, 2, 3)));
+	int err_printf(const char *format, ...)
+        ATTRIBUTE((format(printf, 1, 2)));
+	int err_fputc(int c, FILE *stream);
+#define err_putchar(C) err_fputc((C), stdout)
+	int err_fputs(const char *s, FILE *stream);
+	int err_puts(const char *s);
+	int err_fflush(FILE *stream);
+	int err_fclose(FILE *stream);
+	int err_gzclose(gzFile file);
+
+	double cputime();
+	double realtime();
+
+	void ks_introsort_64 (size_t n, uint64_t *a);
+	void ks_introsort_128(size_t n, pair64_t *a);
+
+#ifdef __cplusplus
+}
+#endif
+
+static inline uint64_t hash_64(uint64_t key)
+{
+	key += ~(key << 32);
+	key ^= (key >> 22);
+	key += ~(key << 13);
+	key ^= (key >> 8);
+	key += (key << 3);
+	key ^= (key >> 15);
+	key += ~(key << 27);
+	key ^= (key >> 31);
+	return key;
+}
+
+#endif
diff --git a/ext/include/cuckoo/LICENSE b/ext/include/cuckoo/LICENSE
new file mode 100644
index 0000000..9d8b367
--- /dev/null
+++ b/ext/include/cuckoo/LICENSE
@@ -0,0 +1,18 @@
+Copyright (C) 2013, Carnegie Mellon University and Intel Corporation
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+---------------------------
+
+The third-party libraries have their own licenses, as detailed in their source
+files.
diff --git a/ext/include/cuckoo/city_hasher.hh b/ext/include/cuckoo/city_hasher.hh
new file mode 100644
index 0000000..a698705
--- /dev/null
+++ b/ext/include/cuckoo/city_hasher.hh
@@ -0,0 +1,44 @@
+/** \file */
+
+#ifndef _CITY_HASHER_HH
+#define _CITY_HASHER_HH
+
+#include <city.h>
+#include <string>
+
+/*! CityHasher adapts CityHash to the std::hash call interface so it can
+ *  be supplied as the Hash template argument of standard and libcuckoo
+ *  containers.  Prefer it over the default std::hash when possible. */
+template <class Key>
+class CityHasher {
+public:
+    //! Hashes the raw object bytes of @p k with CityHash.
+    size_t operator()(const Key& k) const {
+        const char* bytes = (const char*) &k;
+        if (sizeof(size_t) >= 8) {
+            /* On 64-bit builds the cast is a no-op; it exists only to
+             * silence MSVC's truncation warning on 32-bit builds, where
+             * this branch is dead code anyway. */
+            return static_cast<size_t>(CityHash64(bytes, sizeof(k)));
+        }
+        return CityHash32(bytes, sizeof(k));
+    }
+};
+
+/*! Specialization of CityHasher for std::string: hashes the string's
+ *  character data rather than the bytes of the string object itself. */
+template <>
+class CityHasher<std::string> {
+public:
+    //! Hashes the character contents of @p k with CityHash.
+    size_t operator()(const std::string& k) const {
+        if (sizeof(size_t) >= 8) {
+            /* On 64-bit builds the cast is a no-op; it exists only to
+             * silence MSVC's truncation warning on 32-bit builds, where
+             * this branch is dead code anyway. */
+            return static_cast<size_t>(CityHash64(k.c_str(), k.size()));
+        }
+        return CityHash32(k.c_str(), k.size());
+    }
+};
+
+#endif // _CITY_HASHER_HH
diff --git a/ext/include/cuckoo/cuckoohash_config.hh b/ext/include/cuckoo/cuckoohash_config.hh
new file mode 100644
index 0000000..e894c9b
--- /dev/null
+++ b/ext/include/cuckoo/cuckoohash_config.hh
@@ -0,0 +1,36 @@
+/** \file */
+
+#ifndef _CUCKOOHASH_CONFIG_HH
+#define _CUCKOOHASH_CONFIG_HH
+
+#include <cstddef>
+
+//! The default maximum number of keys per bucket
+constexpr size_t LIBCUCKOO_DEFAULT_SLOT_PER_BUCKET = 4;
+
+//! The default number of elements in an empty hash table
+//! (2^16 buckets times the default slots per bucket).
+constexpr size_t LIBCUCKOO_DEFAULT_SIZE =
+    (1U << 16) * LIBCUCKOO_DEFAULT_SLOT_PER_BUCKET;
+
+//! On a scale of 0 to 16, the memory granularity of the locks array. 0 is the
+//! least granular, meaning the array is a contiguous array and thus offers the
+//! best performance but the greatest memory overhead. 16 is the most granular,
+//! offering the least memory overhead but worse performance.
+constexpr size_t LIBCUCKOO_LOCK_ARRAY_GRANULARITY = 0;
+
+//! The default minimum load factor that the table allows for automatic
+//! expansion. It must be a number between 0.0 and 1.0. The table will throw
+//! libcuckoo_load_factor_too_low if the load factor falls below this value
+//! during an automatic expansion.
+constexpr double LIBCUCKOO_DEFAULT_MINIMUM_LOAD_FACTOR = 0.001;
+
+//! An alias for the value that sets no limit on the maximum hashpower. If this
+//! value is set as the maximum hashpower limit, there will be no limit. Since 0
+//! is the only hashpower that can never occur, it should stay at 0. This is
+//! also the default initial value for the maximum hashpower in a table.
+constexpr size_t LIBCUCKOO_NO_MAXIMUM_HASHPOWER = 0;
+
+//! set LIBCUCKOO_DEBUG to 1 to enable debug output
+//! (compile-time switch; 0 disables all debug printing)
+#define LIBCUCKOO_DEBUG 0
+
+#endif // _CUCKOOHASH_CONFIG_HH
diff --git a/ext/include/cuckoo/cuckoohash_map.hh b/ext/include/cuckoo/cuckoohash_map.hh
new file mode 100644
index 0000000..08448e8
--- /dev/null
+++ b/ext/include/cuckoo/cuckoohash_map.hh
@@ -0,0 +1,2537 @@
+/** \file */
+
+#ifndef _CUCKOOHASH_MAP_HH
+#define _CUCKOOHASH_MAP_HH
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <bitset>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <stdexcept>
+#include <thread>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "cuckoohash_config.hh"
+#include "cuckoohash_util.hh"
+#include "libcuckoo_lazy_array.hh"
+
+/**
+ * A concurrent hash table
+ *
+ * @tparam Key type of keys in the table
+ * @tparam T type of values in the table
+ * @tparam Pred type of equality comparison functor
+ * @tparam Alloc type of key-value pair allocator
+ * @tparam SLOT_PER_BUCKET number of slots for each bucket in the table
+ */
+template < class Key,
+           class T,
+           class Hash = std::hash<Key>,
+           class Pred = std::equal_to<Key>,
+           class Alloc = std::allocator<std::pair<const Key, T>>,
+           std::size_t SLOT_PER_BUCKET = LIBCUCKOO_DEFAULT_SLOT_PER_BUCKET
+           >
+class cuckoohash_map {
+public:
+    /** @name Type Declarations */
+    /**@{*/
+
+    using key_type = Key;
+    using mapped_type = T;
+    using value_type = std::pair<const Key, T>;
+    using size_type = std::size_t;
+    using difference_type = std::ptrdiff_t;
+    using hasher = Hash;
+    using key_equal = Pred;
+    using allocator_type = Alloc;
+
+private:
+    using allocator_traits_ = std::allocator_traits<allocator_type>;
+
+public:
+    using reference = value_type&;
+    using const_reference = const value_type&;
+    using pointer = typename allocator_traits_::pointer;
+    using const_pointer = typename allocator_traits_::const_pointer;
+    class locked_table;
+
+    /**@}*/
+
+    /** @name Table Parameters */
+    /**@{*/
+
+    /**
+     * The number of slots per hash bucket
+     */
+    static constexpr size_type slot_per_bucket() {
+        return SLOT_PER_BUCKET;
+    }
+
+    /**@}*/
+
+    /** @name Constructors and Destructors */
+    /**@{*/
+
+    /**
+     * Creates a new cuckohash_map instance
+     *
+     * @param n the number of elements to reserve space for initially
+     * @param hf hash function instance to use
+     * @param eql equality function instance to use
+     * @param alloc allocator instance to use
+     */
+    cuckoohash_map(size_type n = LIBCUCKOO_DEFAULT_SIZE,
+                   const hasher& hf = hasher(),
+                   const key_equal& eql = key_equal(),
+                   const allocator_type& alloc = allocator_type())
+        // NOTE(review): reserve_calc presumably rounds n up to the
+        // smallest hashpower whose capacity can hold n elements --
+        // confirm against its definition (not visible here).
+        : hashpower_(reserve_calc(n)),
+          hash_fn_(hf),
+          eq_fn_(eql),
+          allocator_(alloc),
+          // Both the bucket array and the lock array are sized to the
+          // number of buckets for the computed hashpower.
+          buckets_(hashsize(hashpower()), alloc),
+          locks_(hashsize(hashpower()), alloc),
+          expansion_lock_(),
+          minimum_load_factor_(LIBCUCKOO_DEFAULT_MINIMUM_LOAD_FACTOR),
+          maximum_hashpower_(LIBCUCKOO_NO_MAXIMUM_HASHPOWER) {}
+
+    /**
+     * Destroys the table. The destructors of all elements stored in the table
+     * are destroyed, and then the table storage is deallocated.
+     */
+    ~cuckoohash_map() {
+        // cuckoo_clear destroys every stored element; the member
+        // destructors then release the bucket and lock storage.
+        cuckoo_clear();
+    }
+
+    /**@}*/
+
+    /** @name Table Details
+     *
+     * Methods for getting information about the table. Methods that query
+     * changing properties of the table are not synchronized with concurrent
+     * operations, and may return out-of-date information if the table is being
+     * concurrently modified.
+     *
+     */
+    /**@{*/
+
+    /**
+     * Returns the function that hashes the keys
+     *
+     * @return the hash function
+     */
+    hasher hash_function() const {
+        return hash_fn_;
+    }
+
+    /**
+     * Returns the function that compares keys for equality
+     *
+     * @return the key comparison function
+     */
+    key_equal key_eq() const {
+        return eq_fn_;
+    }
+
+    /**
+     * Returns the allocator associated with the container
+     *
+     * @return the associated allocator
+     */
+    allocator_type get_allocator() const {
+        return allocator_;
+    }
+
+    /**
+     * Returns the hashpower of the table, which is log<SUB>2</SUB>(@ref
+     * bucket_count()).
+     *
+     * @return the hashpower
+     */
+    size_type hashpower() const {
+        return hashpower_.load(std::memory_order_acquire);
+    }
+
+    /**
+     * Returns the number of buckets in the table.
+     *
+     * @return the bucket count
+     */
+    size_type bucket_count() const {
+        return buckets_.size();
+    }
+
+    /**
+     * Returns whether the table is empty or not.
+     *
+     * Element counts are kept per lock stripe; this reads them without
+     * taking any locks, so under concurrent modification the answer is
+     * only a best-effort snapshot (see the section note above).
+     *
+     * @return true if the table is empty, false otherwise
+     */
+    bool empty() const {
+        for (size_type i = 0; i < locks_.size(); ++i) {
+            if (locks_[i].elem_counter() > 0) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Returns the number of elements in the table.
+     *
+     * Computed by summing the per-stripe counters without locking, so
+     * the result may be stale if the table is being modified.
+     *
+     * @return number of elements in the table
+     */
+    size_type size() const {
+        size_type s = 0;
+        for (size_type i = 0; i < locks_.size(); ++i) {
+            s += locks_[i].elem_counter();
+        }
+        return s;
+    }
+
+    /** Returns the current capacity of the table, that is, @ref bucket_count()
+     * × @ref slot_per_bucket().
+     *
+     * @return capacity of table
+     */
+    size_type capacity() const {
+        return bucket_count() * slot_per_bucket();
+    }
+
+    /**
+     * Returns the percentage the table is filled, that is, @ref size() ÷
+     * @ref capacity().
+     *
+     * @return load factor of the table
+     */
+    double load_factor() const {
+        return static_cast<double>(size()) / static_cast<double>(capacity());
+    }
+
+    /**
+     * Sets the minimum load factor allowed for automatic expansions.
+     * Should an automatic expansion be triggered while the table's load
+     * factor sits below this threshold, @ref
+     * libcuckoo_load_factor_too_low is thrown; explicitly-requested
+     * expansions are exempt.
+     *
+     * @param mlf the new minimum load factor
+     * @throw std::invalid_argument if @p mlf is outside [0.0, 1.0]
+     */
+    void minimum_load_factor(const double mlf) {
+        // Validate the range before publishing the new threshold.
+        if (mlf < 0.0) {
+            throw std::invalid_argument(
+                "load factor " + std::to_string(mlf) + " cannot be "
+                "less than 0");
+        }
+        if (mlf > 1.0) {
+            throw std::invalid_argument(
+                "load factor " + std::to_string(mlf) + " cannot be "
+                "greater than 1");
+        }
+        minimum_load_factor_.store(mlf, std::memory_order_release);
+    }
+
+    /**
+     * Returns the minimum load factor of the table
+     *
+     * @return the minimum load factor
+     */
+    double minimum_load_factor() {
+        return minimum_load_factor_.load(std::memory_order_acquire);
+    }
+
+    /**
+     * Sets the maximum hashpower the table can be. If set to @ref
+     * LIBCUCKOO_NO_MAXIMUM_HASHPOWER, there will be no limit on the hashpower.
+     * Otherwise, the table will not be able to expand beyond the given
+     * hashpower, either by an explicit or an automatic expansion.
+     *
+     * @param mhp the hashpower to set the maximum to
+     * @throw std::invalid_argument if the current hashpower exceeds the limit
+     */
+    void maximum_hashpower(size_type mhp) {
+        // A limit below the current hashpower could never be honored.
+        if (mhp != LIBCUCKOO_NO_MAXIMUM_HASHPOWER && hashpower() > mhp) {
+            throw std::invalid_argument(
+                "maximum hashpower " + std::to_string(mhp) + " is less than "
+                "current hashpower");
+        }
+        maximum_hashpower_.store(mhp, std::memory_order_release);
+    }
+
+    /**
+     * Returns the maximum hashpower of the table
+     *
+     * @return the maximum hashpower
+     */
+    size_type maximum_hashpower() {
+        return maximum_hashpower_.load(std::memory_order_acquire);
+    }
+
+    /**@}*/
+
+    /** @name Table Operations
+     *
+     * These are operations that affect the data in the table. They are safe to
+     * call concurrently with each other.
+     *
+     */
+    /**@{*/
+
+    /**
+     * Looks up @p key and, if present, passes a const reference to its
+     * mapped value to @p fn.  The functor must not modify the value.
+     *
+     * @tparam K any type comparable with @c key_type
+     * @tparam F functor implementing
+     * <tt>void operator()(const mapped_type&)</tt>
+     * @param key the key to search for
+     * @param fn invoked on the mapped value when the key is present
+     * @return true if the key was found and @p fn invoked, else false
+     */
+    template <typename K, typename F>
+    bool find_fn(const K& key, F fn) const {
+        const hash_value hv = hashed_key(key);
+        // Hold both candidate buckets for the duration of the lookup.
+        const auto guard = snapshot_and_lock_two<locking_active>(hv);
+        const table_position pos = cuckoo_find(
+            key, hv.partial, guard.first(), guard.second());
+        if (pos.status != ok) {
+            return false;
+        }
+        fn(buckets_[pos.index].val(pos.slot));
+        return true;
+    }
+
+    /**
+     * Looks up @p key and, if present, passes a mutable reference to its
+     * mapped value to @p fn, which may modify it in place.
+     *
+     * @tparam K any type comparable with @c key_type
+     * @tparam F functor implementing
+     * <tt>void operator()(mapped_type&)</tt>
+     * @param key the key to search for
+     * @param fn invoked on the mapped value when the key is present
+     * @return true if the key was found and @p fn invoked, else false
+     */
+    template <typename K, typename F>
+    bool update_fn(const K& key, F fn) {
+        const hash_value hv = hashed_key(key);
+        // Hold both candidate buckets while the functor runs, so the
+        // mutation is race-free.
+        const auto guard = snapshot_and_lock_two<locking_active>(hv);
+        const table_position pos = cuckoo_find(
+            key, hv.partial, guard.first(), guard.second());
+        if (pos.status != ok) {
+            return false;
+        }
+        fn(buckets_[pos.index].val(pos.slot));
+        return true;
+    }
+
+    /**
+     * Searches for @p key in the table. If the key is not there, it is inserted
+     * with @p val. If the key is there, then @p fn is called on the value. The
+     * key will be immediately constructed as @c key_type(std::forward<K>(key)).
+     * If the insertion succeeds, this constructed key will be moved into the
+     * table and the value constructed from the @p val parameters. If the
+     * insertion fails, the constructed key will be destroyed, and the @p val
+     * parameters will remain valid. If there is no room left in the table, it
+     * will be automatically expanded. Expansion may throw exceptions.
+     *
+     * @tparam K type of the key
+     * @tparam F type of the functor. It should implement the method
+     * <tt>void operator()(mapped_type&)</tt>.
+     * @tparam Args list of types for the value constructor arguments
+     * @param key the key to insert into the table
+     * @param fn the functor to invoke if the element is found
+     * @param val a list of constructor arguments with which to create the value
+     * @return true if a new key was inserted, false if the key was already in
+     * the table
+     */
+    template <typename K, typename F, typename... Args>
+    bool upsert(K&& key, F fn, Args&&... val) {
+        // Construct the key eagerly so it can be hashed once and used by
+        // both the insert and the already-present paths.
+        K k(std::forward<K>(key));
+        hash_value hv = hashed_key(k);
+        auto b = snapshot_and_lock_two<locking_active>(hv);
+        // cuckoo_insert_loop returns status ok with a free slot when the
+        // key is absent; otherwise it reports the existing position.
+        table_position pos = cuckoo_insert_loop(hv, b, k);
+        if (pos.status == ok) {
+            // NOTE(review): k is passed as an lvalue here, so whether it
+            // is moved or copied into the bucket depends on
+            // add_to_bucket's signature -- confirm against its definition.
+            add_to_bucket(pos.index, pos.slot, hv.partial, k,
+                          std::forward<Args>(val)...);
+        } else {
+            // Key already present: run the update functor instead.
+            fn(buckets_[pos.index].val(pos.slot));
+        }
+        return pos.status == ok;
+    }
+
+    /**
+     * Looks up @p key and, if present, passes a mutable reference to its
+     * mapped value to @p fn.  When the functor returns @c true the
+     * element is removed from the table; when it returns @c false the
+     * (possibly mutated) element is kept.
+     *
+     * @tparam K type of the key
+     * @tparam F functor implementing
+     * <tt>bool operator()(mapped_type&)</tt>
+     * @param key the key to possibly erase from the table
+     * @param fn invoked on the mapped value when the key is present
+     * @return true if @p key was found and @p fn invoked, false otherwise
+     */
+    template <typename K, typename F>
+    bool erase_fn(const K& key, F fn) {
+        const hash_value hv = hashed_key(key);
+        const auto guard = snapshot_and_lock_two<locking_active>(hv);
+        const table_position pos = cuckoo_find(
+            key, hv.partial, guard.first(), guard.second());
+        if (pos.status != ok) {
+            return false;
+        }
+        // The functor's return value decides whether we actually erase.
+        if (fn(buckets_[pos.index].val(pos.slot))) {
+            del_from_bucket(buckets_[pos.index], pos.index, pos.slot);
+        }
+        return true;
+    }
+
+    /**
+     * Copies the value associated with @p key into @p val.  A thin
+     * wrapper over @ref find_fn; @c mapped_type must be
+     * @c CopyAssignable.
+     */
+    template <typename K>
+    bool find(const K& key, mapped_type& val) const {
+        return find_fn(key, [&val](const mapped_type& v) { val = v; });
+    }
+
+    /** Looks up @p key and returns a copy of its mapped value.
+     * @c mapped_type must be @c CopyConstructible.
+     *
+     * @tparam K type of the key
+     * @param key the key to search for
+     * @return the value associated with the given key
+     * @throw std::out_of_range if the key is not found
+     */
+    template <typename K>
+    mapped_type find(const K& key) const {
+        const hash_value hv = hashed_key(key);
+        const auto guard = snapshot_and_lock_two<locking_active>(hv);
+        const table_position pos = cuckoo_find(
+            key, hv.partial, guard.first(), guard.second());
+        if (pos.status != ok) {
+            throw std::out_of_range("key not found in table");
+        }
+        return buckets_[pos.index].val(pos.slot);
+    }
+
+    /** Returns whether or not @p key is in the table. Equivalent to @ref
+     * find_fn with a functor that does nothing.
+     */
+    template <typename K>
+    bool contains(const K& key) const {
+        // The no-op functor makes this a pure presence check.
+        return find_fn(key, [](const mapped_type&) {});
+    }
+
+    /**
+     * Updates the value associated with @p key to @p val. Equivalent to calling
+     * @ref update_fn with a functor that copies @p val into the associated
+     * value. @c mapped_type must be @c MoveAssignable or @c CopyAssignable.
+     *
+     * @return true if the key was present (and updated), false otherwise
+     */
+    template <typename K, typename V>
+    bool update(const K& key, V&& val) {
+        return update_fn(key, [&val](mapped_type& v) {
+                v = std::forward<V>(val);
+            });
+    }
+
+    /**
+     * Inserts the key-value pair into the table. Equivalent to calling @ref
+     * upsert with a functor that does nothing.
+     *
+     * @return true if the key was newly inserted, false if already present
+     */
+    template <typename K, typename... Args>
+    bool insert(K&& key, Args&&... val) {
+        return upsert(std::forward<K>(key), [](mapped_type&) {},
+                      std::forward<Args>(val)...);
+    }
+
+    /**
+     * Erases the key from the table. Equivalent to calling @ref erase_fn with a
+     * functor that just returns true.
+     *
+     * @return true if the key was present (and erased), false otherwise
+     */
+    template <typename K>
+    bool erase(const K& key) {
+        return erase_fn(key, [](mapped_type&) { return true; });
+    }
+
+    /**
+     * Resizes the table to the given hashpower. If this hashpower is not larger
+     * than the current hashpower, then it decreases the hashpower to the
+     * maximum of the specified value and the smallest hashpower that can hold
+     * all the elements currently in the table.
+     *
+     * @param n the hashpower to set for the table
+     * @return true if the table changed size, false otherwise
+     */
+    bool rehash(size_type n) {
+        // Delegates to cuckoo_rehash with real locking enabled.
+        return cuckoo_rehash<locking_active>(n);
+    }
+
+    /**
+     * Reserve enough space in the table for the given number of elements. If
+     * the table can already hold that many elements, the function will shrink
+     * the table to the smallest hashpower that can hold the maximum of the
+     * specified amount and the current table size.
+     *
+     * @param n the number of elements to reserve space for
+     * @return true if the size of the table changed, false otherwise
+     */
+    bool reserve(size_type n) {
+        // Delegates to cuckoo_reserve with real locking enabled.
+        return cuckoo_reserve<locking_active>(n);
+    }
+
+    /**
+     * Removes all elements in the table, calling their destructors.
+     */
+    void clear() {
+        // The unlocker holds every lock stripe; its destructor releases
+        // them all when this scope exits.
+        auto unlocker = snapshot_and_lock_all<locking_active>();
+        cuckoo_clear();
+    }
+
+    /**
+     * Construct a @ref locked_table object that owns all the locks in the
+     * table.
+     *
+     * @return a \ref locked_table instance
+     */
+    locked_table lock_table() {
+        return locked_table(*this);
+    }
+
+    /**@}*/
+
+private:
+    // Hashing types and functions
+
+    // Type of the partial key
+    using partial_t = uint8_t;
+
+    // true if the key is small and simple, which means using partial keys for
+    // lookup would probably slow us down
+    static constexpr bool is_simple =
+        std::is_pod<key_type>::value && sizeof(key_type) <= 8;
+
+    // Contains a hash and partial for a given key. The partial key is used for
+    // partial-key cuckoohashing, and for finding the alternate bucket of that a
+    // key hashes to.
+    struct hash_value {
+        size_type hash;
+        partial_t partial;
+    };
+
+    // Computes the full hash of `key` together with its derived 8-bit
+    // partial key (see partial_key below).
+    template <typename K>
+    hash_value hashed_key(const K& key) const {
+        const size_type hash = hash_function()(key);
+        return { hash, partial_key(hash) };
+    }
+
+    // Computes only the full hash of `key`, skipping the partial-key
+    // derivation.
+    template <typename K>
+    size_type hashed_key_only_hash(const K& key) const {
+        return hash_function()(key);
+    }
+
+    // hashsize returns the number of buckets corresponding to a given
+    // hashpower (always a power of two).
+    static inline size_type hashsize(const size_type hp) {
+        return size_type(1) << hp;
+    }
+
+    // hashmask returns the bitmask for the buckets array corresponding to a
+    // given hashpower (hashsize - 1, i.e. all-ones below the hashpower bit).
+    static inline size_type hashmask(const size_type hp) {
+        return hashsize(hp) - 1;
+    }
+
+    // The partial key must only depend on the hash value. It cannot change with
+    // the hashpower, because, in order for `cuckoo_fast_double` to work
+    // properly, the alt_index must only grow by one bit at the top each time we
+    // expand the table.
+    //
+    // The hash is folded down to 8 bits by repeatedly XOR-ing the top half
+    // into the bottom half, so every bit of the hash contributes to the
+    // resulting partial key.
+    static partial_t partial_key(const size_type hash) {
+        const uint64_t hash_64bit = hash;
+        const uint32_t hash_32bit = (
+            static_cast<uint32_t>(hash_64bit) ^
+            static_cast<uint32_t>(hash_64bit >> 32));
+        const uint16_t hash_16bit = (
+            static_cast<uint16_t>(hash_32bit) ^
+            static_cast<uint16_t>(hash_32bit >> 16));
+        // Declared uint16_t upstream, but the XOR of two uint8_t values
+        // always fits in 8 bits; use uint8_t to match the name and the
+        // partial_t return type (behavior is unchanged).
+        const uint8_t hash_8bit = (
+            static_cast<uint8_t>(hash_16bit) ^
+            static_cast<uint8_t>(hash_16bit >> 8));
+        return hash_8bit;
+    }
+
+    // index_hash returns the first possible bucket that the given hashed key
+    // could be: simply the low hashpower bits of the hash.
+    static inline size_type index_hash(const size_type hp, const size_type hv) {
+        return hv & hashmask(hp);
+    }
+
+    // alt_index returns the other possible bucket that the given hashed key
+    // could be. It takes the first possible bucket as a parameter. Note that
+    // this function will return the first possible bucket if index is the
+    // second possible bucket, so alt_index(ti, partial, alt_index(ti, partial,
+    // index_hash(ti, hv))) == index_hash(ti, hv).
+    //
+    // The XOR-with-a-constant-offset form is what makes the mapping its
+    // own inverse, and using only the partial key means the alternate
+    // bucket can be computed without re-hashing the full key.
+    static inline size_type alt_index(const size_type hp, const partial_t partial,
+                                   const size_type index) {
+        // ensure tag is nonzero for the multiply. 0xc6a4a7935bd1e995 is the
+        // hash constant from 64-bit MurmurHash2
+        const size_type nonzero_tag = static_cast<size_type>(partial) + 1;
+        return (index ^ (nonzero_tag * 0xc6a4a7935bd1e995)) & hashmask(hp);
+    }
+
+    // Locking types and functions
+
+    using locking_active = std::integral_constant<bool, true>;
+    using locking_inactive = std::integral_constant<bool, false>;
+
+    // A fast, lightweight spinlock built on std::atomic_flag. The 64-byte
+    // alignment gives each lock its own (typically cache-line-sized) slot,
+    // which avoids false sharing between adjacent locks in the array. The
+    // locking_active/locking_inactive tag overloads let the same call
+    // sites compile to real locking or to no-ops (e.g. when the caller
+    // already holds all locks).
+    LIBCUCKOO_SQUELCH_PADDING_WARNING
+    class LIBCUCKOO_ALIGNAS(64) spinlock {
+    public:
+        spinlock() noexcept : elem_counter_(0) {
+            lock_.clear();
+        }
+
+        // Busy-waits until the flag is acquired.
+        void lock(locking_active) {
+            while (lock_.test_and_set(std::memory_order_acq_rel));
+        }
+
+        void lock(locking_inactive) {}
+
+        void unlock(locking_active) {
+            lock_.clear(std::memory_order_release);
+        }
+
+        void unlock(locking_inactive) {}
+
+        // Single attempt; true on acquisition.
+        bool try_lock(locking_active) {
+            return !lock_.test_and_set(std::memory_order_acq_rel);
+        }
+
+        bool try_lock(locking_inactive) {
+            return true;
+        }
+
+        // Per-stripe element count. NOTE(review): the counter itself is
+        // not atomic -- presumably it is only modified while this lock is
+        // held, and summed racily in size()/empty(); confirm at call sites.
+        size_type& elem_counter() {
+            return elem_counter_;
+        }
+
+    private:
+        std::atomic_flag lock_;
+        size_type elem_counter_;
+    };
+
+    // The type of the locks container
+    static_assert(LIBCUCKOO_LOCK_ARRAY_GRANULARITY >= 0 &&
+                  LIBCUCKOO_LOCK_ARRAY_GRANULARITY <= 16,
+                  "LIBCUCKOO_LOCK_ARRAY_GRANULARITY constant must be between "
+                  "0 and 16, inclusive");
+    using locks_t = libcuckoo_lazy_array<
+        16 - LIBCUCKOO_LOCK_ARRAY_GRANULARITY, LIBCUCKOO_LOCK_ARRAY_GRANULARITY,
+        spinlock,
+        typename allocator_traits_::template rebind_alloc<spinlock>
+        >;
+
+    // The type of the expansion lock
+    using expansion_lock_t = std::mutex;
+
+    // Classes for managing locked buckets. By storing and moving around sets of
+    // locked buckets in these classes, we can ensure that they are unlocked
+    // properly.
+
+    // RAII holder for a single locked bucket. Implemented as a
+    // unique_ptr whose "pointer" is the locks array and whose custom
+    // deleter unlocks the stored index on destruction; a null pointer
+    // means the holder is inactive and unlocks nothing.
+    template <typename LOCK_T>
+    class OneBucket {
+    public:
+        OneBucket() {}
+        OneBucket(locks_t* locks, size_type i)
+            : locks_(locks, OneUnlocker{i}) {}
+
+    private:
+        struct OneUnlocker {
+            size_type i;
+            void operator()(locks_t* p) const {
+                (*p)[lock_ind(i)].unlock(LOCK_T());
+            }
+        };
+
+        std::unique_ptr<locks_t, OneUnlocker> locks_;
+    };
+
+    // RAII holder for two locked buckets, using the same
+    // unique_ptr-with-custom-deleter idiom as OneBucket.
+    template <typename LOCK_T>
+    class TwoBuckets {
+    public:
+        TwoBuckets() {}
+        TwoBuckets(locks_t* locks, size_type i1, size_type i2)
+            : locks_(locks, TwoUnlocker{i1, i2}) {}
+
+        size_type first() const {
+            return locks_.get_deleter().i1;
+        }
+
+        size_type second() const {
+            return locks_.get_deleter().i2;
+        }
+
+        bool is_active() const {
+            return static_cast<bool>(locks_);
+        }
+
+        // Releases both locks immediately (via the deleter).
+        void unlock() {
+            locks_.reset(nullptr);
+        }
+
+    private:
+        struct TwoUnlocker {
+            size_type i1, i2;
+            void operator()(locks_t* p) const {
+                const size_type l1 = lock_ind(i1);
+                const size_type l2 = lock_ind(i2);
+                // Only unlock the second index if it maps to a distinct
+                // lock stripe, since shared stripes are locked once.
+                (*p)[l1].unlock(LOCK_T());
+                if (l1 != l2) {
+                    (*p)[l2].unlock(LOCK_T());
+                }
+            }
+        };
+
+        std::unique_ptr<locks_t, TwoUnlocker> locks_;
+    };
+
+    // RAII holder for the entire lock array.
+    template <typename LOCK_T>
+    class AllBuckets {
+    public:
+        AllBuckets(locks_t* locks) : locks_(locks) {}
+
+        bool is_active() const {
+            return static_cast<bool>(locks_);
+        }
+
+        // Releases every lock immediately.
+        void unlock() {
+            locks_.reset(nullptr);
+        }
+
+        // Abandons ownership WITHOUT unlocking; the caller becomes
+        // responsible for releasing the locks.
+        void release() {
+            (void)locks_.release();
+        }
+
+    private:
+        struct AllUnlocker {
+            void operator()(locks_t* p) const {
+                for (size_type i = 0; i < p->size(); ++i) {
+                    (*p)[i].unlock(LOCK_T());
+                }
+            }
+        };
+
+        std::unique_ptr<locks_t, AllUnlocker> locks_;
+    };
+
+    // This exception is thrown whenever we try to lock a bucket, but the
+    // hashpower is not what was expected
+    class hashpower_changed {};
+
+    // After taking a lock on the table for the given bucket, this function will
+    // check the hashpower to make sure it is the same as what it was before the
+    // lock was taken. If it isn't unlock the bucket and throw a
+    // hashpower_changed exception.
+    template <typename LOCK_T>
+    inline void check_hashpower(const size_type hp, const size_type lock) const {
+        if (hashpower() != hp) {
+            locks_[lock].unlock(LOCK_T());
+            LIBCUCKOO_DBG("%s", "hashpower changed\n");
+            throw hashpower_changed();
+        }
+    }
+
+    // locks the given bucket index.
+    //
+    // throws hashpower_changed if it changed after taking the lock.
+    template <typename LOCK_T>
+    inline OneBucket<LOCK_T> lock_one(const size_type hp, const size_type i) const {
+        const size_type l = lock_ind(i);
+        locks_[l].lock(LOCK_T());
+        check_hashpower<LOCK_T>(hp, l);
+        return OneBucket<LOCK_T>(&locks_, i);
+    }
+
+    // locks the two bucket indexes, always locking the earlier index first to
+    // avoid deadlock. If the two indexes are the same, it just locks one.
+    //
+    // throws hashpower_changed if it changed after taking the lock.
+    template <typename LOCK_T>
+    TwoBuckets<LOCK_T> lock_two(const size_type hp, const size_type i1,
+                        const size_type i2) const {
+        size_type l1 = lock_ind(i1);
+        size_type l2 = lock_ind(i2);
+        // Canonical ascending acquisition order prevents deadlock with
+        // any concurrent lock_two on the same pair.
+        if (l2 < l1) {
+            std::swap(l1, l2);
+        }
+        locks_[l1].lock(LOCK_T());
+        check_hashpower<LOCK_T>(hp, l1);
+        if (l2 != l1) {
+            locks_[l2].lock(LOCK_T());
+        }
+        return TwoBuckets<LOCK_T>(&locks_, i1, i2);
+    }
+
+    // lock_two_one locks the three bucket indexes in numerical order, returning
+    // the containers as a two (i1 and i2) and a one (i3). The one will not be
+    // active if i3 shares a lock index with i1 or i2.
+    //
+    // throws hashpower_changed if it changed after taking the lock.
+    template <typename LOCK_T>
+    std::pair<TwoBuckets<LOCK_T>, OneBucket<LOCK_T>>
+    lock_three(const size_type hp, const size_type i1,
+               const size_type i2, const size_type i3) const {
+        std::array<size_type, 3> lk{{lock_ind(i1), lock_ind(i2), lock_ind(i3)}};
+        // Three-element sorting network: acquire locks in ascending
+        // order to avoid deadlock.
+        if (lk[2] < lk[1]) std::swap(lk[2], lk[1]);
+        if (lk[2] < lk[0]) std::swap(lk[2], lk[0]);
+        if (lk[1] < lk[0]) std::swap(lk[1], lk[0]);
+        locks_[lk[0]].lock(LOCK_T());
+        check_hashpower<LOCK_T>(hp, lk[0]);
+        if (lk[1] != lk[0]) {
+            locks_[lk[1]].lock(LOCK_T());
+        }
+        if (lk[2] != lk[1]) {
+            locks_[lk[2]].lock(LOCK_T());
+        }
+        // The OneBucket is made inactive (null locks pointer) when i3's
+        // lock stripe is already owned by the TwoBuckets.
+        return std::make_pair(
+            TwoBuckets<LOCK_T>(&locks_, i1, i2),
+            OneBucket<LOCK_T>(
+                (lock_ind(i3) == lock_ind(i1) || lock_ind(i3) == lock_ind(i2)) ?
+                nullptr : &locks_, i3)
+            );
+    }
+
+    // snapshot_and_lock_two locks the buckets associated with the given
+    // hash value, making sure the hashpower doesn't change before the locks are
+    // taken. Thus it ensures that the buckets and locks corresponding to the
+    // hash value will stay correct as long as the locks are held. It returns
+    // the bucket indices associated with the hash value and the current
+    // hashpower.
+    template <typename LOCK_T>
+    TwoBuckets<LOCK_T> snapshot_and_lock_two(const hash_value& hv) const {
+        while (true) {
+            // Store the current hashpower we're using to compute the buckets
+            const size_type hp = hashpower();
+            const size_type i1 = index_hash(hp, hv.hash);
+            const size_type i2 = alt_index(hp, hv.partial, i1);
+            try {
+                // lock_two throws hashpower_changed if hp became stale
+                // between the read above and acquiring the locks.
+                return lock_two<LOCK_T>(hp, i1, i2);
+            } catch (hashpower_changed&) {
+                // The hashpower changed while taking the locks. Try again.
+                continue;
+            }
+        }
+    }
+
+    // snapshot_and_lock_all takes all the locks, and returns a deleter object
+    // that releases the locks upon destruction. Note that after taking all the
+    // locks, it is okay to change the buckets_ vector and the hashpower_, since
+    // no other threads should be accessing the buckets.
+    template <typename LOCK_T>
+    AllBuckets<LOCK_T> snapshot_and_lock_all() const {
+        // Locks are taken in ascending index order, consistent with
+        // lock_two/lock_three, so this cannot deadlock against them.
+        for (size_type i = 0; i < locks_.size(); ++i) {
+            locks_[i].lock(LOCK_T());
+        }
+        return AllBuckets<LOCK_T>(&locks_);
+    }
+
+    // lock_ind converts an index into buckets to an index into locks.
+    static inline size_type lock_ind(const size_type bucket_ind) {
+        // The mask below relies on locks_t::max_size() being a power of two,
+        // making this equivalent to bucket_ind % locks_t::max_size().
+        const size_type mask = locks_t::max_size() - 1;
+        return bucket_ind & mask;
+    }
+
+    // Data storage types and functions
+
+    // Value type without const Key, used for storage. The public value_type
+    // carries a const key; pairs are stored internally without the const so
+    // bucket memory can be constructed over and moved from.
+    using storage_value_type = std::pair<key_type, mapped_type>;
+
+    // The Bucket type holds slot_per_bucket() partial keys, key-value pairs,
+    // and a occupied bitset, which indicates whether the slot at the given bit
+    // index is in the table or not. It uses aligned_storage arrays to store the
+    // keys and values to allow constructing and destroying key-value pairs in
+    // place. Internally, the values are stored without the const qualifier in
+    // the key, to enable modifying bucket memory.
+    class Bucket {
+    public:
+        Bucket() noexcept {}
+        // The destructor does nothing to the key-value pairs, since we'd need
+        // an allocator to properly destroy the elements.
+        ~Bucket() noexcept {}
+
+        // No move or copy constructors, since we'd need an
+        // instance of the allocator to do any constructions or destructions
+        Bucket(const Bucket&) = delete;
+        Bucket(Bucket&&) = delete;
+        Bucket& operator=(const Bucket&) = delete;
+        Bucket& operator=(Bucket&&) = delete;
+
+        // Returns the stored partial hash for the given slot.
+        partial_t partial(size_type ind) const {
+            return partials_[ind];
+        }
+
+        // Reinterprets the raw storage of the given slot as a value_type.
+        // Only meaningful if occupied(ind) is true.
+        const value_type& kvpair(size_type ind) const {
+            return *static_cast<const value_type*>(
+                static_cast<const void*>(std::addressof(kvpairs_[ind])));
+        }
+
+        value_type& kvpair(size_type ind) {
+            return *static_cast<value_type*>(
+                static_cast<void*>(std::addressof(kvpairs_[ind])));
+        }
+
+        // Same storage viewed with a non-const key, used when the pair must
+        // be constructed, destroyed, or moved from.
+        storage_value_type& storage_kvpair(size_type ind) {
+            return *static_cast<storage_value_type*>(
+                static_cast<void*>(std::addressof(kvpairs_[ind])));
+        }
+
+        // Whether the given slot currently holds a live key-value pair.
+        bool occupied(size_type ind) const {
+            return occupied_[ind];
+        }
+
+        const key_type& key(size_type ind) const {
+            return kvpair(ind).first;
+        }
+
+        const mapped_type& val(size_type ind) const {
+            return kvpair(ind).second;
+        }
+
+        mapped_type& val(size_type ind) {
+            return kvpair(ind).second;
+        }
+
+        // Constructs a key-value pair in the given slot and marks it
+        // occupied. The key k is always moved from; the mapped value is
+        // constructed in place from args.
+        template <typename K, typename... Args>
+        void setKV(allocator_type& allocator, size_type ind, partial_t p,
+                   K& k, Args&&... args) {
+            partials_[ind] = p;
+            occupied_[ind] = true;
+            allocator_traits_::construct(
+                allocator, &storage_kvpair(ind), std::piecewise_construct,
+                std::forward_as_tuple(std::move(k)),
+                std::forward_as_tuple(std::forward<Args>(args)...));
+        }
+
+        // Destroys the pair in the given slot and marks it unoccupied.
+        void eraseKV(allocator_type& allocator, size_type ind) {
+            occupied_[ind] = false;
+            allocator_traits_::destroy(
+                allocator, std::addressof(storage_kvpair(ind)));
+        }
+
+        // Destroys every occupied pair in the bucket.
+        void clear(allocator_type& allocator) {
+            for (size_type i = 0; i < slot_per_bucket(); ++i) {
+                if (occupied(i)) {
+                    eraseKV(allocator, i);
+                }
+            }
+        }
+
+        // Moves the item in b1[slot1] into b2[slot2] without copying
+        static void move_to_bucket(allocator_type& allocator,
+                                   Bucket& b1, size_type slot1,
+                                   Bucket& b2, size_type slot2) {
+            assert(b1.occupied(slot1));
+            assert(!b2.occupied(slot2));
+            storage_value_type& tomove = b1.storage_kvpair(slot1);
+            b2.setKV(allocator, slot2, b1.partial(slot1),
+                     tomove.first, std::move(tomove.second));
+            b1.eraseKV(allocator, slot1);
+        }
+
+        // Moves the contents of b1 to b2
+        static void move_bucket(allocator_type& allocator, Bucket& b1,
+                                Bucket& b2) {
+            for (size_type i = 0; i < slot_per_bucket(); ++i) {
+                if (b1.occupied(i)) {
+                    move_to_bucket(allocator, b1, i, b2, i);
+                }
+            }
+        }
+
+    private:
+        // Partial hashes for each slot, used to short-circuit key comparison.
+        std::array<partial_t, slot_per_bucket()> partials_;
+        // Bit i is set iff slot i holds a live pair.
+        std::bitset<slot_per_bucket()> occupied_;
+        // Raw, suitably-aligned storage for the key-value pairs.
+        std::array<typename std::aligned_storage<
+                       sizeof(storage_value_type),
+                       alignof(storage_value_type)>::type,
+                   slot_per_bucket()> kvpairs_;
+    };
+
+    // BucketContainer is a fixed-size array of Buckets, allocated and
+    // constructed through the allocator rebound to Bucket. It is neither
+    // copyable nor movable; the table exchanges containers via swap().
+    class BucketContainer {
+        using traits_ = typename allocator_traits_::
+            template rebind_traits<Bucket>;
+    public:
+        // Allocates and default-constructs n buckets using a copy of alloc.
+        BucketContainer(size_type n, typename traits_::allocator_type alloc)
+            : allocator_(alloc), buckets_(traits_::allocate(allocator_, n)),
+              size_(n) {
+            // The Bucket default constructor is nothrow, so we don't have to
+            // worry about dealing with exceptions when constructing all the
+            // elements.
+            static_assert(
+                std::is_nothrow_constructible<Bucket>::value,
+                "BucketContainer requires Bucket to be nothrow constructible");
+            for (size_type i = 0; i < size_; ++i) {
+                traits_::construct(allocator_, &buckets_[i]);
+            }
+        }
+
+        BucketContainer(const BucketContainer&) = delete;
+        BucketContainer(BucketContainer&&) = delete;
+        BucketContainer& operator=(const BucketContainer&) = delete;
+        BucketContainer& operator=(BucketContainer&&) = delete;
+
+        ~BucketContainer() noexcept {
+            static_assert(
+                std::is_nothrow_destructible<Bucket>::value,
+                "BucketContainer requires Bucket to be nothrow destructible");
+            for (size_type i = 0; i < size_; ++i) {
+                traits_::destroy(allocator_, &buckets_[i]);
+            }
+            traits_::deallocate(allocator_, buckets_, size());
+        }
+
+        // Number of buckets in the container.
+        size_type size() const {
+            return size_;
+        }
+
+        void swap(BucketContainer& other) noexcept {
+            std::swap(buckets_, other.buckets_);
+            // If propagate_container_on_swap is false, we do nothing if the
+            // allocators are equal. If they're not equal, behavior is
+            // undefined, so we can still do nothing.
+            if (traits_::propagate_on_container_swap::value) {
+                std::swap(allocator_, other.allocator_);
+            }
+            std::swap(size_, other.size_);
+        }
+
+        Bucket& operator[](size_type i) {
+            return buckets_[i];
+        }
+
+        const Bucket& operator[](size_type i) const {
+            return buckets_[i];
+        }
+
+    private:
+        // allocator_ is deliberately declared before buckets_: members are
+        // initialized in declaration order, and the initializer of buckets_
+        // calls traits_::allocate(allocator_, n). With buckets_ declared
+        // first, the allocator would be read before it was initialized
+        // (undefined behavior for stateful allocators).
+        typename allocator_traits_::template rebind_alloc<Bucket> allocator_;
+        typename traits_::pointer buckets_;
+        size_type size_;
+    };
+
+    // The type of the buckets container
+    using buckets_t = BucketContainer;
+
+    // Status codes for internal functions
+
+    enum cuckoo_status {
+        ok,
+        failure,
+        failure_key_not_found,
+        failure_key_duplicated,
+        failure_table_full,
+        failure_under_expansion,
+    };
+
+
+    // A composite type for functions that need to return a table position, and
+    // a status code.
+    struct table_position {
+        size_type index;      // bucket index
+        size_type slot;       // slot within the bucket
+        cuckoo_status status; // outcome of the operation
+    };
+
+    // Searching types and functions
+
+    // cuckoo_find searches the table for the given key, returning the position
+    // of the element found, or a failure status code if the key wasn't found.
+    // It expects the locks to be taken and released outside the function.
+    template <typename K>
+    table_position cuckoo_find(const K &key, const partial_t partial,
+                               const size_type i1, const size_type i2) const {
+        // Probe the first candidate bucket, then the alternate one.
+        const int s1 = try_read_from_bucket(buckets_[i1], partial, key);
+        if (s1 >= 0) {
+            return table_position{i1, static_cast<size_type>(s1), ok};
+        }
+        const int s2 = try_read_from_bucket(buckets_[i2], partial, key);
+        if (s2 >= 0) {
+            return table_position{i2, static_cast<size_type>(s2), ok};
+        }
+        return table_position{0, 0, failure_key_not_found};
+    }
+
+    // try_read_from_bucket will search the bucket for the given key and return
+    // the index of the slot if found, or -1 if not found.
+    template <typename K>
+    int try_read_from_bucket(const Bucket& b, const partial_t partial,
+                             const K &key) const {
+        // Silence a warning from MSVC about partial being unused if is_simple.
+        (void)partial;
+        for (size_type i = 0; i < slot_per_bucket(); ++i) {
+            // Skip empty slots outright.
+            if (!b.occupied(i)) {
+                continue;
+            }
+            // Unless the hash is simple, use the partial hash to rule out
+            // slots without running the full key comparison.
+            if (!is_simple && partial != b.partial(i)) {
+                continue;
+            }
+            if (key_eq()(b.key(i), key)) {
+                return static_cast<int>(i);
+            }
+        }
+        return -1;
+    }
+
+    // Insertion types and function
+
+    /**
+     * Runs cuckoo_insert in a loop until it succeeds in insert and upsert, so
+     * we pulled out the loop to avoid duplicating logic.
+     *
+     * @param hv the hash value of the key
+     * @param b bucket locks
+     * @param key the key to insert
+     * @return table_position of the location to insert the new element, or the
+     * site of the duplicate element with a status code if there was a duplicate.
+     * In either case, the locks will still be held after the function ends.
+     * @throw libcuckoo_load_factor_too_low if expansion is necessary, but the
+     * load factor of the table is below the threshold
+     */
+    template <typename K, typename LOCK_T>
+    table_position cuckoo_insert_loop(hash_value hv, TwoBuckets<LOCK_T>& b,
+                                      K& key) {
+        table_position pos;
+        while (true) {
+            assert(b.is_active());
+            const size_type hp = hashpower();
+            pos = cuckoo_insert(hv, b, key);
+            switch (pos.status) {
+            case ok:
+            case failure_key_duplicated:
+                return pos;
+            case failure_table_full:
+                // Expand the table and try again, re-grabbing the locks
+                cuckoo_fast_double<LOCK_T, automatic_resize>(hp);
+                // Intentional fallthrough: after expanding (or after racing
+                // with a concurrent expansion) the locks must be re-taken
+                // before retrying the insert.
+            case failure_under_expansion:
+                b = snapshot_and_lock_two<LOCK_T>(hv);
+                break;
+            default:
+                assert(false);
+            }
+        }
+    }
+
+    // cuckoo_insert tries to find an empty slot in either of the buckets to
+    // insert the given key into, performing cuckoo hashing if necessary. It
+    // expects the locks to be taken outside the function. Before inserting, it
+    // checks that the key isn't already in the table. cuckoo hashing presents
+    // multiple concurrency issues, which are explained in the function. The
+    // following return states are possible:
+    //
+    // ok -- Found an empty slot, locks will be held on both buckets after the
+    // function ends, and the position of the empty slot is returned
+    //
+    // failure_key_duplicated -- Found a duplicate key, locks will be held, and
+    // the position of the duplicate key will be returned
+    //
+    // failure_under_expansion -- Failed due to a concurrent expansion
+    // operation. Locks are released. No meaningful position is returned.
+    //
+    // failure_table_full -- Failed to find an empty slot for the table. Locks
+    // are released. No meaningful position is returned.
+    template <typename K, typename LOCK_T>
+    table_position cuckoo_insert(const hash_value hv, TwoBuckets<LOCK_T>& b,
+                                 K& key) {
+        int res1, res2;
+        // Check both candidate buckets for a duplicate key and remember any
+        // empty slot (res1/res2) seen along the way.
+        Bucket& b1 = buckets_[b.first()];
+        if (!try_find_insert_bucket(b1, res1, hv.partial, key)) {
+            return table_position{b.first(), static_cast<size_type>(res1),
+                    failure_key_duplicated};
+        }
+        Bucket& b2 = buckets_[b.second()];
+        if (!try_find_insert_bucket(b2, res2, hv.partial, key)) {
+            return table_position{b.second(), static_cast<size_type>(res2),
+                    failure_key_duplicated};
+        }
+        // Prefer an empty slot in the first bucket, then the second.
+        if (res1 != -1) {
+            return table_position{b.first(), static_cast<size_type>(res1), ok};
+        }
+        if (res2 != -1) {
+            return table_position{b.second(), static_cast<size_type>(res2), ok};
+        }
+
+        // We are unlucky, so let's perform cuckoo hashing.
+        size_type insert_bucket = 0;
+        size_type insert_slot = 0;
+        cuckoo_status st = run_cuckoo<LOCK_T>(b, insert_bucket, insert_slot);
+        if (st == failure_under_expansion) {
+            // The run_cuckoo operation operated on an old version of the table,
+            // so we have to try again. We signal to the calling insert method
+            // to try again by returning failure_under_expansion.
+            return table_position{0, 0, failure_under_expansion};
+        } else if (st == ok) {
+            // try_lock failing here means the locks are already held, which
+            // is exactly the postcondition run_cuckoo guarantees on success.
+            assert(!locks_[lock_ind(b.first())].try_lock(LOCK_T()));
+            assert(!locks_[lock_ind(b.second())].try_lock(LOCK_T()));
+            assert(!buckets_[insert_bucket].occupied(insert_slot));
+            assert(insert_bucket == index_hash(hashpower(), hv.hash) ||
+                   insert_bucket == alt_index(
+                       hashpower(), hv.partial,
+                       index_hash(hashpower(), hv.hash)));
+            // Since we unlocked the buckets during run_cuckoo, another insert
+            // could have inserted the same key into either b.first() or
+            // b.second(), so we check for that before doing the insert.
+            table_position pos = cuckoo_find(
+                key, hv.partial, b.first(), b.second());
+            if (pos.status == ok) {
+                pos.status = failure_key_duplicated;
+                return pos;
+            }
+            return table_position{insert_bucket, insert_slot, ok};
+        }
+        assert(st == failure);
+        LIBCUCKOO_DBG("hash table is full (hashpower = %zu, hash_items = %zu,"
+                      "load factor = %.2f), need to increase hashpower\n",
+                      hashpower(), size(), load_factor());
+        return table_position{0, 0, failure_table_full};
+    }
+
+    // add_to_bucket will insert the given key-value pair into the slot. The key
+    // and value will be move-constructed into the table, so they are not valid
+    // for use afterwards.
+    template <typename K, typename... Args>
+    void add_to_bucket(const size_type bucket_ind, const size_type slot,
+                       const partial_t partial, K& key, Args&&... val) {
+        Bucket& bucket = buckets_[bucket_ind];
+        // The caller must have reserved an empty slot for us.
+        assert(!bucket.occupied(slot));
+        bucket.setKV(allocator_, slot, partial, key,
+                     std::forward<Args>(val)...);
+        // Keep the per-lock element count in sync with the insertion.
+        ++locks_[lock_ind(bucket_ind)].elem_counter();
+    }
+
+    // try_find_insert_bucket will search the bucket for the given key, and for
+    // an empty slot. If the key is found, we store the slot of the key in
+    // `slot` and return false. If we find an empty slot, we store its position
+    // in `slot` and return true. If no duplicate key is found and no empty slot
+    // is found, we store -1 in `slot` and return true.
+    template <typename K>
+    bool try_find_insert_bucket(const Bucket& b, int& slot,
+                                const partial_t partial, const K &key) const {
+        // Silence a warning from MSVC about partial being unused if is_simple.
+        (void)partial;
+        slot = -1;
+        for (size_type i = 0; i < slot_per_bucket(); ++i) {
+            // Remember the most recent empty slot seen.
+            if (!b.occupied(i)) {
+                slot = i;
+                continue;
+            }
+            // Partial-hash mismatch rules this slot out cheaply.
+            if (!is_simple && partial != b.partial(i)) {
+                continue;
+            }
+            // Duplicate key: report its slot and signal failure.
+            if (key_eq()(b.key(i), key)) {
+                slot = i;
+                return false;
+            }
+        }
+        return true;
+    }
+
+    // CuckooRecord holds one position in a cuckoo path. Since cuckoopath
+    // elements only define a sequence of alternate hashings for different hash
+    // values, we only need to keep track of the hash values being moved, rather
+    // than the keys themselves.
+    //
+    // Written as a named struct rather than the C-style
+    // `typedef struct { ... } CuckooRecord;`, which is unidiomatic in C++.
+    struct CuckooRecord {
+        size_type bucket; // bucket index touched by this step of the path
+        size_type slot;   // slot within that bucket
+        hash_value hv;    // hash of the key occupying the slot
+    };
+
+    // The maximum number of items in a cuckoo BFS path.
+    static constexpr uint8_t MAX_BFS_PATH_LEN = 5;
+
+    // An array of CuckooRecords
+    using CuckooRecords = std::array<CuckooRecord, MAX_BFS_PATH_LEN>;
+
+    // run_cuckoo performs cuckoo hashing on the table in an attempt to free up
+    // a slot on either of the insert buckets, which are assumed to be locked
+    // before the start. On success, the bucket and slot that was freed up is
+    // stored in insert_bucket and insert_slot. In order to perform the search
+    // and the swaps, it has to release the locks, which can lead to certain
+    // concurrency issues, the details of which are explained in the function.
+    // If run_cuckoo returns ok (success), then `b` will be active, otherwise it
+    // will not.
+    template <typename LOCK_T>
+    cuckoo_status run_cuckoo(TwoBuckets<LOCK_T>& b, size_type &insert_bucket,
+                             size_type &insert_slot) {
+        // We must unlock the buckets here, so that cuckoopath_search and
+        // cuckoopath_move can lock buckets as desired without deadlock.
+        // cuckoopath_move has to move something out of one of the original
+        // buckets as its last operation, and it will lock both buckets and
+        // leave them locked after finishing. This way, we know that if
+        // cuckoopath_move succeeds, then the buckets needed for insertion are
+        // still locked. If cuckoopath_move fails, the buckets are unlocked and
+        // we try again. This unlocking does present two problems. The first is
+        // that another insert on the same key runs and, finding that the key
+        // isn't in the table, inserts the key into the table. Then we insert
+        // the key into the table, causing a duplication. To check for this, we
+        // search the buckets for the key we are trying to insert before doing
+        // so (this is done in cuckoo_insert, and requires that both buckets are
+        // locked). Another problem is that an expansion runs and changes the
+        // hashpower, meaning the buckets may not be valid anymore. In this
+        // case, the cuckoopath functions will have thrown a hashpower_changed
+        // exception, which we catch and handle here.
+        size_type hp = hashpower();
+        b.unlock();
+        CuckooRecords cuckoo_path;
+        bool done = false;
+        try {
+            while (!done) {
+                // Find a candidate path of displacements ending in an empty
+                // slot; depth < 0 means no path was found.
+                const int depth = cuckoopath_search<LOCK_T>(
+                    hp, cuckoo_path, b.first(), b.second());
+                if (depth < 0) {
+                    break;
+                }
+
+                // Try to execute the path; it can fail benignly if another
+                // thread modified the involved slots, in which case we retry.
+                if (cuckoopath_move(hp, cuckoo_path, depth, b)) {
+                    insert_bucket = cuckoo_path[0].bucket;
+                    insert_slot = cuckoo_path[0].slot;
+                    assert(insert_bucket == b.first() || insert_bucket == b.second());
+                    assert(!locks_[lock_ind(b.first())].try_lock(LOCK_T()));
+                    assert(!locks_[lock_ind(b.second())].try_lock(LOCK_T()));
+                    assert(!buckets_[insert_bucket].occupied(insert_slot));
+                    done = true;
+                    break;
+                }
+            }
+        } catch (hashpower_changed&) {
+            // The hashpower changed while we were trying to cuckoo, which means
+            // we want to retry. b.first() and b.second() should not be locked
+            // in this case.
+            return failure_under_expansion;
+        }
+        return done ? ok : failure;
+    }
+
+    // cuckoopath_search finds a cuckoo path from one of the starting buckets to
+    // an empty slot in another bucket. It returns the depth of the discovered
+    // cuckoo path on success, and -1 on failure. Since it doesn't take locks on
+    // the buckets it searches, the data can change between this function and
+    // cuckoopath_move. Thus cuckoopath_move checks that the data matches the
+    // cuckoo path before changing it.
+    //
+    // throws hashpower_changed if it changed during the search.
+    template <typename LOCK_T>
+    int cuckoopath_search(const size_type hp,
+                          CuckooRecords& cuckoo_path,
+                          const size_type i1, const size_type i2) {
+        b_slot x = slot_search<LOCK_T>(hp, i1, i2);
+        if (x.depth == -1) {
+            return -1;
+        }
+        // Fill in the cuckoo path slots from the end to the beginning.
+        // pathcode encodes one slot per level in base slot_per_bucket().
+        for (int i = x.depth; i >= 0; i--) {
+            cuckoo_path[i].slot = x.pathcode % slot_per_bucket();
+            x.pathcode /= slot_per_bucket();
+        }
+        // Fill in the cuckoo_path buckets and keys from the beginning to the
+        // end, using the final pathcode to figure out which bucket the path
+        // starts on. Since data could have been modified between slot_search
+        // and the computation of the cuckoo path, this could be an invalid
+        // cuckoo_path.
+        CuckooRecord& first = cuckoo_path[0];
+        if (x.pathcode == 0) {
+            first.bucket = i1;
+        } else {
+            assert(x.pathcode == 1);
+            first.bucket = i2;
+        }
+        {
+            // Lock only the bucket being inspected; ob releases it when the
+            // scope ends.
+            const auto ob = lock_one<LOCK_T>(hp, first.bucket);
+            const Bucket& b = buckets_[first.bucket];
+            if (!b.occupied(first.slot)) {
+                // We can terminate here
+                return 0;
+            }
+            first.hv = hashed_key(b.key(first.slot));
+        }
+        for (int i = 1; i <= x.depth; ++i) {
+            CuckooRecord& curr = cuckoo_path[i];
+            const CuckooRecord& prev = cuckoo_path[i-1];
+            assert(prev.bucket == index_hash(hp, prev.hv.hash) ||
+                   prev.bucket == alt_index(hp, prev.hv.partial,
+                                            index_hash(hp, prev.hv.hash)));
+            // We get the bucket that this slot is on by computing the alternate
+            // index of the previous bucket
+            curr.bucket = alt_index(hp, prev.hv.partial, prev.bucket);
+            const auto ob = lock_one<LOCK_T>(hp, curr.bucket);
+            const Bucket& b = buckets_[curr.bucket];
+            if (!b.occupied(curr.slot)) {
+                // We can terminate here
+                return i;
+            }
+            curr.hv = hashed_key(b.key(curr.slot));
+        }
+        return x.depth;
+    }
+
+    // cuckoopath_move moves keys along the given cuckoo path in order to make
+    // an empty slot in one of the buckets in cuckoo_insert. Before the start of
+    // this function, the two insert-locked buckets were unlocked in run_cuckoo.
+    // At the end of the function, if the function returns true (success), then
+    // both insert-locked buckets remain locked. If the function is
+    // unsuccessful, then both insert-locked buckets will be unlocked.
+    //
+    // throws hashpower_changed if it changed during the move.
+    template <typename LOCK_T>
+    bool cuckoopath_move(const size_type hp, CuckooRecords& cuckoo_path,
+                         size_type depth, TwoBuckets<LOCK_T>& b) {
+        assert(!b.is_active());
+        if (depth == 0) {
+            // There is a chance that depth == 0, when try_add_to_bucket sees
+            // both buckets as full and cuckoopath_search finds one empty. In
+            // this case, we lock both buckets. If the slot that
+            // cuckoopath_search found empty isn't empty anymore, we unlock them
+            // and return false. Otherwise, the bucket is empty and insertable,
+            // so we hold the locks and return true.
+            const size_type bucket = cuckoo_path[0].bucket;
+            assert(bucket == b.first() || bucket == b.second());
+            b = lock_two<LOCK_T>(hp, b.first(), b.second());
+            if (!buckets_[bucket].occupied(cuckoo_path[0].slot)) {
+                return true;
+            } else {
+                b.unlock();
+                return false;
+            }
+        }
+
+        // Walk the path backwards, moving the deepest displaced item first so
+        // each move lands in a slot that was just vacated (or found empty).
+        while (depth > 0) {
+            CuckooRecord& from = cuckoo_path[depth-1];
+            CuckooRecord& to   = cuckoo_path[depth];
+            const size_type fs = from.slot;
+            const size_type ts = to.slot;
+            TwoBuckets<LOCK_T> twob;
+            OneBucket<LOCK_T> extrab;
+            if (depth == 1) {
+                // Even though we are only swapping out of one of the original
+                // buckets, we have to lock both of them along with the slot we
+                // are swapping to, since at the end of this function, they both
+                // must be locked. We store tb inside the extrab container so it
+                // is unlocked at the end of the loop.
+                std::tie(twob, extrab) = lock_three<LOCK_T>(
+                    hp, b.first(), b.second(), to.bucket);
+            } else {
+                twob = lock_two<LOCK_T>(hp, from.bucket, to.bucket);
+            }
+
+            Bucket& fb = buckets_[from.bucket];
+            Bucket& tb = buckets_[to.bucket];
+
+            // We plan to kick out fs, but let's check if it is still there;
+            // there's a small chance we've gotten scooped by a later cuckoo. If
+            // that happened, just... try again. Also the slot we are filling in
+            // may have already been filled in by another thread, or the slot we
+            // are moving from may be empty, both of which invalidate the swap.
+            // We only need to check that the hash value is the same, because,
+            // even if the keys are different and have the same hash value, then
+            // the cuckoopath is still valid.
+            if (hashed_key_only_hash(fb.key(fs)) != from.hv.hash ||
+                tb.occupied(ts) || !fb.occupied(fs)) {
+                return false;
+            }
+
+            Bucket::move_to_bucket(allocator_, fb, fs, tb, ts);
+            if (depth == 1) {
+                // Hold onto the locks contained in twob
+                b = std::move(twob);
+            }
+            depth--;
+        }
+        return true;
+    }
+
+    // A constexpr version of pow that we can use for static_asserts
+    static constexpr size_type const_pow(size_type a, size_type b) {
+        // Recursive single-return form, as required for C++11 constexpr.
+        return (b > 0) ? a * const_pow(a, b - 1) : 1;
+    }
+
+    // b_slot holds the information for a BFS path through the table.
+    #pragma pack(push, 1)
+    struct b_slot {
+        // The bucket of the last item in the path.
+        size_type bucket;
+        // a compressed representation of the slots for each of the buckets in
+        // the path. pathcode is sort of like a base-slot_per_bucket number, and
+        // we need to hold at most MAX_BFS_PATH_LEN slots. Thus we need the
+        // maximum pathcode to be at least slot_per_bucket()^(MAX_BFS_PATH_LEN).
+        size_type pathcode;
+        static_assert(const_pow(slot_per_bucket(), MAX_BFS_PATH_LEN) <
+                      std::numeric_limits<decltype(pathcode)>::max(),
+                      "pathcode may not be large enough to encode a cuckoo "
+                      "path");
+        // The 0-indexed position in the cuckoo path this slot occupies. It must
+        // be less than MAX_BFS_PATH_LEN, and also able to hold negative values
+        // (-1 is used as the "no path found" sentinel).
+        int_fast8_t depth;
+        static_assert(MAX_BFS_PATH_LEN - 1 <=
+                      std::numeric_limits<decltype(depth)>::max(),
+                      "The depth type must able to hold a value of"
+                      " MAX_BFS_PATH_LEN - 1");
+        static_assert(-1 >= std::numeric_limits<decltype(depth)>::min(),
+                      "The depth type must be able to hold a value of -1");
+        // Default constructor leaves the fields uninitialized; only used for
+        // the slots_ array in b_queue before entries are written.
+        b_slot() {}
+        b_slot(const size_type b, const size_type p, const decltype(depth) d)
+            : bucket(b), pathcode(p), depth(d) {
+            assert(d < MAX_BFS_PATH_LEN);
+        }
+    };
+    #pragma pack(pop)
+
+    // b_queue is the queue used to store b_slots for BFS cuckoo hashing.
+    #pragma pack(push, 1)
+    class b_queue {
+    public:
+        // Constructs an empty queue.
+        b_queue() noexcept : head_(0), tail_(0) {}
+
+        // Appends x at the tail. The queue must not be full.
+        void enqueue(b_slot x) {
+            assert(!full());
+            slots_[tail_] = x;
+            tail_ = wrap(tail_ + 1);
+        }
+
+        // Removes and returns the item at the head. The queue must not be
+        // empty.
+        b_slot dequeue() {
+            assert(!empty());
+            const b_slot x = slots_[head_];
+            head_ = wrap(head_ + 1);
+            return x;
+        }
+
+        bool empty() const {
+            return head_ == tail_;
+        }
+
+        bool full() const {
+            // One slot is sacrificed to distinguish full from empty.
+            return wrap(tail_ + 1) == head_;
+        }
+
+    private:
+        // The maximum size of the BFS queue. Note that unless it's less than
+        // slot_per_bucket()^MAX_BFS_PATH_LEN, it won't really mean anything.
+        static constexpr size_type MAX_CUCKOO_COUNT = 256;
+        static_assert((MAX_CUCKOO_COUNT & (MAX_CUCKOO_COUNT - 1)) == 0,
+                      "MAX_CUCKOO_COUNT should be a power of 2");
+        // A circular array of b_slots
+        b_slot slots_[MAX_CUCKOO_COUNT];
+        // The index of the head of the queue in the array
+        size_type head_;
+        // One past the index of the last item of the queue in the array.
+        size_type tail_;
+
+        // wraps an index around the end of the circular array.
+        size_type wrap(size_type ind) const {
+            return ind & (MAX_CUCKOO_COUNT - 1);
+        }
+    };
+    #pragma pack(pop)
+
+    // slot_search searches for a cuckoo path using breadth-first search. It
+    // starts with the i1 and i2 buckets, and, until it finds a bucket with an
+    // empty slot, adds each slot of the bucket in the b_slot. If the queue runs
+    // out of space, it fails.
+    //
+    // throws hashpower_changed if it changed during the search
+    template <typename LOCK_T>
+    b_slot slot_search(const size_type hp, const size_type i1,
+                       const size_type i2) {
+        b_queue q;
+        // The initial pathcode informs cuckoopath_search which bucket the path
+        // starts on
+        q.enqueue(b_slot(i1, 0, 0));
+        q.enqueue(b_slot(i2, 1, 0));
+        while (!q.full() && !q.empty()) {
+            b_slot x = q.dequeue();
+            // Picks a (sort-of) random slot to start from
+            size_type starting_slot = x.pathcode % slot_per_bucket();
+            for (size_type i = 0; i < slot_per_bucket() && !q.full();
+                 ++i) {
+                size_type slot = (starting_slot + i) % slot_per_bucket();
+                // The bucket is locked only for the duration of this single
+                // slot inspection; ob releases it at the end of the iteration.
+                auto ob = lock_one<LOCK_T>(hp, x.bucket);
+                Bucket& b = buckets_[x.bucket];
+                if (!b.occupied(slot)) {
+                    // We can terminate the search here
+                    x.pathcode = x.pathcode * slot_per_bucket() + slot;
+                    return x;
+                }
+
+                // If x has less than the maximum number of path components,
+                // create a new b_slot item, that represents the bucket we would
+                // have come from if we kicked out the item at this slot.
+                const partial_t partial = b.partial(slot);
+                if (x.depth < MAX_BFS_PATH_LEN - 1) {
+                    b_slot y(alt_index(hp, partial, x.bucket),
+                             x.pathcode * slot_per_bucket() + slot, x.depth+1);
+                    q.enqueue(y);
+                }
+            }
+        }
+        // We didn't find a short-enough cuckoo path, so the queue ran out of
+        // space. Return a failure value.
+        return b_slot(0, 0, -1);
+    }
+
+    // cuckoo_fast_double will double the size of the table by taking advantage
+    // of the properties of index_hash and alt_index. If the key's move
+    // constructor is not noexcept, we use cuckoo_expand_simple, since that
+    // provides a strong exception guarantee.
+    template <typename LOCK_T, typename AUTO_RESIZE>
+    cuckoo_status cuckoo_fast_double(size_type current_hp) {
+        if (!std::is_nothrow_move_constructible<storage_value_type>::value) {
+            LIBCUCKOO_DBG("%s", "cannot run cuckoo_fast_double because kv-pair "
+                          "is not nothrow move constructible");
+            return cuckoo_expand_simple<LOCK_T, AUTO_RESIZE>(current_hp + 1);
+        }
+        const size_type new_hp = current_hp + 1;
+        // Serialize with any other concurrent expansion attempt.
+        std::lock_guard<expansion_lock_t> l(expansion_lock_);
+        cuckoo_status st = check_resize_validity<AUTO_RESIZE>(current_hp, new_hp);
+        if (st != ok) {
+            return st;
+        }
+
+        // Grow the lock array to cover the doubled bucket range before taking
+        // every lock on the table.
+        locks_.resize(hashsize(new_hp));
+        auto unlocker = snapshot_and_lock_all<LOCK_T>();
+        // We can't just resize, since the Bucket is non-copyable and
+        // non-movable. Instead, we allocate a new array of buckets, and move
+        // the contents of each bucket manually.
+        {
+            buckets_t new_buckets(buckets_.size() * 2, get_allocator());
+            for (size_type i = 0; i < buckets_.size(); ++i) {
+                Bucket::move_bucket(allocator_, buckets_[i], new_buckets[i]);
+            }
+            buckets_.swap(new_buckets);
+        }
+        set_hashpower(new_hp);
+
+        // We gradually unlock the new table, by processing each of the buckets
+        // corresponding to each lock we took. For each slot in an old bucket,
+        // we either leave it in the old bucket, or move it to the corresponding
+        // new bucket. After we're done with the bucket, we release the lock on
+        // it and the new bucket, letting other threads using the new map
+        // gradually. We only unlock the locks being used by the old table,
+        // because unlocking new locks would enable operations on the table
+        // before we want them. We also re-evaluate the partial key stored at
+        // each slot, since it depends on the hashpower.
+        const size_type locks_to_move = std::min(
+            locks_.size(), hashsize(current_hp));
+        parallel_exec(0, locks_to_move,
+                      [this, current_hp, new_hp]
+                      (size_type start, size_type end, std::exception_ptr& eptr) {
+                          try {
+                              move_buckets<LOCK_T>(current_hp, new_hp, start, end);
+                          } catch (...) {
+                              eptr = std::current_exception();
+                          }
+                      });
+        // The locks beyond the old table's range were never part of the old
+        // table, so they are simply released without any bucket processing.
+        parallel_exec(locks_to_move, locks_.size(),
+                      [this](size_type i, size_type end, std::exception_ptr&) {
+                          for (; i < end; ++i) {
+                              locks_[i].unlock(LOCK_T());
+                          }
+                      });
+        // Since we've unlocked the buckets ourselves, we don't need the
+        // unlocker to do it for us.
+        unlocker.release();
+        return ok;
+    }
+
+    // Redistributes, for each lock index in [start_lock_ind, end_lock_ind),
+    // the items of every old bucket guarded by that lock into either the old
+    // bucket or its doubled-table counterpart, releasing each lock once its
+    // buckets are done. Helper for cuckoo_fast_double.
+    template <typename LOCK_T>
+    void move_buckets(size_type current_hp, size_type new_hp,
+                      size_type start_lock_ind, size_type end_lock_ind) {
+        for (; start_lock_ind < end_lock_ind; ++start_lock_ind) {
+            // Each lock guards the buckets congruent to its index modulo
+            // locks_t::max_size(), hence the strided inner loop.
+            for (size_type bucket_i = start_lock_ind;
+                 bucket_i < hashsize(current_hp);
+                 bucket_i += locks_t::max_size()) {
+                // By doubling the table size, the index_hash and alt_index of
+                // each key got one bit added to the top, at position
+                // current_hp, which means anything we have to move will either
+                // be at the same bucket position, or exactly
+                // hashsize(current_hp) later than the current bucket
+                Bucket& old_bucket = buckets_[bucket_i];
+                const size_type new_bucket_i = bucket_i + hashsize(current_hp);
+                Bucket& new_bucket = buckets_[new_bucket_i];
+                size_type new_bucket_slot = 0;
+
+                // Move each item from the old bucket that needs moving into the
+                // new bucket
+                for (size_type slot = 0; slot < slot_per_bucket(); ++slot) {
+                    if (!old_bucket.occupied(slot)) {
+                        continue;
+                    }
+                    const hash_value hv = hashed_key(old_bucket.key(slot));
+                    const size_type old_ihash = index_hash(current_hp, hv.hash);
+                    const size_type old_ahash = alt_index(
+                        current_hp, hv.partial, old_ihash);
+                    const size_type new_ihash = index_hash(new_hp, hv.hash);
+                    const size_type new_ahash = alt_index(
+                        new_hp, hv.partial, new_ihash);
+                    if ((bucket_i == old_ihash && new_ihash == new_bucket_i) ||
+                        (bucket_i == old_ahash && new_ahash == new_bucket_i)) {
+                        // We're moving the key from the old bucket to the new
+                        // one
+                        Bucket::move_to_bucket(
+                            allocator_,
+                            old_bucket, slot, new_bucket, new_bucket_slot++);
+                        // Also update the lock counts, in case we're moving to
+                        // a different lock.
+                        --locks_[lock_ind(bucket_i)].elem_counter();
+                        ++locks_[lock_ind(new_bucket_i)].elem_counter();
+                    } else {
+                        // Check that we don't want to move the new key
+                        assert(
+                            (bucket_i == old_ihash && new_ihash == old_ihash) ||
+                            (bucket_i == old_ahash && new_ahash == old_ahash));
+                    }
+                }
+            }
+            // Now we can unlock the lock, because all the buckets corresponding
+            // to it have been unlocked
+            locks_[start_lock_ind].unlock(LOCK_T());
+        }
+    }
+
+    // Checks whether the resize is okay to proceed. Returns a status code, or
+    // throws an exception, depending on the error type.
+    // Tag types selecting, at compile time, whether the minimum-load-factor
+    // check applies (it does only for automatic expansions).
+    using automatic_resize = std::integral_constant<bool, true>;
+    using manual_resize = std::integral_constant<bool, false>;
+
+    template <typename AUTO_RESIZE>
+    cuckoo_status check_resize_validity(const size_type orig_hp,
+                                        const size_type new_hp) {
+        const size_type mhp = maximum_hashpower();
+        if (mhp != LIBCUCKOO_NO_MAXIMUM_HASHPOWER && new_hp > mhp) {
+            throw libcuckoo_maximum_hashpower_exceeded(new_hp);
+        }
+        if (AUTO_RESIZE::value && load_factor() < minimum_load_factor()) {
+            throw libcuckoo_load_factor_too_low(minimum_load_factor());
+        }
+        if (hashpower() != orig_hp) {
+            // Most likely another expansion ran before this one could grab the
+            // locks
+            LIBCUCKOO_DBG("%s", "another expansion is on-going\n");
+            return failure_under_expansion;
+        }
+        return ok;
+    }
+
+    // cuckoo_expand_simple will resize the table to at least the given
+    // new_hashpower. When we're shrinking the table, if the current table
+    // contains more elements than can be held by new_hashpower, the resulting
+    // hashpower will be greater than new_hashpower. It needs to take all the
+    // bucket locks, since no other operations can change the table during
+    // expansion. Throws libcuckoo_maximum_hashpower_exceeded if we're expanding
+    // beyond the maximum hashpower, and we have an actual limit.
+    template <typename LOCK_T, typename AUTO_RESIZE>
+    cuckoo_status cuckoo_expand_simple(size_type new_hp) {
+        const auto unlocker = snapshot_and_lock_all<LOCK_T>();
+        const size_type hp = hashpower();
+        cuckoo_status st = check_resize_validity<AUTO_RESIZE>(hp, new_hp);
+        if (st != ok) {
+            return st;
+        }
+        // Creates a new hash table with hashpower new_hp and adds all
+        // the elements from the old buckets.
+        cuckoohash_map new_map(
+            hashsize(new_hp) * slot_per_bucket(),
+            hash_function(),
+            key_eq(),
+            get_allocator());
+
+        // Re-insert every occupied slot into new_map; inserting into new_map
+        // can itself grow it, which is why the header comment says the result
+        // is "at least" new_hashpower when shrinking.
+        parallel_exec(
+            0, hashsize(hp),
+            [this, &new_map]
+            (size_type i, size_type end, std::exception_ptr& eptr) {
+                try {
+                    for (; i < end; ++i) {
+                        for (size_type j = 0; j < slot_per_bucket(); ++j) {
+                            if (buckets_[i].occupied(j)) {
+                                storage_value_type& kvpair = (
+                                    buckets_[i].storage_kvpair(j));
+                                // Keys are copied; mapped values are moved out
+                                // of the old buckets.
+                                new_map.insert(kvpair.first,
+                                               std::move(kvpair.second));
+                            }
+                        }
+                    }
+                } catch (...) {
+                    eptr = std::current_exception();
+                }
+            });
+
+        // Swap the current buckets vector with new_map's and set the hashpower.
+        // This is okay, because we have all the locks, so nobody else should be
+        // reading from the buckets array. Then the old buckets array will be
+        // deleted when new_map is deleted. All the locks should be released by
+        // the unlocker as well.
+        buckets_.swap(new_map.buckets_);
+        set_hashpower(new_map.hashpower_);
+        return ok;
+    }
+
+    // Executes the function over the given range split over num_threads threads
+    // func is called as func(range_start, range_end, exception_ptr&); any
+    // exception it records is rethrown on the calling thread after all
+    // workers have been joined.
+    template <typename F>
+    static void parallel_exec(size_type start, size_type end, F func) {
+        static const size_type num_threads = (
+            std::thread::hardware_concurrency() == 0 ?
+            1 : std::thread::hardware_concurrency());
+        size_type work_per_thread = (end - start) / num_threads;
+        std::vector<std::thread, typename allocator_traits_::
+        template rebind_alloc<std::thread> > threads(num_threads);
+        std::vector<std::exception_ptr, typename allocator_traits_::
+        template rebind_alloc<std::exception_ptr>> eptrs(num_threads, nullptr);
+        for (size_type i = 0; i < num_threads - 1; ++i) {
+            threads[i] = std::thread(func, start, start + work_per_thread,
+                                     std::ref(eptrs[i]));
+            start += work_per_thread;
+        }
+        // The last thread takes whatever remains, absorbing the remainder of
+        // the integer division.
+        threads.back() = std::thread(func, start, end, std::ref(eptrs.back()));
+        for (std::thread& t : threads) {
+            t.join();
+        }
+        // Only after every worker has joined do we rethrow; the first
+        // recorded exception (in thread order) wins.
+        for (std::exception_ptr& eptr : eptrs) {
+            if (eptr) {
+                std::rethrow_exception(eptr);
+            }
+        }
+    }
+
+    // Deletion functions
+
+    // Removes an item from a bucket, decrementing the associated counter as
+    // well.
+    // NOTE(review): callers appear to hold the lock covering bucket_ind when
+    // calling this — confirm before reusing elsewhere.
+    void del_from_bucket(Bucket& b, const size_type bucket_ind,
+                         const size_type slot) {
+        b.eraseKV(allocator_, slot);
+        --locks_[lock_ind(bucket_ind)].elem_counter();
+    }
+
+    // Empties the table, destroying every stored element and resetting the
+    // per-lock element counters so the size reads as zero. It assumes the
+    // locks are taken as necessary.
+    cuckoo_status cuckoo_clear() {
+        // First destroy the contents of every bucket...
+        const size_type num_buckets = buckets_.size();
+        for (size_type b = 0; b < num_buckets; ++b) {
+            buckets_[b].clear(allocator_);
+        }
+        // ...then zero the element counter attached to each lock.
+        const size_type num_locks = locks_.size();
+        for (size_type l = 0; l < num_locks; ++l) {
+            locks_[l].elem_counter() = 0;
+        }
+        return ok;
+    }
+
+    // Rehashing functions
+
+    // Forces a resize to exactly hashpower n via cuckoo_expand_simple.
+    // Returns false (without doing anything) when n is already the current
+    // hashpower, true when the expansion succeeded.
+    template <typename LOCK_T>
+    bool cuckoo_rehash(size_type n) {
+        if (hashpower() == n) {
+            return false;
+        }
+        return ok == cuckoo_expand_simple<LOCK_T, manual_resize>(n);
+    }
+
+    // Resizes the table so it can hold at least n elements. The request is
+    // translated to a hashpower with reserve_calc; if that matches the
+    // current hashpower nothing happens and false is returned.
+    template <typename LOCK_T>
+    bool cuckoo_reserve(size_type n) {
+        const size_type target_hp = reserve_calc(n);
+        if (target_hp == hashpower()) {
+            return false;
+        }
+        return ok == cuckoo_expand_simple<LOCK_T, manual_resize>(target_hp);
+    }
+
+    // Miscellaneous functions
+
+    // Publishes a new hashpower. Uses a release store so readers of the
+    // atomic hashpower_ observe it in order with the preceding table updates.
+    void set_hashpower(size_type val) {
+        hashpower_.store(val, std::memory_order_release);
+    }
+
+    // reserve_calc takes in a parameter specifying a certain number of slots
+    // for a table and returns the smallest hashpower that will hold n elements
+    // (never less than 1, since blog2 starts at 1).
+    static size_type reserve_calc(const size_type n) {
+        // Number of buckets needed, rounding up.
+        const size_type buckets = (n + slot_per_bucket() - 1) / slot_per_bucket();
+        size_type blog2;
+        // Shift in size_type rather than the original 1UL: on LLP64 platforms
+        // unsigned long is 32 bits while size_type may be 64, so `1UL << blog2`
+        // could overflow (undefined behavior) for large n.
+        for (blog2 = 1; (size_type(1) << blog2) < buckets; ++blog2);
+        assert(n <= hashsize(blog2) * slot_per_bucket());
+        return blog2;
+    }
+
+    // This class is a friend for unit testing
+    friend class UnitTestInternalAccess;
+
+    // Member variables
+
+    // 2**hashpower is the number of buckets. This cannot be changed unless all
+    // the locks are taken on the table. Since it is still read and written by
+    // multiple threads not necessarily synchronized by a lock, we keep it
+    // atomic
+    std::atomic<size_type> hashpower_;
+
+    // The hash function
+    hasher hash_fn_;
+
+    // The equality function
+    key_equal eq_fn_;
+
+    // The allocator
+    allocator_type allocator_;
+
+    // vector of buckets. The size or memory location of the buckets cannot be
+    // changed unless all the locks are taken on the table. Thus, it is only
+    // safe to access the buckets_ vector when you have at least one lock held.
+    buckets_t buckets_;
+
+    // array of locks. Marked mutable, so that const methods can take locks.
+    // Even though it's a vector, it should not ever change in size after the
+    // initial allocation.
+    mutable locks_t locks_;
+
+    // a lock to synchronize expansions
+    expansion_lock_t expansion_lock_;
+
+    // stores the minimum load factor allowed for automatic expansions. Whenever
+    // an automatic expansion is triggered (during an insertion where cuckoo
+    // hashing fails, for example), we check the load factor against this
+    // double, and throw an exception if it's lower than this value. It can be
+    // used to signal when the hash function is bad or the input adversarial.
+    std::atomic<double> minimum_load_factor_;
+
+    // stores the maximum hashpower allowed for any expansions. If set to
+    // NO_MAXIMUM_HASHPOWER, this limit will be disregarded.
+    std::atomic<size_type> maximum_hashpower_;
+
+public:
+    /**
+     * An ownership wrapper around a @ref cuckoohash_map table instance. When
+     * given a table instance, it takes all the locks on the table, blocking all
+     * outside operations on the table. Because the locked_table has unique
+     * ownership of the table, it can provide a set of operations on the table
+     * that aren't possible in a concurrent context.
+     *
+     * The locked_table interface is very similar to the STL unordered_map
+     * interface, and for functions whose signatures correspond to unordered_map
+     * methods, the behavior should be mostly the same.
+     */
+    class locked_table {
+    public:
+        /** @name Type Declarations */
+        /**@{*/
+
+        // All aliases simply re-export the owning map's types.
+        using key_type = cuckoohash_map::key_type;
+        using mapped_type = cuckoohash_map::mapped_type;
+        using value_type = cuckoohash_map::value_type;
+        using size_type = cuckoohash_map::size_type;
+        using difference_type = cuckoohash_map::difference_type;
+        using hasher = cuckoohash_map::hasher;
+        using key_equal = cuckoohash_map::key_equal;
+        using allocator_type = cuckoohash_map::allocator_type;
+        using reference = cuckoohash_map::reference;
+        using const_reference = cuckoohash_map::const_reference;
+        using pointer = cuckoohash_map::pointer;
+        using const_pointer = cuckoohash_map::const_pointer;
+
+        /**
+         * A constant iterator over a @ref locked_table, which allows read-only
+         * access to the elements of the table. It fulfills the
+         * BidirectionalIterator concept.
+         */
+        class const_iterator {
+        public:
+            using difference_type = locked_table::difference_type;
+            using value_type = locked_table::value_type;
+            using pointer = locked_table::const_pointer;
+            using reference = locked_table::const_reference;
+            using iterator_category = std::bidirectional_iterator_tag;
+
+            // Value-initialize the members: the previous empty body left them
+            // indeterminate, so comparing two default-constructed iterators
+            // (a common "null iterator" idiom) read uninitialized memory,
+            // which is undefined behavior. Now they compare equal.
+            const_iterator() : buckets_(nullptr), index_(0), slot_(0) {}
+
+            // Return true if the iterators are from the same locked table and
+            // location, false otherwise.
+            bool operator==(const const_iterator& it) const {
+                return buckets_ == it.buckets_ &&
+                    index_ == it.index_ && slot_ == it.slot_;
+            }
+
+            bool operator!=(const const_iterator& it) const {
+                return !(operator==(it));
+            }
+
+            reference operator*() const {
+                return (*buckets_)[index_].kvpair(slot_);
+            }
+
+            pointer operator->() const {
+                return &(*buckets_)[index_].kvpair(slot_);
+            }
+
+            // Advance the iterator to the next item in the table, or to the end
+            // of the table. Returns the iterator at its new position.
+            const_iterator& operator++() {
+                // Move forward until we get to a slot that is occupied, or we
+                // get to the end
+                ++slot_;
+                for (; index_ < buckets_->size(); ++index_) {
+                    for (; slot_ < slot_per_bucket(); ++slot_) {
+                        if ((*buckets_)[index_].occupied(slot_)) {
+                            return *this;
+                        }
+                    }
+                    slot_ = 0;
+                }
+                assert(std::make_pair(index_, slot_) == end_pos(*buckets_));
+                return *this;
+            }
+
+            // Advance the iterator to the next item in the table, or to the end
+            // of the table. Returns the iterator at its old position.
+            const_iterator operator++(int) {
+                const_iterator old(*this);
+                ++(*this);
+                return old;
+            }
+
+            // Move the iterator back to the previous item in the table. Returns
+            // the iterator at its new position.
+            const_iterator& operator--() {
+                // Move backward until we get to the beginning. Behavior is
+                // undefined if we are iterating at the first element, so we can
+                // assume we'll reach an element. This means we'll never reach
+                // index_ == 0 and slot_ == 0.
+                if (slot_ == 0) {
+                    --index_;
+                    slot_ = slot_per_bucket() - 1;
+                } else {
+                    --slot_;
+                }
+                while (!(*buckets_)[index_].occupied(slot_)) {
+                    if (slot_ == 0) {
+                        --index_;
+                        slot_ = slot_per_bucket() - 1;
+                    } else {
+                        --slot_;
+                    }
+                }
+                return *this;
+            }
+
+            //! Move the iterator back to the previous item in the table.
+            //! Returns the iterator at its old position. Behavior is undefined
+            //! if the iterator is at the beginning.
+            const_iterator operator--(int) {
+                const_iterator old(*this);
+                --(*this);
+                return old;
+            }
+
+        protected:
+            // The buckets owned by the locked table being iterated over. Even
+            // though const_iterator cannot modify the buckets, we don't mark
+            // them const so that the mutable iterator can derive from this
+            // class. Also, since iterators should be default constructible,
+            // copyable, and movable, we have to make this a raw pointer type.
+            buckets_t* buckets_;
+
+            // The bucket index of the item being pointed to. For implementation
+            // convenience, we let it take on negative values.
+            size_type index_;
+
+            // The slot in the bucket of the item being pointed to. For
+            // implementation convenience, we let it take on negative values.
+            size_type slot_;
+
+            // Returns the position signifying the end of the table
+            static std::pair<size_type, size_type>
+            end_pos(const buckets_t& buckets) {
+                return std::make_pair(buckets.size(), 0);
+            }
+
+            // The private constructor is used by locked_table to create
+            // iterators from scratch. If the given index_-slot_ pair is at the
+            // end of the table, or the given spot is occupied, stay. Otherwise,
+            // step forward to the next data item, or to the end of the table.
+            const_iterator(buckets_t& buckets, size_type index,
+                           size_type slot) noexcept
+                : buckets_(std::addressof(buckets)), index_(index), slot_(slot) {
+                if (std::make_pair(index_, slot_) != end_pos(*buckets_) &&
+                    !(*buckets_)[index_].occupied(slot_)) {
+                    operator++();
+                }
+            }
+
+            friend class locked_table;
+        };
+
+        /**
+         * An iterator over a @ref locked_table, which allows read-write access
+         * to elements of the table. It fulfills the BidirectionalIterator
+         * concept.
+         *
+         * Derives all traversal logic from const_iterator; only the
+         * dereference operators are re-declared to return mutable references.
+         */
+        class iterator : public const_iterator {
+        public:
+            using pointer = cuckoohash_map::pointer;
+            using reference = cuckoohash_map::reference;
+
+            iterator() {}
+
+            bool operator==(const iterator& it) const {
+                return const_iterator::operator==(it);
+            }
+
+            bool operator!=(const iterator& it) const {
+                return const_iterator::operator!=(it);
+            }
+
+            // Keep the const overload visible alongside the mutable one.
+            using const_iterator::operator*;
+            reference operator*() {
+                return (*const_iterator::buckets_)[
+                    const_iterator::index_].kvpair(const_iterator::slot_);
+            }
+
+            using const_iterator::operator->;
+            pointer operator->() {
+                return &(*const_iterator::buckets_)[
+                    const_iterator::index_].kvpair(const_iterator::slot_);
+            }
+
+            // The increment/decrement operators delegate to the base class and
+            // only adjust the return type.
+            iterator& operator++() {
+                const_iterator::operator++();
+                return *this;
+            }
+
+            iterator operator++(int) {
+                iterator old(*this);
+                const_iterator::operator++();
+                return old;
+            }
+
+            iterator& operator--() {
+                const_iterator::operator--();
+                return *this;
+            }
+
+            iterator operator--(int) {
+                iterator old(*this);
+                const_iterator::operator--();
+                return old;
+            }
+
+        private:
+            iterator(buckets_t& buckets, size_type index, size_type slot) noexcept
+                : const_iterator(buckets, index, slot) {}
+
+            friend class locked_table;
+        };
+
+        /**@}*/
+
+        /** @name Table Parameters */
+        /**@{*/
+
+        // Forwards to the owning map's compile-time slots-per-bucket constant.
+        static constexpr size_type slot_per_bucket() {
+            return cuckoohash_map::slot_per_bucket();
+        }
+
+        /**@}*/
+
+        /** @name Constructors, Destructors, and Assignment */
+        /**@{*/
+
+        // A locked_table is move-only: copying would duplicate unique
+        // ownership of the table's locks.
+        locked_table() = delete;
+        locked_table(const locked_table&) = delete;
+        locked_table& operator=(const locked_table&) = delete;
+
+        locked_table(locked_table&& lt) noexcept
+            : map_(std::move(lt.map_)),
+              unlocker_(std::move(lt.unlocker_))
+            {}
+
+        locked_table& operator=(locked_table&& lt) noexcept {
+            // Release our own locks before adopting the other table's.
+            unlock();
+            map_ = std::move(lt.map_);
+            unlocker_ = std::move(lt.unlocker_);
+            return *this;
+        }
+
+        /**
+         * Unlocks the table, thereby freeing the locks on the table, but also
+         * invalidating all iterators and table operations with this object. It
+         * is idempotent.
+         */
+        void unlock() {
+            unlocker_.unlock();
+        }
+
+        /**@}*/
+
+        /**@}*/
+
+        /** @name Table Details
+         *
+         * Methods for getting information about the table. Many are identical
+         * to their @ref cuckoohash_map counterparts. Only new functions or
+         * those with different behavior are documented.
+         *
+         */
+        /**@{*/
+
+        /**
+         * Returns whether the locked table has ownership of the table
+         *
+         * @return true if it still has ownership, false otherwise
+         */
+        bool is_active() const {
+            return unlocker_.is_active();
+        }
+
+        // The accessors below simply delegate to the underlying map through
+        // the map_ reference wrapper.
+        hasher hash_function() const {
+            return map_.get().hash_function();
+        }
+
+        key_equal key_eq() const {
+            return map_.get().key_eq();
+        }
+
+        allocator_type get_allocator() const {
+            return map_.get().get_allocator();
+        }
+
+        size_type hashpower() const {
+            return map_.get().hashpower();
+        }
+
+        size_type bucket_count() const {
+            return map_.get().bucket_count();
+        }
+
+        bool empty() const {
+            return map_.get().empty();
+        }
+
+        size_type size() const {
+            return map_.get().size();
+        }
+
+        size_type capacity() const {
+            return map_.get().capacity();
+        }
+
+        double load_factor() const {
+            return map_.get().load_factor();
+        }
+
+        void minimum_load_factor(const double mlf) {
+            map_.get().minimum_load_factor(mlf);
+        }
+
+        double minimum_load_factor() {
+            return map_.get().minimum_load_factor();
+        }
+
+        void maximum_hashpower(size_type mhp) {
+            map_.get().maximum_hashpower(mhp);
+        }
+
+        size_type maximum_hashpower() {
+            return map_.get().maximum_hashpower();
+        }
+
+        /**@}*/
+
+        /**@{*/
+        /**
+         * Returns an iterator to the beginning of the table. If the table is
+         * empty, it will point past the end of the table.
+         *
+         * @return an iterator to the beginning of the table
+         */
+
+        iterator begin() {
+            return iterator(map_.get().buckets_, 0, 0);
+        }
+
+        const_iterator begin() const {
+            return const_iterator(map_.get().buckets_, 0, 0);
+        }
+
+        const_iterator cbegin() const {
+            return begin();
+        }
+
+        /**@}*/
+
+        /** @name Iterators */
+        /**@{*/
+
+        /**@{*/
+        /**
+         * Returns an iterator past the end of the table.
+         *
+         * @return an iterator past the end of the table
+         */
+
+        // end_pos() yields a (bucket index, slot) pair; the casts unpack it
+        // into the iterator constructor's size_type arguments.
+        iterator end() {
+            const auto end_pos = const_iterator::end_pos(map_.get().buckets_);
+            return iterator(map_.get().buckets_,
+                            static_cast<size_type>(end_pos.first),
+                            static_cast<size_type>(end_pos.second));
+        }
+
+        const_iterator end() const {
+            const auto end_pos = const_iterator::end_pos(map_.get().buckets_);
+            return const_iterator(map_.get().buckets_,
+                                  static_cast<size_type>(end_pos.first),
+                                  static_cast<size_type>(end_pos.second));
+        }
+
+        const_iterator cend() const {
+            return end();
+        }
+
+        /**@}*/
+
+        /**@}*/
+
+        /** @name Modifiers */
+        /**@{*/
+
+        // Destroys all elements; cuckoo_clear assumes the necessary locks are
+        // held, which this locked_table owns while active.
+        void clear() {
+            map_.get().cuckoo_clear();
+        }
+
+        /**
+         * This behaves like the @c unordered_map::try_emplace method, but with
+         * the same argument lifetime properties as @ref cuckoohash_map::insert.
+         * It will always invalidate all iterators, due to the possibilities of
+         * cuckoo hashing and expansion.
+         */
+        template <typename K, typename... Args>
+        std::pair<iterator, bool> insert(K&& key, Args&&... val) {
+            // The key is copied/moved into a local first, so it stays valid
+            // regardless of what the caller passed.
+            K k(std::forward<K>(key));
+            hash_value hv = map_.get().hashed_key(k);
+            // NOTE(review): locking_inactive presumably because this
+            // locked_table already holds every lock — confirm.
+            auto b = map_.get().template snapshot_and_lock_two<locking_inactive>(hv);
+            table_position pos = map_.get().cuckoo_insert_loop(hv, b, k);
+            if (pos.status == ok) {
+                map_.get().add_to_bucket(
+                    pos.index, pos.slot, hv.partial, k,
+                    std::forward<Args>(val)...);
+            } else {
+                // The only other status cuckoo_insert_loop can leave us is a
+                // duplicate key; pos then points at the existing entry.
+                assert(pos.status == failure_key_duplicated);
+            }
+            return std::make_pair(
+                iterator(map_.get().buckets_, pos.index, pos.slot),
+                pos.status == ok);
+        }
+
+        // Erases the element at pos. The returned iterator is constructed at
+        // the erased position; the iterator constructor steps forward to the
+        // next occupied item (or the end) since that slot is now empty.
+        iterator erase(const_iterator pos) {
+            map_.get().del_from_bucket(map_.get().buckets_[pos.index_],
+                                       pos.index_,
+                                       pos.slot_);
+            return iterator(map_.get().buckets_, pos.index_, pos.slot_);
+        }
+
+        iterator erase(iterator pos) {
+            map_.get().del_from_bucket(map_.get().buckets_[pos.index_],
+                                       pos.index_,
+                                       pos.slot_);
+            return iterator(map_.get().buckets_, pos.index_, pos.slot_);
+        }
+
+        // Erases the element matching key, if any. Returns the number of
+        // elements removed (0 or 1), mirroring unordered_map::erase.
+        template <typename K>
+        size_type erase(const K& key) {
+            const hash_value hv = map_.get().hashed_key(key);
+            const auto b = map_.get().
+                template snapshot_and_lock_two<locking_inactive>(hv);
+            const table_position pos = map_.get().cuckoo_find(
+                key, hv.partial, b.first(), b.second());
+            if (pos.status == ok) {
+                map_.get().del_from_bucket(map_.get().buckets_[pos.index],
+                                           pos.index, pos.slot);
+                return 1;
+            } else {
+                return 0;
+            }
+        }
+
+        /**@}*/
+
+        /** @name Lookup */
+        /**@{*/
+
+        template <typename K>
+        iterator find(const K& key) {
+            const hash_value hv = map_.get().hashed_key(key);
+            const auto b = map_.get().
+                template snapshot_and_lock_two<locking_inactive>(hv);
+            const table_position pos = map_.get().cuckoo_find(
+                key, hv.partial, b.first(), b.second());
+            if (pos.status == ok) {
+                return iterator(map_.get().buckets_, pos.index, pos.slot);
+            } else {
+                return end();
+            }
+        }
+
+        template <typename K>
+        const_iterator find(const K& key) const {
+            const hash_value hv = map_.get().hashed_key(key);
+            const auto b = map_.get().
+                template snapshot_and_lock_two<locking_inactive>(hv);
+            const table_position pos = map_.get().cuckoo_find(
+                key, hv.partial, b.first(), b.second());
+            if (pos.status == ok) {
+                return const_iterator(map_.get().buckets_, pos.index, pos.slot);
+            } else {
+                return end();
+            }
+        }
+
+        template <typename K>
+        mapped_type& at(const K& key) {
+            auto it = find(key);
+            if (it == end()) {
+                throw std::out_of_range("key not found in table");
+            } else {
+                return it->second;
+            }
+        }
+
+        template <typename K>
+        const mapped_type& at(const K& key) const {
+            auto it = find(key);
+            if (it == end()) {
+                throw std::out_of_range("key not found in table");
+            } else {
+                return it->second;
+            }
+        }
+
+        /**
+         * This function has the same lifetime properties as @ref
+         * cuckoohash_map::insert, except that the value is default-constructed,
+         * with no parameters, if it is not already in the table.
+         */
+        template <typename K>
+        T& operator[](K&& key) {
+            auto result = insert(std::forward<K>(key));
+            return result.first->second;
+        }
+
+        template <typename K>
+        size_type count(const K& key) const {
+            const hash_value hv = map_.get().hashed_key(key);
+            const auto b = map_.get().
+                template snapshot_and_lock_two<locking_inactive>(hv);
+            return map_.get().cuckoo_find(
+                key, hv.partial, b.first(), b.second()).status == ok ? 1 : 0;
+        }
+
+        template <typename K>
+        std::pair<iterator, iterator> equal_range(const K& key) {
+            auto it = find(key);
+            if (it == end()) {
+                return std::make_pair(it, it);
+            } else {
+                auto start_it = it++;
+                return std::make_pair(start_it, it);
+            }
+        }
+
+        template <typename K>
+        std::pair<const_iterator, const_iterator> equal_range(const K& key) const {
+            auto it = find(key);
+            if (it == end()) {
+                return std::make_pair(it, it);
+            } else {
+                auto start_it = it++;
+                return std::make_pair(start_it, it);
+            }
+        }
+
+        /**@}*/
+
+        /** @name Re-sizing */
+        /**@{*/
+
+        /**
+         * This has the same behavior as @ref cuckoohash_map::rehash, except
+         * that we don't return anything.
+         */
+        void rehash(size_type n) {
+            map_.get().template cuckoo_rehash<locking_inactive>(n);
+        }
+
+        /**
+         * This has the same behavior as @ref cuckoohash_map::reserve, except
+         * that we don't return anything.
+         */
+        void reserve(size_type n) {
+            map_.get().template cuckoo_reserve<locking_inactive>(n);
+        }
+
+        /**@}*/
+
+        /** @name Comparison  */
+        /**@{*/
+
+        bool operator==(const locked_table& lt) const {
+            if (size() != lt.size()) {
+                return false;
+            }
+            for (const auto& elem : lt) {
+                auto it = find(elem.first);
+                if (it == end() || it->second != elem.second) {
+                    return false;
+                }
+            }
+            return true;
+        }
+
+        bool operator!=(const locked_table& lt) const {
+            if (size() != lt.size()) {
+                return true;
+            }
+            for (const auto& elem : lt) {
+                auto it = find(elem.first);
+                if (it == end() || it->second != elem.second) {
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        /**@}*/
+
+    private:
+        // The constructor locks the entire table. We keep this constructor
+        // private (but expose it to the cuckoohash_map class), since we don't
+        // want users calling it.
+        locked_table(cuckoohash_map& map) noexcept
+            : map_(map), unlocker_(
+                map_.get().template snapshot_and_lock_all<locking_active>())
+            {}
+
+        // A reference to the map owned by the table
+        std::reference_wrapper<cuckoohash_map> map_;
+        // A manager for all the locks we took on the table.
+        AllBuckets<locking_active> unlocker_;
+
+        friend class cuckoohash_map;
+    };
+};
+
+#endif // _CUCKOOHASH_MAP_HH
diff --git a/ext/include/cuckoo/cuckoohash_util.hh b/ext/include/cuckoo/cuckoohash_util.hh
new file mode 100644
index 0000000..cdb31c6
--- /dev/null
+++ b/ext/include/cuckoo/cuckoohash_util.hh
@@ -0,0 +1,136 @@
+/** \file */
+
+#ifndef _CUCKOOHASH_UTIL_HH
+#define _CUCKOOHASH_UTIL_HH
+
+#include <exception>
+#include <thread>
+#include <utility>
+#include <vector>
+#include "cuckoohash_config.hh" // for LIBCUCKOO_DEBUG
+
+#if LIBCUCKOO_DEBUG
+//! When \ref LIBCUCKOO_DEBUG is nonzero, LIBCUCKOO_DBG will print out status
+//! messages in various situations
+#  define LIBCUCKOO_DBG(fmt, ...)                                          \
+     fprintf(stderr, "\x1b[32m""[libcuckoo:%s:%d:%lu] " fmt"" "\x1b[0m",   \
+             __FILE__,__LINE__, std::hash<std::thread::id>()(std::this_thread::get_id()), \
+             __VA_ARGS__)
+#else
+//! When \ref LIBCUCKOO_DEBUG is 0, LIBCUCKOO_DBG does nothing
+#  define LIBCUCKOO_DBG(fmt, ...)  do {} while (0)
+#endif
+
+/**
+ * alignas() requires GCC >= 4.9, so we stick with the alignment attribute for
+ * GCC.
+ */
+#ifdef __GNUC__
+#define LIBCUCKOO_ALIGNAS(x) __attribute__((aligned(x)))
+#else
+#define LIBCUCKOO_ALIGNAS(x) alignas(x)
+#endif
+
+/**
+ * At higher warning levels, MSVC produces an annoying warning that alignment
+ * may cause wasted space: "structure was padded due to __declspec(align())".
+ */
+#ifdef _MSC_VER
+#define LIBCUCKOO_SQUELCH_PADDING_WARNING __pragma(warning(suppress : 4324))
+#else
+#define LIBCUCKOO_SQUELCH_PADDING_WARNING
+#endif
+
+/**
+ * thread_local requires GCC >= 4.8 and is not supported in some clang versions,
+ * so we use __thread if thread_local is not supported
+ */
+#define LIBCUCKOO_THREAD_LOCAL thread_local
+#if defined(__clang__)
+#  if !__has_feature(cxx_thread_local)
+#    undef LIBCUCKOO_THREAD_LOCAL
+#    define LIBCUCKOO_THREAD_LOCAL __thread
+#  endif
+#elif defined(__GNUC__)
+#  if __GNUC__ == 4 && __GNUC_MINOR__ < 8
+#    undef LIBCUCKOO_THREAD_LOCAL
+#    define LIBCUCKOO_THREAD_LOCAL __thread
+#  endif
+#endif
+
+//! For enabling certain methods based on a condition. Here's an example.
+//! LIBCUCKOO_ENABLE_IF(sizeof(int) == 4, int) method() {
+//!     ...
+//! }
+#define LIBCUCKOO_ENABLE_IF(condition, return_type)                       \
+    template <class Bogus = void*>                                        \
+    typename std::enable_if<sizeof(Bogus) && condition, return_type>::type
+
+/**
+ * Thrown when an automatic expansion is triggered, but the load factor of the
+ * table is below a minimum threshold, which can be set by the \ref
+ * cuckoohash_map::minimum_load_factor method. This can happen if the hash
+ * function does not properly distribute keys, or for certain adversarial
+ * workloads.
+ */
+class libcuckoo_load_factor_too_low : public std::exception {
+public:
+    /**
+     * Constructor
+     *
+     * @param lf the load factor of the table when the exception was thrown
+     */
+    libcuckoo_load_factor_too_low(const double lf)
+        : load_factor_(lf) {}
+
+    /**
+     * @return a descriptive error message
+     */
+    virtual const char* what() const noexcept override {
+        return "Automatic expansion triggered when load factor was below "
+            "minimum threshold";
+    }
+
+    /**
+     * @return the load factor of the table when the exception was thrown
+     */
+    double load_factor() const {
+        return load_factor_;
+    }
+private:
+    const double load_factor_;
+};
+
+/**
+ * Thrown when an expansion is triggered, but the hashpower specified is greater
+ * than the maximum, which can be set with the \ref
+ * cuckoohash_map::maximum_hashpower method.
+ */
+class libcuckoo_maximum_hashpower_exceeded : public std::exception {
+public:
+    /**
+     * Constructor
+     *
+     * @param hp the hash power we were trying to expand to
+     */
+    libcuckoo_maximum_hashpower_exceeded(const size_t hp)
+        : hashpower_(hp) {}
+
+    /**
+     * @return a descriptive error message
+     */
+    virtual const char* what() const noexcept override {
+        return "Expansion beyond maximum hashpower";
+    }
+
+    /**
+     * @return the hashpower we were trying to expand to
+     */
+    size_t hashpower() const {
+        return hashpower_;
+    }
+private:
+    const size_t hashpower_;
+};
+
+#endif // _CUCKOOHASH_UTIL_HH
diff --git a/ext/include/cuckoo/libcuckoo_lazy_array.hh b/ext/include/cuckoo/libcuckoo_lazy_array.hh
new file mode 100644
index 0000000..99c4b5b
--- /dev/null
+++ b/ext/include/cuckoo/libcuckoo_lazy_array.hh
@@ -0,0 +1,202 @@
+/** \file */
+
+#ifndef _LIBCUCKOO_LAZY_ARRAY_HH
+#define _LIBCUCKOO_LAZY_ARRAY_HH
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+#include "cuckoohash_util.hh"
+
+/**
+ * A fixed-size array, broken up into segments that are dynamically allocated
+ * upon request. It is the user's responsibility to make sure they only access
+ * allocated parts of the array.
+ *
+ * @tparam OFFSET_BITS the number of bits of the index used as the offset within
+ * a segment
+ * @tparam SEGMENT_BITS the number of bits of the index used as the segment
+ * index
+ * @tparam T the type of element stored in the container
+ * @tparam Alloc the allocator used to allocate data
+ */
+template <uint8_t OFFSET_BITS, uint8_t SEGMENT_BITS,
+          class T, class Alloc = std::allocator<T>
+          >
+class libcuckoo_lazy_array {
+public:
+    using value_type = T;
+    using allocator_type = Alloc;
+private:
+    using traits_ = std::allocator_traits<allocator_type>;
+public:
+    using size_type = std::size_t;
+    using reference = value_type&;
+    using const_reference = const value_type&;
+
+    static_assert(SEGMENT_BITS + OFFSET_BITS <= sizeof(size_type)*8,
+                  "The number of segment and offset bits cannot exceed "
+                  " the number of bits in a size_type");
+
+    /**
+     * Default constructor. Creates an empty array with no allocated segments.
+     */
+    libcuckoo_lazy_array(const allocator_type& allocator = Alloc())
+        noexcept(noexcept(Alloc()))
+        : segments_{{nullptr}}, allocated_segments_(0), allocator_(allocator) {}
+
+    /**
+     * Constructs an array with enough segments allocated to fit @p target
+     * elements. Each allocated element is default-constructed.
+     *
+     * @param target the number of elements to allocate space for
+     */
+    libcuckoo_lazy_array(size_type target,
+                         const allocator_type& allocator = Alloc())
+        noexcept(noexcept(Alloc()))
+        : libcuckoo_lazy_array(allocator) {
+        segments_.fill(nullptr);
+        resize(target);
+    }
+
+    libcuckoo_lazy_array(const libcuckoo_lazy_array&) = delete;
+    libcuckoo_lazy_array& operator=(const libcuckoo_lazy_array&) = delete;
+
+    /**
+     * Move constructor
+     *
+     * @param arr the array being moved
+     */
+    libcuckoo_lazy_array(libcuckoo_lazy_array&& arr) noexcept
+        : segments_(arr.segments_),
+          allocated_segments_(arr.allocated_segments_),
+          allocator_(std::move(arr.allocator_)) {
+        // Deactivate the array by setting its allocated segment count to 0
+        arr.allocated_segments_ = 0;
+    }
+
+    /**
+     * Destructor. Destroys all elements allocated in the array.
+     */
+    ~libcuckoo_lazy_array()
+        noexcept(std::is_nothrow_destructible<T>::value) {
+        clear();
+    }
+
+    /**
+     * Destroys all elements allocated in the array.
+     */
+    void clear() {
+        for (size_type i = 0; i < allocated_segments_; ++i) {
+            destroy_array(segments_[i]);
+            segments_[i] = nullptr;
+        }
+    }
+
+    /**
+     * Index operator
+     *
+     * @return a reference to the data at the given index
+     */
+    reference operator[](size_type i) {
+        assert(get_segment(i) < allocated_segments_);
+        return segments_[get_segment(i)][get_offset(i)];
+    }
+
+    /**
+     * Const index operator
+     *
+     * @return a const reference to the data at the given index
+     */
+    const_reference operator[](size_type i) const {
+        assert(get_segment(i) < allocated_segments_);
+        return segments_[get_segment(i)][get_offset(i)];
+    }
+
+    /**
+     * Returns the number of elements the array has allocated space for
+     *
+     * @return current size of the array
+     */
+    size_type size() const {
+        return allocated_segments_ * SEGMENT_SIZE;
+    }
+
+    /**
+     * Returns the maximum number of elements the array can hold
+     *
+     * @return maximum size of the array
+     */
+    static constexpr size_type max_size() {
+        return 1UL << (OFFSET_BITS + SEGMENT_BITS);
+    }
+
+    /**
+     * Allocate enough space for @p target elements, not exceeding the capacity
+     * of the array. Under no circumstance will the array be shrunk.
+     *
+     * @param target the number of elements to ensure space is allocated for
+     */
+    void resize(size_type target) {
+        target = std::min(target, max_size());
+        if (target == 0) {
+            return;
+        }
+        const size_type last_segment = get_segment(target - 1);
+        for (size_type i = allocated_segments_; i <= last_segment; ++i) {
+            segments_[i] = create_array();
+        }
+        allocated_segments_ = last_segment + 1;
+    }
+
+private:
+    static constexpr size_type SEGMENT_SIZE = 1UL << OFFSET_BITS;
+    static constexpr size_type NUM_SEGMENTS = 1UL << SEGMENT_BITS;
+    static constexpr size_type OFFSET_MASK = SEGMENT_SIZE - 1;
+
+    std::array<T*, NUM_SEGMENTS> segments_;
+    size_type allocated_segments_;
+    allocator_type allocator_;
+
+    static size_type get_segment(size_type i) {
+        return i >> OFFSET_BITS;
+    }
+
+    static size_type get_offset(size_type i) {
+        return i & OFFSET_MASK;
+    }
+
+    // Allocates a SEGMENT_SIZE-sized array and default-initializes each element
+    typename traits_::pointer create_array() {
+        typename traits_::pointer arr = traits_::allocate(
+            allocator_, SEGMENT_SIZE);
+        // Initialize all the elements, safely deallocating and destroying
+        // everything in case of error.
+        size_type i;
+        try {
+            for (i = 0; i < SEGMENT_SIZE; ++i) {
+                traits_::construct(allocator_, &arr[i]);
+            }
+        } catch (...) {
+            for (size_type j = 0; j < i; ++j) {
+                traits_::destroy(allocator_, &arr[j]);
+            }
+            traits_::deallocate(allocator_, arr, SEGMENT_SIZE);
+            throw;
+        }
+        return arr;
+    }
+
+    // Destroys every element of a SEGMENT_SIZE-sized array and then deallocates
+    // the memory.
+    void destroy_array(typename traits_::pointer arr) {
+        for (size_type i = 0; i < SEGMENT_SIZE; ++i) {
+            traits_::destroy(allocator_, &arr[i]);
+        }
+        traits_::deallocate(allocator_, arr, SEGMENT_SIZE);
+    }
+};
+
+#endif // _LIBCUCKOO_LAZY_ARRAY_HH
diff --git a/ext/include/llvm/Support/MathExtras.h b/ext/include/llvm/Support/MathExtras.h
index e6f8ffa..8c0b110 100644
--- a/ext/include/llvm/Support/MathExtras.h
+++ b/ext/include/llvm/Support/MathExtras.h
@@ -19,6 +19,7 @@
 #include <cassert>
 #include <cstring>
 #include <type_traits>
+#include <cstdint>
 
 namespace llvm {
 /// \brief The behavior an operation has on an input of 0.
diff --git a/ext/src/CMakeLists.txt b/ext/src/CMakeLists.txt
index b702eb9..9038354 100644
--- a/ext/src/CMakeLists.txt
+++ b/ext/src/CMakeLists.txt
@@ -12,5 +12,7 @@ add_subdirectory(samtools)
 add_subdirectory(cppformat)
 add_subdirectory(ssw)
 add_subdirectory(cityhash)
+add_subdirectory(getopt_pp)
 add_subdirectory(llvm)
-add_subdirectory(htrie)
\ No newline at end of file
+add_subdirectory(htrie)
+add_subdirectory(bwa)
diff --git a/ext/src/getopt_pp/CMakeLists.txt b/ext/src/getopt_pp/CMakeLists.txt
new file mode 100644
index 0000000..e594e19
--- /dev/null
+++ b/ext/src/getopt_pp/CMakeLists.txt
@@ -0,0 +1,5 @@
+project(getopt_pp CXX)
+
+add_library(getopt_pp STATIC
+            getopt_pp.cpp)
+
diff --git a/ext/include/getopt_pp/getopt_pp.cpp b/ext/src/getopt_pp/getopt_pp.cpp
similarity index 99%
rename from ext/include/getopt_pp/getopt_pp.cpp
rename to ext/src/getopt_pp/getopt_pp.cpp
index 71ccc65..485b53a 100644
--- a/ext/include/getopt_pp/getopt_pp.cpp
+++ b/ext/src/getopt_pp/getopt_pp.cpp
@@ -19,7 +19,7 @@ GetOpt_pp:	Yet another C++ version of getopt.
 */
 
 #include <unistd.h>
-#include "getopt_pp.h"
+#include "getopt_pp/getopt_pp.h"
 
 #if __APPLE__
 extern char** environ;
diff --git a/ext/src/llvm/CMakeLists.txt b/ext/src/llvm/CMakeLists.txt
index 6993c91..f34f99a 100644
--- a/ext/src/llvm/CMakeLists.txt
+++ b/ext/src/llvm/CMakeLists.txt
@@ -19,6 +19,9 @@ check_symbol_exists(backtrace "execinfo.h" HAVE_BACKTRACE)
 
 find_library(DL_LIB NAMES "dl")
 
+add_definitions(-D__STDC_CONSTANT_MACROS)
+add_definitions(-D__STDC_LIMIT_MACROS)
+
 # FIXME: Signal handler return type, currently hardcoded to 'void'
 set(RETSIGTYPE void)
 
diff --git a/manual.html b/manual.html
index 6b02feb..e94fbe6 100644
--- a/manual.html
+++ b/manual.html
@@ -1,6 +1,6 @@
 <html>
 <head>
-    <title>SPAdes 3.9.1 Manual</title>
+    <title>SPAdes 3.10.1 Manual</title>
     <style type="text/css">
         .code {
             background-color: lightgray;
@@ -8,7 +8,7 @@
     </style>
 </head>
 <body>
-<h1>SPAdes 3.9.1 Manual</h1>
+<h1>SPAdes 3.10.1 Manual</h1>
 
 1. <a href="#sec1">About SPAdes</a><br>
     1.1. <a href="#sec1.1">Supported data types</a><br>
@@ -34,17 +34,17 @@
 <a name="sec1"></a>
 <h2>1. About SPAdes</h2>
 <p>
-    SPAdes – St. Petersburg genome assembler – is intended for both standard isolates and single-cell MDA bacteria assemblies. This manual will help you to install and run SPAdes. 
-SPAdes version 3.9.1 was released under GPLv2 on December 4, 2016 and can be downloaded from  <a href="http://cab.spbu.ru/software/spades/" target="_blank">http://cab.spbu.ru/software/spades/</a>.
+    SPAdes – St. Petersburg genome assembler – is an assembly toolkit containing various assembly pipelines. This manual will help you to install and run SPAdes. 
+SPAdes version 3.10.1 was released under GPLv2 on March 1, 2017 and can be downloaded from  <a href="http://cab.spbu.ru/software/spades/" target="_blank">http://cab.spbu.ru/software/spades/</a>.
 
 <a name="sec1.1"></a>
 <h3>1.1 Supported data types</h3>
 <p>
     The current version of SPAdes works with Illumina or IonTorrent reads and is capable of providing hybrid assemblies using PacBio, Oxford Nanopore and Sanger reads. You can also provide additional contigs that will be used as long reads.
 <p>
-    Version 3.9.1 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. Note, that SPAdes was initially designed for small genomes. It was tested on single-cell and standard bacterial and fungal data sets. SPAdes is not intended for larger genomes (e.g. mammalian size genomes). For such purposes you can use it at your own risk.
+    Version 3.10.1 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. Note that SPAdes was initially designed for small genomes. It was tested on bacterial (both single-cell MDA and standard isolates), fungal and other small genomes. SPAdes is not intended for larger genomes (e.g. mammalian size genomes). For such purposes you can use it at your own risk.
 <p>
-    SPAdes 3.9.1 includes the following additional pipelines:
+    SPAdes 3.10.1 includes the following additional pipelines:
     <ul>
         <li>dipSPAdes – a module for assembling highly polymorphic diploid genomes (see <a href="dipspades_manual.html" target="_blank">dipSPAdes manual</a>).</li>
         <li>metaSPAdes – a pipeline for metagenomic data sets (see <a href="#meta">metaSPAdes options</a>). </li>
@@ -76,7 +76,7 @@ SPAdes comes in several separate modules:
         <li> <a href="http://spades.bioinf.spbau.ru/spades_test_datasets/ecoli_sc/" target="_blank">MDA single-cell <i>E. coli</i></a>; 6.3 Gb, 29M reads, 2x100bp, insert size ~ 270bp </li>
     </ul>
 <p>
-    We ran SPAdes with default parameters using 16 threads on a server with Intel Xeon 2.27GHz processors. BayesHammer runs in approximately 30-40 minutes and takes up to 8Gb of RAM to perform read error correction on each data set. Assembly takes about 15 minutes for the <i>E. coli</i> isolate data set and 30 minutes for the <i>E. coli</i> single-cell data set. Both data sets require about 9Gb of RAM (see notes below). MismatchCorrector runs for about 25 minutes on both data sets, and r [...]
+    We ran SPAdes with default parameters using 16 threads on a server with Intel Xeon 2.27GHz processors and SSD hard drive. BayesHammer runs in approximately half an hour and takes up to 8Gb of RAM to perform read error correction on each data set. Assembly takes about 10 minutes for the <i>E. coli</i> isolate data set and 20 minutes for the <i>E. coli</i> single-cell data set. Both data sets require about 8Gb of RAM (see notes below). MismatchCorrector runs for about 15 minutes on bot [...]
 
 <p>
     <table border="1" cellpadding="4" cellspacing="0">
@@ -98,42 +98,42 @@ SPAdes comes in several separate modules:
 
         <tr>
             <td> BayesHammer </td>
+            <td align="center"> 29m </td>
+            <td align="center"> 7.1 </td>
+            <td align="center"> 11 </td>
             <td align="center"> 34m </td>
-            <td align="center"> 7.7 </td>
-            <td align="center"> 8.4 </td>
-            <td align="center"> 40m </td>
-            <td align="center"> 7.5 </td>
+            <td align="center"> 7.6 </td>
             <td align="center"> 8.8 </td>
         </tr>
 
         <tr>
             <td> SPAdes </td>
-            <td align="center"> 16m </td>
-            <td align="center"> 8.6 </td>
+            <td align="center"> 11m </td>
+            <td align="center"> 8.4 </td>
             <td align="center"> 1.6 </td>
-            <td align="center"> 28m </td>
-            <td align="center"> 8.6 </td>
-            <td align="center"> 2.7 </td>
+            <td align="center"> 17m </td>
+            <td align="center"> 8 </td>
+            <td align="center"> 3.0 </td>
         </tr>
 
         <tr>
             <td> MismatchCorrector </td>
-            <td align="center"> 22m </td>
+            <td align="center"> 13m </td>
             <td align="center"> 1.8 </td>
-            <td align="center"> 21.8 </td>
-            <td align="center"> 26m </td>
+            <td align="center"> 27.1 </td>
+            <td align="center"> 16m </td>
             <td align="center"> 1.8 </td>
-            <td align="center"> 22.9 </td>
+            <td align="center"> 25.5 </td>
         </tr>
 
         <tr>
             <td> Whole pipeline </td>
-            <td align="center"> 1h 12m </td>
-            <td align="center"> 8.6 </td>
-            <td align="center"> 24.2 </td>
-            <td align="center"> 1h 34m </td>
-            <td align="center"> 8.6 </td>
-            <td align="center"> 25.5 </td>
+            <td align="center"> 53m </td>
+            <td align="center"> 8.4 </td>
+            <td align="center"> 29.6 </td>
+            <td align="center"> 1h 7m </td>
+            <td align="center"> 8 </td>
+            <td align="center"> 28.3 </td>
         </tr>
     </table>
 
@@ -143,7 +143,7 @@ SPAdes comes in several separate modules:
         <li> Running SPAdes without preliminary read error correction (e.g. without BayesHammer or IonHammer) will likely require more time and memory. </li>
         <li> Each module removes its temporary files as soon as it finishes. </li>
         <li> SPAdes uses 512 Mb per thread for buffers, which results in higher memory consumption. If you set memory limit manually, SPAdes will use smaller buffers and thus less RAM. </li>
-        <li> Performance statistics is given for SPAdes version 3.9.1. </li>
+        <li> Performance statistics are given for SPAdes version 3.10.1. </li>
     </ul>
 
 
@@ -157,13 +157,13 @@ SPAdes comes in several separate modules:
 <h3>2.1 Downloading SPAdes Linux binaries</h3>
 
 <p>
-    To download <a href="http://cab.spbu.ru/files/release3.9.1/SPAdes-3.9.1-Linux.tar.gz">SPAdes Linux binaries</a> and extract them, go to the directory in which you wish SPAdes to be installed and run:
+    To download <a href="http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1-Linux.tar.gz">SPAdes Linux binaries</a> and extract them, go to the directory in which you wish SPAdes to be installed and run:
 
 <pre  class="code">
 <code>
-    wget http://cab.spbu.ru/files/release3.9.1/SPAdes-3.9.1-Linux.tar.gz
-    tar -xzf SPAdes-3.9.1-Linux.tar.gz
-    cd SPAdes-3.9.1-Linux/bin/
+    wget http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1-Linux.tar.gz
+    tar -xzf SPAdes-3.10.1-Linux.tar.gz
+    cd SPAdes-3.10.1-Linux/bin/
 </code>
 </pre>
 
@@ -192,13 +192,13 @@ SPAdes comes in several separate modules:
 <h3>2.2 Downloading SPAdes binaries for Mac</h3>
 
 <p>
-    To obtain <a href="http://cab.spbu.ru/files/release3.9.1/SPAdes-3.9.1-Darwin.tar.gz">SPAdes binaries for Mac</a>, go to the directory in which you wish SPAdes to be installed and run:
+    To obtain <a href="http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1-Darwin.tar.gz">SPAdes binaries for Mac</a>, go to the directory in which you wish SPAdes to be installed and run:
 
 <pre  class="code">
 <code>
-    curl http://cab.spbu.ru/files/release3.9.1/SPAdes-3.9.1-Darwin.tar.gz -o SPAdes-3.9.1-Darwin.tar.gz
-    tar -zxf SPAdes-3.9.1-Darwin.tar.gz
-    cd SPAdes-3.9.1-Darwin/bin/
+    curl http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1-Darwin.tar.gz -o SPAdes-3.10.1-Darwin.tar.gz
+    tar -zxf SPAdes-3.10.1-Darwin.tar.gz
+    cd SPAdes-3.10.1-Darwin/bin/
 </code>
 </pre>
 
@@ -229,20 +229,20 @@ SPAdes comes in several separate modules:
 <p>
     If you wish to compile SPAdes by yourself you will need the following libraries to be pre-installed:
     <ul>
-        <li>g++ (version 4.7 or higher)</li>
+        <li>g++ (version 4.8.2 or higher)</li>
         <li>cmake (version 2.8.12 or higher)</li>
         <li>zlib</li>
         <li>libbz2</li>
     </ul>
 
 <p>
-    If you meet these requirements, you can download the <a href="http://cab.spbu.ru/files/release3.9.1/SPAdes-3.9.1.tar.gz">SPAdes source code</a>: 
+    If you meet these requirements, you can download the <a href="http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1.tar.gz">SPAdes source code</a>: 
 
 <pre class="code">
 <code>
-    wget http://cab.spbu.ru/files/release3.9.1/SPAdes-3.9.1.tar.gz
-    tar -xzf SPAdes-3.9.1.tar.gz
-    cd SPAdes-3.9.1
+    wget http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1.tar.gz
+    tar -xzf SPAdes-3.10.1.tar.gz
+    cd SPAdes-3.10.1
 </code>
 </pre>
 
@@ -325,18 +325,23 @@ SPAdes comes in several separate modules:
 
 <pre class="code">
 <code>
-===== Assembling finished. 
+===== Assembling finished. Used k-mer sizes: 21, 33, 55
 
  * Corrected reads are in spades_test/corrected/
  * Assembled contigs are in spades_test/contigs.fasta
  * Assembled scaffolds are in spades_test/scaffolds.fasta
+ * Assembly graph is in spades_test/assembly_graph.fastg
+ * Assembly graph in GFA format is in spades_test/assembly_graph.gfa
+ * Paths in the assembly graph corresponding to the contigs are in spades_test/contigs.paths
+ * Paths in the assembly graph corresponding to the scaffolds are in spades_test/scaffolds.paths
 
 ======= SPAdes pipeline finished.
 
-SPAdes log can be found here: /home/andrey/ablab/algorithmic-biology/assembler/spades_test/spades.log
+========= TEST PASSED CORRECTLY.
 
-Thank you for using SPAdes!
+SPAdes log can be found here: spades_test/spades.log
 
+Thank you for using SPAdes!
 </code>
 </pre>
 
@@ -349,7 +354,7 @@ Thank you for using SPAdes!
     SPAdes takes as input paired-end reads, mate-pairs and single (unpaired) reads in FASTA and FASTQ. For IonTorrent data SPAdes also supports unpaired reads in unmapped BAM format (like the one produced by Torrent Server). However, in order to run read error correction, reads should be in FASTQ or BAM format. Sanger, Oxford Nanopore and PacBio CLR reads can be provided in both formats since SPAdes does not run error correction for these types of data.
 
 <p>
-    To run SPAdes 3.9.1 you need at least one library of the following types:
+    To run SPAdes 3.10.1 you need at least one library of the following types:
     <ul>
         <li>Illumina paired-end/high-quality mate-pairs/unpaired reads</li>
         <li>IonTorrent paired-end/high-quality mate-pairs/unpaired reads</li>
@@ -620,7 +625,7 @@ In comparison to the <code>--continue</code> option, you can change some of the
 </p>
 
 <p>
-    <code>--pe<b><#></b>-<b><or></b> <file_name> </code><br>
+    <code>--pe<b><#></b>-<b><or></b> </code><br>
         Orientation of reads for paired-end library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9; <code><b><or></b></code> = "fr","rf","ff"). <br>
         The default orientation for paired-end libraries is forward-reverse. For example, to specify reverse-forward orientation for the second paired-end library, you should use the flag:
     <code>--pe2-rf </code><br>
@@ -642,7 +647,7 @@ In comparison to the <code>--continue</code> option, you can change some of the
         File with right reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9).
 </p>
 <p>
-    <code>--mp<b><#></b>-<b><or></b> <file_name> </code><br>
+    <code>--mp<b><#></b>-<b><or></b> </code><br>
         Orientation of reads for mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9; <code><b><or></b></code> = "fr","rf","ff"). <br>
         The default orientation for mate-pair libraries is reverse-forward. For example, to specify forward-forward orientation for the first mate-pair library, you should use the flag:
     <code>--mp1-ff </code><br>
@@ -671,7 +676,7 @@ In comparison to the <code>--continue</code> option, you can change some of the
 </p>
 
 <p>
-    <code>--hqmp<b><#></b>-<b><or></b> <file_name> </code><br>
+    <code>--hqmp<b><#></b>-<b><or></b> </code><br>
         Orientation of reads for high-quality mate-pair library number <code><b><#></b></code> (<code><b><#></b></code> = 1,2,..,9; <code><b><or></b></code> = "fr","rf","ff"). <br>
         The default orientation for high-quality mate-pair libraries is forward-reverse. For example, to specify reverse-forward orientation for the first high-quality mate-pair library, you should use the flag:
     <code>--hqmp1-rf </code><br>
@@ -1098,6 +1103,7 @@ However, it might be tricky to fully utilize the advantages of  long reads you h
         <li><code><output_dir>/corrected/</code> directory contains reads corrected by BayesHammer in <code>*.fastq.gz</code> files; if compression is disabled, reads are stored in uncompressed  <code>*.fastq</code> files</li>
         <li><code><output_dir>/scaffolds.fasta</code> contains resulting scaffolds (recommended for use as resulting sequences)</li>
         <li><code><output_dir>/contigs.fasta</code> contains resulting contigs</li>
+        <li><code><output_dir>/assembly_graph.gfa</code> contains SPAdes assembly graph and scaffolds paths in <a href="https://github.com/GFA-spec/GFA-spec/blob/master/GFA1.md" target="_blank">GFA 1.0 format</a></li>
         <li><code><output_dir>/assembly_graph.fastg</code> contains SPAdes assembly graph in <a href="http://fastg.sourceforge.net/FASTG_Spec_v1.00.pdf" target="_blank">FASTG format</a></li>
         <li><code><output_dir>/contigs.paths</code> contains paths in the assembly graph corresponding to contigs.fasta (see details below)</li>
         <li><code><output_dir>/scaffolds.paths</code> contains paths in the assembly graph corresponding to scaffolds.fasta (see details below)</li>
@@ -1106,8 +1112,11 @@ However, it might be tricky to fully utilize the advantages of  long reads you h
 <p>
    Contigs/scaffolds names in SPAdes output FASTA files have the following format: <br><code>>NODE_3_length_237403_cov_243.207_ID_45</code><br> Here <code>3</code> is the number of the contig/scaffold, <code>237403</code> is the sequence length in nucleotides and <code>243.207</code> is the k-mer coverage for the last (largest) k value used. Note that the k-mer coverage is always lower than the read (per-base) coverage. 
 
+<p> 
+    In general, SPAdes uses two techniques for joining contigs into scaffolds. First one relies on read pairs and tries to estimate the size of the gap separating contigs. The second one relies on the assembly graph: e.g. if two contigs are separated by a complex tandem repeat, that cannot be resolved exactly, contigs are joined into scaffold with a fixed gap size of 100 bp. Contigs produced by SPAdes do not contain N symbols. 
+
 <p>
-   To view FASTG files we recommend to use <a href="http://rrwick.github.io/Bandage/" target="_blank">Bandage visualization tool</a>. Note that sequences stored in <code>assembly_graph.fastg</code> correspond to contigs before repeat resolution (edges of the assembly graph). Paths corresponding to contigs after repeat resolution (scaffolding) are stored in <code>contigs.paths</code> (<code>scaffolds.paths</code>) in the format accepted by Bandage (see <a href="https://github.com/rrwick/B [...]
+   To view FASTG and GFA files we recommend to use <a href="http://rrwick.github.io/Bandage/" target="_blank">Bandage visualization tool</a>. Note that sequences stored in <code>assembly_graph.fastg</code> correspond to contigs before repeat resolution (edges of the assembly graph). Paths corresponding to contigs after repeat resolution (scaffolding) are stored in <code>contigs.paths</code> (<code>scaffolds.paths</code>) in the format accepted by Bandage (see <a href="https://github.com/ [...]
 
 <p> Let the contig with the name <code>NODE_5_length_100000_cov_215.651_ID_5</code> consist of the following edges of the assembly graph:
 <pre>
diff --git a/metaspades.py b/metaspades.py
index c19e2fb..ff31c92 100755
--- a/metaspades.py
+++ b/metaspades.py
@@ -186,10 +186,15 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                                       len(options_storage.SHORT_READS_TYPES.keys()) +
                                       len(options_storage.LONG_READS_TYPES))]  # "[{}]*num" doesn't work here!
 
+    # auto detecting SPAdes mode (rna, meta, etc) if it is not a rerun (--continue or --restart-from)
+    if secondary_filling or not options_storage.will_rerun(options):
+        mode = options_storage.get_mode()
+        if mode is not None:
+            options.append(('--' + mode, ''))
+
     # for parsing options from "previous run command"
     options_storage.continue_mode = False
     options_storage.k_mers = None
-
     for opt, arg in options:
         if opt == '-o':
             if not skip_output_dir:
@@ -197,13 +202,17 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                     support.error('-o option was specified at least twice')
                 options_storage.output_dir = abspath(expanduser(arg))
                 options_storage.dict_of_rel2abs[arg] = options_storage.output_dir
+                support.check_path_is_ascii(options_storage.output_dir, 'output directory')
         elif opt == "--tmp-dir":
             options_storage.tmp_dir = abspath(expanduser(arg))
             options_storage.dict_of_rel2abs[arg] = options_storage.tmp_dir
+            support.check_path_is_ascii(options_storage.tmp_dir, 'directory for temporary files')
         elif opt == "--configs-dir":
             options_storage.configs_dir = support.check_dir_existence(arg)
         elif opt == "--reference":
             options_storage.reference = support.check_file_existence(arg, 'reference', log)
+        elif opt == "--series-analysis":
+            options_storage.series_analysis = support.check_file_existence(arg, 'series-analysis', log)
         elif opt == "--dataset":
             options_storage.dataset_yaml_filename = support.check_file_existence(arg, 'dataset', log)
 
@@ -225,16 +234,12 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         elif opt == "--sc":
             options_storage.single_cell = True
         elif opt == "--meta":
-            #FIXME temporary solution
-            options_storage.single_cell = True
             options_storage.meta = True
         elif opt == "--large-genome":
             options_storage.large_genome = True
         elif opt == "--plasmid":
             options_storage.plasmid = True
         elif opt == "--rna":
-            #FIXME temporary solution
-            options_storage.single_cell = True
             options_storage.rna = True
         elif opt == "--iontorrent":
             options_storage.iontorrent = True
@@ -327,9 +332,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
             show_usage(0, show_hidden=True)
 
         elif opt == "--test":
-            options_storage.set_test_options()
-            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
-            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
+            options_storage.set_test_options()            
             #break
         elif opt == "--diploid":
             options_storage.diploid_mode = True
@@ -338,6 +341,14 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         else:
             raise ValueError
 
+    if options_storage.test_mode:
+        if options_storage.plasmid:
+            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset_plasmid/pl1.fq.gz"), dataset_data)
+            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset_plasmid/pl2.fq.gz"), dataset_data)
+        else:
+            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
+            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
+
     if not options_storage.output_dir:
         support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).", log)
     if not os.path.isdir(options_storage.output_dir):
@@ -372,7 +383,6 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                 existing_dataset_data = None
     if existing_dataset_data is not None:
         dataset_data = existing_dataset_data
-        options_storage.dataset_yaml_filename = processed_dataset_fpath
     else:
         if options_storage.dataset_yaml_filename:
             try:
@@ -384,8 +394,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         else:
             dataset_data = support.correct_dataset(dataset_data)
             dataset_data = support.relative2abs_paths(dataset_data, os.getcwd())
-        options_storage.dataset_yaml_filename = processed_dataset_fpath
-        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
+    options_storage.dataset_yaml_filename = processed_dataset_fpath
 
     support.check_dataset_reads(dataset_data, options_storage.only_assembler, log)
     if not support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_CONSTRUCTION):
@@ -397,6 +406,9 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if len(support.get_lib_ids_by_type(dataset_data, 'paired-end')) > 1:
             support.error('you cannot specify more than one paired-end library in RNA-Seq mode!')
 
+    if existing_dataset_data is None:
+        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
+
     options_storage.set_default_values()
     ### FILLING cfg
     cfg["common"] = empty_config()
@@ -412,6 +424,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
     cfg["common"].__dict__["max_threads"] = options_storage.threads
     cfg["common"].__dict__["max_memory"] = options_storage.memory
     cfg["common"].__dict__["developer_mode"] = options_storage.developer_mode
+    if options_storage.series_analysis:
+        cfg["common"].__dict__["series_analysis"] = options_storage.series_analysis
 
     # dataset section
     cfg["dataset"].__dict__["yaml_filename"] = options_storage.dataset_yaml_filename
@@ -430,6 +444,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         cfg["error_correction"].__dict__["iontorrent"] = options_storage.iontorrent
         if options_storage.meta or options_storage.large_genome:
             cfg["error_correction"].__dict__["count_filter_singletons"] = 1
+        if options_storage.read_buffer_size:
+            cfg["error_correction"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
 
     # assembly
     if not options_storage.only_error_correction:
@@ -449,9 +465,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if options_storage.read_buffer_size:
             cfg["assembly"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
         cfg["assembly"].__dict__["correct_scaffolds"] = options_storage.correct_scaffolds
-        if options_storage.large_genome:
-            cfg["assembly"].__dict__["bwa_paired"] = True
-            cfg["assembly"].__dict__["scaffolding_mode"] = "old_pe_2015"
+
     #corrector can work only if contigs exist (not only error correction)
     if (not options_storage.only_error_correction) and options_storage.mismatch_corrector:
         cfg["mismatch_corrector"] = empty_config()
@@ -500,9 +514,11 @@ def check_cfg_for_partial_run(cfg, type='restart-from'):  # restart-from ot stop
                 support.error("failed to " + action + " K=%s because this K " % k_str + verb + " not specified!")
 
 
-def get_options_from_params(params_filename, spades_py_name=None):
+def get_options_from_params(params_filename, running_script):
+    cmd_line = None
+    options = None
     if not os.path.isfile(params_filename):
-        return None, None
+        return cmd_line, options, "failed to parse command line of the previous run (%s not found)!" % params_filename
     params = open(params_filename, 'r')
     cmd_line = params.readline().strip()
     spades_prev_version = None
@@ -512,20 +528,22 @@ def get_options_from_params(params_filename, spades_py_name=None):
             break
     params.close()
     if spades_prev_version is None:
-        support.error("failed to parse SPAdes version of the previous run! "
-                      "Please restart from the beginning or specify another output directory.")
+        return cmd_line, options, "failed to parse SPAdes version of the previous run!"
     if spades_prev_version.strip() != spades_version.strip():
-        support.error("SPAdes version of the previous run (%s) is not equal to the current version of SPAdes (%s)! "
-                      "Please restart from the beginning or specify another output directory."
-                      % (spades_prev_version.strip(), spades_version.strip()))
-    if spades_py_name is None or cmd_line.find(os.path.basename(spades_py_name)) == -1:
-        spades_py_name = 'spades.py'  # try default name
-    else:
-        spades_py_name = os.path.basename(spades_py_name)
-    spades_py_pos = cmd_line.find(spades_py_name)
-    if spades_py_pos == -1:
-        return None, None
-    return cmd_line, cmd_line[spades_py_pos + len(spades_py_name):].split('\t')
+        return cmd_line, options, "SPAdes version of the previous run (%s) is not equal " \
+                                  "to the current version of SPAdes (%s)!" \
+                                  % (spades_prev_version.strip(), spades_version.strip())
+    if 'Command line: ' not in cmd_line or '\t' not in cmd_line:
+        return cmd_line, options, "failed to parse executable script of the previous run!"
+    options = cmd_line.split('\t')[1:]
+    prev_running_script = cmd_line.split('\t')[0][len('Command line: '):]
+    # we cannot restart/continue spades.py run with metaspades.py/rnaspades.py/etc and vice versa
+    if os.path.basename(prev_running_script) != os.path.basename(running_script):
+        return cmd_line, options, "executable script of the previous run (%s) is not equal " \
+                                  "to the current executable script (%s)!" \
+                                  % (os.path.basename(prev_running_script),
+                                     os.path.basename(running_script))
+    return cmd_line, options, ""
 
 
 def show_version():
@@ -554,19 +572,14 @@ def main(args):
 
     support.check_binaries(bin_home, log)
 
-    # auto detecting SPAdes mode (rna, meta, etc)
-    mode = options_storage.get_mode()
-    if mode is not None:
-        args.append('--' + mode)
-
     # parse options and safe all parameters to cfg
     options = args
     cfg, dataset_data = fill_cfg(options, log)
 
     if options_storage.continue_mode:
-        cmd_line, options = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
-        if not options:
-            support.error("failed to parse command line of the previous run! Please restart from the beginning or specify another output directory.")
+        cmd_line, options, err_msg = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
+        if err_msg:
+            support.error(err_msg + " Please restart from the beginning or specify another output directory.")
         cfg, dataset_data = fill_cfg(options, log, secondary_filling=True)
         if options_storage.restart_from:
             check_cfg_for_partial_run(cfg, type='restart-from')
@@ -699,6 +712,7 @@ def main(args):
         result_contigs_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_name)
         result_scaffolds_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_name)
         result_assembly_graph_filename = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name)
+        result_assembly_graph_filename_gfa = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name_gfa)
         result_contigs_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_paths)
         result_scaffolds_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_paths)
         result_transcripts_filename = os.path.join(cfg["common"].output_dir, options_storage.transcripts_name)
@@ -715,6 +729,7 @@ def main(args):
             spades_cfg.__dict__["result_contigs"] = result_contigs_filename
             spades_cfg.__dict__["result_scaffolds"] = result_scaffolds_filename
             spades_cfg.__dict__["result_graph"] = result_assembly_graph_filename
+            spades_cfg.__dict__["result_graph_gfa"] = result_assembly_graph_filename_gfa
             spades_cfg.__dict__["result_contigs_paths"] = result_contigs_paths_filename
             spades_cfg.__dict__["result_scaffolds_paths"] = result_scaffolds_paths_filename
             spades_cfg.__dict__["result_transcripts"] = result_transcripts_filename
@@ -844,7 +859,9 @@ def main(args):
                         if options_storage.continue_mode and os.path.isfile(corrected):
                             log.info("\n== Skipping processing of " + assembly_type + " (already processed)\n")
                             continue
-
+                        if not os.path.isfile(assembled) or os.path.getsize(assembled) == 0:
+                            log.info("\n== Skipping processing of " + assembly_type + " (empty file)\n")
+                            continue
                         support.continue_from_here(log)
                         log.info("\n== Processing of " + assembly_type + "\n")
 
@@ -855,7 +872,6 @@ def main(args):
                         corr_cfg = merge_configs(cfg["mismatch_corrector"], cfg["common"])
                         
                         result_corrected_filename = os.path.join(tmp_dir_for_corrector, "corrected_contigs.fasta")
-
                         corrector_logic.run_corrector( tmp_configs_dir, bin_home, corr_cfg,
                         ext_python_modules_home, log, assembled, result_corrected_filename)
 
@@ -893,6 +909,9 @@ def main(args):
                 if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename):
                     message = " * Assembly graph is in " + support.process_spaces(result_assembly_graph_filename)
                     log.info(message)
+                if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename_gfa):
+                    message = " * Assembly graph in GFA format is in " + support.process_spaces(result_assembly_graph_filename_gfa)
+                    log.info(message)
                 if "assembly" in cfg and os.path.isfile(result_contigs_paths_filename):
                     message = " * Paths in the assembly graph corresponding to the contigs are in " + \
                               support.process_spaces(result_contigs_paths_filename)
@@ -933,7 +952,10 @@ def main(args):
                         result_fasta = list(support.read_fasta(result_filename))
                         # correctness check: should be one contig of length 1000 bp
                         correct_number = 1
-                        correct_length = 1000
+                        if options_storage.plasmid:
+                            correct_length = 9667
+                        else:
+                            correct_length = 1000
                         if not len(result_fasta):
                             support.error("TEST FAILED: %s does not contain contigs!" % result_filename)
                         elif len(result_fasta) > correct_number:
diff --git a/plasmidspades.py b/plasmidspades.py
index c19e2fb..ff31c92 100755
--- a/plasmidspades.py
+++ b/plasmidspades.py
@@ -186,10 +186,15 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                                       len(options_storage.SHORT_READS_TYPES.keys()) +
                                       len(options_storage.LONG_READS_TYPES))]  # "[{}]*num" doesn't work here!
 
+    # auto detecting SPAdes mode (rna, meta, etc) if it is not a rerun (--continue or --restart-from)
+    if secondary_filling or not options_storage.will_rerun(options):
+        mode = options_storage.get_mode()
+        if mode is not None:
+            options.append(('--' + mode, ''))
+
     # for parsing options from "previous run command"
     options_storage.continue_mode = False
     options_storage.k_mers = None
-
     for opt, arg in options:
         if opt == '-o':
             if not skip_output_dir:
@@ -197,13 +202,17 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                     support.error('-o option was specified at least twice')
                 options_storage.output_dir = abspath(expanduser(arg))
                 options_storage.dict_of_rel2abs[arg] = options_storage.output_dir
+                support.check_path_is_ascii(options_storage.output_dir, 'output directory')
         elif opt == "--tmp-dir":
             options_storage.tmp_dir = abspath(expanduser(arg))
             options_storage.dict_of_rel2abs[arg] = options_storage.tmp_dir
+            support.check_path_is_ascii(options_storage.tmp_dir, 'directory for temporary files')
         elif opt == "--configs-dir":
             options_storage.configs_dir = support.check_dir_existence(arg)
         elif opt == "--reference":
             options_storage.reference = support.check_file_existence(arg, 'reference', log)
+        elif opt == "--series-analysis":
+            options_storage.series_analysis = support.check_file_existence(arg, 'series-analysis', log)
         elif opt == "--dataset":
             options_storage.dataset_yaml_filename = support.check_file_existence(arg, 'dataset', log)
 
@@ -225,16 +234,12 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         elif opt == "--sc":
             options_storage.single_cell = True
         elif opt == "--meta":
-            #FIXME temporary solution
-            options_storage.single_cell = True
             options_storage.meta = True
         elif opt == "--large-genome":
             options_storage.large_genome = True
         elif opt == "--plasmid":
             options_storage.plasmid = True
         elif opt == "--rna":
-            #FIXME temporary solution
-            options_storage.single_cell = True
             options_storage.rna = True
         elif opt == "--iontorrent":
             options_storage.iontorrent = True
@@ -327,9 +332,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
             show_usage(0, show_hidden=True)
 
         elif opt == "--test":
-            options_storage.set_test_options()
-            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
-            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
+            options_storage.set_test_options()            
             #break
         elif opt == "--diploid":
             options_storage.diploid_mode = True
@@ -338,6 +341,14 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         else:
             raise ValueError
 
+    if options_storage.test_mode:
+        if options_storage.plasmid:
+            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset_plasmid/pl1.fq.gz"), dataset_data)
+            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset_plasmid/pl2.fq.gz"), dataset_data)
+        else:
+            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
+            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
+
     if not options_storage.output_dir:
         support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).", log)
     if not os.path.isdir(options_storage.output_dir):
@@ -372,7 +383,6 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                 existing_dataset_data = None
     if existing_dataset_data is not None:
         dataset_data = existing_dataset_data
-        options_storage.dataset_yaml_filename = processed_dataset_fpath
     else:
         if options_storage.dataset_yaml_filename:
             try:
@@ -384,8 +394,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         else:
             dataset_data = support.correct_dataset(dataset_data)
             dataset_data = support.relative2abs_paths(dataset_data, os.getcwd())
-        options_storage.dataset_yaml_filename = processed_dataset_fpath
-        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
+    options_storage.dataset_yaml_filename = processed_dataset_fpath
 
     support.check_dataset_reads(dataset_data, options_storage.only_assembler, log)
     if not support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_CONSTRUCTION):
@@ -397,6 +406,9 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if len(support.get_lib_ids_by_type(dataset_data, 'paired-end')) > 1:
             support.error('you cannot specify more than one paired-end library in RNA-Seq mode!')
 
+    if existing_dataset_data is None:
+        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
+
     options_storage.set_default_values()
     ### FILLING cfg
     cfg["common"] = empty_config()
@@ -412,6 +424,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
     cfg["common"].__dict__["max_threads"] = options_storage.threads
     cfg["common"].__dict__["max_memory"] = options_storage.memory
     cfg["common"].__dict__["developer_mode"] = options_storage.developer_mode
+    if options_storage.series_analysis:
+        cfg["common"].__dict__["series_analysis"] = options_storage.series_analysis
 
     # dataset section
     cfg["dataset"].__dict__["yaml_filename"] = options_storage.dataset_yaml_filename
@@ -430,6 +444,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         cfg["error_correction"].__dict__["iontorrent"] = options_storage.iontorrent
         if options_storage.meta or options_storage.large_genome:
             cfg["error_correction"].__dict__["count_filter_singletons"] = 1
+        if options_storage.read_buffer_size:
+            cfg["error_correction"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
 
     # assembly
     if not options_storage.only_error_correction:
@@ -449,9 +465,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if options_storage.read_buffer_size:
             cfg["assembly"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
         cfg["assembly"].__dict__["correct_scaffolds"] = options_storage.correct_scaffolds
-        if options_storage.large_genome:
-            cfg["assembly"].__dict__["bwa_paired"] = True
-            cfg["assembly"].__dict__["scaffolding_mode"] = "old_pe_2015"
+
     #corrector can work only if contigs exist (not only error correction)
     if (not options_storage.only_error_correction) and options_storage.mismatch_corrector:
         cfg["mismatch_corrector"] = empty_config()
@@ -500,9 +514,11 @@ def check_cfg_for_partial_run(cfg, type='restart-from'):  # restart-from ot stop
                 support.error("failed to " + action + " K=%s because this K " % k_str + verb + " not specified!")
 
 
-def get_options_from_params(params_filename, spades_py_name=None):
+def get_options_from_params(params_filename, running_script):
+    cmd_line = None
+    options = None
     if not os.path.isfile(params_filename):
-        return None, None
+        return cmd_line, options, "failed to parse command line of the previous run (%s not found)!" % params_filename
     params = open(params_filename, 'r')
     cmd_line = params.readline().strip()
     spades_prev_version = None
@@ -512,20 +528,22 @@ def get_options_from_params(params_filename, spades_py_name=None):
             break
     params.close()
     if spades_prev_version is None:
-        support.error("failed to parse SPAdes version of the previous run! "
-                      "Please restart from the beginning or specify another output directory.")
+        return cmd_line, options, "failed to parse SPAdes version of the previous run!"
     if spades_prev_version.strip() != spades_version.strip():
-        support.error("SPAdes version of the previous run (%s) is not equal to the current version of SPAdes (%s)! "
-                      "Please restart from the beginning or specify another output directory."
-                      % (spades_prev_version.strip(), spades_version.strip()))
-    if spades_py_name is None or cmd_line.find(os.path.basename(spades_py_name)) == -1:
-        spades_py_name = 'spades.py'  # try default name
-    else:
-        spades_py_name = os.path.basename(spades_py_name)
-    spades_py_pos = cmd_line.find(spades_py_name)
-    if spades_py_pos == -1:
-        return None, None
-    return cmd_line, cmd_line[spades_py_pos + len(spades_py_name):].split('\t')
+        return cmd_line, options, "SPAdes version of the previous run (%s) is not equal " \
+                                  "to the current version of SPAdes (%s)!" \
+                                  % (spades_prev_version.strip(), spades_version.strip())
+    if 'Command line: ' not in cmd_line or '\t' not in cmd_line:
+        return cmd_line, options, "failed to parse executable script of the previous run!"
+    options = cmd_line.split('\t')[1:]
+    prev_running_script = cmd_line.split('\t')[0][len('Command line: '):]
+    # we cannot restart/continue spades.py run with metaspades.py/rnaspades.py/etc and vice versa
+    if os.path.basename(prev_running_script) != os.path.basename(running_script):
+        return cmd_line, options, "executable script of the previous run (%s) is not equal " \
+                                  "to the current executable script (%s)!" \
+                                  % (os.path.basename(prev_running_script),
+                                     os.path.basename(running_script))
+    return cmd_line, options, ""
 
 
 def show_version():
@@ -554,19 +572,14 @@ def main(args):
 
     support.check_binaries(bin_home, log)
 
-    # auto detecting SPAdes mode (rna, meta, etc)
-    mode = options_storage.get_mode()
-    if mode is not None:
-        args.append('--' + mode)
-
     # parse options and safe all parameters to cfg
     options = args
     cfg, dataset_data = fill_cfg(options, log)
 
     if options_storage.continue_mode:
-        cmd_line, options = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
-        if not options:
-            support.error("failed to parse command line of the previous run! Please restart from the beginning or specify another output directory.")
+        cmd_line, options, err_msg = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
+        if err_msg:
+            support.error(err_msg + " Please restart from the beginning or specify another output directory.")
         cfg, dataset_data = fill_cfg(options, log, secondary_filling=True)
         if options_storage.restart_from:
             check_cfg_for_partial_run(cfg, type='restart-from')
@@ -699,6 +712,7 @@ def main(args):
         result_contigs_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_name)
         result_scaffolds_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_name)
         result_assembly_graph_filename = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name)
+        result_assembly_graph_filename_gfa = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name_gfa)
         result_contigs_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_paths)
         result_scaffolds_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_paths)
         result_transcripts_filename = os.path.join(cfg["common"].output_dir, options_storage.transcripts_name)
@@ -715,6 +729,7 @@ def main(args):
             spades_cfg.__dict__["result_contigs"] = result_contigs_filename
             spades_cfg.__dict__["result_scaffolds"] = result_scaffolds_filename
             spades_cfg.__dict__["result_graph"] = result_assembly_graph_filename
+            spades_cfg.__dict__["result_graph_gfa"] = result_assembly_graph_filename_gfa
             spades_cfg.__dict__["result_contigs_paths"] = result_contigs_paths_filename
             spades_cfg.__dict__["result_scaffolds_paths"] = result_scaffolds_paths_filename
             spades_cfg.__dict__["result_transcripts"] = result_transcripts_filename
@@ -844,7 +859,9 @@ def main(args):
                         if options_storage.continue_mode and os.path.isfile(corrected):
                             log.info("\n== Skipping processing of " + assembly_type + " (already processed)\n")
                             continue
-
+                        if not os.path.isfile(assembled) or os.path.getsize(assembled) == 0:
+                            log.info("\n== Skipping processing of " + assembly_type + " (empty file)\n")
+                            continue
                         support.continue_from_here(log)
                         log.info("\n== Processing of " + assembly_type + "\n")
 
@@ -855,7 +872,6 @@ def main(args):
                         corr_cfg = merge_configs(cfg["mismatch_corrector"], cfg["common"])
                         
                         result_corrected_filename = os.path.join(tmp_dir_for_corrector, "corrected_contigs.fasta")
-
                         corrector_logic.run_corrector( tmp_configs_dir, bin_home, corr_cfg,
                         ext_python_modules_home, log, assembled, result_corrected_filename)
 
@@ -893,6 +909,9 @@ def main(args):
                 if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename):
                     message = " * Assembly graph is in " + support.process_spaces(result_assembly_graph_filename)
                     log.info(message)
+                if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename_gfa):
+                    message = " * Assembly graph in GFA format is in " + support.process_spaces(result_assembly_graph_filename_gfa)
+                    log.info(message)
                 if "assembly" in cfg and os.path.isfile(result_contigs_paths_filename):
                     message = " * Paths in the assembly graph corresponding to the contigs are in " + \
                               support.process_spaces(result_contigs_paths_filename)
@@ -933,7 +952,10 @@ def main(args):
                         result_fasta = list(support.read_fasta(result_filename))
                         # correctness check: should be one contig of length 1000 bp
                         correct_number = 1
-                        correct_length = 1000
+                        if options_storage.plasmid:
+                            correct_length = 9667
+                        else:
+                            correct_length = 1000
                         if not len(result_fasta):
                             support.error("TEST FAILED: %s does not contain contigs!" % result_filename)
                         elif len(result_fasta) > correct_number:
diff --git a/rnaspades.py b/rnaspades.py
index c19e2fb..ff31c92 100755
--- a/rnaspades.py
+++ b/rnaspades.py
@@ -186,10 +186,15 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                                       len(options_storage.SHORT_READS_TYPES.keys()) +
                                       len(options_storage.LONG_READS_TYPES))]  # "[{}]*num" doesn't work here!
 
+    # auto detecting SPAdes mode (rna, meta, etc) if it is not a rerun (--continue or --restart-from)
+    if secondary_filling or not options_storage.will_rerun(options):
+        mode = options_storage.get_mode()
+        if mode is not None:
+            options.append(('--' + mode, ''))
+
     # for parsing options from "previous run command"
     options_storage.continue_mode = False
     options_storage.k_mers = None
-
     for opt, arg in options:
         if opt == '-o':
             if not skip_output_dir:
@@ -197,13 +202,17 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                     support.error('-o option was specified at least twice')
                 options_storage.output_dir = abspath(expanduser(arg))
                 options_storage.dict_of_rel2abs[arg] = options_storage.output_dir
+                support.check_path_is_ascii(options_storage.output_dir, 'output directory')
         elif opt == "--tmp-dir":
             options_storage.tmp_dir = abspath(expanduser(arg))
             options_storage.dict_of_rel2abs[arg] = options_storage.tmp_dir
+            support.check_path_is_ascii(options_storage.tmp_dir, 'directory for temporary files')
         elif opt == "--configs-dir":
             options_storage.configs_dir = support.check_dir_existence(arg)
         elif opt == "--reference":
             options_storage.reference = support.check_file_existence(arg, 'reference', log)
+        elif opt == "--series-analysis":
+            options_storage.series_analysis = support.check_file_existence(arg, 'series-analysis', log)
         elif opt == "--dataset":
             options_storage.dataset_yaml_filename = support.check_file_existence(arg, 'dataset', log)
 
@@ -225,16 +234,12 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         elif opt == "--sc":
             options_storage.single_cell = True
         elif opt == "--meta":
-            #FIXME temporary solution
-            options_storage.single_cell = True
             options_storage.meta = True
         elif opt == "--large-genome":
             options_storage.large_genome = True
         elif opt == "--plasmid":
             options_storage.plasmid = True
         elif opt == "--rna":
-            #FIXME temporary solution
-            options_storage.single_cell = True
             options_storage.rna = True
         elif opt == "--iontorrent":
             options_storage.iontorrent = True
@@ -327,9 +332,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
             show_usage(0, show_hidden=True)
 
         elif opt == "--test":
-            options_storage.set_test_options()
-            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
-            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
+            options_storage.set_test_options()            
             #break
         elif opt == "--diploid":
             options_storage.diploid_mode = True
@@ -338,6 +341,14 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         else:
             raise ValueError
 
+    if options_storage.test_mode:
+        if options_storage.plasmid:
+            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset_plasmid/pl1.fq.gz"), dataset_data)
+            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset_plasmid/pl2.fq.gz"), dataset_data)
+        else:
+            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
+            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
+
     if not options_storage.output_dir:
         support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).", log)
     if not os.path.isdir(options_storage.output_dir):
@@ -372,7 +383,6 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                 existing_dataset_data = None
     if existing_dataset_data is not None:
         dataset_data = existing_dataset_data
-        options_storage.dataset_yaml_filename = processed_dataset_fpath
     else:
         if options_storage.dataset_yaml_filename:
             try:
@@ -384,8 +394,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         else:
             dataset_data = support.correct_dataset(dataset_data)
             dataset_data = support.relative2abs_paths(dataset_data, os.getcwd())
-        options_storage.dataset_yaml_filename = processed_dataset_fpath
-        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
+    options_storage.dataset_yaml_filename = processed_dataset_fpath
 
     support.check_dataset_reads(dataset_data, options_storage.only_assembler, log)
     if not support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_CONSTRUCTION):
@@ -397,6 +406,9 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if len(support.get_lib_ids_by_type(dataset_data, 'paired-end')) > 1:
             support.error('you cannot specify more than one paired-end library in RNA-Seq mode!')
 
+    if existing_dataset_data is None:
+        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
+
     options_storage.set_default_values()
     ### FILLING cfg
     cfg["common"] = empty_config()
@@ -412,6 +424,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
     cfg["common"].__dict__["max_threads"] = options_storage.threads
     cfg["common"].__dict__["max_memory"] = options_storage.memory
     cfg["common"].__dict__["developer_mode"] = options_storage.developer_mode
+    if options_storage.series_analysis:
+        cfg["common"].__dict__["series_analysis"] = options_storage.series_analysis
 
     # dataset section
     cfg["dataset"].__dict__["yaml_filename"] = options_storage.dataset_yaml_filename
@@ -430,6 +444,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         cfg["error_correction"].__dict__["iontorrent"] = options_storage.iontorrent
         if options_storage.meta or options_storage.large_genome:
             cfg["error_correction"].__dict__["count_filter_singletons"] = 1
+        if options_storage.read_buffer_size:
+            cfg["error_correction"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
 
     # assembly
     if not options_storage.only_error_correction:
@@ -449,9 +465,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if options_storage.read_buffer_size:
             cfg["assembly"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
         cfg["assembly"].__dict__["correct_scaffolds"] = options_storage.correct_scaffolds
-        if options_storage.large_genome:
-            cfg["assembly"].__dict__["bwa_paired"] = True
-            cfg["assembly"].__dict__["scaffolding_mode"] = "old_pe_2015"
+
     #corrector can work only if contigs exist (not only error correction)
     if (not options_storage.only_error_correction) and options_storage.mismatch_corrector:
         cfg["mismatch_corrector"] = empty_config()
@@ -500,9 +514,11 @@ def check_cfg_for_partial_run(cfg, type='restart-from'):  # restart-from ot stop
                 support.error("failed to " + action + " K=%s because this K " % k_str + verb + " not specified!")
 
 
-def get_options_from_params(params_filename, spades_py_name=None):
+def get_options_from_params(params_filename, running_script):
+    cmd_line = None
+    options = None
     if not os.path.isfile(params_filename):
-        return None, None
+        return cmd_line, options, "failed to parse command line of the previous run (%s not found)!" % params_filename
     params = open(params_filename, 'r')
     cmd_line = params.readline().strip()
     spades_prev_version = None
@@ -512,20 +528,22 @@ def get_options_from_params(params_filename, spades_py_name=None):
             break
     params.close()
     if spades_prev_version is None:
-        support.error("failed to parse SPAdes version of the previous run! "
-                      "Please restart from the beginning or specify another output directory.")
+        return cmd_line, options, "failed to parse SPAdes version of the previous run!"
     if spades_prev_version.strip() != spades_version.strip():
-        support.error("SPAdes version of the previous run (%s) is not equal to the current version of SPAdes (%s)! "
-                      "Please restart from the beginning or specify another output directory."
-                      % (spades_prev_version.strip(), spades_version.strip()))
-    if spades_py_name is None or cmd_line.find(os.path.basename(spades_py_name)) == -1:
-        spades_py_name = 'spades.py'  # try default name
-    else:
-        spades_py_name = os.path.basename(spades_py_name)
-    spades_py_pos = cmd_line.find(spades_py_name)
-    if spades_py_pos == -1:
-        return None, None
-    return cmd_line, cmd_line[spades_py_pos + len(spades_py_name):].split('\t')
+        return cmd_line, options, "SPAdes version of the previous run (%s) is not equal " \
+                                  "to the current version of SPAdes (%s)!" \
+                                  % (spades_prev_version.strip(), spades_version.strip())
+    if 'Command line: ' not in cmd_line or '\t' not in cmd_line:
+        return cmd_line, options, "failed to parse executable script of the previous run!"
+    options = cmd_line.split('\t')[1:]
+    prev_running_script = cmd_line.split('\t')[0][len('Command line: '):]
+    # we cannot restart/continue spades.py run with metaspades.py/rnaspades.py/etc and vice versa
+    if os.path.basename(prev_running_script) != os.path.basename(running_script):
+        return cmd_line, options, "executable script of the previous run (%s) is not equal " \
+                                  "to the current executable script (%s)!" \
+                                  % (os.path.basename(prev_running_script),
+                                     os.path.basename(running_script))
+    return cmd_line, options, ""
 
 
 def show_version():
@@ -554,19 +572,14 @@ def main(args):
 
     support.check_binaries(bin_home, log)
 
-    # auto detecting SPAdes mode (rna, meta, etc)
-    mode = options_storage.get_mode()
-    if mode is not None:
-        args.append('--' + mode)
-
     # parse options and safe all parameters to cfg
     options = args
     cfg, dataset_data = fill_cfg(options, log)
 
     if options_storage.continue_mode:
-        cmd_line, options = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
-        if not options:
-            support.error("failed to parse command line of the previous run! Please restart from the beginning or specify another output directory.")
+        cmd_line, options, err_msg = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
+        if err_msg:
+            support.error(err_msg + " Please restart from the beginning or specify another output directory.")
         cfg, dataset_data = fill_cfg(options, log, secondary_filling=True)
         if options_storage.restart_from:
             check_cfg_for_partial_run(cfg, type='restart-from')
@@ -699,6 +712,7 @@ def main(args):
         result_contigs_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_name)
         result_scaffolds_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_name)
         result_assembly_graph_filename = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name)
+        result_assembly_graph_filename_gfa = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name_gfa)
         result_contigs_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_paths)
         result_scaffolds_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_paths)
         result_transcripts_filename = os.path.join(cfg["common"].output_dir, options_storage.transcripts_name)
@@ -715,6 +729,7 @@ def main(args):
             spades_cfg.__dict__["result_contigs"] = result_contigs_filename
             spades_cfg.__dict__["result_scaffolds"] = result_scaffolds_filename
             spades_cfg.__dict__["result_graph"] = result_assembly_graph_filename
+            spades_cfg.__dict__["result_graph_gfa"] = result_assembly_graph_filename_gfa
             spades_cfg.__dict__["result_contigs_paths"] = result_contigs_paths_filename
             spades_cfg.__dict__["result_scaffolds_paths"] = result_scaffolds_paths_filename
             spades_cfg.__dict__["result_transcripts"] = result_transcripts_filename
@@ -844,7 +859,9 @@ def main(args):
                         if options_storage.continue_mode and os.path.isfile(corrected):
                             log.info("\n== Skipping processing of " + assembly_type + " (already processed)\n")
                             continue
-
+                        if not os.path.isfile(assembled) or os.path.getsize(assembled) == 0:
+                            log.info("\n== Skipping processing of " + assembly_type + " (empty file)\n")
+                            continue
                         support.continue_from_here(log)
                         log.info("\n== Processing of " + assembly_type + "\n")
 
@@ -855,7 +872,6 @@ def main(args):
                         corr_cfg = merge_configs(cfg["mismatch_corrector"], cfg["common"])
                         
                         result_corrected_filename = os.path.join(tmp_dir_for_corrector, "corrected_contigs.fasta")
-
                         corrector_logic.run_corrector( tmp_configs_dir, bin_home, corr_cfg,
                         ext_python_modules_home, log, assembled, result_corrected_filename)
 
@@ -893,6 +909,9 @@ def main(args):
                 if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename):
                     message = " * Assembly graph is in " + support.process_spaces(result_assembly_graph_filename)
                     log.info(message)
+                if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename_gfa):
+                    message = " * Assembly graph in GFA format is in " + support.process_spaces(result_assembly_graph_filename_gfa)
+                    log.info(message)
                 if "assembly" in cfg and os.path.isfile(result_contigs_paths_filename):
                     message = " * Paths in the assembly graph corresponding to the contigs are in " + \
                               support.process_spaces(result_contigs_paths_filename)
@@ -933,7 +952,10 @@ def main(args):
                         result_fasta = list(support.read_fasta(result_filename))
                         # correctness check: should be one contig of length 1000 bp
                         correct_number = 1
-                        correct_length = 1000
+                        if options_storage.plasmid:
+                            correct_length = 9667
+                        else:
+                            correct_length = 1000
                         if not len(result_fasta):
                             support.error("TEST FAILED: %s does not contain contigs!" % result_filename)
                         elif len(result_fasta) > correct_number:
diff --git a/rnaspades_manual.html b/rnaspades_manual.html
index 7b5199b..5a23b1e 100644
--- a/rnaspades_manual.html
+++ b/rnaspades_manual.html
@@ -1,6 +1,6 @@
 <html>
 <head>
-    <title>rnaSPAdes 1.0.0 Manual</title>
+    <title>rnaSPAdes manual</title>
     <style type="text/css">
         .code {
             background-color: lightgray;
@@ -8,10 +8,12 @@
     </style>
 </head>
 <body>
-<h1>rnaSPAdes 1.0.0 Manual</h1>
+<h1>rnaSPAdes manual</h1>
 
 1. <a href="#sec1">About rnaSPAdes</a><br>
 2. <a href="#sec2">rnaSPAdes specifics</a><br>
+    2.1. <a href="#sec2.1">Running rnaSPAdes</a><br>
+    2.2. <a href="#sec2.2">rnaSPAdes output</a><br>
 3. <a href="#sec3">Assembly evaluation</a><br>
 4. <a href="#sec4">Citation</a><br>
 5. <a href="#sec5">Feedback and bug reports</a><br>
@@ -24,6 +26,8 @@
 <a name="sec2"></a>
 <h2>2 rnaSPAdes specifics</h2>
 
+<a name="sec2.1"></a>
+<h3>2.1 Running rnaSPAdes</h3>
 <p>
 To run rnaSPAdes use
 
@@ -43,16 +47,21 @@ or
 
 Note that we assume that SPAdes installation directory is added to the <code>PATH</code> variable (provide full path to rnaSPAdes executable otherwise: <code><rnaspades installation dir>/rnaspades.py</code>). 
 
-
-<p>Here are the main differences of rnaSPAdes:
+<p>Here are several notes regarding options :
     <ul>
-        <li>rnaSPAdes outputs only one FASTA file named <code>transcripts.fasta</code>. The corresponding file with paths in the <code>assembly_graph.fastg</code> is <code>transcripts.paths</code>.</li>
         <li>rnaSPAdes can take as an input only one paired-end library and multiple single-end libraries.</li>
         <li>rnaSPAdes does not support <code>--careful</code> and <code>--cov-cutoff</code> options.</li>
         <li>rnaSPAdes is not compatible with other pipeline options such as <code>--meta</code>, <code>--sc</code> and <code>--plasmid</code>.</li>
-        <li>rnaSPAdes works using only a single k-mer size (55 by the default). We strongly recommend no to change this parameter. In case your RNA-Seq data set contains long Illumina reads (150 bp and longer) you may try to use longer k-mer size (approximately half of the read length). In case you have any doubts about your run, do not hesitate to contact us using e-mail given below.</li>
+        <li>rnaSPAdes works using only a single k-mer size (55 by the default). We strongly recommend not to change this parameter. In case your RNA-Seq data set contains long Illumina reads (150 bp and longer) you may try to use longer k-mer size (approximately half of the read length). In case you have any doubts about your run, do not hesitate to contact us using e-mail given below.</li>
     </ul>
 
+<a name="sec2.2"></a>
+<h3>2.2 rnaSPAdes output</h3>
+<p>
+rnaSPAdes outputs only one FASTA file named <code>transcripts.fasta</code>. The corresponding file with paths in the <code>assembly_graph.fastg</code> is <code>transcripts.paths</code>.
+
+<p>
+   Contigs/scaffolds names in rnaSPAdes output FASTA files have the following format: <br><code>>NODE_97_length_6237_cov_11.9819_g8_i2</code><br> Similarly to SPAdes, <code>97</code> is the number of the transcript, <code>6237</code> is its sequence length in nucleotides and <code>11.9819</code> is the k-mer coverage. Note that the k-mer coverage is always lower than the read (per-base) coverage. <code>g8_i2</code> correspond to the gene number 8 and isoform number 2 within this gene. Tr [...]
 
 <a name="sec3">
 <h2>3 Assembly evaluation</h2>
diff --git a/spades.py b/spades.py
index c19e2fb..ff31c92 100755
--- a/spades.py
+++ b/spades.py
@@ -186,10 +186,15 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                                       len(options_storage.SHORT_READS_TYPES.keys()) +
                                       len(options_storage.LONG_READS_TYPES))]  # "[{}]*num" doesn't work here!
 
+    # auto detecting SPAdes mode (rna, meta, etc) if it is not a rerun (--continue or --restart-from)
+    if secondary_filling or not options_storage.will_rerun(options):
+        mode = options_storage.get_mode()
+        if mode is not None:
+            options.append(('--' + mode, ''))
+
     # for parsing options from "previous run command"
     options_storage.continue_mode = False
     options_storage.k_mers = None
-
     for opt, arg in options:
         if opt == '-o':
             if not skip_output_dir:
@@ -197,13 +202,17 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                     support.error('-o option was specified at least twice')
                 options_storage.output_dir = abspath(expanduser(arg))
                 options_storage.dict_of_rel2abs[arg] = options_storage.output_dir
+                support.check_path_is_ascii(options_storage.output_dir, 'output directory')
         elif opt == "--tmp-dir":
             options_storage.tmp_dir = abspath(expanduser(arg))
             options_storage.dict_of_rel2abs[arg] = options_storage.tmp_dir
+            support.check_path_is_ascii(options_storage.tmp_dir, 'directory for temporary files')
         elif opt == "--configs-dir":
             options_storage.configs_dir = support.check_dir_existence(arg)
         elif opt == "--reference":
             options_storage.reference = support.check_file_existence(arg, 'reference', log)
+        elif opt == "--series-analysis":
+            options_storage.series_analysis = support.check_file_existence(arg, 'series-analysis', log)
         elif opt == "--dataset":
             options_storage.dataset_yaml_filename = support.check_file_existence(arg, 'dataset', log)
 
@@ -225,16 +234,12 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         elif opt == "--sc":
             options_storage.single_cell = True
         elif opt == "--meta":
-            #FIXME temporary solution
-            options_storage.single_cell = True
             options_storage.meta = True
         elif opt == "--large-genome":
             options_storage.large_genome = True
         elif opt == "--plasmid":
             options_storage.plasmid = True
         elif opt == "--rna":
-            #FIXME temporary solution
-            options_storage.single_cell = True
             options_storage.rna = True
         elif opt == "--iontorrent":
             options_storage.iontorrent = True
@@ -327,9 +332,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
             show_usage(0, show_hidden=True)
 
         elif opt == "--test":
-            options_storage.set_test_options()
-            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
-            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
+            options_storage.set_test_options()            
             #break
         elif opt == "--diploid":
             options_storage.diploid_mode = True
@@ -338,6 +341,14 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         else:
             raise ValueError
 
+    if options_storage.test_mode:
+        if options_storage.plasmid:
+            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset_plasmid/pl1.fq.gz"), dataset_data)
+            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset_plasmid/pl2.fq.gz"), dataset_data)
+        else:
+            support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
+            support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
+
     if not options_storage.output_dir:
         support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).", log)
     if not os.path.isdir(options_storage.output_dir):
@@ -372,7 +383,6 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
                 existing_dataset_data = None
     if existing_dataset_data is not None:
         dataset_data = existing_dataset_data
-        options_storage.dataset_yaml_filename = processed_dataset_fpath
     else:
         if options_storage.dataset_yaml_filename:
             try:
@@ -384,8 +394,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         else:
             dataset_data = support.correct_dataset(dataset_data)
             dataset_data = support.relative2abs_paths(dataset_data, os.getcwd())
-        options_storage.dataset_yaml_filename = processed_dataset_fpath
-        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
+    options_storage.dataset_yaml_filename = processed_dataset_fpath
 
     support.check_dataset_reads(dataset_data, options_storage.only_assembler, log)
     if not support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_CONSTRUCTION):
@@ -397,6 +406,9 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if len(support.get_lib_ids_by_type(dataset_data, 'paired-end')) > 1:
             support.error('you cannot specify more than one paired-end library in RNA-Seq mode!')
 
+    if existing_dataset_data is None:
+        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
+
     options_storage.set_default_values()
     ### FILLING cfg
     cfg["common"] = empty_config()
@@ -412,6 +424,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
     cfg["common"].__dict__["max_threads"] = options_storage.threads
     cfg["common"].__dict__["max_memory"] = options_storage.memory
     cfg["common"].__dict__["developer_mode"] = options_storage.developer_mode
+    if options_storage.series_analysis:
+        cfg["common"].__dict__["series_analysis"] = options_storage.series_analysis
 
     # dataset section
     cfg["dataset"].__dict__["yaml_filename"] = options_storage.dataset_yaml_filename
@@ -430,6 +444,8 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         cfg["error_correction"].__dict__["iontorrent"] = options_storage.iontorrent
         if options_storage.meta or options_storage.large_genome:
             cfg["error_correction"].__dict__["count_filter_singletons"] = 1
+        if options_storage.read_buffer_size:
+            cfg["error_correction"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
 
     # assembly
     if not options_storage.only_error_correction:
@@ -449,9 +465,7 @@ def fill_cfg(options_to_parse, log, secondary_filling=False):
         if options_storage.read_buffer_size:
             cfg["assembly"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
         cfg["assembly"].__dict__["correct_scaffolds"] = options_storage.correct_scaffolds
-        if options_storage.large_genome:
-            cfg["assembly"].__dict__["bwa_paired"] = True
-            cfg["assembly"].__dict__["scaffolding_mode"] = "old_pe_2015"
+
     #corrector can work only if contigs exist (not only error correction)
     if (not options_storage.only_error_correction) and options_storage.mismatch_corrector:
         cfg["mismatch_corrector"] = empty_config()
@@ -500,9 +514,11 @@ def check_cfg_for_partial_run(cfg, type='restart-from'):  # restart-from ot stop
                 support.error("failed to " + action + " K=%s because this K " % k_str + verb + " not specified!")
 
 
-def get_options_from_params(params_filename, spades_py_name=None):
+def get_options_from_params(params_filename, running_script):
+    cmd_line = None
+    options = None
     if not os.path.isfile(params_filename):
-        return None, None
+        return cmd_line, options, "failed to parse command line of the previous run (%s not found)!" % params_filename
     params = open(params_filename, 'r')
     cmd_line = params.readline().strip()
     spades_prev_version = None
@@ -512,20 +528,22 @@ def get_options_from_params(params_filename, spades_py_name=None):
             break
     params.close()
     if spades_prev_version is None:
-        support.error("failed to parse SPAdes version of the previous run! "
-                      "Please restart from the beginning or specify another output directory.")
+        return cmd_line, options, "failed to parse SPAdes version of the previous run!"
     if spades_prev_version.strip() != spades_version.strip():
-        support.error("SPAdes version of the previous run (%s) is not equal to the current version of SPAdes (%s)! "
-                      "Please restart from the beginning or specify another output directory."
-                      % (spades_prev_version.strip(), spades_version.strip()))
-    if spades_py_name is None or cmd_line.find(os.path.basename(spades_py_name)) == -1:
-        spades_py_name = 'spades.py'  # try default name
-    else:
-        spades_py_name = os.path.basename(spades_py_name)
-    spades_py_pos = cmd_line.find(spades_py_name)
-    if spades_py_pos == -1:
-        return None, None
-    return cmd_line, cmd_line[spades_py_pos + len(spades_py_name):].split('\t')
+        return cmd_line, options, "SPAdes version of the previous run (%s) is not equal " \
+                                  "to the current version of SPAdes (%s)!" \
+                                  % (spades_prev_version.strip(), spades_version.strip())
+    if 'Command line: ' not in cmd_line or '\t' not in cmd_line:
+        return cmd_line, options, "failed to parse executable script of the previous run!"
+    options = cmd_line.split('\t')[1:]
+    prev_running_script = cmd_line.split('\t')[0][len('Command line: '):]
+    # we cannot restart/continue spades.py run with metaspades.py/rnaspades.py/etc and vice versa
+    if os.path.basename(prev_running_script) != os.path.basename(running_script):
+        return cmd_line, options, "executable script of the previous run (%s) is not equal " \
+                                  "to the current executable script (%s)!" \
+                                  % (os.path.basename(prev_running_script),
+                                     os.path.basename(running_script))
+    return cmd_line, options, ""
 
 
 def show_version():
@@ -554,19 +572,14 @@ def main(args):
 
     support.check_binaries(bin_home, log)
 
-    # auto detecting SPAdes mode (rna, meta, etc)
-    mode = options_storage.get_mode()
-    if mode is not None:
-        args.append('--' + mode)
-
     # parse options and safe all parameters to cfg
     options = args
     cfg, dataset_data = fill_cfg(options, log)
 
     if options_storage.continue_mode:
-        cmd_line, options = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
-        if not options:
-            support.error("failed to parse command line of the previous run! Please restart from the beginning or specify another output directory.")
+        cmd_line, options, err_msg = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
+        if err_msg:
+            support.error(err_msg + " Please restart from the beginning or specify another output directory.")
         cfg, dataset_data = fill_cfg(options, log, secondary_filling=True)
         if options_storage.restart_from:
             check_cfg_for_partial_run(cfg, type='restart-from')
@@ -699,6 +712,7 @@ def main(args):
         result_contigs_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_name)
         result_scaffolds_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_name)
         result_assembly_graph_filename = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name)
+        result_assembly_graph_filename_gfa = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name_gfa)
         result_contigs_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_paths)
         result_scaffolds_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_paths)
         result_transcripts_filename = os.path.join(cfg["common"].output_dir, options_storage.transcripts_name)
@@ -715,6 +729,7 @@ def main(args):
             spades_cfg.__dict__["result_contigs"] = result_contigs_filename
             spades_cfg.__dict__["result_scaffolds"] = result_scaffolds_filename
             spades_cfg.__dict__["result_graph"] = result_assembly_graph_filename
+            spades_cfg.__dict__["result_graph_gfa"] = result_assembly_graph_filename_gfa
             spades_cfg.__dict__["result_contigs_paths"] = result_contigs_paths_filename
             spades_cfg.__dict__["result_scaffolds_paths"] = result_scaffolds_paths_filename
             spades_cfg.__dict__["result_transcripts"] = result_transcripts_filename
@@ -844,7 +859,9 @@ def main(args):
                         if options_storage.continue_mode and os.path.isfile(corrected):
                             log.info("\n== Skipping processing of " + assembly_type + " (already processed)\n")
                             continue
-
+                        if not os.path.isfile(assembled) or os.path.getsize(assembled) == 0:
+                            log.info("\n== Skipping processing of " + assembly_type + " (empty file)\n")
+                            continue
                         support.continue_from_here(log)
                         log.info("\n== Processing of " + assembly_type + "\n")
 
@@ -855,7 +872,6 @@ def main(args):
                         corr_cfg = merge_configs(cfg["mismatch_corrector"], cfg["common"])
                         
                         result_corrected_filename = os.path.join(tmp_dir_for_corrector, "corrected_contigs.fasta")
-
                         corrector_logic.run_corrector( tmp_configs_dir, bin_home, corr_cfg,
                         ext_python_modules_home, log, assembled, result_corrected_filename)
 
@@ -893,6 +909,9 @@ def main(args):
                 if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename):
                     message = " * Assembly graph is in " + support.process_spaces(result_assembly_graph_filename)
                     log.info(message)
+                if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename_gfa):
+                    message = " * Assembly graph in GFA format is in " + support.process_spaces(result_assembly_graph_filename_gfa)
+                    log.info(message)
                 if "assembly" in cfg and os.path.isfile(result_contigs_paths_filename):
                     message = " * Paths in the assembly graph corresponding to the contigs are in " + \
                               support.process_spaces(result_contigs_paths_filename)
@@ -933,7 +952,10 @@ def main(args):
                         result_fasta = list(support.read_fasta(result_filename))
                         # correctness check: should be one contig of length 1000 bp
                         correct_number = 1
-                        correct_length = 1000
+                        if options_storage.plasmid:
+                            correct_length = 9667
+                        else:
+                            correct_length = 1000
                         if not len(result_fasta):
                             support.error("TEST FAILED: %s does not contain contigs!" % result_filename)
                         elif len(result_fasta) > correct_number:
diff --git a/spades_compile.sh b/spades_compile.sh
index 580f4b6..fe33034 100755
--- a/spades_compile.sh
+++ b/spades_compile.sh
@@ -19,7 +19,7 @@ rm -rf "$BASEDIR/$BUILD_DIR"
 mkdir -p "$BASEDIR/$BUILD_DIR"
 set -e
 cd "$BASEDIR/$BUILD_DIR"
-cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="$PREFIX" "$BASEDIR/src" $*
+cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="$PREFIX" $* "$BASEDIR/src"
 make -j 8
 make install
 cd $PREFIX
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 6ef1d66..d539593 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -20,7 +20,7 @@ set(CMAKE_MODULE_PATH
   "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
 # Define various dirs
 set(SPADES_MAIN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR})
-set(SPADES_MODULES_DIR ${SPADES_MAIN_SRC_DIR}/modules)
+set(SPADES_MODULES_DIR ${SPADES_MAIN_SRC_DIR}/common)
 set(SPADES_MAIN_INCLUDE_DIR ${SPADES_MAIN_SRC_DIR}/include)
 set(SPADES_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
 set(SPADES_TOOLS_BINARY_DIR ${SPADES_BINARY_DIR}/bin)
@@ -82,7 +82,7 @@ if (NOT OPENMP_FOUND)
 endif()
 
 # sub projects
-add_subdirectory(modules)
+add_subdirectory(common)
 add_subdirectory(projects)
 add_subdirectory(spades_pipeline)
 
@@ -119,6 +119,8 @@ install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../test_dataset"
         DESTINATION share/spades)
 install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../test_dataset_truspades"
         DESTINATION share/spades)
+install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../test_dataset_plasmid"
+        DESTINATION share/spades)
 # manual, LICENSE, and GPLv2
 install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/../manual.html"
         DESTINATION share/spades
diff --git a/src/cmake/options.cmake b/src/cmake/options.cmake
index 370c73e..3bc0aef 100644
--- a/src/cmake/options.cmake
+++ b/src/cmake/options.cmake
@@ -26,6 +26,9 @@ if (SPADES_STATIC_BUILD)
   set(CMAKE_FIND_LIBRARY_SUFFIXES .a) 
   set(LINK_SEARCH_START_STATIC TRUE)
   set(LINK_SEARCH_END_STATIC TRUE)
+  # This is dirty hack to get rid of -Wl,-Bdynamic
+  set(CMAKE_EXE_LINK_DYNAMIC_C_FLAGS "-Wl,-Bstatic")
+  set(CMAKE_EXE_LINK_DYNAMIC_CXX_FLAGS "-Wl,-Bstatic")
 
   if (APPLE)
     set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc")
diff --git a/src/cmake/pack.cmake b/src/cmake/pack.cmake
index b5982c0..a121170 100644
--- a/src/cmake/pack.cmake
+++ b/src/cmake/pack.cmake
@@ -12,9 +12,9 @@ set(CPACK_PACKAGE_NAME "SPAdes")
 set(CPACK_PACKAGE_VENDOR "Saint Petersburg State University")
 set(CPACK_PACKAGE_DESCRIPTION_FILE "${SPADES_MAIN_SRC_DIR}/../README")
 set(CPACK_RESOURCE_FILE_LICENSE "${SPADES_MAIN_SRC_DIR}/../LICENSE")
-set(CPACK_PACKAGE_VERSION "3.9.1")
+set(CPACK_PACKAGE_VERSION "3.10.1")
 set(CPACK_PACKAGE_VERSION_MAJOR "3")
-set(CPACK_PACKAGE_VERSION_MINOR "9")
+set(CPACK_PACKAGE_VERSION_MINOR "10")
 set(CPACK_PACKAGE_VERSION_PATCH "1")
 set(CPACK_STRIP_FILES bin/spades bin/hammer bin/ionhammer bin/dipspades bin/spades-bwa bin/corrector bin/scaffold_correction)
 
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
new file mode 100644
index 0000000..52bd90a
--- /dev/null
+++ b/src/common/CMakeLists.txt
@@ -0,0 +1,22 @@
+############################################################################
+# Copyright (c) 2015 Saint Petersburg State University
+# Copyright (c) 2011-2014 Saint Petersburg Academic University
+# All Rights Reserved
+# See file LICENSE for details.
+############################################################################
+
+project(common_modules CXX)
+
+add_subdirectory(pipeline)
+add_subdirectory(assembly_graph)
+add_subdirectory(modules/path_extend)
+add_subdirectory(modules)
+add_subdirectory(stages)
+add_subdirectory(utils)
+add_subdirectory(io)
+add_subdirectory(utils/mph_index)
+add_subdirectory(utils/coverage_model)
+
+add_library(common_modules STATIC empty.cpp)
+
+target_link_libraries(common_modules assembly_graph input pipeline coverage_model path_extend stages utils mph_index modules)
diff --git a/src/utils/adt/array_vector.hpp b/src/common/adt/array_vector.hpp
similarity index 100%
rename from src/utils/adt/array_vector.hpp
rename to src/common/adt/array_vector.hpp
diff --git a/src/utils/adt/bag.hpp b/src/common/adt/bag.hpp
similarity index 98%
rename from src/utils/adt/bag.hpp
rename to src/common/adt/bag.hpp
index c5abbb3..47d58ad 100644
--- a/src/utils/adt/bag.hpp
+++ b/src/common/adt/bag.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 
 template<class T, class hash = std::hash<T>>
 class bag {
diff --git a/src/utils/adt/bf.hpp b/src/common/adt/bf.hpp
similarity index 100%
rename from src/utils/adt/bf.hpp
rename to src/common/adt/bf.hpp
diff --git a/src/utils/adt/chained_iterator.hpp b/src/common/adt/chained_iterator.hpp
similarity index 100%
rename from src/utils/adt/chained_iterator.hpp
rename to src/common/adt/chained_iterator.hpp
diff --git a/src/utils/adt/concurrent_dsu.hpp b/src/common/adt/concurrent_dsu.hpp
similarity index 99%
rename from src/utils/adt/concurrent_dsu.hpp
rename to src/common/adt/concurrent_dsu.hpp
index 176a5e3..b45445c 100644
--- a/src/utils/adt/concurrent_dsu.hpp
+++ b/src/common/adt/concurrent_dsu.hpp
@@ -8,7 +8,7 @@
 #ifndef CONCURRENTDSU_HPP_
 #define CONCURRENTDSU_HPP_
 
-#include "io/kmers_io/mmapped_writer.hpp"
+#include "io/kmers/mmapped_writer.hpp"
 
 #include <cassert>
 #include <cmath>
diff --git a/src/utils/adt/filter_iterator.hpp b/src/common/adt/filter_iterator.hpp
similarity index 100%
rename from src/utils/adt/filter_iterator.hpp
rename to src/common/adt/filter_iterator.hpp
diff --git a/src/utils/adt/flat_map.hpp b/src/common/adt/flat_map.hpp
similarity index 100%
rename from src/utils/adt/flat_map.hpp
rename to src/common/adt/flat_map.hpp
diff --git a/src/utils/adt/flat_set.hpp b/src/common/adt/flat_set.hpp
similarity index 100%
rename from src/utils/adt/flat_set.hpp
rename to src/common/adt/flat_set.hpp
diff --git a/src/utils/adt/hll.hpp b/src/common/adt/hll.hpp
similarity index 100%
rename from src/utils/adt/hll.hpp
rename to src/common/adt/hll.hpp
diff --git a/src/utils/adt/iterator_range.hpp b/src/common/adt/iterator_range.hpp
similarity index 100%
rename from src/utils/adt/iterator_range.hpp
rename to src/common/adt/iterator_range.hpp
diff --git a/src/utils/adt/kmer_hash_vector.hpp b/src/common/adt/kmer_hash_vector.hpp
similarity index 99%
rename from src/utils/adt/kmer_hash_vector.hpp
rename to src/common/adt/kmer_hash_vector.hpp
index f2b6861..fcc486f 100644
--- a/src/utils/adt/kmer_hash_vector.hpp
+++ b/src/common/adt/kmer_hash_vector.hpp
@@ -16,7 +16,7 @@
 #define KMER_HASH_VECTOR_HPP_
 
 
-#include "data_structures/sequence/runtime_k.hpp"
+#include "sequence/runtime_k.hpp"
 #include "kmer_map.hpp"
 
 
diff --git a/src/utils/adt/kmer_vector.hpp b/src/common/adt/kmer_vector.hpp
similarity index 92%
rename from src/utils/adt/kmer_vector.hpp
rename to src/common/adt/kmer_vector.hpp
index 06b9eb3..2be2fb2 100644
--- a/src/utils/adt/kmer_vector.hpp
+++ b/src/common/adt/kmer_vector.hpp
@@ -40,7 +40,7 @@ private:
     // No JEMalloc, no cookies
     ElTy *res = new ElTy[capacity_ * el_sz_];
     if (storage_)
-      std:: memcpy(res, storage_, size_ * sizeof(ElTy) * el_sz_);
+      std::memcpy(res, storage_, size_ * sizeof(ElTy) * el_sz_);
 
     delete[] storage_;
     storage_ = res;
@@ -79,7 +79,7 @@ public:
 #ifdef SPADES_USE_JEMALLOC
         je_free(storage_);
 #else
-    delete[] storage_;
+        delete[] storage_;
 #endif
     }
 
@@ -114,6 +114,14 @@ public:
         push_back(s.data());
     }
 
+    void push_back(reference s) {
+        push_back(s.data());
+    }
+
+    void push_back(const value_type &s) {
+        push_back(s.data());
+    }
+
     void reserve(size_t amount) {
         if (capacity_ < amount) {
             capacity_ = amount;
@@ -126,6 +134,11 @@ public:
         vector_.set_size(size_);
     }
 
+    void shrink_to_fit() {
+        capacity_ = std::max(size_, size_t(1));
+        vector_.set_data(realloc());
+    }
+
     iterator begin() {
         return vector_.begin();
     }
diff --git a/src/common/adt/loser_tree.hpp b/src/common/adt/loser_tree.hpp
new file mode 100644
index 0000000..7dbab36
--- /dev/null
+++ b/src/common/adt/loser_tree.hpp
@@ -0,0 +1,134 @@
+#pragma once
+
+#include "iterator_range.hpp"
+#include <vector>
+
+namespace adt {
+
+template<typename IntegerType>
+IntegerType ilog2(IntegerType x) {
+    IntegerType lg = 0;
+    while (x >= 256) { x >>= 8; lg += 8; }
+    while (x >>= 1) lg += 1;
+
+    return lg;
+}
+
+template<typename IntegerType>
+IntegerType ilog2ceil(IntegerType x) {
+    return ilog2(x - 1) + 1;
+}
+
+template<class It, class Cmp>
+class loser_tree  {
+    typedef typename std::iterator_traits<It>::value_type value_type;
+    
+    size_t log_k_;
+    size_t k_;
+    std::vector<size_t> entry_;
+    Cmp inner_cmp_;
+
+    bool cmp(const adt::iterator_range<It> &a, const adt::iterator_range<It> &b) const {
+        // Emulate sentinels
+        if (b.end() == b.begin())
+            return true;
+        if (a.end() == a.begin())
+            return false;
+        
+        return inner_cmp_(*a.begin(), *b.begin());
+    }
+    
+    size_t init_winner(size_t root) {
+        if (root >= k_)
+            return root - k_;
+
+        size_t left = init_winner(2 * root);
+        size_t right = init_winner(2 * root + 1);
+        if (cmp(runs_[left], runs_[right])) {
+            entry_[root] = right;
+            return left;
+        } else {
+            entry_[root] = left;
+            return right;
+        }
+    }
+
+  public:
+    loser_tree(const std::vector<adt::iterator_range<It>> &runs,
+               Cmp inner_cmp = Cmp())
+            : inner_cmp_(inner_cmp), runs_(runs) {
+        log_k_ = ilog2ceil(runs.size());
+        k_ = (size_t(1) << log_k_);
+
+        // fprintf(stderr, "k: %zu, logK: %zu, nruns: %zu\n", k_, log_k_, runs.size());
+        
+        entry_.resize(2 * k_);
+        for (size_t i = 0; i < k_; ++i)
+            entry_[k_ + i] = i;
+        
+        // Insert sentinels
+        for (size_t i = runs.size(); i < k_; ++i)
+            runs_.emplace_back(adt::make_range(runs_[0].end(), runs_[0].end()));
+
+        // Populate tree
+        entry_[0] = init_winner(1);
+
+        // for (const auto &entry : entry_)
+        //    fprintf(stderr, "%zu, ", entry);
+        // fprintf(stderr, "\n");
+    }
+
+    size_t replay(size_t winner_index) {
+        auto &winner = runs_[winner_index];
+        if (winner.begin() == winner.end())
+            return winner_index;
+
+        winner = adt::make_range(std::next(winner.begin()), winner.end());
+        for (size_t i = (winner_index + k_) >> 1; i > 0; i >>= 1)
+            if (cmp(runs_[entry_[i]], runs_[winner_index]))
+                std::swap(entry_[i], winner_index);
+
+        return winner_index;
+    }
+
+    bool empty() const {
+        size_t winner_index = entry_[0];
+        const auto &winner = runs_[winner_index];
+        return (winner.begin() == winner.end());
+    }
+    
+
+    template<class It2>
+    size_t multi_merge(It2 out, size_t amount = -1ULL) {
+        size_t cnt = 0;
+        size_t winner_index = entry_[0];
+
+        for (cnt = 0; cnt < amount; ++cnt) {
+            auto &winner = runs_[winner_index];
+            if (winner.begin() == winner.end())
+                break;
+
+            *out++ = *winner.begin();
+
+            winner_index = replay(winner_index);
+        }
+        
+        entry_[0] = winner_index;
+
+        return cnt;
+    }
+
+    value_type pop() {
+        size_t winner_index = entry_[0];
+        value_type res = *runs_[winner_index].begin();
+        entry_[0] = replay(winner_index);
+
+        return res;
+    }
+    
+
+  private:
+    std::vector<adt::iterator_range<It>> runs_;
+};
+
+}
\ No newline at end of file
diff --git a/src/utils/adt/parallel_seq_vector.hpp b/src/common/adt/parallel_seq_vector.hpp
similarity index 93%
rename from src/utils/adt/parallel_seq_vector.hpp
rename to src/common/adt/parallel_seq_vector.hpp
index 209cb84..44c8d6c 100644
--- a/src/utils/adt/parallel_seq_vector.hpp
+++ b/src/common/adt/parallel_seq_vector.hpp
@@ -7,10 +7,10 @@
 
 #pragma once
 
-#include "utils/adt/parallel_unordered_map.hpp"
-#include "dev_support/openmp_wrapper.h"
+#include "parallel_unordered_map.hpp"
+#include "utils/openmp_wrapper.h"
 
-#include "data_structures/sequence/runtime_k.hpp"
+#include "sequence/runtime_k.hpp"
 #include "kmer_map.hpp"
 #include "kmer_hash_vector.hpp"
 
@@ -21,7 +21,7 @@ public:
 
     typedef runtime_k::KmerMap<int> destination_container_t;
 
-    typedef runtime_k::RtSeq Kmer;
+    typedef RtSeq Kmer;
 
 private:
 
diff --git a/src/utils/adt/parallel_unordered_map.hpp b/src/common/adt/parallel_unordered_map.hpp
similarity index 100%
rename from src/utils/adt/parallel_unordered_map.hpp
rename to src/common/adt/parallel_unordered_map.hpp
diff --git a/src/utils/adt/pointer_iterator.hpp b/src/common/adt/pointer_iterator.hpp
similarity index 100%
rename from src/utils/adt/pointer_iterator.hpp
rename to src/common/adt/pointer_iterator.hpp
diff --git a/src/utils/adt/queue_iterator.hpp b/src/common/adt/queue_iterator.hpp
similarity index 99%
rename from src/utils/adt/queue_iterator.hpp
rename to src/common/adt/queue_iterator.hpp
index c879541..5a867af 100644
--- a/src/utils/adt/queue_iterator.hpp
+++ b/src/common/adt/queue_iterator.hpp
@@ -8,7 +8,7 @@
 #ifndef QUEUE_ITERATOR_HPP_
 #define QUEUE_ITERATOR_HPP_
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 #include <set>
 
 template<typename T, typename Comparator>
diff --git a/src/utils/adt/small_pod_vector.hpp b/src/common/adt/small_pod_vector.hpp
similarity index 100%
rename from src/utils/adt/small_pod_vector.hpp
rename to src/common/adt/small_pod_vector.hpp
diff --git a/src/modules/assembly_graph/CMakeLists.txt b/src/common/assembly_graph/CMakeLists.txt
similarity index 52%
rename from src/modules/assembly_graph/CMakeLists.txt
rename to src/common/assembly_graph/CMakeLists.txt
index 41031ef..953a25e 100644
--- a/src/modules/assembly_graph/CMakeLists.txt
+++ b/src/common/assembly_graph/CMakeLists.txt
@@ -5,8 +5,8 @@
 # See file LICENSE for details.
 ############################################################################
 
-project(graph_support CXX)
+project(assembly_graph CXX)
 
-add_library(graph_support STATIC
-            components/connected_component.cpp paths/bidirectional_path.cpp graph_support/scaff_supplementary.cpp graph_alignment/edge_index_refiller.cpp)
-target_link_libraries(graph_support hattrie)
+add_library(assembly_graph STATIC
+            components/connected_component.cpp paths/bidirectional_path.cpp paths/bidirectional_path_io/io_support.cpp paths/bidirectional_path_io/bidirectional_path_output.cpp graph_support/scaff_supplementary.cpp ../modules/alignment/edge_index_refiller.cpp graph_support/coverage_uniformity_analyzer.cpp)
+target_link_libraries(assembly_graph hattrie)
diff --git a/src/modules/assembly_graph/components/component_filters.hpp b/src/common/assembly_graph/components/component_filters.hpp
similarity index 100%
rename from src/modules/assembly_graph/components/component_filters.hpp
rename to src/common/assembly_graph/components/component_filters.hpp
diff --git a/src/modules/assembly_graph/components/connected_component.cpp b/src/common/assembly_graph/components/connected_component.cpp
similarity index 100%
rename from src/modules/assembly_graph/components/connected_component.cpp
rename to src/common/assembly_graph/components/connected_component.cpp
diff --git a/src/modules/assembly_graph/components/connected_component.hpp b/src/common/assembly_graph/components/connected_component.hpp
similarity index 93%
rename from src/modules/assembly_graph/components/connected_component.hpp
rename to src/common/assembly_graph/components/connected_component.hpp
index abc396e..2fa958f 100644
--- a/src/modules/assembly_graph/components/connected_component.hpp
+++ b/src/common/assembly_graph/components/connected_component.hpp
@@ -4,7 +4,7 @@
 #pragma once
 #include <map>
 //#include "path_extend/bidirectional_path.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
 
 namespace debruijn_graph{
 
diff --git a/src/common/assembly_graph/components/graph_component.hpp b/src/common/assembly_graph/components/graph_component.hpp
new file mode 100644
index 0000000..2abcaec
--- /dev/null
+++ b/src/common/assembly_graph/components/graph_component.hpp
@@ -0,0 +1,226 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "utils/standard_base.hpp"
+
+namespace omnigraph {
+
+template<class Graph>
+class GraphComponent {
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename std::set<VertexId>::const_iterator vertex_iterator;
+    typedef typename std::set<EdgeId>::const_iterator edge_iterator;
+    const Graph& graph_;
+    std::set<VertexId> vertices_;
+    std::set<EdgeId> edges_;
+    std::set<VertexId> exits_;
+    std::set<VertexId> entrances_;
+    std::string name_;
+
+    template<class VertexIt>
+    void FillVertices(VertexIt begin, VertexIt end, bool add_conjugate = false) {
+        for (auto it = begin; it != end; ++it) {
+            vertices_.insert(*it);
+            if (add_conjugate)
+                vertices_.insert(graph_.conjugate(*it));
+        }
+    }
+
+    template<class EdgeIt>
+    void FillEdges(EdgeIt begin, EdgeIt end, bool add_conjugate = false) {
+        for (auto it = begin; it != end; ++it) {
+            edges_.insert(*it);
+            if (add_conjugate)
+                edges_.insert(graph_.conjugate(*it));
+        }
+    }
+
+    void FillInducedEdges() {
+        for (VertexId v : vertices_) {
+            for (EdgeId e : graph_.OutgoingEdges(v)) {
+                if (vertices_.count(graph_.EdgeEnd(e)) > 0) {
+                    edges_.insert(e);
+                }
+            }
+        }
+    }
+
+    void FillRelevantVertices() {
+        for (EdgeId e : edges_) {
+            vertices_.insert(graph_.EdgeStart(e));
+            vertices_.insert(graph_.EdgeEnd(e));
+        }
+    }
+
+    void FindEntrancesAndExits() {
+        for (auto v : vertices_) {
+            for (auto e : graph_.IncomingEdges(v)) {
+                if (!contains(e)) {
+                    entrances_.insert(v);
+                    break;
+                }
+            }
+
+            for (auto e : graph_.OutgoingEdges(v)) {
+                if (!contains(e)) {
+                    exits_.insert(v);
+                    break;
+                }
+            }
+        }
+    }
+
+    void Swap(GraphComponent<Graph> &that) {
+        VERIFY(&this->graph_ == &that.graph_);
+        std::swap(this->name_, that.name_);
+        std::swap(this->vertices_, that.vertices_);
+        std::swap(this->edges_, that.edges_);
+        std::swap(this->exits_, that.exits_);
+        std::swap(this->entrances_, that.entrances_);
+    }
+
+    template<class EdgeIt>
+    void FillFromEdges(EdgeIt begin, EdgeIt end,
+                       bool add_conjugate) {
+        FillEdges(begin, end, add_conjugate);
+        FillRelevantVertices();
+        FindEntrancesAndExits();
+    }
+
+    GraphComponent<Graph> &operator=(const GraphComponent<Graph> &);
+    GraphComponent(const GraphComponent<Graph> &);
+
+public:
+
+    template<class VertexIt>
+    static GraphComponent FromVertices(const Graph &g, VertexIt begin, VertexIt end,
+                                       bool add_conjugate = false, const string &name = "") {
+        GraphComponent answer(g, name);
+        answer.FillVertices(begin, end, add_conjugate);
+        answer.FillInducedEdges();
+        answer.FindEntrancesAndExits();
+        return answer;
+    }
+
+    template<class EdgeIt>
+    static GraphComponent FromEdges(const Graph &g, EdgeIt begin, EdgeIt end,
+                                    bool add_conjugate = false, const string &name = "") {
+        GraphComponent answer(g, name);
+        answer.FillFromEdges(begin, end, add_conjugate);
+        return answer;
+    }
+
+    template<class Container>
+    static GraphComponent FromVertices(const Graph &g, const Container &c,
+                                       bool add_conjugate = false, const string &name = "") {
+        return FromVertices(g, c.begin(), c.end(), add_conjugate, name);
+    }
+
+    template<class Container>
+    static GraphComponent FromEdges(const Graph &g, const Container &c,
+                                    bool add_conjugate = false, const string &name = "") {
+        return FromEdges(g, c.begin(), c.end(), add_conjugate, name);
+    }
+
+    static GraphComponent WholeGraph(const Graph &g, const string &name = "") {
+        return FromVertices(g, g.begin(), g.end(), false, name);
+    }
+
+    static GraphComponent Empty(const Graph &g, const string &name = "") {
+        return GraphComponent(g, name);
+    }
+
+    GraphComponent(const Graph &g, const string &name = "") :
+            graph_(g), name_(name) {
+    }
+
+    //may be used for conjugate closure
+    GraphComponent(const GraphComponent& component,
+                   bool add_conjugate,
+                   const string &name = "") : graph_(component.graph_), name_(name) {
+        FillFromEdges(component.e_begin(), component.e_end(), add_conjugate);
+    }
+
+    GraphComponent(GraphComponent&& that) : graph_(that.graph_) {
+        Swap(that);
+    }
+
+    GraphComponent<Graph> &operator=(GraphComponent<Graph> &&that) {
+        Swap(that);
+        return *this;
+    }
+
+    const Graph& g() const {
+        return graph_;
+    }
+
+    string name() const {
+        return name_;
+    }
+
+    size_t v_size() const {
+        return vertices_.size();
+    }
+
+    size_t e_size() const {
+        return edges_.size();
+    }
+
+    bool contains(EdgeId e) const {
+        return edges_.count(e) > 0;
+    }
+
+    bool contains(VertexId v) const {
+        return vertices_.count(v) > 0;
+    }
+
+    edge_iterator e_begin() const {
+        return edges_.begin();
+    }
+
+    edge_iterator e_end() const {
+        return edges_.end();
+    }
+
+    const std::set<EdgeId>& edges() const {
+        return edges_;
+    }
+
+    const std::set<VertexId>& vertices() const{
+        return vertices_;
+    }
+
+    vertex_iterator v_begin() const {
+        return vertices_.begin();
+    }
+
+    vertex_iterator v_end() const {
+        return vertices_.end();
+    }
+
+    const std::set<VertexId>& exits() const {
+        return exits_;
+    }
+
+    const std::set<VertexId>& entrances() const {
+        return entrances_;
+    }
+
+    bool IsBorder(VertexId v) const {
+        return exits_.count(v) || entrances_.count(v);
+    }
+
+    bool empty() const {
+        return v_size() == 0;
+    }
+
+};
+
+}
diff --git a/src/modules/assembly_graph/components/splitters.hpp b/src/common/assembly_graph/components/splitters.hpp
similarity index 77%
rename from src/modules/assembly_graph/components/splitters.hpp
rename to src/common/assembly_graph/components/splitters.hpp
index 3bb8f41..9aa5d0d 100644
--- a/src/modules/assembly_graph/components/splitters.hpp
+++ b/src/common/assembly_graph/components/splitters.hpp
@@ -7,9 +7,9 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "graph_component.hpp"
-#include "algorithms/dijkstra/dijkstra_helper.hpp"
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
 #include "component_filters.hpp"
 
 namespace omnigraph {
@@ -39,6 +39,19 @@ public:
     const Graph& graph() const {
         return graph_;
     }
+protected:
+    //todo remove after returning to optional
+    std::unique_ptr<GraphComponent<Graph>> MakeUniquePtr(GraphComponent<Graph>&& component) const {
+        return std::unique_ptr<GraphComponent<Graph>>(new GraphComponent<Graph>(std::move(component)));
+    }
+
+    //todo remove after returning to optional
+    GraphComponent<Graph> GetValueAndReset(std::unique_ptr<GraphComponent<Graph>>& component_ptr) const {
+        VERIFY(component_ptr);
+        auto answer = std::move(*component_ptr);
+        component_ptr = nullptr;
+        return answer;
+    }
 };
 
 template<class Graph>
@@ -105,9 +118,6 @@ public:
             : current_(collection.begin()), end_(collection.end()) {
     }
 
-//  virtual bool CheckPutVertex(VertexId vertex, EdgeId /*edge*/, size_t /*length*/) const {
-//    return subgraph_.count(vertex) != 0;
-//  }
     CollectionIterator(shared_ptr<Collection> collection)
             : storage_(collection), current_(collection->begin()), end_(collection->end()) {
     }
@@ -126,87 +136,6 @@ public:
         return next;
     }
 
-//public:
-//  ErrorComponentSplitter(const Graph &graph, const set<EdgeId> &black_edges) :
-//      base(graph), black_edges_(black_edges), iterator_(
-//          graph.SmartEdgeBegin()) {
-//    TRACE("ErrorComponentSplitter created and SmartIterator initialized");
-//  }
-//
-//  virtual ~ErrorComponentSplitter() {
-//  }
-//
-//  vector<VertexId> FindComponent(VertexId start_vertex) {
-//    ComponentFinder<Graph> cf(this->graph(), black_edges_);
-//    cf.run(start_vertex);
-//    return cf.ReachedVertices();
-//  }
-//
-//  vector<VertexId> FindNeighbourhood(VertexId start, size_t bound) {
-//    NeighbourhoodFinder<Graph> nf(this->graph(), black_edges_, bound);
-//    nf.run(start);
-//    return nf.ReachedVertices();
-//  }
-//
-//  size_t FindDiameter(const vector<VertexId> &component) {
-//    set < VertexId > component_set(component.begin(), component.end());
-//    size_t result = 0;
-//    VertexId current = *(component.begin());
-//    for (size_t i = 0; i < 4; i++) {
-//      pair<VertexId, size_t> next = GetFarthest(current, component_set);
-//      current = next.first;
-//      result = next.second;
-//    }
-//    return result;
-//  }
-//
-//  pair<VertexId, size_t> GetFarthest(VertexId v,
-//      const set<VertexId> &component) {
-//    SubgraphDijkstra<Graph> sd(this->graph(), component);
-//    sd.run(v);
-//    pair<VertexId, size_t> result(v, 0);
-//    auto bounds = sd.GetDistances();
-//    for (auto it = bounds.first; it != bounds.second; ++it) {
-//      if (it->second > result.second) {
-//        result = *it;
-//      }
-//    }
-//    return result;
-//  }
-//
-//  virtual vector<VertexId> NextComponent() {
-//    TRACE("Construction of next component started");
-//    if (Finished()) {
-//      VERIFY(false);
-//      return vector<VertexId>();
-//    }
-//    EdgeId next = *iterator_;
-//    ++iterator_;
-//    vector < VertexId > component = FindComponent(
-//        this->graph().EdgeEnd(next));
-//    TRACE("Error edges component constructed. It contains "
-//            << component.size() << " vertices");
-//    size_t component_size = FindDiameter(component);
-//    TRACE("Diameter of component is " << component_size);
-//    vector < VertexId > neighbourhood = FindNeighbourhood(
-//        this->graph().EdgeEnd(next), (size_t) math::round(1.5 * (double) component_size));
-//    TRACE("Error edges component neighborhood constructed. It contains "
-//            << neighbourhood.size() << " vertices");
-//    visited_.insert(component.begin(), component.end());
-//    return neighbourhood;
-//  }
-//
-//  virtual bool Finished() {
-//    while (!iterator_.IsEnd()) {
-//      if (black_edges_.find(*iterator_) != black_edges_.end()
-//          && visited_.find(this->graph().EdgeEnd(*iterator_))
-//              == visited_.end()) {
-//        return false;
-//      }
-//      ++iterator_;
-//    }
-//    return true;
-//  }
     bool HasNext() {
         while(current_ != end_ && relaxed_.count(*current_) == 1) {
             ++current_;
@@ -218,31 +147,6 @@ public:
         relaxed_.insert(e);
     }
 
-//template<class Graph>
-//class ShortEdgeComponentNeighbourhoodFinder: public UnorientedDijkstra<Graph> {
-//private:
-//  typedef UnorientedDijkstra<Graph> base;
-//protected:
-//  typedef typename base::VertexId VertexId;
-//  typedef typename base::EdgeId EdgeId;
-//  typedef typename base::DistanceType distance_t;
-//private:
-//  distance_t bound_;
-//public:
-//  ShortEdgeComponentNeighbourhoodFinder(const Graph &graph, distance_t bound) :
-//      UnorientedDijkstra<Graph>(graph), bound_(bound) {
-//  }
-//
-//  virtual bool CheckProcessVertexVertexId (VertexId /*vertex*/, distance_t distance) {
-//    return distance == 0;
-//  }
-//
-//  virtual distance_t GetLength(EdgeId edge) const {
-//    if (this->graph().length(edge) <= bound_)
-//      return 0;
-//    else
-//      return 1;
-//  }
     void Relax(const vector<Element> &v) {
         for (auto it = v.begin(); it != v.end(); ++it)
             Relax(*it);
@@ -296,40 +200,6 @@ public:
             current_++;
     }
 
-//public:
-//  CountingDijkstra(const Graph &graph, size_t max_size,
-//      size_t edge_length_bound) :
-//      base(graph), max_size_(max_size), edge_length_bound_(
-//          edge_length_bound), current_(0) {
-//  }
-//
-//  virtual bool CheckPutVertex(VertexId /*vertex*/, EdgeId edge,
-//      distance_t /*length*/) const {
-//    if (current_ < max_size_) {
-//      ++current_;
-//    }
-//    if (current_ < max_size_ && GetLength(edge) < inf) {
-//      return true;
-//    }
-//    return false;
-//  }
-//
-//  virtual bool CheckProcessVertex(VertexId /*vertex*/, distance_t /*distance*/) {
-//    return current_ < max_size_;
-//  }
-//
-//  virtual void init(VertexId /*start*/) {
-//    current_ = 0;
-//  }
-//
-//  virtual size_t GetLength(EdgeId edge) const {
-//    if (this->graph().length(edge) <= edge_length_bound_)
-//      //todo change back
-////            return 1;
-//      return this->graph().length(edge);
-//    else
-//      return inf;
-//  }
     void Relax(VertexId e) {
         Relax(vector<VertexId>({e}));
     }
@@ -347,9 +217,9 @@ public:
         return graph_;
     }
 
-    virtual GraphComponent<Graph> Find(typename Graph::VertexId v) = 0;
+    virtual GraphComponent<Graph> Find(typename Graph::VertexId v) const = 0;
 
-    virtual vector<typename Graph::VertexId> InnerVertices(const GraphComponent<Graph> &component) = 0;
+    virtual vector<typename Graph::VertexId> InnerVertices(const GraphComponent<Graph> &component) const = 0;
 
     virtual ~AbstractNeighbourhoodFinder() {
     }
@@ -391,10 +261,103 @@ public:
     GraphComponent<Graph> CloseComponent(const GraphComponent<Graph>& component) const {
         set<VertexId> vertices(component.v_begin(), component.v_end());
         CloseComponent(vertices);
-        return GraphComponent<Graph>(graph_, vertices.begin(), vertices.end());
+        return GraphComponent<Graph>::FromVertices(graph_, vertices);
+    }
+};
+
+template<class Graph>
+class HighCoverageComponentFinder : public AbstractNeighbourhoodFinder<Graph> {
+private:
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+
+    class CoverageBoundedDFS {
+    private:
+        const Graph &graph_;
+        const double coverage_bound_;
+        const size_t edge_limit_;
+        mutable size_t edge_summary_length_;
+
+        void Find(EdgeId edge, std::set<EdgeId> &result) const {
+            if (result.size() > edge_limit_) {
+                return;
+            }
+
+            if (math::ls(graph_.coverage(edge), coverage_bound_)) {
+                return;
+            }
+
+            if (result.count(edge) || result.count(graph_.conjugate(edge))) {
+                return;
+            }
+
+            edge_summary_length_ += graph_.length(edge);
+            result.insert(edge);
+            result.insert(graph_.conjugate(edge));
+
+            VertexId v = graph_.EdgeEnd(edge);
+            for (auto e : graph_.IncidentEdges(v)) {
+                Find(e, result);
+            }
+
+            v = graph_.EdgeStart(edge);
+            for (auto e : graph_.IncidentEdges(v)) {
+                Find(e, result);
+            }
+        }
+
+    public:
+        CoverageBoundedDFS(const Graph &graph, double coverage_bound,
+                           size_t edge_limit = 10000)
+                : graph_(graph),
+                  coverage_bound_(coverage_bound),
+                  edge_limit_(edge_limit),
+                  edge_summary_length_(0) {
+        }
+
+        std::set<EdgeId> Find(VertexId v) const {
+            edge_summary_length_ = 0;
+            std::set<EdgeId> result;
+            for (auto e : graph_.OutgoingEdges(v)) {
+                Find(e, result);
+            }
+            for (auto e : graph_.IncomingEdges(v)) {
+                Find(e, result);
+            }
+            return result;
+        }
+
+        size_t EdgeSummaryLength() const {
+            return edge_summary_length_;
+        }
+    };
+
+
+    const double coverage_bound_;
+    CoverageBoundedDFS dfs_helper;
+
+public:
+    HighCoverageComponentFinder(const Graph &graph, double max_coverage)
+    : AbstractNeighbourhoodFinder<Graph>(graph), coverage_bound_(max_coverage), dfs_helper(graph, max_coverage) {
+    }
+
+    GraphComponent<Graph> Find(typename Graph::VertexId v) const {
+        std::set<EdgeId> result = dfs_helper.Find(v);
+        return GraphComponent<Graph>::FromEdges(this->graph(), result, false);
+    }
+
+    size_t EdgeSummaryLength(VertexId v) const {
+        GraphComponent<Graph> component = Find(v);
+        DEBUG("Summary edge length for vertex " << v.int_id() << " is " << dfs_helper.EdgeSummaryLength());
+        return dfs_helper.EdgeSummaryLength();
+    }
+
+    vector<VertexId> InnerVertices(const GraphComponent<Graph> &component) const {
+        return vector<VertexId>(component.v_begin(), component.v_end());
     }
 };
 
+
 //This method finds a neighbourhood of a set of vertices. Vertices that are connected by an edge of length more than 600 are not considered as adjacent.
 template<class Graph>
 class ReliableNeighbourhoodFinder : public AbstractNeighbourhoodFinder<Graph> {
@@ -402,7 +365,7 @@ private:
     typedef typename Graph::VertexId VertexId;
     typedef typename Graph::EdgeId EdgeId;
 
-    set<VertexId> FindNeighbours(const set<VertexId> &s) {
+    set<VertexId> FindNeighbours(const set<VertexId> &s) const {
         set<VertexId> result(s.begin(), s.end());
         for (VertexId v : result) {
             for (EdgeId e : this->graph().IncidentEdges(v)) {
@@ -415,7 +378,7 @@ private:
         return result;
     }
 
-    set<VertexId> FindNeighbours(const set<VertexId> &s, size_t eps) {
+    set<VertexId> FindNeighbours(const set<VertexId> &s, size_t eps) const {
         set<VertexId> result = s;
         for(size_t i = 0; i < eps; i++) {
             result = FindNeighbours(result);
@@ -423,13 +386,10 @@ private:
         return result;
     }
 
-    set<VertexId> FindBorder(const GraphComponent<Graph> component) {
+    set<VertexId> FindBorder(const GraphComponent<Graph>& component) const {
         set<VertexId> result;
-        for(auto it = component.vertices().begin(); it != component.vertices().end(); ++it) {
-            if(component.IsBorder(*it)) {
-                result.insert(*it);
-            }
-        }
+        insert_all(result, component.entrances());
+        insert_all(result, component.exits());
         return result;
     }
 
@@ -448,7 +408,7 @@ public:
               max_size_(max_size) {
     }
 
-    GraphComponent<Graph> Find(typename Graph::VertexId v) {
+    GraphComponent<Graph> Find(typename Graph::VertexId v) const {
         auto cd = DijkstraHelper<Graph>::CreateCountingDijkstra(this->graph(), max_size_,
                 edge_length_bound_);
         cd.Run(v);
@@ -456,15 +416,16 @@ public:
         set<VertexId> result(result_vector.begin(), result_vector.end());
         ComponentCloser<Graph> cc(this->graph(), edge_length_bound_);
         cc.CloseComponent(result);
-        return GraphComponent<Graph>(this->graph(), result.begin(),
-                                     result.end());
+        return GraphComponent<Graph>::FromVertices(this->graph(), result);
     }
 
-    vector<VertexId> InnerVertices(const GraphComponent<Graph> &component) {
+    vector<VertexId> InnerVertices(const GraphComponent<Graph> &component) const {
         set<VertexId> border = FindNeighbours(FindBorder(component), 2);
         std::vector<VertexId> result;
-        std::set_difference(component.vertices().begin(), component.vertices().end(), border.begin(), border.end(), std::inserter(result, result.end()));
-        return vector<VertexId>(result.begin(), result.end());
+        std::set_difference(component.vertices().begin(), component.vertices().end(),
+                            border.begin(), border.end(),
+                            std::inserter(result, result.end()));
+        return result;
     }
 };
 
@@ -543,7 +504,7 @@ public:
     const size_t max_size_;
     const size_t max_depth_;
 
-    set<VertexId> last_inner_;
+    mutable set<VertexId> last_inner_;
 
     PathNeighbourhoodFinder(const Graph &graph, const vector<EdgeId>& path, size_t edge_length_bound = DEFAULT_EDGE_LENGTH_BOUND,
                             size_t max_size = DEFAULT_MAX_SIZE, size_t max_depth = DEFAULT_MAX_DEPTH)
@@ -555,7 +516,7 @@ public:
     }
 
 
-    GraphComponent<Graph> Find(VertexId v) {
+    GraphComponent<Graph> Find(VertexId v) const {
         TRACE("Starting from vertex " << this->graph().str(v));
         last_inner_.clear();
         set<VertexId> grey;
@@ -564,10 +525,10 @@ public:
         last_inner_ = black;
         last_inner_.insert(v);
         ComponentCloser<Graph>(this->graph(), 0).CloseComponent(grey);
-        return GraphComponent<Graph>(this->graph(), grey.begin(), grey.end());
+        return GraphComponent<Graph>::FromVertices(this->graph(), grey);
     }
 
-    vector<VertexId> InnerVertices(const GraphComponent<Graph> &/*component*/) {
+    vector<VertexId> InnerVertices(const GraphComponent<Graph> &/*component*/) const {
         return vector<VertexId>(last_inner_.begin(), last_inner_.end());
     }
 private:
@@ -590,15 +551,13 @@ public:
               edge_length_bound_(edge_length_bound) {
     }
 
-    GraphComponent<Graph> Find(VertexId v) {
+    GraphComponent<Graph> Find(VertexId v) const {
         auto cd = DijkstraHelper<Graph>::CreateShortEdgeDijkstra(this->graph(), edge_length_bound_);
         cd.Run(v);
-        set<VertexId> result = cd.ProcessedVertices();
-        return GraphComponent<Graph>(this->graph(), result.begin(),
-                                     result.end());
+        return GraphComponent<Graph>::FromVertices(this->graph(), cd.ProcessedVertices());
     }
 
-    vector<VertexId> InnerVertices(const GraphComponent<Graph> &component) {
+    vector<VertexId> InnerVertices(const GraphComponent<Graph> &component) const {
         return vector<VertexId>(component.v_begin(), component.v_end());
     }
 };
@@ -611,12 +570,13 @@ private:
 
     shared_ptr<GraphSplitter<Graph>> inner_splitter_;
     shared_ptr<GraphComponentFilter<Graph>> checker_;
-    boost::optional<GraphComponent<Graph>> next_;
+    shared_ptr<GraphComponent<Graph>> next_;
 public:
     FilteringSplitterWrapper(
             shared_ptr<GraphSplitter<Graph>> inner_splitter,
             shared_ptr<GraphComponentFilter<Graph>> checker)
-            : GraphSplitter<Graph>(inner_splitter->graph()), inner_splitter_(inner_splitter),
+            : GraphSplitter<Graph>(inner_splitter->graph()),
+              inner_splitter_(inner_splitter),
               checker_(checker) {
     }
 
@@ -625,20 +585,21 @@ public:
             VERIFY(false);
             return omnigraph::GraphComponent<Graph>(this->graph());
         }
-        GraphComponent<Graph> result = next_.get();
-        next_ = boost::optional<GraphComponent<Graph>>();
-        return result;
+        auto result = next_;
+        next_ = nullptr;
+        return *result;
     }
 
     bool HasNext() {
         while (!next_ && inner_splitter_->HasNext()) {
-            GraphComponent<Graph> ne = inner_splitter_->Next();
-            if (checker_->Check(ne)) {
-                next_ = ne;
+            next_ = std::make_shared(inner_splitter_->Next());
+            if (!checker_->Check(*next_)) {
+                next_ = nullptr;
             }
         }
         return next_;
     }
+
 private:
     DECL_LOGGER("FilteringSplitterWrapper");
 };
@@ -652,7 +613,7 @@ private:
 
     shared_ptr<GraphSplitter<Graph>> inner_splitter_;
     shared_ptr<GraphComponentFilter<Graph>> checker_;
-    boost::optional<GraphComponent<Graph>> next_;
+    std::unique_ptr<GraphComponent<Graph>> next_;
     set<VertexId> filtered_;
 public:
     CollectingSplitterWrapper(
@@ -664,28 +625,27 @@ public:
 
     GraphComponent<Graph> Next() {
         if (!HasNext()) {
-               VERIFY(false);
-               return omnigraph::GraphComponent<Graph>(this->graph());
+           VERIFY(false);
+           return omnigraph::GraphComponent<Graph>::Empty(this->graph());
         } else {
-            if(next_) {
-                GraphComponent<Graph> result = next_.get();
-                next_ = boost::optional<GraphComponent<Graph>>();
-                return result;
+            if (next_) {
+                return this->GetValueAndReset(next_);
             } else {
-                   GraphComponent<Graph> result(this->graph(), filtered_.begin(), filtered_.end(), false, "filtered");
-                   filtered_.clear();
-                   return result;
+                auto result = GraphComponent<Graph>::FromVertices(this->graph(),
+                                                                  filtered_,
+                                                                  false, "filtered");
+                filtered_.clear();
+                return result;
             }
         }
     }
 
     bool HasNext() {
         while (!next_ && inner_splitter_->HasNext()) {
-            GraphComponent<Graph> ne = inner_splitter_->Next();
-            if (checker_->Check(ne)) {
-                next_ = ne;
-            } else {
-                filtered_.insert(ne.v_begin(), ne.v_end());
+            next_ = this->MakeUniquePtr(inner_splitter_->Next());
+            if (!checker_->Check(*next_)) {
+                filtered_.insert(next_->v_begin(), next_->v_end());
+                next_ = nullptr;
             }
         }
         return next_ || !filtered_.empty();
@@ -702,7 +662,7 @@ private:
 
     shared_ptr<GraphSplitter<Graph>> inner_splitter_;
     shared_ptr<GraphComponentFilter<Graph>> checker_;
-    boost::optional<GraphComponent<Graph>> next_;
+    std::unique_ptr<GraphComponent<Graph>> next_;
 
     string CutName(const string &name, size_t max_length) {
         VERIFY(max_length >= 7);
@@ -724,7 +684,8 @@ private:
         for(size_t i = 0; i < 10 && inner_splitter_->HasNext(); i++) {
             next = inner_splitter_->Next();
             if (checker_->Check(next)) {
-                next_ = next;
+                VERIFY(!next_);
+                next_ = this->MakeUniquePtr(std::move(next));
                 break;
             } else {
                 vertices.insert(next.v_begin(), next.v_end());
@@ -734,9 +695,10 @@ private:
                 }
             }
         }
-        return GraphComponent<Graph>(this->graph(), vertices.begin(), vertices.end(), CutName(name, 60));
+        return GraphComponent<Graph>::FromVertices(this->graph(), vertices, false, CutName(name, 60));
     }
 
+
 public:
     CondensingSplitterWrapper(
             shared_ptr<GraphSplitter<Graph>> inner_splitter,
@@ -748,21 +710,20 @@ public:
     GraphComponent<Graph> Next() {
         if (!HasNext()) {
             VERIFY(false);
-            return omnigraph::GraphComponent<Graph>(this->graph());
+            return GraphComponent<Graph>(this->graph());
         }
-        if(next_) {
-            GraphComponent<Graph> result = next_.get();
-            next_ = boost::optional<GraphComponent<Graph>>();
-            return result;
+
+        if (next_) {
+            return this->GetValueAndReset(next_);
         } else {
             return ConstructComponent();
         }
     }
 
     bool HasNext() {
-        if(next_)
+        if (next_)
             return true;
-        if(!inner_splitter_->HasNext())
+        if (!inner_splitter_->HasNext())
             return false;
         return true;
     }
diff --git a/src/modules/assembly_graph/graph_core/action_handlers.hpp b/src/common/assembly_graph/core/action_handlers.hpp
similarity index 99%
rename from src/modules/assembly_graph/graph_core/action_handlers.hpp
rename to src/common/assembly_graph/core/action_handlers.hpp
index 55d015d..6395991 100644
--- a/src/modules/assembly_graph/graph_core/action_handlers.hpp
+++ b/src/common/assembly_graph/core/action_handlers.hpp
@@ -8,8 +8,8 @@
 #ifndef __OMNI_ACTION_HANDLERS_HPP__
 #define __OMNI_ACTION_HANDLERS_HPP__
 
-#include "dev_support/verify.hpp"
-#include "dev_support/logger/logger.hpp"
+#include "utils/verify.hpp"
+#include "utils/logger/logger.hpp"
 
 #include <boost/noncopyable.hpp>
 #include <string>
diff --git a/src/modules/assembly_graph/graph_core/basic_graph_stats.hpp b/src/common/assembly_graph/core/basic_graph_stats.hpp
similarity index 97%
rename from src/modules/assembly_graph/graph_core/basic_graph_stats.hpp
rename to src/common/assembly_graph/core/basic_graph_stats.hpp
index 52701ac..bad128f 100644
--- a/src/modules/assembly_graph/graph_core/basic_graph_stats.hpp
+++ b/src/common/assembly_graph/core/basic_graph_stats.hpp
@@ -1,6 +1,6 @@
 #pragma once
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 namespace omnigraph {
 
 template<class Graph>
diff --git a/src/modules/assembly_graph/graph_core/construction_helper.hpp b/src/common/assembly_graph/core/construction_helper.hpp
similarity index 98%
rename from src/modules/assembly_graph/graph_core/construction_helper.hpp
rename to src/common/assembly_graph/core/construction_helper.hpp
index f9c5514..229a228 100644
--- a/src/modules/assembly_graph/graph_core/construction_helper.hpp
+++ b/src/common/assembly_graph/core/construction_helper.hpp
@@ -5,7 +5,7 @@
 //***************************************************************************
 
 #pragma once
-//#include "graph_core.hpp"
+//#include "core.hpp"
 #include "observable_graph.hpp"
 
 namespace omnigraph {
diff --git a/src/modules/assembly_graph/graph_core/coverage.hpp b/src/common/assembly_graph/core/coverage.hpp
similarity index 97%
rename from src/modules/assembly_graph/graph_core/coverage.hpp
rename to src/common/assembly_graph/core/coverage.hpp
index 4f243eb..8385b04 100644
--- a/src/modules/assembly_graph/graph_core/coverage.hpp
+++ b/src/common/assembly_graph/core/coverage.hpp
@@ -14,7 +14,7 @@
 
 #pragma once
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 #include <iostream>
 #include <vector>
 #include <algorithm>
@@ -332,12 +332,4 @@ class CoverageIndex : public GraphActionHandler<Graph> {
     }
 };
 
-//todo discuss with Anton
-template<class Graph>
-class AbstractFlankingCoverage {
-public:
-    virtual double GetInCov(typename Graph::EdgeId edge) const = 0;
-    virtual double GetOutCov(typename Graph::EdgeId edge) const = 0;
-};
-
 }
diff --git a/src/modules/assembly_graph/graph_core/debruijn_data.hpp b/src/common/assembly_graph/core/debruijn_data.hpp
similarity index 96%
rename from src/modules/assembly_graph/graph_core/debruijn_data.hpp
rename to src/common/assembly_graph/core/debruijn_data.hpp
index c775165..f196c2e 100644
--- a/src/modules/assembly_graph/graph_core/debruijn_data.hpp
+++ b/src/common/assembly_graph/core/debruijn_data.hpp
@@ -9,10 +9,10 @@
 #include <vector>
 #include <set>
 #include <cstring>
-#include "dev_support/verify.hpp"
-#include "dev_support/logger/logger.hpp"
-#include "data_structures/sequence/sequence_tools.hpp"
-#include "dev_support/standard_base.hpp"
+#include "utils/verify.hpp"
+#include "utils/logger/logger.hpp"
+#include "sequence/sequence_tools.hpp"
+#include "utils/standard_base.hpp"
 
 namespace debruijn_graph {
 class DeBruijnMaster;
diff --git a/src/modules/assembly_graph/graph_core/directions.hpp b/src/common/assembly_graph/core/directions.hpp
similarity index 100%
rename from src/modules/assembly_graph/graph_core/directions.hpp
rename to src/common/assembly_graph/core/directions.hpp
diff --git a/src/modules/assembly_graph/graph_core/graph.hpp b/src/common/assembly_graph/core/graph.hpp
similarity index 100%
rename from src/modules/assembly_graph/graph_core/graph.hpp
rename to src/common/assembly_graph/core/graph.hpp
diff --git a/src/modules/assembly_graph/graph_core/graph_core.hpp b/src/common/assembly_graph/core/graph_core.hpp
similarity index 99%
rename from src/modules/assembly_graph/graph_core/graph_core.hpp
rename to src/common/assembly_graph/core/graph_core.hpp
index d45efb4..71dd589 100644
--- a/src/modules/assembly_graph/graph_core/graph_core.hpp
+++ b/src/common/assembly_graph/core/graph_core.hpp
@@ -8,11 +8,11 @@
 
 #include <vector>
 #include <set>
-#include "dev_support/verify.hpp"
-#include "dev_support/logger/logger.hpp"
+#include "utils/verify.hpp"
+#include "utils/logger/logger.hpp"
 #include "order_and_law.hpp"
 #include <boost/iterator/iterator_facade.hpp>
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 
 namespace omnigraph {
 
@@ -378,7 +378,7 @@ protected:
     }
 
     void HiddenDeleteEdge(EdgeId edge) {
-        DEBUG("Hidden delete edge " << edge.int_id());
+        TRACE("Hidden delete edge " << edge.int_id());
         EdgeId rcEdge = conjugate(edge);
         VertexId rcStart = conjugate(edge->end());
         VertexId start = conjugate(rcEdge->end());
diff --git a/src/modules/assembly_graph/graph_core/graph_iterators.hpp b/src/common/assembly_graph/core/graph_iterators.hpp
similarity index 96%
rename from src/modules/assembly_graph/graph_core/graph_iterators.hpp
rename to src/common/assembly_graph/core/graph_iterators.hpp
index 9879885..4edb985 100644
--- a/src/modules/assembly_graph/graph_core/graph_iterators.hpp
+++ b/src/common/assembly_graph/core/graph_iterators.hpp
@@ -7,10 +7,10 @@
 
 #pragma once
 
-#include "utils/adt/queue_iterator.hpp"
-#include "math/pred.hpp"
+#include "common/adt/queue_iterator.hpp"
+#include "func/pred.hpp"
 #include "action_handlers.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 #include <boost/iterator/iterator_facade.hpp>
 
 namespace omnigraph {
@@ -27,7 +27,7 @@ class SmartIterator : public GraphActionHandler<Graph> {
     bool add_new_;
     bool canonical_only_;
     //todo think of checking it in HandleAdd
-    pred::TypedPredicate<ElementId> add_condition_;
+    func::TypedPredicate<ElementId> add_condition_;
 
 protected:
 
@@ -57,7 +57,7 @@ protected:
 
     SmartIterator(const Graph &g, const std::string &name, bool add_new,
                   const Comparator& comparator, bool canonical_only,
-                  pred::TypedPredicate<ElementId> add_condition = pred::AlwaysTrue<ElementId>())
+                  func::TypedPredicate<ElementId> add_condition = func::AlwaysTrue<ElementId>())
             : base(g, name),
               inner_it_(comparator),
               add_new_(add_new),
@@ -119,7 +119,7 @@ public:
                      bool add_new = false,
                      const Comparator& comparator = Comparator(),
                      bool canonical_only = false,
-                     pred::TypedPredicate<ElementId> add_condition = pred::AlwaysTrue<ElementId>())
+                     func::TypedPredicate<ElementId> add_condition = func::AlwaysTrue<ElementId>())
             : base(g, "SmartSet " + ToString(this), add_new, comparator, canonical_only, add_condition) {
     }
 
@@ -128,7 +128,7 @@ public:
                      bool add_new = false,
                      const Comparator& comparator = Comparator(),
                      bool canonical_only = false,
-                     pred::TypedPredicate<ElementId> add_condition = pred::AlwaysTrue<ElementId>())
+                     func::TypedPredicate<ElementId> add_condition = func::AlwaysTrue<ElementId>())
             : SmartSetIterator(g, add_new, comparator, canonical_only, add_condition) {
         insert(begin, end);
     }
diff --git a/src/modules/assembly_graph/graph_core/observable_graph.hpp b/src/common/assembly_graph/core/observable_graph.hpp
similarity index 99%
rename from src/modules/assembly_graph/graph_core/observable_graph.hpp
rename to src/common/assembly_graph/core/observable_graph.hpp
index 0286cc5..5b62e24 100644
--- a/src/modules/assembly_graph/graph_core/observable_graph.hpp
+++ b/src/common/assembly_graph/core/observable_graph.hpp
@@ -10,7 +10,7 @@
 #include <vector>
 #include <set>
 #include <cstring>
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 #include "graph_core.hpp"
 #include "graph_iterators.hpp"
 
diff --git a/src/modules/assembly_graph/graph_core/order_and_law.hpp b/src/common/assembly_graph/core/order_and_law.hpp
similarity index 99%
rename from src/modules/assembly_graph/graph_core/order_and_law.hpp
rename to src/common/assembly_graph/core/order_and_law.hpp
index 20ad96d..1f0373c 100644
--- a/src/modules/assembly_graph/graph_core/order_and_law.hpp
+++ b/src/common/assembly_graph/core/order_and_law.hpp
@@ -12,10 +12,10 @@
 #include <ostream>
 #include <unordered_set>
 #include <unordered_map>
-#include "dev_support/stacktrace.hpp"
+#include "utils/stacktrace.hpp"
 #include <algorithm>
 #include <map>
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 #include "folly/PackedSyncPtr.h"
 
 
diff --git a/src/modules/algorithms/dijkstra/dijkstra_algorithm.hpp b/src/common/assembly_graph/dijkstra/dijkstra_algorithm.hpp
similarity index 99%
rename from src/modules/algorithms/dijkstra/dijkstra_algorithm.hpp
rename to src/common/assembly_graph/dijkstra/dijkstra_algorithm.hpp
index 11c32d8..536e4ed 100644
--- a/src/modules/algorithms/dijkstra/dijkstra_algorithm.hpp
+++ b/src/common/assembly_graph/dijkstra/dijkstra_algorithm.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 #pragma once
 
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 #include "dijkstra_settings.hpp"
 
 #include <queue>
diff --git a/src/modules/algorithms/dijkstra/dijkstra_helper.hpp b/src/common/assembly_graph/dijkstra/dijkstra_helper.hpp
similarity index 95%
rename from src/modules/algorithms/dijkstra/dijkstra_helper.hpp
rename to src/common/assembly_graph/dijkstra/dijkstra_helper.hpp
index 756f2af..a912a31 100644
--- a/src/modules/algorithms/dijkstra/dijkstra_helper.hpp
+++ b/src/common/assembly_graph/dijkstra/dijkstra_helper.hpp
@@ -145,14 +145,14 @@ public:
             LengthCalculator<Graph>,
             BoundedVertexTargetedProcessChecker<Graph>,
             BoundPutChecker<Graph>,
-            ForwardNeighbourIteratorFactory<Graph> > TargeredBoundedDijkstraSettings;
+            ForwardNeighbourIteratorFactory<Graph> > TargetedBoundedDijkstraSettings;
 
-    typedef Dijkstra<Graph, TargeredBoundedDijkstraSettings> TargeredBoundedDijkstra;
+    typedef Dijkstra<Graph, TargetedBoundedDijkstraSettings> TargetedBoundedDijkstra;
 
-    static TargeredBoundedDijkstra CreateTargeredBoundedDijkstra(const Graph &graph,
+    static TargetedBoundedDijkstra CreateTargetedBoundedDijkstra(const Graph &graph,
             VertexId target_vertex, size_t bound, size_t max_vertex_number = size_t(-1)){
-        return TargeredBoundedDijkstra(graph,
-                TargeredBoundedDijkstraSettings(LengthCalculator<Graph>(graph),
+        return TargetedBoundedDijkstra(graph,
+                TargetedBoundedDijkstraSettings(LengthCalculator<Graph>(graph),
                         BoundedVertexTargetedProcessChecker<Graph>(target_vertex, bound),
                         BoundPutChecker<Graph>(bound),
                         ForwardNeighbourIteratorFactory<Graph>(graph)),
diff --git a/src/modules/algorithms/dijkstra/dijkstra_settings.hpp b/src/common/assembly_graph/dijkstra/dijkstra_settings.hpp
similarity index 100%
rename from src/modules/algorithms/dijkstra/dijkstra_settings.hpp
rename to src/common/assembly_graph/dijkstra/dijkstra_settings.hpp
diff --git a/src/modules/algorithms/dijkstra/length_calculator.hpp b/src/common/assembly_graph/dijkstra/length_calculator.hpp
similarity index 98%
rename from src/modules/algorithms/dijkstra/length_calculator.hpp
rename to src/common/assembly_graph/dijkstra/length_calculator.hpp
index ec29690..78fe439 100644
--- a/src/modules/algorithms/dijkstra/length_calculator.hpp
+++ b/src/common/assembly_graph/dijkstra/length_calculator.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 
 namespace omnigraph {
 
diff --git a/src/modules/algorithms/dijkstra/neighbours_iterator.hpp b/src/common/assembly_graph/dijkstra/neighbours_iterator.hpp
similarity index 100%
rename from src/modules/algorithms/dijkstra/neighbours_iterator.hpp
rename to src/common/assembly_graph/dijkstra/neighbours_iterator.hpp
diff --git a/src/modules/algorithms/dijkstra/vertex_process_checker.hpp b/src/common/assembly_graph/dijkstra/vertex_process_checker.hpp
similarity index 100%
rename from src/modules/algorithms/dijkstra/vertex_process_checker.hpp
rename to src/common/assembly_graph/dijkstra/vertex_process_checker.hpp
diff --git a/src/modules/algorithms/dijkstra/vertex_put_checker.hpp b/src/common/assembly_graph/dijkstra/vertex_put_checker.hpp
similarity index 100%
rename from src/modules/algorithms/dijkstra/vertex_put_checker.hpp
rename to src/common/assembly_graph/dijkstra/vertex_put_checker.hpp
diff --git a/src/common/assembly_graph/graph_support/basic_edge_conditions.hpp b/src/common/assembly_graph/graph_support/basic_edge_conditions.hpp
new file mode 100644
index 0000000..a32a2f3
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/basic_edge_conditions.hpp
@@ -0,0 +1,151 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "func/func.hpp"
+#include "func/pred.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
+#include "assembly_graph/core/directions.hpp"
+#include "assembly_graph/paths/path_finders.hpp"
+
+namespace omnigraph {
+
+template<class Graph>
+using EdgePredicate = func::TypedPredicate<typename Graph::EdgeId>;
+
+template<class Graph>
+class EdgeCondition : public func::AbstractPredicate<typename Graph::EdgeId> {
+    typedef typename Graph::EdgeId EdgeId;
+
+    const Graph &g_;
+protected:
+
+    EdgeCondition(const Graph &g)
+            : g_(g) {
+    }
+
+    const Graph &g() const {
+        return g_;
+    }
+
+};
+
+template<class Graph>
+class IsolatedEdgeCondition : public EdgeCondition<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef EdgeCondition<Graph> base;
+
+    bool IsTerminalVertex(VertexId v) const {
+        return this->g().IncomingEdgeCount(v) + this->g().OutgoingEdgeCount(v) == 1;
+    }
+
+public:
+    IsolatedEdgeCondition(const Graph &g) : base(g) {
+    }
+
+    bool Check(EdgeId e) const {
+        return IsTerminalVertex(this->g().EdgeStart(e)) && IsTerminalVertex(this->g().EdgeEnd(e));
+    }
+
+};
+
+template<class Graph>
+inline bool HasAlternatives(const Graph &g, typename Graph::EdgeId e) {
+    return g.OutgoingEdgeCount(g.EdgeStart(e)) > 1
+           && g.IncomingEdgeCount(g.EdgeEnd(e)) > 1;
+}
+
+
+template<class Graph>
+class AlternativesPresenceCondition : public EdgeCondition<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef EdgeCondition<Graph> base;
+
+public:
+
+    AlternativesPresenceCondition(const Graph &g)
+            : base(g) {
+
+    }
+
+    bool Check(EdgeId e) const {
+        return HasAlternatives(this->g(), e);
+    }
+
+};
+
+template<class Graph>
+func::TypedPredicate<typename Graph::EdgeId> AddAlternativesPresenceCondition(const Graph &g,
+                                                                              func::TypedPredicate<typename Graph::EdgeId> condition) {
+    return func::And(AlternativesPresenceCondition<Graph>(g), condition);
+}
+
+
+template<class Graph>
+class CoverageUpperBound : public EdgeCondition<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef EdgeCondition<Graph> base;
+    const double max_coverage_;
+
+public:
+
+    CoverageUpperBound(const Graph &g, double max_coverage)
+            : base(g),
+              max_coverage_(max_coverage) {
+    }
+
+    bool Check(EdgeId e) const {
+        return math::le(this->g().coverage(e), max_coverage_);
+    }
+
+};
+
+template<class Graph>
+class LengthUpperBound : public EdgeCondition<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef EdgeCondition<Graph> base;
+
+    const size_t max_length_;
+
+public:
+
+    LengthUpperBound(const Graph &g, size_t max_length)
+            : base(g),
+              max_length_(max_length) {
+    }
+
+    bool Check(EdgeId e) const {
+        return this->g().length(e) <= max_length_;
+    }
+
+};
+
+template<class Graph>
+class SelfConjugateCondition : public EdgeCondition<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef EdgeCondition<Graph> base;
+
+public:
+
+    SelfConjugateCondition(const Graph& g)
+            : base(g) {
+    }
+
+    bool Check(EdgeId e) const {
+        return e == this->g().conjugate(e);
+    }
+
+private:
+    DECL_LOGGER("SelfConjugateCondition");
+};
+
+
+}
diff --git a/src/modules/assembly_graph/graph_support/basic_vertex_conditions.hpp b/src/common/assembly_graph/graph_support/basic_vertex_conditions.hpp
similarity index 64%
rename from src/modules/assembly_graph/graph_support/basic_vertex_conditions.hpp
rename to src/common/assembly_graph/graph_support/basic_vertex_conditions.hpp
index 2d9e05e..c3e6427 100644
--- a/src/modules/assembly_graph/graph_support/basic_vertex_conditions.hpp
+++ b/src/common/assembly_graph/graph_support/basic_vertex_conditions.hpp
@@ -1,12 +1,11 @@
 #pragma once
-#include "math/pred.hpp"
-#include "dev_support/func.hpp"
+#include "func/pred.hpp"
+#include "func/func.hpp"
 
 namespace omnigraph {
-using func::Predicate;
 
 template<class Graph>
-class VertexCondition : public Predicate<typename Graph::VertexId> {
+class VertexCondition : public func::AbstractPredicate<typename Graph::VertexId> {
     typedef typename Graph::VertexId VertexId;
     const Graph &g_;
 protected:
@@ -49,4 +48,19 @@ public:
     }
 };
 
+template<class Graph>
+class TerminalVertexCondition : public VertexCondition<Graph> {
+    typedef typename Graph::VertexId VertexId;
+
+public:
+    TerminalVertexCondition(const Graph& g) :
+            VertexCondition<Graph>(g) {
+    }
+
+    bool Check(VertexId v) const override {
+        return this->g().IncomingEdgeCount(v) + this->g().OutgoingEdgeCount(v) == 1;
+    }
+
+};
+
 }
\ No newline at end of file
diff --git a/src/modules/assembly_graph/graph_support/chimera_stats.hpp b/src/common/assembly_graph/graph_support/chimera_stats.hpp
similarity index 100%
rename from src/modules/assembly_graph/graph_support/chimera_stats.hpp
rename to src/common/assembly_graph/graph_support/chimera_stats.hpp
diff --git a/src/modules/assembly_graph/graph_support/comparators.hpp b/src/common/assembly_graph/graph_support/comparators.hpp
similarity index 100%
rename from src/modules/assembly_graph/graph_support/comparators.hpp
rename to src/common/assembly_graph/graph_support/comparators.hpp
diff --git a/src/modules/assembly_graph/graph_support/contig_output.hpp b/src/common/assembly_graph/graph_support/contig_output.hpp
similarity index 71%
rename from src/modules/assembly_graph/graph_support/contig_output.hpp
rename to src/common/assembly_graph/graph_support/contig_output.hpp
index 26e9dda..a0daf1f 100644
--- a/src/modules/assembly_graph/graph_support/contig_output.hpp
+++ b/src/common/assembly_graph/graph_support/contig_output.hpp
@@ -8,7 +8,7 @@
 #pragma once
 
 #include "assembly_graph/stats/picture_dump.hpp"
-#include <io/reads_io/osequencestream.hpp>
+#include <io/reads/osequencestream.hpp>
 #include "assembly_graph/components/connected_component.hpp"
 #include "assembly_graph/stats/statistics.hpp"
 #include "assembly_graph/paths/path_finders.hpp"
@@ -50,6 +50,175 @@ public:
     }
 };
 
+
+class GFASegmentWriter {
+private:
+    std::ostream &ostream_;
+
+
+public:
+
+    GFASegmentWriter(std::ostream &stream) : ostream_(stream)  {
+    }
+
+    void Write(size_t edge_id, const Sequence &seq, double cov) {
+        ostream_ << "S\t" << edge_id << "\t";
+        ostream_ << seq.str() << "\t";
+        ostream_ << "KC:i:" << int(cov) << std::endl;
+    }
+};
+
+class GFALinkWriter {
+private:
+    std::ostream &ostream_;
+    size_t overlap_size_;
+
+public:
+
+    GFALinkWriter(std::ostream &stream, size_t overlap_size) : ostream_(stream), overlap_size_(overlap_size)  {
+    }
+
+    void Write(size_t first_segment, std::string &first_orientation, size_t second_segment, std::string &second_orientation) {
+        ostream_ << "L\t" << first_segment << "\t" << first_orientation << "\t" ;
+        ostream_ << second_segment << "\t" << second_orientation << "\t" << overlap_size_ << "M";
+        ostream_ << std::endl;
+
+    }
+};
+
+
+struct PathSegmentSequence {
+    size_t path_id_;
+    size_t segment_number_;
+    std::vector<std::string> segment_sequence_;
+    PathSegmentSequence(size_t path_id, std::vector<std::string> &segment_sequence)
+    : path_id_(path_id), segment_number_(1), segment_sequence_(segment_sequence) {
+    }
+
+    PathSegmentSequence()
+    : path_id_(0), segment_number_(1), segment_sequence_(){
+    }
+    void Reset() {
+        segment_sequence_.clear();
+    }
+};
+
+class GFAPathWriter {
+private:
+    std::ostream &ostream_;
+
+public:
+
+    GFAPathWriter(std::ostream &stream)
+    : ostream_(stream)  {
+    }
+
+    void Write(const PathSegmentSequence &path_segment_sequence) {
+        ostream_ << "P" << "\t" ;
+        ostream_ << path_segment_sequence.path_id_ << "_" << path_segment_sequence.segment_number_ << "\t";
+        std::string delimeter = "";
+        for (size_t i = 0; i < path_segment_sequence.segment_sequence_.size() - 1; ++i) {
+            ostream_ << delimeter << path_segment_sequence.segment_sequence_[i];
+            delimeter = ",";
+        }
+        ostream_ << "\t";
+        std::string delimeter2 = "";
+        for (size_t i = 0; i < path_segment_sequence.segment_sequence_.size() - 1; ++i) {
+                ostream_ << delimeter2 << "*";
+                delimeter2 = ",";
+        }
+        ostream_ << std::endl;
+    }
+
+};
+
+template<class Graph>
+class GFAWriter {
+private:
+    typedef typename Graph::EdgeId EdgeId;
+    const Graph &graph_;
+    const path_extend::PathContainer &paths_;
+    const string filename_;
+    std::set<EdgeId> set_of_authentic_edges_;
+
+    bool IsCanonical(EdgeId e) const {
+        if (e <= graph_.conjugate(e)) {
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    std::string GetOrientation(EdgeId e) const {
+        return IsCanonical(e) ? "+" : "-";
+    }
+
+    void WriteSegments(std::ofstream &stream) {
+        GFASegmentWriter segment_writer(stream);
+        for (auto it = graph_.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
+            segment_writer.Write((*it).int_id(), graph_.EdgeNucls(*it), graph_.coverage(*it) * double(graph_.length(*it)));
+        }
+    }
+
+    void WriteLinks(std::ofstream &stream) {
+        GFALinkWriter link_writer(stream, graph_.k());
+        for (auto it = graph_.SmartVertexBegin(); !it.IsEnd(); ++it) {
+            for (auto inc_edge : graph_.IncomingEdges(*it)) {
+                std::string orientation_first = GetOrientation(inc_edge);
+                size_t segment_first = IsCanonical(inc_edge) ? inc_edge.int_id() : graph_.conjugate(inc_edge).int_id();
+                for (auto out_edge : graph_.OutgoingEdges(*it)) {
+                    size_t segment_second = IsCanonical(out_edge) ? out_edge.int_id() : graph_.conjugate(out_edge).int_id();
+                    std::string orientation_second = GetOrientation(out_edge);
+                    link_writer.Write(segment_first, orientation_first, segment_second, orientation_second);
+                }
+            }
+        }
+    }
+
+    void UpdateSegmentedPath(PathSegmentSequence &segmented_path, EdgeId e) {
+        std::string segment_id = IsCanonical(e) ? ToString(e.int_id()) : ToString(graph_.conjugate(e).int_id());
+        std::string orientation = GetOrientation(e);
+        segmented_path.segment_sequence_.push_back(segment_id + orientation);
+    }
+
+    void WritePaths(std::ofstream &stream) {
+        GFAPathWriter path_writer(stream);
+        for (const auto &path_pair : paths_) {
+            const path_extend::BidirectionalPath &p = (*path_pair.first);
+            if (p.Size() == 0) {
+                continue;
+            }
+            PathSegmentSequence segmented_path;
+            segmented_path.path_id_ = p.GetId();
+            for (size_t i = 0; i < p.Size() - 1; ++i) {
+                EdgeId e = p[i];
+                UpdateSegmentedPath(segmented_path, e);
+                if (graph_.EdgeEnd(e) != graph_.EdgeStart(p[i+1])) {
+                    path_writer.Write(segmented_path);
+                    segmented_path.segment_number_++;
+                    segmented_path.Reset();
+                }
+            }
+            UpdateSegmentedPath(segmented_path, p.Back());
+            path_writer.Write(segmented_path);
+
+        }
+    }
+
+public:
+    GFAWriter(const Graph &graph, const path_extend::PathContainer &paths, const string &filename)
+    : graph_(graph), paths_(paths), filename_(filename) {
+    }
+
+    void Write() {
+        std::ofstream stream;
+        stream.open(filename_);
+        WriteSegments(stream);
+        WriteLinks(stream);
+        WritePaths(stream);
+    }
+};
+
 //This class uses corrected sequences to construct contig (just return as is, find unipath, trim contig)
 template<class Graph>
 class ContigConstructor {
@@ -336,6 +505,13 @@ inline void OutputContigs(ConjugateDeBruijnGraph &g, const string &contigs_outpu
 //    }
 }
 
+inline void OutputContigsToGFA(ConjugateDeBruijnGraph &g, path_extend::PathContainer &paths, const string &contigs_output_filename) {
+    INFO("Outputting graph to " << contigs_output_filename << ".gfa");
+    GFAWriter<ConjugateDeBruijnGraph> writer(g, paths, contigs_output_filename + ".gfa");
+    writer.Write();
+}
+
+
 inline void OutputContigsToFASTG(ConjugateDeBruijnGraph& g,
                    const string& contigs_output_filename, const ConnectedComponentCounter & cc_counter) {
 
@@ -419,7 +595,8 @@ inline void OutputSingleFileContigs(ConjugateDeBruijnGraph& g,
             oss << g.EdgeNucls(*it);
             n++;
         }
-    }DEBUG("SingleFileContigs(Conjugate) written");
+    }
+    DEBUG("SingleFileContigs(Conjugate) written");
 }
 
 }
diff --git a/src/common/assembly_graph/graph_support/coverage_filling.hpp b/src/common/assembly_graph/graph_support/coverage_filling.hpp
new file mode 100644
index 0000000..ad2516e
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/coverage_filling.hpp
@@ -0,0 +1,80 @@
+#pragma once
+
+#include "assembly_graph/core/coverage.hpp"
+#include "assembly_graph/graph_support/detail_coverage.hpp"
+
+namespace debruijn_graph {
+
+template<class StoringType>
+struct SimultaneousCoverageCollector {
+};
+
+template<>
+struct SimultaneousCoverageCollector<SimpleStoring> {
+    template<class SimultaneousCoverageFiller, class Info>
+    static void CollectCoverage(SimultaneousCoverageFiller& filler, const Info &edge_info) {
+        filler.inc_coverage(edge_info);
+    }
+};
+
+template<>
+struct SimultaneousCoverageCollector<InvertableStoring> {
+    template<class SimultaneousCoverageFiller, class Info>
+    static void CollectCoverage(SimultaneousCoverageFiller& filler, const Info &edge_info) {
+        filler.inc_coverage(edge_info);
+        filler.inc_coverage(edge_info.conjugate(filler.k()));
+    }
+};
+
+template<class Graph, class CountIndex>
+class SimultaneousCoverageFiller {
+    const Graph& g_;
+    const CountIndex& count_index_;
+    omnigraph::FlankingCoverage<Graph>& flanking_coverage_;
+    omnigraph::CoverageIndex<Graph>& coverage_index_;
+    typedef typename CountIndex::KmerPos Value;
+public:
+    SimultaneousCoverageFiller(const Graph& g, const CountIndex& count_index,
+                               omnigraph::FlankingCoverage<Graph>& flanking_coverage,
+                               omnigraph::CoverageIndex<Graph>& coverage_index) :
+            g_(g),
+            count_index_(count_index),
+            flanking_coverage_(flanking_coverage),
+            coverage_index_(coverage_index) {
+    }
+
+    size_t k() const {
+        return count_index_.k();
+    }
+
+    void inc_coverage(const Value &edge_info) {
+        coverage_index_.IncRawCoverage(edge_info.edge_id, edge_info.count);
+        if (edge_info.offset < flanking_coverage_.averaging_range()) {
+            flanking_coverage_.IncRawCoverage(edge_info.edge_id, edge_info.count);
+        }
+    }
+
+    void Fill() {
+        for (auto I = count_index_.value_cbegin(), E = count_index_.value_cend();
+             I != E; ++I) {
+            const auto& edge_info = *I;
+            //VERIFY(edge_info.valid());
+            if (edge_info.valid()) {
+                VERIFY(edge_info.edge_id.get() != NULL);
+                SimultaneousCoverageCollector<typename CountIndex::storing_type>::CollectCoverage(*this, edge_info);
+            } else {
+                VERIFY(edge_info.removed());
+                WARN("Duplicating k+1-mers in graph (known bug in construction)");
+            }
+        }
+    }
+};
+
+template<class Graph, class CountIndex>
+void FillCoverageAndFlanking(const CountIndex& count_index, Graph& g,
+                             FlankingCoverage<Graph>& flanking_coverage) {
+    SimultaneousCoverageFiller<Graph, CountIndex> filler(g, count_index, flanking_coverage, g.coverage_index());
+    filler.Fill();
+}
+
+}
\ No newline at end of file
diff --git a/src/common/assembly_graph/graph_support/coverage_uniformity_analyzer.cpp b/src/common/assembly_graph/graph_support/coverage_uniformity_analyzer.cpp
new file mode 100644
index 0000000..b1bb38a
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/coverage_uniformity_analyzer.cpp
@@ -0,0 +1,70 @@
+//***************************************************************************
+//* Copyright (c) 2016 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+
+#include "coverage_uniformity_analyzer.hpp"
+namespace debruijn_graph {
+double CoverageUniformityAnalyzer::CountMedianCoverage() const{
+    vector <pair<double, size_t> > coverages;
+    size_t total_len = 0, short_len = 0, cur_len = 0;
+    for (auto iter = g_.ConstEdgeBegin(); ! iter.IsEnd(); ++iter){
+        if (g_.length(*iter) > length_bound_) {
+            coverages.push_back(make_pair(g_.coverage(*iter), g_.length(*iter)));
+            total_len += g_.length(*iter);
+        } else {
+            short_len += g_.length(*iter);
+        }
+    }
+    if (total_len == 0){
+        INFO("Median coverage detection failed, not enough long edges");
+        return -1.0;
+    }
+    std::sort(coverages.begin(), coverages.end());
+    size_t i = 0;
+    while (cur_len < total_len/2 && i <coverages.size()) {
+        cur_len += coverages[i].second;
+        i++;
+    }
+    INFO ("genomic coverage is "<< coverages[i - 1].first << " calculated of length " << size_t (double(total_len) * 0.5));
+    return coverages[i - 1].first;
+}
+
+std::pair<size_t, size_t> CoverageUniformityAnalyzer::TotalLengthsNearMedian(double allowed_variation, double median_coverage) const{
+    std::pair<size_t, size_t> res(0,0);
+    for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
+        if (g_.length(*iter) > length_bound_) {
+            if (g_.coverage(*iter) < median_coverage * (1 + allowed_variation) &&
+                g_.coverage(*iter) > median_coverage * (1 - allowed_variation)) {
+                res.first += g_.length(*iter);
+            } else {
+                res.second += g_.length(*iter);
+            }
+        }
+    }
+    return res;
+}
+
+size_t CoverageUniformityAnalyzer::TotalLongEdgeLength() const {
+    size_t res = 0;
+    for (auto iter = g_.ConstEdgeBegin(); ! iter.IsEnd(); ++iter){
+        if (g_.length(*iter) > length_bound_) {
+            res += g_.length(*iter);
+        }
+    }
+    return res;
+}
+
+double CoverageUniformityAnalyzer::UniformityFraction(double allowed_variation, double median_coverage) const {
+    std::pair<size_t, size_t> lengths = TotalLengthsNearMedian(allowed_variation, median_coverage);
+    size_t total_len = lengths.first + lengths.second;
+    if (total_len == 0) {
+        WARN(" No edges longer than length bound(" << length_bound_ <<" )");
+        return 0;
+    }
+    return double(lengths.first) / double(total_len);
+}
+
+}
\ No newline at end of file
diff --git a/src/common/assembly_graph/graph_support/coverage_uniformity_analyzer.hpp b/src/common/assembly_graph/graph_support/coverage_uniformity_analyzer.hpp
new file mode 100644
index 0000000..016605f
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/coverage_uniformity_analyzer.hpp
@@ -0,0 +1,23 @@
+//***************************************************************************
+//* Copyright (c) 2016 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+#pragma once
+#include "assembly_graph/core/graph.hpp"
+
+namespace debruijn_graph {
+
+class CoverageUniformityAnalyzer {
+private:
+    const Graph& g_;
+    const size_t length_bound_;
+public:
+    CoverageUniformityAnalyzer(const Graph& g, const size_t length_bound): g_(g), length_bound_(length_bound){}
+    double CountMedianCoverage() const;
+    double UniformityFraction(double allowed_variation, double median_coverage) const;
+// first: total length of long edges with coverage inside [median*(1 - allowed_variation), median*(1 + allowed_variation)]; second: total length outside that range
+    std::pair<size_t, size_t> TotalLengthsNearMedian(double allowed_variation, double median_coverage) const;
+    size_t TotalLongEdgeLength() const;
+};
+}
diff --git a/src/modules/assembly_graph/graph_support/detail_coverage.hpp b/src/common/assembly_graph/graph_support/detail_coverage.hpp
similarity index 64%
rename from src/modules/assembly_graph/graph_support/detail_coverage.hpp
rename to src/common/assembly_graph/graph_support/detail_coverage.hpp
index a203d75..15600e2 100644
--- a/src/modules/assembly_graph/graph_support/detail_coverage.hpp
+++ b/src/common/assembly_graph/graph_support/detail_coverage.hpp
@@ -7,10 +7,10 @@
 
 #pragma once
 
-#include "data_structures/indices/perfect_hash_map.hpp"
-#include "assembly_graph/graph_core/coverage.hpp"
-#include "assembly_graph/graph_core/action_handlers.hpp"
-#include "dev_support/verify.hpp"
+#include "utils/indices/perfect_hash_map.hpp"
+#include "assembly_graph/core/coverage.hpp"
+#include "assembly_graph/core/action_handlers.hpp"
+#include "utils/verify.hpp"
 #include <vector>
 #include <map>
 #include <set>
@@ -18,11 +18,10 @@
 #include <iostream>
 #include <fstream>
 
-namespace debruijn_graph {
+namespace omnigraph {
 
 template<class Graph>
-class FlankingCoverage : public omnigraph::GraphActionHandler<Graph>,
-        public omnigraph::AbstractFlankingCoverage<Graph> {
+class FlankingCoverage : public omnigraph::GraphActionHandler<Graph> {
     typedef omnigraph::GraphActionHandler<Graph> base;
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
@@ -73,7 +72,7 @@ public:
         return averaging_range_;
     }
 
-    //todo currently left for saves compatibility! remove later!
+    // left for saves compatibility and for tests; remove later!
     template<class CoverageIndex>
     void Fill(const CoverageIndex& count_index) {
         TRACE("Filling flanking coverage from index");
@@ -84,7 +83,7 @@ public:
             EdgeId e = edge_info.edge_id;
             unsigned offset = edge_info.offset;
             unsigned count = edge_info.count;
-            VERIFY(offset != -1u);
+            VERIFY(edge_info.valid());
             VERIFY(e.get() != NULL);
             if (offset < averaging_range_) {
                 IncRawCoverage(e, count);
@@ -159,6 +158,7 @@ public:
         return CoverageOfStart(e);
     }
 
+    //left for compatibility
     //todo rename
     double GetOutCov(EdgeId e) const {
         return CoverageOfEnd(e);
@@ -184,75 +184,7 @@ public:
     }
 
 private:
-    DECL_LOGGER("FlankingCoverage")
-    ;
-};
-
-template<class StoringType>
-struct SimultaneousCoverageCollector {
-};
-
-template<>
-struct SimultaneousCoverageCollector<SimpleStoring> {
-    template<class SimultaneousCoverageFiller, class Info>
-    static void CollectCoverage(SimultaneousCoverageFiller& filler, const Info &edge_info) {
-        filler.inc_coverage(edge_info);
-    }
+    DECL_LOGGER("FlankingCoverage");
 };
 
-template<>
-struct SimultaneousCoverageCollector<InvertableStoring> {
-    template<class SimultaneousCoverageFiller, class Info>
-    static void CollectCoverage(SimultaneousCoverageFiller& filler, const Info &edge_info) {
-        filler.inc_coverage(edge_info);
-        filler.inc_coverage(edge_info.conjugate(filler.k()));
-    }
-};
-
-template<class Graph, class CountIndex>
-class SimultaneousCoverageFiller {
-    const Graph& g_;
-    const CountIndex& count_index_;
-    FlankingCoverage<Graph>& flanking_coverage_;
-    omnigraph::CoverageIndex<Graph>& coverage_index_;
-    typedef typename CountIndex::Value Value;
-public:
-    SimultaneousCoverageFiller(const Graph& g, const CountIndex& count_index,
-                               FlankingCoverage<Graph>& flanking_coverage,
-                               omnigraph::CoverageIndex<Graph>& coverage_index) :
-                                   g_(g),
-                                   count_index_(count_index),
-                                   flanking_coverage_(flanking_coverage),
-                                   coverage_index_(coverage_index) {
-    }
-
-    size_t k() const {
-        return count_index_.k();
-    }
-
-    void inc_coverage(const Value &edge_info) {
-        coverage_index_.IncRawCoverage(edge_info.edge_id, edge_info.count);
-        if (edge_info.offset < flanking_coverage_.averaging_range()) {
-            flanking_coverage_.IncRawCoverage(edge_info.edge_id, edge_info.count);
-        }
-    }
-
-    void Fill() {
-        for (auto I = count_index_.value_cbegin(), E = count_index_.value_cend();
-                I != E; ++I) {
-            const auto& edge_info = *I;
-            VERIFY(edge_info.valid());
-            VERIFY(edge_info.edge_id.get() != NULL);
-            SimultaneousCoverageCollector<typename CountIndex::storing_type>::CollectCoverage(*this, edge_info);
-        }
-    }
-};
-
-template<class Graph, class CountIndex>
-void FillCoverageAndFlanking(const CountIndex& count_index, Graph& g,
-                             FlankingCoverage<Graph>& flanking_coverage) {
-    SimultaneousCoverageFiller<Graph, CountIndex> filler(g, count_index, flanking_coverage, g.coverage_index());
-    filler.Fill();
-}
-
 }
diff --git a/src/common/assembly_graph/graph_support/edge_removal.hpp b/src/common/assembly_graph/graph_support/edge_removal.hpp
new file mode 100644
index 0000000..e4fbe75
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/edge_removal.hpp
@@ -0,0 +1,172 @@
+#pragma once
+#include "utils/logger/logger.hpp"
+
+namespace omnigraph {
+
+template<class Graph>
+void RemoveIsolatedOrCompress(Graph& g, typename Graph::VertexId v) {
+    if (g.IsDeadStart(v) && g.IsDeadEnd(v)) {
+        g.DeleteVertex(v);
+    } else {
+        g.CompressVertex(v);
+    }
+}
+
+template<class Graph>
+class EdgeRemover {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef std::function<void(EdgeId)> HandlerF;
+
+    Graph& g_;
+    HandlerF removal_handler_;
+
+public:
+    EdgeRemover(Graph& g, HandlerF removal_handler = nullptr)
+            : g_(g),
+              removal_handler_(removal_handler) {
+    }
+
+    void DeleteEdge(EdgeId e) {
+        VertexId start = g_.EdgeStart(e);
+        VertexId end = g_.EdgeEnd(e);
+        DeleteEdgeNoCompress(e);
+        // NOTE: e here is already dead!
+        TRACE("Compressing locality");
+        if (!g_.RelatedVertices(start, end)) {
+            TRACE("Vertices not related");
+            TRACE("Processing end");
+            RemoveIsolatedOrCompress(g_, end);
+            TRACE("End processed");
+        }
+        TRACE("Processing start");
+        RemoveIsolatedOrCompress(g_, start);
+        TRACE("Start processed");
+    }
+
+    void DeleteEdgeNoCompress(EdgeId e) {
+        TRACE("Deletion of edge " << g_.str(e));
+        TRACE("Start " << g_.str(g_.EdgeStart(e)));
+        TRACE("End " << g_.str(g_.EdgeEnd(e)));
+        if (removal_handler_) {
+            TRACE("Calling handler");
+            removal_handler_(e);
+        }
+        TRACE("Deleting edge");
+        g_.DeleteEdge(e);
+    }
+
+    void DeleteEdgeOptCompress(EdgeId e, bool compress) {
+        if (compress)
+            DeleteEdge(e);
+        else
+            DeleteEdgeNoCompress(e);
+    }
+
+private:
+    DECL_LOGGER("EdgeRemover");
+};
+
+//todo rewrite with SmartSetIterator
+template<class Graph>
+class ComponentRemover {
+public:
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef std::function<void(const std::set<EdgeId>&)> HandlerF;
+
+private:
+    Graph& g_;
+    HandlerF removal_handler_;
+
+    template<class ElemType>
+    void InsertIfNotConjugate(std::set<ElemType>& elems, ElemType elem) {
+        if (elems.count(g_.conjugate(elem)) == 0) {
+            elems.insert(elem);
+        }
+    }
+
+public:
+    ComponentRemover(Graph& g, HandlerF removal_handler = 0)
+            : g_(g),
+              removal_handler_(removal_handler) {
+    }
+
+    template<class EdgeIt>
+    void DeleteComponent(EdgeIt begin, EdgeIt end, bool alter_vertices = true) {
+        using std::set;
+        set<EdgeId> edges;
+        set<VertexId> vertices;
+
+        //cleaning conjugates and gathering vertices
+        for (EdgeIt it = begin; it != end; ++it) {
+            EdgeId e = *it;
+            InsertIfNotConjugate(edges, e);
+            InsertIfNotConjugate(vertices, g_.EdgeStart(e));
+            InsertIfNotConjugate(vertices, g_.EdgeEnd(e));
+        }
+
+        if (removal_handler_) {
+            removal_handler_(edges);
+        }
+
+        for (EdgeId e: edges) {
+            g_.DeleteEdge(e);
+        }
+
+        if (alter_vertices) {
+            for (VertexId v: vertices) {
+                RemoveIsolatedOrCompress(g_, v);
+            }
+        }
+    }
+
+    template<class Container>
+    void DeleteComponent(const Container& container, bool alter_vertices = true) {
+        DeleteComponent(container.begin(), container.end(), alter_vertices);
+    }
+
+};
+
+// Removes the first 'trim_len' (k+1)-mers of a graph edge, disconnecting it from its starting vertex
+//In case edge was removed, its end will be compressed even with "compress = false" parameter
+template<class Graph>
+class EdgeDisconnector {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    Graph& g_;
+    EdgeRemover<Graph> edge_remover_;
+    const size_t trim_len_;
+    typedef std::function<void(EdgeId)> HandlerF;
+
+public:
+    EdgeDisconnector(Graph& g,
+                     HandlerF removal_handler = nullptr,
+                     size_t trim_len = 1):
+            g_(g),
+            edge_remover_(g, removal_handler),
+            trim_len_(trim_len) {
+        VERIFY(trim_len_ > 0);
+    }
+
+    EdgeId operator()(EdgeId e, bool compress = true) {
+        if (g_.length(e) <= trim_len_
+                || (e == g_.conjugate(e) && g_.length(e) <= 2 * trim_len_)) {
+            VertexId start = g_.EdgeStart(e);
+            VertexId end = g_.EdgeEnd(e);
+            edge_remover_.DeleteEdgeOptCompress(e, compress);
+            if (!compress && !g_.RelatedVertices(start, end)) {
+                TRACE("Processing end");
+                RemoveIsolatedOrCompress(g_, end);
+                TRACE("End processed");
+            }
+            return EdgeId(0);
+        } else {
+            pair<EdgeId, EdgeId> split_res = g_.SplitEdge(e, trim_len_);
+            edge_remover_.DeleteEdgeOptCompress(split_res.first, compress);
+            return split_res.second;
+        }
+    }
+};
+
+}
diff --git a/src/modules/assembly_graph/graph_support/genomic_quality.hpp b/src/common/assembly_graph/graph_support/genomic_quality.hpp
similarity index 92%
rename from src/modules/assembly_graph/graph_support/genomic_quality.hpp
rename to src/common/assembly_graph/graph_support/genomic_quality.hpp
index ee9e75a..608d120 100644
--- a/src/modules/assembly_graph/graph_support/genomic_quality.hpp
+++ b/src/common/assembly_graph/graph_support/genomic_quality.hpp
@@ -9,13 +9,13 @@
 
 #include "visualization/visualization.hpp"
 #include "assembly_graph/graph_support/basic_edge_conditions.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
-#include "assembly_graph/graph_core/action_handlers.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
+#include "assembly_graph/core/action_handlers.hpp"
 
 namespace debruijn_graph {
 
 template<class Graph>
-class EdgeQuality: public omnigraph::GraphLabeler<Graph>, public omnigraph::GraphActionHandler<Graph> {
+class EdgeQuality: public visualization::graph_labeler::GraphLabeler<Graph>, public omnigraph::GraphActionHandler<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
     map<EdgeId, size_t> quality_;
@@ -26,7 +26,7 @@ class EdgeQuality: public omnigraph::GraphLabeler<Graph>, public omnigraph::Grap
             , const KmerMapper<Graph>& kmer_mapper, const Sequence &genome) {
         if (genome.size() < k_)
             return;
-        runtime_k::RtSeq cur = genome.start<runtime_k::RtSeq>(k_);
+        RtSeq cur = genome.start<RtSeq>(k_);
         cur >>= 0;
         for (size_t i = 0; i + k_ - 1 < genome.size(); i++) {
             cur <<= genome[i + k_ - 1];
@@ -43,8 +43,10 @@ public:
     void Fill(const Index &index
             , const KmerMapper<Graph>& kmer_mapper
             , const Sequence &genome) {
+        DEBUG("Filling quality values");
         FillQuality(index, kmer_mapper, genome);
         FillQuality(index, kmer_mapper, !genome);
+        DEBUG(quality_.size() << " edges have non-zero quality");
     }
 
     EdgeQuality(const Graph &graph) :
@@ -52,9 +54,6 @@ public:
             k_(graph.k() + 1) {
     }
 
-    virtual ~EdgeQuality() {
-    }
-
     virtual void HandleAdd(EdgeId /*e*/) {
     }
 
@@ -117,6 +116,8 @@ public:
         quality_.clear();
     }
 
+private:
+    DECL_LOGGER("EdgeQuality");
 };
 
 template<class Graph>
@@ -174,12 +175,12 @@ class QualityEdgeLocalityPrintingRH : public QualityLoggingRemovalHandler<Graph>
     typedef QualityLoggingRemovalHandler<Graph> base;
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    omnigraph::visualization::LocalityPrintingRH<Graph> printing_rh_;
+    visualization::visualization_utils::LocalityPrintingRH<Graph> printing_rh_;
 public:
     QualityEdgeLocalityPrintingRH(const Graph& g
             , const EdgeQuality<Graph>& quality_handler
-            , const omnigraph::GraphLabeler<Graph>& labeler
-            , std::shared_ptr<omnigraph::visualization::GraphColorer<Graph>> colorer
+            , const visualization::graph_labeler::GraphLabeler<Graph>& labeler
+            , std::shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> colorer
             , const string& output_folder, bool handle_all = false) :
             base(g, quality_handler, handle_all),
             printing_rh_(g, labeler, colorer, output_folder)
@@ -263,7 +264,7 @@ private:
 //            path::make_dir(folder);
 //            //todo magic constant
 //            map<EdgeId, string> empty_coloring;
-//            omnigraph::visualization::WriteComponent(g_, EdgeNeighborhood<Graph>(g_, edge, 50, 250),
+//            visualization::visualization_utils::WriteComponent(g_, EdgeNeighborhood<Graph>(g_, edge, 50, 250),
 //                  folder + "edge_" +  ToString(g_.int_id(edge)) + ".dot", empty_coloring, labeler_);
 //  }
 //
@@ -285,7 +286,7 @@ private:
 //            , const KmerMapper<Graph>& kmer_mapper, const Sequence &genome) {
 //        if (genome.size() < k_)
 //            return;
-//        runtime_k::RtSeq cur = genome.start<runtime_k::RtSeq>(k_);
+//        RtSeq cur = genome.start<RtSeq>(k_);
 //        cur >>= 0;
 //        for (size_t i = 0; i + k_ - 1 < genome.size(); i++) {
 //            cur <<= genome[i + k_ - 1];
@@ -428,15 +429,15 @@ private:
 //    const Graph& g_;
 //    const EdgeQuality<Graph, Index>& quality_handler_;
 //    const omnigraph::GraphLabeler<Graph>& labeler_;
-//    const omnigraph::visualization::GraphColorer<Graph>& colorer_;
+//    const visualization::graph_colorer::GraphColorer<Graph>& colorer_;
 //    const string& output_folder_;
 ////  size_t black_removed_;
 ////  size_t colored_removed_;
 //public:
 //    QualityEdgeLocalityPrintingRH(const Graph& g
 //            , const EdgeQuality<Graph, Index>& quality_handler
-//            , const omnigraph::GraphLabeler<Graph>& labeler
-//            , const omnigraph::visualization::GraphColorer<Graph>& colorer
+//            , const visualization::graph_labeler::GraphLabeler<Graph>& labeler
+//            , const visualization::graph_colorer::GraphColorer<Graph>& colorer
 //            , const string& output_folder) :
 //            g_(g), quality_handler_(quality_handler),
 //            labeler_(labeler), colorer_(colorer), output_folder_(output_folder){
@@ -450,7 +451,7 @@ private:
 //            //todo magic constant
 ////          map<EdgeId, string> empty_coloring;
 //            shared_ptr<GraphSplitter<Graph>> splitter = EdgeNeighborhoodFinder<Graph>(g_, edge, 50, 250);
-//            omnigraph::visualization::WriteComponents(g_, *splitter/*, "locality_of_edge_" + ToString(g_.int_id(edge))*/
+//            visualization::visualization_utils::WriteComponents(g_, *splitter/*, "locality_of_edge_" + ToString(g_.int_id(edge))*/
 //                    , folder + "edge_" +  ToString(g_.int_id(edge)) + "_" + ToString(quality_handler_.quality(edge)) + ".dot"
 //                    , colorer_, labeler_);
 //        } else {
@@ -502,7 +503,7 @@ private:
 //            shared_ptr<GraphSplitter<Graph>> splitter = EdgeNeighborhoodFinder<Graph>(g_, edge, 50,
 //                    250);
 //
-//            omnigraph::visualization::WriteComponents(g_, *splitter, TrueFilter<vector<VertexId>>(), "locality_of_edge_" + ToString(g_.int_id(edge))
+//            visualization::visualization_utils::WriteComponents(g_, *splitter, TrueFilter<vector<VertexId>>(), "locality_of_edge_" + ToString(g_.int_id(edge))
 //                    , folder + "edge_" +  ToString(g_.int_id(edge)) + "_" + ToString(quality_handler_.quality(edge)) + ".dot"
 //                    , empty_coloring, labeler_);
 //        }
@@ -542,7 +543,7 @@ private:
 //            //todo magic constant
 //            map<EdgeId, string> empty_coloring;
 //            shared_ptr<GraphSplitter<Graph>> splitter = EdgeNeighborhoodFinder<Graph>(g_, edge, 50, 250);
-//            omnigraph::visualization::WriteComponents(g_, *splitter, TrueFilter<vector<VertexId>>(), "locality_of_edge_" + ToString(g_.int_id(edge))
+//            visualization::visualization_utils::WriteComponents(g_, *splitter, TrueFilter<vector<VertexId>>(), "locality_of_edge_" + ToString(g_.int_id(edge))
 //                    , folder + "edge_" +  ToString(g_.int_id(edge)) + ".dot", empty_coloring, labeler_);
 //    }
 //
diff --git a/src/common/assembly_graph/graph_support/graph_processing_algorithm.hpp b/src/common/assembly_graph/graph_support/graph_processing_algorithm.hpp
new file mode 100644
index 0000000..8a27010
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/graph_processing_algorithm.hpp
@@ -0,0 +1,146 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "func/func.hpp"
+#include <boost/none.hpp>
+#include <atomic>
+#include "assembly_graph/core/graph_iterators.hpp"
+#include "assembly_graph/components/graph_component.hpp"
+#include "edge_removal.hpp"
+#include "func/pred.hpp"
+#include "utils/logger/logger.hpp"
+
+namespace omnigraph {
+
+template<class Graph>
+using EdgeRemovalHandlerF = std::function<void(typename Graph::EdgeId)>;
+
+template<class Graph>
+class EdgeProcessingAlgorithm {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef func::TypedPredicate<EdgeId> ProceedConditionT;
+
+    Graph& g_;
+    bool conjugate_symmetry_;
+ protected:
+
+    Graph& g() {
+        return g_;
+    }
+
+    const Graph& g() const {
+        return g_;
+    }
+
+    virtual bool ProcessEdge(EdgeId e) = 0;
+
+ public:
+    EdgeProcessingAlgorithm(Graph& g,
+                             bool conjugate_symmetry = false)
+            : g_(g), conjugate_symmetry_(conjugate_symmetry) {
+
+    }
+
+    virtual ~EdgeProcessingAlgorithm() {
+    }
+
+//    bool conjugate_symmetry() const {
+//        return conjugate_symmetry_;
+//    }
+
+    template<class Comparator = std::less<EdgeId>>
+    bool Run(const Comparator& comp = Comparator(), ProceedConditionT proceed_condition = func::AlwaysTrue<EdgeId>()) {
+        bool triggered = false;
+        for (auto it = g_.SmartEdgeBegin(comp, conjugate_symmetry_); !it.IsEnd(); ++it) {
+            EdgeId e = *it;
+            TRACE("Current edge " << g_.str(e));
+            if (!proceed_condition(e)) {
+                TRACE("Stop condition was reached.");
+                break;
+            }
+
+            TRACE("Processing edge " << this->g().str(e));
+            triggered |= ProcessEdge(e);
+        };
+        return triggered;
+    }
+
+ private:
+    DECL_LOGGER("EdgeProcessingAlgorithm");
+};
+
+template<class Graph>
+class CountingCallback {
+    typedef typename Graph::EdgeId EdgeId;
+    bool report_on_destruction_;
+    std::atomic<size_t> cnt_;
+
+public:
+    CountingCallback(bool report_on_destruction = false) :
+            report_on_destruction_(report_on_destruction), cnt_(0) {
+    }
+
+    ~CountingCallback() {
+        if (report_on_destruction_)
+            Report();
+    }
+
+    void HandleDelete(EdgeId /*e*/) {
+        cnt_++;
+    }
+
+    void Report() {
+        TRACE(cnt_ << " edges were removed.")
+        cnt_ = 0;
+    }
+
+private:
+    DECL_LOGGER("CountingCallback");
+};
+
+template<class Graph>
+std::function<void(typename Graph::EdgeId)> AddCountingCallback(CountingCallback<Graph>& cnt_callback, std::function<void(typename Graph::EdgeId)> handler) {
+    std::function<void(typename Graph::EdgeId)> cnt_handler = std::bind(&CountingCallback<Graph>::HandleDelete, std::ref(cnt_callback), std::placeholders::_1);
+    return func::CombineCallbacks<typename Graph::EdgeId>(handler, cnt_handler);
+}
+
+template<class Graph>
+class EdgeRemovingAlgorithm : public EdgeProcessingAlgorithm<Graph> {
+    typedef EdgeProcessingAlgorithm<Graph> base;
+    typedef typename Graph::EdgeId EdgeId;
+
+    func::TypedPredicate<EdgeId> remove_condition_;
+    EdgeRemover<Graph> edge_remover_;
+
+ protected:
+    virtual bool ProcessEdge(EdgeId e) {
+        TRACE("Checking edge " << this->g().str(e) << " for the removal condition");
+        if (remove_condition_(e)) {
+            TRACE("Check passed, removing");
+            edge_remover_.DeleteEdge(e);
+            return true;
+        }
+        TRACE("Check not passed");
+        return false;
+    }
+
+ public:
+    EdgeRemovingAlgorithm(Graph& g,
+                          func::TypedPredicate<EdgeId> remove_condition,
+                          std::function<void (EdgeId)> removal_handler = boost::none,
+                          bool conjugate_symmetry = false)
+            : base(g, conjugate_symmetry),
+              remove_condition_(remove_condition),
+              edge_remover_(g, removal_handler) {}
+
+ private:
+    DECL_LOGGER("EdgeRemovingAlgorithm");
+};
+
+}
diff --git a/src/modules/assembly_graph/graph_support/marks_and_locks.hpp b/src/common/assembly_graph/graph_support/marks_and_locks.hpp
similarity index 100%
rename from src/modules/assembly_graph/graph_support/marks_and_locks.hpp
rename to src/common/assembly_graph/graph_support/marks_and_locks.hpp
diff --git a/src/common/assembly_graph/graph_support/parallel_processing.hpp b/src/common/assembly_graph/graph_support/parallel_processing.hpp
new file mode 100644
index 0000000..abd3149
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/parallel_processing.hpp
@@ -0,0 +1,306 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "utils/logger/logger.hpp"
+#include "assembly_graph/core/graph_iterators.hpp"
+#include "assembly_graph/graph_support/graph_processing_algorithm.hpp"
+#include "utils/openmp_wrapper.h"
+
+namespace omnigraph {
+
+template<class ItVec, class Condition, class Handler>
+void FindInterestingFromChunkIterators(const ItVec& chunk_iterators,
+                                       const Condition& predicate,
+                                       const Handler& handler) {
+    VERIFY(chunk_iterators.size() > 1);
+    typedef typename Condition::checked_type ElementType;
+    std::vector<std::vector<ElementType>> of_interest(omp_get_max_threads());
+
+    #pragma omp parallel for schedule(guided)
+    for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
+        size_t cnt = 0;
+        for (auto it = chunk_iterators[i], end = chunk_iterators[i + 1]; it != end; ++it) {
+             ElementType t = *it;
+             if (predicate(t)) {
+                 of_interest[omp_get_thread_num()].push_back(t);
+             }
+             cnt++;
+         }
+         DEBUG("Processed " << cnt << " elements as potential candidates by thread " << omp_get_thread_num());
+    }
+
+    for (auto& chunk : of_interest) {
+        for (const auto& el : chunk) {
+            handler(el);
+        }
+        chunk.clear();
+    }
+}
+
+template<class Graph, class ElementId>
+class InterestingElementFinder {
+protected:
+    typedef std::function<void (ElementId)> HandlerF;
+    const func::TypedPredicate<ElementId> condition_;
+public:
+
+    InterestingElementFinder(func::TypedPredicate<ElementId> condition):
+            condition_(condition) {
+    }
+
+    virtual ~InterestingElementFinder() {}
+
+    virtual bool Run(const Graph& /*g*/, HandlerF /*handler*/) const = 0;
+};
+
+template<class Graph, class ElementId = typename Graph::EdgeId>
+class TrivialInterestingElementFinder :
+        public InterestingElementFinder<Graph, ElementId> {
+public:
+
+    TrivialInterestingElementFinder() :
+            InterestingElementFinder<Graph, ElementId>(func::AlwaysTrue<ElementId>()) {
+    }
+
+    bool Run(const Graph& /*g*/, std::function<void (ElementId)> /*handler*/) const override {
+        return false;
+    }
+};
+
+template<class Graph, class ElementId = typename Graph::EdgeId>
+class SimpleInterestingElementFinder : public InterestingElementFinder<Graph, ElementId> {
+    typedef InterestingElementFinder<Graph, ElementId> base;
+    typedef typename base::HandlerF HandlerF;
+public:
+
+    SimpleInterestingElementFinder(func::TypedPredicate<ElementId> condition = func::AlwaysTrue<ElementId>())
+            :  base(condition) {}
+
+    bool Run(const Graph& g, HandlerF handler) const override {
+        const IterationHelper<Graph, ElementId> it_helper(g);
+        for (auto it = it_helper.begin(), end = it_helper.end(); it != end; ++it) {
+            if (this->condition_(*it)) {
+                handler(*it);
+            }
+        }
+        return false;
+    }
+};
+
+template<class Graph, class ElementId = typename Graph::EdgeId>
+class ParallelInterestingElementFinder : public InterestingElementFinder<Graph, ElementId> {
+    typedef InterestingElementFinder<Graph, ElementId> base;
+    typedef typename base::HandlerF HandlerF;
+
+    const size_t chunk_cnt_;
+public:
+
+    ParallelInterestingElementFinder(func::TypedPredicate<ElementId> condition,
+                                     size_t chunk_cnt)
+            : base(condition), chunk_cnt_(chunk_cnt) {}
+
+    bool Run(const Graph& g, HandlerF handler) const override {
+        TRACE("Looking for interesting elements");
+        TRACE("Splitting graph into " << chunk_cnt_ << " chunks");
+        FindInterestingFromChunkIterators(IterationHelper<Graph, ElementId>(g).Chunks(chunk_cnt_),
+                                          this->condition_, handler);
+        return false;
+    }
+
+private:
+    DECL_LOGGER("ParallelInterestingElementFinder");
+};
+
+template<class Graph>
+class PersistentAlgorithmBase {
+    Graph& g_;
+protected:
+
+    PersistentAlgorithmBase(Graph& g) : g_(g) {}
+
+    Graph& g() { return g_; }
+    const Graph& g() const { return g_; }
+public:
+    virtual ~PersistentAlgorithmBase() {}
+    virtual size_t Run(bool force_primary_launch = false) = 0;
+};
+
+template<class Algo>
+inline size_t LoopedRun(Algo& algo) {
+    size_t total_triggered = 0;
+    bool run = true;
+    while (run) {
+        size_t triggered = algo.Run();
+        total_triggered += triggered;
+        run = (triggered > 0);
+    }
+    return total_triggered;
+}
+
+//todo only potentially relevant edges should be stored at any point
+template<class Graph, class ElementId,
+         class Comparator = std::less<ElementId>>
+class PersistentProcessingAlgorithm : public PersistentAlgorithmBase<Graph> {
+protected:
+    typedef std::shared_ptr<InterestingElementFinder<Graph, ElementId>> CandidateFinderPtr;
+    CandidateFinderPtr interest_el_finder_;
+
+private:
+    SmartSetIterator<Graph, ElementId, Comparator> it_;
+    bool tracking_;
+    size_t total_iteration_estimate_;
+    size_t curr_iteration_;
+
+protected:
+    void ReturnForConsideration(ElementId el) {
+        it_.push(el);
+    }
+
+    virtual bool Process(ElementId el) = 0;
+    virtual bool Proceed(ElementId /*el*/) const { return true; }
+
+    virtual void PrepareIteration(size_t /*it_cnt*/, size_t /*total_it_estimate*/) {}
+
+public:
+
+    PersistentProcessingAlgorithm(Graph& g,
+                                  const CandidateFinderPtr& interest_el_finder,
+                                  bool canonical_only = false,
+                                  const Comparator& comp = Comparator(),
+                                  bool track_changes = true,
+                                  size_t total_iteration_estimate = -1ul) :
+            PersistentAlgorithmBase<Graph>(g),
+            interest_el_finder_(interest_el_finder),
+            it_(g, true, comp, canonical_only),
+            tracking_(track_changes),
+            total_iteration_estimate_(total_iteration_estimate),
+            curr_iteration_(0) {
+        it_.Detach();
+    }
+
+    size_t Run(bool force_primary_launch = false) override {
+        bool primary_launch = !tracking_ || (curr_iteration_ == 0) || force_primary_launch ;
+        if (!it_.IsAttached()) {
+            it_.Attach();
+        }
+        if (primary_launch) {
+            it_.clear();
+            TRACE("Primary launch.");
+            TRACE("Start searching for relevant elements");
+            interest_el_finder_->Run(this->g(), [&](ElementId el) {it_.push(el);});
+            TRACE(it_.size() << " elements to consider");
+        } else {
+            TRACE(it_.size() << " elements to consider");
+            VERIFY(tracking_);
+        }
+
+        PrepareIteration(std::min(curr_iteration_, total_iteration_estimate_ - 1), total_iteration_estimate_);
+
+        size_t triggered = 0;
+        TRACE("Start processing");
+        for (; !it_.IsEnd(); ++it_) {
+            ElementId el = *it_;
+            if (!Proceed(el)) {
+                TRACE("Proceed condition turned false on element " << this->g().str(el));
+                it_.ReleaseCurrent();
+                break;
+            }
+            TRACE("Processing edge " << this->g().str(el));
+            if (Process(el))
+                triggered++;
+        }
+        TRACE("Finished processing. Triggered = " << triggered);
+        if (!tracking_)
+            it_.Detach();
+
+        curr_iteration_++;
+        return triggered;
+    }
+private:
+    DECL_LOGGER("PersistentProcessingAlgorithm"); 
+};
+
+template<class Graph,
+        class Comparator = std::less<typename Graph::EdgeId>>
+class ParallelEdgeRemovingAlgorithm : public PersistentProcessingAlgorithm<Graph,
+        typename Graph::EdgeId,
+        Comparator> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentProcessingAlgorithm<Graph, EdgeId, Comparator> base;
+
+    const func::TypedPredicate<EdgeId> remove_condition_;
+    EdgeRemover<Graph> edge_remover_;
+
+protected:
+
+    bool Process(EdgeId e) override {
+        TRACE("Checking edge " << this->g().str(e) << " for the removal condition");
+        if (remove_condition_(e)) {
+            TRACE("Check passed, removing");
+            edge_remover_.DeleteEdge(e);
+            return true;
+        }
+        TRACE("Check not passed");
+        return false;
+    }
+
+public:
+    ParallelEdgeRemovingAlgorithm(Graph& g,
+                                  func::TypedPredicate<EdgeId> remove_condition,
+                                  size_t chunk_cnt,
+                                  std::function<void(EdgeId)> removal_handler = boost::none,
+                                  bool canonical_only = false,
+                                  const Comparator& comp = Comparator(),
+                                  bool track_changes = true)
+            : base(g,
+                   std::make_shared<ParallelInterestingElementFinder<Graph>>(remove_condition, chunk_cnt),
+                   canonical_only, comp, track_changes),
+                   remove_condition_(remove_condition),
+                   edge_remover_(g, removal_handler) {
+    }
+
+private:
+    DECL_LOGGER("ParallelEdgeRemovingAlgorithm");
+};
+
+template<class Graph, class Comparator = std::less<typename Graph::EdgeId>>
+class DisconnectionAlgorithm : public PersistentProcessingAlgorithm<Graph,
+        typename Graph::EdgeId,
+        Comparator> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentProcessingAlgorithm<Graph, EdgeId, Comparator> base;
+    func::TypedPredicate<EdgeId> condition_;
+    EdgeDisconnector<Graph> disconnector_;
+
+public:
+    DisconnectionAlgorithm(Graph& g,
+                           func::TypedPredicate<EdgeId> condition,
+                           size_t chunk_cnt,
+                           EdgeRemovalHandlerF<Graph> removal_handler,
+                           const Comparator& comp = Comparator(),
+                           bool track_changes = true)
+            : base(g,
+                   std::make_shared<omnigraph::ParallelInterestingElementFinder<Graph>>(condition, chunk_cnt),
+            /*canonical_only*/false, comp, track_changes),
+              condition_(condition),
+              disconnector_(g, removal_handler) {
+    }
+
+    bool Process(EdgeId e) override {
+        if (condition_(e)) {
+            disconnector_(e);
+            return true;
+        }
+        return false;
+    }
+
+};
+
+
+}
diff --git a/src/common/assembly_graph/graph_support/scaff_supplementary.cpp b/src/common/assembly_graph/graph_support/scaff_supplementary.cpp
new file mode 100644
index 0000000..5dd3907
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/scaff_supplementary.cpp
@@ -0,0 +1,270 @@
+#include "scaff_supplementary.hpp"
+#include <algorithm>
+
+using namespace std;
+namespace path_extend {
+
+
+void ScaffoldingUniqueEdgeAnalyzer::SetCoverageBasedCutoff() {
+    vector <pair<double, size_t>> coverages;
+    map <EdgeId, size_t> long_component;
+    size_t total_len = 0, short_len = 0, cur_len = 0;
+
+    for (auto iter = gp_.g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
+        if (gp_.g.length(*iter) > length_cutoff_) {
+            coverages.push_back(make_pair(gp_.g.coverage(*iter), gp_.g.length(*iter)));
+            total_len += gp_.g.length(*iter);
+            long_component[*iter] = 0;
+        } else {
+            short_len += gp_.g.length(*iter);
+        }
+    }
+    if (total_len == 0) {
+        WARN("not enough edges longer than "<< length_cutoff_);
+        return;
+    }
+    sort(coverages.begin(), coverages.end());
+    size_t i = 0;
+    while (cur_len < total_len / 2 && i < coverages.size()) {
+        cur_len += coverages[i].second;
+        i++;
+    }
+    median_coverage_ = coverages[i].first;
+}
+
+
+void ScaffoldingUniqueEdgeAnalyzer::FillUniqueEdgeStorage(ScaffoldingUniqueEdgeStorage &storage_) {
+    storage_.unique_edges_.clear();
+    size_t total_len = 0;
+    size_t unique_len = 0;
+    size_t unique_num = 0;
+    storage_.SetMinLength(length_cutoff_);
+    for (auto iter = gp_.g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
+        size_t tlen = gp_.g.length(*iter);
+        total_len += tlen;
+        if (gp_.g.length(*iter) >= length_cutoff_ && gp_.g.coverage(*iter) > median_coverage_ * (1 - relative_coverage_variation_)
+                && gp_.g.coverage(*iter) < median_coverage_ * (1 + relative_coverage_variation_) ) {
+            storage_.unique_edges_.insert(*iter);
+            unique_len += tlen;
+            unique_num ++;
+        }
+    }
+    for (auto iter = storage_.begin(); iter != storage_.end(); ++iter) {
+        DEBUG (gp_.g.int_id(*iter) << " " << gp_.g.coverage(*iter) << " " << gp_.g.length(*iter) );
+    }
+    INFO ("With length cutoff: " << length_cutoff_ <<", median long edge coverage: " << median_coverage_ << ", and maximal unique coverage: " <<
+                                                                                                            relative_coverage_variation_);
+    INFO("Unique edges quantity: " << unique_num << ", unique edges length " << unique_len <<", total edges length " << total_len);
+    if (unique_len * 2 < total_len) {
+        WARN("Less than half of genome in unique edges!");
+    }
+
+}
+
+bool ScaffoldingUniqueEdgeAnalyzer::ConservativeByLength(EdgeId e) {
+    return gp_.g.length(e) >= length_cutoff_;
+}
+
+map<EdgeId, size_t> ScaffoldingUniqueEdgeAnalyzer::FillNextEdgeVoting(BidirectionalPathMap<size_t>& active_paths, int direction) const {
+    map<EdgeId, size_t> voting;
+    for (const auto &pair: active_paths) {
+        int current_pos = int(pair.second) + direction;
+        auto path_iter = pair.first;
+        //default to "not found": position one past the end of the path
+        active_paths[path_iter] = path_iter->Size();
+        while (current_pos >= 0 && current_pos < (int) path_iter->Size()) {
+            if (gp_.g.length(path_iter->At(current_pos)) >= length_cutoff_) {
+                voting[path_iter->At(current_pos)] += size_t(round(path_iter->GetWeight()));
+                active_paths[path_iter] = size_t(current_pos);
+                break;
+            }
+            current_pos += direction;
+        }
+    }
+    return voting;
+}
+
+bool ScaffoldingUniqueEdgeAnalyzer::ConservativeByPaths(EdgeId e, shared_ptr<GraphCoverageMap> long_reads_cov_map, const pe_config::LongReads lr_config, int direction) const {
+    BidirectionalPathSet all_set = long_reads_cov_map->GetCoveringPaths(e);
+    BidirectionalPathMap<size_t> active_paths;
+    size_t loop_weight = 0;
+    size_t nonloop_weight = 0;
+    DEBUG ("Checking " << gp_.g.int_id(e) <<" dir "<< direction );
+    for (auto path_iter: all_set) {
+        auto pos = path_iter->FindAll(e);
+        if (pos.size() > 1)
+//TODO: should path weight be size_t?
+            loop_weight += size_t(round(path_iter->GetWeight()));
+        else {
+            if (path_iter->Size() > 1) nonloop_weight += size_t(round(path_iter->GetWeight()));
+            active_paths[path_iter] = pos[0];
+        }
+    }
+//TODO: small plasmid, paths a-b-a, b-a-b ?
+    if (loop_weight > 1) 
+            return false;
+        else
+            DEBUG (gp_.g.int_id(e) << " loop/nonloop weight " << loop_weight << " " << nonloop_weight);
+            
+    EdgeId prev_unique = e;
+    while (active_paths.size() > 0) {
+        size_t alt = 0;
+        size_t maxx = 0;
+        map<EdgeId, size_t> voting = FillNextEdgeVoting(active_paths, direction);
+
+        if (voting.size() == 0)
+            break;
+        EdgeId next_unique = prev_unique;
+        for (const auto &pair: voting)
+            if (pair.second > maxx) {
+                next_unique = pair.first;
+                maxx = pair.second;
+            }
+        for (const auto &pair: voting)
+            //TODO: take this voting threshold (1) from config?
+            if (pair.first != next_unique && pair.second > 1)
+                alt += pair.second;
+        if (maxx < lr_config.unique_edge_priority * double(alt)) {
+            DEBUG("edge " << gp_.g.int_id(e) <<" dir "<< direction << " was not unique" );
+            DEBUG("current edge " << gp_.g.int_id(next_unique));
+            DEBUG("Paths " << active_paths.size());
+            return false;
+        } else {
+            DEBUG("cur " << gp_.g.int_id(prev_unique) << " next " << gp_.g.int_id(next_unique) <<" sz " << active_paths.size());
+            for (auto iter = active_paths.begin(); iter != active_paths.end();) {
+                if (iter->second >= iter->first->Size() || iter->first->At(iter->second) != next_unique) {
+                    iter = active_paths.erase(iter);
+                } else {
+                    iter++;
+                }
+            }
+            prev_unique = next_unique;
+            DEBUG(active_paths.size() << " "<< gp_.g.int_id(next_unique));
+        }
+    }
+    DEBUG("edge " << gp_.g.int_id(e) <<" dir "<< direction << " was unique" );
+    return true;
+}
+
+bool ScaffoldingUniqueEdgeAnalyzer::ConservativeByPaths(EdgeId e, shared_ptr<GraphCoverageMap> long_reads_cov_map, const pe_config::LongReads lr_config) const{
+    return (ConservativeByPaths(e, long_reads_cov_map, lr_config, 1) && ConservativeByPaths(e, long_reads_cov_map, lr_config, -1));
+}
+
+
+void ScaffoldingUniqueEdgeAnalyzer::CheckCorrectness(ScaffoldingUniqueEdgeStorage& unique_storage_pb) {
+    for (auto iter = gp_.g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
+        EdgeId e = *iter;
+        bool e_unique = unique_storage_pb.IsUnique(e);
+        bool e_conj_unique = unique_storage_pb.IsUnique(gp_.g.conjugate(e));
+        VERIFY_MSG(!((e_unique && !e_conj_unique) || (!e_unique && e_conj_unique)), "Edge " << gp_.g.int_id(e) << " is not symmetrically unique with it conjugate");
+        if (ConservativeByLength(e)) {
+            if (e_unique) {
+                DEBUG("edge " << gp_.g.int_id(e) << "is unique");
+            } else {
+                DEBUG("edge " << gp_.g.int_id(e) << "is not unique");
+            }
+        }
+    }
+}
+
+set<VertexId> ScaffoldingUniqueEdgeAnalyzer::GetChildren(VertexId v, map <VertexId, set<VertexId>> &dijkstra_cash_) const {
+    DijkstraHelper<debruijn_graph::Graph>::BoundedDijkstra dijkstra(
+            DijkstraHelper<debruijn_graph::Graph>::CreateBoundedDijkstra(gp_.g, max_dijkstra_depth_, max_dijkstra_vertices_));
+    dijkstra.Run(v);
+
+    if (dijkstra_cash_.find(v) == dijkstra_cash_.end()) {
+        auto tmp = dijkstra.ReachedVertices();
+        tmp.push_back(v);
+        dijkstra_cash_[v] = set<VertexId> (tmp.begin(), tmp.end());
+    }
+    return dijkstra_cash_[v];
+}
+
+bool ScaffoldingUniqueEdgeAnalyzer::FindCommonChildren(EdgeId e1, EdgeId e2, map <VertexId, set<VertexId>> &dijkstra_cash_) const {
+    auto s1 = GetChildren(gp_.g.EdgeEnd(e1), dijkstra_cash_);
+    auto s2 = GetChildren(gp_.g.EdgeEnd(e2), dijkstra_cash_);
+    if (s1.find(gp_.g.EdgeStart(e2)) != s1.end()) {
+        return true;
+    }
+    if (s2.find(gp_.g.EdgeStart(e1)) != s2.end()) {
+        return true;
+    }
+    for (VertexId v: s1) {
+        if (s2.find(v) != s2.end()) {
+            DEBUG("bulge-like structure, edges "<< gp_.g.int_id(e1) << " " << gp_.g.int_id(e2));
+            return true;
+        }
+    }
+    return false;
+}
+
+bool ScaffoldingUniqueEdgeAnalyzer::FindCommonChildren(vector<pair<EdgeId, double>> &next_weights) const {
+    map <VertexId, set<VertexId>> dijkstra_cash_;
+    for (size_t i = 0; i < next_weights.size(); i ++) {
+        for (size_t j = i + 1; j < next_weights.size(); j++) {
+            if (next_weights[i].second * overwhelming_majority_ > next_weights[j].second
+            && next_weights[j].second * overwhelming_majority_ > next_weights[i].second &&
+                !FindCommonChildren(next_weights[i].first, next_weights[j].first, dijkstra_cash_)) {
+                DEBUG("multiple paired info on edges " <<next_weights[i].first <<" and "<< next_weights[j].first);
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+bool ScaffoldingUniqueEdgeAnalyzer::FindCommonChildren(EdgeId from, size_t lib_index) const{
+    DEBUG("processing unique edge " << gp_.g.int_id(from));
+    auto next_edges = gp_.clustered_indices[lib_index].Get(from);
+    vector<pair<EdgeId, double>> next_weights;
+    for (auto hist_pair: next_edges) {
+        if (hist_pair.first == from || hist_pair.first == gp_.g.conjugate(from))
+            continue;
+        double total_w = 0;
+        for (auto w: hist_pair.second)
+            total_w += w.weight;
+        if (math::gr(total_w, 1.0))
+            next_weights.push_back(make_pair(hist_pair.first, total_w));
+    }
+    sort(next_weights.begin(), next_weights.end(), [&](pair<EdgeId, double>a, pair<EdgeId, double>b){
+        return math::gr(a.second, b.second);
+    });
+//keep only the most popular edges; consider whether this can be done faster
+    if (next_weights.size() > max_different_edges_) {
+        DEBUG(next_weights.size() << " continuations");
+        next_weights.resize(max_different_edges_);
+    }
+    return FindCommonChildren(next_weights);
+}
+
+
+void ScaffoldingUniqueEdgeAnalyzer::ClearLongEdgesWithPairedLib(size_t lib_index, ScaffoldingUniqueEdgeStorage &storage_) const {
+    set<EdgeId> to_erase;
+    for (EdgeId edge: storage_ ) {
+        if (!FindCommonChildren(edge, lib_index)) {
+            to_erase.insert(edge);
+            to_erase.insert(gp_.g.conjugate(edge));
+        }
+    }
+    for (auto iter = storage_.begin(); iter !=  storage_.end(); ){
+        if (to_erase.find(*iter) != to_erase.end()){
+            iter = storage_.erase(iter);
+        } else {
+            iter++;
+        }
+    }
+}
+
+
+void ScaffoldingUniqueEdgeAnalyzer::FillUniqueEdgesWithLongReads(shared_ptr<GraphCoverageMap> long_reads_cov_map, ScaffoldingUniqueEdgeStorage& unique_storage_pb, const pe_config::LongReads lr_config) {
+    for (auto iter = gp_.g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
+        EdgeId e = *iter;
+        if (ConservativeByLength(e) && ConservativeByPaths(e, long_reads_cov_map, lr_config)) {
+            unique_storage_pb.unique_edges_.insert(e);
+        }
+    }
+    CheckCorrectness(unique_storage_pb);
+}
+
+
+}
diff --git a/src/common/assembly_graph/graph_support/scaff_supplementary.hpp b/src/common/assembly_graph/graph_support/scaff_supplementary.hpp
new file mode 100644
index 0000000..8ace4a6
--- /dev/null
+++ b/src/common/assembly_graph/graph_support/scaff_supplementary.hpp
@@ -0,0 +1,100 @@
+#pragma once
+
+#include "assembly_graph/core/graph.hpp"
+#include "pipeline/graph_pack.hpp"
+#include "utils/logger/logger.hpp"
+//FIXME
+#include "modules/path_extend/pe_utils.hpp"
+#include "modules/path_extend/pe_config_struct.hpp"
+#include "modules/path_extend/paired_library.hpp"
+
+namespace path_extend {
+typedef debruijn_graph::EdgeId EdgeId;
+
+/* Storage of presumably unique, relatively long edges. Filled by ScaffoldingUniqueEdgeAnalyzer
+ *
+ */
+class ScaffoldingUniqueEdgeStorage {
+    friend class ScaffoldingUniqueEdgeAnalyzer;
+private:
+    set <EdgeId> unique_edges_;
+    size_t min_unique_length_;
+public:
+    ScaffoldingUniqueEdgeStorage(): unique_edges_(){
+        DEBUG("storage created, empty");
+    }
+
+    bool IsUnique(EdgeId e) const {
+        return (unique_edges_.find(e) != unique_edges_.end());
+    }
+
+    decltype(unique_edges_.begin()) begin() const {
+        return unique_edges_.begin();
+    }
+
+    decltype(unique_edges_.end()) end() const {
+        return unique_edges_.end();
+    }
+
+    decltype(unique_edges_.begin()) erase(decltype(unique_edges_.begin()) iter){
+        return unique_edges_.erase(iter);
+    }
+
+    size_t size() const {
+        return unique_edges_.size();
+    }
+    size_t GetMinLength() const {
+        return min_unique_length_;
+    }
+    void SetMinLength(size_t min_length)  {
+        min_unique_length_ = min_length;
+    }
+
+    const set<EdgeId>& GetSet() const {
+        return unique_edges_;
+    }
+
+protected:
+    DECL_LOGGER("ScaffoldingUniqueEdgeStorage")
+
+};
+
+//Auxiliary class required to fill in the unique edge storage.
+
+
+class ScaffoldingUniqueEdgeAnalyzer {
+
+    const debruijn_graph::conj_graph_pack &gp_;
+    size_t length_cutoff_;
+    double median_coverage_;
+    double relative_coverage_variation_;
+//for uniqueness detection
+    static const size_t max_different_edges_ = 20;
+    static const size_t max_dijkstra_depth_ = 1000;
+    static const size_t max_dijkstra_vertices_ = 1000;
+    static const size_t overwhelming_majority_ = 10;
+    set<VertexId> GetChildren(VertexId v, map <VertexId, set<VertexId>> &dijkstra_cash_) const;
+    bool FindCommonChildren(EdgeId e1, EdgeId e2, map <VertexId, set<VertexId>> &dijkstra_cash_) const;
+    bool FindCommonChildren(vector<pair<EdgeId, double>> &next_weights) const;
+    bool FindCommonChildren(EdgeId from, size_t lib_index) const;
+    map<EdgeId, size_t> FillNextEdgeVoting(BidirectionalPathMap<size_t>& active_paths, int direction) const;
+    bool ConservativeByPaths(EdgeId e, shared_ptr<GraphCoverageMap> long_reads_cov_map, const pe_config::LongReads lr_config) const;
+    bool ConservativeByPaths(EdgeId e, shared_ptr<GraphCoverageMap> long_reads_cov_map, const pe_config::LongReads lr_config, int direction) const;
+    bool ConservativeByLength(EdgeId e);
+    void CheckCorrectness(ScaffoldingUniqueEdgeStorage& unique_storage_pb);
+protected:
+    DECL_LOGGER("ScaffoldingUniqueEdgeAnalyzer")
+
+
+    void SetCoverageBasedCutoff();
+public:
+    ScaffoldingUniqueEdgeAnalyzer(const debruijn_graph::conj_graph_pack &gp, size_t apriori_length_cutoff, double max_relative_coverage):gp_(gp), length_cutoff_(apriori_length_cutoff), relative_coverage_variation_(max_relative_coverage){
+        SetCoverageBasedCutoff();
+    }
+    void FillUniqueEdgeStorage(ScaffoldingUniqueEdgeStorage &storage_);
+    void ClearLongEdgesWithPairedLib(size_t lib_index, ScaffoldingUniqueEdgeStorage &storage_) const;
+    void FillUniqueEdgesWithLongReads(shared_ptr<GraphCoverageMap> long_reads_cov_map, ScaffoldingUniqueEdgeStorage& unique_storage_pb, const pe_config::LongReads lr_config);
+};
+}
+
+
diff --git a/src/modules/assembly_graph/handlers/edge_labels_handler.hpp b/src/common/assembly_graph/handlers/edge_labels_handler.hpp
similarity index 99%
rename from src/modules/assembly_graph/handlers/edge_labels_handler.hpp
rename to src/common/assembly_graph/handlers/edge_labels_handler.hpp
index 4a8c653..551939f 100644
--- a/src/modules/assembly_graph/handlers/edge_labels_handler.hpp
+++ b/src/common/assembly_graph/handlers/edge_labels_handler.hpp
@@ -19,7 +19,7 @@
 
 //#include "utils.hpp"
 #include "visualization/graph_labeler.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 #include <unordered_map>
 #include <map>
 
diff --git a/src/modules/assembly_graph/handlers/edges_position_handler.hpp b/src/common/assembly_graph/handlers/edges_position_handler.hpp
similarity index 98%
rename from src/modules/assembly_graph/handlers/edges_position_handler.hpp
rename to src/common/assembly_graph/handlers/edges_position_handler.hpp
index aaa9af0..c3b4c4a 100644
--- a/src/modules/assembly_graph/handlers/edges_position_handler.hpp
+++ b/src/common/assembly_graph/handlers/edges_position_handler.hpp
@@ -16,10 +16,9 @@
 #define EDGES_POSITION_HANDLER_HPP_
 
 //#include "utils.hpp"
-#include "visualization/graph_labeler.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 #include "assembly_graph/paths/mapping_path.hpp"
-#include "assembly_graph/graph_core/action_handlers.hpp"
+#include "assembly_graph/core/action_handlers.hpp"
 
 namespace omnigraph {
 
diff --git a/src/modules/assembly_graph/handlers/id_track_handler.hpp b/src/common/assembly_graph/handlers/id_track_handler.hpp
similarity index 96%
rename from src/modules/assembly_graph/handlers/id_track_handler.hpp
rename to src/common/assembly_graph/handlers/id_track_handler.hpp
index 7ab0ec8..12ab12b 100644
--- a/src/modules/assembly_graph/handlers/id_track_handler.hpp
+++ b/src/common/assembly_graph/handlers/id_track_handler.hpp
@@ -10,8 +10,8 @@
 #include <unordered_map>
 //#include "utils.hpp"
 #include "visualization/graph_labeler.hpp"
-#include "dev_support/simple_tools.hpp"
-#include "assembly_graph/graph_core/action_handlers.hpp"
+#include "utils/simple_tools.hpp"
+#include "assembly_graph/core/action_handlers.hpp"
 using namespace omnigraph;
 
 namespace omnigraph {
diff --git a/src/modules/assembly_graph/paths/bidirectional_path.cpp b/src/common/assembly_graph/paths/bidirectional_path.cpp
similarity index 92%
rename from src/modules/assembly_graph/paths/bidirectional_path.cpp
rename to src/common/assembly_graph/paths/bidirectional_path.cpp
index 0718c2c..b9d45f4 100644
--- a/src/modules/assembly_graph/paths/bidirectional_path.cpp
+++ b/src/common/assembly_graph/paths/bidirectional_path.cpp
@@ -11,7 +11,7 @@
  *      Author: andrey
  */
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "assembly_graph/paths/bidirectional_path.hpp"
 
 namespace path_extend {
diff --git a/src/modules/assembly_graph/paths/bidirectional_path.hpp b/src/common/assembly_graph/paths/bidirectional_path.hpp
similarity index 97%
rename from src/modules/assembly_graph/paths/bidirectional_path.hpp
rename to src/common/assembly_graph/paths/bidirectional_path.hpp
index 36e6030..26b5388 100644
--- a/src/modules/assembly_graph/paths/bidirectional_path.hpp
+++ b/src/common/assembly_graph/paths/bidirectional_path.hpp
@@ -14,7 +14,7 @@
 #pragma once
 
 #include <atomic>
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
 #include "assembly_graph/components/connected_component.hpp"
 
 using debruijn_graph::Graph;
@@ -103,7 +103,12 @@ public:
     }
 
     void Unsubscribe(PathListener * listener) {
-        listeners_.push_back(listener);
+        for (auto it = listeners_.begin(); it != listeners_.end(); ++it) {
+            if (*it == listener) {
+                listeners_.erase(it);
+                break;
+            }
+        }
     }
 
     void SetConjPath(BidirectionalPath* path) {
@@ -168,6 +173,10 @@ public:
         return gap_len_[index].gap_;
     }
 
+    const Gap& GapInfoAt(size_t index) const {
+        return gap_len_[index];
+    }
+
     uint32_t TrashCurrentAt(size_t index) const {
         return gap_len_[index].trash_current_;
     }
@@ -662,25 +671,13 @@ private:
         cumulative_len_.pop_back();
     }
 
-    void NotifyFrontEdgeAdded(EdgeId e, int gap) {
+    void NotifyFrontEdgeAdded(EdgeId e, const Gap& gap) {
         for (auto i = listeners_.begin(); i != listeners_.end(); ++i) {
             (*i)->FrontEdgeAdded(e, this, gap);
         }
     }
 
-    void NotifyFrontEdgeAdded(EdgeId e, Gap gap) {
-        for (auto i = listeners_.begin(); i != listeners_.end(); ++i) {
-            (*i)->FrontEdgeAdded(e, this, gap);
-        }
-    }
-
-    void NotifyBackEdgeAdded(EdgeId e, int gap) {
-        for (auto i = listeners_.begin(); i != listeners_.end(); ++i) {
-            (*i)->BackEdgeAdded(e, this, gap);
-        }
-    }
-
-    void NotifyBackEdgeAdded(EdgeId e, Gap gap) {
+    void NotifyBackEdgeAdded(EdgeId e, const Gap& gap) {
         for (auto i = listeners_.begin(); i != listeners_.end(); ++i) {
             (*i)->BackEdgeAdded(e, this, gap);
         }
@@ -698,7 +695,7 @@ private:
         }
     }
 
-    void PushFront(EdgeId e, Gap gap) {
+    void PushFront(EdgeId e, const Gap& gap) {
         PushFront(e, gap.gap_ + gap.trash_current_ - gap.trash_previous_, gap.trash_current_, gap.trash_previous_);
     }
 
@@ -717,7 +714,7 @@ private:
         } else {
             cumulative_len_.push_front(length + cumulative_len_.front() + gap - trash_previous );
         }
-        NotifyFrontEdgeAdded(e, gap);
+        NotifyFrontEdgeAdded(e, Gap(gap, trash_previous, trash_current));
     }
 
     void PopFront() {
@@ -792,6 +789,8 @@ inline void SkipGaps(const BidirectionalPath& path1, size_t& cur_pos1, int gap1,
     }
 }
 
+
+//Try to ignore multiple loop traversals
 inline size_t FirstNotEqualPosition(const BidirectionalPath& path1, size_t pos1, const BidirectionalPath& path2, size_t pos2, bool use_gaps) {
     int cur_pos1 = (int) pos1;
     int cur_pos2 = (int) pos2;
@@ -933,6 +932,10 @@ public:
         clear();
     }
 
+    ~PathContainer() {
+        DeleteAllPaths();
+    }
+
     size_t size() const {
         return data_.size();
     }
@@ -1044,7 +1047,8 @@ inline pair<size_t, size_t> ComparePaths(size_t start_pos1, size_t start_pos2, c
         bool found = false;
         for (size_t pos2 = 0; pos2 < poses2.size(); ++pos2) {
             if (poses2[pos2] > last2) {
-                if (path2.LengthAt(last2) - path2.LengthAt(poses2[pos2]) - g.length(path2.At(last2)) - path2.GapAt(poses2[pos2]) > max_diff) {
+                int diff = int(path2.LengthAt(last2)) - int(path2.LengthAt(poses2[pos2])) - int(g.length(path2.At(last2))) - path2.GapAt(poses2[pos2]);
+                if (std::abs(diff) > max_diff) {
                     break;
                 }
                 last2 = poses2[pos2];
diff --git a/src/common/assembly_graph/paths/bidirectional_path_io/bidirectional_path_output.cpp b/src/common/assembly_graph/paths/bidirectional_path_io/bidirectional_path_output.cpp
new file mode 100644
index 0000000..a3a3004
--- /dev/null
+++ b/src/common/assembly_graph/paths/bidirectional_path_io/bidirectional_path_output.cpp
@@ -0,0 +1,68 @@
+//
+// Created by andrey on 20.01.17.
+//
+
+#include "bidirectional_path_output.hpp"
+
+namespace path_extend {
+
+
+string path_extend::ContigWriter::ToFASTGPathFormat(const BidirectionalPath &path) const {
+    if (path.Empty())
+        return "";
+    string res = ids_.at(path.Front()).short_id_;
+    for (size_t i = 1; i < path.Size(); ++i) {
+        if (g_.EdgeEnd(path[i - 1]) != g_.EdgeStart(path[i])) {
+            res += ";\n" + ids_.at(path[i]).short_id_;
+        }
+        else {
+            res += "," + ids_.at(path[i]).short_id_;
+        }
+    }
+    return res;
+}
+
+void path_extend::ContigWriter::OutputPaths(const PathContainer &paths,
+                                                  const string &filename_base,
+                                                  bool write_fastg) const {
+    name_generator_->Preprocess(paths);
+    IOContigStorage storage(g_, constructor_, paths);
+
+    INFO("Writing contigs to " << filename_base);
+    io::osequencestream_simple oss(filename_base + ".fasta");
+    std::ofstream os_fastg;
+    if (write_fastg)
+        os_fastg.open((filename_base + ".paths").c_str());
+
+    size_t i = 0;
+    for (const auto& precontig : storage.Storage()) {
+        ++i;
+        std::string contig_id = name_generator_->MakeContigName(i, precontig);
+        oss.set_header(contig_id);
+        oss << precontig.sequence_;
+
+        if (write_fastg) {
+            os_fastg << contig_id << endl;
+            os_fastg << ToFASTGPathFormat(*precontig.path_) << endl;
+            os_fastg << contig_id << "'" << endl;
+            os_fastg << ToFASTGPathFormat(*precontig.path_->GetConjPath()) << endl;
+        }
+    }
+
+    if (write_fastg)
+        os_fastg.close();
+    DEBUG("Contigs written");
+}
+
+
+void path_extend::PathInfoWriter::WritePaths(const PathContainer &paths, const string &filename) const {
+    std::ofstream oss(filename.c_str());
+
+    for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
+        iter.get()->Print(oss);
+    }
+
+    oss.close();
+}
+
+}
\ No newline at end of file
diff --git a/src/common/assembly_graph/paths/bidirectional_path_io/bidirectional_path_output.hpp b/src/common/assembly_graph/paths/bidirectional_path_io/bidirectional_path_output.hpp
new file mode 100644
index 0000000..7de980d
--- /dev/null
+++ b/src/common/assembly_graph/paths/bidirectional_path_io/bidirectional_path_output.hpp
@@ -0,0 +1,60 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+
+#include "io_support.hpp"
+
+
+namespace path_extend {
+
+using namespace debruijn_graph;
+
+
+class ContigWriter {
+protected:
+    DECL_LOGGER("PathExtendIO")
+
+protected:
+    const Graph& g_;
+    ContigConstructor<Graph> &constructor_;
+    map<EdgeId, ExtendedContigIdT> ids_;
+    shared_ptr<ContigNameGenerator> name_generator_;
+
+    string ToFASTGPathFormat(const BidirectionalPath &path) const;
+
+
+public:
+    ContigWriter(const Graph& g,
+                 ContigConstructor<Graph> &constructor,
+                 const ConnectedComponentCounter &c_counter,
+                 shared_ptr<ContigNameGenerator> name_generator) :
+            g_(g),
+            constructor_(constructor),
+            ids_(),
+            name_generator_(name_generator) {
+        MakeContigIdMap(g_, ids_, c_counter, "NODE");
+    }
+
+    void OutputPaths(const PathContainer &paths,
+                               const string &filename_base,
+                               bool write_fastg = true) const;
+
+};
+
+
+class PathInfoWriter {
+protected:
+    DECL_LOGGER("PathExtendIO")
+
+public:
+
+    void WritePaths(const PathContainer &paths, const string &filename) const;
+};
+
+}
diff --git a/src/common/assembly_graph/paths/bidirectional_path_io/io_support.cpp b/src/common/assembly_graph/paths/bidirectional_path_io/io_support.cpp
new file mode 100644
index 0000000..f3f07e4
--- /dev/null
+++ b/src/common/assembly_graph/paths/bidirectional_path_io/io_support.cpp
@@ -0,0 +1,186 @@
+//
+// Created by andrey on 23.01.17.
+//
+
+#include "io_support.hpp"
+#include "modules/path_extend/pe_utils.hpp"
+
+namespace path_extend {
+
+void path_extend::TranscriptToGeneJoiner::MakeSet(size_t x) {
+    parents_[x] = x;
+    ranks_[x] = 0;
+}
+
+void path_extend::TranscriptToGeneJoiner::JoinTrees(size_t x, size_t y) {
+    x = FindTree(x);
+    y = FindTree(y);
+    if (x != y) {
+        if (ranks_[x] < ranks_[y])
+            parents_[x] = y;
+        else
+            parents_[y] = x;
+        if (ranks_[x] == ranks_[y])
+            ++ranks_[x];
+    }
+}
+
+void path_extend::TranscriptToGeneJoiner::Init(const PathContainer &paths) {
+    DEBUG("Initializing parents and ranks");
+    parents_.resize(paths.size());
+    ranks_.resize(paths.size());
+
+    size_t path_num = 0;
+    for (auto iter = paths.begin(); iter != paths.end(); ++iter, ++path_num) {
+        path_id_[iter.get()] = path_num;
+        path_id_[iter.getConjugate()] = path_num;
+        MakeSet(path_num);
+    }
+
+    DEBUG("Initialized parents and ranks");
+
+    VERIFY_MSG(path_num == paths.size(), "Path Num " << path_num << " Size " << paths.size())
+}
+
+size_t path_extend::TranscriptToGeneJoiner::FindTree(size_t x) {
+    size_t parent;
+    if (x == parents_[x]) {
+        parent = x;
+    }
+    else {
+        parents_[x] = FindTree(parents_[x]);
+        parent = parents_[x];
+    }
+    return parent;
+}
+
+size_t path_extend::TranscriptToGeneJoiner::GetPathId(BidirectionalPath *path) {
+    return path_id_[path];
+}
+
+void path_extend::TranscriptToGeneJoiner::Construct(const PathContainer &paths) {
+    Init(paths);
+
+    GraphCoverageMap edges_coverage(g_, paths);
+
+    DEBUG("Union trees");
+    //For all edges in coverage map
+    for (auto iterator = edges_coverage.begin(); iterator != edges_coverage.end(); ++iterator) {
+        //Select a path covering an edge
+        EdgeId edge = iterator->first;
+        GraphCoverageMap::MapDataT *edge_paths = iterator->second;
+
+        if (g_.length(edge) > min_edge_len_ && edge_paths->size() > 1) {
+            DEBUG("Long edge " << edge.int_id() << " Paths " << edge_paths->size());
+            //For all other paths covering this edge, join them into a single gene with the first path
+            for (auto it_edge = ++edge_paths->begin(); it_edge != edge_paths->end(); ++it_edge) {
+                size_t first = path_id_[*edge_paths->begin()];
+                size_t next = path_id_[*it_edge];
+                DEBUG("Edge " << edge.int_id() << " First " << first << " Next " << next);
+
+                JoinTrees(first, next);
+            }
+        }
+    }
+}
+
+string path_extend::IOContigStorage::ToString(const BidirectionalPath &path) const {
+    stringstream ss;
+    if (path.IsInterstrandBulge() && path.Size() == 1) {
+        ss << constructor_.construct(path.Back()).first.substr(k_, g_.length(path.Back()) - k_);
+        return ss.str();
+    }
+
+    if (!path.Empty()) {
+        ss << constructor_.construct(path[0]).first.substr(0, k_);
+    }
+
+
+    size_t i = 0;
+    while (i < path.Size()) {
+        int gap = i == 0 ? 0 : path.GapAt(i);
+        if (gap > (int) k_) {
+            for (size_t j = 0; j < gap - k_; ++j) {
+                ss << "N";
+            }
+            auto temp_str = constructor_.construct(path[i]).first;
+            if (i != path.Size() - 1) {
+                for (size_t j = 0; j < path.TrashPreviousAt(i + 1); ++j) {
+                    temp_str.pop_back();
+                    if (temp_str.size() == 0) {
+                        break;
+                    }
+                }
+            }
+            ss << temp_str;
+        }
+        else {
+            int overlapLen = (int) k_ - gap;
+            if (overlapLen >= (int) g_.length(path[i]) + (int) k_) {
+                overlapLen -= (int) g_.length(path[i]) + (int) k_;
+                ++i;
+                //skipping overlapping edges
+                while (i < path.Size() && overlapLen >= (int) g_.length(path[i]) + path.GapAt(i)) {
+                    overlapLen -= (int) g_.length(path[i]) + path.GapAt(i);
+                    ++i;
+                }
+                if (i == path.Size()) {
+                    break;
+                }
+
+                overlapLen = overlapLen + (int) k_ - path.GapAt(i);
+
+                if(overlapLen < 0) {
+                    for (int j = 0; j < abs(overlapLen); ++j) {
+                        ss << "N";
+                    }
+                    overlapLen = 0;
+                }
+            }
+            auto temp_str = g_.EdgeNucls(path[i]).Subseq(overlapLen).str();
+            if (i != path.Size() - 1) {
+                for (size_t j = 0; j < path.TrashPreviousAt(i + 1); ++j) {
+                    temp_str.pop_back();
+                    if (temp_str.size() == 0) {
+                        break;
+                    }
+                }
+            }
+            ss << temp_str;
+        }
+        ++i;
+    }
+    return ss.str();
+}
+
+void path_extend::ScaffoldBreaker::SplitPath(const BidirectionalPath &path, PathContainer &result) const {
+    size_t i = 0;
+
+    while (i < path.Size()) {
+        BidirectionalPath *p = new BidirectionalPath(path.graph(), path[i]);
+        ++i;
+
+        while (i < path.Size() and path.GapAt(i) <= min_gap_) {
+            p->PushBack(path[i], path.GapAt(i), path.TrashPreviousAt(i), path.TrashCurrentAt(i));
+            ++i;
+        }
+
+        if (i < path.Size()) {
+            DEBUG("split path " << i << " gap " << path.GapAt(i));
+            p->Print();
+        }
+
+        BidirectionalPath *cp = new BidirectionalPath(p->Conjugate());
+        result.AddPair(p, cp);
+    }
+}
+
+void path_extend::ScaffoldBreaker::Break(const PathContainer &paths, PathContainer &result) const {
+    for (auto it = paths.begin(); it != paths.end(); ++it) {
+        SplitPath(*it.get(), result);
+    }
+    result.SortByLength();
+}
+
+}
+
diff --git a/src/common/assembly_graph/paths/bidirectional_path_io/io_support.hpp b/src/common/assembly_graph/paths/bidirectional_path_io/io_support.hpp
new file mode 100644
index 0000000..e46bd42
--- /dev/null
+++ b/src/common/assembly_graph/paths/bidirectional_path_io/io_support.hpp
@@ -0,0 +1,190 @@
+//
+// Created by andrey on 23.01.17.
+//
+
+#pragma once
+
+#include "assembly_graph/paths/bidirectional_path.hpp"
+#include "assembly_graph/graph_support/contig_output.hpp"
+#include "assembly_graph/components/connected_component.hpp"
+
+namespace path_extend {
+using namespace debruijn_graph;
+
+
+struct IOContig {
+    std::string sequence_;
+    BidirectionalPath* path_;
+
+    IOContig(const std::string& sequence, BidirectionalPath* path) :
+        sequence_(sequence), path_(path) { }
+};
+
+struct IOContigGreater
+{
+    bool operator()(const IOContig &a, const IOContig &b) const {
+        if (a.sequence_.length() ==  b.sequence_.length())
+            return math::gr(a.path_->Coverage(), b.path_->Coverage());
+        return a.sequence_.length() > b.sequence_.length();
+    }
+};
+
+class IOContigStorage {
+private:
+    const Graph &g_;
+    ContigConstructor<Graph> &constructor_;
+    size_t k_;
+    vector<IOContig> storage_;
+
+    string ToString(const BidirectionalPath& path) const;
+public:
+    IOContigStorage(const Graph &g, ContigConstructor<Graph> &constructor, const PathContainer &paths):
+        g_(g),
+        constructor_(constructor),
+        k_(g.k()),
+        storage_() {
+
+        for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
+            BidirectionalPath* path = iter.get();
+            if (path->Length() <= 0)
+                continue;
+            string path_string = ToString(*path);
+            if (path_string.length() >= g.k()) {
+                storage_.emplace_back(path_string, path);
+            }
+        }
+        std::sort(storage_.begin(), storage_.end(), IOContigGreater());
+    }
+
+    const vector<IOContig>& Storage() const {
+        return storage_;
+    }
+};
+
+
+//Finds common long edges in paths and joins the corresponding transcripts into genes.
+//Based on disjoint set union
+class TranscriptToGeneJoiner {
+private:
+    const Graph &g_;
+    size_t min_edge_len_; //minimal length for joining transcripts into a gene
+
+    map<BidirectionalPath *, size_t, PathComparator> path_id_; //path ids
+    std::vector<size_t> parents_; //node parents in the disjoint-set forest
+    std::vector<size_t> ranks_; //tree depth
+
+
+    void MakeSet(size_t x);
+
+    void JoinTrees(size_t x, size_t y);
+
+    void Init(const PathContainer &paths);
+public:
+    TranscriptToGeneJoiner(const Graph &g, size_t min_edge_len): g_(g), min_edge_len_(min_edge_len) {}
+
+    size_t FindTree(size_t x);
+
+    size_t GetPathId(BidirectionalPath *path);
+
+    void Construct(const PathContainer &paths);
+};
+
+
+
+class ContigNameGenerator {
+public:
+    virtual void Preprocess(const PathContainer& paths) = 0;
+
+    virtual std::string MakeContigName(size_t index, const IOContig &precontig) = 0;
+
+    virtual ~ContigNameGenerator() {
+    }
+};
+
+class DefaultContigNameGenerator: public ContigNameGenerator {
+public:
+    void Preprocess(const PathContainer&) override {}
+
+    std::string MakeContigName(size_t index, const IOContig &precontig) override {
+        return io::MakeContigId(index, precontig.sequence_.length(), precontig.path_->Coverage());
+    }
+};
+
+class PlasmidContigNameGenerator: public ContigNameGenerator {
+    const ConnectedComponentCounter &c_counter_;
+
+public:
+    PlasmidContigNameGenerator(const ConnectedComponentCounter &c_counter): c_counter_(c_counter) {}
+
+    void Preprocess(const PathContainer&) override {}
+
+    std::string MakeContigName(size_t index, const IOContig &precontig) override {
+        EdgeId e = precontig.path_->At(0);
+        size_t component = c_counter_.GetComponent(e);
+        return io::MakeContigComponentId(index, precontig.sequence_.length(), precontig.path_->Coverage(), component);
+    }
+};
+
+class TranscriptNameGenerator: public ContigNameGenerator {
+    TranscriptToGeneJoiner transcript_joiner_;
+
+    unordered_map<size_t, size_t> isoform_num_;
+    unordered_map<size_t, size_t> gene_ids_;
+    size_t gene_num_;
+
+public:
+    TranscriptNameGenerator(const Graph &g, size_t min_edge_len = 300):
+        transcript_joiner_(g, min_edge_len),
+        isoform_num_(),
+        gene_ids_(),
+        gene_num_(0) {
+
+    }
+
+    void Preprocess(const PathContainer& paths) override {
+        transcript_joiner_.Construct(paths);
+    }
+
+    std::string MakeContigName(size_t index, const IOContig &precontig) override {
+        size_t id = transcript_joiner_.GetPathId(precontig.path_);
+        size_t parent_id = transcript_joiner_.FindTree(id);
+        DEBUG("Path " << id << " Parent " << parent_id);
+        if (gene_ids_.find(parent_id) == gene_ids_.end()) {
+            gene_ids_[parent_id] = gene_num_;
+            isoform_num_[parent_id] = 0;
+            gene_num_++;
+        }
+        string contig_id = io::MakeRNAContigId(index, precontig.sequence_.length(), precontig.path_->Coverage(), gene_ids_[parent_id], isoform_num_[parent_id]);
+        isoform_num_[parent_id]++;
+        return contig_id;
+    }
+};
+
+
+inline std::shared_ptr<ContigNameGenerator> MakeContigNameGenerator(config::pipeline_type mode,
+                                                                    const conj_graph_pack &gp) {
+    std::shared_ptr<path_extend::ContigNameGenerator> name_generator;
+    if (mode == config::pipeline_type::plasmid)
+        name_generator = make_shared<PlasmidContigNameGenerator>(gp.components);
+    else if (mode == config::pipeline_type::rna)
+        name_generator = make_shared<TranscriptNameGenerator>(gp.g);
+    else
+        name_generator = make_shared<DefaultContigNameGenerator>();
+    return name_generator;
+}
+
+class ScaffoldBreaker {
+private:
+
+    int min_gap_;
+
+    void SplitPath(const BidirectionalPath& path, PathContainer &result) const;
+
+public:
+
+    ScaffoldBreaker(int min_gap): min_gap_(min_gap) {}
+
+    void Break(const PathContainer &paths, PathContainer &result) const;
+};
+
+}
\ No newline at end of file
diff --git a/src/modules/assembly_graph/paths/mapping_path.hpp b/src/common/assembly_graph/paths/mapping_path.hpp
similarity index 77%
rename from src/modules/assembly_graph/paths/mapping_path.hpp
rename to src/common/assembly_graph/paths/mapping_path.hpp
index 2cb6076..3551e04 100644
--- a/src/modules/assembly_graph/paths/mapping_path.hpp
+++ b/src/common/assembly_graph/paths/mapping_path.hpp
@@ -7,7 +7,8 @@
 
 #pragma once
 
-#include "dev_support/range.hpp"
+#include "sequence/sequence.hpp"
+#include "utils/range.hpp"
 
 namespace omnigraph {
 
@@ -26,10 +27,9 @@ class Path {
             : sequence_(sequence), start_pos_(start_pos),  end_pos_( end_pos) {
     }
 
-    Path()
-            : sequence_(),
-              start_pos_(-1ul),
-              end_pos_(-1ul) {
+    Path() : sequence_(),
+             start_pos_(-1ul),
+             end_pos_(-1ul) {
     }
 
     size_t start_pos() const { return start_pos_; }
@@ -169,6 +169,16 @@ class MappingPath {
 
     size_t size() const { return edges_.size(); }
 
+    size_t empty() const { return edges_.empty(); }
+
+    ElementId edge_at(size_t idx) const {
+       return edges_[idx];
+    };
+
+    MappingRange mapping_at(size_t idx) const {
+        return range_mappings_[idx];
+    };
+
     std::pair<const ElementId, const MappingRange> operator[](size_t idx) const {
         return std::make_pair(edges_[idx], range_mappings_[idx]);
     }
@@ -229,4 +239,63 @@ inline std::ostream& operator<<(std::ostream& os, const MappingPath<ElementId>&
     return os;
 }
 
+template<class Graph>
+struct GapDescription {
+    typedef typename Graph::EdgeId EdgeId;
+    EdgeId start, end;
+    Sequence gap_seq;
+    //FIXME discuss using size_t
+    size_t edge_gap_start_position, edge_gap_end_position;
+
+    GapDescription() :
+            start(0),
+            end(0),
+            edge_gap_start_position(0),
+            edge_gap_end_position(0) {
+    }
+
+    GapDescription(EdgeId start_e, EdgeId end_e,
+                   const Sequence &gap,
+                   size_t gap_start, size_t gap_end) :
+            start(start_e),
+            end(end_e),
+            gap_seq(gap.str()),
+            edge_gap_start_position(gap_start),
+            edge_gap_end_position(gap_end) {
+    }
+
+    GapDescription<Graph> conjugate(const Graph &g) const {
+        GapDescription<Graph> res(
+                g.conjugate(end), g.conjugate(start), !gap_seq,
+                g.length(end) - edge_gap_end_position,
+                g.length(start) - edge_gap_start_position);
+        return res;
+    }
+
+    string str(const Graph &g) const {
+        stringstream s;
+        s << g.int_id(start) << " " << edge_gap_start_position << endl
+          << g.int_id(end) << " " << edge_gap_end_position << endl
+          << gap_seq.str() << endl;
+        return s.str();
+    }
+
+    bool operator<(const GapDescription &b) const {
+        return start < b.start ||
+               (start == b.start && end < b.end) ||
+               (start == b.start && end == b.end &&
+                edge_gap_start_position < b.edge_gap_start_position);
+    }
+
+    bool operator!=(const GapDescription rhs) const {
+        return start != rhs.start
+               || end != rhs.end
+               || gap_seq != rhs.gap_seq
+               || edge_gap_start_position != rhs.edge_gap_start_position
+               || edge_gap_end_position != rhs.edge_gap_end_position;
+    }
+
+};
+
+
 }
diff --git a/src/modules/assembly_graph/paths/path_finders.hpp b/src/common/assembly_graph/paths/path_finders.hpp
similarity index 98%
rename from src/modules/assembly_graph/paths/path_finders.hpp
rename to src/common/assembly_graph/paths/path_finders.hpp
index 40f5add..4cef781 100644
--- a/src/modules/assembly_graph/paths/path_finders.hpp
+++ b/src/common/assembly_graph/paths/path_finders.hpp
@@ -1,6 +1,6 @@
 #pragma once
 
-#include "assembly_graph/graph_core/directions.hpp"
+#include "assembly_graph/core/directions.hpp"
 
 namespace omnigraph {
 template<class Graph>
diff --git a/src/modules/assembly_graph/paths/path_processor.hpp b/src/common/assembly_graph/paths/path_processor.hpp
similarity index 73%
rename from src/modules/assembly_graph/paths/path_processor.hpp
rename to src/common/assembly_graph/paths/path_processor.hpp
index 5f3d3b6..0408100 100644
--- a/src/modules/assembly_graph/paths/path_processor.hpp
+++ b/src/common/assembly_graph/paths/path_processor.hpp
@@ -7,9 +7,9 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
-#include "utils/adt/bag.hpp"
-#include "algorithms/dijkstra/dijkstra_helper.hpp"
+#include "utils/standard_base.hpp"
+#include "common/adt/bag.hpp"
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
 
 namespace omnigraph {
 
@@ -39,17 +39,14 @@ public:
         virtual ~Callback() {
         }
 
-        virtual void Flush() {
-        }
-
         virtual void HandleReversedPath(const vector<EdgeId>& reversed_path) = 0;
 
 
     protected:
         Path ReversePath(const Path& path) const {
             Path result;
-            for (auto I = path.rbegin(), E = path.rend(); I != E; ++I)
-                result.push_back(*I);
+            for (auto it = path.rbegin(), end = path.rend(); it != end; ++it)
+                result.push_back(*it);
             return result;
         }
     };
@@ -119,7 +116,6 @@ private:
             }
 
             TRACE("Iterating through incoming edges of vertex " << g_.int_id(v))
-            //TODO: doesn`t work with parallel simplification
             vector<EdgeId> incoming;
             incoming.reserve(4);
             std::copy_if(g_.in_begin(v), g_.in_end(v), std::back_inserter(incoming), [&] (EdgeId e) {
@@ -151,7 +147,7 @@ private:
             min_len_(min_len), max_len_(max_len),
             callback_(callback),
             edge_depth_bound_(edge_depth_bound),
-            curr_len_(0), curr_depth_(0), call_cnt_(0), 
+            curr_len_(0), curr_depth_(0), call_cnt_(0),
             g_(outer.g_),
             dijkstra_(outer.dijkstra_) {
             reversed_edge_path_.reserve(PathProcessor::MAX_CALL_CNT);
@@ -160,6 +156,10 @@ private:
 
         //returns true iff limits were exceeded
         bool Go() {
+            if (!dijkstra_.DistanceCounted(end_) || dijkstra_.GetDistance(end_) > max_len_) {
+                return false;
+            }
+
             bool code = Go(end_, min_len_);
             VERIFY(curr_len_ == 0);
             VERIFY(curr_depth_ == 0);
@@ -200,7 +200,6 @@ public:
         Traversal traversal(*this, end, min_len, max_len, callback, edge_depth_bound);
         error_code |= int(traversal.Go());
 
-        callback.Flush();
         TRACE("Process finished with error code " << error_code);
         return error_code;
     }
@@ -226,29 +225,20 @@ int ProcessPaths(const Graph& g, size_t min_len, size_t max_len,
 }
 
 template<class Graph>
-class CompositeCallback: public PathProcessor<Graph>::Callback {
+class AdapterCallback: public PathProcessor<Graph>::Callback {
     typedef typename Graph::EdgeId EdgeId;
-    typedef vector<EdgeId> Path;
-
+	typedef vector<EdgeId> Path;
+    std::function<void(const Path&)> func_;
+    bool reverse_;
 public:
-    void AddProcessor(typename PathProcessor<Graph>::Callback& processor) {
-        processors_.push_back(&processor);
-    }
 
-    void Flush() override {
-        for (auto it = processors_.begin(); it != processors_.end(); ++it) {
-            (*it)->Flush();
-        }
-    }
+    AdapterCallback(const std::function<void(const Path&)>& func, bool reverse = false) :
+        func_(func), reverse_(reverse) {}
 
     void HandleReversedPath(const Path& path) override {
-        for (auto it = processors_.begin(); it != processors_.end(); ++it) {
-            (*it)->HandleReversedPath(path);
-        }
-    }
+        func_(reverse_ ? this->ReversePath(path) : path);
+	}
 
-private:
-    vector<typename PathProcessor<Graph>::Callback*> processors_;
 };
 
 template<class Graph, class Comparator>
@@ -257,31 +247,24 @@ class BestPathStorage: public PathProcessor<Graph>::Callback {
     typedef vector<EdgeId> Path;
 public:
     BestPathStorage(const Graph& g, Comparator comparator) :
-            g_(g), cnt_(0), comparator_(comparator) {
+            g_(g), comparator_(comparator) {
     }
 
-    void HandleReversedPath(const vector<EdgeId>& path) override {
-        cnt_++;
-        if(best_path_.size() == 0 || comparator_(path, best_path_))
-            best_path_ = path;
+    void HandleReversedPath(const Path& path) override {
+        if (!best_path_ || comparator_(path, *best_path_))
+            best_path_ = boost::make_optional(path);
     }
 
-    vector<EdgeId> BestPath() const {
+    boost::optional<Path> best_path() const {
         return best_path_;
     }
 
-    size_t size() const {
-        return cnt_;
-    }
-
 private:
     const Graph& g_;
-    size_t cnt_;
     Comparator comparator_;
-    vector<vector<Path>> best_path_;
+    boost::optional<Path> best_path_;
 };
 
-
 template<class Graph>
 class PathStorageCallback: public PathProcessor<Graph>::Callback {
     typedef typename Graph::EdgeId EdgeId;
@@ -292,27 +275,21 @@ public:
             g_(g) {
     }
 
-    void Flush() override {
-        all_paths_.push_back(cur_paths_);
-        cur_paths_.clear();
-    }
-
     void HandleReversedPath(const vector<EdgeId>& path) override {
-        cur_paths_.push_back(this->ReversePath(path));
+        paths_.push_back(this->ReversePath(path));
     }
 
-    size_t size(size_t k = 0) const {
-        return all_paths_[k].size();
+    size_t size() const {
+        return paths_.size();
     }
 
-    const vector<Path>& paths(size_t k = 0) const {
-        return all_paths_[k];
+    const vector<Path>& paths() const {
+        return paths_;
     }
 
 private:
     const Graph& g_;
-    vector<vector<Path>> all_paths_;
-    vector<Path> cur_paths_;
+    vector<Path> paths_;
 };
 
 template<class Graph>
@@ -325,33 +302,25 @@ public:
             g_(g), count_(0) {
     }
 
-    void Flush() override {
-        all_paths_.push_back(cur_paths_);
-        counts_.push_back(count_);
-        cur_paths_.clear();
-    }
-
     void HandleReversedPath(const Path& path) override {
         if (path.size() > 0) {
             ++count_;
-            cur_paths_.push_back(this->ReversePath(path));
+            paths_.push_back(this->ReversePath(path));
         }
     }
 
-    size_t count(size_t k = 0) const {
-        return counts_[k];
+    size_t count() const {
+        return count_;
     }
 
-    const vector<Path>& paths(size_t k = 0) const {
-        return all_paths_[k];
+    const vector<Path>& paths() const {
+        return paths_;
     }
 
 private:
     const Graph& g_;
-    vector<size_t> counts_;
     size_t count_;
-    vector<vector<Path> > all_paths_;
-    vector<Path> cur_paths_;
+    vector<Path> paths_;
 };
 
 template<class Graph>
@@ -365,34 +334,26 @@ public:
             g_(g), count_(0) {
     }
 
-    void Flush() override {
-        all_vertices_.push_back(vertices_);
-        vertices_.clear();
-        counts_.push_back(count_);
-    }
-
     void HandleReversedPath(const Path& path) override {
-        for (auto it = path.rbegin(); it != path.rend(); ++it) {
+        for (EdgeId e : path) {
             if (path.size() > 0) {
-                vertices_.insert(g_.EdgeStart(*it));
-                vertices_.insert(g_.EdgeEnd(*it));
+                vertices_.insert(g_.EdgeStart(e));
+                vertices_.insert(g_.EdgeEnd(e));
                 ++count_;
             }
         }
     }
 
-    const set<VertexId>& vertices(size_t k = 0) const {
-        return all_vertices_[k];
+    const set<VertexId>& vertices() const {
+        return vertices_;
     }
 
-    size_t count(size_t k = 0) const {
-        return counts_[k];
+    size_t count() const {
+        return count_;
     }
 
 private:
     Graph& g_;
-    vector<size_t> counts_;
-    vector<set<VertexId>> all_vertices_;
     size_t count_;
     set<VertexId> vertices_;
 };
@@ -407,33 +368,17 @@ public:
             g_(g) {
     }
 
-    void Flush() override {
-        all_distances_.push_back(distances_);
-        distances_.clear();
-    }
-
     void HandleReversedPath(const Path& path) override {
-        size_t path_length = PathLength(path);
-        distances_.insert(path_length);
+        distances_.insert(CumulativeLength(g_, path));
     }
 
-    vector<size_t> distances(size_t k = 0) const {
-        VERIFY(k < all_distances_.size());
-        const set<size_t>& tmp = all_distances_[k];
-        return vector<size_t>(tmp.begin(), tmp.end());
+    vector<size_t> distances() const {
+        return vector<size_t>(distances_.begin(), distances_.end());
     }
 
 private:
-    size_t PathLength(const Path& path) const {
-        size_t res = 0;
-        for (auto I = path.begin(); I != path.end(); ++I)
-            res += g_.length(*I);
-        return res;
-    }
-
     const Graph& g_;
     set<size_t> distances_;
-    vector<set<size_t>> all_distances_;
 
     DECL_LOGGER("DistancesLengthsCallback");
 };
diff --git a/src/common/assembly_graph/paths/path_utils.hpp b/src/common/assembly_graph/paths/path_utils.hpp
new file mode 100644
index 0000000..5ab6b28
--- /dev/null
+++ b/src/common/assembly_graph/paths/path_utils.hpp
@@ -0,0 +1,130 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+/*
+ * path_utils.hpp
+ *
+ */
+
+#pragma once
+
+#include "sequence/sequence.hpp"
+#include "path_processor.hpp"
+#include "mapping_path.hpp"
+#include "assembly_graph/dijkstra/dijkstra_algorithm.hpp"
+
+namespace debruijn_graph {
+
+
+    template<class Graph>
+    vector<typename Graph::EdgeId> GetCommonPathsEnd(
+            const Graph &g,
+            typename Graph::EdgeId e1,
+            typename Graph::EdgeId e2,
+            size_t min_dist,
+            size_t max_dist,
+            typename omnigraph::DijkstraHelper<Graph>::BoundedDijkstra &dijkstra) {
+
+        typedef typename Graph::EdgeId EdgeId;
+        typedef typename Graph::VertexId VertexId;
+
+        vector<EdgeId> res;
+        VERIFY (min_dist >= g.length(e1));
+        VERIFY (max_dist >= g.length(e1));
+        size_t dist = max_dist - g.length(e1);
+        VertexId cur_vertex = g.EdgeStart(e2);
+        if (!dijkstra.DistanceCounted(cur_vertex))
+            return res;
+        size_t cur_dist;
+        if ((cur_dist = dijkstra.GetDistance(cur_vertex)) > dist)
+            return res;
+        size_t suffix_len = 0;
+        while (cur_dist > 0) {
+            EdgeId prev_edge(0);
+            bool found = false;
+            for (auto edge: g.IncomingEdges(cur_vertex)) {
+                if ((dijkstra.DistanceCounted(g.EdgeStart(edge))) && (
+                        suffix_len + g.length(edge) + dijkstra.GetDistance(g.EdgeStart(edge)) <= dist)) {
+                    if (found == true) {
+                        std::reverse(res.begin(), res.end());
+                        return res;
+                    } else {
+                        found = true;
+                        prev_edge = edge;
+                    }
+                }
+            }
+            if (!found)
+                return res;
+            else {
+                suffix_len += g.length(prev_edge);
+                VERIFY(cur_dist >= g.length(prev_edge));
+                cur_dist -= g.length(prev_edge);
+                cur_vertex = g.EdgeStart(prev_edge);
+                res.push_back(prev_edge);
+            }
+        }
+        std::reverse(res.begin(), res.end());
+        return res;
+    }
+
+    template<class Graph>
+    vector<vector<typename Graph::EdgeId> > GetAllPathsBetweenEdges(
+            const Graph &g,
+            typename Graph::EdgeId &e1,
+            typename Graph::EdgeId &e2, size_t min_dist,
+            size_t max_dist) {
+        omnigraph::PathStorageCallback<Graph> callback(g);
+        ProcessPaths(g,
+                     min_dist,
+                     max_dist,
+                     g.EdgeEnd(e1), g.EdgeStart(e2),
+                     callback);
+        auto paths = callback.paths();
+        return paths;
+    }
+
+    template<class graph_pack>
+    size_t GetAllPathsQuantity(const graph_pack &origin_gp,
+                               const typename graph_pack::graph_t::EdgeId &e1,
+                               const typename graph_pack::graph_t::EdgeId &e2, double d, double is_var) {
+        omnigraph::PathStorageCallback<typename graph_pack::graph_t> callback(origin_gp.g);
+        omnigraph::PathProcessor<typename graph_pack::graph_t>
+                path_processor(origin_gp.g,
+                               (size_t) d - origin_gp.g.length(e1) - size_t(is_var),
+                               (size_t) d - origin_gp.g.length(e1) + size_t(is_var),
+                               origin_gp.g.EdgeEnd(e1),
+                               origin_gp.g.EdgeStart(e2),
+                               callback);
+        path_processor.Process();
+        auto paths = callback.paths();
+        TRACE(e1.int_id() << " " << e2.int_id() << " " << paths.size());
+        return paths.size();
+    }
+
+    template<class Graph>
+    Sequence MergeSequences(const Graph &g,
+                            const vector<typename Graph::EdgeId> &continuous_path) {
+        vector<Sequence> path_sequences;
+        path_sequences.push_back(g.EdgeNucls(continuous_path[0]));
+        for (size_t i = 1; i < continuous_path.size(); ++i) {
+            VERIFY(g.EdgeEnd(continuous_path[i - 1]) == g.EdgeStart(continuous_path[i]));
+            path_sequences.push_back(g.EdgeNucls(continuous_path[i]));
+        }
+        return MergeOverlappingSequences(path_sequences, g.k());
+    }
+
+    template<class Graph>
+    Sequence PathSequence(const Graph &g, const omnigraph::Path<typename Graph::EdgeId> &path) {
+        Sequence path_sequence = MergeSequences(g, path.sequence());
+        size_t start = path.start_pos();
+        size_t end = path_sequence.size() - g.length(path[path.size() - 1]) + path.end_pos();
+        return path_sequence.Subseq(start, end);
+    }
+
+
+}
diff --git a/src/modules/assembly_graph/stats/picture_dump.hpp b/src/common/assembly_graph/stats/picture_dump.hpp
similarity index 79%
rename from src/modules/assembly_graph/stats/picture_dump.hpp
rename to src/common/assembly_graph/stats/picture_dump.hpp
index 18c6d39..bee431d 100644
--- a/src/modules/assembly_graph/stats/picture_dump.hpp
+++ b/src/common/assembly_graph/stats/picture_dump.hpp
@@ -8,10 +8,10 @@
 #pragma once
 
 #include "statistics.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
 
 #include "pipeline/graph_pack.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
 #include "pipeline/graphio.hpp"
 //FIXME awful dependency to get write_lib_data
 #include "pipeline/config_struct.hpp"
@@ -20,13 +20,13 @@
 #include "visualization/visualization.hpp"
 #include "assembly_graph/handlers/edges_position_handler.hpp"
 #include "assembly_graph/components/graph_component.hpp"
-#include "io/reads_io/rc_reader_wrapper.hpp"
-#include "io/reads_io/delegating_reader_wrapper.hpp"
-#include "io/reads_io/io_helper.hpp"
-#include "io/reads_io/wrapper_collection.hpp"
-#include "io/reads_io/osequencestream.hpp"
+#include "io/reads/rc_reader_wrapper.hpp"
+#include "io/reads/delegating_reader_wrapper.hpp"
+#include "io/reads/io_helper.hpp"
+#include "io/reads/wrapper_collection.hpp"
+#include "io/reads/osequencestream.hpp"
 #include "io/dataset_support/dataset_readers.hpp"
-#include "dev_support/copy_file.hpp"
+#include "utils/copy_file.hpp"
 
 #include <boost/algorithm/string.hpp>
 
@@ -43,7 +43,7 @@ MappingPath<typename Graph::EdgeId>
 FindGenomeMappingPath(const Sequence& genome, const Graph& g,
                       const Index& index,
                       const KmerMapper<Graph>& kmer_mapper) {
-    NewExtendedSequenceMapper<Graph, Index> srt(g, index, kmer_mapper);
+    BasicSequenceMapper<Graph, Index> srt(g, index, kmer_mapper);
     return srt.MapSequence(genome);
 }
 
@@ -54,8 +54,8 @@ FindGenomeMappingPath(const Sequence& genome, const graph_pack& gp) {
 }
 
 template <class graph_pack>
-shared_ptr<omnigraph::visualization::GraphColorer<Graph>> DefaultColorer(const graph_pack& gp) {
-    return omnigraph::visualization::DefaultColorer(gp.g, 
+shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> DefaultColorer(const graph_pack& gp) {
+    return visualization::graph_colorer::DefaultColorer(gp.g,
         FindGenomeMappingPath(gp.genome.GetSequence(), gp.g, gp.index, gp.kmer_mapper).path(),
         FindGenomeMappingPath(!gp.genome.GetSequence(), gp.g, gp.index, gp.kmer_mapper).path());
 }
@@ -64,11 +64,11 @@ template <class graph_pack>
 void CollectContigPositions(graph_pack &gp) {
     if (!cfg::get().pos.contigs_for_threading.empty() &&
         path::FileExists(cfg::get().pos.contigs_for_threading))
-      FillPos(gp, cfg::get().pos.contigs_for_threading, "thr_", true);
+      visualization::position_filler::FillPos(gp, cfg::get().pos.contigs_for_threading, "thr_", true);
 
     if (!cfg::get().pos.contigs_to_analyze.empty() &&
         path::FileExists(cfg::get().pos.contigs_to_analyze))
-      FillPos(gp, cfg::get().pos.contigs_to_analyze, "anlz_", true);
+      visualization::position_filler::FillPos(gp, cfg::get().pos.contigs_to_analyze, "anlz_", true);
 }
 
 template<class Graph, class Index>
@@ -93,7 +93,7 @@ class GenomeMappingStat: public AbstractStatCounter {
         if (genome_.size() <= k_)
             return;
 
-        runtime_k::RtSeq cur = genome_.start<runtime_k::RtSeq>(k_ + 1);
+        RtSeq cur = genome_.start<RtSeq>(k_ + 1);
         cur >>= 0;
         bool breaked = true;
         pair<EdgeId, size_t> cur_position;
@@ -131,10 +131,10 @@ class GenomeMappingStat: public AbstractStatCounter {
 template<class Graph>
 void WriteErrorLoc(const Graph &g,
                    const string& folder_name,
-                   std::shared_ptr<omnigraph::visualization::GraphColorer<Graph>> genome_colorer,
-                   const omnigraph::GraphLabeler<Graph>& labeler) {
+                   std::shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> genome_colorer,
+                   const visualization::graph_labeler::GraphLabeler<Graph>& labeler) {
     INFO("Writing error localities for graph to folder " << folder_name);
-    GraphComponent<Graph> all(g, g.begin(), g.end());
+    auto all = GraphComponent<Graph>::WholeGraph(g);
     set<typename Graph::EdgeId> edges = genome_colorer->ColoredWith(all.edges().begin(),
                                                     all.edges().end(), "black");
     set<typename Graph::VertexId> to_draw;
@@ -143,7 +143,7 @@ void WriteErrorLoc(const Graph &g,
         to_draw.insert(g.EdgeStart(*it));
     }
     shared_ptr<GraphSplitter<Graph>> splitter = StandardSplitter(g, to_draw);
-    WriteComponents(g, folder_name, splitter, genome_colorer, labeler);
+    visualization::visualization_utils::WriteComponents(g, folder_name, splitter, genome_colorer, labeler);
     INFO("Error localities written written to folder " << folder_name);
 }
 
@@ -171,14 +171,16 @@ void CountStats(const graph_pack& gp) {
 
 template<class Graph>
 void WriteGraphComponentsAlongGenome(const Graph& g,
-                                     const GraphLabeler<Graph>& labeler,
+                                     const visualization::graph_labeler::GraphLabeler<Graph>& labeler,
                                      const string& folder,
                                      const Path<typename Graph::EdgeId>& path1,
                                      const Path<typename Graph::EdgeId>& path2) {
     INFO("Writing graph components along genome");
 
     make_dir(folder);
-    omnigraph::visualization::WriteComponentsAlongPath(g, path1, folder, omnigraph::visualization::DefaultColorer(g, path1, path2), labeler);
+    visualization::visualization_utils::WriteComponentsAlongPath(g, path1, folder,
+                                                                 visualization::graph_colorer::DefaultColorer(g, path1, path2),
+                                                                 labeler);
 
     INFO("Writing graph components along genome finished");
 }
@@ -188,8 +190,8 @@ template<class Graph, class Mapper>
 void WriteGraphComponentsAlongContigs(const Graph& g,
                                       Mapper &mapper,
                                       const std::string& folder,
-                                      std::shared_ptr<omnigraph::visualization::GraphColorer<Graph>> colorer,
-                                      const GraphLabeler<Graph>& labeler) {
+                                      std::shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> colorer,
+                                      const visualization::graph_labeler::GraphLabeler<Graph>& labeler) {
     INFO("Writing graph components along contigs");
     auto contigs_to_thread = io::EasyStream(cfg::get().pos.contigs_to_analyze, false);
     contigs_to_thread->reset();
@@ -197,16 +199,16 @@ void WriteGraphComponentsAlongContigs(const Graph& g,
     while (!contigs_to_thread->eof()) {
         (*contigs_to_thread) >> read;
         make_dir(folder + read.name());
-        omnigraph::visualization::WriteComponentsAlongPath(g, mapper.MapSequence(read.sequence()).simple_path(), folder + read.name() + "/",
-                                                           colorer, labeler);
+        visualization::visualization_utils::WriteComponentsAlongPath(g, mapper.MapSequence(read.sequence()).simple_path(),
+                                                                     folder + read.name() + "/", colorer, labeler);
     }
     INFO("Writing graph components along contigs finished");
 }
 
 template<class Graph>
-void WriteKmerComponent(conj_graph_pack &gp, runtime_k::RtSeq const& kp1mer, const std::string& file,
-                        std::shared_ptr<omnigraph::visualization::GraphColorer<Graph>> colorer,
-                        const omnigraph::GraphLabeler<Graph>& labeler) {
+void WriteKmerComponent(conj_graph_pack &gp, RtSeq const& kp1mer, const std::string& file,
+                        std::shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> colorer,
+                        const visualization::graph_labeler::GraphLabeler<Graph>& labeler) {
     if(!gp.index.contains(kp1mer)) {
         WARN("no such kmer in the graph");
         return;
@@ -215,11 +217,11 @@ void WriteKmerComponent(conj_graph_pack &gp, runtime_k::RtSeq const& kp1mer, con
     auto pos = gp.index.get(kp1mer);
     typename Graph::VertexId v = pos.second * 2 < gp.g.length(pos.first) ? gp.g.EdgeStart(pos.first) : gp.g.EdgeEnd(pos.first);
     GraphComponent<Graph> component = omnigraph::VertexNeighborhood<Graph>(gp.g, v);
-    omnigraph::visualization::WriteComponent<Graph>(component, file, colorer, labeler);
+    visualization::visualization_utils::WriteComponent<Graph>(component, file, colorer, labeler);
 }
 
 inline
-optional<runtime_k::RtSeq> FindCloseKP1mer(const conj_graph_pack &gp,
+optional<RtSeq> FindCloseKP1mer(const conj_graph_pack &gp,
                                            size_t genome_pos, size_t k) {
     VERIFY(gp.genome.size() > 0);
     VERIFY(genome_pos < gp.genome.size());
@@ -227,10 +229,10 @@ optional<runtime_k::RtSeq> FindCloseKP1mer(const conj_graph_pack &gp,
     for (size_t diff = 0; diff < magic_const; diff++) {
         for (int dir = -1; dir <= 1; dir += 2) {
             size_t pos = (gp.genome.size() - k + genome_pos + dir * diff) % (gp.genome.size() - k);
-            runtime_k::RtSeq kp1mer = gp.kmer_mapper.Substitute(
-                runtime_k::RtSeq (k + 1, gp.genome.GetSequence(), pos));
+            RtSeq kp1mer = gp.kmer_mapper.Substitute(
+                RtSeq (k + 1, gp.genome.GetSequence(), pos));
             if (gp.index.contains(kp1mer))
-                return optional<runtime_k::RtSeq>(kp1mer);
+                return optional<RtSeq>(kp1mer);
         }
     }
     return boost::none;
@@ -245,7 +247,7 @@ void PrepareForDrawing(conj_graph_pack &gp) {
 
 struct detail_info_printer {
     detail_info_printer(conj_graph_pack &gp,
-                        const omnigraph::GraphLabeler<Graph>& labeler, 
+                        const visualization::graph_labeler::GraphLabeler<Graph>& labeler,
                         const string& folder)
             :  gp_(gp),
                labeler_(labeler),
@@ -263,6 +265,8 @@ struct detail_info_printer {
 
     void ProduceDetailedInfo(const string &pos_name,
                              config::info_printer_pos pos) {
+        using namespace visualization;
+
         static size_t call_cnt = 0;
 
         auto it = cfg::get().info_printers.find(pos);
@@ -335,7 +339,7 @@ struct detail_info_printer {
         PrepareForDrawing(gp_);
     
         auto path1 = FindGenomeMappingPath(gp_.genome.GetSequence(), gp_.g, gp_.index,
-                                          gp_.kmer_mapper).path();
+                                           gp_.kmer_mapper).path();
     
         auto colorer = DefaultColorer(gp_);
     
@@ -345,34 +349,38 @@ struct detail_info_printer {
         }
     
         if (config.write_full_graph) {
-            WriteComponent(GraphComponent<Graph>(gp_.g, gp_.g.begin(), gp_.g.end()), pics_folder + "full_graph.dot", colorer, labeler_);
+            visualization_utils::WriteComponent(GraphComponent<Graph>::WholeGraph(gp_.g),
+                                                pics_folder + "full_graph.dot", colorer, labeler_);
         }
     
         if (config.write_full_nc_graph) {
-            WriteSimpleComponent(GraphComponent<Graph>(gp_.g, gp_.g.begin(), gp_.g.end()), pics_folder + "nc_full_graph.dot", colorer, labeler_);
+            visualization_utils::WriteSimpleComponent(GraphComponent<Graph>::WholeGraph(gp_.g),
+                                                      pics_folder + "nc_full_graph.dot", colorer, labeler_);
         }
     
         if (config.write_components) {
             make_dir(pics_folder + "components/");
-            omnigraph::visualization::WriteComponents(gp_.g, pics_folder + "components/", omnigraph::ReliableSplitter<Graph>(gp_.g), colorer, labeler_);
+            visualization_utils::WriteComponents(gp_.g, pics_folder + "components/",
+                                                 omnigraph::ReliableSplitter<Graph>(gp_.g), colorer, labeler_);
         }
     
         if (!config.components_for_kmer.empty()) {
             string kmer_folder = path::append_path(pics_folder, "kmer_loc/");
             make_dir(kmer_folder);
-            auto kmer = runtime_k::RtSeq(gp_.k_value + 1, config.components_for_kmer.substr(0, gp_.k_value + 1).c_str());
+            auto kmer = RtSeq(gp_.k_value + 1, config.components_for_kmer.substr(0, gp_.k_value + 1).c_str());
             string file_name = path::append_path(kmer_folder, pos_name + ".dot");
             WriteKmerComponent(gp_, kmer, file_name, colorer, labeler_);
         }
     
         if (config.write_components_along_genome) {
             make_dir(pics_folder + "along_genome/");
-            omnigraph::visualization::WriteComponentsAlongPath(gp_.g, path1.sequence(), pics_folder + "along_genome/", colorer, labeler_);
+            visualization_utils::WriteComponentsAlongPath
+                    (gp_.g, path1.sequence(), pics_folder + "along_genome/", colorer, labeler_);
         }
     
         if (config.write_components_along_contigs) {
             make_dir(pics_folder + "along_contigs/");
-            NewExtendedSequenceMapper<Graph, Index> mapper(gp_.g, gp_.index, gp_.kmer_mapper);
+            BasicSequenceMapper<Graph, Index> mapper(gp_.g, gp_.index, gp_.kmer_mapper);
             WriteGraphComponentsAlongContigs(gp_.g, mapper, pics_folder + "along_contigs/", colorer, labeler_);
         }
 
@@ -383,7 +391,7 @@ struct detail_info_printer {
             boost::split(positions, config.components_for_genome_pos,
                          boost::is_any_of(" ,"), boost::token_compress_on);
             for (auto it = positions.begin(); it != positions.end(); ++it) {
-                boost::optional<runtime_k::RtSeq> close_kp1mer = FindCloseKP1mer(gp_,
+                boost::optional<RtSeq> close_kp1mer = FindCloseKP1mer(gp_,
                                                                                  std::stoi(*it), gp_.k_value);
                 if (close_kp1mer) {
                     string locality_folder = path::append_path(pos_loc_folder, *it + "/");
@@ -392,14 +400,14 @@ struct detail_info_printer {
                 } else {
                     WARN(
                         "Failed to find genome kp1mer close to the one at position "
-                        << *it << " in the graph. Which is " << runtime_k::RtSeq (gp_.k_value + 1, gp_.genome.GetSequence(), std::stoi(*it)));
+                        << *it << " in the graph. Which is " << RtSeq (gp_.k_value + 1, gp_.genome.GetSequence(), std::stoi(*it)));
                 }
             }
         }
     }
 
     conj_graph_pack& gp_;
-    const omnigraph::GraphLabeler<Graph>& labeler_;
+    const visualization::graph_labeler::GraphLabeler<Graph>& labeler_;
     string folder_;
 };
 
diff --git a/src/modules/assembly_graph/stats/statistics.hpp b/src/common/assembly_graph/stats/statistics.hpp
similarity index 99%
rename from src/modules/assembly_graph/stats/statistics.hpp
rename to src/common/assembly_graph/stats/statistics.hpp
index 3ab53a5..cb6e7b4 100644
--- a/src/modules/assembly_graph/stats/statistics.hpp
+++ b/src/common/assembly_graph/stats/statistics.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 #include "math/xmath.h"
 #include "pipeline/config_struct.hpp"
 #include "assembly_graph/paths/mapping_path.hpp"
diff --git a/src/modules/empty.cpp b/src/common/empty.cpp
similarity index 100%
copy from src/modules/empty.cpp
copy to src/common/empty.cpp
diff --git a/src/projects/spades/pacbio_aligning.hpp b/src/common/func/func.hpp
similarity index 53%
copy from src/projects/spades/pacbio_aligning.hpp
copy to src/common/func/func.hpp
index 4e7d2a9..a0b130f 100644
--- a/src/projects/spades/pacbio_aligning.hpp
+++ b/src/common/func/func.hpp
@@ -7,17 +7,19 @@
 
 #pragma once
 
-#include "pipeline/stage.hpp"
+#include <functional>
 
-namespace debruijn_graph {
-
-class PacBioAligning : public spades::AssemblyStage {
-public:
-    PacBioAligning()
-            : AssemblyStage("PacBio Aligning", "pacbio_aligning") {
-    }
-    void run(conj_graph_pack &gp, const char*);
-};
+namespace func {
 
+template<class T>
+std::function<void(T)> CombineCallbacks(const std::function<void(T)>& f1,
+                                        const std::function<void(T)>& f2) {
+    return [=] (T t) {
+        if (f1)
+            f1(t);
+        if (f2)
+            f2(t);
+    };
 }
 
+}
diff --git a/src/utils/adt/function_traits.hpp b/src/common/func/function_traits.hpp
similarity index 92%
rename from src/utils/adt/function_traits.hpp
rename to src/common/func/function_traits.hpp
index 5729a41..3facd64 100644
--- a/src/utils/adt/function_traits.hpp
+++ b/src/common/func/function_traits.hpp
@@ -1,11 +1,8 @@
-#ifndef __ADT_FUNCTION_TRAITS__
-#define __ADT_FUNCTION_TRAITS__
-
 #pragma once
 
 #include <functional>
 
-namespace adt {
+namespace func {
 
 template<class F>
 struct function_traits;
@@ -71,6 +68,4 @@ public:
     };
 };
 
-} // namespace adt
-
-#endif // __ADT_FUNCTION_TRAITS__
+} // namespace func
diff --git a/src/modules/math/pred.hpp b/src/common/func/pred.hpp
similarity index 77%
rename from src/modules/math/pred.hpp
rename to src/common/func/pred.hpp
index 493626b..ebe22cc 100644
--- a/src/modules/math/pred.hpp
+++ b/src/common/func/pred.hpp
@@ -1,29 +1,26 @@
-#ifndef __ADT_PRED_HPP__
-#define __ADT_PRED_HPP__
-
 #pragma once
 
-#include "utils/adt/function_traits.hpp"
+#include "function_traits.hpp"
 
 #include <memory>
 #include <functional>
 
-namespace pred {
+namespace func {
 
-template<typename T>
-class TypedPredicate {
+template<class T>
+class AbstractPredicate {
 public:
     typedef T checked_type;
 
-    template<typename P>
-    TypedPredicate(P p)
-            : self_(std::make_shared<TypedPredicateModel < P> > (std::move(p))) { }
+    virtual bool Check(T t) const = 0;
 
-    bool operator()(T x) const {
-        return self_->operator()(x);
-    }
+    bool operator()(T t) const { return Check(t); }
 
-private:
+    virtual ~AbstractPredicate() {}
+};
+
+template<typename T>
+class TypedPredicate {
     struct TypedPredicateConcept {
         virtual ~TypedPredicateConcept() { };
 
@@ -43,6 +40,17 @@ private:
     };
 
     std::shared_ptr<const TypedPredicateConcept> self_;
+
+public:
+    typedef T checked_type;
+
+    template<typename P>
+    TypedPredicate(P p)
+            : self_(std::make_shared<TypedPredicateModel<P>>(std::move(p))) { }
+
+    bool operator()(T x) const {
+        return self_->operator()(x);
+    }
 };
 
 template<typename T>
@@ -115,8 +123,8 @@ private:
 };
 
 template<class P,
-        bool = adt::function_traits<P>::arity == 1 &&
-               std::is_same<typename adt::function_traits<P>::return_type, bool>::value>
+        bool = function_traits<P>::arity == 1 &&
+               std::is_same<typename function_traits<P>::return_type, bool>::value>
 struct is_predicate : public std::true_type {
 };
 
@@ -125,8 +133,8 @@ struct is_predicate<P, false> : public std::false_type {
 };
 
 template<class TP1, class TP2,
-        typename _T1 = typename adt::function_traits<TP1>::template arg<0>::type,
-        typename _T2 = typename adt::function_traits<TP2>::template arg<0>::type,
+        typename _T1 = typename function_traits<TP1>::template arg<0>::type,
+        typename _T2 = typename function_traits<TP2>::template arg<0>::type,
         typename =
         typename std::enable_if<std::is_same<_T1, _T2>::value &&
                                 is_predicate<TP1>::value && is_predicate<TP2>::value
@@ -136,8 +144,8 @@ TypedPredicate<_T1> And(TP1 lhs, TP2 rhs) {
 }
 
 template<class TP1, class TP2,
-        typename _T1 = typename adt::function_traits<TP1>::template arg<0>::type,
-        typename _T2 = typename adt::function_traits<TP2>::template arg<0>::type,
+        typename _T1 = typename function_traits<TP1>::template arg<0>::type,
+        typename _T2 = typename function_traits<TP2>::template arg<0>::type,
         typename =
         typename std::enable_if<std::is_same<_T1, _T2>::value &&
                                 is_predicate<TP1>::value && is_predicate<TP2>::value
@@ -147,7 +155,7 @@ TypedPredicate<_T1> Or(TP1 lhs, TP2 rhs) {
 }
 
 template<class TP,
-        typename _T = typename adt::function_traits<TP>::template arg<0>::type,
+        typename _T = typename function_traits<TP>::template arg<0>::type,
         typename =
         typename std::enable_if<is_predicate<TP>::value>::type>
 TypedPredicate<_T> Not(TP p) {
@@ -164,6 +172,4 @@ TypedPredicate<T> AlwaysFalse() {
     return AlwaysFalseOperator<T>();
 }
 
-} // namespace pred
-
-#endif // __ADT_PRED_HPP__
+} // namespace func
diff --git a/src/modules/io/CMakeLists.txt b/src/common/io/CMakeLists.txt
similarity index 81%
rename from src/modules/io/CMakeLists.txt
rename to src/common/io/CMakeLists.txt
index 5c0fd41..31fe9f4 100644
--- a/src/modules/io/CMakeLists.txt
+++ b/src/common/io/CMakeLists.txt
@@ -8,9 +8,9 @@
 project(input CXX)
 
 add_library(input STATIC
-            reads_io/parser.cpp
-            sam_io/read.cpp
-            sam_io/sam_reader.cpp)
+        reads/parser.cpp
+        sam/read.cpp
+        sam/sam_reader.cpp)
 
 target_link_libraries(input BamTools samtools)
 
diff --git a/src/common/io/dataset_support/dataset_readers.hpp b/src/common/io/dataset_support/dataset_readers.hpp
new file mode 100644
index 0000000..4b04751
--- /dev/null
+++ b/src/common/io/dataset_support/dataset_readers.hpp
@@ -0,0 +1,121 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "utils/logger/logger.hpp"
+#include "utils/simple_tools.hpp"
+#include "io/reads/io_helper.hpp"
+#include "pipeline/library.hpp"
+#include "pipeline/config_struct.hpp"
+
+namespace io {
+
+inline
+PairedStreamPtr paired_easy_reader(const SequencingLibrary<debruijn_graph::config::DataSetData> &lib,
+                                   bool followed_by_rc,
+                                   size_t insert_size,
+                                   bool change_read_order = false,
+                                   bool use_orientation = true,
+                                   OffsetType offset_type = PhredOffset) {
+    ReadStreamList<PairedRead> streams;
+    for (auto read_pair : lib.paired_reads()) {
+        streams.push_back(PairedEasyStream(read_pair.first, read_pair.second, followed_by_rc, insert_size, change_read_order,
+                                           use_orientation, lib.orientation(), offset_type));
+    }
+    return MultifileWrap<PairedRead>(streams);
+}
+
+inline
+ReadStreamList<SingleRead> single_easy_readers(const SequencingLibrary<debruijn_graph::config::DataSetData> &lib,
+                                               bool followed_by_rc,
+                                               bool including_paired_reads,
+                                               bool handle_Ns = true,
+                                               OffsetType offset_type = PhredOffset) {
+    ReadStreamList<SingleRead> streams;
+    if (including_paired_reads) {
+      for (const auto& read : lib.reads()) {
+        //do we need input_file function here?
+        streams.push_back(EasyStream(read, followed_by_rc, handle_Ns, offset_type));
+      }
+    } else {
+      for (const auto& read : lib.single_reads()) {
+        streams.push_back(EasyStream(read, followed_by_rc, handle_Ns, offset_type));
+      }
+    }
+    return streams;
+}
+
+inline
+SingleStreamPtr single_easy_reader(const SequencingLibrary<debruijn_graph::config::DataSetData> &lib,
+                                   bool followed_by_rc,
+                                   bool including_paired_reads,
+                                   bool handle_Ns = true,
+                                   OffsetType offset_type = PhredOffset) {
+    return MultifileWrap<io::SingleRead>(
+           single_easy_readers(lib, followed_by_rc, including_paired_reads, handle_Ns, offset_type));
+}
+
+inline
+PairedStreamPtr paired_easy_reader_for_libs(std::vector<size_t> libs,
+                                            bool followed_by_rc,
+                                            size_t insert_size,
+                                            bool change_read_order = false,
+                                            bool use_orientation = true,
+                                            OffsetType offset_type = PhredOffset) {
+    ReadStreamList<io::PairedRead> streams;
+    for (size_t i = 0; i < libs.size(); ++i) {
+      streams.push_back(paired_easy_reader(cfg::get().ds.reads[libs[i]],
+                                           followed_by_rc, insert_size, change_read_order, use_orientation, offset_type));
+    }
+    return MultifileWrap<PairedRead>(streams);
+}
+
+
+inline
+PairedStreamPtr paired_easy_reader(bool followed_by_rc,
+                                   size_t insert_size,
+                                   bool change_read_order = false,
+                                   bool use_orientation = true,
+                                   OffsetType offset_type = PhredOffset) {
+
+    std::vector<size_t> all_libs(cfg::get().ds.reads.lib_count());
+    for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i)
+        all_libs[i] = i;
+
+    // FIXME: Should we use only first library?
+    // No, this one is for all libs together
+    return paired_easy_reader_for_libs(all_libs, followed_by_rc, insert_size, change_read_order, use_orientation, offset_type);
+}
+
+
+inline
+SingleStreamPtr single_easy_reader_for_libs(vector<size_t> libs,
+                                            bool followed_by_rc,
+                                            bool including_paired_reads,
+                                            OffsetType offset_type = PhredOffset) {
+    ReadStreamList<SingleRead> streams;
+    for (size_t i = 0; i < libs.size(); ++i) {
+        streams.push_back(single_easy_reader(cfg::get().ds.reads[libs[i]],
+                                             followed_by_rc, including_paired_reads, offset_type));
+    }
+    return MultifileWrap<SingleRead>(streams);
+}
+
+inline
+SingleStreamPtr single_easy_reader(bool followed_by_rc,
+                                   bool including_paired_reads,
+                                   OffsetType offset_type = PhredOffset) {
+
+    std::vector<size_t> all_libs(cfg::get().ds.reads.lib_count());
+    for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i)
+        all_libs[i] = i;
+
+    return single_easy_reader_for_libs(all_libs, followed_by_rc, including_paired_reads, offset_type);
+}
+
+}
diff --git a/src/modules/io/dataset_support/read_converter.hpp b/src/common/io/dataset_support/read_converter.hpp
similarity index 66%
rename from src/modules/io/dataset_support/read_converter.hpp
rename to src/common/io/dataset_support/read_converter.hpp
index 1182e7e..6939f1a 100644
--- a/src/modules/io/dataset_support/read_converter.hpp
+++ b/src/common/io/dataset_support/read_converter.hpp
@@ -14,26 +14,32 @@
 
 #pragma once
 
-#include "io/reads_io/binary_converter.hpp"
-#include "io/reads_io/io_helper.hpp"
+#include "io/reads/binary_converter.hpp"
+#include "io/reads/io_helper.hpp"
 #include "dataset_readers.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 
 #include <fstream>
 
-namespace debruijn_graph {
+namespace io {
 
-typedef io::SequencingLibrary<config::DataSetData> SequencingLibrary;
+typedef debruijn_graph::config::dataset dataset;
+typedef debruijn_graph::config::DataSetData DataSetData;
+typedef SequencingLibrary<DataSetData> SequencingLibraryT;
 
 class ReadConverter {
 
 private:
     const static size_t current_binary_format_version = 11;
 
-    static bool LoadLibIfExists(SequencingLibrary& lib) {
+    static bool CheckBinaryReadsExist(SequencingLibraryT& lib) {
+        return path::FileExists(lib.data().binary_reads_info.bin_reads_info_file);
+    }
+
+    static bool LoadLibIfExists(SequencingLibraryT& lib) {
         auto& data = lib.data();
 
-        if (!path::FileExists(data.binary_reads_info.bin_reads_info_file))
+        if (!CheckBinaryReadsExist(lib))
             return false;
 
         std::ifstream info;
@@ -68,7 +74,7 @@ private:
         return true;
     }
 
-    static void ConvertToBinary(SequencingLibrary& lib) {
+    static void ConvertToBinary(SequencingLibraryT& lib) {
         auto& data = lib.data();
         std::ofstream info;
         info.open(data.binary_reads_info.bin_reads_info_file.c_str(), std::ios_base::out);
@@ -77,21 +83,21 @@ private:
 
         INFO("Converting reads to binary format for library #" << data.lib_index << " (takes a while)");
         INFO("Converting paired reads");
-        io::PairedStreamPtr paired_reader = paired_easy_reader(lib, false, 0, false, false);
-        io::BinaryWriter paired_converter(data.binary_reads_info.paired_read_prefix,
+        PairedStreamPtr paired_reader = paired_easy_reader(lib, false, 0, false, false);
+        BinaryWriter paired_converter(data.binary_reads_info.paired_read_prefix,
                                           data.binary_reads_info.chunk_num,
                                           data.binary_reads_info.buffer_size);
 
-        io::ReadStreamStat paired_stat = paired_converter.ToBinary(*paired_reader, lib.orientation());
+        ReadStreamStat paired_stat = paired_converter.ToBinary(*paired_reader, lib.orientation());
         paired_stat.read_count_ *= 2;
 
         INFO("Converting single reads");
 
-        io::SingleStreamPtr single_reader = single_easy_reader(lib, false, false);
-        io::BinaryWriter single_converter(data.binary_reads_info.single_read_prefix,
+        SingleStreamPtr single_reader = single_easy_reader(lib, false, false);
+        BinaryWriter single_converter(data.binary_reads_info.single_read_prefix,
                                           data.binary_reads_info.chunk_num,
                                           data.binary_reads_info.buffer_size);
-        io::ReadStreamStat single_stat = single_converter.ToBinary(*single_reader);
+        ReadStreamStat single_stat = single_converter.ToBinary(*single_reader);
 
         paired_stat.merge(single_stat);
         data.read_length = paired_stat.max_len_;
@@ -111,8 +117,8 @@ private:
     }
 
 public:
-    static void ConvertToBinaryIfNeeded(SequencingLibrary& lib) {
-        if (lib.data().binary_reads_info.binary_coverted)
+    static void ConvertToBinaryIfNeeded(SequencingLibraryT& lib) {
+        if (lib.data().binary_reads_info.binary_coverted && CheckBinaryReadsExist(lib))
             return;
 
         if (LoadLibIfExists(lib)) {
@@ -125,50 +131,50 @@ public:
 
 
 inline
-io::BinaryPairedStreams raw_paired_binary_readers(io::SequencingLibrary<config::DataSetData> &lib,
+BinaryPairedStreams raw_paired_binary_readers(SequencingLibraryT &lib,
                                                   bool followed_by_rc,
                                                   size_t insert_size = 0) {
     ReadConverter::ConvertToBinaryIfNeeded(lib);
     const auto& data = lib.data();
     VERIFY_MSG(data.binary_reads_info.binary_coverted, "Lib was not converted to binary, cannot produce binary stream");
 
-    io::ReadStreamList<io::PairedReadSeq> paired_streams;
+    ReadStreamList<PairedReadSeq> paired_streams;
     for (size_t i = 0; i < data.binary_reads_info.chunk_num; ++i) {
-        paired_streams.push_back(make_shared<io::BinaryFilePairedStream>(data.binary_reads_info.paired_read_prefix,
+        paired_streams.push_back(make_shared<BinaryFilePairedStream>(data.binary_reads_info.paired_read_prefix,
                                                                          i, insert_size));
     }
-    return io::apply_paired_wrappers(followed_by_rc, paired_streams);
+    return apply_paired_wrappers(followed_by_rc, paired_streams);
 }
 
 inline
-io::BinarySingleStreams raw_single_binary_readers(io::SequencingLibrary<config::DataSetData> &lib,
+BinarySingleStreams raw_single_binary_readers(SequencingLibraryT &lib,
                                                   bool followed_by_rc,
                                                   bool including_paired_reads) {
     const auto& data = lib.data();
     ReadConverter::ConvertToBinaryIfNeeded(lib);
     VERIFY_MSG(data.binary_reads_info.binary_coverted, "Lib was not converted to binary, cannot produce binary stream");
 
-    io::BinarySingleStreams single_streams;
+    BinarySingleStreams single_streams;
     for (size_t i = 0; i < data.binary_reads_info.chunk_num; ++i) {
-        single_streams.push_back(make_shared<io::BinaryFileSingleStream>(data.binary_reads_info.single_read_prefix, i));
+        single_streams.push_back(make_shared<BinaryFileSingleStream>(data.binary_reads_info.single_read_prefix, i));
     }
     if (including_paired_reads) {
-        io::BinaryPairedStreams paired_streams;
+        BinaryPairedStreams paired_streams;
         for (size_t i = 0; i < data.binary_reads_info.chunk_num; ++i) {
-            paired_streams.push_back(make_shared<io::BinaryFilePairedStream>(data.binary_reads_info.paired_read_prefix,
+            paired_streams.push_back(make_shared<BinaryFilePairedStream>(data.binary_reads_info.paired_read_prefix,
                                                                              i, 0));
         }
 
-        return io::apply_single_wrappers(followed_by_rc, single_streams, &paired_streams);
+        return apply_single_wrappers(followed_by_rc, single_streams, &paired_streams);
     }
     else {
-        return io::apply_single_wrappers(followed_by_rc, single_streams);
+        return apply_single_wrappers(followed_by_rc, single_streams);
     }
 }
 
 
 inline
-io::BinaryPairedStreams paired_binary_readers(io::SequencingLibrary<config::DataSetData> &lib,
+BinaryPairedStreams paired_binary_readers(SequencingLibraryT &lib,
                                               bool followed_by_rc,
                                               size_t insert_size = 0) {
     return raw_paired_binary_readers(lib, followed_by_rc, insert_size);
@@ -176,7 +182,7 @@ io::BinaryPairedStreams paired_binary_readers(io::SequencingLibrary<config::Data
 
 
 inline
-io::BinarySingleStreams single_binary_readers(io::SequencingLibrary<config::DataSetData> &lib,
+BinarySingleStreams single_binary_readers(SequencingLibraryT &lib,
                                               bool followed_by_rc,
                                               bool including_paired_reads) {
     return raw_single_binary_readers(lib, followed_by_rc, including_paired_reads);
@@ -185,7 +191,7 @@ io::BinarySingleStreams single_binary_readers(io::SequencingLibrary<config::Data
 
 inline
 //todo simplify
-io::BinaryPairedStreams paired_binary_readers_for_libs(config::dataset& dataset_info,
+BinaryPairedStreams paired_binary_readers_for_libs(dataset& dataset_info,
                                                        const std::vector<size_t>& libs,
                                                        bool followed_by_rc,
                                                        size_t insert_size = 0) {
@@ -193,51 +199,51 @@ io::BinaryPairedStreams paired_binary_readers_for_libs(config::dataset& dataset_
     VERIFY(!libs.empty())
     size_t chunk_num = dataset_info.reads[libs.front()].data().binary_reads_info.chunk_num;
 
-    std::vector<io::BinaryPairedStreams> streams(chunk_num);
+    std::vector<BinaryPairedStreams> streams(chunk_num);
     for (size_t i = 0; i < libs.size(); ++i) {
         VERIFY_MSG(chunk_num == dataset_info.reads[libs[i]].data().binary_reads_info.chunk_num,
                    "Cannot create stream for multiple libraries with different chunk_num")
-        io::BinaryPairedStreams lib_streams = raw_paired_binary_readers(dataset_info.reads[libs[i]], followed_by_rc, insert_size);
+        BinaryPairedStreams lib_streams = raw_paired_binary_readers(dataset_info.reads[libs[i]], followed_by_rc, insert_size);
         for (size_t j = 0; j < chunk_num; ++j) {
             streams[j].push_back(lib_streams.ptr_at(j));
         }
     }
 
-    io::BinaryPairedStreams joint_streams;
+    BinaryPairedStreams joint_streams;
     for (size_t j = 0; j < chunk_num; ++j) {
-      joint_streams.push_back(io::MultifileWrap<io::PairedReadSeq>(streams[j]));
+      joint_streams.push_back(MultifileWrap<PairedReadSeq>(streams[j]));
     }
     return joint_streams;
 }
 
 inline
-io::BinarySingleStreams single_binary_readers_for_libs(config::dataset& dataset_info,
+BinarySingleStreams single_binary_readers_for_libs(dataset& dataset_info,
                                                        const std::vector<size_t>& libs,
                                                        bool followed_by_rc,
                                                        bool including_paired_reads) {
     VERIFY(!libs.empty())
     size_t chunk_num = dataset_info.reads[libs.front()].data().binary_reads_info.chunk_num;
 
-    std::vector<io::BinarySingleStreams> streams(chunk_num);
+    std::vector<BinarySingleStreams> streams(chunk_num);
     for (size_t i = 0; i < libs.size(); ++i) {
         VERIFY_MSG(chunk_num == dataset_info.reads[libs[i]].data().binary_reads_info.chunk_num,
                    "Cannot create stream for multiple libraries with different chunk_num")
-        io::BinarySingleStreams lib_streams = raw_single_binary_readers(dataset_info.reads[libs[i]], followed_by_rc, including_paired_reads);
+        BinarySingleStreams lib_streams = raw_single_binary_readers(dataset_info.reads[libs[i]], followed_by_rc, including_paired_reads);
 
         for (size_t j = 0; j < chunk_num; ++j) {
             streams[j].push_back(lib_streams.ptr_at(j));
         }
     }
 
-    io::BinarySingleStreams joint_streams;
+    BinarySingleStreams joint_streams;
     for (size_t j = 0; j < chunk_num; ++j) {
-      joint_streams.push_back(io::MultifileWrap<io::SingleReadSeq>(streams[j]));
+      joint_streams.push_back(MultifileWrap<SingleReadSeq>(streams[j]));
     }
     return joint_streams;
 }
 
 inline
-io::BinaryPairedStreams paired_binary_readers(config::dataset& dataset_info,
+BinaryPairedStreams paired_binary_readers(dataset& dataset_info,
                                               bool followed_by_rc,
                                               size_t insert_size = 0) {
 
@@ -249,7 +255,7 @@ io::BinaryPairedStreams paired_binary_readers(config::dataset& dataset_info,
 }
 
 inline
-io::BinarySingleStreams single_binary_readers(config::dataset& dataset_info,
+BinarySingleStreams single_binary_readers(dataset& dataset_info,
                                               bool followed_by_rc,
                                               bool including_paired_reads) {
   std::vector<size_t> all_libs(dataset_info.reads.lib_count());
@@ -260,13 +266,13 @@ io::BinarySingleStreams single_binary_readers(config::dataset& dataset_info,
 }
 
 inline
-io::BinarySingleStreamPtr single_binary_multireader(config::dataset& dataset_info, bool followed_by_rc, bool including_paired_reads) {
-    return io::MultifileWrap<io::SingleReadSeq>(single_binary_readers(dataset_info, followed_by_rc, including_paired_reads));
+BinarySingleStreamPtr single_binary_multireader(dataset& dataset_info, bool followed_by_rc, bool including_paired_reads) {
+    return MultifileWrap<SingleReadSeq>(single_binary_readers(dataset_info, followed_by_rc, including_paired_reads));
 }
 
 inline
-io::BinaryPairedStreamPtr paired_binary_multireader(config::dataset& dataset_info, bool followed_by_rc, size_t insert_size = 0) {
-    return io::MultifileWrap<io::PairedReadSeq>(paired_binary_readers(dataset_info, followed_by_rc, insert_size));
+BinaryPairedStreamPtr paired_binary_multireader(dataset& dataset_info, bool followed_by_rc, size_t insert_size = 0) {
+    return MultifileWrap<PairedReadSeq>(paired_binary_readers(dataset_info, followed_by_rc, insert_size));
 }
 
 
diff --git a/src/modules/io/kmers_io/kmer_iterator.hpp b/src/common/io/kmers/kmer_iterator.hpp
similarity index 97%
rename from src/modules/io/kmers_io/kmer_iterator.hpp
rename to src/common/io/kmers/kmer_iterator.hpp
index 0e7a38e..07d04a6 100644
--- a/src/modules/io/kmers_io/kmer_iterator.hpp
+++ b/src/common/io/kmers/kmer_iterator.hpp
@@ -1,7 +1,7 @@
 #ifndef __IO_KMER_ITERATOR_HPP__
 #define __IO_KMER_ITERATOR_HPP__
 
-#include "io/kmers_io/mmapped_reader.hpp"
+#include "io/kmers/mmapped_reader.hpp"
 #include <string>
 
 namespace io {
diff --git a/src/modules/io/kmers_io/mmapped_reader.hpp b/src/common/io/kmers/mmapped_reader.hpp
similarity index 99%
rename from src/modules/io/kmers_io/mmapped_reader.hpp
rename to src/common/io/kmers/mmapped_reader.hpp
index 0fbe335..998659f 100644
--- a/src/modules/io/kmers_io/mmapped_reader.hpp
+++ b/src/common/io/kmers/mmapped_reader.hpp
@@ -8,10 +8,10 @@
 #ifndef HAMMER_MMAPPED_READER_HPP
 #define HAMMER_MMAPPED_READER_HPP
 
-#include "utils/adt/pointer_iterator.hpp"
-#include "utils/adt/array_vector.hpp"
+#include "common/adt/pointer_iterator.hpp"
+#include "common/adt/array_vector.hpp"
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 
 #include <boost/iterator/iterator_facade.hpp>
 
diff --git a/src/modules/io/kmers_io/mmapped_writer.hpp b/src/common/io/kmers/mmapped_writer.hpp
similarity index 98%
rename from src/modules/io/kmers_io/mmapped_writer.hpp
rename to src/common/io/kmers/mmapped_writer.hpp
index 1f90a42..9b3b2ce 100644
--- a/src/modules/io/kmers_io/mmapped_writer.hpp
+++ b/src/common/io/kmers/mmapped_writer.hpp
@@ -8,8 +8,8 @@
 #ifndef HAMMER_MMAPPED_WRITER_HPP
 #define HAMMER_MMAPPED_WRITER_HPP
 
-#include "utils/adt/pointer_iterator.hpp"
-#include "utils/adt/array_vector.hpp"
+#include "common/adt/pointer_iterator.hpp"
+#include "common/adt/array_vector.hpp"
 
 #include <string>
 
diff --git a/src/modules/io/reads_io/binary_converter.hpp b/src/common/io/reads/binary_converter.hpp
similarity index 83%
rename from src/modules/io/reads_io/binary_converter.hpp
rename to src/common/io/reads/binary_converter.hpp
index 7da965f..ff427cb 100644
--- a/src/modules/io/reads_io/binary_converter.hpp
+++ b/src/common/io/reads/binary_converter.hpp
@@ -17,10 +17,10 @@
 
 #include <fstream>
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 #include "ireader.hpp"
-#include "io/reads/single_read.hpp"
-#include "io/reads/paired_read.hpp"
+#include "single_read.hpp"
+#include "paired_read.hpp"
 #include "pipeline/library.hpp"
 
 namespace io {
@@ -253,39 +253,6 @@ public:
         return ToBinaryForThread(stream, buf_size_ / (2 * file_num_), thread_num, orientation);
     }
 
-//    template<class Read>
-//    void WriteReads(std::vector<Read>& data) {
-//        size_t chunk_size = data.size() / file_num_;
-//        size_t last_chunk_size = chunk_size + data.size() % file_num_;
-//
-//        for (size_t i = 0; i < file_num_ - 1; ++i) {
-//            file_ds_[i]->write((const char *) &chunk_size, sizeof(chunk_size));
-//        }
-//        file_ds_.back()->write((const char *) &last_chunk_size, sizeof(last_chunk_size));
-//
-//        size_t start_pos = 0;
-//        for (size_t i = 0; i < file_num_ - 1; ++i, start_pos += chunk_size) {
-//            FlushBuffer(data, *file_ds_[i], start_pos, start_pos + chunk_size);
-//        }
-//        FlushBuffer(data, file_ds_.back(), start_pos, data.size());
-//    }
-//
-//    template<class Read>
-//    void WriteSeparatedReads(std::vector< std::vector<Read> >& data) {
-//        if (data.size() != file_num_) {
-//            WARN("Cannot write reads, number of vectors is not equal to thread number");
-//            return;
-//        }
-//
-//        for (size_t i = 0; i < file_num_; ++i) {
-//            size_t size = data[i].size();
-//            file_ds_[i]->write((const char *) &size, sizeof(size));
-//        }
-//
-//        for (size_t i = 0; i < file_num_; ++i) {
-//            FlushBuffer(data[i], *file_ds_[i]);
-//        }
-//    }
 };
 
 
diff --git a/src/common/io/reads/binary_streams.hpp b/src/common/io/reads/binary_streams.hpp
new file mode 100644
index 0000000..9769b15
--- /dev/null
+++ b/src/common/io/reads/binary_streams.hpp
@@ -0,0 +1,140 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include <fstream>
+
+#include "utils/verify.hpp"
+#include "ireader.hpp"
+#include "single_read.hpp"
+#include "paired_read.hpp"
+
+namespace io {
+
+// == Deprecated classes ==
+// Use FileReadStream and InsertSizeModyfing instead
+
+class BinaryFileSingleStream: public PredictableReadStream<SingleReadSeq> {
+private:
+    std::ifstream stream_;
+    ReadStreamStat read_stat_;
+    size_t current_;
+
+public:
+
+    BinaryFileSingleStream(const std::string& file_name_prefix, size_t file_num) {
+        std::string fname;
+        fname = file_name_prefix + "_" + ToString(file_num) + ".seq";
+        stream_.open(fname.c_str(), std::ios_base::binary | std::ios_base::in);
+
+        reset();
+    }
+
+    virtual bool is_open() {
+        return stream_.is_open();
+    }
+
+    virtual bool eof() {
+        return current_ == read_stat_.read_count_;
+    }
+
+    virtual BinaryFileSingleStream& operator>>(SingleReadSeq& read) {
+        read.BinRead(stream_);
+        VERIFY(current_ < read_stat_.read_count_);
+
+        ++current_;
+        return *this;
+    }
+
+    virtual void close() {
+        current_ = 0;
+        stream_.close();
+    }
+
+    virtual void reset() {
+        stream_.clear();
+        stream_.seekg(0);
+        VERIFY(stream_.good());
+        read_stat_.read(stream_);
+        current_ = 0;
+    }
+
+    virtual size_t size() const {
+        return read_stat_.read_count_;
+    }
+
+    virtual ReadStreamStat get_stat() const {
+        return read_stat_;
+    }
+
+};
+
+class BinaryFilePairedStream: public PredictableReadStream<PairedReadSeq> {
+
+private:
+    std::ifstream stream_;
+
+    size_t insert_size_;
+
+    ReadStreamStat read_stat_;
+
+    size_t current_;
+
+
+public:
+
+    BinaryFilePairedStream(const std::string& file_name_prefix, size_t file_num, size_t insert_szie): stream_(), insert_size_ (insert_szie) {
+        std::string fname;
+        fname = file_name_prefix + "_" + ToString(file_num) + ".seq";
+        stream_.open(fname.c_str(), std::ios_base::binary | std::ios_base::in);
+
+        reset();
+    }
+
+    virtual bool is_open() {
+        return stream_.is_open();
+    }
+
+    virtual bool eof() {
+        return current_ >= read_stat_.read_count_;
+    }
+
+    virtual BinaryFilePairedStream& operator>>(PairedReadSeq& read) {
+        read.BinRead(stream_, insert_size_);
+        VERIFY(current_ < read_stat_.read_count_);
+
+        ++current_;
+        return *this;
+    }
+
+    virtual void close() {
+        current_ = 0;
+        stream_.close();
+    }
+
+
+    virtual void reset() {
+        stream_.clear();
+        stream_.seekg(0);
+        VERIFY(stream_.good());
+        read_stat_.read(stream_);
+        current_ = 0;
+    }
+
+    virtual size_t size() const {
+        return read_stat_.read_count_;
+    }
+
+    ReadStreamStat get_stat() const {
+        ReadStreamStat stat = read_stat_;
+        stat.read_count_ *= 2;
+        return stat;
+    }
+};
+
+}
diff --git a/src/modules/io/reads_io/careful_filtering_reader_wrapper.hpp b/src/common/io/reads/careful_filtering_reader_wrapper.hpp
similarity index 99%
rename from src/modules/io/reads_io/careful_filtering_reader_wrapper.hpp
rename to src/common/io/reads/careful_filtering_reader_wrapper.hpp
index 188ba6b..cd7771a 100644
--- a/src/modules/io/reads_io/careful_filtering_reader_wrapper.hpp
+++ b/src/common/io/reads/careful_filtering_reader_wrapper.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 #pragma once
 //todo rename file
-#include "io/reads_io/delegating_reader_wrapper.hpp"
+#include "io/reads/delegating_reader_wrapper.hpp"
 #include "pipeline/library.hpp"
 
 namespace io {
diff --git a/src/modules/io/reads_io/converting_reader_wrapper.hpp b/src/common/io/reads/converting_reader_wrapper.hpp
similarity index 100%
rename from src/modules/io/reads_io/converting_reader_wrapper.hpp
rename to src/common/io/reads/converting_reader_wrapper.hpp
diff --git a/src/modules/io/reads_io/delegating_reader_wrapper.hpp b/src/common/io/reads/delegating_reader_wrapper.hpp
similarity index 100%
rename from src/modules/io/reads_io/delegating_reader_wrapper.hpp
rename to src/common/io/reads/delegating_reader_wrapper.hpp
diff --git a/src/modules/io/reads_io/fasta_fastq_gz_parser.hpp b/src/common/io/reads/fasta_fastq_gz_parser.hpp
similarity index 95%
rename from src/modules/io/reads_io/fasta_fastq_gz_parser.hpp
rename to src/common/io/reads/fasta_fastq_gz_parser.hpp
index 7cb42c0..d976577 100644
--- a/src/modules/io/reads_io/fasta_fastq_gz_parser.hpp
+++ b/src/common/io/reads/fasta_fastq_gz_parser.hpp
@@ -29,11 +29,11 @@
 #include <zlib.h>
 #include <string>
 #include "kseq/kseq.h"
-#include "dev_support/verify.hpp"
-#include "io/reads/single_read.hpp"
-#include "io/reads_io/parser.hpp"
-#include "data_structures/sequence/quality.hpp"
-#include "data_structures/sequence/nucl.hpp"
+#include "utils/verify.hpp"
+#include "single_read.hpp"
+#include "io/reads/parser.hpp"
+#include "sequence/quality.hpp"
+#include "sequence/nucl.hpp"
 
 namespace io {
 
diff --git a/src/modules/io/reads_io/file_reader.hpp b/src/common/io/reads/file_reader.hpp
similarity index 97%
rename from src/modules/io/reads_io/file_reader.hpp
rename to src/common/io/reads/file_reader.hpp
index c9152d0..49037d6 100644
--- a/src/modules/io/reads_io/file_reader.hpp
+++ b/src/common/io/reads/file_reader.hpp
@@ -16,9 +16,9 @@
 #pragma once
 
 #include "ireader.hpp"
-#include "io/reads/single_read.hpp"
+#include "single_read.hpp"
 #include "parser.hpp"
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 
 namespace io {
 
diff --git a/src/modules/io/reads_io/filtering_reader_wrapper.hpp b/src/common/io/reads/filtering_reader_wrapper.hpp
similarity index 100%
rename from src/modules/io/reads_io/filtering_reader_wrapper.hpp
rename to src/common/io/reads/filtering_reader_wrapper.hpp
diff --git a/src/modules/io/reads_io/io_helper.hpp b/src/common/io/reads/io_helper.hpp
similarity index 93%
rename from src/modules/io/reads_io/io_helper.hpp
rename to src/common/io/reads/io_helper.hpp
index 2f42348..7eea77c 100644
--- a/src/modules/io/reads_io/io_helper.hpp
+++ b/src/common/io/reads/io_helper.hpp
@@ -8,8 +8,8 @@
 #pragma once
 
 #include "read_stream_vector.hpp"
-#include "io/reads/single_read.hpp"
-#include "io/reads/paired_read.hpp"
+#include "single_read.hpp"
+#include "paired_read.hpp"
 #include "file_reader.hpp"
 #include "paired_readers.hpp"
 #include "binary_streams.hpp"
@@ -35,12 +35,6 @@ namespace io {
     typedef std::shared_ptr<BinaryPairedStream> BinaryPairedStreamPtr;
     typedef ReadStreamList<PairedReadSeq> BinaryPairedStreams;
 
-    //old
-//    typedef io::IReader<io::SingleReadSeq> SequenceSingleReadStream;
-//    typedef io::IReader<io::PairedReadSeq> SequencePairedReadStream;
-//    typedef io::MultifileReader<io::PairedRead> MultiPairedStream;
-//    typedef io::MultifileReader<io::SingleRead> MultiSingleStream;
-
     inline BinarySingleStreams apply_single_wrappers(bool followed_by_rc,
                                                      BinarySingleStreams& single_readers,
                                                      BinaryPairedStreams* paired_readers = 0) {
diff --git a/src/modules/io/reads_io/ireader.hpp b/src/common/io/reads/ireader.hpp
similarity index 98%
rename from src/modules/io/reads_io/ireader.hpp
rename to src/common/io/reads/ireader.hpp
index e3e286d..252bb5e 100644
--- a/src/modules/io/reads_io/ireader.hpp
+++ b/src/common/io/reads/ireader.hpp
@@ -8,7 +8,7 @@
 #pragma once
 
 #include <boost/noncopyable.hpp>
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 
 namespace io {
 
diff --git a/src/modules/io/reads_io/ireadstream.hpp b/src/common/io/reads/ireadstream.hpp
similarity index 90%
rename from src/modules/io/reads_io/ireadstream.hpp
rename to src/common/io/reads/ireadstream.hpp
index 3cc34d0..e9f4089 100644
--- a/src/modules/io/reads_io/ireadstream.hpp
+++ b/src/common/io/reads/ireadstream.hpp
@@ -17,9 +17,9 @@
 
 #include "kseq/kseq.h"
 #include <zlib.h>
-#include "dev_support/verify.hpp"
-#include "io/reads/read.hpp"
-#include "data_structures/sequence/nucl.hpp"
+#include "utils/verify.hpp"
+#include "read.hpp"
+#include "sequence/nucl.hpp"
 
 // STEP 1: declare the type of file handler and the read() function
 KSEQ_INIT(gzFile, gzread)
@@ -33,13 +33,11 @@ class ireadstream {
 public:
 typedef Read ReadT;
 
-ireadstream(const std::string &filename) : offset_(Read::PHRED_OFFSET) {
-    filename_ = filename;
+ireadstream(const std::string &filename) : filename_(filename), offset_(Read::PHRED_OFFSET)  {
     is_open_ = open(filename);
 }
 
-ireadstream(const std::string &filename, int offset) : offset_(offset) {
-    filename_ = filename;
+ireadstream(const std::string &filename, int offset) : filename_(filename), offset_(offset) {
     is_open_ = open(filename);
 }
 
@@ -158,9 +156,9 @@ while (!stream.eof() && count++ < 10000) {
     std::string q_str = r.getQualityString();
     for (size_t i = 0; i < q_str.size(); ++i) {
         int q_val = q_str[i];
-        if (q_val < 59)
+        if (q_val < ';')
             return 33;
-        if (q_val > 74)
+        if (q_val > 'K')
             return 64;
     }
 }
diff --git a/src/modules/io/reads_io/modifying_reader_wrapper.hpp b/src/common/io/reads/modifying_reader_wrapper.hpp
similarity index 96%
rename from src/modules/io/reads_io/modifying_reader_wrapper.hpp
rename to src/common/io/reads/modifying_reader_wrapper.hpp
index 5575e92..ec4a137 100644
--- a/src/modules/io/reads_io/modifying_reader_wrapper.hpp
+++ b/src/common/io/reads/modifying_reader_wrapper.hpp
@@ -7,12 +7,12 @@
 
 #pragma once
 
-#include "dev_support/verify.hpp"
-#include "io/reads_io/delegating_reader_wrapper.hpp"
+#include "utils/verify.hpp"
+#include "delegating_reader_wrapper.hpp"
+#include "single_read.hpp"
 #include "paired_readers.hpp"
 
 #include <memory>
-#include <io/reads/single_read.hpp>
 
 namespace io {
 
diff --git a/src/modules/io/reads_io/mpmc_bounded.hpp b/src/common/io/reads/mpmc_bounded.hpp
similarity index 100%
rename from src/modules/io/reads_io/mpmc_bounded.hpp
rename to src/common/io/reads/mpmc_bounded.hpp
diff --git a/src/modules/io/reads_io/multifile_reader.hpp b/src/common/io/reads/multifile_reader.hpp
similarity index 100%
rename from src/modules/io/reads_io/multifile_reader.hpp
rename to src/common/io/reads/multifile_reader.hpp
diff --git a/src/modules/io/reads_io/orientation.hpp b/src/common/io/reads/orientation.hpp
similarity index 100%
rename from src/modules/io/reads_io/orientation.hpp
rename to src/common/io/reads/orientation.hpp
diff --git a/src/modules/io/reads_io/osequencestream.hpp b/src/common/io/reads/osequencestream.hpp
similarity index 87%
rename from src/modules/io/reads_io/osequencestream.hpp
rename to src/common/io/reads/osequencestream.hpp
index 6124aef..9545f8c 100644
--- a/src/modules/io/reads_io/osequencestream.hpp
+++ b/src/common/io/reads/osequencestream.hpp
@@ -17,35 +17,36 @@
 #include <fstream>
 #include <string>
 #include <vector>
-#include "io/reads/single_read.hpp"
-#include "io/reads/paired_read.hpp"
+#include "single_read.hpp"
+#include "paired_read.hpp"
 
 namespace io {
 
-inline std::string MakeContigId(int number, size_t length, const std::string& prefix = "NODE") {
+inline std::string MakeContigId(size_t number, size_t length, const std::string& prefix = "NODE") {
     return prefix + "_" + ToString(number) + "_length_" + ToString(length);
 }
 
-inline std::string MakeContigId(int number, size_t length, double coverage, const std::string& prefix = "NODE") {
-    return prefix + "_" + ToString(number)  + "_length_" + ToString(length) + "_cov_" + ToString(coverage);
+inline std::string MakeContigId(size_t number, size_t length, double coverage, const std::string& prefix = "NODE") {
+    return MakeContigId(number, length, prefix) + "_cov_" + ToString(coverage);
 }
 
-inline std::string MakeContigId(int number, size_t length, double coverage, size_t id, const std::string& prefix = "NODE") {
-    return prefix + "_" + ToString(number)  + "_length_" + ToString(length) + "_cov_" + ToString(coverage)  + "_ID_" +  ToString(id);
+inline std::string MakeContigId(size_t number, size_t length, double coverage, size_t id, const std::string& prefix = "NODE") {
+    return MakeContigId(number, length, coverage, prefix) + "_ID_" +  ToString(id);
 }
-inline std::string MakeContigComponentId(int number, size_t length, double coverage, size_t id, size_t component_id, const std::string& prefix = "NODE") {
-    return prefix + "_" + ToString(number)  + "_length_" + ToString(length) + "_cov_" + ToString(coverage)  + "_ID_" +  ToString(id) + "_component_" + ToString(component_id);
-}
-inline std::string MakeContigComponentId(int number, size_t length, double coverage, size_t component_id, const std::string& prefix = "NODE") {
-    return prefix + "_"  + ToString(number)  + "_length_" + ToString(length) + "_cov_" + ToString(coverage)  + "_component_" + ToString(component_id);
+
+inline std::string MakeRNAContigId(size_t number, size_t length, double coverage, size_t gene_id, size_t isoform_id, const std::string& prefix = "NODE") {
+    return MakeContigId(number, length, coverage, prefix) + "_g" + ToString(gene_id)  + "_i" + ToString(isoform_id);
 }
 
+inline std::string MakeContigComponentId(size_t number, size_t length, double coverage, size_t component_id, const std::string& prefix = "NODE") {
+    return MakeContigId(number, length, coverage, prefix)  + "_component_" + ToString(component_id);
+}
 
 class osequencestream {
 protected:
     std::ofstream ofstream_;
 
-    int id_;
+    size_t id_;
 
     void write_str(const std::string& s) {
         size_t cur = 0;
@@ -62,9 +63,10 @@ protected:
 
 public:
     osequencestream(const std::string& filename): id_(1) {
-        ofstream_.open(filename.c_str());
+            ofstream_.open(filename.c_str());
     }
 
+
     virtual ~osequencestream() {
         ofstream_.close();
     }
@@ -96,6 +98,11 @@ public:
     }
 };
 
+
+
+
+
+
 class PairedOutputSequenceStream {
 protected:
     std::ofstream ofstreaml_;
diff --git a/src/modules/io/reads/paired_read.hpp b/src/common/io/reads/paired_read.hpp
similarity index 100%
rename from src/modules/io/reads/paired_read.hpp
rename to src/common/io/reads/paired_read.hpp
diff --git a/src/modules/io/reads_io/paired_readers.hpp b/src/common/io/reads/paired_readers.hpp
similarity index 99%
rename from src/modules/io/reads_io/paired_readers.hpp
rename to src/common/io/reads/paired_readers.hpp
index 14e84a7..8aaa861 100644
--- a/src/modules/io/reads_io/paired_readers.hpp
+++ b/src/common/io/reads/paired_readers.hpp
@@ -9,7 +9,7 @@
 
 #include <string>
 #include "ireader.hpp"
-#include "io/reads/paired_read.hpp"
+#include "paired_read.hpp"
 #include "file_reader.hpp"
 #include "orientation.hpp"
 
diff --git a/src/modules/io/reads_io/parser.cpp b/src/common/io/reads/parser.cpp
similarity index 92%
rename from src/modules/io/reads_io/parser.cpp
rename to src/common/io/reads/parser.cpp
index f750810..3a5ef81 100644
--- a/src/modules/io/reads_io/parser.cpp
+++ b/src/common/io/reads/parser.cpp
@@ -25,11 +25,11 @@
  * according to extension.
  */
 
-#include <io/reads/single_read.hpp>
-#include "io/reads_io/fasta_fastq_gz_parser.hpp"
-#include "io/reads_io/parser.hpp"
-#include "io/sam_io/bam_parser.hpp"
-#include "dev_support/standard_base.hpp"
+#include "single_read.hpp"
+#include "fasta_fastq_gz_parser.hpp"
+#include "parser.hpp"
+#include "sam/bam_parser.hpp"
+#include "utils/standard_base.hpp"
 
 
 namespace io {
diff --git a/src/modules/io/reads_io/parser.hpp b/src/common/io/reads/parser.hpp
similarity index 98%
rename from src/modules/io/reads_io/parser.hpp
rename to src/common/io/reads/parser.hpp
index f384446..030a985 100644
--- a/src/modules/io/reads_io/parser.hpp
+++ b/src/common/io/reads/parser.hpp
@@ -27,7 +27,7 @@
 #define COMMON_IO_PARSER_HPP
 
 #include <string>
-#include "io/reads/single_read.hpp"
+#include "single_read.hpp"
 
 namespace io {
 
diff --git a/src/modules/io/reads_io/rc_reader_wrapper.hpp b/src/common/io/reads/rc_reader_wrapper.hpp
similarity index 100%
rename from src/modules/io/reads_io/rc_reader_wrapper.hpp
rename to src/common/io/reads/rc_reader_wrapper.hpp
diff --git a/src/modules/io/reads/read.hpp b/src/common/io/reads/read.hpp
similarity index 96%
rename from src/modules/io/reads/read.hpp
rename to src/common/io/reads/read.hpp
index 02f4c74..913a6f3 100644
--- a/src/modules/io/reads/read.hpp
+++ b/src/common/io/reads/read.hpp
@@ -18,12 +18,12 @@
 #include <string>
 #include <iostream>
 #include <fstream>
-#include "dev_support/verify.hpp"
-#include "data_structures/sequence/quality.hpp"
-#include "data_structures/sequence/sequence.hpp"
-#include "data_structures/sequence/nucl.hpp"
-#include "data_structures/sequence/sequence_tools.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/verify.hpp"
+#include "sequence/quality.hpp"
+#include "sequence/sequence.hpp"
+#include "sequence/nucl.hpp"
+#include "sequence/sequence_tools.hpp"
+#include "utils/simple_tools.hpp"
 
 //fixme deprecated!!! used in hammer!
 class Read {
diff --git a/src/modules/io/reads_io/read_processor.hpp b/src/common/io/reads/read_processor.hpp
similarity index 98%
rename from src/modules/io/reads_io/read_processor.hpp
rename to src/common/io/reads/read_processor.hpp
index 1da18de..a8d060b 100644
--- a/src/modules/io/reads_io/read_processor.hpp
+++ b/src/common/io/reads/read_processor.hpp
@@ -8,9 +8,9 @@
 #ifndef __HAMMER_READ_PROCESSOR_HPP__
 #define __HAMMER_READ_PROCESSOR_HPP__
 
-#include "io/reads_io/mpmc_bounded.hpp"
+#include "io/reads/mpmc_bounded.hpp"
 
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
 #pragma GCC diagnostic push
 #ifdef __clang__
diff --git a/src/modules/io/reads_io/read_stream_vector.hpp b/src/common/io/reads/read_stream_vector.hpp
similarity index 75%
rename from src/modules/io/reads_io/read_stream_vector.hpp
rename to src/common/io/reads/read_stream_vector.hpp
index 632e8db..734c451 100644
--- a/src/modules/io/reads_io/read_stream_vector.hpp
+++ b/src/common/io/reads/read_stream_vector.hpp
@@ -41,11 +41,6 @@ public:
     explicit ReadStreamList(size_t size) : readers_(size) {
     }
 
-//  std::vector<Reader*>& get() {
-//      destroy_readers_ = false;
-//      return streams_;
-//  }
-
     //todo use boost iterator facade
     class iterator : public std::iterator<std::input_iterator_tag, ReaderT> {
         typedef typename std::vector<ReaderPtrT>::iterator vec_it;
@@ -72,31 +67,6 @@ public:
         }
     };
 
-//  class const_iterator: public std::iterator<std::input_iterator_tag, Reader> {
-//    typedef typename std::vector<Reader*>::iterator vec_it;
-//    vec_it it_;
-//   public:
-//
-//    const_iterator(vec_it it) : it_(it) {
-//    }
-//
-//    void operator++ () {
-//        ++it_;
-//    }
-//
-//    bool operator== (const const_iterator& that) {
-//        return it_ == that.it_;
-//    }
-//
-//    bool operator!= (const const_iterator& that) {
-//        return it_ != that.it_;
-//    }
-//
-//    ReaderT& operator*() {
-//        return *(*it_);
-//    }
-//  };
-
     ReaderT &operator[](size_t i) {
         return *readers_.at(i);
     }
@@ -130,14 +100,6 @@ public:
         return iterator(readers_.end());
     }
 
-//  const_iterator begin() const {
-//    return iterator(streams_.begin());
-//  }
-//
-//  const_iterator end() const {
-//    return iterator(streams_.end());
-//  }
-
     void push_back(ReaderT *reader_ptr) {
         readers_.push_back(ReaderPtrT(reader_ptr));
     }
@@ -170,14 +132,6 @@ public:
         return stat;
     }
 
-//  void release() {
-//      destroy_readers_ = false;
-//  }
-
-//  const std::vector< Reader * >& get() const {
-//      return streams_;
-//  }
-
 };
 
 }
diff --git a/src/modules/io/reads_io/sequence_reader.hpp b/src/common/io/reads/sequence_reader.hpp
similarity index 95%
rename from src/modules/io/reads_io/sequence_reader.hpp
rename to src/common/io/reads/sequence_reader.hpp
index 515cc9e..86daf5d 100644
--- a/src/modules/io/reads_io/sequence_reader.hpp
+++ b/src/common/io/reads/sequence_reader.hpp
@@ -7,8 +7,8 @@
 
 #pragma once
 
-#include "io/reads_io/ireader.hpp"
-#include "io/reads/single_read.hpp"
+#include "io/reads/ireader.hpp"
+#include "common/basic/reads/single_read.hpp"
 
 namespace io {
 
diff --git a/src/modules/io/reads/single_read.hpp b/src/common/io/reads/single_read.hpp
similarity index 97%
rename from src/modules/io/reads/single_read.hpp
rename to src/common/io/reads/single_read.hpp
index c307eaa..15bac77 100644
--- a/src/modules/io/reads/single_read.hpp
+++ b/src/common/io/reads/single_read.hpp
@@ -7,12 +7,12 @@
 
 #pragma once
 
-#include "dev_support/verify.hpp"
-#include "data_structures/sequence/quality.hpp"
-#include "data_structures/sequence/sequence.hpp"
-#include "data_structures/sequence/nucl.hpp"
-#include "data_structures/sequence/sequence_tools.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/verify.hpp"
+#include "sequence/quality.hpp"
+#include "sequence/sequence.hpp"
+#include "sequence/nucl.hpp"
+#include "sequence/sequence_tools.hpp"
+#include "utils/simple_tools.hpp"
 
 #include <string>
 
@@ -147,23 +147,8 @@ public:
         return SingleRead(new_name, ReverseComplement(seq_), Reverse(qual_), right_offset_, left_offset_);
     }
 
-    SingleRead SubstrStrict(size_t from, size_t to) const {
-        size_t len = to - from;
-        //        return SingleRead(name_, seq_.substr(from, len), qual_.substr(from, len));
-        //        TODO remove naming?
-        std::string new_name;
-        if (name_.length() >= 3 && name_.substr(name_.length() - 3) == "_RC") {
-            new_name = name_.substr(0, name_.length() - 3) + "_SUBSTR(" + ToString(size() - to) + "," +
-                       ToString(size() - from) + ")" + "_RC";
-        } else {
-            new_name = name_ + "_SUBSTR(" + ToString(from) + "," + ToString(to) + ")";
-        }
-        return SingleRead(new_name, seq_.substr(from, len), qual_.substr(from, len),
-                          SequenceOffsetT(from + (size_t) left_offset_),
-                          SequenceOffsetT(size() - to + (size_t) right_offset_));
-    }
-
     SingleRead Substr(size_t from, size_t to) const {
+        VERIFY(from <= to && to <= size());
         size_t len = to - from;
         if (len == size()) {
             return *this;
@@ -246,6 +231,23 @@ private:
         valid_ = SingleRead::IsValid(seq_);
     }
 
+    SingleRead SubstrStrict(size_t from, size_t to) const {
+        size_t len = to - from;
+        //        return SingleRead(name_, seq_.substr(from, len), qual_.substr(from, len));
+        //        TODO remove naming?
+        std::string new_name;
+        if (name_.length() >= 3 && name_.substr(name_.length() - 3) == "_RC") {
+            new_name = name_.substr(0, name_.length() - 3) + "_SUBSTR(" + ToString(size() - to) + "," +
+                       ToString(size() - from) + ")" + "_RC";
+        } else {
+            new_name = name_ + "_SUBSTR(" + ToString(from) + "," + ToString(to) + ")";
+        }
+        return SingleRead(new_name, seq_.substr(from, len), qual_.substr(from, len),
+                          SequenceOffsetT(from + (size_t) left_offset_),
+                          SequenceOffsetT(size() - to + (size_t) right_offset_));
+    }
+
+
 };
 
 inline std::ostream &operator<<(std::ostream &os, const SingleRead &read) {
diff --git a/src/modules/io/reads_io/splitting_wrapper.hpp b/src/common/io/reads/splitting_wrapper.hpp
similarity index 87%
rename from src/modules/io/reads_io/splitting_wrapper.hpp
rename to src/common/io/reads/splitting_wrapper.hpp
index 95a4f23..6665623 100644
--- a/src/modules/io/reads_io/splitting_wrapper.hpp
+++ b/src/common/io/reads/splitting_wrapper.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "io/reads/single_read.hpp"
+#include "single_read.hpp"
 #include "read_stream_vector.hpp"
 #include "delegating_reader_wrapper.hpp"
 
@@ -20,12 +20,12 @@ private:
 
     void FillBuffer(SingleRead& tmp_read) {
         buffer_.clear();
-        for(size_t i = 0; i < tmp_read.size(); i++) {
+        for (size_t i = 0; i < tmp_read.size(); ++i) {
             size_t j = i;
-            while(j < tmp_read.size() && is_nucl(tmp_read.GetSequenceString()[j])) {
+            while (j < tmp_read.size() && is_nucl(tmp_read.GetSequenceString()[j])) {
                 j++;
             }
-            if(j > i) {
+            if (j > i) {
                 buffer_.push_back(tmp_read.Substr(i, j));
                 i = j - 1;
             }
@@ -34,7 +34,7 @@ private:
     }
 
     bool Skip() {
-        while(!this->reader().eof() && buffer_position_ == buffer_.size()) {
+        while (!this->reader().eof() && buffer_position_ == buffer_.size()) {
             SingleRead tmp_read;
             this->reader() >> tmp_read;
             FillBuffer(tmp_read);
diff --git a/src/modules/io/reads_io/vector_reader.hpp b/src/common/io/reads/vector_reader.hpp
similarity index 97%
rename from src/modules/io/reads_io/vector_reader.hpp
rename to src/common/io/reads/vector_reader.hpp
index 9059c6e..74dfc7f 100644
--- a/src/modules/io/reads_io/vector_reader.hpp
+++ b/src/common/io/reads/vector_reader.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "io/reads_io/ireadstream.hpp"
+#include "io/reads/ireadstream.hpp"
 namespace io {
 
 /**
diff --git a/src/modules/io/reads_io/wrapper_collection.hpp b/src/common/io/reads/wrapper_collection.hpp
similarity index 98%
rename from src/modules/io/reads_io/wrapper_collection.hpp
rename to src/common/io/reads/wrapper_collection.hpp
index 3b243bb..1f6c405 100644
--- a/src/modules/io/reads_io/wrapper_collection.hpp
+++ b/src/common/io/reads/wrapper_collection.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "io/reads/single_read.hpp"
+#include "single_read.hpp"
 #include "delegating_reader_wrapper.hpp"
 
 namespace io {
diff --git a/src/modules/io/sam_io/bam_parser.hpp b/src/common/io/sam/bam_parser.hpp
similarity index 87%
rename from src/modules/io/sam_io/bam_parser.hpp
rename to src/common/io/sam/bam_parser.hpp
index 3a22c0d..74de549 100644
--- a/src/modules/io/sam_io/bam_parser.hpp
+++ b/src/common/io/sam/bam_parser.hpp
@@ -8,11 +8,11 @@
 #ifndef COMMON_IO_BAMPARSER_HPP
 #define COMMON_IO_BAMPARSER_HPP
 
-#include "io/reads/single_read.hpp"
-#include "io/reads_io/parser.hpp"
-#include "data_structures/sequence/quality.hpp"
-#include "data_structures/sequence/nucl.hpp"
-#include "dev_support/verify.hpp"
+#include "reads/single_read.hpp"
+#include "io/reads/parser.hpp"
+#include "sequence/quality.hpp"
+#include "sequence/nucl.hpp"
+#include "utils/verify.hpp"
 
 #include "bamtools/api/BamReader.h"
 
diff --git a/src/modules/io/sam_io/bam_reader.hpp b/src/common/io/sam/bam_reader.hpp
similarity index 98%
rename from src/modules/io/sam_io/bam_reader.hpp
rename to src/common/io/sam/bam_reader.hpp
index 57c2c64..d7f8947 100644
--- a/src/modules/io/sam_io/bam_reader.hpp
+++ b/src/common/io/sam/bam_reader.hpp
@@ -7,7 +7,7 @@
 //todo rename to reader
 #pragma once
 
-#include "io/reads_io/ireader.hpp"
+#include "io/reads/ireader.hpp"
 #include "io/reads/single_read.hpp"
 
 #include <bamtools/api/BamReader.h>
diff --git a/src/modules/io/sam_io/read.cpp b/src/common/io/sam/read.cpp
similarity index 97%
rename from src/modules/io/sam_io/read.cpp
rename to src/common/io/sam/read.cpp
index dc9a0e0..de65d03 100644
--- a/src/modules/io/sam_io/read.cpp
+++ b/src/common/io/sam/read.cpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include <io/sam_io/read.hpp>
+#include <io/sam/read.hpp>
 
 using namespace std;
 
diff --git a/src/modules/io/sam_io/read.hpp b/src/common/io/sam/read.hpp
similarity index 100%
rename from src/modules/io/sam_io/read.hpp
rename to src/common/io/sam/read.hpp
diff --git a/src/modules/io/sam_io/sam_reader.cpp b/src/common/io/sam/sam_reader.cpp
similarity index 95%
rename from src/modules/io/sam_io/sam_reader.cpp
rename to src/common/io/sam/sam_reader.cpp
index 5d338fa..63a1cf8 100644
--- a/src/modules/io/sam_io/sam_reader.cpp
+++ b/src/common/io/sam/sam_reader.cpp
@@ -5,10 +5,8 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include <io/sam_io/read.hpp>
-#include <io/sam_io/sam_reader.hpp>
-
-using namespace std;
+#include <io/sam/read.hpp>
+#include <io/sam/sam_reader.hpp>
 
 namespace sam_reader {
 
diff --git a/src/modules/io/sam_io/sam_reader.hpp b/src/common/io/sam/sam_reader.hpp
similarity index 95%
rename from src/modules/io/sam_io/sam_reader.hpp
rename to src/common/io/sam/sam_reader.hpp
index 55dc297..e37df7c 100644
--- a/src/modules/io/sam_io/sam_reader.hpp
+++ b/src/common/io/sam/sam_reader.hpp
@@ -8,7 +8,7 @@
 
 #include "read.hpp"
 
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 
 #include <samtools/sam.h>
 #include <samtools/bam.h>
diff --git a/src/modules/math/smooth.hpp b/src/common/math/smooth.hpp
similarity index 86%
rename from src/modules/math/smooth.hpp
rename to src/common/math/smooth.hpp
index eb53dc9..be12a3e 100644
--- a/src/modules/math/smooth.hpp
+++ b/src/common/math/smooth.hpp
@@ -5,8 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#ifndef __SMOTH_HPP__
-#define __SMOTH_HPP__
+#pragma once
 
 #include <cmath>
 
@@ -42,23 +41,23 @@ static int IndexOfMedianOf3(T u, T v, T w) {
     /* else */ return -1;
 }
 
-enum {
-    SmoothNoEndRule,
-    SmoothCopyEndRule,
-    SmoothTukeyEndRule
+enum class SmoothEndRule {
+    No,
+    Copy,
+    Tukey
 };
 
 template<typename T>
-static bool SmoothEndStep(const T *x, T *y, size_t n, unsigned end_rule) {
+static bool SmoothEndStep(const T *x, T *y, size_t n, SmoothEndRule end_rule) {
     switch (end_rule) {
         default:
-        case SmoothNoEndRule:
+        case SmoothEndRule::No:
             return false;
-        case SmoothCopyEndRule:
+        case SmoothEndRule::Copy:
             y[0] = x[0];
             y[n - 1] = x[n - 1];
             return false;
-        case SmoothTukeyEndRule: {
+        case SmoothEndRule::Tukey: {
             bool chg = false;
             y[0] = MedianOf3(3 * y[1] - 2 * y[2], x[0], y[1]);
             chg = chg || (y[0] != x[0]);
@@ -67,12 +66,10 @@ static bool SmoothEndStep(const T *x, T *y, size_t n, unsigned end_rule) {
             return chg;
         }
     }
-
-    return false;
 }
 
 template<typename T>
-static bool Smooth3(const T *x, T *y, size_t n, unsigned end_rule) {
+static bool Smooth3(const T *x, T *y, size_t n, SmoothEndRule end_rule) {
     // y[] := Running Median of three (x) = "3 (x[])" with "copy ends"
     // ---  return chg := ( y != x )
     bool chg = false;
@@ -89,15 +86,15 @@ static bool Smooth3(const T *x, T *y, size_t n, unsigned end_rule) {
 }
 
 template<typename T>
-static size_t Smooth3R(const T *x, T *y, T *z, size_t n, unsigned end_rule) {
+static size_t Smooth3R(const T *x, T *y, T *z, size_t n, SmoothEndRule end_rule) {
     // y[] := "3R"(x) ; 3R = Median of three, repeated until convergence
     size_t iter;
     bool chg;
 
-    iter = chg = Smooth3(x, y, n, SmoothCopyEndRule);
+    iter = chg = Smooth3(x, y, n, SmoothEndRule::Copy);
 
     while (chg) {
-        if ((chg = Smooth3(y, z, n, SmoothNoEndRule))) {
+        if ((chg = Smooth3(y, z, n, SmoothEndRule::No))) {
             iter += 1;
             for (size_t i = 1; i < n - 1; i++)
                 y[i] = z[i];
@@ -112,7 +109,6 @@ static size_t Smooth3R(const T *x, T *y, T *z, size_t n, unsigned end_rule) {
        or   [two "3"s, 2nd w/o change  ] */
 }
 
-
 template<typename T>
 static bool SplitTest(const T *x, size_t i) {
     // Split test:
@@ -172,7 +168,7 @@ static bool SmoothSplit3(const T *x, T *y, size_t n, bool do_ends) {
 
 template<typename T>
 size_t Smooth3RS3R(std::vector <T> &y, const std::vector <T> &x,
-                   unsigned end_rule = SmoothTukeyEndRule, bool split_ends = false) {
+                   SmoothEndRule end_rule = SmoothEndRule::Tukey, bool split_ends = false) {
     // y[1:n] := "3R S 3R"(x[1:n]);  z = "work";
     size_t iter;
     bool chg;
@@ -190,6 +186,4 @@ size_t Smooth3RS3R(std::vector <T> &y, const std::vector <T> &x,
     return (iter + chg);
 }
 
-};
-
-#endif
+}
diff --git a/src/modules/math/xmath.h b/src/common/math/xmath.h
similarity index 100%
rename from src/modules/math/xmath.h
rename to src/common/math/xmath.h
diff --git a/src/modules/algorithms/CMakeLists.txt b/src/common/modules/CMakeLists.txt
similarity index 68%
rename from src/modules/algorithms/CMakeLists.txt
rename to src/common/modules/CMakeLists.txt
index a4b8d60..fbd848b 100644
--- a/src/modules/algorithms/CMakeLists.txt
+++ b/src/common/modules/CMakeLists.txt
@@ -5,7 +5,9 @@
 # See file LICENSE for details.
 ############################################################################
 
-project(algorithms CXX)
+project(modules CXX)
 
-add_library(algorithms STATIC genome_consistance_checker.cpp)
+add_library(modules STATIC
+            genome_consistance_checker.cpp alignment/bwa_index.cpp)
+target_link_libraries(modules bwa)
 
diff --git a/src/common/modules/alignment/bwa_index.cpp b/src/common/modules/alignment/bwa_index.cpp
new file mode 100644
index 0000000..9973477
--- /dev/null
+++ b/src/common/modules/alignment/bwa_index.cpp
@@ -0,0 +1,327 @@
+//***************************************************************************
+//* Copyright (c) 2016 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "bwa_index.hpp"
+
+#include "bwa/bwa.h"
+#include "bwa/bwamem.h"
+#include "bwa/utils.h"
+#include "kseq/kseq.h"
+
+#include <string>
+#include <memory>
+
+// all of the bwa and kseq stuff is in unaligned sequence
+// best way I had to keep from clashes with klib macros
+
+#define MEM_F_SOFTCLIP  0x200
+
+#define _set_pac(pac, l, c) ((pac)[(l)>>2] |= (c)<<((~(l)&3)<<1))
+#define _get_pac(pac, l) ((pac)[(l)>>2]>>((~(l)&3)<<1)&3)
+extern "C" {
+int is_bwt(uint8_t *T, int n);
+};
+
+namespace alignment {
+
+BWAIndex::BWAIndex(const debruijn_graph::Graph& g)
+        : g_(g),
+          memopt_(mem_opt_init(), free),
+          idx_(nullptr, bwa_idx_destroy) {
+    memopt_->flag |= MEM_F_SOFTCLIP;
+    Init();
+}
+
+BWAIndex::~BWAIndex() {}
+
+// modified from bwa (heng li)
+static uint8_t* seqlib_add1(const kstring_t *seq, const kstring_t *name,
+                            bntseq_t *bns, uint8_t *pac, int64_t *m_pac, int *m_seqs, int *m_holes, bntamb1_t **q) {
+    bntann1_t *p;
+    int lasts;
+    if (bns->n_seqs == *m_seqs) {
+        *m_seqs <<= 1;
+        bns->anns = (bntann1_t*)realloc(bns->anns, *m_seqs * sizeof(bntann1_t));
+    }
+    p = bns->anns + bns->n_seqs;
+    p->name = strdup((char*)name->s);
+    p->anno = strdup("(null");
+    p->gi = 0; p->len = seq->l;
+    p->offset = (bns->n_seqs == 0)? 0 : (p-1)->offset + (p-1)->len;
+    p->n_ambs = 0;
+    for (size_t i = lasts = 0; i < seq->l; ++i) {
+        int c = nst_nt4_table[(int)seq->s[i]];
+        if (c >= 4) { // N
+            if (lasts == seq->s[i]) { // contiguous N
+                ++(*q)->len;
+            } else {
+                if (bns->n_holes == *m_holes) {
+                    (*m_holes) <<= 1;
+                    bns->ambs = (bntamb1_t*)realloc(bns->ambs, (*m_holes) * sizeof(bntamb1_t));
+                }
+                *q = bns->ambs + bns->n_holes;
+                (*q)->len = 1;
+                (*q)->offset = p->offset + i;
+                (*q)->amb = seq->s[i];
+                ++p->n_ambs;
+                ++bns->n_holes;
+            }
+        }
+        lasts = seq->s[i];
+        { // fill buffer
+            if (c >= 4) c = lrand48()&3;
+            if (bns->l_pac == *m_pac) { // double the pac size
+                *m_pac <<= 1;
+                pac = (uint8_t*)realloc(pac, *m_pac/4);
+                memset(pac + bns->l_pac/4, 0, (*m_pac - bns->l_pac)/4);
+            }
+            _set_pac(pac, bns->l_pac, c);
+            ++bns->l_pac;
+        }
+    }
+    ++bns->n_seqs;
+
+    return pac;
+}
+
+static uint8_t* seqlib_make_pac(const debruijn_graph::Graph &g,
+                                const std::vector<debruijn_graph::EdgeId> &ids,
+                                bool for_only) {
+    bntseq_t * bns = (bntseq_t*)calloc(1, sizeof(bntseq_t));
+    uint8_t *pac = 0;
+    int32_t m_seqs, m_holes;
+    int64_t m_pac, l;
+    bntamb1_t *q;
+
+    bns->seed = 11; // fixed seed for random generator
+    m_seqs = m_holes = 8; m_pac = 0x10000;
+    bns->anns = (bntann1_t*)calloc(m_seqs, sizeof(bntann1_t));
+    bns->ambs = (bntamb1_t*)calloc(m_holes, sizeof(bntamb1_t));
+    pac = (uint8_t*) calloc(m_pac/4, 1);
+    q = bns->ambs;
+
+    // move through the sequences
+    // FIXME: not kstring is required
+    for (auto e : ids) {
+        std::string ref = std::to_string(g.int_id(e));
+        std::string seq = g.EdgeNucls(e).str();
+
+        // make the ref name kstring
+        kstring_t * name = (kstring_t*)malloc(1 * sizeof(kstring_t));
+        name->l = ref.length() + 1;
+        name->m = ref.length() + 3;
+        name->s = (char*)calloc(name->m, sizeof(char));
+        memcpy(name->s, ref.c_str(), ref.length()+1);
+
+        // make the sequence kstring
+        kstring_t * t = (kstring_t*)malloc(sizeof(kstring_t));
+        t->l = seq.length();
+        t->m = seq.length() + 2;
+        //t->s = (char*)calloc(v[k].Seq.length(), sizeof(char));
+        t->s = (char*)malloc(t->m);
+        memcpy(t->s, seq.c_str(), seq.length());
+
+        // make the forward only pac
+        pac = seqlib_add1(t, name, bns, pac, &m_pac, &m_seqs, &m_holes, &q);
+
+        // clear it out
+        free(name->s);
+        free(name);
+        free(t->s);
+        free(t);
+    }
+
+    if (!for_only) {
+        // add the reverse complemented sequence
+        m_pac = (bns->l_pac * 2 + 3) / 4 * 4;
+        pac = (uint8_t*)realloc(pac, m_pac/4);
+        memset(pac + (bns->l_pac+3)/4, 0, (m_pac - (bns->l_pac+3)/4*4) / 4);
+        for (l = bns->l_pac - 1; l >= 0; --l, ++bns->l_pac)
+            _set_pac(pac, bns->l_pac, 3-_get_pac(pac, l));
+    }
+
+    bns_destroy(bns);
+
+    return pac;
+}
+
+static bwt_t *seqlib_bwt_pac2bwt(const uint8_t *pac, size_t bwt_seq_lenr) {
+    bwt_t *bwt;
+    ubyte_t *buf;
+    int i;
+
+    // initialization
+    bwt = (bwt_t*)calloc(1, sizeof(bwt_t));
+    bwt->seq_len = bwt_seq_lenr; //bwa_seq_len(fn_pac); //dummy
+    bwt->bwt_size = (bwt->seq_len + 15) >> 4;
+
+    // prepare sequence
+    //pac_size = (bwt->seq_len>>2) + ((bwt->seq_len&3) == 0? 0 : 1);
+    //buf2 = (ubyte_t*)calloc(pac_size, 1);
+    //err_fread_noeof(buf2, 1, pac_size, fp);
+    //err_fclose(fp);
+    memset(bwt->L2, 0, 5 * 4);
+    buf = (ubyte_t*)calloc(bwt->seq_len + 1, 1);
+    for (i = 0; i < (int)bwt->seq_len; ++i) {
+        buf[i] = pac[i>>2] >> ((3 - (i&3)) << 1) & 3;
+        ++bwt->L2[1+buf[i]];
+    }
+    for (i = 2; i <= 4; ++i)
+        bwt->L2[i] += bwt->L2[i-1];
+    //free(buf2);
+
+    // Burrows-Wheeler Transform
+    bwt->primary = is_bwt(buf, bwt->seq_len);
+    bwt->bwt = (uint32_t*)calloc(bwt->bwt_size, 4);
+    for (i = 0; i < (int)bwt->seq_len; ++i)
+        bwt->bwt[i>>4] |= buf[i] << ((15 - (i&15)) << 1);
+    free(buf);
+    return bwt;
+}
+
+static bntann1_t* seqlib_add_to_anns(const std::string& name, const std::string& seq, bntann1_t* ann, size_t offset) {
+    ann->offset = offset;
+    ann->name = (char*)malloc(name.length()+1); // +1 for \0
+    strncpy(ann->name, name.c_str(), name.length()+1);
+    ann->anno = (char*)malloc(7);
+    strcpy(ann->anno, "(null)\0");
+    ann->len = seq.length();
+    ann->n_ambs = 0; // number of "holes"
+    ann->gi = 0; // gi?
+    ann->is_alt = 0;
+
+    return ann;
+}
+
+void BWAIndex::Init() {
+    idx_.reset((bwaidx_t*)calloc(1, sizeof(bwaidx_t)));
+    ids_.clear();
+
+    for (auto it = g_.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
+        ids_.push_back(*it);
+    }
+
+    // construct the forward-only pac
+    uint8_t* fwd_pac = seqlib_make_pac(g_, ids_, true); //true->for_only
+
+    // construct the forward-reverse pac ("packed" 2 bit sequence)
+    uint8_t* pac = seqlib_make_pac(g_, ids_, false); // don't write, becasue only used to make BWT
+
+    size_t tlen = 0;
+    for (auto e : ids_)
+        tlen += g_.EdgeNucls(e).size();
+
+#ifdef DEBUG_BWATOOLS
+    std::cerr << "ref seq length: " << tlen << std::endl;
+#endif
+
+    // make the bwt
+    bwt_t *bwt;
+    bwt = seqlib_bwt_pac2bwt(pac, tlen*2); // *2 for fwd and rev
+    bwt_bwtupdate_core(bwt);
+    free(pac); // done with fwd-rev pac
+
+    // construct sa from bwt and occ. adds it to bwt struct
+    bwt_cal_sa(bwt, 32);
+    bwt_gen_cnt_table(bwt);
+
+    // make the bns
+    bntseq_t * bns = (bntseq_t*) calloc(1, sizeof(bntseq_t));
+    bns->l_pac = tlen;
+    bns->n_seqs = ids_.size();
+    bns->seed = 11;
+    bns->n_holes = 0;
+
+    // make the anns
+    // FIXME: Do we really need this?
+    bns->anns = (bntann1_t*)calloc(ids_.size(), sizeof(bntann1_t));
+    size_t offset = 0, k = 0;
+    for (auto e: ids_) {
+        std::string name = std::to_string(g_.int_id(e));
+        std::string seq = g_.EdgeNucls(e).str();
+        seqlib_add_to_anns(name, seq, &bns->anns[k++], offset);
+        offset += seq.length();
+    }
+
+    // ambs is "holes", like N bases
+    bns->ambs = 0; //(bntamb1_t*)calloc(1, sizeof(bntamb1_t));
+
+    // make the in-memory idx struct
+    idx_->bwt = bwt;
+    idx_->bns = bns;
+    idx_->pac = fwd_pac;
+}
+
+omnigraph::MappingPath<debruijn_graph::EdgeId> BWAIndex::AlignSequence(const Sequence &sequence) const {
+    omnigraph::MappingPath<debruijn_graph::EdgeId> res;
+
+    if (!idx_) return res;
+
+    std::string seq = sequence.str();
+    mem_alnreg_v ar = mem_align1(memopt_.get(), idx_->bwt, idx_->bns, idx_->pac,
+                                 seq.length(), seq.data());
+    for (size_t i = 0; i < ar.n; ++i) {
+        const mem_alnreg_t &a = ar.a[i];
+        if (a.secondary >= 0) continue; // skip secondary alignments
+//        if (a.qe - a.qb < g_.k()) continue; // skip short alignments
+//        if (a.re - a.rb < g_.k()) continue;
+        int is_rev = 0;
+        size_t pos = bns_depos(idx_->bns, a.rb < idx_->bns->l_pac? a.rb : a.re - 1, &is_rev) - idx_->bns->anns[a.rid].offset;
+/*        fprintf(stderr, "%zu: [%lld, %lld]\t[%d, %d] %c %d %s %ld %zu\n",
+                i,
+                a.rb, a.re, a.qb, a.qe,
+                "+-"[is_rev], a.rid,
+                idx_->bns->anns[a.rid].name, g_.int_id(ids_[a.rid]), pos);
+*/
+        size_t initial_range_end = a.qe;
+        size_t mapping_range_end = pos + a.re - a.rb;
+        size_t read_length = seq.length() ;
+        //we had to reduce the range to kmer-based
+        if (pos + (a.re - a.rb) >= g_.length(ids_[a.rid]) ){
+            if (a.qe > g_.k() + a.qb)
+                initial_range_end -= g_.k();
+            else continue;
+            if (a.re > g_.k() + a.rb)
+                mapping_range_end -= g_.k();
+            else continue;
+            if (read_length >= g_.k())
+                read_length -= g_.k();
+            else continue;
+        }
+        // FIXME: Check this!
+        if (!is_rev) {
+            res.push_back(ids_[a.rid],
+                          { { (size_t)a.qb, initial_range_end },
+                            { pos, mapping_range_end}});
+        } else {
+//          fprintf (stderr,"%d %d %d\n", a.qb, a.qe  - g_.k(), seq.length() - g_.k());
+
+//            fprintf (stderr,"%d %d %d\n", pos, pos + a.re - a.rb , g_.length(ids_[a.rid]) );
+
+            res.push_back(g_.conjugate(ids_[a.rid]),
+                          { omnigraph::Range(a.qb, initial_range_end).Invert(read_length),
+                            omnigraph::Range(pos, mapping_range_end ).Invert(g_.length(ids_[a.rid])) });
+
+        }
+
+#if 0
+        mem_aln_t aln = mem_reg2aln(memopt_.get(), idx_->bns, idx_->pac, seq.length(), seq.c_str(), &a);
+
+        // print alignment
+        printf("\t%c\t%s\t%ld %ld %ld\t%d\t", "+-"[aln.is_rev], idx_->bns->anns[aln.rid].name, aln.rid, g_.int_id(ids_[aln.rid]), (long)aln.pos, aln.mapq);
+        for (int k = 0; k < aln.n_cigar; ++k) // print CIGAR
+            printf("%d%c", aln.cigar[k]>>4, "MIDSH"[aln.cigar[k]&0xf]);
+        printf("\t%d\n", aln.NM); // print edit distance
+        free(aln.cigar);
+#endif
+
+    }
+    free(ar.a);
+
+    return res;
+}
+
+}
diff --git a/src/common/modules/alignment/bwa_index.hpp b/src/common/modules/alignment/bwa_index.hpp
new file mode 100644
index 0000000..8bc7037
--- /dev/null
+++ b/src/common/modules/alignment/bwa_index.hpp
@@ -0,0 +1,44 @@
+//***************************************************************************
+//* Copyright (c) 2016 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "assembly_graph/core/graph.hpp"
+#include "assembly_graph/paths/mapping_path.hpp"
+
+extern "C" {
+struct bwaidx_s;
+typedef struct bwaidx_s bwaidx_t;
+
+struct mem_opt_s;
+typedef struct mem_opt_s mem_opt_t;
+};
+
+namespace alignment {
+
+class BWAIndex {
+  public:
+    // bwaidx / memopt are incomplete below, therefore we need to outline ctor
+    // and dtor.
+    BWAIndex(const debruijn_graph::Graph& g);
+    ~BWAIndex();
+
+    omnigraph::MappingPath<debruijn_graph::EdgeId> AlignSequence(const Sequence &sequence) const;
+  private:
+    void Init();
+
+    const debruijn_graph::Graph& g_;
+
+    // Store the options in memory
+    std::unique_ptr<mem_opt_t, void(*)(void*)> memopt_;
+
+    // hold the full index structure
+    std::unique_ptr<bwaidx_t, void(*)(bwaidx_t*)> idx_;
+
+    std::vector<debruijn_graph::EdgeId> ids_;
+};
+    
+}
diff --git a/src/common/modules/alignment/bwa_sequence_mapper.hpp b/src/common/modules/alignment/bwa_sequence_mapper.hpp
new file mode 100644
index 0000000..62a8542
--- /dev/null
+++ b/src/common/modules/alignment/bwa_sequence_mapper.hpp
@@ -0,0 +1,35 @@
+//***************************************************************************
+//* Copyright (c) 2016 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "sequence_mapper.hpp"
+#include "bwa_index.hpp"
+#include "assembly_graph/paths/mapping_path.hpp"
+
+namespace alignment {
+  
+template<class Graph>
+class BWAReadMapper: public debruijn_graph::AbstractSequenceMapper<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    using debruijn_graph::AbstractSequenceMapper<Graph>::g_;
+public:
+    BWAReadMapper(const Graph& g)
+            : debruijn_graph::AbstractSequenceMapper<Graph>(g),
+            index_(g) {}
+
+    omnigraph::MappingPath<EdgeId> MapSequence(const Sequence &sequence) const {
+        return index_.AlignSequence(sequence);
+    }
+
+    ~BWAReadMapper() {
+    }
+
+    BWAIndex index_;
+};
+
+}
+
diff --git a/src/modules/assembly_graph/graph_alignment/edge_index.hpp b/src/common/modules/alignment/edge_index.hpp
similarity index 88%
rename from src/modules/assembly_graph/graph_alignment/edge_index.hpp
rename to src/common/modules/alignment/edge_index.hpp
index 187ea94..da84b58 100644
--- a/src/modules/assembly_graph/graph_alignment/edge_index.hpp
+++ b/src/common/modules/alignment/edge_index.hpp
@@ -7,9 +7,9 @@
 
 #pragma once
 
-#include "assembly_graph/graph_core/graph.hpp"
-#include "assembly_graph/graph_core/action_handlers.hpp"
-#include "data_structures/indices/edge_position_index.hpp"
+#include "common/assembly_graph/core/graph.hpp"
+#include "common/assembly_graph/core/action_handlers.hpp"
+#include "utils/indices/edge_info_updater.hpp"
 #include "edge_index_refiller.hpp"
     
 namespace debruijn_graph {
@@ -24,11 +24,11 @@ class EdgeIndex: public omnigraph::GraphActionHandler<Graph> {
 
 public:
     typedef typename Graph::EdgeId EdgeId;
-    using InnerIndex = KmerFreeEdgeIndex<Graph, runtime_k::RtSeq, kmer_index_traits<runtime_k::RtSeq>, DefaultStoring>;
+    using InnerIndex = KmerFreeEdgeIndex<Graph, DefaultStoring>;
     typedef Graph GraphT;
     typedef typename InnerIndex::KMer KMer;
     typedef typename InnerIndex::KMerIdx KMerIdx;
-    typedef typename InnerIndex::Value Value;
+    typedef typename InnerIndex::KmerPos Value;
 
 private:
     InnerIndex inner_index_;
diff --git a/src/modules/assembly_graph/graph_alignment/edge_index_refiller.cpp b/src/common/modules/alignment/edge_index_refiller.cpp
similarity index 77%
rename from src/modules/assembly_graph/graph_alignment/edge_index_refiller.cpp
rename to src/common/modules/alignment/edge_index_refiller.cpp
index d008b5a..c03c5ad 100644
--- a/src/modules/assembly_graph/graph_alignment/edge_index_refiller.cpp
+++ b/src/common/modules/alignment/edge_index_refiller.cpp
@@ -4,15 +4,15 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "data_structures/indices/edge_index_builders.hpp"
-#include "data_structures/indices/edge_multi_index.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
+#include "utils/indices/edge_index_builders.hpp"
+#include "utils/indices/edge_multi_index.hpp"
+#include "core/graph.hpp"
 
 #include "edge_index_refiller.hpp"
 
 namespace debruijn_graph {
 
-using EdgeIndex = KmerFreeEdgeIndex<ConjugateDeBruijnGraph, runtime_k::RtSeq, kmer_index_traits<runtime_k::RtSeq>>;
+using EdgeIndex = KmerFreeEdgeIndex<ConjugateDeBruijnGraph>;
 
 template<>
 void EdgeIndexRefiller::Refill(EdgeIndex &index,
diff --git a/src/modules/assembly_graph/graph_alignment/edge_index_refiller.hpp b/src/common/modules/alignment/edge_index_refiller.hpp
similarity index 100%
rename from src/modules/assembly_graph/graph_alignment/edge_index_refiller.hpp
rename to src/common/modules/alignment/edge_index_refiller.hpp
diff --git a/src/modules/assembly_graph/graph_alignment/kmer_map.hpp b/src/common/modules/alignment/kmer_map.hpp
similarity index 97%
rename from src/modules/assembly_graph/graph_alignment/kmer_map.hpp
rename to src/common/modules/alignment/kmer_map.hpp
index e2d0f12..478461b 100644
--- a/src/modules/assembly_graph/graph_alignment/kmer_map.hpp
+++ b/src/common/modules/alignment/kmer_map.hpp
@@ -7,15 +7,15 @@
 #ifndef __KMER_MAP_HPP__
 #define __KMER_MAP_HPP__
 
-#include "data_structures/sequence/runtime_k.hpp"
+#include "sequence/rtseq.hpp"
 
 #include <htrie/hat-trie.h>
 #include <boost/iterator/iterator_facade.hpp>
 
 namespace debruijn_graph {
 class KMerMap {
-    typedef runtime_k::RtSeq Kmer;
-    typedef runtime_k::RtSeq Seq;
+    typedef RtSeq Kmer;
+    typedef RtSeq Seq;
     typedef typename Seq::DataType RawSeqData;
 
     value_t* internal_tryget(const Kmer &key) const {
diff --git a/src/modules/assembly_graph/graph_alignment/kmer_mapper.hpp b/src/common/modules/alignment/kmer_mapper.hpp
similarity index 68%
rename from src/modules/assembly_graph/graph_alignment/kmer_mapper.hpp
rename to src/common/modules/alignment/kmer_mapper.hpp
index 0f67d38..1f11d1f 100644
--- a/src/modules/assembly_graph/graph_alignment/kmer_mapper.hpp
+++ b/src/common/modules/alignment/kmer_mapper.hpp
@@ -7,9 +7,8 @@
 
 #pragma once
 
-#include "data_structures/sequence/sequence_tools.hpp"
-#include "data_structures/sequence/runtime_k.hpp"
-#include "utils/adt/kmer_vector.hpp"
+#include "sequence/sequence_tools.hpp"
+#include "common/adt/kmer_vector.hpp"
 #include "edge_index.hpp"
 
 #include "kmer_map.hpp"
@@ -22,8 +21,8 @@ template<class Graph>
 class KmerMapper : public omnigraph::GraphActionHandler<Graph> {
     typedef omnigraph::GraphActionHandler<Graph> base;
     typedef typename Graph::EdgeId EdgeId;
-    typedef runtime_k::RtSeq Kmer;
-    typedef runtime_k::RtSeq Seq;
+    typedef RtSeq Kmer;
+    typedef RtSeq Seq;
     typedef typename Seq::DataType RawSeqData;
 
     unsigned k_;
@@ -47,14 +46,15 @@ class KmerMapper : public omnigraph::GraphActionHandler<Graph> {
     }
 
 public:
-    KmerMapper(const Graph &g, bool verification_on = true) :
-            base(g, "KmerMapper"), k_(unsigned(g.k() + 1)), mapping_(k_), verification_on_(verification_on), normalized_(false) {
+    KmerMapper(const Graph &g) :
+            base(g, "KmerMapper"),
+            k_(unsigned(g.k() + 1)),
+            mapping_(k_),
+            normalized_(false) {
     }
 
     virtual ~KmerMapper() {}
 
-    unsigned get_k() const { return k_; }
-
     auto begin() const -> decltype(mapping_.begin()) {
         return mapping_.begin();
     }
@@ -78,81 +78,61 @@ public:
         normalized_ = true;
     }
 
-    void Revert(const Kmer &kmer) {
-        Kmer old_value = Substitute(kmer);
-        if (old_value != kmer) {
-            mapping_.erase(kmer);
-            mapping_.set(old_value, kmer);
-            normalized_ = false;
-        }
+    unsigned k() const {
+        return k_;
     }
 
+//    void Revert(const Kmer &kmer) {
+//        Kmer old_value = Substitute(kmer);
+//        if (old_value != kmer) {
+//            mapping_.erase(kmer);
+//            mapping_.set(old_value, kmer);
+//            normalized_ = false;
+//        }
+//    }
+
     void Normalize(const Kmer &kmer) {
         mapping_.set(kmer, Substitute(kmer));
     }
 
-    bool CheckCanRemap(const Sequence &old_s, const Sequence &new_s) const {
-        if (!CheckAllDifferent(old_s, new_s))
-            return false;
-
-        size_t old_length = old_s.size() - k_ + 1;
-        size_t new_length = new_s.size() - k_ + 1;
-        UniformPositionAligner aligner(old_s.size() - k_ + 1,
-                                       new_s.size() - k_ + 1);
-        Kmer old_kmer = old_s.start<Kmer>(k_);
-        old_kmer >>= 0;
-        for (size_t i = k_ - 1; i < old_s.size(); ++i) {
-            old_kmer <<= old_s[i];
-            size_t old_kmer_offset = i - k_ + 1;
-            size_t new_kmer_offest = aligner.GetPosition(old_kmer_offset);
-            if (old_kmer_offset * 2 + 1 == old_length && new_length % 2 == 0) {
-                Kmer middle(k_ - 1, new_s, new_length / 2);
-                if (typename Kmer::less2()(middle, !middle)) {
-                    new_kmer_offest = new_length - 1 - new_kmer_offest;
-                }
-            }
-            Kmer new_kmer(k_, new_s, new_kmer_offest);
-            if (mapping_.count(new_kmer)) {
-                if (Substitute(new_kmer) != old_kmer) {
-                    return false;
-                }
-            }
-        }
-        return true;
-    }
-
     void RemapKmers(const Sequence &old_s, const Sequence &new_s) {
         VERIFY(this->IsAttached());
         size_t old_length = old_s.size() - k_ + 1;
         size_t new_length = new_s.size() - k_ + 1;
         UniformPositionAligner aligner(old_s.size() - k_ + 1,
                                        new_s.size() - k_ + 1);
-        Kmer old_kmer = old_s.start<Kmer>(k_);
-
+        Kmer old_kmer = old_s.start<Kmer>(k_) >> 'A';
+        typename Kmer::less2 kmer_less;
         for (size_t i = k_ - 1; i < old_s.size(); ++i) {
-            // Instead of shifting right
-            if (i != k_ - 1) {
-                old_kmer <<= old_s[i];
-            }
+            old_kmer <<= old_s[i];
+
+            // Checking if already have info for this kmer
+            if (mapping_.count(old_kmer))
+                continue;
 
             size_t old_kmer_offset = i - k_ + 1;
             size_t new_kmer_offest = aligner.GetPosition(old_kmer_offset);
             if (old_kmer_offset * 2 + 1 == old_length && new_length % 2 == 0) {
                 Kmer middle(k_-1, new_s, new_length / 2);
-                if (typename Kmer::less2()(middle, !middle)) {
+                if (kmer_less(middle, !middle)) {
                     new_kmer_offest = new_length - 1 - new_kmer_offest;
                 }
             }
             Kmer new_kmer(k_, new_s, new_kmer_offest);
+            if (old_kmer == new_kmer)
+                continue;
+
             if (mapping_.count(new_kmer)) {
-                if (verification_on_)
-                    VERIFY(Substitute(new_kmer) == old_kmer);
-                mapping_.erase(new_kmer);
-            }
-            if (old_kmer != new_kmer) {
-                mapping_.set(old_kmer, new_kmer);
-                normalized_ = false;
+                // Special case of remapping back.
+                // Not sure that we actually need it
+                if (Substitute(new_kmer) == old_kmer)
+                    mapping_.erase(new_kmer);
+                else
+                    continue;
             }
+
+            mapping_.set(old_kmer, new_kmer);
+            normalized_ = false;
         }
     }
 
diff --git a/src/modules/assembly_graph/graph_alignment/kmer_mapper_logger.hpp b/src/common/modules/alignment/kmer_mapper_logger.hpp
similarity index 88%
rename from src/modules/assembly_graph/graph_alignment/kmer_mapper_logger.hpp
rename to src/common/modules/alignment/kmer_mapper_logger.hpp
index bb9ebe2..3643030 100644
--- a/src/modules/assembly_graph/graph_alignment/kmer_mapper_logger.hpp
+++ b/src/common/modules/alignment/kmer_mapper_logger.hpp
@@ -15,9 +15,9 @@
 #ifndef KMER_MAPPER_LOGGER_H_
 #define KMER_MAPPER_LOGGER_H_
 
-#include "data_structures/sequence/sequence.hpp"
-#include "assembly_graph/graph_core/action_handlers.hpp"
-#include "dev_support/standard_base.hpp"
+#include "sequence/sequence.hpp"
+#include "common/assembly_graph/core/action_handlers.hpp"
+#include "utils/standard_base.hpp"
 
 namespace debruijn {
 
diff --git a/src/modules/assembly_graph/graph_alignment/long_read_mapper.hpp b/src/common/modules/alignment/long_read_mapper.hpp
similarity index 60%
rename from src/modules/assembly_graph/graph_alignment/long_read_mapper.hpp
rename to src/common/modules/alignment/long_read_mapper.hpp
index 654bc21..66dbf03 100644
--- a/src/modules/assembly_graph/graph_alignment/long_read_mapper.hpp
+++ b/src/common/modules/alignment/long_read_mapper.hpp
@@ -5,36 +5,34 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-/*
- * long_read_mapper.hpp
- *
- *  Created on: Jun 17, 2013
- *      Author: andrey
- */
-
 #ifndef LONG_READ_MAPPER_HPP_
 #define LONG_READ_MAPPER_HPP_
 
-#include "assembly_graph/graph_alignment/long_read_storage.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper_notifier.hpp"
+#include "long_read_storage.hpp"
+#include "sequence_mapper_notifier.hpp"
 
 namespace debruijn_graph {
 
-class AbstractLongReadMapper: public SequenceMapperListener {
+class LongReadMapper: public SequenceMapperListener {
 public:
-    AbstractLongReadMapper(conj_graph_pack& gp, PathStorage<conj_graph_pack::graph_t>& storage)
-            : gp_(gp), storage_(storage), path_finder_(gp_.g) {
+    typedef vector<vector<EdgeId>> PathsT;
+    typedef MappingPath<EdgeId> MappingT;
+    typedef std::function<PathsT (const MappingT&)> PathExtractionF;
+
+    LongReadMapper(const Graph& g,
+                   PathStorage<Graph>& storage,
+                   PathExtractionF path_extractor)
+            : g_(g),
+              storage_(storage),
+              path_extractor_(path_extractor) {
     }
 
     void StartProcessLibrary(size_t threads_count) override {
         for (size_t i = 0; i < threads_count; ++i)
-            buffer_storages_.emplace_back(gp_.g);
+            buffer_storages_.emplace_back(g_);
     }
 
     void StopProcessLibrary() override {
-        for (size_t i = 0; i < buffer_storages_.size(); ++i) {
-            MergeBuffer(i);
-        }
         buffer_storages_.clear();
     }
 
@@ -45,20 +43,6 @@ public:
         DEBUG("Now size " << storage_.size());
     }
 
-    void ProcessPairedRead(size_t ,
-                           const io::PairedReadSeq&,
-                           const MappingPath<EdgeId>& ,
-                           const MappingPath<EdgeId>&) override {
-        //nothing to do
-    }
-
-    void ProcessPairedRead(size_t ,
-                           const io::PairedRead&,
-                           const MappingPath<EdgeId>& ,
-                           const MappingPath<EdgeId>&) override {
-        //nothing to do
-    }
-
     void ProcessSingleRead(size_t thread_index,
                            const io::SingleRead&,
                            const MappingPath<EdgeId>& read) override {
@@ -71,45 +55,40 @@ public:
         ProcessSingleRead(thread_index, read);
     }
 
-    PathStorage<conj_graph_pack::graph_t>& GetPaths() {
-        return storage_;
+    const Graph& g() const {
+        return g_;
     }
 
 private:
-
-    virtual void ProcessSingleRead(size_t thread_index, const MappingPath<EdgeId>& read) = 0;
-
-protected:
-    conj_graph_pack& gp_;
-    PathStorage<conj_graph_pack::graph_t>& storage_;
-    ReadPathFinder<conj_graph_pack::graph_t> path_finder_;
-    std::vector<PathStorage<conj_graph_pack::graph_t> > buffer_storages_;
-
-};
-
-class SimpleLongReadMapper: public AbstractLongReadMapper {
-public:
-    SimpleLongReadMapper(conj_graph_pack& gp, PathStorage<conj_graph_pack::graph_t>& storage)
-            : AbstractLongReadMapper(gp, storage) {
+    void ProcessSingleRead(size_t thread_index, const MappingPath<EdgeId>& mapping) {
+        DEBUG("Processing read");
+        for (const auto& path : path_extractor_(mapping)) {
+            buffer_storages_[thread_index].AddPath(path, 1, false);
+        }
+        DEBUG("Read processed");
     }
 
-private:
-
-    void ProcessSingleRead(size_t thread_index, const MappingPath<EdgeId>& read) override {
-        vector<EdgeId> path = path_finder_.FindReadPath(read);
-        buffer_storages_[thread_index].AddPath(path, 1, false);
-    }
+    const Graph& g_;
+    PathStorage<Graph>& storage_;
+    std::vector<PathStorage<Graph>> buffer_storages_;
+    PathExtractionF path_extractor_;
+    DECL_LOGGER("LongReadMapper");
 };
 
-class GappedLongReadMapper : public AbstractLongReadMapper {
-private:
-    typedef MappingPathFixer<Graph> GraphMappingPathFixer;
-    const GraphMappingPathFixer path_fixer_;
+class GappedPathExtractor {
+    const Graph& g_;
+    const MappingPathFixer<Graph> path_fixer_;
     const double MIN_MAPPED_RATIO = 0.3;
     const size_t MIN_MAPPED_LENGTH = 100;
 public:
-    GappedLongReadMapper(conj_graph_pack& gp, PathStorage<conj_graph_pack::graph_t>& storage)
-            : AbstractLongReadMapper(gp, storage), path_fixer_(gp.g) {
+    GappedPathExtractor(const Graph& g): g_(g), path_fixer_(g) {
+    }
+
+    vector<vector<EdgeId>> operator() (const MappingPath<EdgeId>& mapping) const {
+        vector<EdgeId> corrected_path = path_fixer_.DeleteSameEdges(
+                mapping.simple_path());
+        corrected_path = FilterBadMappings(corrected_path, mapping);
+        return FindReadPathWithGaps(mapping, corrected_path);
     }
 
 private:
@@ -140,7 +119,7 @@ private:
         size_t mapping_index = 0;
         for (auto edge : corrected_path) {
             size_t mapping_size = CountMappedEdgeSize(edge, mapping_path, mapping_index);
-            size_t edge_len =  gp_.g.length(edge);
+            size_t edge_len =  g_.length(edge);
             //VERIFY(edge_len >= mapping_size);
             if (mapping_size > MIN_MAPPED_LENGTH || 
                     math::gr((double) mapping_size / (double) edge_len, MIN_MAPPED_RATIO)) {
@@ -150,17 +129,6 @@ private:
         return new_corrected_path;
     }
 
-
-    void ProcessSingleRead(size_t thread_index, const MappingPath<EdgeId>& read) override {
-        vector<EdgeId> corrected_path = path_fixer_.DeleteSameEdges(
-                read.simple_path());
-        corrected_path = FilterBadMappings(corrected_path, read);
-        vector<vector<EdgeId>> paths = FindReadPathWithGaps(read, corrected_path);
-        for(auto path : paths) {
-            buffer_storages_[thread_index].AddPath(path, 1, false);
-        }
-    }
-
     vector<vector<EdgeId>> FindReadPathWithGaps(const MappingPath<EdgeId>& mapping_path, vector<EdgeId>& corrected_path) const {
           if (mapping_path.size() == 0) {
               TRACE("read unmapped");
@@ -174,7 +142,7 @@ private:
         vector<vector<EdgeId>> result;
         size_t prev_start = 0;
         for (size_t i = 1; i < path.size(); ++i) {
-            if (gp_.g.EdgeEnd(path[i - 1]) != gp_.g.EdgeStart(path[i])) {
+            if (g_.EdgeEnd(path[i - 1]) != g_.EdgeStart(path[i])) {
                     result.push_back(vector<EdgeId>(path.begin() + prev_start, path.begin() + i));
                     prev_start = i;
             }
@@ -184,6 +152,20 @@ private:
     }
 };
 
+typedef std::function<vector<vector<EdgeId>> (const MappingPath<EdgeId>&)> PathExtractionF;
+
+inline PathExtractionF ChooseProperReadPathExtractor(const Graph& g, io::LibraryType lib_type) {
+    if (lib_type == io::LibraryType::PathExtendContigs || lib_type == io::LibraryType::TSLReads
+        || lib_type == io::LibraryType::TrustedContigs || lib_type == io::LibraryType::UntrustedContigs) {
+        return [&] (const MappingPath<EdgeId>& mapping) {
+            return GappedPathExtractor(g)(mapping);
+        };
+    } else {
+        return [&] (const MappingPath<EdgeId>& mapping) {
+            return vector<vector<EdgeId>>{ReadPathFinder<Graph>(g).FindReadPath(mapping)};
+        };
+    }
+}
 
 }/*longreads*/
 
diff --git a/src/modules/assembly_graph/graph_alignment/long_read_storage.hpp b/src/common/modules/alignment/long_read_storage.hpp
similarity index 83%
rename from src/modules/assembly_graph/graph_alignment/long_read_storage.hpp
rename to src/common/modules/alignment/long_read_storage.hpp
index 44bf89e..2eeaee0 100644
--- a/src/modules/assembly_graph/graph_alignment/long_read_storage.hpp
+++ b/src/common/modules/alignment/long_read_storage.hpp
@@ -28,7 +28,7 @@ private:
     mutable size_t w;
 
 public:
-    vector<EdgeId> getPath() const {
+    const vector<EdgeId>& getPath() const {
         return path;
     }
 
@@ -47,12 +47,13 @@ public:
     PathInfo(const vector<EdgeId> &p, size_t weight = 0) :
             path(p), w(weight) {
     }
+
     PathInfo(const PathInfo<Graph> &other) {
         path = other.path;
         w = other.w;
     }
 
-    string str(Graph &g_) {
+    string str(const Graph &g_) const {
         stringstream s;
         for(auto iter = path.begin(); iter != path.end(); iter ++ ){
             s << g_.int_id(*iter) << " ";
@@ -67,10 +68,10 @@ class PathStorage {
     friend class PathInfo<Graph> ;
     typedef typename Graph::EdgeId EdgeId;
     typedef map<EdgeId, set<PathInfo<Graph> > > InnerIndex;
-private:
-    Graph &g_;
+
+    const Graph &g_;
     InnerIndex inner_index_;
-    const size_t kLongEdgeForStats = 500;
+    static const size_t kLongEdgeForStats = 500;
 
     void HiddenAddPath(const vector<EdgeId> &p, int w){
         if (p.size() == 0 ) return;
@@ -87,11 +88,12 @@ private:
 
 public:
 
-    PathStorage(Graph &g)
+    PathStorage(const Graph &g)
             : g_(g),
               inner_index_(),
               size_(0) {
     }
+
     PathStorage(const PathStorage & p)
             : g_(p.g_),
               inner_index_(),
@@ -104,6 +106,7 @@ public:
             }
         }
     }
+
     void ReplaceEdges(map<EdgeId, EdgeId> &old_to_new){
         map<int, EdgeId> tmp_map;
 //        for (auto iter = g_.SmartEdgeBegin(); !iter.IsEnd(); ++iter ){
@@ -156,11 +159,14 @@ public:
             HiddenAddPath(rc_p, w);
         }
     }
-    void DumpToFile(const string filename) const{
+
+    void DumpToFile(const string& filename) const{
         map <EdgeId, EdgeId> auxilary;
         DumpToFile(filename, auxilary);
     }
-    void DumpToFile(const string filename, map<EdgeId, EdgeId> &replacement, size_t stats_weight_cutoff = 1, bool need_log = false) const {
+
+    void DumpToFile(const string& filename, const map<EdgeId, EdgeId>& replacement,
+                    size_t stats_weight_cutoff = 1, bool need_log = false) const {
         ofstream filestr(filename);
         set<EdgeId> continued_edges;
 
@@ -190,21 +196,23 @@ public:
         int continued = 0;
         if (need_log) {
             for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
-                if (g_.length(*iter) > kLongEdgeForStats) {
-                    if (!g_.IsDeadEnd(g_.EdgeEnd(*iter))) {
-                        if (continued_edges.find(*iter) == continued_edges.end()) {
-                            if ((replacement.find(*iter) != replacement.end() &&
-                                 continued_edges.find(replacement[*iter]) != continued_edges.end())) {
-                                TRACE("found in teplacement, edges " << g_.int_id(*iter) << " " <<
-                                      g_.int_id(replacement[*iter]) << " skipping ");
+                EdgeId e = *iter;
+                if (g_.length(e) > kLongEdgeForStats) {
+                    if (!g_.IsDeadEnd(g_.EdgeEnd(e))) {
+                        if (continued_edges.find(e) == continued_edges.end()) {
+                            auto replacement_it = replacement.find(e);
+                            if (replacement_it != replacement.end() &&
+                                continued_edges.find(replacement_it->second) != continued_edges.end()) {
+                                TRACE("found in teplacement, edges " << g_.int_id(e) << " " <<
+                                      g_.int_id(replacement_it->second) << " skipping ");
                                 continue;
                             }
-                            TRACE("noncontinued end left " << g_.int_id(*iter));
+                            TRACE("noncontinued end left " << g_.int_id(e));
                             noncontinued++;
                         } else
                             continued++;
                     } else {
-                        TRACE("dead end left " << g_.int_id(*iter));
+                        TRACE("dead end left " << g_.int_id(e));
                         long_gapped++;
                     }
                 }
@@ -215,44 +223,14 @@ public:
         }
     }
 
-    vector<PathInfo<Graph> > GetAllPaths() const {
-        vector<PathInfo<Graph> > res;
-        for (auto iter = inner_index_.begin(); iter != inner_index_.end();
-                ++iter) {
-            for (auto j_iter = iter->second.begin();
-                    j_iter != iter->second.end(); ++j_iter) {
-
-                res.push_back(*j_iter);
-            }
-        }
-        return res;
-    }
-
-
-    vector<PathInfo<Graph> > GetAllPathsNoConjugate() {
-        vector<PathInfo<Graph> > res;
-
-        std::set< PathInfo<Graph> > added;
-        for (auto iter = inner_index_.begin(); iter != inner_index_.end();  ++iter) {
+     void SaveAllPaths(vector<PathInfo<Graph>> &res) const {
+        for (auto iter = inner_index_.begin(); iter != inner_index_.end(); ++iter) {
             for (auto j_iter = iter->second.begin(); j_iter != iter->second.end(); ++j_iter) {
-                if (added.count(*j_iter) > 0) {
-                    continue;
-                }
-
-                added.insert(*j_iter);
-                vector<EdgeId> rc_p(j_iter->path.size()) ;
-                for (size_t i = 0; i < j_iter->path.size(); i++) {
-                    rc_p[i] = g_.conjugate(j_iter->path[j_iter->path.size() - 1 - i]);
-                }
-                added.insert(PathInfo<Graph>(rc_p, j_iter->getWeight()));
-
                 res.push_back(*j_iter);
             }
         }
-        return res;
     }
 
-
     void LoadFromFile(const string s, bool force_exists = true) {
         FILE* file = fopen(s.c_str(), "r");
         if (force_exists) {
@@ -319,7 +297,7 @@ public:
         size_ = 0;
     }
 
-    size_t size() {
+    size_t size() const {
         return size_;
     }
 
diff --git a/src/modules/assembly_graph/graph_alignment/pacbio/pac_index.hpp b/src/common/modules/alignment/pacbio/pac_index.hpp
similarity index 77%
rename from src/modules/assembly_graph/graph_alignment/pacbio/pac_index.hpp
rename to src/common/modules/alignment/pacbio/pac_index.hpp
index 0a1c55a..ff779ab 100644
--- a/src/modules/assembly_graph/graph_alignment/pacbio/pac_index.hpp
+++ b/src/common/modules/alignment/pacbio/pac_index.hpp
@@ -7,16 +7,17 @@
 
 #pragma once
 
-#include "data_structures/indices/edge_multi_index.hpp"
-#include "assembly_graph/graph_alignment/edge_index_refiller.hpp"
+#include "utils/indices/edge_multi_index.hpp"
+#include "common/modules/alignment/edge_index_refiller.hpp"
 #include "assembly_graph/paths/mapping_path.hpp"
 #include "assembly_graph/paths/path_processor.hpp"
 // FIXME: Layering violation, get rid of this
 #include "pipeline/config_struct.hpp"
 #include "pacbio_read_structures.hpp"
-#include "pipeline/config_struct.hpp"
+#include "assembly_graph/graph_support/basic_vertex_conditions.hpp"
 
 #include <algorithm>
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
 
 namespace pacbio {
 enum {
@@ -24,6 +25,21 @@ enum {
     DELETED_COLOR = - 2
 };
 
+struct OneReadMapping {
+    vector<vector<debruijn_graph::EdgeId>> main_storage;
+    vector<GapDescription> gaps;
+    vector<size_t> real_length;
+    //Total used seeds. sum over all subreads;
+    size_t seed_num;
+    OneReadMapping(const vector<vector<debruijn_graph::EdgeId>>& main_storage_,
+                   const vector<GapDescription>& gaps_,
+                   const vector<size_t>& real_length_,
+                   size_t seed_num_) :
+            main_storage(main_storage_), gaps(gaps_), real_length(real_length_), seed_num(seed_num_) {
+    }
+
+};
+
 template<class Graph>
 class PacBioMappingIndex {
 public:
@@ -52,7 +68,7 @@ private:
 
     set<Sequence> banned_kmers;
     debruijn_graph::DeBruijnEdgeMultiIndex<typename Graph::EdgeId> tmp_index;
-    map<pair<VertexId, VertexId>, vector<size_t> > distance_cashed;
+    mutable map<pair<VertexId, VertexId>, size_t> distance_cashed;
     size_t read_count;
     bool ignore_map_to_middle;
     debruijn_graph::config::debruijn_config::pacbio_processor pb_config_;
@@ -105,6 +121,19 @@ public:
         }
     }
 
+
+    bool similar_in_graph(const MappingInstance &a, const MappingInstance &b,
+                 int shift = 0) const {
+        if (b.read_position + shift < a.read_position) {
+            return similar_in_graph(b, a, -shift);
+        } else if (b.read_position == a.read_position) {
+            return (abs(int(b.edge_position) + shift - int(a.edge_position)) < 2);
+        } else {
+            return ((b.edge_position + shift - a.edge_position) * pb_config_.compression_cutoff <= (b.read_position - a.read_position));
+        }
+    }
+
+
     void dfs_cluster(vector<int> &used, vector<MappingInstance> &to_add,
                      const int cur_ind,
                      const typename MappingDescription::iterator iter) const {
@@ -279,7 +308,7 @@ public:
     }
 
     // is "non strictly dominates" required?
-    inline bool dominates(const KmerCluster<Graph> &a,
+    bool dominates(const KmerCluster<Graph> &a,
                           const KmerCluster<Graph> &b) const {
         size_t a_size = a.size;
         size_t b_size = b.size;
@@ -294,9 +323,10 @@ public:
         }
     }
 
-    vector<EdgeId> FillGapsInCluster(vector<pair<size_t, typename ClustersSet::iterator> > &cur_cluster,
-                                     const Sequence &s) {
+    vector<vector<EdgeId>> FillGapsInCluster(const vector<pair<size_t, typename ClustersSet::iterator> > &cur_cluster,
+                                     const Sequence &s) const {
         vector<EdgeId> cur_sorted;
+        vector<vector<EdgeId>> res;
         EdgeId prev_edge = EdgeId(0);
 
         for (auto iter = cur_cluster.begin(); iter != cur_cluster.end();
@@ -334,8 +364,12 @@ public:
                                                           *(iter->second),
                                                           (int) s_add.length(),
                                                           (int) e_add.length());
-                    if (limits.first == -1)
-                        return vector<EdgeId>(0);
+                    if (limits.first == -1) {
+                        res.push_back(cur_sorted);
+                        cur_sorted.clear();
+                        prev_edge = EdgeId(0);
+                        continue;
+                    }
 
                     vector<EdgeId> intermediate_path = BestScoredPath(s, start_v, end_v, limits.first, limits.second, seq_start, seq_end, s_add, e_add);
                     if (intermediate_path.size() == 0) {
@@ -360,7 +394,10 @@ public:
                             }
                             DEBUG(s_buf.str());
                         }
-                        return intermediate_path;
+                        res.push_back(cur_sorted);
+                        cur_sorted.clear();
+                        prev_edge = EdgeId(0);
+                        continue;
                     }
                     for (auto j_iter = intermediate_path.begin(); j_iter != intermediate_path.end(); j_iter++) {
                         cur_sorted.push_back(*j_iter);
@@ -370,17 +407,20 @@ public:
             cur_sorted.push_back(cur_edge);
             prev_edge = cur_edge;
         }
-        return cur_sorted;
+        if (cur_sorted.size() > 0)
+            res.push_back(cur_sorted);
+        return res;
     }
 
     bool TopologyGap(EdgeId first, EdgeId second, bool oriented) const {
-        bool res = (g_.IsDeadStart(g_.EdgeStart(first)) && g_.IsDeadEnd(g_.EdgeEnd(second)));
+        omnigraph::TerminalVertexCondition<Graph> condition(g_);
+        bool res = condition.Check(g_.EdgeEnd(first)) && condition.Check(g_.EdgeStart(second));
         if (!oriented)
-            res |= g_.IsDeadEnd(g_.EdgeEnd(first)) && g_.IsDeadStart(g_.EdgeStart(second));
+            res |= condition.Check(g_.EdgeStart(first)) && condition.Check(g_.EdgeEnd(second));
         return res;
     }
 
-    vector<int> GetWeightedColors(ClustersSet &mapping_descr, Sequence &s) {
+    vector<int> GetWeightedColors(const ClustersSet &mapping_descr) const {
         int len = (int) mapping_descr.size();
         DEBUG("getting colors, table size "<< len);
         vector<vector<int> > cons_table(len);
@@ -417,7 +457,7 @@ public:
                     j_iter != mapping_descr.end(); ++j_iter, ++j) {
                 if (i_iter == j_iter)
                     continue;
-                cons_table[i][j] = IsConsistent(s, *i_iter, *j_iter);
+                cons_table[i][j] = IsConsistent(*i_iter, *j_iter);
             }
         }
         i = 0;
@@ -452,6 +492,7 @@ public:
             if (maxi == -1) {
                 break;
             }
+            cur_color = maxi;
             colors[maxi] = cur_color;
             int real_maxi = maxi, min_i = maxi;
 
@@ -466,25 +507,39 @@ public:
                 }
                 real_maxi --;
             }
-            cur_color ++;
-
         }
         return colors;
     }
 
 
+    GapDescription CreateGapDescription(const KmerCluster<debruijn_graph::Graph>& a,
+                                        const KmerCluster<debruijn_graph::Graph>& b,
+                                        const Sequence& read) const {
+        size_t seq_start = a.sorted_positions[a.last_trustable_index].read_position + pacbio_k;
+        size_t seq_end = b.sorted_positions[b.first_trustable_index].read_position;
+        if (seq_start > seq_end) {
+            DEBUG("Overlapping flanks not supported yet");
+            return GapDescription();
+        }
+        return GapDescription(a.edgeId,
+                              b.edgeId,
+                              read.Subseq(seq_start, seq_end),
+                              a.sorted_positions[a.last_trustable_index].edge_position + pacbio_k - debruijn_k,
+                              b.sorted_positions[b.first_trustable_index].edge_position);
+    }
 
 
-    OneReadMapping<Graph> GetReadAlignment(Sequence &s) {
+    OneReadMapping GetReadAlignment(Sequence &s) const {
         ClustersSet mapping_descr = GetOrderClusters(s);
         DEBUG("clusters got");
         int len = (int) mapping_descr.size();
         vector<size_t> real_length;
 
-        vector<int> colors = GetWeightedColors(mapping_descr, s);
+        vector<int> colors = GetWeightedColors(mapping_descr);
         vector<vector<EdgeId> > sortedEdges;
+        vector<bool> block_gap_closer;
         vector<typename ClustersSet::iterator> start_clusters, end_clusters;
-        vector<GapDescription<Graph> > illumina_gaps;
+        vector<GapDescription> illumina_gaps;
         vector<int> used(len);
         size_t used_seed_count = 0;
         auto iter = mapping_descr.begin();
@@ -523,7 +578,7 @@ public:
                         ++iter) {
                     auto next_iter = iter + 1;
                     if (next_iter == cur_cluster.end()
-                            || !IsConsistent(s, *(iter->second),
+                            || !IsConsistent(*(iter->second),
                                              *(next_iter->second))) {
                         if (next_iter != cur_cluster.end()) {
                             DEBUG("clusters splitted:");
@@ -532,51 +587,64 @@ public:
                         }
                         vector<pair<size_t, typename ClustersSet::iterator> > splitted_cluster(
                                 cur_cluster_start, next_iter);
-                        vector<EdgeId> cur_sorted = FillGapsInCluster(
+                        auto res = FillGapsInCluster(
                                 splitted_cluster, s);
-                        if (cur_sorted.size() > 0) {
-                            start_clusters.push_back(cur_cluster_start->second);
-                            end_clusters.push_back(iter->second);
-                            sortedEdges.push_back(cur_sorted);
+                        for (auto &cur_sorted:res) {
+                            DEBUG("Adding " <<res.size() << " subreads, cur alignments " << cur_sorted.size());
+                            if (cur_sorted.size() > 0) {
+                                for(EdgeId eee: cur_sorted) {
+                                    DEBUG (g_.int_id(eee));
+                                }
+                                start_clusters.push_back(cur_cluster_start->second);
+                                end_clusters.push_back(iter->second);
+                                sortedEdges.push_back(cur_sorted);
+                                //Blocking gap closing inside clusters;
+                                block_gap_closer.push_back(true);
+                            }
                         }
+                        if (block_gap_closer.size() > 0)
+                            block_gap_closer[block_gap_closer.size() - 1] = false;
                         cur_cluster_start = next_iter;
                     } else {
-              DEBUG("connected consequtive clusters:");
-                          DEBUG("on "<< iter->second->str(g_));
-                          DEBUG("and " << next_iter->second->str(g_));
-
+                        DEBUG("connected consecutive clusters:");
+                        DEBUG("on "<< iter->second->str(g_));
+                        DEBUG("and " << next_iter->second->str(g_));
                     }
-
                 }
             }
         }
         DEBUG("adding gaps between subreads");
-        int alignments = int(sortedEdges.size());
-        for (int i = 0; i < alignments; i++) {
-            for (int j = 0; j < alignments; j++) {
-                EdgeId before_gap = sortedEdges[j][sortedEdges[j].size() - 1];
-                EdgeId after_gap = sortedEdges[i][0];
+
+        for (size_t i = 0; i + 1 < sortedEdges.size() ; i++) {
+                if (block_gap_closer[i])
+                    continue;
+                size_t j = i + 1;
+                EdgeId before_gap = sortedEdges[i][sortedEdges[i].size() - 1];
+                EdgeId after_gap = sortedEdges[j][0];
 //do not add "gap" for rc-jumping
                 if (before_gap != after_gap
                         && before_gap != g_.conjugate(after_gap)) {
                     if (i != j && TopologyGap(before_gap, after_gap, true)) {
                         if (start_clusters[j]->CanFollow(*end_clusters[i])) {
-                            illumina_gaps.push_back(
-                                    GapDescription<Graph>(*end_clusters[i],
-                                                          *start_clusters[j], s,
-                                                          (int) pacbio_k));
+                            auto gap = CreateGapDescription(*end_clusters[i],
+                                                            *start_clusters[j],
+                                                            s);
+                            if (gap != GapDescription()) {
+                                illumina_gaps.push_back(gap);
+                                DEBUG("adding gap between alignments number " << i<< " and " << j);
+                            }
                         }
 
                     }
                 }
-            }
+
         }
-        return OneReadMapping<Graph>(sortedEdges, illumina_gaps, real_length, used_seed_count);
+        return OneReadMapping(sortedEdges, illumina_gaps, real_length, used_seed_count);
     }
 
     std::pair<int, int> GetPathLimits(const KmerCluster<Graph> &a,
                                       const KmerCluster<Graph> &b,
-                                      int s_add_len, int e_add_len) {
+                                      int s_add_len, int e_add_len) const {
         int start_pos = a.sorted_positions[a.last_trustable_index].read_position;
         int end_pos = b.sorted_positions[b.first_trustable_index].read_position;
         int seq_len = -start_pos + end_pos;
@@ -594,62 +662,78 @@ public:
     }
 
 //0 - No, 1 - Yes
-    int IsConsistent(Sequence &s, const KmerCluster<Graph> &a,
-                     const KmerCluster<Graph> &b) {
+    int IsConsistent(const KmerCluster<Graph> &a,
+                     const KmerCluster<Graph> &b) const {
         EdgeId a_edge = a.edgeId;
         EdgeId b_edge = b.edgeId;
         size_t a_id =  g_.int_id(a_edge);
         size_t b_id =  g_.int_id(b_edge);
         DEBUG("clusters on " << a_id << " and " << b_id );
-        if (abs(a.sorted_positions[a.last_trustable_index].read_position - b.sorted_positions[b.first_trustable_index].read_position) > 5000) {
-            DEBUG("...to far5000");
+        if (a.sorted_positions[a.last_trustable_index].read_position + (int) pb_config_.max_path_in_dijkstra <
+            b.sorted_positions[b.first_trustable_index].read_position) {
+            DEBUG ("Clusters are too far in read");
             return 0;
         }
         VertexId start_v = g_.EdgeEnd(a_edge);
         size_t addition = g_.length(a_edge);
         VertexId end_v = g_.EdgeStart(b_edge);
         pair<VertexId, VertexId> vertex_pair = make_pair(start_v, end_v);
-        vector<size_t> result;
-        DEBUG("seq dist:" << s.size()/3);
-        if (distance_cashed.find(vertex_pair) == distance_cashed.end()) {
-            omnigraph::DistancesLengthsCallback<Graph> callback(g_);
-            ProcessPaths(g_, 0, s.size() / 3, start_v,
-                             end_v, callback);
-            result = callback.distances();
-            distance_cashed[vertex_pair] = result;
+
+        size_t result = size_t(-1);
+        bool not_found = true;
+        auto distance_it = distance_cashed.begin();
+#pragma omp critical(pac_index)
+        {
+            distance_it = distance_cashed.find(vertex_pair);
+            not_found = (distance_it == distance_cashed.end());
+        }
+        if (not_found) {
+//TODO: constants
+            omnigraph::DijkstraHelper<debruijn_graph::Graph>::BoundedDijkstra dijkstra(
+                    omnigraph::DijkstraHelper<debruijn_graph::Graph>::CreateBoundedDijkstra(g_, pb_config_.max_path_in_dijkstra, pb_config_.max_vertex_in_dijkstra));
+            dijkstra.Run(start_v);
+            if (dijkstra.DistanceCounted(end_v)) {
+                result = dijkstra.GetDistance(end_v);
+            }
+#pragma omp critical(pac_index)
+        {
+            distance_it = distance_cashed.insert({vertex_pair, result}).first;
+        }
         } else {
-      DEBUG("taking from cashed");
-    }
-        DEBUG("addition: " << addition << " found " << result.size() << " lengths:" );
-        for (size_t i = 0; i < result.size(); i++) {
-            DEBUG(result[i]);
-    }
-        result = distance_cashed[vertex_pair];
+            DEBUG("taking from cashed");
+        }
+
+
+        result = distance_it->second;
+        DEBUG (result);
+        if (result == size_t(-1)) {
+            return 0;
+        }
         //TODO: Serious optimization possible
-        for (size_t i = 0; i < result.size(); i++) {
-            for (auto a_iter = a.sorted_positions.begin();
-                    a_iter != a.sorted_positions.end(); ++a_iter) {
-                if (a_iter - a.sorted_positions.begin() > 500 &&  a.sorted_positions.end() - a_iter >500) continue;
-                int cnt = 0;
-                for (auto b_iter = b.sorted_positions.begin();
-                        b_iter != b.sorted_positions.end() && cnt <500; ++b_iter, cnt ++) {
-                    if (similar(*a_iter, *b_iter,
-                                (int) (result[i] + addition))) {
-                        return 1;
-                    }
+
+        for (auto a_iter = a.sorted_positions.begin();
+                a_iter != a.sorted_positions.end(); ++a_iter) {
+            if (a_iter - a.sorted_positions.begin() > 500 &&  a.sorted_positions.end() - a_iter >500) continue;
+            int cnt = 0;
+            for (auto b_iter = b.sorted_positions.begin();
+                    b_iter != b.sorted_positions.end() && cnt <500; ++b_iter, cnt ++) {
+                if (similar_in_graph(*a_iter, *b_iter,
+                            (int) (result + addition))) {
+                    return 1;
                 }
-                cnt = 0;
-                if (b.sorted_positions.size() > 500) {
-                    for (auto b_iter = b.sorted_positions.end() - 1;
-                                            b_iter != b.sorted_positions.begin() && cnt < 500; --b_iter, cnt ++) {
-                        if (similar(*a_iter, *b_iter,
-                                    (int) (result[i] + addition))) {
-                            return 1;
-                        }
+            }
+            cnt = 0;
+            if (b.sorted_positions.size() > 500) {
+                for (auto b_iter = b.sorted_positions.end() - 1;
+                                        b_iter != b.sorted_positions.begin() && cnt < 500; --b_iter, cnt ++) {
+                    if (similar_in_graph(*a_iter, *b_iter,
+                                (int) (result + addition))) {
+                        return 1;
                     }
                 }
             }
         }
+
         return 0;
 
     }
@@ -667,7 +751,7 @@ public:
     vector<EdgeId> BestScoredPath(const Sequence &s, VertexId start_v, VertexId end_v,
                                   int path_min_length, int path_max_length,
                                   int start_pos, int end_pos, string &s_add,
-                                  string &e_add) {
+                                  string &e_add) const {
         DEBUG(" Traversing tangled region. Start and end vertices resp: " << g_.int_id(start_v) <<" " << g_.int_id(end_v));
         omnigraph::PathStorageCallback<Graph> callback(g_);
         ProcessPaths(g_,
@@ -681,10 +765,17 @@ public:
         size_t best_path_ind = paths.size();
         size_t best_score = 1000000000;
         DEBUG("need to find best scored path between "<<paths.size()<<" , seq_len " << seq_string.length());
-        if (paths.size() == 0)
+        if (paths.size() == 0) {
+            DEBUG ("no paths");
             return vector<EdgeId>(0);
+        }
+        if (seq_string.length() > pb_config_.max_contigs_gap_length) {
+            DEBUG("Gap is too large");
+            return vector<EdgeId>(0);
+        }
         for (size_t i = 0; i < paths.size(); i++) {
             string cur_string = s_add + PathToString(paths[i]) + e_add;
+            DEBUG("cur_string: " << cur_string <<"\n seq_string " << seq_string);
             if (paths.size() > 1 && paths.size() < 10) {
                 TRACE("candidate path number "<< i << " , len " << cur_string.length());
                 TRACE("graph candidate: " << cur_string);
@@ -703,6 +794,7 @@ public:
                 best_path_ind = i;
             }
         }
+        DEBUG(best_score);
         if (best_score == 1000000000)
             return vector<EdgeId>(0);
         if (paths.size() > 1 && paths.size() < 10) {
@@ -745,15 +837,15 @@ public:
         for (auto iter = largest_clusters.begin(); iter != largest_clusters.end(); ++iter) {
             auto first_cluster = iter->second.sorted_positions[iter->second.first_trustable_index];
             auto last_cluster = iter->second.sorted_positions[iter->second.last_trustable_index];
-            omnigraph::MappingRange range(Range(first_cluster.read_position, last_cluster.read_position),
-                                          Range(first_cluster.edge_position, last_cluster.edge_position));
+            omnigraph::MappingRange range(omnigraph::Range(first_cluster.read_position, last_cluster.read_position),
+                                          omnigraph::Range(first_cluster.edge_position, last_cluster.edge_position));
             result.join({iter->second.edgeId, range});
         }
 
         return result;
     }
 
-    std::pair<EdgeId, size_t> GetUniqueKmerPos(const runtime_k::RtSeq& kmer) const {
+    std::pair<EdgeId, size_t> GetUniqueKmerPos(const RtSeq& kmer) const {
         KeyWithHash kwh = tmp_index.ConstructKWH(kmer);
 
         if (tmp_index.valid(kwh.key())) {
@@ -777,8 +869,8 @@ typename PacBioMappingIndex<Graph>::MappingDescription PacBioMappingIndex<Graph>
     if (s.size() < pacbio_k)
         return res;
 
-    //runtime_k::RtSeq kmer = s.start<runtime_k::RtSeq>(pacbio_k);
-    KeyWithHash kwh = tmp_index.ConstructKWH(s.start<runtime_k::RtSeq>(pacbio_k));
+    //RtSeq kmer = s.start<RtSeq>(pacbio_k);
+    KeyWithHash kwh = tmp_index.ConstructKWH(s.start<RtSeq>(pacbio_k));
 
     for (size_t j = pacbio_k; j < s.size(); ++j) {
         kwh = kwh << s[j];
diff --git a/src/modules/assembly_graph/graph_alignment/pacbio/pacbio_read_structures.hpp b/src/common/modules/alignment/pacbio/pacbio_read_structures.hpp
similarity index 77%
rename from src/modules/assembly_graph/graph_alignment/pacbio/pacbio_read_structures.hpp
rename to src/common/modules/alignment/pacbio/pacbio_read_structures.hpp
index c2ce186..6ae4b7a 100644
--- a/src/modules/assembly_graph/graph_alignment/pacbio/pacbio_read_structures.hpp
+++ b/src/common/modules/alignment/pacbio/pacbio_read_structures.hpp
@@ -7,12 +7,16 @@
 
 #pragma once
 
-#include "data_structures/indices/perfect_hash_map.hpp"
+#include "utils/indices/perfect_hash_map.hpp"
+#include "common/modules/alignment/sequence_mapper.hpp"
+#include "common/assembly_graph/core/graph.hpp"
 #include <algorithm>
 #include <map>
 #include <set>
 
 namespace pacbio {
+typedef omnigraph::GapDescription<debruijn_graph::Graph> GapDescription;
+
 template<class T>
 struct pair_iterator_less {
     bool operator ()(pair<size_t, T> const& a, pair<size_t, T> const& b) const {
@@ -145,71 +149,56 @@ private:
     ;
 };
 
-template<class Graph>
-struct GapDescription {
-    typedef typename Graph::EdgeId EdgeId;
-    typename Graph::EdgeId start, end;
-    Sequence gap_seq;
-    int edge_gap_start_position, edge_gap_end_position;
-
-
-    GapDescription(EdgeId start_e, EdgeId end_e, const Sequence &gap, int gap_start, int gap_end) :
-            start(start_e), end(end_e), gap_seq(gap.str()), edge_gap_start_position(gap_start), edge_gap_end_position(gap_end) {
-    }
-
-    GapDescription(const KmerCluster<Graph> &a, const KmerCluster<Graph> & b, Sequence read, int pacbio_k) {
-        edge_gap_start_position = a.sorted_positions[a.last_trustable_index].edge_position;
-        edge_gap_end_position = b.sorted_positions[b.first_trustable_index].edge_position + pacbio_k - 1;
-        start = a.edgeId;
-        end = b.edgeId;
-        DEBUG(read.str());
-        gap_seq = read.Subseq(a.sorted_positions[a.last_trustable_index].read_position, b.sorted_positions[b.first_trustable_index].read_position + pacbio_k - 1);
-        DEBUG(gap_seq.str());
-        DEBUG("gap added");
-    }
-
-    GapDescription<Graph> conjugate(Graph &g_, int shift) const {
-        GapDescription<Graph> res(
-                g_.conjugate(end), g_.conjugate(start), (!gap_seq),
-                (int) g_.length(end) + shift - edge_gap_end_position,
-                (int) g_.length(start) + shift - edge_gap_start_position);
-         DEBUG("conjugate created" << res.str(g_));
-         return res;
-    }
-
-    string str(Graph &g_) const {
-        stringstream s;
-        s << g_.int_id(start) << " " << edge_gap_start_position <<endl << g_.int_id(end) << " " << edge_gap_end_position << endl << gap_seq.str()<< endl;
-        return s.str();
-    }
-
-    bool operator <(const GapDescription & b) const {
-        return (start < b.start || (start == b.start &&  end < b.end) ||
-                (start == b.start &&  end == b.end && edge_gap_start_position < b.edge_gap_start_position));
-    }
-
-private:
-    DECL_LOGGER("PacIndex")
-    ;
-};
-
-template<class Graph>
-struct OneReadMapping {
-    typedef typename Graph::EdgeId EdgeId;
-    vector<vector<EdgeId> > main_storage;
-    vector<GapDescription<Graph> > gaps;
-    vector<size_t> real_length;
-//Total used seeds. sum over all subreads;
-    size_t seed_num;
-    OneReadMapping(vector<vector<EdgeId> > &paths_description, vector<GapDescription<Graph> > &gaps_description, vector<size_t> real_length, size_t seed_num) :
-            main_storage(paths_description), gaps(gaps_description), real_length(real_length), seed_num(seed_num) {
-    }
-
-};
-
+//template<class Graph>
+//struct GapDescription {
+//    typedef typename Graph::EdgeId EdgeId;
+//    EdgeId start, end;
+//    Sequence gap_seq;
+//    int edge_gap_start_position, edge_gap_end_position;
+//
+//
+//    GapDescription(EdgeId start_e, EdgeId end_e, const Sequence &gap, int gap_start, int gap_end) :
+//            start(start_e), end(end_e), gap_seq(gap.str()), edge_gap_start_position(gap_start), edge_gap_end_position(gap_end) {
+//    }
+//
+//    GapDescription(const KmerCluster<Graph> &a, const KmerCluster<Graph> & b, Sequence read, int pacbio_k) {
+//        edge_gap_start_position = a.sorted_positions[a.last_trustable_index].edge_position;
+//        edge_gap_end_position = b.sorted_positions[b.first_trustable_index].edge_position + pacbio_k - 1;
+//        start = a.edgeId;
+//        end = b.edgeId;
+//        DEBUG(read.str());
+//        gap_seq = read.Subseq(a.sorted_positions[a.last_trustable_index].read_position,
+//                              b.sorted_positions[b.first_trustable_index].read_position + pacbio_k - 1);
+//        DEBUG(gap_seq.str());
+//        DEBUG("gap added");
+//    }
+//
+//    GapDescription<Graph> conjugate(Graph &g, int shift) const {
+//        GapDescription<Graph> res(
+//                g.conjugate(end), g.conjugate(start), (!gap_seq),
+//                (int) g.length(end) + shift - edge_gap_end_position,
+//                (int) g.length(start) + shift - edge_gap_start_position);
+//         DEBUG("conjugate created" << res.str(g));
+//         return res;
+//    }
+//
+//    string str(Graph &g) const {
+//        stringstream s;
+//        s << g.int_id(start) << " " << edge_gap_start_position <<endl << g.int_id(end) << " " << edge_gap_end_position << endl << gap_seq.str()<< endl;
+//        return s.str();
+//    }
+//
+//    bool operator <(const GapDescription& b) const {
+//        return (start < b.start || (start == b.start &&  end < b.end) ||
+//                (start == b.start &&  end == b.end && edge_gap_start_position < b.edge_gap_start_position));
+//    }
+//
+//private:
+//    DECL_LOGGER("PacIndex")
+//    ;
+//}
 
 struct StatsCounter{
-
     map<size_t,size_t> path_len_in_edges;
     vector<size_t> subreads_length;
     size_t total_len ;
@@ -246,7 +235,7 @@ struct StatsCounter{
         }
     }
 
-    void report(){
+    void report() const {
         size_t total = 0;
         for (auto iter = seeds_percentage.begin(); iter != seeds_percentage.end(); ++iter){
             total += iter->second;
@@ -260,9 +249,9 @@ struct StatsCounter{
         }
         INFO("Median fraction of present seeds in maximal alignmnent among reads aligned to the graph: " << double(percentage) * 0.001);
     }
+
 private:
     DECL_LOGGER("StatsCounter");
-
 };
 
 inline int StringDistance(string &a, string &b) {
diff --git a/src/modules/assembly_graph/graph_alignment/sequence_mapper.hpp b/src/common/modules/alignment/sequence_mapper.hpp
similarity index 79%
rename from src/modules/assembly_graph/graph_alignment/sequence_mapper.hpp
rename to src/common/modules/alignment/sequence_mapper.hpp
index 1334ced..7572fb6 100644
--- a/src/modules/assembly_graph/graph_alignment/sequence_mapper.hpp
+++ b/src/common/modules/alignment/sequence_mapper.hpp
@@ -7,16 +7,16 @@
 
 #pragma once
 
-#include "data_structures/sequence/sequence_tools.hpp"
+#include "assembly_graph/paths/mapping_path.hpp"
 #include "assembly_graph/paths/path_processor.hpp"
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
+#include "sequence/sequence_tools.hpp"
+#include "common/assembly_graph/core/basic_graph_stats.hpp"
 
-#include "data_structures/sequence/runtime_k.hpp"
 #include "edge_index.hpp"
 #include "kmer_mapper.hpp"
 
 #include <cstdlib>
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
+#include "common/assembly_graph/core/basic_graph_stats.hpp"
 
 namespace debruijn_graph {
 using omnigraph::MappingPath;
@@ -34,7 +34,7 @@ MappingPath<typename Graph::EdgeId> ConjugateMapping(const Graph& g,
         auto e = p.first;
         MappingRange mr = p.second;
         answer.push_back(g.conjugate(e), 
-                        MappingRange(mr.initial_range.Invert(sequence_length - g.k()), 
+                        MappingRange(mr.initial_range.Invert(sequence_length - g.k()),
                         mr.mapped_range.Invert(g.length(e))));
     }
     return answer;
@@ -44,24 +44,28 @@ template<class Graph>
 class SequenceMapper {
 public:
     typedef typename Graph::EdgeId EdgeId;
-    typedef runtime_k::RtSeq Kmer;
+    typedef RtSeq Kmer;
 
-protected:
-    const Graph& g_;
+    virtual ~SequenceMapper() {}
 
-public:
-    SequenceMapper(const Graph& g): g_(g) {
+    virtual MappingPath<EdgeId> MapSequence(const Sequence &sequence) const = 0;
 
-    }
+    virtual MappingPath<EdgeId> MapRead(const io::SingleRead &read) const = 0;
+};
 
-    virtual ~SequenceMapper() {
+template<class Graph>
+class AbstractSequenceMapper : public SequenceMapper<Graph> {
+protected:
+    const Graph& g_;
 
+//    const Graph& g() const {
+//        return g_;
+//    }
+public:
+    AbstractSequenceMapper(const Graph& g) : g_(g) {
     }
 
-    virtual MappingPath<EdgeId> MapSequence(const Sequence &sequence) const = 0;
-
-  
-    MappingPath<EdgeId> MapRead(const io::SingleRead &read) const {
+    MappingPath<EdgeId> MapRead(const io::SingleRead &read) const override {
 //      VERIFY(read.IsValid());
         DEBUG(read.name() << " is mapping");
         string s = read.GetSequenceString();
@@ -70,7 +74,7 @@ public:
         for(size_t i = 0; i < s.size(); i++) {
             if (read.GetSequenceString()[i] == 'N') {
                 if (r > l) {
-                    result.join(MapSequence(Sequence(s.substr(l, r - l))), int(l));
+                    result.join(this->MapSequence(Sequence(s.substr(l, r - l))), int(l));
                 }
                 r = i + 1;
                 l = i + 1;
@@ -79,17 +83,55 @@ public:
             }
         }
         if (r > l) {
-            result.join(MapSequence(Sequence(s.substr(l, r - l))), int(l));
+            result.join(this->MapSequence(Sequence(s.substr(l, r - l))), int(l));
         }
         DEBUG(read.name() << " is mapped");
         DEBUG("Number of edges is " << result.size());
 
-      return result;
+        return result;
     }
-
-    virtual size_t KmerSize() const = 0;
 };
 
+//potentially useful class
+//template<class Graph>
+//class DelegatingSequenceMapper : public SequenceMapper<Graph> {
+//public:
+//    typedef std::function<MappingPath<EdgeId> (const MappingPath<EdgeId>&, size_t)> ProcessingF;
+//private:
+//    shared_ptr<SequenceMapper<Graph>> inner_mapper_;
+//    ProcessingF processing_f_;
+//
+//public:
+//    DelegatingSequenceMapper(shared_ptr<SequenceMapper<Graph>> inner_mapper,
+//                             ProcessingF processing_f) :
+//            inner_mapper_(inner_mapper), processing_f_(processing_f) {
+//    }
+//
+//    MappingPath<EdgeId> MapSequence(const Sequence& s) const override {
+//        return processing_f_(inner_mapper_->MapSequence(s), s.size());
+//    }
+//
+//    MappingPath<EdgeId> MapRead(const io::SingleRead& r) const override {
+//        return processing_f_(inner_mapper_->MapRead(r), r.size());
+//    }
+//};
+
+template<class Graph>
+bool SpuriousMappingFilter(const Graph& /*g*/,
+                           const MappingPath<EdgeId>& mapping_path,
+                           size_t read_length,
+                           size_t max_range,
+                           size_t min_flank) {
+    if (mapping_path.size() == 1) {
+        Range read_range = mapping_path[0].second.initial_range;
+        if (read_range.size() <= max_range
+            && read_range.start_pos >= min_flank
+            && read_range.end_pos + min_flank <= read_length)
+            return false;
+    }
+    return true;
+}
+
 template<class Graph>
 class MappingPathFixer {
 public:
@@ -224,10 +266,6 @@ public:
 
 private:
 
-      bool IsTip(VertexId v) const {
-          return g_.IncomingEdgeCount(v) + g_.OutgoingEdgeCount(v) == 1;
-      }
-
       bool IsMappingPathValid(const MappingPath<EdgeId>& path) const {
           return path.size() != 0;
       }
@@ -240,14 +278,12 @@ private:
 };
 
 template<class Graph, class Index>
-class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
- using SequenceMapper<Graph>::g_;
-
- public:
-  typedef std::vector<MappingRange> RangeMappings;
+class BasicSequenceMapper: public AbstractSequenceMapper<Graph> {
+  using AbstractSequenceMapper<Graph>::g_;
 
- private:
   const Index& index_;
+
+  typedef std::vector<MappingRange> RangeMappings;
   typedef typename Graph::EdgeId EdgeId;
   typedef typename Graph::VertexId VertexId;
   typedef typename Index::KMer Kmer;
@@ -328,18 +364,13 @@ class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
   }
 
  public:
-  NewExtendedSequenceMapper(const Graph& g,
+  BasicSequenceMapper(const Graph& g,
                             const Index& index,
                             const KmerSubs& kmer_mapper,
                 bool optimization_on = true) :
-      SequenceMapper<Graph>(g), index_(index), kmer_mapper_(kmer_mapper), k_(g.k()+1),
-    optimization_on_(optimization_on) { }
-
-  ~NewExtendedSequenceMapper() {
-    //        TRACE("In destructor of sequence mapper");
-    //        TRACE(mapped_ << " sequences were mapped");
-    //        TRACE(unmapped_ << " sequences couldn't be mapped");
-  }
+      AbstractSequenceMapper<Graph>(g), index_(index),
+      kmer_mapper_(kmer_mapper), k_(g.k()+1),
+      optimization_on_(optimization_on) { }
 
   MappingPath<EdgeId> MapSequence(const Sequence &sequence) const {
     std::vector<EdgeId> passed_edges;
@@ -350,7 +381,6 @@ class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
     }
 
     Kmer kmer = sequence.start<Kmer>(k_);
-    //kmer >>= 0;
     bool try_thread = false;
     try_thread = ProcessKmer(kmer, 0, passed_edges,
                              range_mapping, try_thread);
@@ -360,28 +390,16 @@ class NewExtendedSequenceMapper: public SequenceMapper<Graph> {
                                range_mapping, try_thread);
     }
 
-    //        if (passed_edges.empty()) {
-    ////            TRACE("Sequence " << sequence << "couldn't be mapped");
-    //            unmapped_++;
-    //            //todo maybe check path consistency?
-    //        } else {
-    //            mapped_++;
-    //        }
-
     return MappingPath<EdgeId>(passed_edges, range_mapping);
   }
 
-  size_t KmerSize() const {
-      return k_;
-  }
-
-  DECL_LOGGER("NewExtendedSequenceMapper");
+  DECL_LOGGER("BasicSequenceMapper");
 };
 
 
 template<class gp_t>
-std::shared_ptr<NewExtendedSequenceMapper<typename gp_t::graph_t, typename gp_t::index_t> > MapperInstance(const gp_t& gp) {
-  return std::make_shared<NewExtendedSequenceMapper<typename gp_t::graph_t, typename gp_t::index_t> >(gp.g, gp.index, gp.kmer_mapper);
+std::shared_ptr<BasicSequenceMapper<typename gp_t::graph_t, typename gp_t::index_t>> MapperInstance(const gp_t& gp) {
+  return std::make_shared<BasicSequenceMapper<typename gp_t::graph_t, typename gp_t::index_t>>(gp.g, gp.index, gp.kmer_mapper);
 }
 
 }
diff --git a/src/modules/assembly_graph/graph_alignment/sequence_mapper_notifier.hpp b/src/common/modules/alignment/sequence_mapper_notifier.hpp
similarity index 82%
rename from src/modules/assembly_graph/graph_alignment/sequence_mapper_notifier.hpp
rename to src/common/modules/alignment/sequence_mapper_notifier.hpp
index d5af6f9..35120e2 100644
--- a/src/modules/assembly_graph/graph_alignment/sequence_mapper_notifier.hpp
+++ b/src/common/modules/alignment/sequence_mapper_notifier.hpp
@@ -8,10 +8,11 @@
 #ifndef SEQUENCE_MAPPER_NOTIFIER_HPP_
 #define SEQUENCE_MAPPER_NOTIFIER_HPP_
 
-#include "dev_support/memory_limit.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "utils/memory_limit.hpp"
+#include "sequence_mapper.hpp"
 #include "short_read_mapper.hpp"
 #include "io/reads/paired_read.hpp"
+#include "io/reads/read_stream_vector.hpp"
 #include "pipeline/graph_pack.hpp"
 
 #include <vector>
@@ -21,21 +22,24 @@ namespace debruijn_graph {
 //todo think if we still need all this
 class SequenceMapperListener {
 public:
-    virtual void StartProcessLibrary(size_t threads_count) = 0;
-    virtual void StopProcessLibrary() = 0;
+    virtual void StartProcessLibrary(size_t /* threads_count */) {}
+    virtual void StopProcessLibrary() {}
 
     //TODO: think about read ierarchy
-    virtual void ProcessPairedRead(size_t thread_index, const io::PairedRead& pr, const MappingPath<EdgeId>& read1, const MappingPath<EdgeId>& read2) = 0;
-    virtual void ProcessPairedRead(size_t thread_index, const io::PairedReadSeq& pr, const MappingPath<EdgeId>& read1, const MappingPath<EdgeId>& read2) = 0;
-    virtual void ProcessSingleRead(size_t thread_index, const io::SingleRead& r, const MappingPath<EdgeId>& read) = 0;
-    virtual void ProcessSingleRead(size_t thread_index, const io::SingleReadSeq& r, const MappingPath<EdgeId>& read) = 0;
-
-    virtual void MergeBuffer(size_t thread_index) = 0;
+    virtual void ProcessPairedRead(size_t /* thread_index */, const io::PairedRead&  /* pr */,
+                                   const MappingPath<EdgeId>& /* read1 */, const MappingPath<EdgeId>& /* read2 */) {}
+    virtual void ProcessPairedRead(size_t /* thread_index */, const io::PairedReadSeq& /* pr */,
+                                   const MappingPath<EdgeId>& /* read1 */, const MappingPath<EdgeId>& /* read2 */) {}
+    virtual void ProcessSingleRead(size_t /* thread_index */, const io::SingleRead& /* r */, const MappingPath<EdgeId>& /* read */) {}
+    virtual void ProcessSingleRead(size_t /* thread_index */, const io::SingleReadSeq& /* r */, const MappingPath<EdgeId>& /* read */) {}
+
+    virtual void MergeBuffer(size_t /* thread_index */) {}
+    
     virtual ~SequenceMapperListener() {}
 };
 
 class SequenceMapperNotifier {
-    static const size_t BUFFER_SIZE = 200000;
+    static constexpr size_t BUFFER_SIZE = 200000;
 public:
     typedef SequenceMapper<conj_graph_pack::graph_t> SequenceMapperT;
 
@@ -58,16 +62,14 @@ public:
 
         streams.reset();
         NotifyStartProcessLibrary(lib_index, threads_count);
-
         size_t counter = 0, n = 15;
         size_t fmem = get_free_memory();
 
         #pragma omp parallel for num_threads(threads_count) shared(counter)
-        for (size_t ithread = 0; ithread < threads_count; ++ithread) {
+        for (size_t i = 0; i < streams.size(); ++i) {
             size_t size = 0;
             ReadType r;
-            auto& stream = streams[ithread];
-            stream.reset();
+            auto& stream = streams[i];
             while (!stream.eof()) {
                 if (size == BUFFER_SIZE || 
                     // Stop filling buffer if the amount of available is smaller
@@ -81,16 +83,20 @@ public:
                             n += 1;
                         }
                         size = 0;
-                        NotifyMergeBuffer(lib_index, ithread);
+                        NotifyMergeBuffer(lib_index, i);
                     }
                 }
                 stream >> r;
                 ++size;
-                NotifyProcessRead(r, mapper, lib_index, ithread);
+                NotifyProcessRead(r, mapper, lib_index, i);
             }
-#           pragma omp atomic
+            #pragma omp atomic
             counter += size;
         }
+
+        for (size_t i = 0; i < threads_count; ++i)
+            NotifyMergeBuffer(lib_index, i);
+
         INFO("Total " << counter << " reads processed");
         NotifyStopProcessLibrary(lib_index);
     }
diff --git a/src/modules/assembly_graph/graph_alignment/short_read_mapper.hpp b/src/common/modules/alignment/short_read_mapper.hpp
similarity index 71%
rename from src/modules/assembly_graph/graph_alignment/short_read_mapper.hpp
rename to src/common/modules/alignment/short_read_mapper.hpp
index b17559a..db9e564 100644
--- a/src/modules/assembly_graph/graph_alignment/short_read_mapper.hpp
+++ b/src/common/modules/alignment/short_read_mapper.hpp
@@ -1,10 +1,3 @@
-/*
- * short_read_mapper.hpp
- *
- *  Created on: Dec 4, 2013
- *      Author: andrey
- */
-
 //***************************************************************************
 //* Copyright (c) 2015 Saint Petersburg State University
 //* Copyright (c) 2011-2014 Saint Petersburg Academic University
@@ -15,19 +8,21 @@
 #pragma once
 
 
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
-#include "assembly_graph/graph_alignment/pacbio/pac_index.hpp"
+#include "sequence_mapper.hpp"
+#include "common/modules/alignment/pacbio/pac_index.hpp"
+#include "modules/alignment/bwa_sequence_mapper.hpp"
 
 namespace debruijn_graph {
   
 template<class Graph>
-class SensitiveReadMapper: public SequenceMapper<Graph> {
+class SensitiveReadMapper: public AbstractSequenceMapper<Graph> {
     typedef typename Graph::EdgeId EdgeId;
-    using SequenceMapper<Graph>::g_;
+    using AbstractSequenceMapper<Graph>::g_;
 private:
 
     size_t small_k_;
 
+    //FIXME awful!
     static map<size_t, pacbio::PacBioMappingIndex<Graph>* > indices_;
     static size_t active_mappers_;
 
@@ -36,7 +31,7 @@ private:
 public:
 
     SensitiveReadMapper(const Graph& g, size_t k, size_t graph_k) :
-        SequenceMapper<Graph>(g), small_k_(k)
+        AbstractSequenceMapper<Graph>(g), small_k_(k)
     {
         if (indices_.find(small_k_) == indices_.end()) {
             indices_.insert(make_pair(small_k_,
@@ -50,10 +45,6 @@ public:
         return index_->GetShortReadAlignment(sequence);
     }
 
-    size_t KmerSize() const {
-        return small_k_;
-    }
-
     ~SensitiveReadMapper() {
         --active_mappers_;
     }
@@ -76,18 +67,22 @@ map<size_t, pacbio::PacBioMappingIndex<Graph>* > SensitiveReadMapper<Graph>::ind
 template<class Graph>
 size_t SensitiveReadMapper<Graph>::active_mappers_ = 0;
 
-
 template<class graph_pack, class SequencingLib>
-std::shared_ptr<SequenceMapper<typename graph_pack::graph_t>> ChooseProperMapper(const graph_pack& gp, const SequencingLib& library) {
+std::shared_ptr<SequenceMapper<typename graph_pack::graph_t>> ChooseProperMapper(const graph_pack& gp, const SequencingLib& library, bool use_bwa = false) {
+    typedef typename graph_pack::graph_t Graph;
     if (library.type() == io::LibraryType::MatePairs) {
-        INFO("Mapping mate-pair library, selecting sensitive read mapper with k=" << cfg::get().sensitive_map.k);
-        return std::make_shared<SensitiveReadMapper<typename graph_pack::graph_t>>(gp.g, cfg::get().sensitive_map.k, gp.k_value);
+        if (use_bwa) {
+            INFO("Mapping mate-pairs using BWA lib mapper");
+            return std::make_shared<alignment::BWAReadMapper<Graph>>(gp.g);
+        } else {
+            INFO("Mapping mate-pair library, selecting sensitive read mapper with k=" << cfg::get().sensitive_map.k);
+            return std::make_shared<SensitiveReadMapper<Graph>>(gp.g, cfg::get().sensitive_map.k, gp.k_value);
+        }
     }
-
     size_t read_length = library.data().read_length;
     if (read_length < gp.k_value && library.type() == io::LibraryType::PairedEnd) {
         INFO("Read length = " << read_length << ", selecting short read mapper");
-        return std::make_shared<SensitiveReadMapper<typename graph_pack::graph_t>>(gp.g, read_length/ 3, gp.k_value);
+        return std::make_shared<SensitiveReadMapper<Graph>>(gp.g, read_length/ 3, gp.k_value);
     }
 
     INFO("Selecting usual mapper");
diff --git a/src/modules/algorithms/genome_consistance_checker.cpp b/src/common/modules/genome_consistance_checker.cpp
similarity index 82%
rename from src/modules/algorithms/genome_consistance_checker.cpp
rename to src/common/modules/genome_consistance_checker.cpp
index f3009ad..ac40130 100644
--- a/src/modules/algorithms/genome_consistance_checker.cpp
+++ b/src/common/modules/genome_consistance_checker.cpp
@@ -1,5 +1,5 @@
-#include "algorithms/genome_consistance_checker.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
+#include "modules/genome_consistance_checker.hpp"
+#include "assembly_graph/core/graph.hpp"
 #include <algorithm>
 #include <limits>
 namespace debruijn_graph {
@@ -49,7 +49,7 @@ PathScore GenomeConsistenceChecker::CountMisassemblies(const BidirectionalPath &
     }
 }
 
-void GenomeConsistenceChecker::SpellGenome() {
+vector<pair<EdgeId, MappingRange> > GenomeConsistenceChecker::ConstructEdgeOrder() const {
     vector<pair<EdgeId, MappingRange> > to_sort;
     for(auto e: storage_) {
         if (excluded_unique_.find(e) == excluded_unique_.end() ) {
@@ -68,12 +68,50 @@ void GenomeConsistenceChecker::SpellGenome() {
         return a.second.initial_range.start_pos < b.second.initial_range.start_pos;
     }
     );
+    return to_sort;
+}
+
+
+void GenomeConsistenceChecker::SpellGenome() {
     size_t count = 0;
-    for(auto p: to_sort) {
-        INFO("edge " << gp_.g.int_id(p.first) << " length "<< gp_.g.length(p.first) << " coverage " << gp_.g.coverage(p.first) << " mapped to " << p.second.mapped_range.start_pos << " - " << p.second.mapped_range.end_pos << " init_range " << p.second.initial_range.start_pos << " - " << p.second.initial_range.end_pos );
-        genome_spelled_[p.first] = count;
+    auto to_sort = ConstructEdgeOrder();
+    vector<size_t> starts;
+    vector<size_t> ends;
+    for(size_t i = 0; i <to_sort.size(); i++) {
+        if (i > 0 && to_sort[i].second.initial_range.start_pos - to_sort[i-1].second.initial_range.end_pos > storage_.GetMinLength() ) {
+            INFO ("Large gap " << to_sort[i].second.initial_range.start_pos - to_sort[i-1].second.initial_range.end_pos );
+            starts.push_back(to_sort[i].second.initial_range.start_pos);
+            ends.push_back(to_sort[i-1].second.initial_range.end_pos);
+        }
+        if (i == 0) {
+            starts.push_back(to_sort[i].second.initial_range.start_pos);
+        }
+        if (i == to_sort.size() - 1){
+            ends.push_back(to_sort[i].second.initial_range.end_pos);
+        }
+        INFO("edge " << gp_.g.int_id(to_sort[i].first) << " length "<< gp_.g.length(to_sort[i].first) <<
+                     " coverage " << gp_.g.coverage(to_sort[i].first) << " mapped to " << to_sort[i].second.mapped_range.start_pos
+             << " - " << to_sort[i].second.mapped_range.end_pos << " init_range " << to_sort[i].second.initial_range.start_pos << " - " << to_sort[i].second.initial_range.end_pos );
+        genome_spelled_[to_sort[i].first] = count;
         count++;
     }
+    vector<size_t> lengths;
+    size_t total_len = 0;
+    for (size_t i = 0; i < starts.size(); i++) {
+        lengths.push_back(ends[i] - starts[i]);
+        total_len += lengths[i];
+    }
+    sort(lengths.begin(), lengths.end());
+    reverse(lengths.begin(), lengths.end());
+    size_t cur = 0;
+    size_t i = 0;
+    while (cur < total_len / 2 && i < lengths.size()) {
+        cur += lengths[i];
+        i++;
+    }
+    INFO("Assuming gaps of length > " << storage_.GetMinLength() << " unresolvable..");
+    if (lengths.size() > 0)
+        INFO("Rough estimates on N50/L50:" << lengths[i - 1] << " / " << i - 1 << " with len " << total_len);
 }
 
 PathScore GenomeConsistenceChecker::CountMisassembliesWithStrand(const BidirectionalPath &path, const string strand) const {
diff --git a/src/modules/algorithms/genome_consistance_checker.hpp b/src/common/modules/genome_consistance_checker.hpp
similarity index 82%
rename from src/modules/algorithms/genome_consistance_checker.hpp
rename to src/common/modules/genome_consistance_checker.hpp
index 7c106f3..0fcf115 100644
--- a/src/modules/algorithms/genome_consistance_checker.hpp
+++ b/src/common/modules/genome_consistance_checker.hpp
@@ -10,7 +10,7 @@
 #include "visualization/graph_labeler.hpp"
 #include "assembly_graph/handlers/edges_position_handler.hpp"
 #include "assembly_graph/paths/mapping_path.hpp"
-#include "data_structures/sequence/sequence.hpp"
+#include "sequence/sequence.hpp"
 #include "pipeline/graph_pack.hpp"
 #include "visualization/position_filler.hpp"
 #include "assembly_graph/paths/bidirectional_path.hpp"
@@ -32,10 +32,9 @@ class GenomeConsistenceChecker {
 
 private:
     const conj_graph_pack &gp_;
-    const Graph &graph_;
     //EdgesPositionHandler<Graph> &position_handler_;
     Sequence genome_;
-    ScaffoldingUniqueEdgeStorage storage_;
+    const ScaffoldingUniqueEdgeStorage &storage_;
     size_t absolute_max_gap_;
     double relative_max_gap_;
     set<EdgeId> excluded_unique_;
@@ -56,21 +55,24 @@ DECL_LOGGER("GenomeConsistenceChecker");
 
 
 public:
-    GenomeConsistenceChecker(const conj_graph_pack &gp, ScaffoldingUniqueEdgeStorage &storage, size_t max_gap, double relative_max_gap /*= 0.2*/) : gp_(gp),
-            graph_(gp.g), /*position_handler_(gp.edge_pos),*/ genome_(gp.genome.GetSequence()), storage_(storage),
+    GenomeConsistenceChecker(const conj_graph_pack &gp, const ScaffoldingUniqueEdgeStorage &storage, size_t max_gap, double relative_max_gap /*= 0.2*/) : gp_(gp),
+            genome_(gp.genome.GetSequence()), storage_(storage),
         absolute_max_gap_(max_gap), relative_max_gap_(relative_max_gap), excluded_unique_(), circular_edge_() {
         if (!gp.edge_pos.IsAttached()) {
             gp.edge_pos.Attach();
         }
         gp.edge_pos.clear();
-        FillPos(gp_, gp_.genome.GetSequence(), "0");
-        FillPos(gp_, !gp_.genome.GetSequence(), "1");
+        visualization::position_filler::FillPos(gp_, gp_.genome.GetSequence(), "0");
+        visualization::position_filler::FillPos(gp_, !gp_.genome.GetSequence(), "1");
         RefillPos();
     }
     PathScore CountMisassemblies(const BidirectionalPath &path) const;
+    vector<pair<EdgeId, MappingRange> > ConstructEdgeOrder() const;
+
 //spells genome in language of long unique edges from storage;
     void SpellGenome();
 
+
 };
 
 
diff --git a/src/modules/algorithms/graph_construction.hpp b/src/common/modules/graph_construction.hpp
similarity index 91%
rename from src/modules/algorithms/graph_construction.hpp
rename to src/common/modules/graph_construction.hpp
index d7034e6..c862956 100644
--- a/src/modules/algorithms/graph_construction.hpp
+++ b/src/common/modules/graph_construction.hpp
@@ -15,20 +15,20 @@
 
 #include "pipeline/graph_pack.hpp"
 
-#include "io/reads_io/io_helper.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
+#include "io/reads/io_helper.hpp"
+#include "assembly_graph/core/graph.hpp"
 
-#include "data_structures/debruijn_graph/debruijn_graph_constructor.hpp"
-#include "data_structures/debruijn_graph/early_simplification.hpp"
+#include "utils/debruijn_graph/debruijn_graph_constructor.hpp"
+#include "utils/debruijn_graph/early_simplification.hpp"
 
-#include "dev_support/perfcounter.hpp"
+#include "utils/perfcounter.hpp"
 #include "io/dataset_support/read_converter.hpp"
 
 #include "assembly_graph/handlers/edges_position_handler.hpp"
-#include "assembly_graph/graph_support/detail_coverage.hpp"
-#include "data_structures/indices/storing_traits.hpp"
-#include "data_structures/indices/edge_index_builders.hpp"
-#include "dev_support/openmp_wrapper.h"
+#include "assembly_graph/graph_support/coverage_filling.hpp"
+#include "utils/indices/storing_traits.hpp"
+#include "utils/indices/edge_index_builders.hpp"
+#include "utils/openmp_wrapper.h"
 
 namespace debruijn_graph {
 
@@ -110,7 +110,7 @@ void EarlyClipTips(size_t k, const config::debruijn_config::construction& params
     }
 }
 
-#include "data_structures/indices/kmer_extension_index_builder.hpp"
+#include "utils/indices/kmer_extension_index_builder.hpp"
 
 template<class Graph, class Read, class Index>
 ReadStatistics ConstructGraphUsingExtentionIndex(const config::debruijn_config::construction params,
diff --git a/src/modules/algorithms/graph_read_correction.hpp b/src/common/modules/graph_read_correction.hpp
similarity index 97%
rename from src/modules/algorithms/graph_read_correction.hpp
rename to src/common/modules/graph_read_correction.hpp
index 311891d..892cfb8 100644
--- a/src/modules/algorithms/graph_read_correction.hpp
+++ b/src/common/modules/graph_read_correction.hpp
@@ -11,9 +11,9 @@
 #include "assembly_graph/paths/mapping_path.hpp"
 #include "assembly_graph/paths/path_finders.hpp"
 #include "assembly_graph/paths/path_processor.hpp"
-#include "io/reads_io/modifying_reader_wrapper.hpp"
-#include "assembly_graph/graph_core/order_and_law.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "io/reads/modifying_reader_wrapper.hpp"
+#include "assembly_graph/core/order_and_law.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
 
 namespace debruijn_graph {
 
diff --git a/src/modules/algorithms/mismatch_shall_not_pass.hpp b/src/common/modules/mismatch_shall_not_pass.hpp
similarity index 93%
rename from src/modules/algorithms/mismatch_shall_not_pass.hpp
rename to src/common/modules/mismatch_shall_not_pass.hpp
index 0451adb..085e412 100644
--- a/src/modules/algorithms/mismatch_shall_not_pass.hpp
+++ b/src/common/modules/mismatch_shall_not_pass.hpp
@@ -7,13 +7,12 @@
 
 #pragma once
 
-#include "algorithms/simplification/compressor.hpp"
+#include "modules/simplification/compressor.hpp"
 #include "assembly_graph/handlers/id_track_handler.hpp"
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
-#include "io/reads_io/read_stream_vector.hpp"
-#include "data_structures/sequence/runtime_k.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "io/reads/read_stream_vector.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
 
 #include "pipeline/config_struct.hpp"
 
@@ -82,8 +81,8 @@ private:
         for (auto it = kmer_mapper.begin(); it != kmer_mapper.end(); ++it) {
             // Kmer mapper iterator dereferences to pair (KMer, KMer), not to the reference!
             const auto mentry = *it;
-            const runtime_k::RtSeq &from = mentry.first;
-            const runtime_k::RtSeq &to = mentry.second;
+            const RtSeq &from = mentry.first;
+            const RtSeq &to = mentry.second;
             size_t cnt = 0;
             size_t cnt_arr[4];
             for (size_t i = 0; i < 4; i++)
@@ -215,20 +214,14 @@ private:
             edge = tmp.first;
             mismatch = tmp.second;
         }
-        const Sequence &s_mm = gp_.g.EdgeNucls(mismatch);
+        Sequence s_mm = gp_.g.EdgeNucls(mismatch);
         Sequence correct = s_mm.Subseq(0, gp_.g.k()) + Sequence(string(1, nucl)) +
                            s_mm.Subseq(gp_.g.k() + 1, gp_.g.k() * 2 + 1);
-        if (!gp_.kmer_mapper.CheckCanRemap(s_mm, correct)) {
-            return edge;
-        }
+
         VERIFY(nucl != s_mm[gp_.g.k()]);
         EdgeId correct_edge = gp_.g.AddEdge(gp_.g.EdgeStart(mismatch), gp_.g.EdgeEnd(mismatch), correct);
-        if (position > gp_.g.k()) {
-            gp_.g.GlueEdges(mismatch, correct_edge);
-            return edge;
-        } else {
-            return gp_.g.GlueEdges(mismatch, correct_edge);
-        }
+        EdgeId glued = gp_.g.GlueEdges(mismatch, correct_edge);
+        return position > gp_.g.k() ? edge : glued;
     }
 
     EdgeId CorrectNucls(EdgeId edge, const std::vector<pair<size_t, char>> &mismatches) {
@@ -305,8 +298,9 @@ private:
     }
 
 public:
-    MismatchShallNotPass(graph_pack &gp, double relative_threshold = 1.5) : gp_(gp), relative_threshold_(
-            relative_threshold) {
+    MismatchShallNotPass(graph_pack &gp, double relative_threshold = 1.5) :
+            gp_(gp),
+            relative_threshold_(relative_threshold) {
         VERIFY(relative_threshold >= 1);
     }
 
diff --git a/src/modules/algorithms/path_extend/CMakeLists.txt b/src/common/modules/path_extend/CMakeLists.txt
similarity index 67%
rename from src/modules/algorithms/path_extend/CMakeLists.txt
rename to src/common/modules/path_extend/CMakeLists.txt
index 03b447b..62d21fc 100644
--- a/src/modules/algorithms/path_extend/CMakeLists.txt
+++ b/src/common/modules/path_extend/CMakeLists.txt
@@ -8,11 +8,16 @@
 project(path_extend CXX)
 
 add_library(path_extend STATIC pe_config_struct.cpp
+                    pipeline/launch_support.cpp
+                    pipeline/launcher.cpp
+                    pipeline/extenders_logic.cpp
                     scaffolder2015/extension_chooser2015.cpp
                     scaffolder2015/scaffold_graph.cpp
                     scaffolder2015/scaffold_graph_constructor.cpp
                     scaffolder2015/scaffold_graph_visualizer.cpp
-                    scaffolder2015/connection_condition2015.cpp)
+                    scaffolder2015/connection_condition2015.cpp 
+                    scaffolder2015/path_polisher.cpp)
+
+target_link_libraries(path_extend assembly_graph ssw)
 
-target_link_libraries(path_extend graph_support)
 
diff --git a/src/modules/algorithms/path_extend/extension_chooser.hpp b/src/common/modules/path_extend/extension_chooser.hpp
similarity index 64%
rename from src/modules/algorithms/path_extend/extension_chooser.hpp
rename to src/common/modules/path_extend/extension_chooser.hpp
index b0a989a..cfd1e98 100644
--- a/src/modules/algorithms/path_extend/extension_chooser.hpp
+++ b/src/common/modules/path_extend/extension_chooser.hpp
@@ -18,9 +18,9 @@
 #include <cfloat>
 #include <iostream>
 #include <fstream>
+#include <map>
 #include "weight_counter.hpp"
 #include "pe_utils.hpp"
-#include "next_path_searcher.hpp"
 
 //#include "scaff_supplementary.hpp"
 
@@ -28,7 +28,6 @@ namespace path_extend {
 
 typedef std::multimap<double, EdgeWithDistance> AlternativeContainer;
 
-
 class PathAnalyzer {
 protected:
     const Graph& g_;
@@ -246,21 +245,29 @@ private:
 
 
 class JointExtensionChooser: public ExtensionChooser {
-
-protected:
     shared_ptr<ExtensionChooser> first_;
-
     shared_ptr<ExtensionChooser> second_;
 
 public:
-    JointExtensionChooser(const Graph& g, shared_ptr<ExtensionChooser> first, shared_ptr<ExtensionChooser> second): ExtensionChooser(g),
-        first_(first), second_(second)
-    {
+    JointExtensionChooser(const Graph& g,
+                          shared_ptr<ExtensionChooser> first,
+                          shared_ptr<ExtensionChooser> second): ExtensionChooser(g),
+        first_(first), second_(second) {
     }
 
     EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer& edges) const override {
-        EdgeContainer e1 = first_->Filter(path, edges);
-        return second_->Filter(path, e1);
+        EdgeContainer answer;
+        auto r1 = first_->Filter(path, edges);
+        auto r2 = second_->Filter(path, edges);
+        for (auto ewd1 : r1) {
+            for (auto ewd2 : r2) {
+                if (ewd1.e_ == ewd2.e_) {
+                    VERIFY(ewd1.d_ == ewd2.d_);
+                    answer.push_back(ewd1);
+                }
+            }
+        }
+        return answer;
     }
 };
 
@@ -279,30 +286,7 @@ public:
     }
 };
 
-
-class TrivialExtensionChooserWithPI: public ExtensionChooser {
-
-public:
-    TrivialExtensionChooserWithPI(Graph& g, shared_ptr<WeightCounter> wc, double weight_threshold): 
-            ExtensionChooser(g, wc, weight_threshold) {
-    }
-
-    EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer& edges) const override {
-        if (edges.size() == 1) {
-            double weight = wc_->CountWeight(path, edges.back().e_, std::set<size_t>());
-            NotifyAll(weight);
-
-            if (CheckThreshold(weight)) {
-                return edges;
-            }
-        }
-        return EdgeContainer();
-    }
-};
-
 class ExcludingExtensionChooser: public ExtensionChooser {
-    //FIXME what is the logic behind it?
-protected:
     PathAnalyzer analyzer_;
     double prior_coeff_;
 
@@ -330,10 +314,11 @@ protected:
     EdgeContainer FindFilteredEdges(const BidirectionalPath& path,
             const EdgeContainer& edges, const std::set<size_t>& to_exclude) const {
         AlternativeContainer weights = FindWeights(path, edges, to_exclude);
+        VERIFY(!weights.empty());
         auto max_weight = (--weights.end())->first;
         EdgeContainer top = FindPossibleEdges(weights, max_weight);
         EdgeContainer result;
-        if (top.size() >= 1 && CheckThreshold(max_weight)) {
+        if (CheckThreshold(max_weight)) {
             result = top;
         }
         return result;
@@ -341,7 +326,12 @@ protected:
 
 protected:
 
-    virtual void ExcludeEdges(const BidirectionalPath& path, const EdgeContainer& edges, std::set<size_t>& to_exclude) const = 0;
+    virtual void ExcludeEdges(const BidirectionalPath& path,
+                              const EdgeContainer& /*edges*/,
+                              std::set<size_t>& to_exclude) const {
+        analyzer_.RemoveTrivial(path, to_exclude);
+    }
+
 
 public:
     ExcludingExtensionChooser(const Graph& g, shared_ptr<WeightCounter> wc, PathAnalyzer analyzer, double weight_threshold, double priority) :
@@ -356,7 +346,6 @@ public:
             return edges;
         }
         std::set<size_t> to_exclude;
-        analyzer_.RemoveTrivial(path, to_exclude);
         path.Print();
         EdgeContainer result = edges;
         ExcludeEdges(path, result, to_exclude);
@@ -375,10 +364,12 @@ private:
 class SimpleExtensionChooser: public ExcludingExtensionChooser {
 protected:
     void ExcludeEdges(const BidirectionalPath& path, const EdgeContainer& edges, std::set<size_t>& to_exclude) const override {
+        ExcludingExtensionChooser::ExcludeEdges(path, edges, to_exclude);
+
         if (edges.size() < 2) {
             return;
         }
-        //excluding based on absense of ideal info
+        //excluding based on absence of ideal info
         int index = (int) path.Size() - 1;
         while (index >= 0) {
             if (to_exclude.count(index)) {
@@ -423,10 +414,62 @@ private:
     DECL_LOGGER("SimpleExtensionChooser");
 };
 
+//TODO this class should not exist with better configuration of excluding conditions
+class IdealBasedExtensionChooser : public ExcludingExtensionChooser {
+protected:
+    void ExcludeEdges(const BidirectionalPath &path, const EdgeContainer &edges,
+                      std::set<size_t> &to_exclude) const override {
+        //commented for a reason
+        //ExcludingExtensionChooser::ExcludeEdges(path, edges, to_exclude);
+        //if (edges.size() < 2) {
+        //    return;
+        //}
+        VERIFY(to_exclude.empty());
+        //excluding based on absence of ideal info
+        for (int index = (int) path.Size() - 1; index >= 0; index--) {
+            EdgeId path_edge = path[index];
+
+            for (size_t i = 0; i < edges.size(); ++i) {
+                if (!HasIdealInfo(path_edge,
+                                  edges.at(i).e_,
+                                  path.LengthAt(index))) {
+                    to_exclude.insert(size_t(index));
+                }
+            }
+        }
+    }
+
+public:
+
+    IdealBasedExtensionChooser(const Graph &g,
+                               shared_ptr<WeightCounter> wc,
+                               double weight_threshold,
+                               double priority) :
+        ExcludingExtensionChooser(g, wc, PathAnalyzer(g), weight_threshold, priority) {
+    }
+
+private:
+    DECL_LOGGER("IdealBasedExtensionChooser");
+};
 
 class RNAExtensionChooser: public ExcludingExtensionChooser {
 protected:
-    void ExcludeEdges(const BidirectionalPath& /*path*/, const EdgeContainer& /*edges*/, std::set<size_t>& /*to_exclude*/) const override {
+    void ExcludeEdges(const BidirectionalPath& path, const EdgeContainer& edges, std::set<size_t>& to_exclude) const override {
+        ExcludingExtensionChooser::ExcludeEdges(path, edges, to_exclude);
+        if (edges.size() < 2) {
+            return;
+        }
+        size_t i = path.Size() - 1;
+        PathAnalyzer analyzer(g_);
+        while (i > 0) {
+            if (g_.IncomingEdgeCount(g_.EdgeStart(path[i])) > 1)
+                break;
+            to_exclude.insert(i);
+            --i;
+            }
+
+        if (i == 0)
+            to_exclude.clear();
     }
 
 public:
@@ -442,6 +485,7 @@ private:
 class LongEdgeExtensionChooser: public ExcludingExtensionChooser {
 protected:
     virtual void ExcludeEdges(const BidirectionalPath& path, const EdgeContainer& edges, std::set<size_t>& to_exclude) const {
+        ExcludingExtensionChooser::ExcludeEdges(path, edges, to_exclude);
         if (edges.size() < 2) {
             return;
         }
@@ -538,7 +582,7 @@ protected:
         set<EdgeId> jumping_edges;
         const auto& lib = wc_->lib();
         //todo lib (and FindJumpEdges) knows its var so it can be counted there
-        int is_scatter = int(math::round(double(lib.GetIsVar()) * is_scatter_coeff_));
+        int is_scatter = int(math::round(lib.GetIsVar() * is_scatter_coeff_));
         for (int i = (int) path.Size() - 1; i >= 0 && path.LengthAt(i) - g_.length(path.At(i)) <= lib.GetISMax(); --i) {
             set<EdgeId> jump_edges_i;
             lib.FindJumpEdges(path.At(i), jump_edges_i,
@@ -816,7 +860,8 @@ private:
 
 class LongReadsExtensionChooser : public ExtensionChooser {
 public:
-    LongReadsExtensionChooser(const Graph& g, PathContainer& pc,
+    LongReadsExtensionChooser(const Graph& g,
+                              const GraphCoverageMap& read_paths_cov_map,
                               double filtering_threshold,
                               double weight_priority_threshold,
                               double unique_edge_priority_threshold,
@@ -827,12 +872,12 @@ public:
               filtering_threshold_(filtering_threshold),
               weight_priority_threshold_(weight_priority_threshold),
               min_significant_overlap_(min_significant_overlap),
-              cov_map_(g, pc),
+              cov_map_(read_paths_cov_map),
               unique_edge_analyzer_(g, cov_map_, filtering_threshold,
                                     unique_edge_priority_threshold,
                                     max_repeat_length, uneven_depth),
-              simple_scaffolding_(g) {
-
+              simple_scaffolding_(g)
+    {
     }
 
     /* Choose extension as correct only if we have reads that traverse a unique edge from the path and this extension.
@@ -855,7 +900,6 @@ public:
         DEBUG("Found " << support_paths.size() << " covering paths!!!");
         for (auto it = support_paths.begin(); it != support_paths.end(); ++it) {
             auto positions = (*it)->FindAll(path.Back());
-            (*it)->Print();
             for (size_t i = 0; i < positions.size(); ++i) {
                 if ((int) positions[i] < (int) (*it)->Size() - 1
                         && EqualBegins(path, (int) path.Size() - 1, **it,
@@ -908,6 +952,7 @@ public:
     }
 
 private:
+
     bool UniqueBackPath(const BidirectionalPath& path, size_t pos) const {
         int int_pos = (int) pos;
         while (int_pos >= 0) {
@@ -919,462 +964,21 @@ private:
     }
 
     vector<pair<EdgeId, double> > MapToSortVector(const map<EdgeId, double>& map) const {
-        vector<pair<EdgeId, double> > result1(map.begin(), map.end());
-        std::sort(result1.begin(), result1.end(), EdgeWithWeightCompareReverse);
-        return result1;
+        vector<pair<EdgeId, double> > result(map.begin(), map.end());
+        std::sort(result.begin(), result.end(), EdgeWithWeightCompareReverse);
+        return result;
     }
 
     double filtering_threshold_;
     double weight_priority_threshold_;
     size_t min_significant_overlap_;
-    const GraphCoverageMap cov_map_;
+    const GraphCoverageMap& cov_map_;
     LongReadsUniqueEdgeAnalyzer unique_edge_analyzer_;
     SimpleScaffolding simple_scaffolding_;
 
     DECL_LOGGER("LongReadsExtensionChooser");
 };
 
-class MatePairExtensionChooser : public ExtensionChooser {
-public:
-    MatePairExtensionChooser(const Graph& g, shared_ptr<PairedInfoLibrary> lib,
-                             const PathContainer& paths, size_t max_number_of_paths_to_search,
-                             bool uneven_depth)
-            : ExtensionChooser(g),
-              g_(g),
-              lib_(lib),
-              search_dist_(lib->GetISMax()),
-              weight_counter_(g, lib, 10),
-              cov_map_(g_, paths),
-              path_searcher_(g_, cov_map_, lib_->GetISMax(), PathsWeightCounter(g, lib, (size_t) lib->GetSingleThreshold()), max_number_of_paths_to_search),
-              //TODO params
-              unique_edge_analyzer_(g, cov_map_, 0., 1000., 8000., uneven_depth),
-              simple_scaffolder_(g) {
-    }
-
-    //Attention! Uses const_cast to modify path!!!
-    EdgeContainer Filter(const BidirectionalPath& path,
-                         const EdgeContainer& init_edges) const override {
-        DEBUG("mp chooser");
-        path.Print();
-        if (path.Length() < lib_->GetISMin()) {
-            return EdgeContainer();
-        }
-        EdgeContainer edges = TryResolveBulge(path, init_edges);
-        map<EdgeId, BidirectionalPath*> best_paths;
-        for (size_t iedge = 0; iedge < edges.size(); ++iedge) {
-            BidirectionalPathSet following_paths = path_searcher_.FindNextPaths(path, edges[iedge].e_);
-            vector<BidirectionalPath*> max_weighted = MaxWeightedPath(path, following_paths);
-            if (max_weighted.size() == 0) {
-                DEBUG("too much paths or tip");
-                DeleteMapWithPaths(best_paths);
-                DeletePaths(following_paths);
-                best_paths.clear();
-                break;
-            } else {
-                best_paths[edges[iedge].e_] = new BidirectionalPath(*max_weighted[0]);
-            }
-            DeletePaths(following_paths);
-        }
-
-        BidirectionalPathSet next_paths;
-        if (edges.size() == 0) {
-            DEBUG("scaffolding edges size " << edges.size())
-            next_paths = path_searcher_.FindNextPaths(path, path.Back());
-        } else if (best_paths.size() == edges.size()) {
-            for (size_t iedge = 0; iedge < edges.size(); ++iedge) {
-                if (best_paths.count(edges[iedge].e_) > 0){
-                    next_paths.insert(best_paths[edges[iedge].e_]);
-                }
-            }
-        }
-        EdgeContainer result = ChooseBest(path, next_paths);
-        if (result.size() != 1) {
-            DEBUG("scaffold tree");
-            result = ScaffoldTree(const_cast<BidirectionalPath&>(path));
-        }
-        DeletePaths(next_paths);
-        if (result.size() != 1) {
-            DEBUG("nobody can extend " << g_.int_id(path.Back()));
-        }
-        return result;
-    }
-
-private:
-    EdgeContainer ScaffoldTree(BidirectionalPath& path) const {
-        DEBUG("try scaffold tree");
-        vector<BidirectionalPath*> next_paths = path_searcher_.ScaffoldTree(path);
-        VERIFY(next_paths.size() <= 1);
-        EdgeContainer result;
-        if (!next_paths.empty() && next_paths.back()->Size() > 0) {
-            BidirectionalPath* res = next_paths.back();
-            for (size_t i = 0; i < res->Size() - 1; ++i) {
-                path.PushBack(res->At(i), res->GapAt(i), res->TrashPreviousAt(i), res->TrashCurrentAt(i));
-            }
-            result = EdgeContainer(1, EdgeWithDistance(res->Back(), res->GapAt(res->Size() - 1)));
-        }
-        DeletePaths(next_paths);
-        return result;
-    }
-
-    bool IsBulge(const EdgeContainer& edges) const {
-        if (edges.size() == 0)
-            return false;
-        for (EdgeWithDistance e : edges) {
-            if (!InBuble(e.e_, g_))
-                return false;
-        }
-        return true;
-    }
-
-    map<EdgeId, double> FindBulgeWeights(const BidirectionalPath& p, const EdgeContainer& edges) const {
-        map<EdgeId, double> result;
-        for (size_t i = 0; i < edges.size(); ++i) {
-            result[edges[i].e_] = 0.0;
-        }
-        for (size_t i = 0; i < p.Size(); ++i) {
-            bool common = true;
-            bool common_ideal = true;
-            for (EdgeWithDistance e : edges) {
-                common_ideal = common_ideal && weight_counter_.HasIdealPI(p.At(i), e.e_, (int) p.LengthAt(i));
-                common = common && weight_counter_.HasPI(p.At(i), e.e_, (int) p.LengthAt(i));
-            }
-            if (!common_ideal || common) {
-                continue;
-            }
-            for (size_t j = 0; j < edges.size(); ++j) {
-                result[edges[j].e_] += weight_counter_.PI(p.At(i), edges[j].e_, (int) p.LengthAt(i));
-            }
-        }
-        return result;
-    }
-
-    EdgeContainer TryResolveBulge(const BidirectionalPath& p, const EdgeContainer& edges) const {
-        if (!IsBulge(edges))
-            return edges;
-        map<EdgeId, double> weights = FindBulgeWeights(p, edges);
-        double max_w = 0.0;
-        EdgeContainer result;
-        for (EdgeWithDistance e : edges) {
-            double w = weights[e.e_];
-            DEBUG("bulge " << g_.int_id(e.e_) << " w = " << w);
-            if (math::gr(w, max_w)) {
-                max_w = w;
-                result.clear();
-                result.push_back(e);
-            } else if (math::eq(w, max_w)) {
-                result.push_back(e);
-            }
-        }
-        if (result.size() != 1) {
-            result = edges;
-        }
-        return result;
-    }
-
-    EdgeContainer ChooseBest(const BidirectionalPath& path, const BidirectionalPathSet& next_paths) const {
-        DEBUG("Try to choose from best paths...");
-        vector<BidirectionalPath*> best_path = MaxWeightedPath(path, next_paths);
-        EdgeContainer result;
-        if (best_path.size() == 1) {
-            result.push_back(EdgeWithDistance((*best_path.begin())->At(0), (*best_path.begin())->GapAt(0)));
-        } else if (best_path.size() > 1) {
-            result = TryToScaffold(path, best_path);
-        }
-        return result;
-    }
-
-    bool HasPIFromUniqueEdges(const BidirectionalPath& p1, const BidirectionalPath& p2, const set<size_t>& p1_unique_edges) const {
-        for (size_t i1 = 0; i1 < p1.Size(); ++i1) {
-            if (p1_unique_edges.find(i1) == p1_unique_edges.end()) {
-                continue;
-            }
-            for (size_t i2 = 0; i2 < p2.Size(); ++i2) {
-                int gap = (int) p1.LengthAt(i1) + (int) p2.Length() - (int) p2.LengthAt(i2);
-                if (unique_edge_analyzer_.IsUnique(p2.At(i2)) && weight_counter_.HasPI(p1.At(i1), p2.At(i2), gap)) {
-                    DEBUG("has unique edge " << g_.int_id(p1.At(i1)) << " " << g_.int_id(p2.At(i2)));
-                    return true;
-                }
-            }
-        }
-        return false;
-    }
-
-    bool SignificallyDifferentEdges(const BidirectionalPath& init_path, const BidirectionalPath& path1, const map<size_t, double>& pi1,
-                                    const BidirectionalPath& path2, const map<size_t, double>& pi2, const set<size_t>& unique_init_edges) const {
-        double not_common_w1 = 0.0;
-        double common_w = 0.0;
-        for (auto iter = pi1.begin(); iter != pi1.end(); ++iter) {
-            auto iter2 = pi2.find(iter->first);
-            double w = 0.0;
-            if (iter2 != pi2.end() && !math::eq(iter2->second, 0.0)) {
-                w = min(iter2->second, iter->second);
-            }
-            not_common_w1 += iter->second - w;
-            common_w += w;
-        }
-        if (common_w < 0.8 * (not_common_w1 + common_w)
-                || (HasPIFromUniqueEdges(init_path, path1, unique_init_edges) && !HasPIFromUniqueEdges(init_path, path2, unique_init_edges))) {
-            DEBUG("common_w " << common_w << " sum * 0.8  = " << 0.8 * (not_common_w1 + common_w))
-            return true;
-        }
-        return false;
-    }
-
-    set<size_t> FindNotCommonEdges(const BidirectionalPath& path, const BidirectionalPathMap< map<size_t, double> >& all_pi) const {
-        set<size_t> res;
-        for (size_t i = 0; i < path.Size(); ++i) {
-            if (!unique_edge_analyzer_.IsUnique(path.At(i))) {
-                continue;
-            }
-            size_t pi_count = 0;
-            for (auto iter = all_pi.begin(); iter != all_pi.end(); ++iter) {
-                const map<size_t, double>& info = iter->second;
-                if (info.count(i) > 0 && math::gr(info.at(i), 0.0)) {
-                    pi_count++;
-                }
-            }
-            if (pi_count == 1)
-                res.insert(i);
-        }
-        return res;
-    }
-
-    void DeleteSmallWeights(const BidirectionalPath& path, BidirectionalPathSet& paths, BidirectionalPathMap< map<size_t, double> >& all_pi) const {
-        double max_weight = 0.0;
-        BidirectionalPath* max_path = NULL;
-        for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-            if ((*iter)->GetWeight() >= max_weight) {
-                max_weight = max(max_weight, (*iter)->GetWeight());
-                max_path = *iter;
-            }
-        }
-        BidirectionalPathSet to_del;
-        for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-            if (math::gr(max_weight, (*iter)->GetWeight() * 1.5) //TODO: move 1.5 to config
-                    && SignificallyDifferentEdges(path, *max_path, all_pi.find(max_path)->second, **iter, all_pi.find(*iter)->second,
-                                                  FindNotCommonEdges(path, all_pi)))
-                to_del.insert(*iter);
-        }
-        for (BidirectionalPath* p : to_del) {
-            paths.erase(p);
-            all_pi.erase(p);
-        }
-    }
-
-    void DeleteCommonPi(const BidirectionalPath& p, BidirectionalPathMap< map<size_t, double> >& all_pi) const {
-        weight_counter_.ClearCommonWeight();
-        for (size_t i = 0; i < p.Size(); ++i) {
-            double common = DBL_MAX;
-            for (auto iter = all_pi.begin(); iter != all_pi.end(); ++iter) {
-                common = iter->second.count(i) == 0 ? 0.0 : min(common, iter->second.at(i));
-            }
-            weight_counter_.SetCommonWeightFrom(i, common);
-        }
-    }
-
-    size_t FindCommonBegin(const BidirectionalPathSet& paths) const {
-        if (paths.size() == 0) {
-            return 0;
-        }
-        size_t common_begin = 0;
-        BidirectionalPath* p = *paths.begin();
-        while (common_begin < p->Size()) {
-            EdgeId e = p->At(common_begin);
-            for (BidirectionalPath* next : paths) {
-                if (common_begin >= next->Size() || next->At(common_begin) != e) {
-                    return common_begin;
-                }
-            }
-            common_begin++;
-        }
-        return common_begin;
-    }
-
-    void CountAllPairInfo(const BidirectionalPath& path, const BidirectionalPathSet& next_paths,
-                BidirectionalPathMap<map<size_t, double>>& result) const {
-        result.clear();
-        size_t common_begin = FindCommonBegin(next_paths);
-        DEBUG("common begin " << common_begin);
-        for (BidirectionalPath* next : next_paths) {
-            result[next] = weight_counter_.FindPairInfoFromPath(path, 0, path.Size(), *next, common_begin, next->Size());
-        }
-    }
-
-    void CountWeightsAndFilter(const BidirectionalPath& path, BidirectionalPathSet& next_paths, bool delete_small_w) const {
-        BidirectionalPathMap<map<size_t, double> > all_pi;
-        CountAllPairInfo(path, next_paths, all_pi);
-        DeleteCommonPi(path, all_pi);
-        for (BidirectionalPath* next : next_paths) {
-            next->SetWeight((float) weight_counter_.CountPairInfo(path, 0, path.Size(), *next, 0, next->Size()));
-        }
-        if (delete_small_w) {
-            DeleteSmallWeights(path, next_paths, all_pi);
-        }
-    }
-
-    struct PathWithWeightSort {
-        PathWithWeightSort(const MatePairExtensionChooser& mp_chooser, const BidirectionalPath& path, BidirectionalPathMap< map<size_t, double> >& all_pi)
-                : mp_chooser_(mp_chooser),
-                  path_(path),
-                  not_common_(mp_chooser_.FindNotCommonEdges(path_, all_pi)) {
-        }
-
-        bool operator()(const BidirectionalPath* p1, const BidirectionalPath* p2) {
-            if (mp_chooser_.HasPIFromUniqueEdges(path_, *p1, not_common_) && !mp_chooser_.HasPIFromUniqueEdges(path_, *p2, not_common_)) {
-                return true;
-            }
-            if (mp_chooser_.HasPIFromUniqueEdges(path_, *p2, not_common_) && !mp_chooser_.HasPIFromUniqueEdges(path_, *p1, not_common_)) {
-                return false;
-            }
-            if (!math::eq(p1->GetWeight(), p2->GetWeight())) {
-                return math::gr(p1->GetWeight(), p2->GetWeight());
-            }
-            if (!math::eq(p1->GetWeight(), p2->GetWeight())) {
-                return math::gr(p1->GetWeight(), p2->GetWeight());
-            }
-            if (p1->Length() != p2->Length()) {
-                return p1->Length() > p2->Length();
-            }
-            return p1->Size() > p2->Size();
-        }
-        const MatePairExtensionChooser& mp_chooser_;
-        const BidirectionalPath& path_;
-        const set<size_t> not_common_;
-    };
-
-    vector<BidirectionalPath*> SortResult(const BidirectionalPath& path, BidirectionalPathSet& next_paths) const {
-        BidirectionalPathMap< map<size_t, double> > all_pi;
-        CountAllPairInfo(path, next_paths, all_pi);
-        CountWeightsAndFilter(path, next_paths, false);
-        vector<BidirectionalPath*> to_sort(next_paths.begin(), next_paths.end());
-        PathWithWeightSort comparator(*this, path, all_pi);
-        std::sort(to_sort.begin(), to_sort.end(), comparator);
-        return to_sort;
-    }
-
-    vector<BidirectionalPath*> MaxWeightedPath(const BidirectionalPath& path, const BidirectionalPathSet& following_paths) const {
-        BidirectionalPathSet result(following_paths);
-        BidirectionalPathSet prev_result;
-        while (prev_result.size() != result.size()) {
-            prev_result = result;
-            DEBUG("iteration with paths " << result.size());
-            CountWeightsAndFilter(path, result, true);
-            if (result.size() == 0)
-                result = prev_result;
-            if (result.size() == 1)
-                break;
-        }
-        if (result.size() == 0) {
-            DEBUG("bad case");
-            return vector<BidirectionalPath*>();
-        }
-        return SortResult(path, result);
-    }
-
-    BidirectionalPath ChooseFromEnds(const BidirectionalPath& path, const vector<BidirectionalPath*>& paths, const BidirectionalPath& end) const { //TODO" rewrite
-        DEBUG("choose from ends " << paths.size());
-        end.Print();
-        vector<BidirectionalPath*> new_paths;
-        vector<BidirectionalPath*> paths_to_cover;
-        for (BidirectionalPath* p : paths) {
-            int from = 0;
-            int pos = p->FindFirst(end, from);
-            while (pos > -1) {
-                BidirectionalPath* new_p = new BidirectionalPath(path);
-                BidirectionalPath* new_end = new BidirectionalPath(p->SubPath(0, pos + end.Size()));
-                new_p->PushBack(*new_end);
-                new_paths.push_back(new_p);
-                paths_to_cover.push_back(new_end);
-                from = pos + 1;
-                pos = p->FindFirst(end, from);
-            }
-        }
-        BidirectionalPath max = **new_paths.begin();
-        size_t covered_edges_max = 0;
-        size_t min_size = max.Size();
-        for (BidirectionalPath* p : new_paths) {
-            size_t cov_edges = 0;
-            for (BidirectionalPath* e : paths_to_cover) {
-                vector<size_t> poses = p->FindAll(e->Back());
-                for (size_t pos : poses) {
-                    if (EqualBegins(*p, pos, *e, e->Size() - 1, true)) {
-                        cov_edges++;
-                        break;
-                    }
-                }
-            }
-            if (cov_edges > covered_edges_max || (cov_edges == covered_edges_max && min_size > p->Size())) {
-                DEBUG("cov_e " << cov_edges << " s " << p->Size());
-                max.Clear();
-                max.PushBack(*p);
-                covered_edges_max = cov_edges;
-                min_size = max.Size();
-            }
-        }
-        for (BidirectionalPath* p : new_paths) {
-            delete p;
-        }
-        for (BidirectionalPath* p : paths_to_cover) {
-            delete p;
-        }
-        BidirectionalPath result = max.SubPath(path.Size());
-        DEBUG("res");
-        result.Print();
-        return result;
-    }
-
-    int CheckPairInfo(const BidirectionalPath& path, const BidirectionalPath& result_end, int to_add) const {
-        while (to_add < (int)result_end.Size()) {
-            map<size_t, double> weights = weight_counter_.FindPairInfoFromPath(path, 0, path.Size(), result_end, to_add, to_add + 1);
-            double weight_to_edge = 0.0;
-            for (auto iter = weights.begin(); iter != weights.end(); ++iter) {
-                weight_to_edge += iter->second;
-            }
-            if (math::gr(weight_to_edge, 0.0)) {
-                break;
-            }
-            to_add++;
-        }
-        return to_add;
-    }
-
-    EdgeContainer TryToScaffold(const BidirectionalPath& path, const vector<BidirectionalPath*>& paths) const {
-        if (paths.size() == 0) {
-            return EdgeContainer();
-        }
-        DEBUG("Simple Scaffolding")
-        for (BidirectionalPath* p : paths) {
-            p->Print();
-        }
-        BidirectionalPath max_end = simple_scaffolder_.FindMaxCommonPath(paths, search_dist_);
-        if (max_end.Size() == 0) {
-            return EdgeContainer();
-        }
-        BidirectionalPath result_end = ChooseFromEnds(path, paths, max_end);
-        int to_add = result_end.FindFirst(max_end);
-        result_end.Print();
-        EdgeContainer result;
-        to_add = CheckPairInfo(path, result_end, to_add);
-        if (to_add < 0 || to_add >= (int) result_end.Size()) {
-            return EdgeContainer();
-        }
-        size_t gap_length = result_end.Length() - result_end.LengthAt(to_add);
-        DEBUG(" edge to add " << g_.int_id(result_end.At(to_add)) << " with length " << gap_length);
-        result.push_back(EdgeWithDistance(result_end.At(to_add), gap_length));
-        return result;
-    }
-
-    const Graph& g_;
-    shared_ptr<PairedInfoLibrary> lib_;
-    size_t search_dist_;
-    mutable PathsWeightCounter weight_counter_;
-    const GraphCoverageMap cov_map_;
-    NextPathSearcher path_searcher_;
-    LongReadsUniqueEdgeAnalyzer unique_edge_analyzer_;
-    SimpleScaffolding simple_scaffolder_;
-
-    DECL_LOGGER("MatePairExtensionChooser");
-};
 
 class CoordinatedCoverageExtensionChooser: public ExtensionChooser {
 public:
@@ -1388,14 +992,19 @@ public:
     EdgeContainer Filter(const BidirectionalPath& path,
             const EdgeContainer& edges) const override {
 
-        if(path.Length() < min_path_len_) {
+        if (edges.size() < 2) {
+            DEBUG("If unique candidate has not been accepted by previous choosers better not to touch it");
+            return EdgeContainer();
+        }
+
+        if (path.Length() < min_path_len_) {
             DEBUG("Path is too short");
             return EdgeContainer();
         }
 
         double path_coverage = provider_.EstimatePathCoverage(path);
-        if (math::eq(path_coverage, -1.0)) {
-            DEBUG("Path coverage can't be calculated");
+        if (math::eq(path_coverage, -1.0) || math::le(path_coverage, 10.0)) {
+            DEBUG("Path coverage can't be calculated or is too low");
             return EdgeContainer();
         }
         DEBUG("Path coverage is " << path_coverage);
@@ -1434,16 +1043,14 @@ private:
             can_be_processed.pop();
             if (vertices_of_component.count(v) != 0) {
                 DEBUG("Component is too complex");
-                return GraphComponent<Graph>(g_, false);
+                return GraphComponent<Graph>::Empty(g_);
             }
             DEBUG("Adding vertex " << g_.str(v) << " to component set");
             vertices_of_component.insert(v);
             UpdateCanBeProcessed(v, can_be_processed, path_coverage);
         }
 
-        GraphComponent<Graph> gc(g_, vertices_of_component.begin(),
-                vertices_of_component.end());
-        return gc;
+        return GraphComponent<Graph>::FromVertices(g_, vertices_of_component);
     }
 
     EdgeContainer FinalFilter(const EdgeContainer& edges,
@@ -1468,7 +1075,7 @@ private:
         double answer = std::numeric_limits<double>::max();
 
         if (!CompatibleEdge(ext, path_coverage)) {
-            DEBUG("Extension coverage too low");
+            DEBUG("Extension coverage is too low");
             return answer;
         }
 
@@ -1492,7 +1099,7 @@ private:
         }
 
         DEBUG("Checking long sinks");
-        for (auto v : gc.sinks()) {
+        for (auto v : gc.exits()) {
             for (auto e : g_.OutgoingEdges(v)) {
                 if (g_.length(e) > max_edge_length_in_repeat_ && 
                         CompatibleEdge(e, path_coverage) &&
diff --git a/src/modules/algorithms/path_extend/ideal_pair_info.hpp b/src/common/modules/path_extend/ideal_pair_info.hpp
similarity index 100%
rename from src/modules/algorithms/path_extend/ideal_pair_info.hpp
rename to src/common/modules/path_extend/ideal_pair_info.hpp
diff --git a/src/modules/algorithms/path_extend/loop_traverser.hpp b/src/common/modules/path_extend/loop_traverser.hpp
similarity index 79%
rename from src/modules/algorithms/path_extend/loop_traverser.hpp
rename to src/common/modules/path_extend/loop_traverser.hpp
index 57eda57..40e451c 100644
--- a/src/modules/algorithms/path_extend/loop_traverser.hpp
+++ b/src/common/modules/path_extend/loop_traverser.hpp
@@ -25,9 +25,19 @@ class LoopTraverser {
 
     const Graph& g_;
     GraphCoverageMap& covMap_;
-    shared_ptr<ContigsMaker> extender_;
-    static const size_t MAX_EDGE_LENGTH = 1000;
+    size_t long_edge_limit_;
+    size_t component_size_limit_;
+    size_t shortest_path_limit_;
+    static const size_t DIJKSTRA_LIMIT = 3000;
 private:
+    bool AnyTipsInComponent(const GraphComponent<Graph>& component) const{
+        for(auto e : component.edges()) {
+            if (g_.IncomingEdgeCount(g_.EdgeStart(e)) == 0 || g_.OutgoingEdgeCount(g_.EdgeEnd(e)) == 0)
+                return true;
+        }
+        return false;
+    }
+
     EdgeId FindStart(const set<VertexId>& component_set) const{
         EdgeId result;
         for (auto it = component_set.begin(); it != component_set.end(); ++it) {
@@ -59,15 +69,6 @@ private:
         return result;
     }
 
-    void TryToGrow(BidirectionalPath* path, EdgeId component_entrance) {
-        BidirectionalPath clone = *path;
-        extender_->GrowPathSimple(*path);
-        if (!path->Contains(component_entrance)) {
-            DEBUG("Grown paths do not contain initial edges, rolling back");
-            path->Clear();
-            path->PushBack(clone);
-        }
-    }
 
     bool IsEndInsideComponent(const BidirectionalPath &path,
                               const set <VertexId> &component_set) {
@@ -101,7 +102,7 @@ private:
             return IsEndInsideComponent(path.SubPath((size_t) i + 1), component_set);
     }
 
-    void TraverseLoop(EdgeId start, EdgeId end, const set<VertexId>& component_set) {
+    bool TraverseLoop(EdgeId start, EdgeId end, const set<VertexId>& component_set) {
         DEBUG("start " << g_.int_id(start) << " end " << g_.int_id(end));
         BidirectionalPathSet coveredStartPaths =
                 covMap_.GetCoveringPaths(start);
@@ -111,33 +112,30 @@ private:
         for (auto it_path = coveredStartPaths.begin();
                 it_path != coveredStartPaths.end(); ++it_path) {
             if ((*it_path)->FindAll(end).size() > 0) {
-                return;
+                return false;
             }
         }
         if (coveredStartPaths.size() < 1 or coveredEndPaths.size() < 1) {
             DEBUG("TraverseLoop STRANGE SITUATION: start " << coveredStartPaths.size() << " end " << coveredEndPaths.size());
-            return;
+            return false;
         }
 
         if (coveredStartPaths.size() > 1 or coveredEndPaths.size() > 1) {
             DEBUG("Ambiguous situation in path joining, quitting");
-            return;
+            return false;
         }
 
         BidirectionalPath* startPath = *coveredStartPaths.begin();
         BidirectionalPath* endPath = *coveredEndPaths.begin();
         if ((*startPath) == endPath->Conjugate()){
-            return;
+            return false;
         }
 
-        //TryToGrow(startPath, start);
-        //TryToGrow(endPath->GetConjPath(), g_.conjugate(end));
-
         //Checking that paths ends are within component
         if (!IsEndInsideComponent(*startPath, start, component_set) ||
                 !IsEndInsideComponent(*endPath->GetConjPath(), g_.conjugate(end), component_set, true)) {
             DEBUG("Some path goes outside of the component")
-            return;
+            return false;
         }
 
         size_t commonSize = startPath->CommonEndSize(*endPath);
@@ -151,20 +149,19 @@ private:
             if (firstVertex == lastVertex) {
                 nLen = 0;
             } else {
-                DijkstraHelper<Graph>::BoundedDijkstra dijkstra(DijkstraHelper<Graph>::CreateBoundedDijkstra(g_, 1000, 3000));
+                DijkstraHelper<Graph>::BoundedDijkstra dijkstra(DijkstraHelper<Graph>::CreateBoundedDijkstra(g_, shortest_path_limit_,
+                                                                                                             DIJKSTRA_LIMIT));
                 dijkstra.Run(lastVertex);
                 vector<EdgeId> shortest_path = dijkstra.GetShortestPathTo(g_.EdgeStart(endPath->Front()));
 
-                if (shortest_path.size() == 0) {
+                if (shortest_path.empty()) {
                     DEBUG("Failed to find closing path");
-                    return;
+                    return false;
                 } else if (!IsEndInsideComponent(BidirectionalPath(g_, shortest_path), component_set)) {
                     DEBUG("Closing path is outside the component");
-                    return;
+                    return false;
                 } else {
-                    for (size_t i = 0; i < shortest_path.size(); ++i) {
-                        nLen += g_.length(shortest_path[i]);
-                    }
+                    nLen = CumulativeLength(g_, shortest_path);
                 }
             }
         }
@@ -180,11 +177,12 @@ private:
         DEBUG("conj");
         endPath->GetConjPath()->Print();
         endPath->Clear();
+        return true;
     }
 
     bool ContainsLongEdges(const GraphComponent<Graph>& component) const {
         for(auto e : component.edges()) {
-            if(g_.length(e) > MAX_EDGE_LENGTH) {
+            if(g_.length(e) > long_edge_limit_) {
                 return true;
             }
         }
@@ -192,29 +190,35 @@ private:
     }
 
 public:
-    LoopTraverser(const Graph& g, GraphCoverageMap& coverageMap, shared_ptr<ContigsMaker> extender) :
-            g_(g), covMap_(coverageMap), extender_(extender) {
+    LoopTraverser(const Graph& g, GraphCoverageMap& coverageMap, size_t long_edge_limit, size_t component_size_limit, size_t shortest_path_limit) :
+            g_(g), covMap_(coverageMap), long_edge_limit_(long_edge_limit), component_size_limit_(component_size_limit), shortest_path_limit_(shortest_path_limit) {
     }
 
-    void TraverseAllLoops() {
+    size_t TraverseAllLoops() {
         DEBUG("TraverseAllLoops");
-        shared_ptr<GraphSplitter<Graph>> splitter = LongEdgesExclusiveSplitter<Graph>(g_, MAX_EDGE_LENGTH);
+        size_t traversed = 0;
+        shared_ptr<GraphSplitter<Graph>> splitter = LongEdgesExclusiveSplitter<Graph>(g_, long_edge_limit_);
         while (splitter->HasNext()) {
             GraphComponent<Graph> component = splitter->Next();
-            if (component.v_size() > 10)
+            if (component.v_size() > component_size_limit_)
+                continue;
+            if (ContainsLongEdges(component))
                 continue;
-            if(ContainsLongEdges(component))
+            if (AnyTipsInComponent(component))
                 continue;
+
             set<VertexId> component_set(component.v_begin(), component.v_end());
             EdgeId start = FindStart(component_set);
             EdgeId finish = FindFinish(component_set);
             if (start == EdgeId() || finish == EdgeId()) {
                 continue;
             }
-            TraverseLoop(start, finish, component_set);
+            if (TraverseLoop(start, finish, component_set))
+                ++traversed;
         }
-
+        return traversed;
     }
+
 protected:
     DECL_LOGGER("LoopTraverser");
 };
diff --git a/src/modules/algorithms/path_extend/overlap_analysis.hpp b/src/common/modules/path_extend/overlap_analysis.hpp
similarity index 96%
rename from src/modules/algorithms/path_extend/overlap_analysis.hpp
rename to src/common/modules/path_extend/overlap_analysis.hpp
index b119a7d..3c3178f 100644
--- a/src/modules/algorithms/path_extend/overlap_analysis.hpp
+++ b/src/common/modules/path_extend/overlap_analysis.hpp
@@ -1,7 +1,7 @@
 #pragma once
 
-#include "dev_support/logger/logger.hpp"
-#include "dev_support/range.hpp"
+#include "utils/logger/logger.hpp"
+#include "utils/range.hpp"
 #include "ssw/ssw_cpp.h"
 
 namespace debruijn_graph {
@@ -42,7 +42,7 @@ struct OverlapInfo {
     }
 };
 
-std::ostream& operator<<(std::ostream& os, const OverlapInfo& info) {
+inline std::ostream& operator<<(std::ostream& os, const OverlapInfo& info) {
     return os << "R1: [" << info.r1.start_pos << ", " << info.r1.end_pos
             << "]; R2: [" << info.r2.start_pos << ", " << info.r2.end_pos << "]"
             << "; match_cnt: " << info.match_cnt;
diff --git a/src/modules/algorithms/path_extend/paired_library.hpp b/src/common/modules/path_extend/paired_library.hpp
similarity index 69%
rename from src/modules/algorithms/path_extend/paired_library.hpp
rename to src/common/modules/path_extend/paired_library.hpp
index f176ab9..2b22da0 100644
--- a/src/modules/algorithms/path_extend/paired_library.hpp
+++ b/src/common/modules/path_extend/paired_library.hpp
@@ -12,8 +12,7 @@
  *      Author: andrey
  */
 
-#ifndef PAIRED_LIBRARY_HPP_
-#define PAIRED_LIBRARY_HPP_
+#pragma once
 
 #include "pipeline/graph_pack.hpp"
 #include "paired_info/paired_info.hpp"
@@ -27,32 +26,28 @@ using debruijn_graph::Graph;
 using debruijn_graph::EdgeId;
 
 using omnigraph::de::PairedInfoIndexT;
-typedef omnigraph::de::PairInfo<EdgeId> DePairInfo;
 using omnigraph::de::Point;
 
-struct PairedInfoLibrary {
-    PairedInfoLibrary(size_t k, const Graph& g, size_t readS, size_t is,
-                      size_t is_min, size_t is_max, size_t is_var,
+class PairedInfoLibrary {
+public:
+    PairedInfoLibrary(size_t k, const Graph& g, size_t read_size, size_t is,
+                      size_t is_min, size_t is_max, double is_var,
                       bool is_mp,
                       const std::map<int, size_t>& is_distribution)
             : g_(g),
               k_(k),
-              read_size_(readS),
+              read_size_(read_size),
               is_(is),
               is_min_(is_min),
               is_max_(is_max),
               is_var_(is_var),
               is_mp_(is_mp),
-              single_threshold_(-1.0),
-              coverage_coeff_(1.0),
-              ideal_pi_counter_(g, (int) is_min, (int) is_max, readS, is_distribution) {
+              ideal_pi_counter_(g, (int) is_min, (int) is_max,
+                                read_size, is_distribution) {
     }
 
     virtual ~PairedInfoLibrary() {}
 
-    void SetCoverage(double cov) { coverage_coeff_ = cov; }
-    void SetSingleThreshold(double threshold) { single_threshold_ = threshold; }
-
     virtual size_t FindJumpEdges(EdgeId e, set<EdgeId>& result, int min_dist, int max_dist, size_t min_len = 0) const = 0;
     virtual void CountDistances(EdgeId e1, EdgeId e2, vector<int>& dist, vector<double>& w) const = 0;
     virtual double CountPairedInfo(EdgeId e1, EdgeId e2, int distance, bool from_interval = false) const = 0;
@@ -62,35 +57,31 @@ struct PairedInfoLibrary {
         return ideal_pi_counter_.IdealPairedInfo(e1, e2, distance, additive);
     }
 
+    size_t GetIS() const { return is_; }
     size_t GetISMin() const { return is_min_; }
-    double GetSingleThreshold() const { return single_threshold_; }
-    double GetCoverageCoeff() const { return coverage_coeff_; }
     size_t GetISMax() const { return is_max_; }
-    size_t GetIsVar() const { return is_var_; }
-    size_t GetLeftVar() const { return is_ - is_min_; }
-    size_t GetRightVar() const { return is_max_ - is_; }
-    size_t GetReadSize() const { return read_size_; }
+    double GetIsVar() const { return is_var_; }
     bool IsMp() const { return is_mp_; }
 
+protected:
     const Graph& g_;
     size_t k_;
     size_t read_size_;
     size_t is_;
     size_t is_min_;
     size_t is_max_;
-    size_t is_var_;
+    double is_var_;
     bool is_mp_;
-    double single_threshold_;
-    double coverage_coeff_;
     IdealPairInfoCounter ideal_pi_counter_;
-protected:
     DECL_LOGGER("PathExtendPI");
 };
 
 template<class Index>
-struct PairedInfoLibraryWithIndex : public PairedInfoLibrary {
+class PairedInfoLibraryWithIndex : public PairedInfoLibrary {
+    const Index& index_;
 
-    PairedInfoLibraryWithIndex(size_t k, const Graph& g, size_t readS, size_t is, size_t is_min, size_t is_max, size_t is_div,
+public:
+    PairedInfoLibraryWithIndex(size_t k, const Graph& g, size_t readS, size_t is, size_t is_min, size_t is_max, double is_div,
                                const Index& index, bool is_mp,
                                const std::map<int, size_t>& is_distribution)
         : PairedInfoLibrary(k, g, readS, is, is_min, is_max, is_div, is_mp, is_distribution),
@@ -167,13 +158,29 @@ struct PairedInfoLibraryWithIndex : public PairedInfoLibrary {
         return weight;
     }
 
-    const Index& index_;
-protected:
-    DECL_LOGGER("PathExtendPI");
 };
 
-typedef std::vector<shared_ptr<PairedInfoLibrary> > PairedInfoLibraries;
+template<class Index>
+shared_ptr<PairedInfoLibrary> MakeNewLib(const Graph& g,
+                                         const debruijn_graph::config::dataset::Library &lib,
+                                         const Index &paired_index) {
+    //why all those local variables? :)
+    size_t read_length = lib.data().read_length;
+    size_t is = (size_t) lib.data().mean_insert_size;
+    int is_min = (int) lib.data().insert_size_left_quantile;
+    int is_max = (int) lib.data().insert_size_right_quantile;
+    double var = lib.data().insert_size_deviation;
+    bool is_mp = lib.type() == io::LibraryType::MatePairs || lib.type() == io::LibraryType::HQMatePairs;
+    return make_shared<PairedInfoLibraryWithIndex<decltype(paired_index)>>(g.k(),
+                                                                           g,
+                                                                           read_length,
+                                                                           is,
+                                                                           is_min > 0 ? size_t(is_min) : 0,
+                                                                           is_max > 0 ? size_t(is_max) : 0,
+                                                                           var,
+                                                                           paired_index,
+                                                                           is_mp,
+                                                                           lib.data().insert_size_distribution);
+}
 
 }  // path extend
-
-#endif /* PAIRED_LIBRARY_HPP_ */
diff --git a/src/modules/algorithms/path_extend/path_extender.hpp b/src/common/modules/path_extend/path_extender.hpp
similarity index 91%
rename from src/modules/algorithms/path_extend/path_extender.hpp
rename to src/common/modules/path_extend/path_extender.hpp
index 0c8bda5..14ce6e4 100644
--- a/src/modules/algorithms/path_extend/path_extender.hpp
+++ b/src/common/modules/path_extend/path_extender.hpp
@@ -13,14 +13,12 @@
 
 #pragma once
 
-
 #include "extension_chooser.hpp"
 #include "path_filter.hpp"
 #include "overlap_analysis.hpp"
 #include "assembly_graph/graph_support/scaff_supplementary.hpp"
 #include <cmath>
 
-
 namespace path_extend {
 
 class ShortLoopResolver {
@@ -93,8 +91,9 @@ public:
             double in_cov = gp_.flanking_cov.GetOutCov(e_in); //g_.coverage(e_in);
             double out_cov = gp_.flanking_cov.GetInCov(e_out); //g_.coverage(e_out);
             double cov = (in_cov + out_cov) / 2.0;
-            double time1 = math::round(gp_.flanking_cov.GetInCov(e1) / cov);//math::round(gp_.g.coverage(e1) / cov);
-            double time2 = math::round(gp_.flanking_cov.GetInCov(e2) / cov);////math::round(gp_.g.coverage(e2) / cov);
+            //what are time variables???
+            double time1 = math::round(gp_.g.coverage(e1) / cov);
+            double time2 = math::round(gp_.g.coverage(e2) / cov);
             size_t time = (size_t) std::max(0.0, std::min(time1 - 1.0, time2));
             for (size_t i = 0; i < time; ++i) {
                 MakeCycleStep(path, edges.first);
@@ -143,11 +142,19 @@ class LoopResolver : public ShortLoopResolver {
     static const size_t ITER_COUNT = 10;
     const WeightCounter& wc_;
 
+private:
+    bool CheckLoopPlausible(EdgeId forward_loop_edge, EdgeId backward_loop_edge) const {
+        size_t single_loop_length = 2 * g_.length(forward_loop_edge) + g_.length(backward_loop_edge);
+        return single_loop_length <= wc_.get_libptr()->GetISMax();
+    }
+
 public:
     LoopResolver(const Graph& g, const WeightCounter& wc)
             : ShortLoopResolver(g),
               wc_(wc) { }
-
+    //This code works only if loop wasn't fairly resolved
+    //
+    //Weird interface; need comments
     void MakeBestChoice(BidirectionalPath& path, pair<EdgeId, EdgeId>& edges) const {
         UndoCycles(path, edges.first);
         BidirectionalPath experiment(path);
@@ -161,17 +168,25 @@ public:
                 weight = wc_.CountWeight(experiment, edges.second);
                 double weight2 = wc_.CountWeight(experiment, edges.first);
                 if (weight > max_weight || (weight == max_weight && weight - weight2 > diff)
-                        || (weight == max_weight && weight - weight2 == diff && i == 1)) {
+                    || (weight == max_weight && weight - weight2 == diff && i == 1)) {
                     max_weight = weight;
                     maxIter = i;
                     diff = weight - weight2;
                 }
             }
         }
-        for (size_t i = 0; i < maxIter; ++i) {
+
+        if (!CheckLoopPlausible(path.Back(), edges.first) && maxIter > 0) {
             MakeCycleStep(path, edges.first);
+            path.PushBack(edges.second, int(g_.k() + 100));
         }
-        path.PushBack(edges.second);
+        else {
+            for (size_t i = 0; i < maxIter; ++i) {
+                MakeCycleStep(path, edges.first);
+            }
+            path.PushBack(edges.second);
+        }
+
     }
 
     void ResolveShortLoop(BidirectionalPath& path) const override {
@@ -182,6 +197,7 @@ public:
             DEBUG("Resolving short loop done");
         }
     }
+
 };
 
 class GapJoiner {
@@ -484,7 +500,7 @@ public:
         DEBUG("Max flank length - " << max_flank_length);
 
         if ((double) max_flank_length * flank_multiplication_coefficient_
-                + flank_addition_coefficient_ > overlap_info.size()) {
+                + flank_addition_coefficient_ > (double) overlap_info.size()) {
             DEBUG("Too long flanks for such alignment");
             return Gap(INVALID_GAP);
         }
@@ -494,7 +510,7 @@ public:
             return Gap(INVALID_GAP);
         }
 
-        if ((g_.length(source) + g_.k())  - overlap_info.r1.end_pos > g_.length(source)) {
+        if (g_.k() + 1 > overlap_info.r1.end_pos) {
             DEBUG("Save kmers. Don't want to have edges shorter than k");
             return Gap(INVALID_GAP);
         }
@@ -602,16 +618,19 @@ inline Gap MimicLAGapJoiner(Sequence& s1, Sequence& s2) {
 //Detects a cycle as a minsuffix > IS present earlier in the path. Overlap is allowed.
 class InsertSizeLoopDetector {
 protected:
-    const Graph& g_;
-    const GraphCoverageMap& cov_map_;
+    GraphCoverageMap visited_cycles_coverage_map_;
+    PathContainer path_storage_;
     size_t min_cycle_len_;
 
 public:
-    InsertSizeLoopDetector(const Graph& g, const GraphCoverageMap& cov_map, size_t is): g_(g), cov_map_(cov_map), min_cycle_len_(is) {
+    InsertSizeLoopDetector(const Graph& g, size_t is):
+        visited_cycles_coverage_map_(g),
+        path_storage_(),
+        min_cycle_len_(is) {
     }
 
-    size_t GetMinCycleLenth() const {
-        return min_cycle_len_;
+    ~InsertSizeLoopDetector() {
+        path_storage_.DeleteAllPaths();
     }
 
     bool CheckCycledNonIS(const BidirectionalPath& path) const {
@@ -672,6 +691,65 @@ public:
         DEBUG("result pos " <<pos);
         return pos;
     }
+
+    // NOTE: this check seems to be out of date
+    bool InExistingLoop(const BidirectionalPath& path) {
+        DEBUG("Checking existing loops");
+        auto visited_cycles = visited_cycles_coverage_map_.GetEdgePaths(path.Back());
+        for (auto cycle : *visited_cycles) {
+            DEBUG("checking  cycle ");
+            int pos = path.FindLast(*cycle);
+            if (pos == -1)
+                continue;
+
+            int start_cycle_pos = pos + (int) cycle->Size();
+            bool only_cycles_in_tail = true;
+            int last_cycle_pos = start_cycle_pos;
+            DEBUG("start_cycle pos "<< last_cycle_pos);
+            for (int i = start_cycle_pos; i < (int) path.Size() - (int) cycle->Size(); i += (int) cycle->Size()) {
+                if (!path.CompareFrom(i, *cycle)) {
+                    only_cycles_in_tail = false;
+                    break;
+                } else {
+                    last_cycle_pos = i + (int) cycle->Size();
+                    DEBUG("last cycle pos changed " << last_cycle_pos);
+                }
+            }
+            DEBUG("last_cycle_pos " << last_cycle_pos);
+            only_cycles_in_tail = only_cycles_in_tail && cycle->CompareFrom(0, path.SubPath(last_cycle_pos));
+            if (only_cycles_in_tail) {
+// seems that most of this is useless, checking
+                VERIFY (last_cycle_pos == start_cycle_pos);
+                DEBUG("find cycle " << last_cycle_pos);
+                DEBUG("path");
+                path.Print();
+                DEBUG("last subpath");
+                path.SubPath(last_cycle_pos).Print();
+                DEBUG("cycle");
+                cycle->Print();
+                DEBUG("last_cycle_pos " << last_cycle_pos << " path size " << path.Size());
+                VERIFY(last_cycle_pos <= (int)path.Size());
+                DEBUG("last cycle pos + cycle " << last_cycle_pos + (int)cycle->Size());
+                VERIFY(last_cycle_pos + (int)cycle->Size() >= (int)path.Size());
+
+                return true;
+            }
+        }
+        return false;
+    }
+
+    void AddCycledEdges(const BidirectionalPath& path, size_t pos) {
+        if (pos >= path.Size()) {
+            DEBUG("Wrong position in IS cycle");
+            return;
+        }
+        BidirectionalPath * p = new BidirectionalPath(path.SubPath(pos));
+        BidirectionalPath * cp = new BidirectionalPath(p->Conjugate());
+        visited_cycles_coverage_map_.Subscribe(p);
+        visited_cycles_coverage_map_.Subscribe(cp);
+        DEBUG("add cycle");
+        p->Print();
+    }
 };
 
 class RepeatDetector {
@@ -817,7 +895,7 @@ protected:
 
 class CompositeExtender : public ContigsMaker {
 public:
-    CompositeExtender(Graph & g, GraphCoverageMap& cov_map,
+    CompositeExtender(const Graph &g, GraphCoverageMap& cov_map,
                       size_t max_diff_len,
                       size_t max_repeat_length,
                       bool detect_repeats_online)
@@ -830,7 +908,7 @@ public:
               detect_repeats_online_(detect_repeats_online) {
     }
 
-    CompositeExtender(Graph & g, GraphCoverageMap& cov_map,
+    CompositeExtender(const Graph & g, GraphCoverageMap& cov_map,
                       vector<shared_ptr<PathExtender> > pes,
                       const ScaffoldingUniqueEdgeStorage& unique,
                       size_t max_diff_len,
@@ -870,7 +948,9 @@ public:
         while (MakeGrowStep(path, paths_storage, false)) { }
     }
 
-    bool MakeGrowStep(BidirectionalPath& path, PathContainer* paths_storage, bool detect_repeats_online_local = true) {
+
+    bool MakeGrowStep(BidirectionalPath& path, PathContainer* paths_storage,
+                      bool detect_repeats_online_local = true) {
         DEBUG("make grow step composite extender");
         if (detect_repeats_online_ && detect_repeats_online_local) {
             BidirectionalPath *repeat_path = repeat_detector_.RepeatPath(path);
@@ -940,24 +1020,25 @@ private:
     void SubscribeCoverageMap(BidirectionalPath * path) {
         path->Subscribe(&cover_map_);
         for (size_t i = 0; i < path->Size(); ++i) {
-            cover_map_.BackEdgeAdded(path->At(i), path, path->GapAt(i));
+            cover_map_.BackEdgeAdded(path->At(i), path, path->GapInfoAt(i));
         }
     }
 
     void GrowAllPaths(PathContainer& paths, PathContainer& result) {
-        cover_map_.Clear();
         for (size_t i = 0; i < paths.size(); ++i) {
             VERBOSE_POWER_T2(i, 100, "Processed " << i << " paths from " << paths.size() << " (" << i * 100 / paths.size() << "%)");
             if (paths.size() > 10 && i % (paths.size() / 10 + 1) == 0) {
                 INFO("Processed " << i << " paths from " << paths.size() << " (" << i * 100 / paths.size() << "%)");
             }
-//In 2015 modes do not use a seed already used in paths.
+            //In 2015 modes do not use a seed already used in paths.
             if (used_storage_->UniqueCheckEnabled()) {
                 bool was_used = false;
                 for (size_t ind =0; ind < paths.Get(i)->Size(); ind++) {
                     EdgeId eid = paths.Get(i)->At(ind);
                     if (used_storage_->IsUsedAndUnique(eid)) {
-                        was_used = true; break;
+                        DEBUG("Used edge " << g_.int_id(eid));
+                        was_used = true;
+                        break;
                     } else {
                         used_storage_->insert(eid);
                     }
@@ -967,7 +1048,7 @@ private:
                     continue;
                 }
             }
-//TODO: coverage_map should be exterminated
+
             if (!cover_map_.IsCovered(*paths.Get(i))) {
                 BidirectionalPath * path = new BidirectionalPath(*paths.Get(i));
                 BidirectionalPath * conjugatePath = new BidirectionalPath(*paths.GetConjugate(i));
@@ -991,38 +1072,32 @@ private:
 
 };
 
-//All Path-Extenders inherits this one.
-
+//All Path-Extenders inherit this one
 class LoopDetectingPathExtender : public PathExtender {
 
 protected:
-    size_t maxLoops_;
     bool investigate_short_loops_;
     bool use_short_loop_cov_resolver_;
     CovShortLoopResolver cov_loop_resolver_;
 
-    vector<shared_ptr<BidirectionalPath> > visited_cycles_;
     InsertSizeLoopDetector is_detector_;
     const GraphCoverageMap& cov_map_;
 
 public:
-    LoopDetectingPathExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map, size_t max_loops,
-                              bool investigate_short_loops,
-                              bool use_short_loop_cov_resolver, size_t is)
+    LoopDetectingPathExtender(const conj_graph_pack &gp,
+                                  const GraphCoverageMap &cov_map,
+                                  bool investigate_short_loops,
+                                  bool use_short_loop_cov_resolver,
+                                  size_t is)
             : PathExtender(gp.g),
-              maxLoops_(max_loops),
               investigate_short_loops_(investigate_short_loops),
               use_short_loop_cov_resolver_(use_short_loop_cov_resolver),
               cov_loop_resolver_(gp),
-              is_detector_(gp.g, cov_map, is),
+              is_detector_(gp.g, is),
               cov_map_(cov_map) {
 
     }
 
-    size_t getMaxLoops() const {
-        return maxLoops_;
-    }
-
     bool isInvestigateShortLoops() const {
         return investigate_short_loops_;
     }
@@ -1031,67 +1106,6 @@ public:
         this->investigate_short_loops_ = investigateShortLoops;
     }
 
-    void setMaxLoops(size_t maxLoops) {
-        if (maxLoops != 0) {
-            this->maxLoops_ = maxLoops;
-        }
-    }
-//seems that it is outofdate
-    bool InExistingLoop(const BidirectionalPath& path) {
-        TRACE("Checking existing loops");
-        int j = 0;
-        for (auto cycle : visited_cycles_) {
-            VERBOSE_POWER2(j++, "checking ");
-            int pos = path.FindLast(*cycle);
-            if (pos == -1)
-                continue;
-
-            int start_cycle_pos = pos + (int) cycle->Size();
-            bool only_cycles_in_tail = true;
-            int last_cycle_pos = start_cycle_pos;
-            DEBUG("start_cycle pos "<< last_cycle_pos);
-            for (int i = start_cycle_pos; i < (int) path.Size() - (int) cycle->Size(); i += (int) cycle->Size()) {
-                if (!path.CompareFrom(i, *cycle)) {
-                    only_cycles_in_tail = false;
-                    break;
-                } else {
-                    last_cycle_pos = i + (int) cycle->Size();
-                    DEBUG("last cycle pos changed " << last_cycle_pos);
-                }
-            }
-            DEBUG("last_cycle_pos " << last_cycle_pos);
-            only_cycles_in_tail = only_cycles_in_tail && cycle->CompareFrom(0, path.SubPath(last_cycle_pos));
-            if (only_cycles_in_tail) {
-// seems that most of this is useless, checking
-                VERIFY (last_cycle_pos == start_cycle_pos);
-                DEBUG("find cycle " << last_cycle_pos);
-                DEBUG("path");
-                path.Print();
-                DEBUG("last subpath");
-                path.SubPath(last_cycle_pos).Print();
-                DEBUG("cycle");
-                cycle->Print();
-                DEBUG("last_cycle_pos " << last_cycle_pos << " path size " << path.Size());
-                VERIFY(last_cycle_pos <= (int)path.Size());
-                DEBUG("last cycle pos + cycle " << last_cycle_pos + (int)cycle->Size());
-                VERIFY(last_cycle_pos + (int)cycle->Size() >= (int)path.Size());
-
-                return true;
-            }
-        }
-        return false;
-    }
-
-    void AddCycledEdges(const BidirectionalPath& path, size_t pos) {
-        if (pos >= path.Size()) {
-            DEBUG("Wrong position in IS cycle");
-            return;
-        }
-        visited_cycles_.push_back(std::make_shared<BidirectionalPath>(path.SubPath(pos)));
-        DEBUG("add cycle");
-        path.SubPath(pos).Print();
-    }
-
     bool DetectCycle(BidirectionalPath& path) {
         DEBUG("detect cycle");
         if (is_detector_.CheckCycled(path)) {
@@ -1099,7 +1113,7 @@ public:
             int loop_pos = is_detector_.RemoveCycle(path);
             DEBUG("Removed IS cycle");
             if (loop_pos != -1) {
-                AddCycledEdges(path, loop_pos);
+                is_detector_.AddCycledEdges(path, loop_pos);
                 return true;
             }
         }
@@ -1121,11 +1135,11 @@ public:
     }
 
     bool MakeGrowStep(BidirectionalPath& path, PathContainer* paths_storage) override {
-        if (InExistingLoop(path)) {
+        if (is_detector_.InExistingLoop(path)) {
             DEBUG("in existing loop");
             return false;
         }
-        bool result = false;
+        bool result;
         LoopDetector loop_detector(&path, cov_map_);
         if (DetectCycle(path)) {
             result = false;
@@ -1193,9 +1207,13 @@ protected:
 
 public:
 
-    SimpleExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map, shared_ptr<ExtensionChooser> ec, 
-                    size_t is, size_t max_loops, bool investigate_short_loops, bool use_short_loop_cov_resolver):
-        LoopDetectingPathExtender(gp, cov_map, max_loops, investigate_short_loops, use_short_loop_cov_resolver, is),
+    SimpleExtender(const conj_graph_pack &gp,
+                       const GraphCoverageMap &cov_map,
+                       shared_ptr<ExtensionChooser> ec,
+                       size_t is,
+                       bool investigate_short_loops,
+                       bool use_short_loop_cov_resolver) :
+        LoopDetectingPathExtender(gp, cov_map, investigate_short_loops, use_short_loop_cov_resolver, is),
         extensionChooser_(ec) {
     }
 
@@ -1312,33 +1330,40 @@ protected:
 
 public:
 
-    MultiExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map, shared_ptr<ExtensionChooser> ec,
-                  size_t is, size_t max_loops, bool investigate_short_loops, bool use_short_loop_cov_resolver,
-                  size_t max_candidates = 0):
-        SimpleExtender(gp, cov_map, ec, is, max_loops, investigate_short_loops, use_short_loop_cov_resolver),
+    MultiExtender(const conj_graph_pack &gp,
+                      const GraphCoverageMap &cov_map,
+                      shared_ptr<ExtensionChooser> ec,
+                      size_t is,
+                      bool investigate_short_loops,
+                      bool use_short_loop_cov_resolver,
+                      size_t max_candidates = 0) :
+        SimpleExtender(gp, cov_map, ec, is, investigate_short_loops, use_short_loop_cov_resolver),
         max_candidates_(max_candidates) {
     }
 
 protected:
     virtual bool AddCandidates(BidirectionalPath& path, PathContainer* paths_storage, ExtensionChooser::EdgeContainer& candidates) override {
-        bool res = false;
+        if (candidates.size() == 0)
+            return false;
 
+        bool res = false;
+        LoopDetector loop_detector(&path, cov_map_);
+        DEBUG("loop detecor");
+        if (!investigate_short_loops_ &&
+            (loop_detector.EdgeInShortLoop(path.Back()) or loop_detector.EdgeInShortLoop(candidates.back().e_))
+            && extensionChooser_->WeightCounterBased()) {
+	    DEBUG("loop deteced");
+            return false;
+        }
         if (candidates.size() == 1) {
-            LoopDetector loop_detector(&path, cov_map_);
-            DEBUG("loop detecor");
-            if (!investigate_short_loops_ &&
-                (loop_detector.EdgeInShortLoop(path.Back()) or loop_detector.EdgeInShortLoop(candidates.back().e_))
-                && extensionChooser_->WeightCounterBased()) {
-                return false;
-            }
             DEBUG("push");
             EdgeId eid = candidates.back().e_;
             path.PushBack(eid, candidates.back().d_);
             DEBUG("push done");
             return true;
         }
-        else if (candidates.size() == 2 && (max_candidates_ == 0 || candidates.size() <= max_candidates_)) {
-            //Check for bulge
+        else if (candidates.size() == 2) {
+             //Check for bulge
             auto v = g_.EdgeStart(candidates.front().e_);
             auto u = g_.EdgeEnd(candidates.front().e_);
             for (auto edge : candidates) {
@@ -1346,33 +1371,23 @@ protected:
                     return false;
             }
 
-            LoopDetector loop_detector(&path, cov_map_);
-            DEBUG("loop detector");
-            if (!investigate_short_loops_ && loop_detector.EdgeInShortLoop(path.Back())
-                && extensionChooser_->WeightCounterBased()) {
-                return false;
-            }
-//First candidate is adding to THIS path.
-            else if (not (!investigate_short_loops_ && loop_detector.EdgeInShortLoop(candidates.front().e_)
-                && extensionChooser_->WeightCounterBased())) {
-                DEBUG("push");
-                path.PushBack(candidates.front().e_, candidates.front().d_);
-                DEBUG("push done");
-                res = true;
+            //Create a new path for each candidate other than the first.
+            for (size_t i = 1; i < candidates.size(); ++i) {
+                DEBUG("push other candidates " << i);
+                BidirectionalPath *p = new BidirectionalPath(path);
+                p->PushBack(candidates[i].e_, candidates[i].d_);
+                BidirectionalPath *cp = new BidirectionalPath(p->Conjugate());
+                paths_storage->AddPair(p, cp);
             }
+
+            DEBUG("push");
+            path.PushBack(candidates.front().e_, candidates.front().d_);
+            DEBUG("push done");
+            res = true;
+
             if (candidates.size() > 1) {
                 DEBUG("Found " << candidates.size() << " candidates");
             }
-//Creating new paths for other than new candidate.
-            for (size_t i = 1; i < candidates.size(); ++i) {
-                if (not (!investigate_short_loops_ && loop_detector.EdgeInShortLoop(candidates.front().e_)
-                    && extensionChooser_->WeightCounterBased())) {
-                    BidirectionalPath *p = new BidirectionalPath(path);
-                    p->PushBack(candidates[i].e_, candidates[i].d_);
-                    BidirectionalPath *cp = new BidirectionalPath(p->Conjugate());
-                    paths_storage->AddPair(p, cp);
-                }
-            }
         }
 
         return res;
@@ -1489,16 +1504,15 @@ protected:
 
 public:
 
-    ScaffoldingPathExtender(const conj_graph_pack& gp,
-                            const GraphCoverageMap& cov_map,
+    ScaffoldingPathExtender(const conj_graph_pack &gp,
+                            const GraphCoverageMap &cov_map,
                             std::shared_ptr<ExtensionChooser> extension_chooser,
                             std::shared_ptr<GapJoiner> gap_joiner,
                             size_t is,
-                            size_t max_loops,
                             bool investigate_short_loops,
                             bool avoid_rc_connections,
                             bool check_sink = true):
-        LoopDetectingPathExtender(gp, cov_map, max_loops, investigate_short_loops, false, is),
+        LoopDetectingPathExtender(gp, cov_map, investigate_short_loops, false, is),
         extension_chooser_(extension_chooser),
         gap_joiner_(gap_joiner),
         avoid_rc_connections_(avoid_rc_connections),
@@ -1540,14 +1554,15 @@ protected:
 
 public:
 
-    RNAScaffoldingPathExtender(const conj_graph_pack& gp, const GraphCoverageMap& cov_map, std::shared_ptr<ExtensionChooser> extension_chooser,
+    RNAScaffoldingPathExtender(const conj_graph_pack &gp,
+                               const GraphCoverageMap &cov_map,
+                               std::shared_ptr<ExtensionChooser> extension_chooser,
                                std::shared_ptr<ExtensionChooser> strict_extension_chooser,
                                std::shared_ptr<GapJoiner> gap_joiner,
                                size_t is,
-                               size_t max_loops,
                                bool investigate_short_loops,
                                int min_overlap = 0):
-        ScaffoldingPathExtender(gp, cov_map, extension_chooser, gap_joiner, is, max_loops, investigate_short_loops, true),
+        ScaffoldingPathExtender(gp, cov_map, extension_chooser, gap_joiner, is, investigate_short_loops, true),
         strict_extension_chooser_(strict_extension_chooser), min_overlap_(min_overlap) {}
 
 
diff --git a/src/modules/algorithms/path_extend/path_filter.hpp b/src/common/modules/path_extend/path_filter.hpp
similarity index 81%
rename from src/modules/algorithms/path_extend/path_filter.hpp
rename to src/common/modules/path_extend/path_filter.hpp
index fa19ce9..b012dd3 100644
--- a/src/modules/algorithms/path_extend/path_filter.hpp
+++ b/src/common/modules/path_extend/path_filter.hpp
@@ -22,10 +22,10 @@ namespace path_extend {
 class CopyOnWritePathFilter {
 
 protected:
-    Graph& g;
+    const Graph& g;
 
 public:
-    CopyOnWritePathFilter(Graph& g_): g(g_) {
+    CopyOnWritePathFilter(const Graph& g_): g(g_) {
     }
 
     virtual bool predicate(BidirectionalPath& path) = 0;
@@ -52,7 +52,7 @@ protected:
 
 public:
 
-    IdFilter(Graph& g_, std::set<size_t> ids_): CopyOnWritePathFilter(g_), ids(ids_) {
+    IdFilter(const Graph& g_, std::set<size_t> ids_): CopyOnWritePathFilter(g_), ids(ids_) {
     }
 
     virtual bool predicate(BidirectionalPath& path) {
@@ -61,6 +61,34 @@ public:
 };
 
 
+class DuplicateFilter {
+
+protected:
+    const Graph& g;
+
+public:
+    DuplicateFilter(const Graph& g_): g(g_) {
+    }
+
+    PathContainer filter(PathContainer& paths) {
+        PathContainer result;
+
+        for (size_t i = 0; i < paths.size(); ++i) {
+            bool duplicate = false;
+            for (size_t j = 0; j < result.size(); ++j) {
+                if (result[j] == paths[j])
+                    duplicate = true;
+            }
+            if (!duplicate) {
+                result.AddPair(paths.Get(i), paths.GetConjugate(i));
+            }
+        }
+
+        return result;
+    }
+
+};
+
 class ErasingPathFilter {
 
 protected:
diff --git a/src/modules/algorithms/path_extend/path_visualizer.hpp b/src/common/modules/path_extend/path_visualizer.hpp
similarity index 65%
rename from src/modules/algorithms/path_extend/path_visualizer.hpp
rename to src/common/modules/path_extend/path_visualizer.hpp
index abcd4ad..b11d4c2 100644
--- a/src/modules/algorithms/path_extend/path_visualizer.hpp
+++ b/src/common/modules/path_extend/path_visualizer.hpp
@@ -23,8 +23,8 @@ namespace path_extend {
 using namespace debruijn_graph;
 
 template<class Graph>
-class PathGraphLabeler : public AbstractGraphLabeler<Graph> {
-    typedef AbstractGraphLabeler<Graph> base;
+class PathGraphLabeler : public visualization::graph_labeler::AbstractGraphLabeler<Graph> {
+    typedef visualization::graph_labeler::AbstractGraphLabeler<Graph> base;
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 
@@ -79,21 +79,21 @@ public:
         std::fstream filestr;
         filestr.open(file_name.c_str(), std::fstream::out);
 
-        StrGraphLabeler<Graph> str_labeler(gp.g);
+        visualization::graph_labeler::StrGraphLabeler<Graph> str_labeler(gp.g);
         PathGraphLabeler<Graph> path_labeler(gp.g, paths);
-        CoverageGraphLabeler<Graph> cov_labler(gp.g);
-        EdgePosGraphLabeler<Graph> pos_labeler(gp.g, gp.edge_pos);
+        visualization::graph_labeler::CoverageGraphLabeler<Graph> cov_labler(gp.g);
+        visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(gp.g, gp.edge_pos);
 
-        CompositeLabeler<Graph> composite_labeler(str_labeler, cov_labler, path_labeler, pos_labeler);
-        shared_ptr<omnigraph::visualization::GraphColorer<Graph>> colorer;
+        visualization::graph_labeler::CompositeLabeler<Graph> composite_labeler(str_labeler, cov_labler, path_labeler, pos_labeler);
+        shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> colorer;
         if (gp.index.IsAttached()) {
              colorer = stats::DefaultColorer(gp);
         } else {
-            colorer = omnigraph::visualization::DefaultColorer(gp.g);
+            colorer = visualization::graph_colorer::DefaultColorer(gp.g);
         }
 
-        omnigraph::visualization::ComponentVisualizer<Graph> visualizer(gp.g, false);
-        omnigraph::visualization::EmptyGraphLinker<Graph> linker;
+        visualization::visualizers::ComponentVisualizer<Graph> visualizer(gp.g, false);
+        visualization::vertex_linker::EmptyGraphLinker<Graph> linker;
         visualizer.Visualize(filestr, composite_labeler, *colorer, linker);
         filestr.close();
         INFO("Visualizing graph done");
@@ -104,22 +104,22 @@ public:
         std::fstream filestr;
         filestr.open(file_name.c_str(), std::fstream::out);
 
-        StrGraphLabeler<Graph> str_labeler(gp.g);
-        EdgePosGraphLabeler<Graph> pos_labeler(gp.g, gp.edge_pos);
-        CoverageGraphLabeler<Graph> cov_labler(gp.g);
-        CompositeLabeler<Graph> composite_labeler(str_labeler, cov_labler, pos_labeler);
+        visualization::graph_labeler::StrGraphLabeler<Graph> str_labeler(gp.g);
+        visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(gp.g, gp.edge_pos);
+        visualization::graph_labeler::CoverageGraphLabeler<Graph> cov_labler(gp.g);
+        visualization::graph_labeler::CompositeLabeler<Graph> composite_labeler(str_labeler, cov_labler, pos_labeler);
 
-        shared_ptr<omnigraph::visualization::GraphColorer<Graph>> colorer;
+        shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> colorer;
 
         if (gp.index.IsAttached()) {
              colorer = stats::DefaultColorer(gp);
         } else {
             Path<EdgeId> empty;
-            colorer = omnigraph::visualization::DefaultColorer(gp.g, empty, empty);
+            colorer = visualization::graph_colorer::DefaultColorer(gp.g, empty, empty);
         }
 
-        omnigraph::visualization::ComponentVisualizer<Graph> visualizer(gp.g, false);
-        omnigraph::visualization::EmptyGraphLinker<Graph> linker;
+        visualization::visualizers::ComponentVisualizer<Graph> visualizer(gp.g, false);
+        visualization::vertex_linker::EmptyGraphLinker<Graph> linker;
         visualizer.Visualize(filestr, composite_labeler, *colorer, linker);
         filestr.close();
         INFO("Visualizing graph done");
@@ -130,17 +130,17 @@ public:
         std::fstream filestr;
         filestr.open(file_name.c_str(), std::fstream::out);
 
-        StrGraphLabeler<Graph> str_labeler(g);
-        CoverageGraphLabeler<Graph> cov_labler(g);
-        CompositeLabeler<Graph> composite_labeler(str_labeler, cov_labler);
+        visualization::graph_labeler::StrGraphLabeler<Graph> str_labeler(g);
+        visualization::graph_labeler::CoverageGraphLabeler<Graph> cov_labler(g);
+        visualization::graph_labeler::CompositeLabeler<Graph> composite_labeler(str_labeler, cov_labler);
 
-        shared_ptr<omnigraph::visualization::GraphColorer<Graph>> colorer;
+        shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> colorer;
 
         Path<EdgeId> empty;
-        colorer = omnigraph::visualization::DefaultColorer(g, empty, empty);
+        colorer = visualization::graph_colorer::DefaultColorer(g, empty, empty);
 
-        omnigraph::visualization::ComponentVisualizer<Graph> visualizer(g, false);
-        omnigraph::visualization::EmptyGraphLinker<Graph> linker;
+        visualization::visualizers::ComponentVisualizer<Graph> visualizer(g, false);
+        visualization::vertex_linker::EmptyGraphLinker<Graph> linker;
         visualizer.Visualize(filestr, composite_labeler, *colorer, linker);
         filestr.close();
         INFO("Visualizing graph done");
diff --git a/src/modules/algorithms/path_extend/pe_config_struct.cpp b/src/common/modules/path_extend/pe_config_struct.cpp
similarity index 81%
rename from src/modules/algorithms/path_extend/pe_config_struct.cpp
rename to src/common/modules/path_extend/pe_config_struct.cpp
index 1acab7c..cccb95e 100644
--- a/src/modules/algorithms/path_extend/pe_config_struct.cpp
+++ b/src/common/modules/path_extend/pe_config_struct.cpp
@@ -10,12 +10,6 @@
 
 namespace path_extend {
 
-void load(output_broken_scaffolds& obs, boost::property_tree::ptree const& pt, std::string const& key, bool complete) {
-  if (complete || pt.find(key) != pt.not_found()) {
-    std::string ep = pt.get<std::string>(key);
-    obs = pe_config::output_broken_scaffolds_id(ep);
-  }
-}
 
 void load(scaffolding_mode &sm, boost::property_tree::ptree const& pt, std::string const& key, bool complete) {
     if (complete || pt.find(key) != pt.not_found()) {
@@ -31,7 +25,7 @@ void load(pe_config::ParamSetT::ScaffoldGraphParamsT& sg, boost::property_tree::
     load(sg.always_add,         pt, "always_add"        );
     load(sg.never_add,          pt, "never_add"         );
     load(sg.relative_threshold,  pt, "relative_threshold" );
-    load(sg.graph_connectivity, pt, "graph_connectivity");
+    load(sg.use_graph_connectivity, pt, "use_graph_connectivity");
     load(sg.max_path_length,    pt, "max_path_length"   );
 }
 
@@ -58,12 +52,6 @@ void load(pe_config::ParamSetT::ExtensionOptionsT& es,
     load(es.max_repeat_length, pt, "max_repeat_length", complete);
 }
 
-void load(pe_config::ParamSetT::LoopRemovalT& lr,
-          boost::property_tree::ptree const& pt, bool complete) {
-    using config_common::load;
-    load(lr.max_loops, pt, "max_loops", complete);
-    load(lr.mp_max_loops, pt, "mp_max_loops", complete);
-}
 
 void load(pe_config::ParamSetT::CoordinatedCoverageT& coord_cov,
           boost::property_tree::ptree const& pt, bool complete) {
@@ -121,6 +109,14 @@ void load(pe_config::ParamSetT::PathFiltrationT& pf,
     }
 }
 
+void load(pe_config::ParamSetT::GenomeConsistencyCheckerParamsT& gcc,
+          boost::property_tree::ptree const& pt, bool complete)
+{
+    using config_common::load;
+    load(gcc.max_gap      , pt, "max_gap"      , complete);
+    load(gcc.relative_max_gap      , pt, "relative_max_gap"      , complete);
+}
+
 void load(pe_config::ParamSetT& p, boost::property_tree::ptree const& pt, bool complete) {
     using config_common::load;
     load(p.sm, pt, "scaffolding_mode", complete);
@@ -132,16 +128,16 @@ void load(pe_config::ParamSetT& p, boost::property_tree::ptree const& pt, bool c
     load(p.extension_options, pt, "extension_options", complete);
     load(p.mate_pair_options, pt, "mate_pair_options", complete);
     load(p.scaffolder_options, pt, "scaffolder", complete);
-    load(p.loop_removal, pt, "loop_removal", complete);
     load(p.coordinated_coverage, pt, "coordinated_coverage", complete);
     load(p.use_coordinated_coverage, pt, "use_coordinated_coverage", complete);
     load(p.scaffolding2015, pt, "scaffolding2015", complete);
     load(p.scaffold_graph_params, pt, "scaffold_graph", complete);
     load(p.path_filtration, pt, "path_cleaning", complete);
-
+    load(p.genome_consistency_checker, pt, "genome_consistency_checker", complete);
+    load(p.uniqueness_analyser, pt, "uniqueness_analyser", complete);
+    load(p.loop_traversal, pt, "loop_traversal", complete);
 }
 
-
 void load(pe_config::LongReads& p, boost::property_tree::ptree const& pt,
           bool complete) {
     using config_common::load;
@@ -152,14 +148,31 @@ void load(pe_config::LongReads& p, boost::property_tree::ptree const& pt,
 
 }
 
-void load(pe_config::ParamSetT::Scaffolding2015& p, boost::property_tree::ptree const& pt,
-          bool) {
+void load(pe_config::ParamSetT::LoopTraversalParamsT& p, boost::property_tree::ptree const& pt,
+          bool complete) {
+    using config_common::load;
+    load(p.min_edge_length, pt, "min_edge_length", complete);
+    load(p.max_component_size, pt, "max_component_size", complete);
+    load(p.max_path_length, pt, "max_path_length", complete);
+}
+
+void load(pe_config::ParamSetT::UniquenessAnalyserParamsT& p, boost::property_tree::ptree const& pt,
+          bool complete) {
     using config_common::load;
-    load(p.autodetect, pt, "autodetect");
-    load(p.min_unique_length, pt, "min_unique_length");
-    load(p.unique_coverage_variation, pt, "unique_coverage_variation");
-    load(p.relative_weight_cutoff, pt, "relative_weight_cutoff");
+    load(p.enabled, pt, "enabled", complete);
+    load(p.nonuniform_coverage_variation, pt, "nonuniform_coverage_variation", complete);
+    load(p.uniformity_fraction_threshold, pt, "uniformity_fraction_threshold", complete);
+    load(p.unique_coverage_variation, pt, "unique_coverage_variation", complete);
+}
 
+void load(pe_config::ParamSetT::Scaffolding2015& p, boost::property_tree::ptree const& pt,
+          bool complete) {
+    using config_common::load;
+    load(p.unique_length_lower_bound, pt, "unique_length_lower_bound", complete);
+    load(p.unique_length_upper_bound, pt, "unique_length_upper_bound", complete);
+    load(p.unique_length_step, pt, "unique_length_step", complete);
+    load(p.graph_connectivity_max_edges, pt, "graph_connectivity_max_edges", complete);
+    load(p.relative_weight_cutoff, pt, "relative_weight_cutoff", complete);
 }
 
 void load(pe_config::AllLongReads& p, boost::property_tree::ptree const& pt,
@@ -177,7 +190,6 @@ void load(pe_config::MainPEParamsT& p, boost::property_tree::ptree const& pt,
     load(p.debug_output, pt, "debug_output", complete);
     load(p.output, pt, "output", complete);
     load(p.viz, pt, "visualize", complete);
-    load(p.obs, pt, "output_broken_scaffolds", complete);
     load(p.param_set, pt, "params", complete);
     load(p.long_reads, pt, "long_reads", complete);
     if (!p.debug_output) {
diff --git a/src/modules/algorithms/path_extend/pe_config_struct.hpp b/src/common/modules/path_extend/pe_config_struct.hpp
similarity index 75%
rename from src/modules/algorithms/path_extend/pe_config_struct.hpp
rename to src/common/modules/path_extend/pe_config_struct.hpp
index 620f7c8..a5b161f 100644
--- a/src/modules/algorithms/path_extend/pe_config_struct.hpp
+++ b/src/common/modules/path_extend/pe_config_struct.hpp
@@ -16,7 +16,7 @@
 #define LC_CONFIG_STRUCT_HPP_
 
 #include "pipeline/config_singl.hpp"
-#include "dev_support/cpp_utils.hpp"
+#include "utils/cpp_utils.hpp"
 
 #include <boost/optional.hpp>
 #include <boost/property_tree/ptree_fwd.hpp>
@@ -27,12 +27,6 @@
 
 namespace path_extend {
 
-enum output_broken_scaffolds {
-    obs_none,
-    obs_break_gaps,
-    obs_break_all
-};
-
 enum scaffolding_mode {
     sm_old,
     sm_2015,
@@ -40,6 +34,8 @@ enum scaffolding_mode {
     sm_old_pe_2015
 };
 
+//Both of these functions always return true, right?
+//Are they still necessary?
 inline bool IsScaffolder2015Enabled(const scaffolding_mode mode) {
     return (mode == sm_old_pe_2015 || mode == sm_2015 || mode == sm_combined);
 }
@@ -50,40 +46,6 @@ inline bool IsOldPEEnabled(const scaffolding_mode mode) {
 
 // struct for path extend subproject's configuration file
 struct pe_config {
-
-    typedef boost::bimap<std::string, output_broken_scaffolds> output_broken_scaffolds_id_mapping;
-
-    static const output_broken_scaffolds_id_mapping FillOBSInfo() {
-        output_broken_scaffolds_id_mapping::value_type info[] = {
-            output_broken_scaffolds_id_mapping::value_type("none", obs_none),
-            output_broken_scaffolds_id_mapping::value_type("break_gaps", obs_break_gaps),
-            output_broken_scaffolds_id_mapping::value_type("break_all", obs_break_all)
-        };
-
-        return output_broken_scaffolds_id_mapping(info, utils::array_end(info));
-    }
-
-    static const output_broken_scaffolds_id_mapping &output_broken_scaffolds_info() {
-        static output_broken_scaffolds_id_mapping output_broken_scaffolds_info = FillOBSInfo();
-        return output_broken_scaffolds_info;
-    }
-
-    static const std::string &output_broken_scaffolds_name(output_broken_scaffolds obs) {
-        auto it = output_broken_scaffolds_info().right.find(obs);
-        VERIFY_MSG(it != output_broken_scaffolds_info().right.end(),
-                   "No name for output broken scaffolds mode id = " << obs);
-
-        return it->second;
-    }
-
-    static output_broken_scaffolds output_broken_scaffolds_id(std::string name) {
-        auto it = output_broken_scaffolds_info().left.find(name);
-        VERIFY_MSG(it != output_broken_scaffolds_info().left.end(),
-                   "There is no output broken scaffolds mode with name = " << name);
-
-        return it->second;
-    }
-
     typedef boost::bimap<std::string, scaffolding_mode> scaffolding_mode_id_mapping;
 
     static const scaffolding_mode_id_mapping FillSMInfo() {
@@ -189,12 +151,6 @@ struct pe_config {
             boost::optional<int> min_overlap_for_rna_scaffolding;
         } scaffolder_options;
 
-
-        struct LoopRemovalT {
-            size_t max_loops;
-            size_t mp_max_loops;
-        } loop_removal;
-
         struct PathFiltrationT {
             bool enabled;
             size_t min_length;
@@ -213,10 +169,13 @@ struct pe_config {
         } coordinated_coverage;
 
         struct Scaffolding2015 {
-            bool autodetect;
-            size_t min_unique_length;
-            double unique_coverage_variation;
             double relative_weight_cutoff;
+
+            size_t unique_length_upper_bound;
+            size_t unique_length_lower_bound;
+            size_t unique_length_step;
+
+            size_t graph_connectivity_max_edges;
         } scaffolding2015;
 
         struct ScaffoldGraphParamsT {
@@ -225,9 +184,29 @@ struct pe_config {
             size_t always_add;
             size_t never_add;
             double relative_threshold;
-            bool graph_connectivity;
+            bool use_graph_connectivity;
             size_t max_path_length;
         } scaffold_graph_params;
+
+        struct GenomeConsistencyCheckerParamsT {
+            size_t max_gap;
+            double relative_max_gap;
+        } genome_consistency_checker;
+
+        struct LoopTraversalParamsT {
+            size_t min_edge_length ;
+            size_t max_component_size;
+            size_t max_path_length;
+        } loop_traversal;
+
+        struct UniquenessAnalyserParamsT  {
+            bool enabled;
+            double unique_coverage_variation;
+
+            double nonuniform_coverage_variation;
+            double uniformity_fraction_threshold;
+        } uniqueness_analyser;
+
     };
 
     struct LongReads {
@@ -246,9 +225,6 @@ struct pe_config {
 
 
     struct MainPEParamsT {
-        output_broken_scaffolds obs;
-
-        bool finalize_paths;
         bool debug_output;
         std::string etc_dir;
 
@@ -262,7 +238,6 @@ struct pe_config {
 
 void load(pe_config::ParamSetT &p, boost::property_tree::ptree const &pt, bool complete = true);
 void load(pe_config::MainPEParamsT &p, boost::property_tree::ptree const &pt, bool complete = true);
-//void load(pe_config& pe_cfg, boost::property_tree::ptree const& pt, bool complete);
 
 }
 
diff --git a/src/modules/algorithms/path_extend/pe_resolver.hpp b/src/common/modules/path_extend/pe_resolver.hpp
similarity index 88%
rename from src/modules/algorithms/path_extend/pe_resolver.hpp
rename to src/common/modules/path_extend/pe_resolver.hpp
index bc36993..dfbd4f3 100644
--- a/src/modules/algorithms/path_extend/pe_resolver.hpp
+++ b/src/common/modules/path_extend/pe_resolver.hpp
@@ -16,7 +16,6 @@
 #define PE_RESOLVER_HPP_
 
 #include "path_extender.hpp"
-#include "pe_io.hpp"
 
 namespace path_extend {
 
@@ -35,9 +34,49 @@ public:
         }
     }
 
+    size_t NonUniqueCommon(BidirectionalPath * path, int pos1, int pos2) {
+        size_t answer = 0;
+        while (pos1 >= 0) {
+            if (path->At(pos1) == path->At(pos2)) {
+                pos1--;
+                pos2--;
+                answer++;
+            } else {
+                break;
+            }
+        }
+        return answer;
+    }
+
+    size_t MaximumNonUniqueSuffix(BidirectionalPath * path) {
+        if (path->Size() == 0) {
+            return 0;
+        }
+
+        size_t answer = 0;
+        EdgeId back = path->Back();
+        vector<size_t> all_pos = path->FindAll(back);
+        for (size_t i = 0; i < all_pos.size() - 1; ++i) {
+            answer = std::max(answer, NonUniqueCommon(path, (int) all_pos[i], (int) path->Size() - 1));
+        }
+        return answer;
+    }
+
+    void CutNonUniqueSuffix(PathContainer& paths) {
+        vector<pair<BidirectionalPath *, BidirectionalPath *>> tmp_paths(paths.begin(), paths.end());
+        for (auto it = tmp_paths.begin(); it != tmp_paths.end(); ++it) {
+            BidirectionalPath * path1 = it->first;
+            BidirectionalPath * path2 = it->second;
+            size_t longest_suffix1 = MaximumNonUniqueSuffix(path1);
+            path1->PopBack(longest_suffix1);
+            size_t longest_suffix2 = MaximumNonUniqueSuffix(path2);
+            path2->PopBack(longest_suffix2);
+        }
+    }
+
     void CutPseudoSelfConjugatePaths(PathContainer& paths) {
         vector<pair<BidirectionalPath *, BidirectionalPath *>> tmp_paths(paths.begin(), paths.end());
-        for(auto it = tmp_paths.begin(); it != tmp_paths.end(); ++it) {
+        for (auto it = tmp_paths.begin(); it != tmp_paths.end(); ++it) {
             BidirectionalPath * path1 = it->first;
             BidirectionalPath * path2 = it->second;
             bool ups = false;
@@ -341,8 +380,8 @@ private:
             path1->SetOverlapedEndTo(path2);
         } else if (overlap_size < path2->Size()
                 && overlap_size < path1->Size()) {
-            BidirectionalPath* overlap = new BidirectionalPath(g_, path1->Back());
-            BidirectionalPath* conj_overlap = new BidirectionalPath(g_, g_.conjugate(path1->Back()));
+            BidirectionalPath *overlap = new BidirectionalPath(g_, path1->Back());
+            BidirectionalPath *conj_overlap = new BidirectionalPath(g_, g_.conjugate(path1->Back()));
             SubscribeCoverageMap(overlap);
             SubscribeCoverageMap(conj_overlap);
             paths.AddPair(overlap, conj_overlap);
@@ -429,7 +468,7 @@ public:
     PathExtendResolver(const Graph& g): g_(g), k_(g.k()) {
     }
 
-    PathContainer makeSimpleSeeds() {
+    PathContainer MakeSimpleSeeds() const {
         std::set<EdgeId> included;
         PathContainer edges;
         for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
@@ -446,26 +485,41 @@ public:
         return edges;
     }
 
-    PathContainer extendSeeds(PathContainer& seeds, ContigsMaker& pathExtender) {
+    PathContainer ExtendSeeds(PathContainer &seeds, ContigsMaker &pathExtender) const {
         PathContainer paths;
         pathExtender.GrowAll(seeds, paths);
         return paths;
     }
 
-    void removeEqualPaths(PathContainer& paths, GraphCoverageMap& coverage_map,
-                          size_t max_overlap) {
+    void RemoveEqualPaths(PathContainer &paths, GraphCoverageMap &coverage_map,
+                          size_t min_edge_len) const  {
+
+        SimpleOverlapRemover remover(g_, coverage_map);
+        remover.RemoveSimilarPaths(paths, min_edge_len, min_edge_len, true, false, false, false, false);
+    }
+
+    void RemoveRNAOverlaps(PathContainer& paths, GraphCoverageMap& coverage_map,
+                          size_t min_edge_len, size_t max_path_diff) const  {
 
         SimpleOverlapRemover remover(g_, coverage_map);
-        remover.RemoveSimilarPaths(paths, max_overlap, max_overlap, true, false, false, false, false);
+        remover.RemoveSimilarPaths(paths, min_edge_len, max_path_diff, true, false, false, false, false);
+
+        remover.RemoveSimilarPaths(paths, min_edge_len, max_path_diff, false, true, false, false, false);
+
+        remover.RemoveOverlaps(paths);
+
+        remover.RemoveSimilarPaths(paths, min_edge_len, max_path_diff, true, false, false, false, false);
     }
 
-    void removeOverlaps(PathContainer& paths, GraphCoverageMap& coverage_map,
+    void RemoveOverlaps(PathContainer &paths, GraphCoverageMap &coverage_map,
                         size_t min_edge_len, size_t max_path_diff,
                         bool add_overlaps_begin,
-                        bool cut_preudo_self_conjugate) {
+                        bool cut_preudo_self_conjugate) const {
         SimpleOverlapRemover remover(g_, coverage_map);
         if (cut_preudo_self_conjugate)
             remover.CutPseudoSelfConjugatePaths(paths);
+
+        remover.CutNonUniqueSuffix(paths);
         //writer.WritePathsToFASTA(paths, output_dir + "/before.fasta");
         //DEBUG("Removing subpaths");
         //delete not only eq,
@@ -489,7 +543,7 @@ public:
         }
     }
 
-    void addUncoveredEdges(PathContainer& paths, GraphCoverageMap& coverageMap) {
+    void AddUncoveredEdges(PathContainer &paths, GraphCoverageMap &coverageMap) const {
         std::set<EdgeId> included;
         for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
             if (included.count(*iter) == 0 && !coverageMap.IsCovered(*iter)) {
diff --git a/src/modules/algorithms/path_extend/pe_utils.hpp b/src/common/modules/path_extend/pe_utils.hpp
similarity index 75%
rename from src/modules/algorithms/path_extend/pe_utils.hpp
rename to src/common/modules/path_extend/pe_utils.hpp
index f061af5..8df0968 100644
--- a/src/modules/algorithms/path_extend/pe_utils.hpp
+++ b/src/common/modules/path_extend/pe_utils.hpp
@@ -17,10 +17,10 @@
 
 #include "assembly_graph/paths/bidirectional_path.hpp"
 
-using namespace debruijn_graph;
-
 namespace path_extend {
 
+using namespace debruijn_graph;
+
 //Checks whether we are in a cycle of length 2, used only for seed selection.
 inline bool InTwoEdgeCycle(EdgeId e, const Graph &g) {
     auto v = g.EdgeEnd(e);
@@ -49,32 +49,30 @@ inline bool InBuble(EdgeId e, const Graph& g) {
 
 // Handles all paths in PathContainer.
 // For each edge output all paths  that _traverse_ this path. If path contains multiple instances - count them. Position of the edge is not reported.
-//TODO: Inside is some WTF, should be rewritten.
-//TODO: Memory leaks, inefficient data structure.
 class GraphCoverageMap: public PathListener {
 
 public:
     typedef BidirectionalPathMultiset MapDataT;
 
 
-protected:
+private:
     const Graph& g_;
 
-    std::map <EdgeId, MapDataT * > edgeCoverage_;
+    std::unordered_map <EdgeId, MapDataT * > edge_coverage_;
 
     MapDataT * empty_;
 
     virtual void EdgeAdded(EdgeId e, BidirectionalPath * path, Gap /*gap*/) {
-        auto iter = edgeCoverage_.find(e);
-        if (iter == edgeCoverage_.end()) {
-            edgeCoverage_.insert(std::make_pair(e, new MapDataT()));
+        auto iter = edge_coverage_.find(e);
+        if (iter == edge_coverage_.end()) {
+            edge_coverage_.insert(std::make_pair(e, new MapDataT()));
         }
-        edgeCoverage_[e]->insert(path);
+        edge_coverage_[e]->insert(path);
     }
 
     virtual void EdgeRemoved(EdgeId e, BidirectionalPath * path) {
-        auto iter = edgeCoverage_.find(e);
-        if (iter != edgeCoverage_.end()) {
+        auto iter = edge_coverage_.find(e);
+        if (iter != edge_coverage_.end()) {
             if (iter->second->count(path) == 0) {
                 DEBUG("Error erasing path from coverage map");
             } else {
@@ -84,40 +82,46 @@ protected:
         }
     }
 
+    size_t EdgeCount() const {
+        size_t result = 0;
+        for (auto e = g_.ConstEdgeBegin(); !e.IsEnd(); ++e) {
+            ++result;
+        }
+        return result;
+    }
+
 public:
-    GraphCoverageMap(const Graph& g) : g_(g), edgeCoverage_() {
+    GraphCoverageMap(const Graph& g) : g_(g), edge_coverage_() {
         empty_ = new MapDataT();
+        edge_coverage_.reserve(EdgeCount());
     }
 
-    GraphCoverageMap(const Graph& g, const PathContainer& paths) : g_(g), edgeCoverage_() {
+    GraphCoverageMap(const Graph& g, const PathContainer& paths, bool subscribe = false) : g_(g), edge_coverage_() {
         empty_ = new MapDataT();
-        for (size_t i = 0; i < paths.size(); ++i) {
-            for (size_t j = 0; j < paths.Get(i)->Size(); ++j) {
-                EdgeAdded(paths.Get(i)->At(j), paths.Get(i), paths.Get(i)->GapAt(j));
-            }
-            for (size_t j = 0; j < paths.GetConjugate(i)->Size(); ++j) {
-                EdgeAdded(paths.GetConjugate(i)->At(j), paths.GetConjugate(i), paths.GetConjugate(i)->GapAt(j));
-            }
-        }
+        edge_coverage_.reserve(EdgeCount());
+        AddPaths(paths, subscribe);
     }
 
     virtual ~GraphCoverageMap() {
         delete empty_;
-        for (auto iter = edgeCoverage_.begin(); iter != edgeCoverage_.end(); ++iter) {
+        for (auto iter = edge_coverage_.begin(); iter != edge_coverage_.end(); ++iter) {
             delete iter->second;
         }
     }
 
-    void Clear() {
-        for (auto iter = edgeCoverage_.begin(); iter != edgeCoverage_.end(); ++iter) {
-            MapDataT* cover_paths = iter->second;
-            for (auto ipath = cover_paths->begin(); ipath != cover_paths->end(); ++ipath) {
-                BidirectionalPath* p = *ipath;
-                p->Unsubscribe(this);
+    void AddPaths(const PathContainer& paths, bool subscribe = false) {
+        for (size_t i = 0; i < paths.size(); ++i) {
+            if (subscribe)
+                paths.Get(i)->Subscribe(this);
+            for (size_t j = 0; j < paths.Get(i)->Size(); ++j) {
+                EdgeAdded(paths.Get(i)->At(j), paths.Get(i), paths.Get(i)->GapAt(j));
+            }
+            if (subscribe)
+                paths.GetConjugate(i)->Subscribe(this);
+            for (size_t j = 0; j < paths.GetConjugate(i)->Size(); ++j) {
+                EdgeAdded(paths.GetConjugate(i)->At(j), paths.GetConjugate(i), paths.GetConjugate(i)->GapAt(j));
             }
-            delete cover_paths;
         }
-        edgeCoverage_.clear();
     }
 
     void Subscribe(BidirectionalPath * path) {
@@ -127,25 +131,29 @@ public:
         }
     }
 
-    virtual void FrontEdgeAdded(EdgeId e, BidirectionalPath * path, Gap gap) {
+    //Inherited from PathListener
+    void FrontEdgeAdded(EdgeId e, BidirectionalPath * path, Gap gap) override {
         EdgeAdded(e, path, gap);
     }
 
-    virtual void BackEdgeAdded(EdgeId e, BidirectionalPath * path, Gap gap) {
+    //Inherited from PathListener
+    void BackEdgeAdded(EdgeId e, BidirectionalPath * path, Gap gap) override {
         EdgeAdded(e, path, gap);
     }
 
-    virtual void FrontEdgeRemoved(EdgeId e, BidirectionalPath * path) {
+    //Inherited from PathListener
+    void FrontEdgeRemoved(EdgeId e, BidirectionalPath * path) override {
         EdgeRemoved(e, path);
     }
 
-    virtual void BackEdgeRemoved(EdgeId e, BidirectionalPath * path) {
+    //Inherited from PathListener
+    void BackEdgeRemoved(EdgeId e, BidirectionalPath * path) override {
         EdgeRemoved(e, path);
     }
 
     MapDataT * GetEdgePaths(EdgeId e) const {
-        auto iter = edgeCoverage_.find(e);
-        if (iter != edgeCoverage_.end()) {
+        auto iter = edge_coverage_.find(e);
+        if (iter != edge_coverage_.end()) {
             return iter->second;
         }
         return empty_;
@@ -168,41 +176,20 @@ public:
         return true;
     }
 
-    int GetCoverage(const BidirectionalPath& path) const {
-        if (path.Empty()) {
-            return 0;
-        }
-
-        int cov = GetCoverage(path[0]);
-        for (size_t i = 1; i < path.Size(); ++i) {
-            int currentCov = GetCoverage(path[i]);
-            if (cov > currentCov) {
-                cov = currentCov;
-            }
-        }
-
-        return cov;
-    }
-
     BidirectionalPathSet GetCoveringPaths(EdgeId e) const {
         auto mapData = GetEdgePaths(e);
         return BidirectionalPathSet(mapData->begin(), mapData->end());
     }
 
-    int GetUniqueCoverage(EdgeId e) const {
-        return (int) GetCoveringPaths(e).size();
-    }
-
-    std::map <EdgeId, MapDataT * >::const_iterator begin() const {
-        return edgeCoverage_.begin();
+    std::unordered_map <EdgeId, MapDataT * >::const_iterator begin() const {
+        return edge_coverage_.begin();
     }
 
-    std::map <EdgeId, MapDataT * >::const_iterator end() const {
-        return edgeCoverage_.end();
+    std::unordered_map <EdgeId, MapDataT * >::const_iterator end() const {
+        return edge_coverage_.end();
     }
 
-    // DEBUG
-
+    // DEBUG output
     void PrintUncovered() const {
         DEBUG("Uncovered edges");
         int s = 0;
@@ -230,7 +217,7 @@ public:
     }
 
     size_t size() const {
-        return edgeCoverage_.size();
+        return edge_coverage_.size();
     }
 
     const Graph& graph() const {
@@ -404,58 +391,6 @@ inline bool LoopDetector::PrevEdgeInShortLoop() const {
     return false;
 }
 
-class ScaffoldBreaker {
-private:
-
-    int min_gap_;
-
-    PathContainer container_;
-
-    void SplitPath(const BidirectionalPath& path) {
-        size_t i = 0;
-
-        while (i < path.Size()) {
-            BidirectionalPath * p = new BidirectionalPath(path.graph(), path[i]);
-            ++i;
-
-            while (i < path.Size() and path.GapAt(i) <= min_gap_) {
-                p->PushBack(path[i], path.GapAt(i), path.TrashPreviousAt(i), path.TrashCurrentAt(i));
-                ++i;
-            }
-            
-            if (i < path.Size()) {
-                DEBUG("split path " << i << " gap " << path.GapAt(i));
-                p->Print();
-            }
-
-            BidirectionalPath * cp = new BidirectionalPath(p->Conjugate());
-            container_.AddPair(p, cp);
-        }
-    }
-
-public:
-
-    ScaffoldBreaker(int min_gap, const PathContainer &paths)
-            : min_gap_(min_gap) {
-        for (auto it = paths.begin(); it != paths.end(); ++it) {
-            SplitPath(*it.get());
-        }
-    }
-
-    ~ScaffoldBreaker() {
-        // FIXME: WTF, Why doesn't PathContainer own the paths?
-        container_.DeleteAllPaths();
-    }
-    
-    void clear() {
-        container_.clear();
-    }
-
-    PathContainer& container() {
-        return container_;
-    }
-
-};
 
 }
 
diff --git a/src/common/modules/path_extend/pipeline/extenders_logic.cpp b/src/common/modules/path_extend/pipeline/extenders_logic.cpp
new file mode 100644
index 0000000..7b26fed
--- /dev/null
+++ b/src/common/modules/path_extend/pipeline/extenders_logic.cpp
@@ -0,0 +1,423 @@
+//
+// Created by andrey on 14.11.16.
+//
+
+#include "extenders_logic.hpp"
+#include "modules/path_extend/scaffolder2015/extension_chooser2015.hpp"
+
+
+namespace path_extend {
+
+using namespace debruijn_graph;
+
+// Builds the extension chooser for a long-read/contig library: looks up the
+// per-library-type tuning constants from the launch support config and wires
+// them, together with the read-path coverage map, into a LongReadsExtensionChooser.
+shared_ptr<ExtensionChooser> ExtendersGenerator::MakeLongReadsExtensionChooser(size_t lib_index,
+                                                                               const GraphCoverageMap &read_paths_cov_map) const {
+    auto long_reads_config = support_.GetLongReadsConfig(dataset_info_.reads[lib_index].type());
+    return make_shared<LongReadsExtensionChooser>(gp_.g, read_paths_cov_map,
+                                                  long_reads_config.filtering,
+                                                  long_reads_config.weight_priority,
+                                                  long_reads_config.unique_edge_priority,
+                                                  long_reads_config.min_significant_overlap,
+                                                  params_.pset.extension_options.max_repeat_length,
+                                                  params_.uneven_depth);
+}
+
+// Creates a SimpleExtender driven by long-read alignments for library lib_index.
+// The repeat-length bound starts at a hard-coded 10 kbp and, for non-contig
+// libraries, is raised to the library read length so repeats shorter than a
+// read are treated as resolvable.
+shared_ptr<SimpleExtender> ExtendersGenerator::MakeLongReadsExtender(size_t lib_index,
+                                                                     const GraphCoverageMap &read_paths_cov_map) const {
+    const auto &lib = dataset_info_.reads[lib_index];
+    //TODO params
+    size_t resolvable_repeat_length_bound = 10000ul;
+    if (!dataset_info_.reads[lib_index].is_contig_lib()) {
+        resolvable_repeat_length_bound = std::max(resolvable_repeat_length_bound, lib.data().read_length);
+    }
+    INFO("resolvable_repeat_length_bound set to " << resolvable_repeat_length_bound);
+
+
+    auto long_read_ec = MakeLongReadsExtensionChooser(lib_index, read_paths_cov_map);
+    return make_shared<SimpleExtender>(gp_, cover_map_,
+                                       long_read_ec,
+                                       resolvable_repeat_length_bound,
+                                       true, /* investigate short loops */
+                                       support_.UseCoverageResolverForSingleReads(lib.type()));
+}
+
+// Paired-end extender variant used with long-edge extension choosing (e.g. the
+// moleculo pipeline, per MakeBasicExtenders): weights candidates with a
+// path-cover weight counter over the clustered paired index of this library.
+shared_ptr<SimpleExtender> ExtendersGenerator::MakeLongEdgePEExtender(size_t lib_index,
+                                                                      bool investigate_loops) const {
+    const auto &lib = dataset_info_.reads[lib_index];
+    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(gp_.g, lib, gp_.clustered_indices[lib_index]);
+    //INFO("Threshold for lib #" << lib_index << ": " << paired_lib->GetSingleThreshold());
+
+    shared_ptr<WeightCounter> wc =
+        make_shared<PathCoverWeightCounter>(gp_.g, paired_lib,
+                                            params_.pset.normalize_weight,
+                                            support_.SingleThresholdForLib(params_.pset, lib.data().pi_threshold));
+    auto opts = support_.GetExtensionOpts(paired_lib, params_.pset);
+    shared_ptr<ExtensionChooser> extension =
+        make_shared<LongEdgeExtensionChooser>(gp_.g, wc,
+                                              opts.weight_threshold,
+                                              opts.priority_coeff);
+
+    return make_shared<SimpleExtender>(gp_, cover_map_,
+                                       extension,
+                                       paired_lib->GetISMax(),
+                                       investigate_loops,
+                                       false /*use short loop coverage resolver*/);
+}
+
+// Assembles the composite gap joiner used by all scaffolding extenders:
+// an optional local-alignment (LA) joiner plus a Hamming-distance joiner,
+// combined with overlap thresholds derived from k and the insert-size
+// variation of the library being scaffolded.
+shared_ptr<GapJoiner> ExtendersGenerator::MakeGapJoiners(double is_variation) const {
+    const auto &pset = params_.pset;
+
+    vector<shared_ptr<GapJoiner>> joiners;
+    if (params_.pset.scaffolder_options.use_la_gap_joiner)
+        joiners.push_back(std::make_shared<LAGapJoiner>(gp_.g, pset.scaffolder_options.min_overlap_length,
+                                                        pset.scaffolder_options.flank_multiplication_coefficient,
+                                                        pset.scaffolder_options.flank_addition_coefficient));
+
+
+    // NOTE(review): the (int) cast binds to basic_overlap_coeff only, truncating
+    // the coefficient *before* multiplying by the read length — confirm this is
+    // intended rather than (int)(coeff * RL).
+    joiners.push_back(std::make_shared<HammingGapJoiner>(gp_.g,
+                                                         pset.scaffolder_options.min_gap_score,
+                                                         pset.scaffolder_options.short_overlap,
+                                                         (int) pset.scaffolder_options.basic_overlap_coeff
+                                                             * dataset_info_.RL()));
+
+    return std::make_shared<CompositeGapJoiner>(gp_.g,
+                                                joiners,
+                                                size_t(pset.scaffolder_options.max_can_overlap
+                                                           * (double) gp_.g.k()), /* may overlap threshold */
+                                                int(math::round(double(gp_.g.k())
+                                                                    - pset.scaffolder_options.var_coeff
+                                                                        * is_variation)),  /* must overlap threshold */
+                                                pset.scaffolder_options.artificial_gap);
+
+}
+
+// Standard scaffolding extender over the *scaffolding* paired index of the
+// library: read-count weighting, a single ScaffoldingExtensionChooser, and
+// the shared composite gap joiner sized by this library's IS variation.
+shared_ptr<PathExtender> ExtendersGenerator::MakeScaffoldingExtender(size_t lib_index) const {
+
+    const auto &lib = dataset_info_.reads[lib_index];
+    const auto &pset = params_.pset;
+    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(gp_.g, lib, gp_.scaffolding_indices[lib_index]);
+
+    shared_ptr<WeightCounter> counter = make_shared<ReadCountWeightCounter>(gp_.g, paired_lib);
+
+    auto scaff_chooser = std::make_shared<ScaffoldingExtensionChooser>(gp_.g, counter,
+                                                                       pset.scaffolder_options.cl_threshold,
+                                                                       pset.scaffolder_options.var_coeff);
+
+    return make_shared<ScaffoldingPathExtender>(gp_, cover_map_, scaff_chooser,
+                                                MakeGapJoiners(paired_lib->GetIsVar()),
+                                                paired_lib->GetISMax(),
+                                                false, /* investigate short loops */
+                                                params_.avoid_rc_connections);
+}
+
+// RNA-mode scaffolding extender: unlike MakeScaffoldingExtender it reads the
+// *unclustered* paired index and uses two choosers — a permissive one (cutoff)
+// and a strict one (hard_cutoff) — handed together to RNAScaffoldingPathExtender.
+shared_ptr<PathExtender> ExtendersGenerator::MakeRNAScaffoldingExtender(size_t lib_index) const {
+
+    const auto &lib = dataset_info_.reads[lib_index];
+    const auto &pset = params_.pset;
+    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(gp_.g, lib, gp_.paired_indices[lib_index]);
+
+    shared_ptr<WeightCounter> counter = make_shared<ReadCountWeightCounter>(gp_.g, paired_lib);
+
+    auto scaff_chooser = std::make_shared<ScaffoldingExtensionChooser>(gp_.g,
+                                                                       counter,
+                                                                       pset.scaffolder_options.cutoff,
+                                                                       pset.scaffolder_options.var_coeff);
+    auto scaff_chooser2 = std::make_shared<ScaffoldingExtensionChooser>(gp_.g,
+                                                                        counter,
+                                                                        pset.scaffolder_options.hard_cutoff,
+                                                                        pset.scaffolder_options.var_coeff);
+
+
+    // RNA scaffolding requires an explicitly configured minimal overlap.
+    VERIFY(pset.scaffolder_options.min_overlap_for_rna_scaffolding.is_initialized());
+    return make_shared<RNAScaffoldingPathExtender>(gp_, cover_map_,
+                                                   scaff_chooser,
+                                                   scaff_chooser2,
+                                                   MakeGapJoiners(paired_lib->GetIsVar()),
+                                                   paired_lib->GetISMax(),
+                                                   false  /* investigate short loops */,
+                                                   *pset.scaffolder_options.min_overlap_for_rna_scaffolding);
+}
+
+// "Scaffolding 2015" extender for mate-pair libraries: picks whichever paired
+// index (unclustered vs clustered) holds more information, then builds an
+// ExtensionChooser2015 restricted to the given unique-edge storage.
+shared_ptr<PathExtender> ExtendersGenerator::MakeMatePairScaffoldingExtender(
+    size_t lib_index,
+    const ScaffoldingUniqueEdgeStorage &storage) const {
+
+    const auto &lib = dataset_info_.reads[lib_index];
+    const auto &pset = params_.pset;
+    shared_ptr<PairedInfoLibrary> paired_lib;
+    INFO("Creating Scaffolding 2015 extender for lib #" << lib_index);
+
+    //FIXME: DimaA
+    if (gp_.paired_indices[lib_index].size() > gp_.clustered_indices[lib_index].size()) {
+        INFO("Paired unclustered indices not empty, using them");
+        paired_lib = MakeNewLib(gp_.g, lib, gp_.paired_indices[lib_index]);
+    } else if (gp_.clustered_indices[lib_index].size() != 0) {
+        INFO("clustered indices not empty, using them");
+        paired_lib = MakeNewLib(gp_.g, lib, gp_.clustered_indices[lib_index]);
+    } else {
+        // NOTE(review): ERROR only logs here — paired_lib stays null and the
+        // paired_lib->GetIsVar() calls below would dereference it; confirm this
+        // branch is unreachable in practice or should abort.
+        ERROR("All paired indices are empty!");
+    }
+
+    //TODO::was copypasted from MakeScaffoldingExtender, refactor 2015 extension chooser
+    DEBUG("creating extchooser");
+    shared_ptr<ConnectionCondition>
+        condition = make_shared<PairedLibConnectionCondition>(gp_.g, paired_lib, lib_index, 0);
+    auto scaff_chooser = std::make_shared<ExtensionChooser2015>(gp_.g,
+                                                                nullptr,
+                                                                condition,
+                                                                storage,
+                                                                pset.scaffolder_options.cl_threshold,
+                                                                pset.scaffolder_options.var_coeff,
+                                                                pset.scaffolding2015.relative_weight_cutoff,
+                                                                gp_.g.size()
+                                                                    <= params_.pset.scaffolding2015.graph_connectivity_max_edges);
+
+    return make_shared<ScaffoldingPathExtender>(gp_, cover_map_,
+                                                scaff_chooser,
+                                                MakeGapJoiners(paired_lib->GetIsVar()),
+                                                paired_lib->GetISMax(),
+                                                false, /* investigate short loops */
+                                                params_.avoid_rc_connections,
+                                                false /* jump only from tips */);
+}
+
+// Coordinated-coverage extender: joins a permissive paired-info chooser with a
+// coverage-based chooser, so extension requires agreement between paired reads
+// and coverage continuity. Insert size is irrelevant here (passed as -1ul).
+shared_ptr<SimpleExtender> ExtendersGenerator::MakeCoordCoverageExtender(size_t lib_index) const {
+    const auto& lib = dataset_info_.reads[lib_index];
+    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(gp_.g, lib, gp_.clustered_indices[lib_index]);
+
+    auto provider = make_shared<CoverageAwareIdealInfoProvider>(gp_.g, paired_lib, dataset_info_.RL());
+
+    auto meta_wc = make_shared<PathCoverWeightCounter>(gp_.g, paired_lib,
+                                                       params_.pset.normalize_weight,
+                                                       support_.SingleThresholdForLib(params_.pset, lib.data().pi_threshold),
+                                                       provider);
+
+    auto permissive_pi_chooser = make_shared<IdealBasedExtensionChooser>(gp_.g,
+                                                                         meta_wc,
+                                                                         params_.pset.extension_options.weight_threshold,
+                                                                         params_.pset.extension_options.priority_coeff);
+
+    auto coord_cov_chooser = make_shared<CoordinatedCoverageExtensionChooser>(gp_.g, *provider,
+                                                                              params_.pset.coordinated_coverage.max_edge_length_in_repeat,
+                                                                              params_.pset.coordinated_coverage.delta,
+                                                                              params_.pset.coordinated_coverage.min_path_len);
+
+    auto chooser = make_shared<JointExtensionChooser>(gp_.g, permissive_pi_chooser, coord_cov_chooser);
+
+    return make_shared<SimpleExtender>(gp_, cover_map_, chooser,
+                                       -1ul /* insert size is needed only for loop detection, which is not needed in this case */,
+                                       false, /* investigate short loops */
+                                       false /*use short loop coverage resolver*/);
+}
+
+// RNA-mode paired-end extender: like MakePEExtender but always uses a
+// coverage-aware ideal-info provider and returns a MultiExtender, which
+// (unlike SimpleExtender) can grow several alternative extensions.
+shared_ptr<SimpleExtender> ExtendersGenerator::MakeRNAExtender(size_t lib_index, bool investigate_loops) const {
+
+    const auto &lib = dataset_info_.reads[lib_index];
+    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(gp_.g, lib, gp_.clustered_indices[lib_index]);
+//    INFO("Threshold for lib #" << lib_index << ": " << paired_lib->GetSingleThreshold());
+
+    auto cip = make_shared<CoverageAwareIdealInfoProvider>(gp_.g, paired_lib, dataset_info_.RL());
+    shared_ptr<WeightCounter> wc =
+        make_shared<PathCoverWeightCounter>(gp_.g, paired_lib, params_.pset.normalize_weight,
+                                            support_.SingleThresholdForLib(params_.pset, lib.data().pi_threshold),
+                                            cip);
+
+    auto opts = support_.GetExtensionOpts(paired_lib, params_.pset);
+    shared_ptr<RNAExtensionChooser> extension =
+        make_shared<RNAExtensionChooser>(gp_.g, wc,
+                                         opts.weight_threshold,
+                                         opts.priority_coeff);
+
+    return make_shared<MultiExtender>(gp_, cover_map_,
+                                      extension,
+                                      paired_lib->GetISMax(),
+                                      investigate_loops,
+                                      false /*use short loop coverage resolver*/);
+}
+
+// Main paired-end extender. When the default single threshold is in use, the
+// ideal-info provider is chosen by depth profile: coverage-aware for uneven
+// depth (e.g. MDA/meta), global-coverage-aware otherwise (estimated lib coverage).
+shared_ptr<SimpleExtender> ExtendersGenerator::MakePEExtender(size_t lib_index, bool investigate_loops) const {
+    const auto &lib = dataset_info_.reads[lib_index];
+    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(gp_.g, lib, gp_.clustered_indices[lib_index]);
+    VERIFY_MSG(!paired_lib->IsMp(), "Tried to create PE extender for MP library");
+    auto opts = params_.pset.extension_options;
+//    INFO("Threshold for lib #" << lib_index << ": " << paired_lib->GetSingleThreshold());
+
+    shared_ptr<CoverageAwareIdealInfoProvider> iip = nullptr;
+    if (opts.use_default_single_threshold) {
+        if (params_.uneven_depth) {
+            iip = make_shared<CoverageAwareIdealInfoProvider>(gp_.g, paired_lib, dataset_info_.RL());
+        } else {
+            double lib_cov = support_.EstimateLibCoverage(lib_index);
+            INFO("Estimated coverage of library #" << lib_index << " is " << lib_cov);
+            iip = make_shared<GlobalCoverageAwareIdealInfoProvider>(gp_.g, paired_lib, dataset_info_.RL(), lib_cov);
+        }
+    }
+    auto wc = make_shared<PathCoverWeightCounter>(gp_.g, paired_lib, params_.pset.normalize_weight,
+                                                  support_.SingleThresholdForLib(params_.pset, lib.data().pi_threshold),
+                                                  iip);
+
+    auto extension_chooser = make_shared<SimpleExtensionChooser>(gp_.g, wc,
+                                                         opts.weight_threshold,
+                                                         opts.priority_coeff);
+
+    return make_shared<SimpleExtender>(gp_, cover_map_,
+                                       extension_chooser,
+                                       paired_lib->GetISMax(),
+                                       investigate_loops,
+                                       false /*use short loop coverage resolver*/);
+}
+
+
+// Debug-only dump of the extender pipeline: logs the RTTI type name of each
+// extender and, for the two known concrete kinds, of its extension chooser.
+// NOTE(review): the literal "Extender #i" in the DEBUG messages looks like a
+// leftover placeholder (no index is printed) — confirm whether an index was meant.
+void ExtendersGenerator::PrintExtenders(const Extenders &extenders) const {
+    DEBUG("Extenders in vector:");
+    for (const auto& extender : extenders) {
+        //TODO: use polymorphism instead of RTTI
+        auto ext_ptr = extender.get();
+        DEBUG("Extender #i" << typeid(*ext_ptr).name());
+        if (instanceof<SimpleExtender>(ext_ptr)) {
+            auto ec = ((SimpleExtender *) ext_ptr)->GetExtensionChooser();
+            auto ec_ptr = ec.get();
+            DEBUG("    Extender #i" << typeid(*ec_ptr).name());
+        }
+        else if (instanceof<ScaffoldingPathExtender>(ext_ptr)) {
+            auto ec = ((ScaffoldingPathExtender *) ext_ptr)->GetExtensionChooser();
+            auto ec_ptr = ec.get();
+            DEBUG("    Extender #i" << typeid(*ec_ptr).name());
+        }
+    }
+}
+
+// Builds one mate-pair scaffolding extender per mate-pair library, then sorts
+// the triplets (library type first, index second) so extenders run in a
+// deterministic, type-prioritized order.
+Extenders ExtendersGenerator::MakeMPExtenders(const ScaffoldingUniqueEdgeStorage &storage) const {
+    ExtenderTriplets result;
+
+    for (size_t lib_index = 0; lib_index < dataset_info_.reads.lib_count(); ++lib_index) {
+        const auto &lib = dataset_info_.reads[lib_index];
+
+        if (lib.is_mate_pair()) {
+            result.emplace_back(lib.type(), lib_index, MakeMatePairScaffoldingExtender(lib_index, storage));
+        }
+    }
+    std::stable_sort(result.begin(), result.end());
+
+    return ExtractExtenders(result);
+}
+
+// Builds "2015"-style scaffolding extenders for long single-read libraries
+// (e.g. PacBio). Each eligible library gets a connection condition over its
+// own long-read coverage map; gap joining uses fixed IS-variation (1000) and
+// insert-size (10000) placeholders since these libs have no real insert size.
+Extenders ExtendersGenerator::MakePBScaffoldingExtenders(const ScaffoldingUniqueEdgeStorage &unique_storage_pb,
+                                                         const vector<shared_ptr<GraphCoverageMap>> &long_reads_cov_map) const {
+    const auto &pset = params_.pset;
+    ExtenderTriplets result;
+
+    for (size_t lib_index = 0; lib_index < dataset_info_.reads.lib_count(); lib_index++) {
+        if (support_.IsForSingleReadScaffolder(dataset_info_.reads[lib_index])) {
+            INFO("Creating scaffolding extender for lib " << lib_index);
+            shared_ptr<ConnectionCondition> condition = make_shared<LongReadsLibConnectionCondition>(gp_.g,
+                                                                                                     lib_index, 2,
+                                                                                                     *long_reads_cov_map[lib_index]);
+            auto scaff_chooser = std::make_shared<ExtensionChooser2015>(gp_.g,
+                                                                        nullptr,
+                                                                        condition,
+                                                                        unique_storage_pb,
+                                                                        pset.scaffolder_options.cl_threshold,
+                                                                        pset.scaffolder_options.var_coeff,
+                                                                        pset.scaffolding2015.relative_weight_cutoff);
+
+            result.emplace_back(dataset_info_.reads[lib_index].type(),
+                                lib_index,
+                                make_shared<ScaffoldingPathExtender>(gp_, cover_map_,
+                                                                     scaff_chooser,
+                                                                     MakeGapJoiners(1000), /* "IS variation" */
+                                                                     10000, /* insert size */
+                                                                     false, /* investigate short loops */
+                                                                     params_.avoid_rc_connections,
+                                                                     false /* jump only from tips */));
+
+        }
+    }
+    INFO("Using " << result.size() << " long reads scaffolding " << support_.LibStr(result.size()));
+    std::stable_sort(result.begin(), result.end());
+
+    return ExtractExtenders(result);
+}
+
+
+// Returns the single coordinated-coverage extender, hard-wired to library 0.
+// NOTE(review): assumes lib 0 is the paired-end library suitable for
+// coordinated coverage — confirm against the pipeline that enables this mode.
+Extenders ExtendersGenerator::MakeCoverageExtenders() const {
+    Extenders result;
+
+    INFO("Using additional coordinated coverage extender");
+    result.push_back(MakeCoordCoverageExtender(0 /* lib index */));
+
+    return result;
+}
+
+// Central dispatch that assembles the full extender pipeline from the dataset's
+// libraries. Extenders are collected into three groups — basic (long-read +
+// paired-end), scaffolding, and short-loop-resolving — each stably sorted by
+// (library type, index), then concatenated in that group order.
+Extenders ExtendersGenerator::MakeBasicExtenders(const ScaffoldingUniqueEdgeStorage &storage,
+                                                 const vector<shared_ptr<GraphCoverageMap>> &long_reads_cov_map) const {
+    ExtenderTriplets basic_extenders;
+    ExtenderTriplets loop_resolving_extenders;
+    ExtenderTriplets scaffolding_extenders;
+
+    size_t single_read_libs = 0;
+    size_t pe_libs = 0;
+    size_t scf_pe_libs = 0;
+
+    const auto &pset = params_.pset;
+
+    for (size_t lib_index = 0; lib_index < dataset_info_.reads.lib_count(); ++lib_index) {
+        const auto &lib = dataset_info_.reads[lib_index];
+
+        //TODO: scaff2015 does not need any single read libs?
+        if (support_.IsForSingleReadExtender(lib)) {
+            basic_extenders.emplace_back(lib.type(), lib_index, MakeLongReadsExtender(lib_index, *long_reads_cov_map[lib_index]));
+            ++single_read_libs;
+        }
+        if (support_.IsForPEExtender(lib)) {
+            ++pe_libs;
+            if (IsOldPEEnabled(pset.sm)) {
+                // Classic PE extension; moleculo mode prefers long-edge choosing,
+                // multi-path mode additionally adds an RNA-style multi extender.
+                if (params_.mode == config::pipeline_type::moleculo) {
+                    basic_extenders.emplace_back(lib.type(), lib_index, MakeLongEdgePEExtender(lib_index, false));
+                } else if (pset.multi_path_extend) {
+                    basic_extenders.emplace_back(lib.type(), lib_index, MakePEExtender(lib_index, false));
+                    basic_extenders.emplace_back(lib.type(), lib_index, MakeRNAExtender(lib_index, false));
+                } else {
+                    basic_extenders.emplace_back(lib.type(), lib_index, MakePEExtender(lib_index, false));
+                }
+            } else if (pset.sm == sm_2015) {
+                basic_extenders.emplace_back(lib.type(), lib_index, MakeMatePairScaffoldingExtender(lib_index, storage));
+            }
+        }
+        //TODO logic is very cryptic!
+        if (support_.IsForShortLoopExtender(lib) && IsOldPEEnabled(pset.sm)) {
+            // Second PE extender instance with loop investigation enabled.
+            loop_resolving_extenders.emplace_back(lib.type(), lib_index, MakePEExtender(lib_index, true));
+            //TODO what about moleculo and rna here?
+        }
+        if (support_.IsForScaffoldingExtender(lib) && params_.use_scaffolder
+            && pset.scaffolder_options.enabled) {
+            ++scf_pe_libs;
+            if (params_.mode == config::pipeline_type::rna) {
+                scaffolding_extenders.emplace_back(lib.type(), lib_index, MakeRNAScaffoldingExtender(lib_index));
+            } else {
+                scaffolding_extenders.emplace_back(lib.type(), lib_index, MakeScaffoldingExtender(lib_index));
+                if (pset.sm == sm_combined) {
+                    scaffolding_extenders.emplace_back(lib.type(), lib_index, MakeMatePairScaffoldingExtender(lib_index, storage));
+                }
+            }
+        }
+    }
+
+    std::stable_sort(basic_extenders.begin(), basic_extenders.end());
+    std::stable_sort(scaffolding_extenders.begin(), scaffolding_extenders.end());
+    std::stable_sort(loop_resolving_extenders.begin(), loop_resolving_extenders.end());
+
+    Extenders result;
+    push_back_all(result, ExtractExtenders(basic_extenders));
+    push_back_all(result, ExtractExtenders(scaffolding_extenders));
+    push_back_all(result, ExtractExtenders(loop_resolving_extenders));
+
+    INFO("Using " << pe_libs << " paired-end " << support_.LibStr(pe_libs));
+    INFO("Using " << scf_pe_libs << " paired-end scaffolding " << support_.LibStr(scf_pe_libs));
+    INFO("Using " << single_read_libs << " single read " << support_.LibStr(single_read_libs));
+
+    PrintExtenders(result);
+    return result;
+}
+
+}
diff --git a/src/common/modules/path_extend/pipeline/extenders_logic.hpp b/src/common/modules/path_extend/pipeline/extenders_logic.hpp
new file mode 100644
index 0000000..2f6c190
--- /dev/null
+++ b/src/common/modules/path_extend/pipeline/extenders_logic.hpp
@@ -0,0 +1,118 @@
+//
+// Created by andrey on 14.11.16.
+//
+
+#pragma once
+
+#include "modules/path_extend/path_extender.hpp"
+#include "launch_support.hpp"
+
+namespace path_extend {
+
+using namespace debruijn_graph;
+
+struct ExtenderTriplet {
+    io::LibraryType lib_type_;
+    size_t lib_index_;
+    shared_ptr<PathExtender> extender_;
+
+    ExtenderTriplet(io::LibraryType lib_type, size_t lib_index, shared_ptr<PathExtender> extender):
+        lib_type_(lib_type), lib_index_(lib_index), extender_(extender) {
+
+    }
+
+    bool operator<(const ExtenderTriplet& that) const {
+        if (this->lib_type_ == that.lib_type_)
+            return this->lib_index_ < that.lib_index_;
+        return this->lib_type_ < that.lib_type_;
+    }
+};
+
+typedef vector<ExtenderTriplet> ExtenderTriplets;
+
+typedef vector<shared_ptr<PathExtender>> Extenders;
+
+inline Extenders ExtractExtenders(const ExtenderTriplets& triplets) {
+    Extenders result;
+    for (const auto& triplet : triplets)
+        result.push_back(triplet.extender_);
+
+    return result;
+}
+
+class ExtendersGenerator {
+    const config::dataset &dataset_info_;
+    const PathExtendParamsContainer &params_;
+    const conj_graph_pack &gp_;
+
+    const GraphCoverageMap &cover_map_;
+
+    const PELaunchSupport &support_;
+
+public:
+    ExtendersGenerator(const config::dataset &dataset_info,
+                       const PathExtendParamsContainer &params,
+                       const conj_graph_pack &gp,
+                       const GraphCoverageMap &cover_map,
+                       const PELaunchSupport& support) :
+        dataset_info_(dataset_info),
+        params_(params),
+        gp_(gp),
+        cover_map_(cover_map),
+        support_(support) { }
+
+    Extenders MakePBScaffoldingExtenders(const ScaffoldingUniqueEdgeStorage &unique_storage_pb,
+                                         const vector<shared_ptr<GraphCoverageMap>> &long_reads_cov_map) const;
+
+    Extenders MakeBasicExtenders(const ScaffoldingUniqueEdgeStorage &storage,
+                                 const vector<shared_ptr<GraphCoverageMap>> &long_reads_cov_map) const;
+
+    Extenders MakeMPExtenders(const ScaffoldingUniqueEdgeStorage &storage) const;
+
+    Extenders MakeCoverageExtenders() const;
+
+private:
+
+    shared_ptr<ExtensionChooser> MakeLongReadsExtensionChooser(size_t lib_index, const GraphCoverageMap& read_paths_cov_map) const;
+
+    shared_ptr<SimpleExtender> MakeLongReadsExtender(size_t lib_index, const GraphCoverageMap& read_paths_cov_map) const;
+
+    shared_ptr<SimpleExtender> MakeLongEdgePEExtender(size_t lib_index,
+                                                      bool investigate_loops) const;
+
+    shared_ptr<WeightCounter> MakeMetaWeightCounter(shared_ptr<PairedInfoLibrary> lib,
+                                                    size_t read_length) const;
+
+    shared_ptr<SimpleExtensionChooser> MakeMetaExtensionChooser(shared_ptr<PairedInfoLibrary> lib,
+                                                                size_t read_length) const;
+
+    shared_ptr<SimpleExtender> MakeMetaExtender(size_t lib_index, bool investigate_loops) const;
+
+
+    shared_ptr<SimpleExtender> MakePEExtender(size_t lib_index, bool investigate_loops) const;
+
+
+    shared_ptr<GapJoiner> MakeGapJoiners(double is_variation) const;
+
+
+    shared_ptr<PathExtender> MakeScaffoldingExtender(size_t lib_index) const;
+
+
+    shared_ptr<PathExtender> MakeRNAScaffoldingExtender(size_t lib_index) const;
+
+
+    shared_ptr<PathExtender> MakeMatePairScaffoldingExtender
+        (size_t lib_index, const ScaffoldingUniqueEdgeStorage &storage) const;
+
+
+    shared_ptr<SimpleExtender> MakeCoordCoverageExtender(size_t lib_index) const;
+
+
+    shared_ptr<SimpleExtender> MakeRNAExtender(size_t lib_index, bool investigate_loops) const;
+
+
+    void PrintExtenders(const vector<shared_ptr<PathExtender>> &extenders) const;
+
+};
+
+}
diff --git a/src/common/modules/path_extend/pipeline/launch_support.cpp b/src/common/modules/path_extend/pipeline/launch_support.cpp
new file mode 100644
index 0000000..3be9ce5
--- /dev/null
+++ b/src/common/modules/path_extend/pipeline/launch_support.cpp
@@ -0,0 +1,128 @@
+//
+// Created by andrey on 10.10.16.
+//
+
+#include "launch_support.hpp"
+
+namespace path_extend {
+
+using namespace debruijn_graph;
+
+bool PELaunchSupport::HasOnlyMPLibs() const {
+    for (const auto &lib : dataset_info_.reads) {
+        if (!(lib.is_mate_pair() && lib.data().mean_insert_size > 0.0)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+pe_config::ParamSetT::ExtensionOptionsT PELaunchSupport::GetExtensionOpts(shared_ptr<PairedInfoLibrary> lib,
+                                                                          const pe_config::ParamSetT &pset) const {
+    return lib->IsMp() ? pset.mate_pair_options : pset.extension_options;
+}
+
+double PELaunchSupport::SingleThresholdForLib(const pe_config::ParamSetT &pset,
+                                               double threshold) const {
+    return pset.extension_options.use_default_single_threshold || math::le(threshold, 0.0) ?
+               pset.extension_options.single_threshold : threshold;
+}
+
+bool PELaunchSupport::IsForSingleReadExtender(const io::SequencingLibrary<config::DataSetData> &lib) const {
+    return (lib.data().single_reads_mapped || lib.is_long_read_lib() || lib.is_contig_lib());
+}
+bool PELaunchSupport::IsForSingleReadScaffolder(const io::SequencingLibrary<config::DataSetData> &lib) const {
+    return (lib.is_long_read_lib() || (lib.is_contig_lib() && lib.type() != io::LibraryType::PathExtendContigs));
+}
+
+bool PELaunchSupport::IsForPEExtender(const io::SequencingLibrary<config::DataSetData> &lib) const {
+    return (lib.type() == io::LibraryType::PairedEnd && lib.data().mean_insert_size > 0.0);
+}
+
+bool PELaunchSupport::IsForShortLoopExtender(const io::SequencingLibrary<config::DataSetData> &lib) const {
+    return (lib.type() == io::LibraryType::PairedEnd && lib.data().mean_insert_size > 0.0);
+}
+
+bool PELaunchSupport::IsForScaffoldingExtender(const io::SequencingLibrary<config::DataSetData> &lib) const {
+    return (lib.type() == io::LibraryType::PairedEnd && lib.data().mean_insert_size > 0.0);
+}
+
+//TODO: review usage
+bool PELaunchSupport::UseCoverageResolverForSingleReads(const io::LibraryType &type) const {
+    return HasOnlyMPLibs() && (type == io::LibraryType::HQMatePairs);
+}
+
+std::string PELaunchSupport::LibStr(size_t count) const {
+    return count == 1 ? "library" : "libraries";
+}
+
+pe_config::LongReads PELaunchSupport::GetLongReadsConfig(const io::LibraryType &type) const {
+    if (io::SequencingLibraryBase::is_long_read_lib(type)) {
+        return params_.pe_cfg.long_reads.pacbio_reads;
+    } else if (type == io::LibraryType::PathExtendContigs) {
+        return params_.pe_cfg.long_reads.meta_contigs;
+    } else if (io::SequencingLibraryBase::is_contig_lib(type)) {
+        return params_.pe_cfg.long_reads.contigs;
+    }
+    return params_.pe_cfg.long_reads.single_reads;
+}
+
+size_t PELaunchSupport::FindMaxMPIS() const {
+    size_t max_is = 0;
+    for (size_t i = 0; i < dataset_info_.reads.lib_count(); ++i) {
+        if (dataset_info_.reads[i].is_mate_pair()) {
+            max_is = max(max_is, (size_t) dataset_info_.reads[i].data().mean_insert_size);
+        }
+    }
+    return max_is;
+}
+
+bool PELaunchSupport::HasLongReads() const {
+    return path_extend::HasLongReads(dataset_info_);
+}
+
+bool PELaunchSupport::HasLongReadsScaffolding() const {
+    for (const auto &lib : dataset_info_.reads) {
+        if (IsForSingleReadScaffolder(lib))
+            return true;
+    }
+    return false;
+}
+
+bool PELaunchSupport::HasMPReads() const {
+    for (const auto &lib : dataset_info_.reads) {
+        if (lib.is_mate_pair()) {
+            return true;
+        }
+    }
+    return false;
+}
+bool PELaunchSupport::SingleReadsMapped() const {
+    for (const auto &lib : dataset_info_.reads) {
+        if (lib.data().single_reads_mapped) {
+            return true;
+        }
+    }
+    return false;
+}
+
+double PELaunchSupport::EstimateLibCoverage(size_t lib_index) const {
+    double cov_fraction = double(dataset_info_.reads[lib_index].data().total_nucls) / double(TotalNuclsInGraph());
+    return cov_fraction * dataset_info_.avg_coverage();
+}
+
+size_t PELaunchSupport::TotalNuclsInGraph() const {
+    size_t total_nc_count = 0;
+    for (const auto &lib: dataset_info_.reads) {
+        if (lib.is_graph_contructable())
+            total_nc_count += lib.data().total_nucls;
+    }
+    return total_nc_count;
+}
+
+
+bool PELaunchSupport::NeedsUniqueEdgeStorage() const {
+    return !(params_.pset.sm == sm_old ||
+        (params_.pset.sm == sm_old_pe_2015 && !HasLongReadsScaffolding() && !HasMPReads()));
+}
+}
diff --git a/src/common/modules/path_extend/pipeline/launch_support.hpp b/src/common/modules/path_extend/pipeline/launch_support.hpp
new file mode 100644
index 0000000..53870af
--- /dev/null
+++ b/src/common/modules/path_extend/pipeline/launch_support.hpp
@@ -0,0 +1,145 @@
+//
+// Created by andrey on 10.10.16.
+//
+
+#pragma once
+
+
+#include "modules/path_extend/paired_library.hpp"
+#include "pipeline/config_struct.hpp"
+#include "modules/path_extend/pe_config_struct.hpp"
+
+namespace path_extend {
+
+using namespace debruijn_graph;
+
+inline size_t FindMaxISRightQuantile(const config::dataset& dataset_info, bool include_mate_pairs = true) {
+    size_t res = 0;
+    for (const auto& lib : dataset_info.reads) {
+        if (lib.is_paired()) {
+            if (lib.is_mate_pair() && !include_mate_pairs)
+                continue;
+            res = max(res, (size_t) lib.data().insert_size_right_quantile);
+        }
+    }
+    return res;
+}
+
+inline bool HasLongReads(const config::dataset& dataset_info) {
+    for (const auto& lib : dataset_info.reads) {
+        if (lib.is_long_read_lib() || lib.is_contig_lib()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+struct PathExtendParamsContainer {
+
+    PathExtendParamsContainer(const config::dataset& dataset_info,
+                              const pe_config::MainPEParamsT& pe_cfg_,
+                              const std::string& output_dir_,
+                              config::pipeline_type mode_,
+                              bool uneven_depth_,
+                              bool avoid_rc_connections_,
+                              bool use_scaffolder_):
+        pe_cfg(pe_cfg_),
+        pset(pe_cfg_.param_set),
+        output_dir(output_dir_),
+        etc_dir(output_dir + pe_cfg_.etc_dir + "/"),
+        mode(mode_),
+        uneven_depth(uneven_depth_),
+        avoid_rc_connections(avoid_rc_connections_),
+        use_scaffolder(use_scaffolder_),
+        traverse_loops(true),
+        detect_repeats_online(mode_ != config::pipeline_type::meta && mode_ != config::pipeline_type::rna)
+    {
+        if (!(use_scaffolder && pset.scaffolder_options.enabled)) {
+            traverse_loops = false;
+        }
+        if (mode_ == config::pipeline_type::rna)
+            traverse_loops = false;
+
+        //Parameters are subject to change
+        max_polisher_gap = FindMaxISRightQuantile(dataset_info);
+        //TODO: params
+        if (HasLongReads(dataset_info))
+            max_polisher_gap = max(max_polisher_gap, size_t(10000));
+
+        min_edge_len = 100;
+        max_path_diff = mode == config::pipeline_type::rna ? 1 : FindMaxISRightQuantile(dataset_info);
+    }
+
+    const pe_config::MainPEParamsT& pe_cfg;
+    const pe_config::ParamSetT& pset;
+
+    std::string output_dir;
+    std::string etc_dir;
+
+    config::pipeline_type mode;
+    bool uneven_depth;
+
+    bool avoid_rc_connections;
+    bool use_scaffolder;
+    bool traverse_loops;
+    bool detect_repeats_online;
+
+    size_t min_edge_len;
+    size_t max_path_diff;
+    size_t max_polisher_gap;
+    //TODO: move here size_t max_repeat_length;
+};
+
+
+class PELaunchSupport {
+    const config::dataset& dataset_info_;
+    const PathExtendParamsContainer& params_;
+
+public:
+
+    PELaunchSupport(const config::dataset& dataset_info,
+                    const PathExtendParamsContainer& params):
+        dataset_info_(dataset_info),
+        params_(params) { }
+
+    pe_config::ParamSetT::ExtensionOptionsT GetExtensionOpts(shared_ptr<PairedInfoLibrary> lib, const pe_config::ParamSetT& pset) const;
+
+    double SingleThresholdForLib(const pe_config::ParamSetT &pset, double threshold) const;
+
+    bool HasOnlyMPLibs() const;
+
+    bool IsForSingleReadExtender(const io::SequencingLibrary<config::DataSetData> &lib) const;
+
+    bool IsForSingleReadScaffolder(const io::SequencingLibrary<config::DataSetData> &lib) const;
+
+    bool IsForPEExtender(const io::SequencingLibrary<config::DataSetData> &lib) const;
+
+    bool IsForShortLoopExtender(const io::SequencingLibrary<config::DataSetData> &lib) const;
+
+    bool IsForScaffoldingExtender(const io::SequencingLibrary<config::DataSetData> &lib) const;
+
+    bool UseCoverageResolverForSingleReads(const io::LibraryType& type) const;
+
+    std::string LibStr(size_t count) const;
+
+    pe_config::LongReads GetLongReadsConfig(const io::LibraryType &type) const;
+
+    size_t FindMaxMPIS() const;
+
+    bool HasLongReads() const;
+
+    bool HasLongReadsScaffolding() const;
+
+    bool HasMPReads() const;
+
+    bool SingleReadsMapped() const;
+
+    double EstimateLibCoverage(size_t lib_index) const;
+
+    size_t TotalNuclsInGraph() const;
+
+    bool NeedsUniqueEdgeStorage() const;
+
+};
+
+}
diff --git a/src/common/modules/path_extend/pipeline/launcher.cpp b/src/common/modules/path_extend/pipeline/launcher.cpp
new file mode 100644
index 0000000..98540b6
--- /dev/null
+++ b/src/common/modules/path_extend/pipeline/launcher.cpp
@@ -0,0 +1,448 @@
+//
+// Created by andrey on 14.11.16.
+//
+
+#include "launcher.hpp"
+
+#include "modules/path_extend/path_visualizer.hpp"
+#include "modules/path_extend/loop_traverser.hpp"
+#include "modules/alignment/long_read_storage.hpp"
+#include "modules/path_extend/scaffolder2015/extension_chooser2015.hpp"
+#include "modules/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp"
+#include "modules/path_extend/scaffolder2015/scaffold_graph_constructor.hpp"
+#include "assembly_graph/graph_support/coverage_uniformity_analyzer.hpp"
+#include "assembly_graph/graph_support/scaff_supplementary.hpp"
+#include "modules/path_extend/scaffolder2015/path_polisher.hpp"
+
+
+namespace path_extend {
+
+using namespace debruijn_graph;
+using namespace std;
+
+
+vector<shared_ptr<ConnectionCondition>>
+    PathExtendLauncher::ConstructPairedConnectionConditions(const ScaffoldingUniqueEdgeStorage& edge_storage) const {
+
+    vector<shared_ptr<ConnectionCondition>> conditions;
+    const pe_config::ParamSetT::ScaffoldGraphParamsT &params = params_.pset.scaffold_graph_params;
+
+    for (size_t lib_index = 0; lib_index < dataset_info_.reads.lib_count(); ++lib_index) {
+        const auto &lib = dataset_info_.reads[lib_index];
+        if (lib.is_paired()) {
+            shared_ptr<PairedInfoLibrary> paired_lib;
+            if (lib.is_mate_pair())
+                paired_lib = MakeNewLib(gp_.g, lib, gp_.paired_indices[lib_index]);
+            else if (lib.type() == io::LibraryType::PairedEnd)
+                paired_lib = MakeNewLib(gp_.g, lib, gp_.clustered_indices[lib_index]);
+            else {
+                INFO("Unusable for scaffold graph paired lib #" << lib_index);
+                continue;
+            }
+            conditions.push_back(make_shared<ScaffoldGraphPairedConnectionCondition>(gp_.g, edge_storage.GetSet(),
+                                                                                     paired_lib, lib_index,
+                                                                                     params.always_add,
+                                                                                     params.never_add,
+                                                                                     params.relative_threshold));
+        }
+    }
+    return conditions;
+}
+
+shared_ptr<scaffold_graph::ScaffoldGraph> PathExtendLauncher::ConstructScaffoldGraph(const ScaffoldingUniqueEdgeStorage &edge_storage) const {
+    using namespace scaffold_graph;
+
+    const pe_config::ParamSetT::ScaffoldGraphParamsT &params = params_.pset.scaffold_graph_params;
+
+    INFO("Constructing connections");
+    LengthLowerBound edge_condition(gp_.g, edge_storage.GetMinLength());
+
+    vector<shared_ptr<ConnectionCondition>> conditions =
+        ConstructPairedConnectionConditions(edge_storage);
+
+    if (params.use_graph_connectivity) {
+        auto as_con = make_shared<AssemblyGraphConnectionCondition>(gp_.g, params.max_path_length, edge_storage);
+        as_con->AddInterestingEdges(edge_condition);
+        conditions.push_back(as_con);
+    }
+
+    INFO("Total conditions " << conditions.size());
+
+    INFO("Constructing scaffold graph from set of size " << edge_storage.GetSet().size());
+
+    DefaultScaffoldGraphConstructor constructor(gp_.g, edge_storage.GetSet(), conditions, edge_condition);
+    auto scaffold_graph = constructor.Construct();
+
+    INFO("Scaffold graph contains " << scaffold_graph->VertexCount() << " vertices and " << scaffold_graph->EdgeCount()
+             << " edges");
+    return scaffold_graph;
+}
+
+void PathExtendLauncher::PrintScaffoldGraph(const scaffold_graph::ScaffoldGraph &scaffold_graph,
+                                            const set<EdgeId> &main_edge_set,
+                                            const debruijn_graph::GenomeConsistenceChecker &genome_checker,
+                                            const string &filename) const {
+    using namespace scaffold_graph;
+
+    INFO("Constructing reference labels");
+    map<debruijn_graph::EdgeId, string> edge_labels;
+    size_t count = 0;
+    for (const auto &edge_coord_pair: genome_checker.ConstructEdgeOrder()) {
+        if (edge_labels.find(edge_coord_pair.first) == edge_labels.end()) {
+            edge_labels[edge_coord_pair.first] = "";
+        }
+        edge_labels[edge_coord_pair.first] += "order: " + ToString(count) +
+            "\n mapped range: " + ToString(edge_coord_pair.second.mapped_range.start_pos) + " : "
+            + ToString(edge_coord_pair.second.mapped_range.end_pos) +
+            "\n init range: " + ToString(edge_coord_pair.second.initial_range.start_pos) + " : "
+            + ToString(edge_coord_pair.second.initial_range.end_pos) + "\n";
+        ++count;
+    }
+
+    auto vertex_colorer = make_shared<ScaffoldVertexSetColorer>(main_edge_set);
+    auto edge_colorer = make_shared<ScaffoldEdgeColorer>();
+    graph_colorer::CompositeGraphColorer<ScaffoldGraph> colorer(vertex_colorer, edge_colorer);
+
+    INFO("Visualizing scaffold graph");
+    ScaffoldGraphVisualizer singleVisualizer(scaffold_graph, edge_labels);
+    std::ofstream single_dot;
+    single_dot.open((filename + "_single.dot").c_str());
+    singleVisualizer.Visualize(single_dot, colorer);
+    single_dot.close();
+
+    INFO("Printing scaffold graph");
+    std::ofstream data_stream;
+    data_stream.open((filename + ".data").c_str());
+    scaffold_graph.Print(data_stream);
+    data_stream.close();
+}
+
+
+void PathExtendLauncher::MakeAndOutputScaffoldGraph() const {
+    //Scaffold graph
+    shared_ptr<scaffold_graph::ScaffoldGraph> scaffold_graph;
+    if (params_.pset.scaffold_graph_params.construct) {
+        debruijn_graph::GenomeConsistenceChecker genome_checker(gp_, unique_data_.main_unique_storage_,
+                                                                params_.pset.genome_consistency_checker.max_gap,
+                                                                params_.pset.genome_consistency_checker.relative_max_gap);
+        scaffold_graph = ConstructScaffoldGraph(unique_data_.main_unique_storage_);
+        if (params_.pset.scaffold_graph_params.output) {
+            PrintScaffoldGraph(*scaffold_graph,
+                               unique_data_.main_unique_storage_.GetSet(),
+                               genome_checker,
+                               params_.etc_dir + "scaffold_graph");
+        }
+    }
+}
+
+void PathExtendLauncher::CountMisassembliesWithReference(const PathContainer &paths) const {
+    if (gp_.genome.size() == 0)
+        return;
+
+    debruijn_graph::GenomeConsistenceChecker genome_checker(gp_, unique_data_.main_unique_storage_,
+                                                            params_.pset.genome_consistency_checker.max_gap,
+                                                            params_.pset.genome_consistency_checker.relative_max_gap);
+
+    size_t total_mis = 0, gap_mis = 0;
+    genome_checker.SpellGenome();
+    for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
+        BidirectionalPath *path = iter.get();
+        auto map_res = genome_checker.CountMisassemblies(*path);
+        if (map_res.misassemblies > 0) {
+            INFO ("there are " << map_res.misassemblies << " misassemblies in path: ");
+            path->PrintInfo();
+            total_mis += map_res.misassemblies;
+        }
+        if (map_res.wrong_gap_size > 0) {
+            INFO ("there are " << map_res.wrong_gap_size << " wrong gaps in path: ");
+            path->PrintInfo();
+            gap_mis += map_res.wrong_gap_size;
+        }
+    }
+    INFO ("In total found " << total_mis << " misassemblies " << " and " << gap_mis << " gaps.");
+}
+
+
+void PathExtendLauncher::EstimateUniqueEdgesParams() {
+    bool uniform_coverage = false;
+    if (params_.pset.uniqueness_analyser.enabled) {
+        INFO("Autodetecting unique edge set parameters...");
+        unique_data_.min_unique_length_ = max(unique_data_.min_unique_length_, support_.FindMaxMPIS());
+        INFO("Minimal unique edge length set to the smallest MP library IS: " << unique_data_.min_unique_length_);
+
+        CoverageUniformityAnalyzer coverage_analyzer(gp_.g, unique_data_.min_unique_length_);
+        double median_coverage = coverage_analyzer.CountMedianCoverage();
+        double uniformity_fraction = coverage_analyzer.UniformityFraction(unique_data_.unique_variation_, median_coverage);
+        INFO ("median coverage for edges longer than " << unique_data_.min_unique_length_ << " is " << median_coverage <<
+            " uniformity " << size_t(uniformity_fraction * 100) << "%");
+        if (math::gr(uniformity_fraction, params_.pset.uniqueness_analyser.uniformity_fraction_threshold)) {
+            uniform_coverage = true;
+        }
+        if (!uniform_coverage) {
+            unique_data_.unique_variation_ = params_.pset.uniqueness_analyser.nonuniform_coverage_variation;
+            INFO("Coverage is not uniform, we do not rely on coverage for long edge uniqueness");
+        }
+
+    } else {
+        INFO("Unique edge set constructed with parameters from config : length " << unique_data_.min_unique_length_
+                 << " variation " << unique_data_.unique_variation_);
+    }
+}
+
+
+void PathExtendLauncher::FillUniqueEdgeStorage() {
+    ScaffoldingUniqueEdgeAnalyzer unique_edge_analyzer(gp_, unique_data_.min_unique_length_, unique_data_.unique_variation_);
+    unique_edge_analyzer.FillUniqueEdgeStorage(unique_data_.main_unique_storage_);
+}
+
+void PathExtendLauncher::DebugOutputPaths(const PathContainer &paths, const string &name) const {
+    if (!params_.pe_cfg.debug_output) {
+        return;
+    }
+    PathInfoWriter path_writer;
+    PathVisualizer visualizer;
+
+    writer_.OutputPaths(paths, params_.etc_dir + name);
+    if (params_.pe_cfg.output.write_paths) {
+        path_writer.WritePaths(paths, params_.etc_dir + name + ".dat");
+    }
+    if (params_.pe_cfg.viz.print_paths) {
+        visualizer.writeGraphWithPathsSimple(gp_, params_.etc_dir + name + ".dot", name, paths);
+    }
+}
+
+void PathExtendLauncher::FinalizePaths(PathContainer &paths,
+                                       GraphCoverageMap &cover_map,
+                                       const PathExtendResolver &resolver) const {
+
+    if (params_.pset.remove_overlaps) {
+        resolver.RemoveOverlaps(paths, cover_map, params_.min_edge_len, params_.max_path_diff,
+                                 params_.pset.cut_all_overlaps,
+                                 (params_.mode == config::pipeline_type::moleculo));
+    } else if (params_.mode == config::pipeline_type::rna) {
+        resolver.RemoveRNAOverlaps(paths, cover_map, params_.min_edge_len, params_.max_path_diff);
+    } else {
+        resolver.RemoveEqualPaths(paths, cover_map, params_.min_edge_len);
+    }
+
+    if (params_.avoid_rc_connections) {
+        paths.FilterInterstandBulges();
+    }
+    paths.FilterEmptyPaths();
+    resolver.AddUncoveredEdges(paths, cover_map);
+
+    if (params_.pset.path_filtration.enabled) {
+        LengthPathFilter(gp_.g, params_.pset.path_filtration.min_length).filter(paths);;
+        IsolatedPathFilter(gp_.g,
+                           params_.pset.path_filtration.min_length_for_low_covered,
+                           params_.pset.path_filtration.min_coverage).filter(paths);
+        IsolatedPathFilter(gp_.g, params_.pset.path_filtration.isolated_min_length).filter(paths);
+    }
+    paths.SortByLength();
+    for (auto &path : paths) {
+        path.first->ResetOverlaps();
+    }
+}
+
+void PathExtendLauncher::TraverseLoops(PathContainer &paths, GraphCoverageMap &cover_map) const {
+    INFO("Traversing tandem repeats");
+
+    LoopTraverser
+        loopTraverser(cover_map.graph(), cover_map,
+                      params_.pset.loop_traversal.min_edge_length,
+                      params_.pset.loop_traversal.max_component_size,
+                      params_.pset.loop_traversal.max_path_length);
+    size_t res = loopTraverser.TraverseAllLoops();
+    paths.SortByLength();
+
+    INFO("Traversed " << res << " loops");
+}
+
+Extenders PathExtendLauncher::ConstructMPExtender(const ExtendersGenerator &generator, size_t uniqe_edge_len) {
+    ScaffoldingUniqueEdgeAnalyzer additional_edge_analyzer(gp_, (size_t) uniqe_edge_len, unique_data_.unique_variation_);
+    unique_data_.unique_storages_.push_back(make_shared<ScaffoldingUniqueEdgeStorage>());
+    additional_edge_analyzer.FillUniqueEdgeStorage(*unique_data_.unique_storages_.back());
+
+    return generator.MakeMPExtenders(*unique_data_.unique_storages_.back());
+}
+
+Extenders PathExtendLauncher::ConstructMPExtenders(const ExtendersGenerator &generator) {
+    const pe_config::ParamSetT &pset = params_.pset;
+
+    Extenders extenders =  generator.MakeMPExtenders(unique_data_.main_unique_storage_);
+    INFO("Using " << extenders.size() << " mate-pair " << support_.LibStr(extenders.size()));
+
+    size_t cur_length = unique_data_.min_unique_length_ - pset.scaffolding2015.unique_length_step;
+    size_t lower_bound = max(pset.scaffolding2015.unique_length_lower_bound, pset.scaffolding2015.unique_length_step);
+
+    while (cur_length > lower_bound) {
+        INFO("Adding extender with length " << cur_length);
+        push_back_all(extenders, ConstructMPExtender(generator, cur_length));
+        cur_length -= pset.scaffolding2015.unique_length_step;
+    }
+    if (unique_data_.min_unique_length_ > lower_bound) {
+        INFO("Adding final extender with length " << lower_bound);
+        push_back_all(extenders, ConstructMPExtender(generator, lower_bound));
+    }
+
+    return extenders;
+}
+
+void PathExtendLauncher::FillPathContainer(size_t lib_index, size_t size_threshold) {
+    std::vector<PathInfo<Graph>> paths;
+    gp_.single_long_reads[lib_index].SaveAllPaths(paths);
+    for (const auto &path: paths) {
+        const auto &edges = path.getPath();
+        if (edges.size() <= size_threshold)
+            continue;
+
+        BidirectionalPath *new_path = new BidirectionalPath(gp_.g, edges);
+        BidirectionalPath *conj_path = new BidirectionalPath(new_path->Conjugate());
+        new_path->SetWeight((float) path.getWeight());
+        conj_path->SetWeight((float) path.getWeight());
+        unique_data_.long_reads_paths_[lib_index]->AddPair(new_path, conj_path);
+    }
+    DEBUG("Long reads paths " << unique_data_.long_reads_paths_[lib_index]->size());
+    unique_data_.long_reads_cov_map_[lib_index]->AddPaths(*unique_data_.long_reads_paths_[lib_index]);
+}
+
+
+void PathExtendLauncher::FillLongReadsCoverageMaps() {
+    for (size_t lib_index = 0; lib_index < dataset_info_.reads.lib_count(); lib_index++) {
+        unique_data_.long_reads_paths_.push_back(make_shared<PathContainer>());
+        unique_data_.long_reads_cov_map_.push_back(make_shared<GraphCoverageMap>(gp_.g));
+        if (support_.IsForSingleReadExtender(dataset_info_.reads[lib_index])) {
+            FillPathContainer(lib_index);
+        }
+    }
+}
+
+void  PathExtendLauncher::FillPBUniqueEdgeStorages() {
+    //FIXME magic constants
+    ScaffoldingUniqueEdgeAnalyzer unique_edge_analyzer_pb(gp_, 500, 0.5);
+
+    INFO("Filling backbone edges for long reads scaffolding...");
+    if (params_.uneven_depth) {
+        INFO(" with long reads paths");
+        //TODO:: muiltiple libraries?
+        for (size_t lib_index = 0; lib_index < dataset_info_.reads.lib_count(); lib_index++) {
+            if (support_.IsForSingleReadScaffolder(dataset_info_.reads[lib_index])) {
+                unique_edge_analyzer_pb.FillUniqueEdgesWithLongReads(unique_data_.long_reads_cov_map_[lib_index],
+                                                                     unique_data_.unique_pb_storage_,
+                                                                     support_.GetLongReadsConfig(dataset_info_.reads[lib_index].type()));
+            }
+        }
+        INFO("Removing fake unique with paired-end libs");
+        for (size_t lib_index = 0; lib_index < dataset_info_.reads.lib_count(); lib_index++) {
+            if (dataset_info_.reads[lib_index].type() == io::LibraryType::PairedEnd) {
+                unique_edge_analyzer_pb.ClearLongEdgesWithPairedLib(lib_index, unique_data_.unique_pb_storage_);
+            }
+        }
+
+    } else {
+        INFO(" with coverage")
+        unique_edge_analyzer_pb.FillUniqueEdgeStorage(unique_data_.unique_pb_storage_);
+    }
+    INFO(unique_data_.unique_pb_storage_.size() << " unique edges");
+}
+
+Extenders PathExtendLauncher::ConstructPBExtenders(const ExtendersGenerator &generator) {
+    FillPBUniqueEdgeStorages();
+    return generator.MakePBScaffoldingExtenders(unique_data_.unique_pb_storage_,
+                                                unique_data_.long_reads_cov_map_);
+}
+
+
+Extenders PathExtendLauncher::ConstructExtenders(const GraphCoverageMap& cover_map) {
+    INFO("Creating main extenders, unique edge length = " << unique_data_.min_unique_length_);
+    if (support_.SingleReadsMapped() || support_.HasLongReads())
+        FillLongReadsCoverageMaps();
+
+    ExtendersGenerator generator(dataset_info_, params_, gp_, cover_map, support_);
+    Extenders extenders = generator.MakeBasicExtenders(unique_data_.main_unique_storage_,
+                                                       unique_data_.long_reads_cov_map_);
+
+    //long reads scaffolding extenders.
+    if (support_.HasLongReads()) {
+        if (params_.pset.sm == sm_old) {
+            INFO("Will not use new long read scaffolding algorithm in this mode");
+        } else {
+            push_back_all(extenders, ConstructPBExtenders(generator));
+        }
+    }
+
+    if (support_.HasMPReads()) {
+        if (params_.pset.sm == sm_old) {
+            INFO("Will not use mate-pairs is this mode");
+        } else {
+            push_back_all(extenders, ConstructMPExtenders(generator));
+        }
+    }
+
+    if (params_.pset.use_coordinated_coverage)
+        push_back_all(extenders, generator.MakeCoverageExtenders());
+
+    INFO("Total number of extenders is " << extenders.size());
+    return extenders;
+}
+
+void PathExtendLauncher::PolishPaths(const PathContainer &paths, PathContainer &result) const {
+    //Fixes distances for paths gaps and tries to fill them in
+    INFO("Closing gaps in paths");
+    PathPolisher polisher(gp_, dataset_info_, unique_data_.main_unique_storage_, params_.max_polisher_gap);
+    polisher.PolishPaths(paths, result);
+    result.SortByLength();
+    INFO("Gap closing completed")
+}
+
+void PathExtendLauncher::Launch() {
+    INFO("ExSPAnder repeat resolving tool started");
+    make_dir(params_.output_dir);
+    make_dir(params_.etc_dir);
+
+    if (support_.NeedsUniqueEdgeStorage()) {
+        //Fill the storage to enable unique edge check
+        EstimateUniqueEdgesParams();
+        FillUniqueEdgeStorage();
+    }
+
+    MakeAndOutputScaffoldGraph();
+
+    PathExtendResolver resolver(gp_.g);
+
+    auto seeds = resolver.MakeSimpleSeeds();
+    seeds.SortByLength();
+    DebugOutputPaths(seeds, "init_paths");
+
+    GraphCoverageMap cover_map(gp_.g);
+    Extenders extenders = ConstructExtenders(cover_map);
+    shared_ptr<CompositeExtender> composite_extender = make_shared<CompositeExtender>(gp_.g, cover_map, extenders,
+                                                                                      unique_data_.main_unique_storage_,
+                                                                                      params_.max_path_diff,
+                                                                                      params_.pset.extension_options.max_repeat_length,
+                                                                                      params_.detect_repeats_online);
+
+    auto paths = resolver.ExtendSeeds(seeds, *composite_extender);
+    paths.FilterEmptyPaths();
+    paths.SortByLength();
+    DebugOutputPaths(paths, "raw_paths");
+
+    FinalizePaths(paths, cover_map, resolver);
+    DebugOutputPaths(paths, "before_loop_traversal");
+
+    TraverseLoops(paths, cover_map);
+    DebugOutputPaths(paths, "loop_traveresed");
+
+    PolishPaths(paths, gp_.contig_paths);
+    DebugOutputPaths(gp_.contig_paths, "polished_paths");
+    
+    GraphCoverageMap polished_map(gp_.g, gp_.contig_paths, true);
+    FinalizePaths(gp_.contig_paths, polished_map, resolver);
+    DebugOutputPaths(gp_.contig_paths, "final_paths");
+
+    CountMisassembliesWithReference(gp_.contig_paths);
+
+    INFO("ExSPAnder repeat resolving tool finished");
+}
+
+}
diff --git a/src/common/modules/path_extend/pipeline/launcher.hpp b/src/common/modules/path_extend/pipeline/launcher.hpp
new file mode 100644
index 0000000..e936f58
--- /dev/null
+++ b/src/common/modules/path_extend/pipeline/launcher.hpp
@@ -0,0 +1,115 @@
+//
+// Created by andrey on 14.11.16.
+//
+
+#ifndef PROJECT_LAUNCHER_H
+#define PROJECT_LAUNCHER_H
+
+#include "launch_support.hpp"
+#include "extenders_logic.hpp"
+
+#include "modules/path_extend/pe_resolver.hpp"
+#include "modules/genome_consistance_checker.hpp"
+#include "modules/path_extend/scaffolder2015/scaffold_graph.hpp"
+#include "assembly_graph/paths/bidirectional_path_io/bidirectional_path_output.hpp"
+
+namespace path_extend {
+
+using namespace debruijn_graph;
+
+class PathExtendLauncher {  // top-level driver of the ExSPAnder path extension / repeat resolution stage
+
+private:
+    const config::dataset& dataset_info_;  // description of the input read libraries
+    const PathExtendParamsContainer& params_;  // algorithm parameters (pset etc.)
+    conj_graph_pack& gp_;  // assembly graph pack: graph, indices, result containers
+    PELaunchSupport support_;  // helper queries over dataset_info_/params_
+
+    DefaultContigCorrector<ConjugateDeBruijnGraph> corrector_;
+    DefaultContigConstructor<ConjugateDeBruijnGraph> constructor_;  // built on top of corrector_
+    shared_ptr<ContigNameGenerator> contig_name_generator_;  // naming scheme depends on params_.mode
+    ContigWriter writer_;  // used for debug path output
+
+    struct {  // all unique-edge related state, grouped in an anonymous aggregate
+        size_t min_unique_length_;  // length lower bound for an edge to be treated as unique
+        double unique_variation_;  // allowed coverage variation for unique edges
+
+        ScaffoldingUniqueEdgeStorage main_unique_storage_;
+        vector<shared_ptr<ScaffoldingUniqueEdgeStorage>> unique_storages_;
+
+        ScaffoldingUniqueEdgeStorage unique_pb_storage_;  // unique edges for PacBio-style libs
+        vector<shared_ptr<PathContainer>> long_reads_paths_;  // per-library long-read paths
+        vector<shared_ptr<GraphCoverageMap>> long_reads_cov_map_;  // per-library coverage maps
+    } unique_data_;
+
+    vector<shared_ptr<ConnectionCondition>>
+        ConstructPairedConnectionConditions(const ScaffoldingUniqueEdgeStorage& edge_storage) const;
+
+    shared_ptr<scaffold_graph::ScaffoldGraph>
+        ConstructScaffoldGraph(const ScaffoldingUniqueEdgeStorage& edge_storage) const;
+
+    void PrintScaffoldGraph(const scaffold_graph::ScaffoldGraph &scaffold_graph,
+                            const set<EdgeId>& main_edge_set,
+                            const debruijn_graph::GenomeConsistenceChecker& genome_checker,
+                            const string& filename) const;
+
+    void MakeAndOutputScaffoldGraph() const;  // build + dump the scaffold graph (no effect on paths)
+
+    void CountMisassembliesWithReference(const PathContainer& paths) const;  // stats vs. reference, if provided
+
+    void EstimateUniqueEdgesParams();  // fills unique_data_ length/variation parameters
+
+    void FillUniqueEdgeStorage();  // fills unique_data_.main_unique_storage_
+
+    void FillPBUniqueEdgeStorages();
+
+    void FillPathContainer(size_t lib_index, size_t size_threshold = 1);
+
+    void FillLongReadsCoverageMaps();
+
+    void DebugOutputPaths(const PathContainer& paths, const string& name) const;
+
+    void FinalizePaths(PathContainer& paths, GraphCoverageMap &cover_map, const PathExtendResolver&resolver) const;
+
+    void TraverseLoops(PathContainer& paths, GraphCoverageMap& cover_map) const;
+
+    void PolishPaths(const PathContainer &paths, PathContainer &result) const;  // gap closing; see PathPolisher
+
+    Extenders ConstructExtenders(const GraphCoverageMap& cover_map);
+
+    Extenders ConstructMPExtenders(const ExtendersGenerator &generator);
+
+    Extenders ConstructMPExtender(const ExtendersGenerator &generator, size_t uniqe_edge_len);
+
+    Extenders ConstructPBExtenders(const ExtendersGenerator &generator);
+
+
+public:
+
+    PathExtendLauncher(const config::dataset& dataset_info,
+                       const PathExtendParamsContainer& params,
+                       conj_graph_pack& gp):
+        dataset_info_(dataset_info),
+        params_(params),
+        gp_(gp),
+        support_(dataset_info, params),
+        corrector_(gp.g),
+        constructor_(gp.g, corrector_),
+        contig_name_generator_(MakeContigNameGenerator(params_.mode, gp)),
+        writer_(gp.g, constructor_, gp_.components, contig_name_generator_),
+        unique_data_()
+    {
+        unique_data_.min_unique_length_ = params.pset.scaffolding2015.unique_length_upper_bound;  // initial values; may be refined by EstimateUniqueEdgesParams()
+        unique_data_.unique_variation_ = params.pset.uniqueness_analyser.unique_coverage_variation;
+    }
+
+    ~PathExtendLauncher() {
+    }
+
+    void Launch();  // runs the whole pipeline; results end up in gp_.contig_paths
+
+};
+
+}
+
+#endif //PROJECT_LAUNCHER_H
diff --git a/src/common/modules/path_extend/scaffolder2015/connection_condition2015.cpp b/src/common/modules/path_extend/scaffolder2015/connection_condition2015.cpp
new file mode 100644
index 0000000..9149f3c
--- /dev/null
+++ b/src/common/modules/path_extend/scaffolder2015/connection_condition2015.cpp
@@ -0,0 +1,260 @@
+#include "connection_condition2015.hpp"
+namespace path_extend {
+
+
+map <debruijn_graph::EdgeId, double> ConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e, const ScaffoldingUniqueEdgeStorage& storage) const {  // default implementation: restrict ConnectedWith(e) to edges marked unique in storage
+    auto all_edges = this->ConnectedWith(e);
+    map <debruijn_graph::EdgeId, double> res;
+    for (auto edge: all_edges) {
+        if (storage.IsUnique(edge.first)){  // keep only connections that land on unique edges
+            res.insert(edge);
+        }
+    }
+    return res;
+}
+
+PairedLibConnectionCondition::PairedLibConnectionCondition(const debruijn_graph::Graph &graph,
+                             shared_ptr <PairedInfoLibrary> lib,
+                             size_t lib_index,
+                             size_t min_read_count) :
+        graph_(graph),
+        lib_(lib),
+        lib_index_(lib_index),
+        min_read_count_(min_read_count),
+        //FIXME reconsider condition; config!
+        left_dist_delta_(5 * (int) lib_->GetISMax()),  // distance window left slack: 5x max insert size
+        right_dist_delta_(max(5 * (int) lib_->GetIsVar(), int(lib_->GetIS()))) {  // right slack: 5x IS variance, but at least one insert size
+}
+
+size_t PairedLibConnectionCondition::GetLibIndex() const {  // index of the paired-info library backing this condition
+    return lib_index_;
+}
+
+map <debruijn_graph::EdgeId, double> PairedLibConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e) const {  // edges linked to e by enough read pairs within the distance window
+    set <debruijn_graph::EdgeId> all_edges;
+    int e_length = (int) graph_.length(e);
+    lib_->FindJumpEdges(e, all_edges,  e_length - left_dist_delta_, e_length + right_dist_delta_);  // candidate edges having paired info in the window
+
+    map <debruijn_graph::EdgeId, double> result;
+    for (auto edge : all_edges) {
+        double w = GetWeight(e, edge);
+        if (edge != e && edge != graph_.conjugate(e) &&  // skip self- and self-conjugate links
+            math::ge(w, (double) min_read_count_)) {  // require at least min_read_count_ supporting pairs
+            result[edge] = w;
+        }
+    }
+    return result;
+}
+
+double PairedLibConnectionCondition::GetWeight(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const {  // paired-info support for e1->e2 within the same distance window
+    int e_length = (int) graph_.length(e1);
+    return lib_->CountPairedInfo(e1, e2, e_length - left_dist_delta_, e_length + right_dist_delta_);
+}
+
+LongReadsLibConnectionCondition::LongReadsLibConnectionCondition(const debruijn_graph::Graph &graph,
+                                    size_t lib_index,
+                                    size_t min_read_count, const GraphCoverageMap& cov_map):graph_(graph), lib_index_(lib_index), min_read_count_(min_read_count), cov_map_(cov_map){}
+
+map<debruijn_graph::EdgeId, double> LongReadsLibConnectionCondition::ConnectedWith(debruijn_graph::EdgeId ) const {  // intentionally empty: long-read connections require a unique-edge storage (see two-argument overload)
+    return map <debruijn_graph::EdgeId, double>();
+};
+
+bool LongReadsLibConnectionCondition::CheckPath(BidirectionalPath *path, EdgeId e1, EdgeId e2) const {  // true iff e1 and e2 each occur exactly once in path and e1 is not the last edge
+    auto pos1 = path->FindAll(e1);
+    if (pos1.size() != 1) return false;
+    auto pos2 = path->FindAll(e2);
+    if (pos2.size() != 1) {
+        if (pos2.size() >= 2) {  // a "unique" edge present twice signals an upstream inconsistency worth logging
+            DEBUG("Something went wrong:: Edge " << graph_.int_id(e2) << "is called unique but presents in path twice! first edge " << graph_.int_id(e1) << " path ");
+            path->Print();
+        }
+        return false;
+    }
+    if (pos1[0] == path->Size() - 1) return false;  // e1 at the very end: nothing can follow it
+    return true;
+}
+
+map<debruijn_graph::EdgeId, double> LongReadsLibConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e, const ScaffoldingUniqueEdgeStorage& storage) const {  // next unique edges after e, weighted by the long-read paths supporting each transition
+    map <debruijn_graph::EdgeId, double> res;
+    auto cov_paths = cov_map_.GetCoveringPaths(e);
+    DEBUG("Got cov paths " << cov_paths.size());
+    for (const auto path: cov_paths) {
+        auto pos1 = path->FindAll(e);
+        if (pos1.size() != 1) {  // e must occur exactly once in the covering path
+            DEBUG("***not unique " << graph_.int_id(e) << " len " << graph_.length(e) << "***");
+            continue;
+        }
+        size_t pos = pos1[0];
+        pos++;
+        while (pos < path->Size()){  // scan forward for the first unique edge after e
+            if (storage.IsUnique(path->At(pos))) {
+                if (CheckPath(path, path->At(pos1[0]), path->At(pos))) {
+                    res[path->At(pos)] += path->GetWeight();  // accumulate support across covering paths
+                }
+                break;  // only the nearest unique successor counts for this path
+            }
+            pos++;
+        }
+    }
+    DEBUG("Before prefiltering " << res.size());
+    auto iter = res.begin();
+    while (iter != res.end()) {  // drop connections below the read-count threshold
+        if (iter->second < min_read_count_){
+            iter = res.erase(iter);  // erase returns the next valid iterator
+        } else {
+            iter++;
+        }
+    }
+    DEBUG("After prefiltering" << res.size());
+    return res;
+}
+
+int LongReadsLibConnectionCondition::GetMedianGap(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const {  // weighted median gap between e1 and e2 over covering long-read paths
+    auto cov_paths = cov_map_.GetCoveringPaths(e1);
+    std::vector<pair<int, double> > h;  // (gap, path weight) samples
+    for (const auto path: cov_paths) {
+        if (CheckPath(path, e1, e2)) {
+            auto pos1 = path->FindAll(e1);
+            auto pos2 = path->FindAll(e2);
+            h.push_back(make_pair(path->LengthAt(pos1[0] + 1) - path->LengthAt(pos2[0]), path->GetWeight()));  // NOTE(review): LengthAt returns size_t, so this wraps if e2 precedes e1; CheckPath does not enforce the order -- verify
+        }
+    }
+    std::sort(h.begin(), h.end());
+    double sum = 0.0;
+    double sum2 = 0.0;
+    for (size_t j = 0; j< h.size(); ++j) {  // total weight of all samples
+        sum += h[j].second;
+    }
+    size_t i = 0;
+    for (; i < h.size(); ++i) {  // advance i to the weighted-median entry
+        sum2 += h[i].second;
+        if (sum2 * 2 > sum)
+            break;
+    }
+    if (h.size() == 0) {  // no path passed CheckPath; checked before indexing h[i]
+        WARN("filtering incorrectness");
+        return 0;
+    }
+
+    return h[i].first;
+}
+
+size_t LongReadsLibConnectionCondition::GetLibIndex() const {  // index of the long-read library
+    return lib_index_;
+}
+
+ScaffoldGraphPairedConnectionCondition::ScaffoldGraphPairedConnectionCondition(const debruijn_graph::Graph &graph,
+                                                                     const set<debruijn_graph::EdgeId>& graph_edges,
+                                                                     shared_ptr <PairedInfoLibrary> lib,
+                                                                     size_t lib_index,
+                                                                     size_t always_add,
+                                                                     size_t never_add,
+                                                                     double relative_threshold):
+    PairedLibConnectionCondition(graph, lib, lib_index, never_add),  // base min_read_count_ set to the hard minimum
+    graph_edges_(graph_edges),
+    always_add_(always_add),
+    never_add_(never_add),
+    relative_threshold_(relative_threshold) {}
+
+map <debruijn_graph::EdgeId, double> ScaffoldGraphPairedConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e) const {  // like the base version, but with a threshold adapting to the strongest in-set connection
+    set <debruijn_graph::EdgeId> all_edges;
+    int e_length = (int) graph_.length(e);
+    lib_->FindJumpEdges(e, all_edges,  e_length - left_dist_delta_, e_length + right_dist_delta_);
+
+    double max_weight = 0;
+    for (auto edge : all_edges) {  // first pass: strongest connection landing inside graph_edges_
+        if (edge != e && edge != graph_.conjugate(e)) {
+            double w = GetWeight(e, edge);
+            if (graph_edges_.count(edge) > 0 && math::gr(w, max_weight))
+                max_weight = w;
+        }
+    }
+    double threshold = std::max((double) never_add_, std::min((double) always_add_, max_weight * relative_threshold_));  // adaptive cutoff clamped into [never_add_, always_add_]
+    map <debruijn_graph::EdgeId, double> result;
+    for (auto edge : all_edges) {  // second pass: keep connections passing the adaptive threshold
+        double w = GetWeight(e, edge);
+        if (edge != e && edge != graph_.conjugate(e) &&
+            math::ge(w, threshold)) {
+            result[edge] = w;
+        }
+    }
+    return result;
+}
+
+
+//TODO: We use same part of index twice, is it necessary?
+int PairedLibConnectionCondition::GetMedianGap(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const {  // weighted median gap from the paired-info distance histogram
+    std::vector<int> distances;
+    std::vector<double> weights;
+    int e_length = (int) graph_.length(e1);
+    lib_->CountDistances(e1, e2, distances, weights);
+    std::vector<pair<int, double> >h(distances.size());  // NOTE(review): this value-initializes distances.size() (0, 0.0) pairs, and push_back below appends after them -- the zero entries take part in the median; likely reserve() was intended. Verify.
+    for (size_t i = 0; i< distances.size(); i++) {
+//TODO:: we make same checks twice! That's bad
+        if (distances[i] >= e_length - left_dist_delta_ && distances[i] <= e_length + right_dist_delta_)
+            h.push_back(std::make_pair(distances[i], weights[i]));
+    }
+//TODO: is it really necessary?
+    std::sort(h.begin(), h.end());
+    double sum = 0.0;
+    double sum2 = 0.0;
+    for (size_t j = 0; j< h.size(); ++j) {  // total weight of all samples
+        sum += h[j].second;
+    }
+    size_t i = 0;
+    for (; i < h.size(); ++i) {  // advance i to the weighted-median entry
+        sum2 += h[i].second;
+        if (sum2 * 2 > sum)
+            break;
+    }
+    if (h.size() == 0) {  // checked before indexing h[i]
+        WARN("filtering incorrectness");
+        return 0;
+    }
+    return (int) round(h[i].first - e_length);  // distances are measured from e1's start; subtract its length to get the gap
+}
+
+AssemblyGraphConnectionCondition::AssemblyGraphConnectionCondition(const debruijn_graph::Graph &g,
+                    size_t max_connection_length, const ScaffoldingUniqueEdgeStorage & unique_edges) :
+        g_(g), max_connection_length_(max_connection_length), interesting_edge_set_(unique_edges.GetSet()), stored_distances_() {  // interesting set is seeded with all unique edges
+}
+
+map <debruijn_graph::EdgeId, double> AssemblyGraphConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e) const {  // interesting edges reachable from e's end within max_connection_length_; memoized in stored_distances_
+    VERIFY_MSG(interesting_edge_set_.find(e)!= interesting_edge_set_.end(), " edge "<< e.int_id() << " not applicable for connection condition");
+    if (stored_distances_.find(e) != stored_distances_.end()) {  // cache hit
+        return stored_distances_[e];
+    }
+    stored_distances_.insert(make_pair(e, map<debruijn_graph::EdgeId, double>()));
+    for (auto connected: g_.OutgoingEdges(g_.EdgeEnd(e))) {  // direct successors (distance 0)
+        if (interesting_edge_set_.find(connected) != interesting_edge_set_.end()) {
+            stored_distances_[e].insert(make_pair(connected, 1));  // every connection gets the same weight 1
+        }
+    }
+    DijkstraHelper<debruijn_graph::Graph>::BoundedDijkstra dijkstra(
+            DijkstraHelper<debruijn_graph::Graph>::CreateBoundedDijkstra(g_, max_connection_length_));
+    dijkstra.Run(g_.EdgeEnd(e));
+    for (auto v: dijkstra.ReachedVertices()) {  // then everything reachable within the length bound
+        for (auto connected: g_.OutgoingEdges(v)) {
+            if (interesting_edge_set_.find(connected) != interesting_edge_set_.end() && dijkstra.GetDistance(v) < max_connection_length_) {
+                stored_distances_[e].insert(make_pair(connected, 1));
+            }
+        }
+    }
+    return stored_distances_[e];
+}
+void AssemblyGraphConnectionCondition::AddInterestingEdges(func::TypedPredicate<typename Graph::EdgeId> edge_condition) {  // widen the interesting set with all edges satisfying the predicate
+    for (auto e_iter = g_.ConstEdgeBegin(); !e_iter.IsEnd(); ++e_iter) {
+        if (edge_condition(*e_iter))
+            interesting_edge_set_.insert(*e_iter);
+    }
+}
+
+size_t AssemblyGraphConnectionCondition::GetLibIndex() const {  // no read library behind this condition
+    return (size_t) - 1;
+}
+
+int AssemblyGraphConnectionCondition::GetMedianGap (debruijn_graph::EdgeId, debruijn_graph::EdgeId) const {  // graph-connected edges are adjacent: no gap
+    return 0;
+}
+
+}
diff --git a/src/common/modules/path_extend/scaffolder2015/connection_condition2015.hpp b/src/common/modules/path_extend/scaffolder2015/connection_condition2015.hpp
new file mode 100644
index 0000000..be1f51c
--- /dev/null
+++ b/src/common/modules/path_extend/scaffolder2015/connection_condition2015.hpp
@@ -0,0 +1,143 @@
+#pragma once
+#include "modules/genome_consistance_checker.hpp"
+#include "utils/logger/logger.hpp"
+#include "modules/path_extend/paired_library.hpp"
+#include "assembly_graph/graph_support/scaff_supplementary.hpp"
+#include "modules/alignment/long_read_storage.hpp"
+#include "modules/path_extend/pe_utils.hpp"
+#include "common/assembly_graph/graph_support/basic_edge_conditions.hpp"
+#include <map>
+#include <set>
+
+
+namespace path_extend {
+using debruijn_graph::EdgeId;
+using debruijn_graph::Graph;
+
+//De Bruijn graph edge condition interface
+class LengthLowerBound : public omnigraph::EdgeCondition<Graph> {  // passes only edges at least a given length
+    typedef Graph::EdgeId EdgeId;
+    typedef EdgeCondition<Graph> base;
+
+    const size_t max_length_;  // NOTE(review): acts as a length LOWER bound despite the name (see Check)
+
+public:
+
+    LengthLowerBound(const Graph &g, size_t max_length)
+            : base(g),
+              max_length_(max_length) {
+    }
+
+    bool Check(EdgeId e) const {
+        return this->g().length(e) >= max_length_;
+    }
+};
+
+/* Connection condition are used by both scaffolder's extension chooser and scaffold graph */
+
+class ConnectionCondition {
+protected:
+    DECL_LOGGER("ConnectionCondition")
+
+public:
+// Outputs the edges e is connected with.
+//TODO  performance issue: think about inside filtering. Return only unique connected edges?
+    virtual map<EdgeId, double> ConnectedWith(EdgeId e) const = 0;  // edge -> connection weight
+    virtual map<EdgeId, double> ConnectedWith(EdgeId e, const ScaffoldingUniqueEdgeStorage& storage) const;  // default: ConnectedWith(e) filtered by storage.IsUnique
+    virtual int GetMedianGap(EdgeId e1, EdgeId e2) const = 0;  // estimated gap length between e1 and e2
+    virtual size_t GetLibIndex() const = 0;  // index of the underlying library; (size_t)-1 when there is none
+    virtual ~ConnectionCondition() {
+    }
+};
+
+// Main (mate pair library) connection condition.
+class PairedLibConnectionCondition : public ConnectionCondition {
+protected:
+    const Graph &graph_;
+    shared_ptr <PairedInfoLibrary> lib_;  // paired-info source (mate pairs)
+    size_t lib_index_;
+//Minimal number of mate pairs to call connection sound
+    size_t min_read_count_;
+public:
+//Only paired info with gap between e1 and e2 between -left_dist_delta_ and right_dist_delta_ taken in account
+    int left_dist_delta_;
+    int right_dist_delta_;
+
+    PairedLibConnectionCondition(const Graph &graph,
+                                 shared_ptr <PairedInfoLibrary> lib,
+                                 size_t lib_index,
+                                 size_t min_read_count);
+    size_t GetLibIndex() const override;
+    map <EdgeId, double> ConnectedWith(EdgeId e) const override;
+    double GetWeight(EdgeId e1, EdgeId e2) const;  // paired-info support for e1->e2 within the distance window
+//Returns median gap size
+    int GetMedianGap (EdgeId e1, EdgeId e2) const override;
+};
+
+class LongReadsLibConnectionCondition : public ConnectionCondition {
+protected:
+    const Graph &graph_;
+    size_t lib_index_;
+//Minimal number of reads to call connection sound
+    size_t min_read_count_;
+    const GraphCoverageMap& cov_map_;  // long-read paths covering each edge
+
+    bool CheckPath(BidirectionalPath *path, EdgeId e1, EdgeId e2) const;  // both edges occur exactly once and e1 is not the last edge
+
+public:
+//Only paired info with gap between e1 and e2 between -left_dist_delta_ and right_dist_delta_ taken in account
+
+    LongReadsLibConnectionCondition(const Graph &graph,
+                                 size_t lib_index,
+                                 size_t min_read_count, const GraphCoverageMap& cov_map);
+    size_t GetLibIndex() const override;
+    map<EdgeId, double> ConnectedWith(EdgeId e) const override;  // always empty; use the storage-aware overload
+    map<EdgeId, double> ConnectedWith(EdgeId e, const ScaffoldingUniqueEdgeStorage& storage) const override;
+// Returns median gap size
+    int GetMedianGap (EdgeId e1, EdgeId e2) const override;
+
+};
+
+
+
+//Should it be removed after ConnectedWith using unique storage was introduced?
+class ScaffoldGraphPairedConnectionCondition: public PairedLibConnectionCondition {
+protected:
+    const set<EdgeId>& graph_edges_;  // scaffold-graph edge set used to pick the reference (strongest) weight
+
+    size_t always_add_;  // upper clamp of the adaptive weight threshold
+    size_t never_add_;  // lower clamp (hard minimum weight)
+    double relative_threshold_;  // fraction of the strongest in-set weight used as the cutoff
+
+public:
+    ScaffoldGraphPairedConnectionCondition(const Graph &graph,
+                                      const set<EdgeId>& graph_edges,
+                                      shared_ptr <PairedInfoLibrary> lib,
+                                      size_t lib_index,
+                                      size_t always_add,
+                                      size_t never_add,
+                                      double relative_threshold);
+
+    map<EdgeId, double> ConnectedWith(EdgeId e) const override;  // adaptive-threshold variant of the base implementation
+
+};
+
+/*  Condition used to find connected in graph edges.
+*
+*/
+class AssemblyGraphConnectionCondition : public ConnectionCondition {
+protected:
+    const Graph &g_;
+//Maximal gap to the connection.
+    size_t max_connection_length_;
+    set<EdgeId> interesting_edge_set_;  // candidate target edges: unique edges plus anything added via AddInterestingEdges
+    mutable map<EdgeId, map<EdgeId, double>> stored_distances_;  // memoization cache for ConnectedWith (hence mutable)
+public:
+    AssemblyGraphConnectionCondition(const Graph &g, size_t max_connection_length,
+                                     const ScaffoldingUniqueEdgeStorage& unique_edges);
+    void AddInterestingEdges(func::TypedPredicate<typename Graph::EdgeId> edge_condition);
+    map<EdgeId, double> ConnectedWith(EdgeId e) const override;
+    size_t GetLibIndex() const override;  // always (size_t)-1: no library behind this condition
+    int GetMedianGap(EdgeId, EdgeId ) const override;  // always 0: graph-connected edges are adjacent
+};
+}
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/extension_chooser2015.cpp b/src/common/modules/path_extend/scaffolder2015/extension_chooser2015.cpp
similarity index 71%
rename from src/modules/algorithms/path_extend/scaffolder2015/extension_chooser2015.cpp
rename to src/common/modules/path_extend/scaffolder2015/extension_chooser2015.cpp
index 1e2af32..0267d68 100644
--- a/src/modules/algorithms/path_extend/scaffolder2015/extension_chooser2015.cpp
+++ b/src/common/modules/path_extend/scaffolder2015/extension_chooser2015.cpp
@@ -19,23 +19,24 @@ std::pair<EdgeId, int> ExtensionChooser2015::FindLastUniqueInPath(const Bidirect
 ExtensionChooser::EdgeContainer ExtensionChooser2015::FindNextUniqueEdge(const EdgeId from) const {
     VERIFY(unique_edges_.IsUnique(from));
     EdgeContainer result;
-    set<EdgeId> candidate_edges = paired_connection_condition_.ConnectedWith(from);
+    map<EdgeId, double> candidate_edges = lib_connection_condition_->ConnectedWith(from, unique_edges_);
+    DEBUG(candidate_edges.size() << " candidate edges");
     vector<pair<double, pair<EdgeId, int >>> to_sort;
-    for (EdgeId e : candidate_edges) {
-        if (!unique_edges_.IsUnique(e)) {
-            continue;
-        }
-        double sum = paired_connection_condition_.GetWeight(from, e);
+    for (const auto& pair: candidate_edges) {
+        EdgeId e = pair.first;
+        double sum = pair.second;
         DEBUG("edge " << g_.int_id(e) << " weight " << sum);
         if (sum < absolute_weight_threshold_) {
             DEBUG("Edge " << g_.int_id(e)  << " weight " << sum << " failed absolute weight threshold " << absolute_weight_threshold_);
             continue;
         }
-        int gap = paired_connection_condition_.GetMedianGap(from, e);
+        int gap = lib_connection_condition_->GetMedianGap(from, e);
 
-        auto connected_with = graph_connection_condition_.ConnectedWith(from);
-        if (connected_with.find(e) != connected_with.end()) {
-            sum *= graph_connection_bonus_;
+        if (use_graph_connectivity_) {
+            auto connected_with = graph_connection_condition_.ConnectedWith(from);
+            if (connected_with.find(e) != connected_with.end()) {
+                sum *= graph_connection_bonus_;
+            }
         }
         to_sort.push_back(make_pair(sum, make_pair(e, gap)));
     }
@@ -53,29 +54,39 @@ ExtensionChooser::EdgeContainer ExtensionChooser2015::FindNextUniqueEdge(const E
     }
     return result;
 }
+void ExtensionChooser2015::InsertAdditionalGaps(ExtensionChooser::EdgeContainer& result) const{
+    for (size_t i = 0; i< result.size(); i++) {
+//At least 10*"N" when scaffolding
+        if (result[i].d_ < MIN_N_QUANTITY + int(g_.k())) {
+            result[i].d_ = MIN_N_QUANTITY + int(g_.k());
+        }
+    }
+}
 
 ExtensionChooser::EdgeContainer ExtensionChooser2015::Filter(const BidirectionalPath& path, const ExtensionChooser::EdgeContainer& /*edges*/) const {
-//    set<EdgeId> candidates = FindCandidates(path);
+    DEBUG("filtering started");
     pair<EdgeId, int> last_unique = FindLastUniqueInPath(path);
+    DEBUG ("last unique found");
     EdgeContainer result;
-
+    DEBUG(g_.int_id(last_unique.first)<< " " << last_unique.second << " " << path.Size());
     if (last_unique.second < 0) {
 // No unique edge found
         return result;
     }
 
     result = FindNextUniqueEdge(last_unique.first);
+    DEBUG("next unique edges found, there are " << result.size() << " of them");
 //Backward check. We connected edges iff they are best continuation to each other.
     if (result.size() == 1) {
         //We should reduce gap size with length of the edges that came after last unique.
         result[0].d_ -= int (path.LengthAt(last_unique.second) - g_.length(last_unique.first));
-
         DEBUG("For edge " << g_.int_id(last_unique.first) << " unique next edge "<< result[0].e_ <<" found, doing backwards check ");
         EdgeContainer backwards_check = FindNextUniqueEdge(g_.conjugate(result[0].e_));
         if ((backwards_check.size() != 1) || (g_.conjugate(backwards_check[0].e_) != last_unique.first)) {
             result.clear();
         }
     }
+    InsertAdditionalGaps(result);
     return result;
 }
 
diff --git a/src/common/modules/path_extend/scaffolder2015/extension_chooser2015.hpp b/src/common/modules/path_extend/scaffolder2015/extension_chooser2015.hpp
new file mode 100644
index 0000000..18b5721
--- /dev/null
+++ b/src/common/modules/path_extend/scaffolder2015/extension_chooser2015.hpp
@@ -0,0 +1,65 @@
+//
+// Created by lab42 on 8/26/15.
+//
+#pragma once
+
+#include "modules/path_extend/extension_chooser.hpp"
+#include "connection_condition2015.hpp"
+#include "modules/genome_consistance_checker.hpp"
+#include "utils/logger/logger.hpp"
+#include <map>
+#include <set>
+namespace path_extend {
+
+class ExtensionChooser2015: public ScaffoldingExtensionChooser {
+    static const int MIN_N_QUANTITY = 10;  // minimal number of Ns inserted into a scaffold gap (see InsertAdditionalGaps)
+    shared_ptr<ConnectionCondition> lib_connection_condition_;  // library-based connectivity (paired or long reads)
+    const ScaffoldingUniqueEdgeStorage& unique_edges_;
+
+    // for possible connections e1 and e2 if weight(e1) > relative_weight_threshold_ * weight(e2) then e2 will be ignored
+    double relative_weight_threshold_;
+    AssemblyGraphConnectionCondition graph_connection_condition_;  // used only when use_graph_connectivity_ is set
+    // weight < absolute_weight_threshold_ will be ignored
+    size_t absolute_weight_threshold_;
+    // multiplicator for the pairs which are connected in graph.
+    double graph_connection_bonus_;
+    bool use_graph_connectivity_;
+
+    //If path contains no unique edges return -1
+    pair<EdgeId, int> FindLastUniqueInPath(const BidirectionalPath& path) const;
+    //Find all possible next unique edges confirmed with mate-pair information. (absolute/relative)_weight_threshold_ used for filtering
+    EdgeContainer FindNextUniqueEdge(const EdgeId from) const;
+public:
+    ExtensionChooser2015(const Graph& g,
+                         shared_ptr<WeightCounter> wc,
+                         shared_ptr<ConnectionCondition> condition,
+                         const ScaffoldingUniqueEdgeStorage& unique_edges,
+                         double cl_weight_threshold,
+                         double is_scatter_coeff,
+                         double relative_threshold,
+                         bool use_graph_connectivity = true):
+            //TODO: constants are subject to reconsider
+            ScaffoldingExtensionChooser(g, wc, cl_weight_threshold, is_scatter_coeff),
+            lib_connection_condition_(condition),
+            unique_edges_(unique_edges),
+            relative_weight_threshold_(relative_threshold),
+            graph_connection_condition_(g, 2 * unique_edges_.GetMinLength(), unique_edges),  // search radius: twice the minimal unique edge length
+            //TODO to config!
+            absolute_weight_threshold_(2),
+            graph_connection_bonus_(2),
+            use_graph_connectivity_(use_graph_connectivity) {
+        INFO("ExtensionChooser2015 created");
+    }
+
+    /* @param edges are really not used and left for compatibility
+     * @returns possible next edge if there is unique one, else returns empty container
+     */
+    EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer&) const override;
+    void InsertAdditionalGaps(ExtensionChooser::EdgeContainer& result) const;  // enforce a minimal gap of MIN_N_QUANTITY + k
+
+private:
+    DECL_LOGGER("ExtensionChooser2015");
+};
+
+
+}
diff --git a/src/common/modules/path_extend/scaffolder2015/path_polisher.cpp b/src/common/modules/path_extend/scaffolder2015/path_polisher.cpp
new file mode 100644
index 0000000..77f636e
--- /dev/null
+++ b/src/common/modules/path_extend/scaffolder2015/path_polisher.cpp
@@ -0,0 +1,326 @@
+
+#include "path_polisher.hpp"
+
+namespace path_extend {
+
+//Log (at DEBUG level) every positive gap remaining between consecutive edges
+//of each path in @result after polishing.
+void PathPolisher::InfoAboutGaps(const PathContainer & result){
+    for (const auto& p_iter: result) {
+        //Gaps are attached to the edge they precede, so position 0 carries none.
+        for (size_t i = 1; i < p_iter.first->Size(); ++i) {
+            if (p_iter.first->GapAt(i) > 0) {
+                DEBUG("Gap "<< p_iter.first->GapAt(i) << " left between " << gp_.g.int_id(p_iter.first->At(i-1)) << " and " << gp_.g.int_id(p_iter.first->At(i)));
+            }
+        }
+    }
+}
+
+//Build the chain of gap closers: a Dijkstra-based closer is always present;
+//one mate-pair closer is added per (HQ) mate-pair read library of the dataset.
+//@param max_resolvable_len upper bound on the length of inserted gap-filling paths
+PathPolisher::PathPolisher(const conj_graph_pack& gp, const config::dataset& dataset_info, const ScaffoldingUniqueEdgeStorage& storage, size_t max_resolvable_len ): gp_(gp) {
+    gap_closers.push_back(make_shared<DijkstraGapCloser>(gp.g, max_resolvable_len));
+    for (size_t i = 0; i <  dataset_info.reads.lib_count(); i++) {
+        auto lib = dataset_info.reads[i];
+        if (lib.type() == io::LibraryType::HQMatePairs || lib.type() == io::LibraryType::MatePairs) {
+            shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(gp.g, lib, gp.paired_indices[i]);
+            gap_closers.push_back(make_shared<MatePairGapCloser> (gp.g, max_resolvable_len, paired_lib, storage));
+        }
+    }
+}
+
+//Polish every path in @paths and fill @result with the polished paths paired
+//with their conjugates. Each path is polished in both directions so that the
+//stored pair is self-consistent.
+void PathPolisher::PolishPaths(const PathContainer &paths, PathContainer &result) {
+    result.clear();
+
+    for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
+        //Forward polishing pass; held by value so the intermediate is released
+        //after the conjugates are built (it was previously new-ed and leaked).
+        BidirectionalPath path = Polish(*iter.get());
+        //Polish the conjugate direction too, then flip it back to obtain the
+        //final forward path.
+        BidirectionalPath *conjugatePath = new BidirectionalPath(Polish(path.Conjugate()));
+        BidirectionalPath *re_path = new BidirectionalPath(conjugatePath->Conjugate());
+        result.AddPair(re_path, conjugatePath);
+    }
+    InfoAboutGaps(result);
+}
+
+//Return the cumulative length (in bp) of the shortest path stored in
+//@path_storage. Assumes the storage is non-empty (front() is dereferenced).
+size_t DijkstraGapCloser::MinPathLength(const omnigraph::PathStorageCallback<Graph>& path_storage) const {
+    size_t shortest_len = omnigraph::CumulativeLength(g_, path_storage.paths().front());
+    for (size_t j = 1; j < path_storage.paths().size(); ++j) {
+        size_t cur_len = omnigraph::CumulativeLength(g_, path_storage.paths()[j]);
+        shortest_len = min(shortest_len, cur_len);
+    }
+    return shortest_len;
+}
+
+//Run every registered gap closer on @path until none of them changes the
+//path any more (fixed point). A change in Size() is used as the "something
+//happened" signal; the loop is additionally capped at 5 rounds to guard
+//against closers oscillating with each other.
+BidirectionalPath PathPolisher::Polish(const BidirectionalPath &path) {
+    if (path.Empty())
+        return path;
+    shared_ptr<BidirectionalPath> current;
+    shared_ptr<BidirectionalPath> prev_step = std::make_shared<BidirectionalPath>(path);
+    bool changed = true;
+    size_t count = 0;
+    while (changed) {
+        changed = false;
+        for (size_t i = 0; i < gap_closers.size(); i++) {
+            current = std::make_shared<BidirectionalPath>(gap_closers[i]->Polish(*prev_step));
+            if (current->Size() != prev_step->Size()){
+                changed = true;
+                //Restart the closer chain from the beginning on any change.
+                std::swap(current, prev_step);
+                break;
+            }
+        }
+        count++;
+        if (count > 5) {
+            INFO("Unexpected cycle while polishing path, stopping polishing " );
+            path.Print();
+            break;
+        }
+    }
+    return *prev_step;
+}
+
+//Rebuild @path, attempting to close every topological gap (consecutive edges
+//not adjacent in the graph) with graph paths of total length up to
+//max_path_len_ found by exhaustive search between the flanking vertices.
+BidirectionalPath DijkstraGapCloser::Polish(const BidirectionalPath &path) {
+    BidirectionalPath result(g_);
+    if (path.Empty())
+        return result;
+    result.PushBack(path[0], path.GapInfoAt(0));
+    for (size_t i = 1; i < path.Size(); ++i) {
+        if (g_.EdgeEnd(path[i - 1]) == g_.EdgeStart(path[i])) {
+            //Edges are already adjacent: nothing to close.
+            result.PushBack(path[i], path.GapInfoAt(i));
+        } else {
+            //Connect edges using Dijkstra
+            omnigraph::PathStorageCallback<Graph> path_storage(g_);
+            omnigraph::ProcessPaths(g_, 0,
+                                    max_path_len_,
+                                    g_.EdgeEnd(path[i - 1]),
+                                    g_.EdgeStart(path[i]),
+                                    path_storage);
+
+            if (path_storage.size() == 0) {
+                //No paths found, keeping the gap
+                result.PushBack(path[i], path.GapInfoAt(i));
+            } else if (path_storage.size() > 1) {
+                //More than one path, using shortest path for gap length estimation
+                //We cannot use both common paths and bridges in one attempt;
+                if (!FillWithMultiplePaths(path, i, path_storage, result))
+                    FillWithBridge(path, i, path_storage, result);
+            } else {
+                //Closing the gap with the unique shortest path
+                for (size_t j = 0; j < path_storage.paths().front().size(); ++j) {
+                    result.PushBack(path_storage.paths().front()[j]);
+                }
+                result.PushBack(path[i]);
+            }
+        }
+    }
+    return result;
+}
+
+
+//Try to close the gap before position @index with a single "bridge" edge that
+//occurs in every candidate path and is long enough to be counted (see
+//CountEdgesQuantity). On success the longest such bridge is inserted with
+//conservative gap estimates on both sides; otherwise the original gap is kept.
+//Returns true iff a bridge was inserted.
+bool DijkstraGapCloser::FillWithBridge(const BidirectionalPath& path, size_t index,
+                                                          const omnigraph::PathStorageCallback<Graph>& path_storage,
+                                                          BidirectionalPath& result) const {
+//TODO:: constant;
+    auto counts = CountEdgesQuantity(path_storage, 300);
+    size_t path_quantity = path_storage.paths().size();
+    vector<EdgeId> bridges;
+    for (const auto& pair: counts)
+        if (pair.second == path_quantity)
+            bridges.push_back(pair.first);
+    if (bridges.size() > 0) {
+        //Prefer the longest bridge.
+        std::sort(bridges.begin(), bridges.end(), [&] (EdgeId e1, EdgeId e2) {
+            return g_.length(e1) > g_.length(e2); });
+        EdgeId bridge = bridges[0];
+        int min_gap_before = path.GapAt(index);
+        int min_gap_after = path.GapAt(index);
+        //Loop variable renamed from "path" — it shadowed the function parameter.
+        for (const auto& candidate: path_storage.paths()) {
+            //Length before the bridge within this candidate path.
+            int current_before = 0;
+            for(size_t i = 0; i < candidate.size(); i++) {
+                if (candidate[i] != bridge)
+                    current_before += (int)g_.length(candidate[i]);
+                else
+                    break;
+            }
+            int current_after = (int)CumulativeLength(g_, candidate) - current_before - int(g_.length(bridge));
+            min_gap_after = std::min(current_after, min_gap_after);
+            min_gap_before = std::min(current_before, min_gap_before);
+        }
+        //Never report a gap smaller than the configured minimum.
+        min_gap_after = std::max(min_gap_after, min_gap_);
+        min_gap_before = std::max(min_gap_before, min_gap_);
+        result.PushBack(bridge, min_gap_before);
+        result.PushBack(path[index], min_gap_after);
+        return true;
+    } else {
+        result.PushBack(path[index], path.GapAt(index));
+        return false;
+    }
+}
+
+//Append the longest common prefix (LCP) of all found paths to @result, then
+//re-estimate the remaining gap as the shortest candidate length minus the LCP
+//length (never below min_gap_). Returns true iff at least one edge was added;
+//on false @result is untouched so the caller can try FillWithBridge instead.
+bool DijkstraGapCloser::FillWithMultiplePaths(const BidirectionalPath& path, size_t index,
+                                              const omnigraph::PathStorageCallback<Graph>& path_storage,
+                                              BidirectionalPath& result) const {
+    bool changed = false;
+    auto left = LCP(path_storage);
+    for (auto e : left) {
+        result.PushBack(e);
+        changed = true;
+    }
+    //LCP is a prefix of every path, so the subtraction cannot underflow.
+    int middle_gap = (int) max(size_t(min_gap_), MinPathLength(path_storage) -
+            omnigraph::CumulativeLength(g_, left));
+    if (changed)
+        result.PushBack(path[index], middle_gap);
+    return changed;
+}
+
+//Count, for each edge not shorter than @length_limit, in how many of the
+//stored paths it occurs (an edge is counted at most once per path).
+//Used by FillWithBridge to find edges shared by all candidate paths.
+std::map<EdgeId, size_t> DijkstraGapCloser::CountEdgesQuantity(const omnigraph::PathStorageCallback<Graph>& path_storage, size_t length_limit ) const{
+    map<EdgeId, size_t> res;
+    for (const auto& path: path_storage.paths()) {
+        //De-duplicate edges within a single path before counting.
+        set<EdgeId> edge_set(path.begin(), path.end());
+        for (const auto& e: edge_set) {
+            if (g_.length(e) >= length_limit) {
+                res[e] += 1;
+            }
+        }
+    }
+    return res;
+}
+
+//Return the minimal number of edges over all stored paths.
+//Assumes the storage is non-empty (front() is dereferenced).
+size_t DijkstraGapCloser::MinPathSize(const omnigraph::PathStorageCallback<Graph>& path_storage) const {
+    size_t size = path_storage.paths().front().size();
+    for (size_t i = 1; i < path_storage.size(); ++i) {
+        size = min(size, path_storage.paths()[i].size());
+    }
+    return size;
+}
+
+//Return the longest common prefix (edge-wise) shared by all stored paths.
+vector<EdgeId> DijkstraGapCloser::LCP(const omnigraph::PathStorageCallback<Graph>& path_storage) const {
+    bool all_equal = true;
+    size_t index = 0;
+    size_t min_size = MinPathSize(path_storage);
+
+    //Advance while every path agrees with the first one at position 'index'.
+    while (index < min_size && all_equal) {
+        for (size_t i = 1; i < path_storage.size(); ++i) {
+            auto e = path_storage.paths().front()[index];
+            if (e != path_storage.paths()[i][index]) {
+                all_equal = false;
+                break;
+            }
+        }
+        if (all_equal)
+            ++index;
+    }
+
+    //Copy the agreed-upon prefix of the first path.
+    vector<EdgeId> result;
+    for (size_t i = 0; i < index; ++i) {
+        result.push_back(path_storage.paths().front()[i]);
+    }
+    return result;
+}
+
+
+//Pick the next edge for the gap-filling walk starting at vertex @v.
+//Only outgoing edges of @v that occur in some graph path between the gap
+//flanks (@present_in_paths) are considered. With several candidates, each is
+//scored by total mate-pair weight from the last unique edge before the gap,
+//divided by the candidate's length; the best one wins only if it exceeds the
+//runner-up by factor weight_priority. Returns EdgeId(0) when there is no
+//confident choice or when the choice is the gap's right flank path[index].
+EdgeId MatePairGapCloser::FindNext(const BidirectionalPath& path, size_t index,
+                    const set<EdgeId>& present_in_paths, VertexId v) const {
+    auto next_edges = g_.OutgoingEdges(v);
+    map<EdgeId, double> candidates;
+    for (const auto edge: next_edges)
+        if (present_in_paths.find(edge) != present_in_paths.end())
+            candidates.insert(make_pair(edge, 0));
+    if (candidates.size() <= 1 ) {
+        //0 or 1 candidates: take the single one unless it is path[index] itself.
+        if (candidates.size() == 0 || candidates.begin()->first == path[index])
+            return EdgeId(0);
+        else 
+            return (candidates.begin()->first);
+    } else {
+        //Anchor mate-pair distances at the last unique edge preceding the gap.
+        int i = (int) index - 1;
+        for (; i >= 0; i--) {
+            if (storage_.IsUnique(path[i]))
+                break;
+        }
+        if (i < 0) {
+            return EdgeId(0);
+        } else {
+            EdgeId last_unique = path[i];
+            for (auto &pair: candidates){
+                vector<int> d;
+                vector<double> w;
+//TODO:: any filtration?
+                lib_->CountDistances(last_unique, pair.first, d, w);
+                double sum = 0;
+                for (auto weight: w)
+                    sum += weight;
+                //Average mate-pair support per base of the candidate edge.
+                pair.second = sum / double(g_.length(pair.first));
+            }
+            vector<std::pair<EdgeId, double>> to_sort(candidates.begin(),candidates.end());
+            sort(to_sort.begin(), to_sort.end(), [&] (std::pair<EdgeId, double> a, std::pair<EdgeId, double> b ) {
+                return a.second > b.second;
+            });
+            if (to_sort[0].second > to_sort[1].second * weight_priority && to_sort[0].first != path[index])
+                return to_sort[0].first;
+            else
+                return EdgeId(0);
+        }
+    }
+}
+
+//TODO: make shorter functions
+//Walk along @path and try to fill every sufficiently large gap (> min_gap_)
+//by iteratively extending from the left flank with FindNext(), restricted to
+//edges lying on some graph path between the flanks. If the inserted sequence
+//grows beyond max_path_len_ the attempt is abandoned and the original gap is
+//kept; otherwise the remaining gap estimate is reduced by the inserted length.
+BidirectionalPath MatePairGapCloser::Polish(const BidirectionalPath& path) {
+    BidirectionalPath result(g_);
+    //Guard against an empty input path, mirroring DijkstraGapCloser::Polish;
+    //path[0] below would otherwise be an out-of-range access.
+    if (path.Empty())
+        return result;
+    DEBUG("Path " << path.GetId() << " len "<< path.Length() << " size " << path.Size());
+    result.PushBack(path[0], path.GapInfoAt(0));
+    for (size_t i = 1; i < path.Size(); ++i) {
+        if (g_.EdgeEnd(path[i - 1]) == g_.EdgeStart(path[i]) || path.GapAt(i) <= min_gap_) {
+            //Adjacent edges or a small gap: copy through unchanged.
+            result.PushBack(path[i], path.GapInfoAt(i));
+        } else {
+            DEBUG("position "<< i <<" gap between edges " << g_.int_id(path[i-1]) << " and " << g_.int_id(path[i]) << " was " << path.GapAt(i));
+
+            vector<EdgeId> addition;
+            VertexId v = g_.EdgeEnd(path[i - 1]);
+            EdgeId last = path[i - 1];
+            //Collect every edge that occurs on any graph path across the gap;
+            //FindNext only considers extensions from this set.
+            omnigraph::PathStorageCallback<Graph> path_storage(g_);
+            omnigraph::ProcessPaths(g_, 0,
+                                    max_path_len_,
+                                    g_.EdgeEnd(path[i - 1]),
+                                    g_.EdgeStart(path[i]),
+                                    path_storage);
+            set<EdgeId> present_in_paths;
+            for(const auto &p: path_storage.paths())
+                for(size_t j = 0; j < p.size(); j ++)
+                    present_in_paths.insert(p[j]);
+            size_t total = 0;
+            //Extend edge by edge until no confident next edge remains.
+            while (last != EdgeId(0)){
+                last = FindNext(path, i, present_in_paths, v);
+                if (last != EdgeId(0)){
+                    v = g_.EdgeEnd(last);
+                    addition.push_back(last);
+                    total += g_.length(last);
+                }
+                if (total > max_path_len_){
+                    DEBUG("gap between edges " << g_.int_id(path[i-1]) << " and " << g_.int_id(path[i]) << " was: " << path.GapAt(i) << ", closing path length too long: " << total);
+                    break;
+                }
+            }
+            if (total > max_path_len_) {
+                //Inserted sequence too long: abandon it and keep the gap.
+                result.PushBack(path[i], path.GapInfoAt(i));
+                continue;
+            }
+            int len = int(CumulativeLength(g_, addition));
+            int new_gap = path.GapAt(i) - len;
+            if (new_gap < min_gap_ && addition.size() > 0) {
+                if (path.GapAt(i) * 3 < len * 2 ) {
+//inserted path significantly longer than estimated gap
+                    DEBUG("Gap size estimation problem: gap between edges " << g_.int_id(path[i - 1]) << " and " << g_.int_id(path[i]) << " was " <<
+                         path.GapAt(i) << "filled len" << len);
+                }
+                if (g_.EdgeEnd(addition.back()) != g_.EdgeStart(path[i]))
+                    new_gap = min_gap_;
+                else
+                    new_gap = 0;
+            }
+            DEBUG("filling");
+            for (size_t j = 0; j < addition.size(); j++) {
+                DEBUG(g_.int_id(addition[j]));
+                result.PushBack(addition[j], 0);
+            }
+            result.PushBack(path[i], new_gap);
+            DEBUG("filled");
+        }
+    }
+    DEBUG("result " << result.GetId() << " len "<< result.Length() << " size " << result.Size());
+    return result;
+}
+
+}
diff --git a/src/common/modules/path_extend/scaffolder2015/path_polisher.hpp b/src/common/modules/path_extend/scaffolder2015/path_polisher.hpp
new file mode 100644
index 0000000..c13ddcb
--- /dev/null
+++ b/src/common/modules/path_extend/scaffolder2015/path_polisher.hpp
@@ -0,0 +1,85 @@
+#pragma once
+
+#include "assembly_graph/paths/path_processor.hpp"
+#include "assembly_graph/paths/path_utils.hpp"
+#include "assembly_graph/paths/bidirectional_path.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
+#include "modules/path_extend/paired_library.hpp"
+#include "assembly_graph/graph_support/scaff_supplementary.hpp"
+#include "common/pipeline/graph_pack.hpp"
+
+namespace path_extend {
+
+//Interface for procedures that rebuild a scaffold path while closing its gaps.
+class PathGapCloser {
+protected:
+    const Graph& g_;
+    size_t max_path_len_; //upper bound on the total length of inserted gap-filling paths
+    int min_gap_; //lower bound kept for any remaining gap estimate
+public:
+    //Returns a new path with as many gaps closed as possible; @path is not modified.
+    virtual BidirectionalPath Polish(const BidirectionalPath& path) = 0;
+//TODO:: config
+    PathGapCloser(const Graph& g, size_t max_path_len): g_(g), max_path_len_(max_path_len), min_gap_(int(g.k() + 10)) {}
+
+};
+
+//Gap closer that selects gap-filling edges supported by mate-pair links from
+//@lib_; ambiguous choices are rejected rather than guessed.
+class MatePairGapCloser: public PathGapCloser {
+    const shared_ptr<PairedInfoLibrary> lib_;
+    const ScaffoldingUniqueEdgeStorage& storage_;
+
+//TODO: config? somewhere else?
+    //Best candidate must outweigh the runner-up by this factor to be accepted.
+    static constexpr double weight_priority = 5;
+public:
+    //Next edge of the gap-filling walk from @v; EdgeId(0) if none is confident.
+    EdgeId FindNext(const BidirectionalPath& path, size_t index,
+                        const set<EdgeId>& present_in_paths, VertexId v) const;
+    MatePairGapCloser(const Graph& g, size_t max_path_len, const shared_ptr<PairedInfoLibrary> lib, const ScaffoldingUniqueEdgeStorage& storage):
+            PathGapCloser(g, max_path_len), lib_(lib), storage_(storage) {}
+    BidirectionalPath Polish(const BidirectionalPath& path) override;
+};
+
+//Gap closer based on exhaustive path search (omnigraph::ProcessPaths) between
+//the vertices flanking each gap, bounded by max_path_len_.
+class DijkstraGapCloser: public PathGapCloser {
+
+protected:
+
+    //Rebuilds the path: a unique found path is inserted verbatim; multiple
+    //paths are resolved via their common prefix or a shared "bridge" edge.
+    BidirectionalPath Polish(const BidirectionalPath& path) override;
+
+    //Length (bp) of the shortest stored path.
+    size_t MinPathLength(const omnigraph::PathStorageCallback<Graph>& path_storage) const;
+
+    //Appends the common prefix of all found paths; true iff anything was appended.
+    bool FillWithMultiplePaths(const BidirectionalPath& path, size_t index,
+                                       const omnigraph::PathStorageCallback<Graph>& path_storage,
+                                       BidirectionalPath& result) const;
+
+    //Inserts a long edge shared by all found paths, if one exists; true on success.
+    bool FillWithBridge(const BidirectionalPath& path, size_t index,
+                                                                  const omnigraph::PathStorageCallback<Graph>& path_storage,
+                                                                  BidirectionalPath& result) const;
+
+    //Minimal number of edges over all stored paths.
+    size_t MinPathSize(const omnigraph::PathStorageCallback<Graph>& path_storage) const;
+
+    //Longest common edge prefix of all stored paths.
+    vector<EdgeId> LCP(const omnigraph::PathStorageCallback<Graph>& path_storage) const;
+
+    //Per-edge occurrence count over the paths; edges shorter than length_limit are skipped.
+    std::map<EdgeId, size_t> CountEdgesQuantity(const omnigraph::PathStorageCallback<Graph>& path_storage, size_t length_limit) const;
+
+public:
+    DijkstraGapCloser(const Graph& g, size_t max_path_len):
+        PathGapCloser(g, max_path_len) {}
+
+
+};
+
+//Top-level polisher: applies a chain of PathGapCloser implementations to each
+//path of a container (and its conjugate) until the paths stop changing.
+class PathPolisher {
+
+private:
+    const conj_graph_pack& gp_;
+    //Closers are tried in order on every polishing round.
+    vector<shared_ptr<PathGapCloser>> gap_closers;
+
+private:
+    //Logs (DEBUG) positive gaps remaining in the result.
+    void InfoAboutGaps(const PathContainer & result);
+    //Runs all gap closers on one path until a fixed point (rounds are capped).
+    BidirectionalPath Polish(const BidirectionalPath& path);
+
+public:
+    PathPolisher(const conj_graph_pack& gp, const config::dataset& dataset_info, const ScaffoldingUniqueEdgeStorage& storage, size_t max_resolvable_len);
+
+    //Clears @result and fills it with polished copies of @paths paired with conjugates.
+    void PolishPaths(const PathContainer& paths, PathContainer& result);
+};
+
+
+}
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph.cpp b/src/common/modules/path_extend/scaffolder2015/scaffold_graph.cpp
similarity index 89%
rename from src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph.cpp
rename to src/common/modules/path_extend/scaffolder2015/scaffold_graph.cpp
index 7e3312a..f4a6417 100644
--- a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph.cpp
+++ b/src/common/modules/path_extend/scaffolder2015/scaffold_graph.cpp
@@ -6,11 +6,11 @@ namespace scaffold_graph {
 
 std::atomic<ScaffoldGraph::ScaffoldEdgeIdT> ScaffoldGraph::ScaffoldEdge::scaffold_edge_id_{0};
 
-void ScaffoldGraph::AddEdgeSimple(const ScaffoldGraph::ScaffoldEdge &e, size_t conjugate_id) {
+
+void ScaffoldGraph::AddEdgeSimple(const ScaffoldGraph::ScaffoldEdge &e) {
     edges_.emplace(e.getId(), e);
     outgoing_edges_.emplace(e.getStart(), e.getId());
     incoming_edges_.emplace(e.getEnd(), e.getId());
-    conjugate_[e.getId()] = conjugate_id;
 }
 
 void ScaffoldGraph::DeleteOutgoing(const ScaffoldGraph::ScaffoldEdge &e) {
@@ -41,12 +41,7 @@ void ScaffoldGraph::DeleteAllOutgoingEdgesSimple(ScaffoldGraph::ScaffoldVertex v
 
 void ScaffoldGraph::DeleteEdgeFromStorage(const ScaffoldGraph::ScaffoldEdge &e) {
     VERIFY(!Exists(e));
-
-    size_t conjugate_id = conjugate_[e.getId()];
     edges_.erase(e.getId());
-    edges_.erase(conjugate_id);
-    conjugate_.erase(e.getId());
-    conjugate_.erase(conjugate_id);
 }
 
 void ScaffoldGraph::DeleteAllIncomingEdgesSimple(ScaffoldGraph::ScaffoldVertex v) {
@@ -76,10 +71,6 @@ ScaffoldGraph::ScaffoldVertex ScaffoldGraph::conjugate(ScaffoldGraph::ScaffoldVe
 }
 
 ScaffoldGraph::ScaffoldEdge ScaffoldGraph::conjugate(const ScaffoldGraph::ScaffoldEdge &e) const {
-    auto iter = conjugate_.find(e.getId());
-    if (iter != conjugate_.end()) {
-        return edges_.at(iter->second);
-    }
     return ScaffoldEdge(conjugate(e.getEnd()), conjugate(e.getStart()), e.getColor(), e.getWeight());
 }
 
@@ -105,13 +96,11 @@ bool ScaffoldGraph::AddEdge(ScaffoldGraph::ScaffoldVertex v1, ScaffoldGraph::Sca
 
     ScaffoldEdge e(v1, v2, lib_id, weight);
     if (Exists(e)) {
-        VERIFY(Exists(conjugate(e)));
         return false;
     }
 
-    auto conj = conjugate(e);
-    AddEdgeSimple(e, conj.getId());
-    AddEdgeSimple(conj, e.getId());
+
+    AddEdgeSimple(e);
     return true;
 }
 
@@ -121,7 +110,7 @@ void ScaffoldGraph::Print(ostream &os) const {
             << ": len = " << assembly_graph_.length(v) << ", cov = " << assembly_graph_.coverage(v) << endl;
     }
     for (auto e_iter = edges_.begin(); e_iter != edges_.end(); ++e_iter) {
-        os << "Edge " << e_iter->second.getId() << " ~ " << conjugate(e_iter->second).getId() <<
+        os << "Edge " << e_iter->second.getId() <<
             ": " << int_id(e_iter->second.getStart()) << " -> " << int_id(e_iter->second.getEnd()) <<
             ", lib index = " << e_iter->second.getColor() << ", weight " << e_iter->second.getWeight() << endl;
     }
@@ -224,10 +213,7 @@ adt::iterator_range<ScaffoldGraph::ConstScaffoldEdgeIterator> ScaffoldGraph::edg
 }
 
 bool ScaffoldGraph::IsVertexIsolated(ScaffoldGraph::ScaffoldVertex assembly_graph_edge) const {
-    bool
-        result = incoming_edges_.count(assembly_graph_edge) == 0 && outgoing_edges_.count(assembly_graph_edge) == 0;
-    VERIFY((incoming_edges_.count(conjugate(assembly_graph_edge)) == 0
-        && incoming_edges_.count(assembly_graph_edge) == 0) == result);
+    bool result = incoming_edges_.count(assembly_graph_edge) == 0 && outgoing_edges_.count(assembly_graph_edge) == 0;
     return result;
 }
 
@@ -255,11 +241,8 @@ bool ScaffoldGraph::RemoveVertex(ScaffoldGraph::ScaffoldVertex assembly_graph_ed
 
 bool ScaffoldGraph::RemoveEdge(const ScaffoldGraph::ScaffoldEdge &e) {
     if (Exists(e)) {
-        VERIFY(Exists(conjugate(e)));
         DeleteOutgoing(e);
         DeleteIncoming(e);
-        DeleteOutgoing(conjugate(e));
-        DeleteIncoming(conjugate(e));
         DeleteEdgeFromStorage(e);
 
         return true;
@@ -272,4 +255,4 @@ bool ScaffoldGraph::AddEdge(const ScaffoldGraph::ScaffoldEdge &e) {
 }
 
 } //scaffold_graph
-} //path_extend
\ No newline at end of file
+} //path_extend
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph.hpp b/src/common/modules/path_extend/scaffolder2015/scaffold_graph.hpp
similarity index 91%
rename from src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph.hpp
rename to src/common/modules/path_extend/scaffolder2015/scaffold_graph.hpp
index 5e51863..9ac3fdf 100644
--- a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph.hpp
+++ b/src/common/modules/path_extend/scaffolder2015/scaffold_graph.hpp
@@ -3,13 +3,13 @@
 //
 #pragma once
 
-#include "dev_support/logger/logger.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
-#include "algorithms/path_extend/paired_library.hpp"
+#include "utils/logger/logger.hpp"
+#include "assembly_graph/core/graph.hpp"
+#include "modules/path_extend/paired_library.hpp"
 #include "connection_condition2015.hpp"
 
-#include "dev_support/standard_base.hpp"
-#include "utils/adt/iterator_range.hpp"
+#include "utils/standard_base.hpp"
+#include "common/adt/iterator_range.hpp"
 
 namespace path_extend {
 namespace scaffold_graph {
@@ -124,22 +124,16 @@ private:
 
     const debruijn_graph::Graph &assembly_graph_;
 
-    //Map for storing conjugate scaffolding edges
-    std::unordered_map<ScaffoldEdgeIdT, ScaffoldEdgeIdT> conjugate_;
-
     AdjacencyStorage outgoing_edges_;
 
     AdjacencyStorage incoming_edges_;
 
-    //Add edge without any checks and conjugate
-    void AddEdgeSimple(const ScaffoldEdge &e, size_t conjugate_id);
+    void AddEdgeSimple(const ScaffoldEdge &e);
 
     //Delete outgoing edge from adjancecy list without checks
-    //and removing conjugate and respective incoming edge
     void DeleteOutgoing(const ScaffoldEdge &e);
 
     //Delete incoming edge from adjancecy list without checks
-    //and removing conjugate and respective outoging edge
     void DeleteIncoming(const ScaffoldEdge &e);
 
     //Delete all edge info from storage
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_constructor.cpp b/src/common/modules/path_extend/scaffolder2015/scaffold_graph_constructor.cpp
similarity index 87%
rename from src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_constructor.cpp
rename to src/common/modules/path_extend/scaffolder2015/scaffold_graph_constructor.cpp
index 61a813b..f05f7e7 100644
--- a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_constructor.cpp
+++ b/src/common/modules/path_extend/scaffolder2015/scaffold_graph_constructor.cpp
@@ -8,15 +8,11 @@ namespace path_extend {
 namespace scaffold_graph {
 
 
-bool LengthEdgeCondition::IsSuitable(debruijn_graph::EdgeId e) const {
-    return graph_.length(e) >= min_length_;
-}
-
-void BaseScaffoldGraphConstructor::ConstructFromEdgeConditions(const EdgeCondition &edge_condition,
+void BaseScaffoldGraphConstructor::ConstructFromEdgeConditions(func::TypedPredicate<typename Graph::EdgeId> edge_condition,
                                                            vector<shared_ptr<ConnectionCondition>> &connection_conditions,
                                                            bool use_terminal_vertices_only) {
     for (auto e = graph_->AssemblyGraph().ConstEdgeBegin(); !e.IsEnd(); ++e) {
-        if (edge_condition.IsSuitable(*e)) {
+        if (edge_condition(*e)) {
             graph_->AddVertex(*e);
         }
     }
@@ -50,12 +46,14 @@ void BaseScaffoldGraphConstructor::ConstructFromSingleCondition(const shared_ptr
             continue;
 
         auto connected_with = condition->ConnectedWith(v);
-        for (auto connected : connected_with) {
+        for (const auto& pair : connected_with) {
+            EdgeId connected = pair.first;
+            double w = pair.second;
             TRACE("Connected with " << graph_->int_id(connected));
             if (graph_->Exists(connected)) {
                 if (use_terminal_vertices_only && graph_->IncomingEdgeCount(connected) > 0)
                     continue;
-                graph_->AddEdge(v, connected, condition->GetLibIndex(), condition->GetWeight(v, connected));
+                graph_->AddEdge(v, connected, condition->GetLibIndex(), w);
             }
         }
     }
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_constructor.hpp b/src/common/modules/path_extend/scaffolder2015/scaffold_graph_constructor.hpp
similarity index 78%
rename from src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_constructor.hpp
rename to src/common/modules/path_extend/scaffolder2015/scaffold_graph_constructor.hpp
index bbf45f4..fe6c34a 100644
--- a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_constructor.hpp
+++ b/src/common/modules/path_extend/scaffolder2015/scaffold_graph_constructor.hpp
@@ -10,27 +10,6 @@
 namespace path_extend {
 namespace scaffold_graph {
 
-//De Bruijn graph edge condition interface
-class EdgeCondition {
-public:
-    virtual bool IsSuitable(debruijn_graph::EdgeId e) const = 0;
-
-    virtual ~EdgeCondition() { }
-
-};
-
-//Edge length condition
-class LengthEdgeCondition: public EdgeCondition {
-    const debruijn_graph::Graph &graph_;
-
-    size_t min_length_;
-
-public:
-    LengthEdgeCondition(const debruijn_graph::Graph &graph, size_t min_len) : graph_(graph), min_length_(min_len) {
-    }
-
-    bool IsSuitable(debruijn_graph::EdgeId e) const;
-};
 
 //Iterface
 class ScaffoldGraphConstructor {
@@ -58,7 +37,7 @@ protected:
                           vector<shared_ptr<ConnectionCondition>> &connection_conditions,
                           bool use_terminal_vertices_only = false);
 
-    void ConstructFromEdgeConditions(const EdgeCondition& edge_condition,
+    void ConstructFromEdgeConditions(func::TypedPredicate<typename Graph::EdgeId> edge_condition,
                                      vector<shared_ptr<ConnectionCondition>> &connection_conditions,
                                      bool use_terminal_vertices_only = false);
 };
@@ -81,13 +60,13 @@ public:
 
 class DefaultScaffoldGraphConstructor: public SimpleScaffoldGraphConstructor {
 protected:
-    const EdgeCondition& edge_condition_;
+    func::TypedPredicate<typename Graph::EdgeId> edge_condition_;
 
 public:
     DefaultScaffoldGraphConstructor(const debruijn_graph::Graph& assembly_graph,
                                     const set<EdgeId>& edge_set,
                                     vector<shared_ptr<ConnectionCondition>> &connection_conditions,
-                                    const EdgeCondition& edge_condition):
+                                    func::TypedPredicate<typename Graph::EdgeId> edge_condition):
         SimpleScaffoldGraphConstructor(assembly_graph, edge_set, connection_conditions),
         edge_condition_(edge_condition)
     {}
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp b/src/common/modules/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp
similarity index 69%
rename from src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp
rename to src/common/modules/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp
index 8e5aec6..8017eee 100644
--- a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp
+++ b/src/common/modules/path_extend/scaffolder2015/scaffold_graph_visualizer.cpp
@@ -24,32 +24,29 @@ string ScaffoldGraphLabeler::label(EdgeId e) const {
 }
 
 string ScaffoldGraphLabeler::label(VertexId v) const {
+    auto it = additional_vertex_labels_.find(v);
+    string additional_label = it == additional_vertex_labels_.end() ? "" : it->second + "\n";
     return "ID: " + ToString(graph_.int_id(v)) +
         "\\n Len: " + ToString(graph_.AssemblyGraph().length(v)) +
-        "\\n Cov: " + ToString(graph_.AssemblyGraph().coverage(v));
+        "\\n Cov: " + ToString(graph_.AssemblyGraph().coverage(v)) + "\n" +
+        additional_label;
 }
 
-void ScaffoldGraphVisualizer::Visualize(GraphPrinter<ScaffoldGraph> &printer) {
+void ScaffoldGraphVisualizer::Visualize(graph_printer::GraphPrinter<ScaffoldGraph> &printer) {
     printer.open();
     printer.AddVertices(graph_.vbegin(), graph_.vend());
-    //for (auto e = graph_.ebegin(); e != graph_.eend(); ++e) {
     for (const auto& e : graph_.edges()) {
         printer.AddEdge(e);
     }
     printer.close();
 }
 
-void ScaffoldGraphVisualizer::Visualize(ostream &os, CompositeGraphColorer<ScaffoldGraph>& colorer) {
-    ScaffoldGraphLabeler labeler(graph_);
-    EmptyGraphLinker<ScaffoldGraph> linker;
+void ScaffoldGraphVisualizer::Visualize(ostream &os, graph_colorer::CompositeGraphColorer<ScaffoldGraph>& colorer) {
+    ScaffoldGraphLabeler labeler(graph_, additional_vertex_labels_);
+    vertex_linker::EmptyGraphLinker<ScaffoldGraph> linker;
 
-    if (paired_) {
-        PairedGraphPrinter <ScaffoldGraph> printer(graph_, os, labeler, colorer, linker);
-        Visualize(printer);
-    } else {
-        SingleGraphPrinter <ScaffoldGraph> printer(graph_, os, labeler, colorer, linker);
-        Visualize(printer);
-    }
+    graph_printer::SingleGraphPrinter <ScaffoldGraph> printer(graph_, os, labeler, colorer, linker);
+    Visualize(printer);
 }
 
 string ScaffoldEdgeColorer::GetValue(ScaffoldGraph::EdgeId e) const {
diff --git a/src/common/modules/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp b/src/common/modules/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp
new file mode 100644
index 0000000..51d40ef
--- /dev/null
+++ b/src/common/modules/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp
@@ -0,0 +1,79 @@
+//
+// Created by andrey on 21.09.15.
+//
+
+#ifndef PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
+#define PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
+
+#include "pipeline/graphio.hpp"
+#include "scaffold_graph.hpp"
+
+namespace path_extend { namespace scaffold_graph {
+
+using namespace visualization;
+
+
+class ScaffoldGraphLabeler : public graph_labeler::GraphLabeler<ScaffoldGraph> {
+
+private:
+    const ScaffoldGraph &graph_;
+
+    const map<VertexId, string>& additional_vertex_labels_;
+
+public:
+    ScaffoldGraphLabeler(const ScaffoldGraph &graph, const map<VertexId, string>& additional_vertex_labels):
+        graph_(graph), additional_vertex_labels_(additional_vertex_labels) {
+    }
+
+    string label(VertexId v) const;
+
+    string label(EdgeId e) const;
+};
+
+
+class ScaffoldEdgeColorer : public graph_colorer::ElementColorer<ScaffoldGraph::EdgeId> {
+private:
+    static const map<size_t, string> color_map;
+
+    static const string default_color;
+
+public:
+    string GetValue(ScaffoldGraph::EdgeId e) const;
+};
+
+
+class ScaffoldVertexSetColorer : public graph_colorer::ElementColorer<ScaffoldGraph::VertexId> {
+ private:
+  set<ScaffoldGraph::VertexId> vertex_set_;
+
+ public:
+  ScaffoldVertexSetColorer(const set<ScaffoldGraph::VertexId>& vertex_set): vertex_set_(vertex_set) {
+  }
+
+    string GetValue(ScaffoldGraph::VertexId v) const;
+};
+
+class ScaffoldGraphVisualizer {
+private:
+    const ScaffoldGraph &graph_;
+
+    const map<ScaffoldGraph::VertexId, string>& additional_vertex_labels_;
+
+private:
+    void Visualize(graph_printer::GraphPrinter<ScaffoldGraph> &printer);
+
+public:
+    ScaffoldGraphVisualizer(const ScaffoldGraph &graph,
+                            const map<ScaffoldGraph::VertexId, string>& additional_vertex_labels) :
+            graph_(graph),
+            additional_vertex_labels_(additional_vertex_labels){
+    }
+
+    void Visualize(ostream &os, graph_colorer::CompositeGraphColorer<ScaffoldGraph>& colorer);
+};
+
+} //scaffold_graph
+} //path_extend
+
+
+#endif //PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
diff --git a/src/modules/algorithms/path_extend/split_graph_pair_info.hpp b/src/common/modules/path_extend/split_graph_pair_info.hpp
similarity index 96%
rename from src/modules/algorithms/path_extend/split_graph_pair_info.hpp
rename to src/common/modules/path_extend/split_graph_pair_info.hpp
index 8991d57..007495c 100644
--- a/src/modules/algorithms/path_extend/split_graph_pair_info.hpp
+++ b/src/common/modules/path_extend/split_graph_pair_info.hpp
@@ -5,18 +5,11 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-/*
- * split_graph_pair_info.hpp
- *
- *  Created on: May 14, 2013
- *      Author: ira
- */
-
 #ifndef SPLIT_GRAPH_PAIR_INFO_HPP_
 #define SPLIT_GRAPH_PAIR_INFO_HPP_
 
 #include <paired_info/weights.hpp>
-#include "assembly_graph/graph_alignment/sequence_mapper_notifier.hpp"
+#include "modules/alignment/sequence_mapper_notifier.hpp"
 #include "io/dataset_support/read_converter.hpp"
 #include "ideal_pair_info.hpp"
 
@@ -288,22 +281,12 @@ public:
                           read1, read2, r.distance());
     }
 
-    void ProcessSingleRead(size_t, const io::SingleRead&, const MappingPath<EdgeId>&) override {
-        //only paired reads are interesting
-    }
-
-    void ProcessSingleRead(size_t, const io::SingleReadSeq&, const MappingPath<EdgeId>&) override {
-        //only paired reads are interesting
-    }
     void MergeBuffer(size_t thread_index) override {
         basket_index_.AddAll(baskets_buffer_[thread_index]);
         baskets_buffer_[thread_index].Clear();
     }
 
     void StopProcessLibrary() override {
-        for (size_t i = 0; i < baskets_buffer_.size(); ++i)
-            MergeBuffer(i);
-
         FindThreshold();
 
         baskets_buffer_.clear();
@@ -400,8 +383,8 @@ private:
             pair<EdgeId, MappingRange> mapping_edge_1 = path1[i];
             for (size_t j = 0; j < path2.size(); ++j) {
                 pair<EdgeId, MappingRange> mapping_edge_2 = path2[j];
-                double weight = PairedReadCountWeight(mapping_edge_1.second,
-                                                      mapping_edge_2.second);
+                double weight = PairedReadCountWeight(std::make_pair(mapping_edge_1.first, mapping_edge_2.first),
+                                                      mapping_edge_1.second, mapping_edge_2.second);
                 size_t kmer_distance = read_distance
                         + mapping_edge_2.second.initial_range.end_pos
                         - mapping_edge_1.second.initial_range.start_pos;
diff --git a/src/common/modules/path_extend/weight_counter.hpp b/src/common/modules/path_extend/weight_counter.hpp
new file mode 100644
index 0000000..d031bb2
--- /dev/null
+++ b/src/common/modules/path_extend/weight_counter.hpp
@@ -0,0 +1,357 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+/*
+ * weight_counter.hpp
+ *
+ *  Created on: Feb 19, 2012
+ *      Author: andrey
+ */
+
+#ifndef WEIGHT_COUNTER_HPP_
+#define WEIGHT_COUNTER_HPP_
+
+#include "assembly_graph/paths/bidirectional_path.hpp"
+#include "paired_library.hpp"
+#include <algorithm>
+#include <boost/math/special_functions/fpclassify.hpp>
+
+namespace path_extend {
+
+inline int median(const vector<int>& dist, const vector<double>& w, int min, int max) {
+    VERIFY(dist.size() == w.size());
+    double S = 0;
+    for (size_t i = 0; i < w.size(); ++i) {
+        if (dist[i] >= min && dist[i] <= max)
+            S += w[i];
+    }
+    if (S == 0) {
+        DEBUG("Empty histogram");
+        return 0;
+    }
+
+    double sum = S;
+    for (size_t i = 0; i < w.size(); ++i) {
+        if (dist[i] >= min && dist[i] <= max) {
+            sum -= w[i];
+            if (sum <= S / 2) {
+                return dist[i];
+            }
+        }
+    }
+    VERIFY(false);
+    return -1;
+}
+
+struct EdgeWithPairedInfo {
+    size_t e_;
+    double pi_;
+
+    EdgeWithPairedInfo(size_t e_, double pi) :
+            e_(e_), pi_(pi) {
+
+    }
+};
+
+struct EdgeWithDistance {
+    EdgeId e_;
+    int d_;
+
+    EdgeWithDistance(EdgeId e, size_t d) :
+            e_(e), d_((int) d) {
+    }
+
+    struct DistanceComparator {
+        bool operator()(const EdgeWithDistance& e1, const EdgeWithDistance& e2) {
+            if (e1.d_ == e2.d_)
+                return e1.e_ < e2.e_;
+            return e1.d_ > e2.d_;
+        }
+    };
+
+    //static DistanceComparator comparator;
+};
+
+class IdealInfoProvider {
+public:
+    virtual ~IdealInfoProvider() {}
+
+    virtual std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const = 0;
+};
+
+class BasicIdealInfoProvider : public IdealInfoProvider {
+    const shared_ptr<PairedInfoLibrary> lib_;
+public:
+    BasicIdealInfoProvider(const shared_ptr<PairedInfoLibrary>& lib) : lib_(lib) {
+    }
+
+    std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const override {
+        std::vector<EdgeWithPairedInfo> covered;
+        for (int i = (int) path.Size() - 1; i >= 0; --i) {
+            double w = lib_->IdealPairedInfo(path[i], candidate,
+                                            (int) path.LengthAt(i));
+            //FIXME think if we need extremely low ideal weights
+            if (math::gr(w, 0.)) {
+                covered.push_back(EdgeWithPairedInfo(i, w));
+            }
+        }
+        return covered;
+    }
+};
+
+class WeightCounter {
+
+protected:
+    const Graph& g_;
+    const shared_ptr<PairedInfoLibrary> lib_;
+    bool normalize_weight_;
+    shared_ptr<IdealInfoProvider> ideal_provider_;
+
+public:
+
+    WeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib, 
+                  bool normalize_weight = true, 
+                  shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
+            g_(g), lib_(lib), normalize_weight_(normalize_weight), ideal_provider_(ideal_provider) {
+       if (!ideal_provider_) {
+           ideal_provider_ = make_shared<BasicIdealInfoProvider>(lib);
+       }
+    }
+
+    virtual std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
+                                    int gap = 0) const = 0;
+
+    virtual double CountWeight(const BidirectionalPath& path, EdgeId e,
+            const std::set<size_t>& excluded_edges = std::set<size_t>(), int gapLength = 0) const = 0;
+
+    const PairedInfoLibrary& lib() const {
+        return *lib_;
+    }
+
+    const shared_ptr<PairedInfoLibrary> get_libptr() const {
+        return lib_;
+    };
+
+private:
+    DECL_LOGGER("WeightCounter");
+};
+
+class ReadCountWeightCounter: public WeightCounter {
+
+    std::vector<EdgeWithPairedInfo> CountLib(const BidirectionalPath& path, EdgeId e,
+            int add_gap = 0) const {
+        std::vector<EdgeWithPairedInfo> answer;
+
+        for (const EdgeWithPairedInfo& e_w_pi : ideal_provider_->FindCoveredEdges(path, e)) {
+            double w = lib_->CountPairedInfo(path[e_w_pi.e_], e,
+                    (int) path.LengthAt(e_w_pi.e_) + add_gap);
+
+            if (normalize_weight_) {
+                w /= e_w_pi.pi_;
+            }
+            answer.push_back(EdgeWithPairedInfo(e_w_pi.e_, w));
+        }
+
+        return answer;
+    }
+
+public:
+
+    ReadCountWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
+                            bool normalize_weight = true, 
+                            shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
+            WeightCounter(g, lib, normalize_weight, ideal_provider) {
+    }
+
+    double CountWeight(const BidirectionalPath& path, EdgeId e, 
+                        const std::set<size_t>& excluded_edges, int gap) const override {
+        double weight = 0.0;
+
+        for (const auto& e_w_pi : CountLib(path, e, gap)) {
+            if (!excluded_edges.count(e_w_pi.e_)) {
+                weight += e_w_pi.pi_;
+            }
+        }
+
+        return weight;
+    }
+
+    std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
+                                    int gap = 0) const override {
+        std::set<size_t> answer;
+        for (const auto& e_w_pi : CountLib(path, e, gap)) {
+            if (math::gr(e_w_pi.pi_, 0.)) {
+                answer.insert(e_w_pi.e_);
+            }
+        }
+        
+        return answer;
+    }
+
+};
+
+class PathCoverWeightCounter: public WeightCounter {
+    double single_threshold_;
+
+    double TotalIdealNonExcluded(const std::vector<EdgeWithPairedInfo>& ideally_covered_edges, 
+                        const std::set<size_t>& excluded_edges) const {
+        double ideal_total = 0.0;
+
+        for (const EdgeWithPairedInfo& e_w_pi : ideally_covered_edges) {
+            if (!excluded_edges.count(e_w_pi.e_))
+                ideal_total += e_w_pi.pi_;
+        }
+
+        return ideal_total;
+    }
+
+    std::vector<EdgeWithPairedInfo> CountLib(const BidirectionalPath& path, EdgeId e,
+            const std::vector<EdgeWithPairedInfo>& ideally_covered_edges, int add_gap = 0) const {
+        std::vector<EdgeWithPairedInfo> answer;
+
+        for (const EdgeWithPairedInfo& e_w_pi : ideally_covered_edges) {
+            double ideal_weight = e_w_pi.pi_;
+
+            double weight = lib_->CountPairedInfo(
+                    path[e_w_pi.e_], e,
+                    (int) path.LengthAt(e_w_pi.e_) + add_gap);
+
+            if (normalize_weight_) {
+                weight /= ideal_weight;
+            }
+
+            if (math::ge(weight, single_threshold_)) {
+                answer.push_back(EdgeWithPairedInfo(e_w_pi.e_, ideal_weight));
+            }
+        }
+
+        return answer;
+    }
+
+public:
+
+    PathCoverWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
+                           bool normalize_weight,
+                           double single_threshold,
+                           shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
+            WeightCounter(g, lib, normalize_weight, ideal_provider),
+            single_threshold_(single_threshold) {
+        VERIFY_MSG(math::gr(single_threshold_, 0.), "Threshold value not initialized");
+    }
+
+    double CountWeight(const BidirectionalPath& path, EdgeId e,
+            const std::set<size_t>& excluded_edges, int gap) const override {
+        double lib_weight = 0.;
+        const auto ideal_coverage = ideal_provider_->FindCoveredEdges(path, e);
+
+        for (const auto& e_w_pi : CountLib(path, e, ideal_coverage, gap)) {
+            if (!excluded_edges.count(e_w_pi.e_)) {
+                lib_weight += e_w_pi.pi_;
+            }
+        }
+
+        double total_ideal_coverage = TotalIdealNonExcluded(ideal_coverage, excluded_edges);
+        return math::eq(total_ideal_coverage, 0.) ? 0. : lib_weight / total_ideal_coverage;
+    }
+
+    std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
+                                    int gap = 0) const override {
+        std::set<size_t> answer;
+        for (const auto& e_w_pi : CountLib(path, e, ideal_provider_->FindCoveredEdges(path, e), gap)) {
+            if (math::gr(e_w_pi.pi_, 0.)) {
+                answer.insert(e_w_pi.e_);
+            }
+        }
+        return answer;
+    }
+};
+
+class CoverageAwareIdealInfoProvider : public BasicIdealInfoProvider {
+    static constexpr double MAGIC_COEFF = 2.;
+    const Graph& g_;
+    size_t read_length_; 
+
+public:
+    //works for single lib only!!!
+    virtual double EstimatePathCoverage(const BidirectionalPath& path) const  {
+        VERIFY(path.Length() > 0);
+        double answer = std::numeric_limits<double>::max();
+        for (size_t i = 0; i < path.Size(); ++i) {
+            answer = std::min(g_.coverage(path.At(i)), answer);
+        }
+        return answer;
+    }
+
+    CoverageAwareIdealInfoProvider(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
+                                    size_t read_length) :
+                BasicIdealInfoProvider(lib), g_(g), read_length_(read_length) {
+        VERIFY(read_length_ > g_.k());
+    }
+
+    std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const override {
+        VERIFY(read_length_ != -1ul);
+        //bypassing problems with ultra-low coverage estimates
+        double estimated_coverage = max(EstimatePathCoverage(path), 1.0);
+        double correction_coeff = estimated_coverage / ((double(read_length_) - double(g_.k())) * MAGIC_COEFF);
+
+        std::vector<EdgeWithPairedInfo> answer = BasicIdealInfoProvider::FindCoveredEdges(path, candidate);
+        for (auto& e_w_pi : answer) {
+            e_w_pi.pi_ *= correction_coeff;
+        }
+        return answer;
+    }
+};
+
+class GlobalCoverageAwareIdealInfoProvider : public CoverageAwareIdealInfoProvider {
+    double lib_coverage_;
+
+public:
+
+    GlobalCoverageAwareIdealInfoProvider(const Graph& g,
+                                         const shared_ptr<PairedInfoLibrary>& lib,
+                                         size_t read_length,
+                                         double lib_coverage):
+        CoverageAwareIdealInfoProvider(g, lib, read_length),
+        lib_coverage_(lib_coverage) {
+    }
+
+    double EstimatePathCoverage(const BidirectionalPath&) const override {
+        return lib_coverage_;
+    }
+};
+
+//TODO optimize number of calls of EstimatePathCoverage(path)
+//class MetagenomicWeightCounter: public WeightCounter {
+//    shared_ptr<CoverageAwareIdealInfoProvider> cov_info_provider_;
+//    shared_ptr<WeightCounter> normalizing_wc_;
+//
+//public:
+//
+//    //negative raw_threshold leads to the halt if no sufficiently long edges are in the path
+//    MetagenomicWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
+//                             size_t read_length, double weight_threshold) :
+//            WeightCounter(g, lib) {
+//        cov_info_provider_ = make_shared<CoverageAwareIdealInfoProvider>(g, lib, read_length);
+//        normalizing_wc_ = make_shared<PathCoverWeightCounter>(g, lib,
+//                /*normalize weight*/true, weight_threshold, cov_info_provider_);
+//    }
+//
+//    double CountWeight(const BidirectionalPath& path, EdgeId e,
+//            const std::set<size_t>& excluded_edges, int gap = 0) const override {
+//        VERIFY(path.Length() > 0);
+//        return normalizing_wc_->CountWeight(path, e, excluded_edges, gap);
+//    }
+//
+//    std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e,
+//                                    int gap = 0) const override {
+//        return normalizing_wc_->PairInfoExist(path, e, gap);
+//    }
+//};
+
+};
+
+#endif /* WEIGHT_COUNTER_HPP_ */
diff --git a/src/modules/algorithms/simplification/bulge_remover.hpp b/src/common/modules/simplification/bulge_remover.hpp
similarity index 79%
rename from src/modules/algorithms/simplification/bulge_remover.hpp
rename to src/common/modules/simplification/bulge_remover.hpp
index 1ab3de6..73254b1 100644
--- a/src/modules/algorithms/simplification/bulge_remover.hpp
+++ b/src/common/modules/simplification/bulge_remover.hpp
@@ -20,8 +20,8 @@
 #include "assembly_graph/paths/path_processor.hpp"
 #include "assembly_graph/graph_support/comparators.hpp"
 #include "assembly_graph/components/graph_component.hpp"
-#include "data_structures/sequence/sequence_tools.hpp"
-#include "dev_support/standard_base.hpp"
+#include "sequence/sequence_tools.hpp"
+#include "utils/standard_base.hpp"
 #include <cmath>
 #include <stack>
 #include "math/xmath.h"
@@ -136,8 +136,6 @@ class BulgeGluer {
 
         TRACE("Process bulge " << path.size() << " edges");
 
-        //fixme remove after checking results
-        bool flag = false;
         VERIFY(bulge_prefix_lengths.back() == g_.length(edge));
 
         for (size_t i = 0; i < path.size(); ++i) {
@@ -158,18 +156,15 @@ class BulgeGluer {
                     edge_to_split = split_result.second;
 
                     TRACE("GlueEdges " << g_.str(split_result.first));
-                    flag = true;
                     g_.GlueEdges(split_result.first, path[i]);
 
                 } else {
                     TRACE("GlueEdges " << g_.str(edge_to_split));
-                    flag = true;
                     g_.GlueEdges(edge_to_split, path[i]);
                 }
             }
             prev_length = bulge_prefix_lengths[i];
         }
-        VERIFY(flag);
     }
 
 public:
@@ -305,10 +300,10 @@ private:
 };
 
 template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId>
+func::TypedPredicate<typename Graph::EdgeId>
 NecessaryBulgeCondition(const Graph& g, size_t max_length, double max_coverage) {
     return AddAlternativesPresenceCondition(g,
-                                            pred::And(LengthUpperBound<Graph>(g, max_length),
+                                            func::And(LengthUpperBound<Graph>(g, max_length),
                                                      CoverageUpperBound<Graph>(g, max_coverage)));
 }
 
@@ -317,94 +312,13 @@ NecessaryBulgeCondition(const Graph& g, size_t max_length, double max_coverage)
  * the graph and for each edge checks if this edge is likely to be a simple bulge
  * if edge is judged to be one it is removed.
  */
-//template<class Graph>
-//class OldBulgeRemover: public EdgeProcessingAlgorithm<Graph> {
-//    typedef EdgeProcessingAlgorithm<Graph> base;
-//    typedef typename Graph::EdgeId EdgeId;
-//    typedef typename Graph::VertexId VertexId;
-//
-//protected:
-//
-//    /*virtual*/
-//    bool ProcessEdge(EdgeId e) {
-//        TRACE("Considering edge " << this->g().str(e)
-//                      << " of length " << this->g().length(e)
-//                      << " and avg coverage " << this->g().coverage(e));
-//
-//        if (!HasAlternatives(this->g(), e)) {
-//            TRACE("Not possible bulge edge");
-//            return false;
-//        }
-//
-//        for (const auto& analyzer : alternatives_analyzers_) {
-//            vector<EdgeId> alternative = analyzer(e);
-//            if (!alternative.empty()) {
-//                gluer_(e, alternative);
-//                return true;
-//            }
-//        }
-//        return false;
-//    }
-//
-//public:
-//
-//    typedef std::function<void(EdgeId edge, const vector<EdgeId>& path)> BulgeCallbackF;
-//
-////    BulgeRemover(Graph& g,  double max_coverage, size_t max_length,
-////            double max_relative_coverage, size_t max_delta,
-////            double max_relative_delta,
-////            size_t max_edge_cnt,
-////            BulgeCallbackF opt_callback = 0,
-////            std::function<void(EdgeId)> removal_handler = 0) :
-////            base(g, true),
-////            gluer_(g, opt_callback, removal_handler) {
-////                DEBUG("Launching br max_length=" << max_length
-////                << " max_coverage=" << max_coverage
-////                << " max_relative_coverage=" << max_relative_coverage
-////                << " max_delta=" << max_delta
-////                << " max_relative_delta=" << max_relative_delta
-////                << " max_number_edges=" << max_edge_cnt);
-////                alternatives_analyzers_.push_back(
-////                        AlternativesAnalyzer<Graph>(g, max_coverage,
-////                                                    max_length, max_relative_coverage,
-////                                                    max_delta, max_relative_delta, max_edge_cnt));
-////    }
-//
-//    OldBulgeRemover(Graph& g,
-//            const std::vector<AlternativesAnalyzer<Graph>>& alternatives_analyzers,
-//            BulgeCallbackF opt_callback = 0,
-//            std::function<void(EdgeId)> removal_handler = 0) :
-//            base(g, true),
-//            alternatives_analyzers_(alternatives_analyzers),
-//            gluer_(g, opt_callback, removal_handler) {
-//    }
-//
-//private:
-//    std::vector<AlternativesAnalyzer<Graph>> alternatives_analyzers_;
-//    BulgeGluer<Graph> gluer_;
-//private:
-//    DECL_LOGGER("BulgeRemover")
-//};
-
 template<class Graph>
-inline double AbsoluteMaxCoverage(const std::vector<AlternativesAnalyzer<Graph>>& alternatives_analyzers) {
-    double ans = -1.;
-    for (const auto& analyzer : alternatives_analyzers) {
-        ans = std::max(ans, analyzer.max_coverage());
-    }
-    return ans;
-}
-
-//fixme maybe switch on parallel finder?
-template<class Graph, class InterestingElementFinder>
 class BulgeRemover: public PersistentProcessingAlgorithm<Graph,
                                                         typename Graph::EdgeId,
-                                                        InterestingElementFinder,
                                                         CoverageComparator<Graph>> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef PersistentProcessingAlgorithm<Graph, EdgeId,
-            InterestingElementFinder, CoverageComparator<Graph>> base;
+    typedef PersistentProcessingAlgorithm<Graph, EdgeId, CoverageComparator<Graph>> base;
 
 protected:
 
@@ -431,27 +345,7 @@ public:
 
     typedef std::function<void(EdgeId edge, const vector<EdgeId>& path)> BulgeCallbackF;
 
-//  BulgeRemover(Graph& g,  double max_coverage, size_t max_length,
-//          double max_relative_coverage, size_t max_delta,
-//          double max_relative_delta,
-//          size_t max_edge_cnt,
-//          BulgeCallbackF opt_callback = 0,
-//          std::function<void(EdgeId)> removal_handler = 0) :
-//          base(g, true),
-//          gluer_(g, opt_callback, removal_handler) {
-//                DEBUG("Launching br max_length=" << max_length
-//                << " max_coverage=" << max_coverage
-//                << " max_relative_coverage=" << max_relative_coverage
-//                << " max_delta=" << max_delta
-//                << " max_relative_delta=" << max_relative_delta
-//                << " max_number_edges=" << max_edge_cnt);
-//                alternatives_analyzers_.push_back(
-//                        AlternativesAnalyzer<Graph>(g, max_coverage,
-//                                                    max_length, max_relative_coverage,
-//                                                    max_delta, max_relative_delta, max_edge_cnt));
-//    }
-
-    BulgeRemover(Graph& g, const InterestingElementFinder& interesting_finder,
+    BulgeRemover(Graph& g, const std::shared_ptr<InterestingElementFinder<Graph, EdgeId>>& interesting_finder,
             const AlternativesAnalyzer<Graph>& alternatives_analyzer,
             BulgeCallbackF opt_callback = 0,
             std::function<void(EdgeId)> removal_handler = 0,
@@ -472,10 +366,12 @@ private:
     DECL_LOGGER("BulgeRemover")
 };
 
-template<class Graph, class InterestingElementFinder>
+template<class Graph>
 class ParallelBulgeRemover : public PersistentAlgorithmBase<Graph> {
+private:
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
+    typedef std::shared_ptr<InterestingElementFinder<Graph, EdgeId>> CandidateFinderPtr;
     typedef SmartSetIterator<Graph, EdgeId, CoverageComparator<Graph>> SmartEdgeSet;
 
     size_t buff_size_;
@@ -483,7 +379,7 @@ class ParallelBulgeRemover : public PersistentAlgorithmBase<Graph> {
     double buff_cov_rel_diff_;
     AlternativesAnalyzer<Graph> alternatives_analyzer_;
     BulgeGluer<Graph> gluer_;
-    InterestingElementFinder interesting_edge_finder_;
+    CandidateFinderPtr interesting_edge_finder_;
     //todo remove
     bool tracking_;
 
@@ -505,6 +401,7 @@ class ParallelBulgeRemover : public PersistentAlgorithmBase<Graph> {
             id(-1ul) {
         }
 
+        //passing by value is not a mistake!
         BulgeInfo(size_t id_, EdgeId e_, std::vector<EdgeId> alternative_) :
             id(id_), e(e_), alternative(std::move(alternative_)) {
 
@@ -566,7 +463,7 @@ class ParallelBulgeRemover : public PersistentAlgorithmBase<Graph> {
     }
 
     //false if time to stop
-    bool FillEdgeBuffer(vector<EdgeId>& buffer, pred::TypedPredicate<EdgeId> proceed_condition) {
+    bool FillEdgeBuffer(vector<EdgeId>& buffer, func::TypedPredicate<EdgeId> proceed_condition) {
         VERIFY(buffer.empty());
         DEBUG("Filling edge buffer of size " << buff_size_);
         perf_counter perf;
@@ -608,7 +505,7 @@ class ParallelBulgeRemover : public PersistentAlgorithmBase<Graph> {
         }
     }
 
-    std::vector<std::vector<BulgeInfo>> FindBulges(const std::vector<EdgeId> edge_buffer) const {
+    std::vector<std::vector<BulgeInfo>> FindBulges(const std::vector<EdgeId>& edge_buffer) const {
         DEBUG("Looking for bulges (in parallel). Edge buffer size " << edge_buffer.size());
         perf_counter perf;
         std::vector<std::vector<BulgeInfo>> bulge_buffers(omp_get_max_threads());
@@ -677,15 +574,15 @@ class ParallelBulgeRemover : public PersistentAlgorithmBase<Graph> {
         return interacting_edges;
     }
 
-    bool ProcessBulges(const std::vector<BulgeInfo>& independent_bulges, SmartEdgeSet&& interacting_edges) {
+    size_t ProcessBulges(const std::vector<BulgeInfo>& independent_bulges, SmartEdgeSet&& interacting_edges) {
         DEBUG("Processing bulges");
         perf_counter perf;
 
-        bool triggered = false;
+        size_t triggered = 0;
 
         for (const BulgeInfo& info : independent_bulges) {
             TRACE("Processing bulge " << info.str(this->g()));
-            triggered = true;
+            triggered++;
             gluer_(info.e, info.alternative);
         }
 
@@ -700,7 +597,7 @@ class ParallelBulgeRemover : public PersistentAlgorithmBase<Graph> {
             std::vector<EdgeId> alternative = alternatives_analyzer_(e);
             if (!alternative.empty()) {
                 gluer_(e, alternative);
-                triggered = true;
+                triggered++;
             }
         }
         DEBUG("Interacting edges processed in " << perf.time() << " seconds");
@@ -711,7 +608,7 @@ public:
 
     typedef std::function<void(EdgeId edge, const vector<EdgeId>& path)> BulgeCallbackF;
 
-    ParallelBulgeRemover(Graph& g, const InterestingElementFinder& interesting_edge_finder,
+    ParallelBulgeRemover(Graph& g, const CandidateFinderPtr& interesting_edge_finder,
                          size_t buff_size, double buff_cov_diff,
                          double buff_cov_rel_diff, const AlternativesAnalyzer<Graph>& alternatives_analyzer,
                          BulgeCallbackF opt_callback = 0,
@@ -731,11 +628,11 @@ public:
         it_.Detach();
     }
 
-    bool Run(bool force_primary_launch = false) override {
+    size_t Run(bool force_primary_launch = false) override {
         bool primary_launch = force_primary_launch ? true : curr_iteration_ == 0;
         //todo remove if not needed;
         //potentially can vary coverage threshold in coordination with ec threshold
-        auto proceed_condition = pred::AlwaysTrue<EdgeId>();
+        auto proceed_condition = func::AlwaysTrue<EdgeId>();
 
         if (!it_.IsAttached()) {
             it_.Attach();
@@ -744,14 +641,14 @@ public:
             it_.clear();
             TRACE("Primary launch.");
             TRACE("Start search for interesting edges");
-            interesting_edge_finder_.Run(it_);
+            interesting_edge_finder_->Run(this->g(), [&](EdgeId e) {it_.push(e);});
             TRACE(it_.size() << " interesting edges to process");
         } else {
             VERIFY(tracking_);
             TRACE(it_.size() << " edges to process");
         }
 
-        bool triggered = false;
+        size_t triggered = 0;
         bool proceed = true;
         while (proceed) {
             std::vector<EdgeId> edge_buffer;
@@ -762,9 +659,9 @@ public:
 
             auto interacting_edges = RetainIndependentBulges(bulges);
 
-            bool inner_triggered  = ProcessBulges(bulges, std::move(interacting_edges));
-            proceed |= inner_triggered;
-            triggered |= inner_triggered;
+            size_t inner_triggered = ProcessBulges(bulges, std::move(interacting_edges));
+            proceed |= (inner_triggered > 0);
+            triggered += inner_triggered;
         }
 
         TRACE("Finished processing. Triggered = " << triggered);
diff --git a/src/modules/algorithms/simplification/cleaner.hpp b/src/common/modules/simplification/cleaner.hpp
similarity index 64%
rename from src/modules/algorithms/simplification/cleaner.hpp
rename to src/common/modules/simplification/cleaner.hpp
index 1787e56..ce3eac5 100644
--- a/src/modules/algorithms/simplification/cleaner.hpp
+++ b/src/common/modules/simplification/cleaner.hpp
@@ -7,13 +7,10 @@
 namespace omnigraph {
 
 template<class Graph>
-class Cleaner : public PersistentProcessingAlgorithm<Graph,
-        typename Graph::VertexId,
-        ParallelInterestingElementFinder < Graph, typename Graph::VertexId>> {
+class Cleaner : public PersistentProcessingAlgorithm<Graph, typename Graph::VertexId> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef PersistentProcessingAlgorithm <Graph,
-    VertexId, ParallelInterestingElementFinder<Graph, VertexId>> base;
+    typedef PersistentProcessingAlgorithm<Graph, VertexId> base;
     typedef IsolatedVertexCondition<Graph> ConditionT;
 
     Graph &g_;
@@ -22,8 +19,7 @@ class Cleaner : public PersistentProcessingAlgorithm<Graph,
 public:
     Cleaner(Graph &g, size_t chunk_cnt = 1) :
             base(g,
-                 ParallelInterestingElementFinder<Graph, VertexId>(g,
-                                                                   ConditionT(g), chunk_cnt),
+                 std::make_shared<ParallelInterestingElementFinder<Graph, VertexId>>(ConditionT(g), chunk_cnt),
                     /*canonical only*/true),
             g_(g), isolated_condition_(g) {
     }
diff --git a/src/modules/algorithms/simplification/complex_bulge_remover.hpp b/src/common/modules/simplification/complex_bulge_remover.hpp
similarity index 80%
rename from src/modules/algorithms/simplification/complex_bulge_remover.hpp
rename to src/common/modules/simplification/complex_bulge_remover.hpp
index e3a531a..2abed3d 100644
--- a/src/modules/algorithms/simplification/complex_bulge_remover.hpp
+++ b/src/common/modules/simplification/complex_bulge_remover.hpp
@@ -10,14 +10,15 @@
 #include <cmath>
 #include <stack>
 #include <queue>
-#include "utils/adt/concurrent_dsu.hpp"
-#include "dev_support/standard_base.hpp"
+#include "common/adt/concurrent_dsu.hpp"
+#include "utils/standard_base.hpp"
 #include "assembly_graph/components/graph_component.hpp"
 #include "math/xmath.h"
-#include "data_structures/sequence/sequence_tools.hpp"
+#include "sequence/sequence_tools.hpp"
 #include "assembly_graph/paths/path_processor.hpp"
 #include "visualization/visualization.hpp"
 #include "dominated_set_finder.hpp"
+#include "assembly_graph/graph_support/parallel_processing.hpp"
 
 
 namespace omnigraph {
@@ -36,7 +37,6 @@ class LocalizedComponent: public GraphActionHandler<Graph> /*: public GraphCompo
     //usage of inclusive-inclusive range!!!
     map<VertexId, Range> vertex_depth_;
     multimap<size_t, VertexId> height_2_vertices_;
-    size_t diff_threshold_;
 
     bool AllEdgeOut(VertexId v) const {
         for (EdgeId e : g_.OutgoingEdges(v)) {
@@ -87,8 +87,7 @@ public:
         DEBUG("Adding vertex " << g_.str(v) << " to the component");
         vertex_depth_.insert(make_pair(v, dist_range));
         height_2_vertices_.insert(make_pair(Average(dist_range), v));
-        DEBUG(
-                "Range " << dist_range << " Average height " << Average(dist_range));
+        DEBUG("Range " << dist_range << " Average height " << Average(dist_range));
         for (EdgeId e : g_.IncomingEdges(v)) {
             end_vertices_.erase(g_.EdgeStart(e));
         }
@@ -98,16 +97,21 @@ public:
     }
 
     //todo what if path processor will fail inside
-    size_t TotalPathCount() const {
-        size_t answer = 0;
-        for (VertexId end_v : end_vertices_) {
-            PathStorageCallback<Graph> path_storage(g_);
-            Range r = vertex_depth_.find(end_v)->second;
-            ProcessPaths(g_, r.start_pos, r.end_pos, start_vertex_, end_v, path_storage);
-            answer += path_storage.size();
-        }
-        return answer;
-    }
+//    size_t TotalPathCount() const {
+//        size_t answer = 0;
+//        size_t max_len = 0;
+//        for (VertexId end_v : end_vertices_) {
+//            max_len = std::max(max_len, vertex_depth_.find(end_v)->second.end_pos);
+//        }
+//        PathProcessor<Graph> processor(g_, start_vertex_, max_len);
+//        for (VertexId end_v : end_vertices_) {
+//            PathStorageCallback<Graph> path_storage(g_);
+//            Range r = vertex_depth_.find(end_v)->second;
+//            processor.Process(end_v, r.start_pos, r.end_pos, path_storage, /*max_edge_cnt*/ -1ul);
+//            answer += path_storage.size();
+//        }
+//        return answer;
+//    }
 
     bool CheckCompleteness() const {
         for (VertexId v : key_set(vertex_depth_)) {
@@ -121,15 +125,20 @@ public:
 
     bool NeedsProjection() const {
         DEBUG("Checking if component needs projection");
-        size_t tot_path_count = TotalPathCount();
-        bool answer = tot_path_count > end_vertices_.size();
-//        more robust to path processor failure this way VERIFY(tot_path_count >= end_vertices_.size());
-        if (answer) {
-            DEBUG("Needs projection");
-        } else {
-            DEBUG("Doesn't need projection");
+        for (VertexId v : key_set(vertex_depth_)) {
+            if (v == start_vertex_)
+                continue;
+            vector<EdgeId> filtered_incoming;
+            std::copy_if(g_.in_begin(v), g_.in_end(v), std::back_inserter(filtered_incoming), 
+                        [&] (EdgeId e) {return contains(g_.EdgeStart(e));});
+            VERIFY_MSG(filtered_incoming.size() == g_.IncomingEdgeCount(v), "Strange component");
+            if (g_.IncomingEdgeCount(v) > 1) {
+                DEBUG("Needs projection");
+                return true;
+            }
         }
-        return answer;
+        DEBUG("Doesn't need projection");
+        return false;
     }
 
     bool contains(VertexId v) const {
@@ -158,6 +167,10 @@ public:
         return distances;
     }
 
+    size_t v_size() const {
+        return vertex_depth_.size();
+    }
+
     VertexId start_vertex() const {
         return start_vertex_;
     }
@@ -181,8 +194,7 @@ public:
     }
 
     GraphComponent<Graph> AsGraphComponent() const {
-        set<VertexId> vertices = key_set(vertex_depth_);
-        return GraphComponent<Graph>(g_, vertices.begin(), vertices.end());
+        return GraphComponent<Graph>::FromVertices(g_, key_set(vertex_depth_));
     }
 
     bool ContainsConjugateVertices() const {
@@ -262,8 +274,7 @@ public:
     }
 
 private:
-    DECL_LOGGER("LocalizedComponent")
-    ;
+    DECL_LOGGER("LocalizedComponent");
 };
 
 template<class Graph>
@@ -354,8 +365,7 @@ private:
     set<VertexId> vertices_;
 
 private:
-    DECL_LOGGER("SkeletonTree")
-    ;
+    DECL_LOGGER("SkeletonTree");
 };
 
 typedef size_t mask;
@@ -446,10 +456,8 @@ public:
         auto it = vertex_colors_.find(v);
         if (it == vertex_colors_.end()) {
             DEBUG("No color for vertex " << comp_.g().str(v));
-            DEBUG(
-                    "Incoming edges " << comp_.g().str(comp_.g().IncomingEdges(v)));
-            DEBUG(
-                    "Outgoing edges " << comp_.g().str(comp_.g().OutgoingEdges(v)));
+            DEBUG("Incoming edges " << comp_.g().str(comp_.g().IncomingEdges(v)));
+            DEBUG("Outgoing edges " << comp_.g().str(comp_.g().OutgoingEdges(v)));
         }
         VERIFY(it != vertex_colors_.end());
         return it->second;
@@ -483,8 +491,7 @@ public:
     }
 
 private:
-    DECL_LOGGER("ComponentColoring")
-    ;
+    DECL_LOGGER("ComponentColoring");
 };
 
 template<class Graph>
@@ -711,24 +718,23 @@ void PrintComponent(const LocalizedComponent<Graph>& component,
         const SkeletonTree<Graph>& tree, const string& file_name) {
     typedef typename Graph::EdgeId EdgeId;
     const set<EdgeId> tree_edges = tree.edges();
-    shared_ptr<omnigraph::visualization::ElementColorer<typename Graph::EdgeId>> edge_colorer = make_shared<omnigraph::visualization::MapColorer<EdgeId>>(
+    shared_ptr<visualization::graph_colorer::ElementColorer<typename Graph::EdgeId>> edge_colorer =
+            make_shared<visualization::graph_colorer::MapColorer<EdgeId>>(
             tree_edges.begin(), tree_edges.end(),"green", ""
         );
-    visualization::WriteComponentSinksSources(component.AsGraphComponent(), file_name,
-            omnigraph::visualization::DefaultColorer(component.g(), edge_colorer),
-            *StrGraphLabelerInstance(component.g()));
+    visualization::visualization_utils::WriteComponentSinksSources(component.AsGraphComponent(), file_name,
+            visualization::graph_colorer::DefaultColorer(component.g(), edge_colorer),
+            *visualization::graph_labeler::StrGraphLabelerInstance(component.g()));
 }
 
 template<class Graph>
 void PrintComponent(const LocalizedComponent<Graph>& component,
         const string& file_name) {
-    visualization::WriteComponent(component.AsGraphComponent(), file_name,
-            omnigraph::visualization::DefaultColorer(component.g()),
-            *StrGraphLabelerInstance(component.g()));
+    visualization::visualization_utils::WriteComponent(component.AsGraphComponent(), file_name,
+            visualization::graph_colorer::DefaultColorer(component.g()),
+            *visualization::graph_labeler::StrGraphLabelerInstance(component.g()));
 }
 
-
-
 template<class Graph>
 class ComponentProjector {
     typedef typename Graph::EdgeId EdgeId;
@@ -754,8 +760,7 @@ class ComponentProjector {
             VertexId end_v = g_.EdgeEnd(*it);
             size_t start_dist = component_.avg_distance(start_v);
             size_t end_dist = component_.avg_distance(end_v);
-            DEBUG(
-                    "Processing edge " << g_.str(*it) << " avg_start " << start_dist << " avg_end " << end_dist);
+            DEBUG("Processing edge " << g_.str(*it) << " avg_start " << start_dist << " avg_end " << end_dist);
             set<size_t> dist_to_split(level_heights.lower_bound(start_dist),
                     level_heights.upper_bound(end_dist));
             DEBUG("Distances to split " << ToString<size_t>(dist_to_split));
@@ -769,7 +774,7 @@ class ComponentProjector {
                     continue;
                 DEBUG("Splitting on " << curr);
                 size_t pos = curr - offset;
-                if(pos >= g_.length(e)) {
+                if (pos >= g_.length(e)) {
                     return false;
                 }
                 DEBUG("Splitting edge " << g_.str(e) << " on position " << pos);
@@ -796,8 +801,7 @@ class ComponentProjector {
         for (VertexId v : component_.vertices_on_height(start_height)) {
             if (component_.end_vertices().count(v) == 0) {
                 for (EdgeId e : g_.OutgoingEdges(v)) {
-                    VERIFY(
-                            component_.avg_distance(g_.EdgeEnd(e)) == end_height);
+                    VERIFY(component_.avg_distance(g_.EdgeEnd(e)) == end_height);
                     if (tree_.Contains(e)
                             && coloring_.IsSubset(coloring_.color(e), color)) {
                         return e;
@@ -812,7 +816,7 @@ class ComponentProjector {
 public:
 
     bool ProjectComponent() {
-        if(!SplitComponent()) {
+        if (!SplitComponent()) {
             DEBUG("Component can't be split");
             return false;
         }
@@ -826,8 +830,7 @@ public:
             EdgeId target = CorrespondingTreeEdge(*it);
             DEBUG("Target found " << g_.str(target));
             if (target != *it) {
-                DEBUG(
-                        "Glueing " << g_.str(*it) << " to target " << g_.str(target));
+                DEBUG("Glueing " << g_.str(*it) << " to target " << g_.str(target));
                 g_.GlueEdges(*it, target);
                 DEBUG("Glued");
             }
@@ -845,8 +848,7 @@ public:
     }
 
 private:
-    DECL_LOGGER("ComponentProjector")
-    ;
+    DECL_LOGGER("ComponentProjector");
 };
 
 template<class Graph>
@@ -857,7 +859,7 @@ class LocalizedComponentFinder {
     static const size_t exit_bound = 32;
     static const size_t inf = -1ul;
 
-    Graph& g_;
+    const Graph& g_;
     size_t max_length_;
     size_t length_diff_threshold_;
 
@@ -930,6 +932,7 @@ class LocalizedComponentFinder {
         return true;
     }
 
+    //todo optimize
     boost::optional<VertexId> ClosestNeigbour() const {
         size_t min_dist = inf;
         boost::optional<VertexId> answer = boost::none;
@@ -984,16 +987,15 @@ class LocalizedComponentFinder {
     }
 
 public:
-    LocalizedComponentFinder(Graph& g, size_t max_length,
+    LocalizedComponentFinder(const Graph& g, size_t max_length,
             size_t length_diff_threshold, VertexId start_v) :
             g_(g), max_length_(max_length), length_diff_threshold_(
                     length_diff_threshold), comp_(g, start_v) {
-        DEBUG(
-                "Component finder from vertex " << g_.str(comp_.start_vertex()) << " created");
-        DominatedSetFinder<Graph> dominated_set_finder(g_, start_v, max_length);
+        DEBUG("Component finder from vertex " << g_.str(comp_.start_vertex()) << " created");
+        //todo introduce reasonable vertex bound
+        DominatedSetFinder<Graph> dominated_set_finder(g_, start_v, max_length/*, 1000*/);
         dominated_set_finder.FillDominated();
         dominated_ = dominated_set_finder.dominated();
-//        ProcessStartVertex();
     }
 
     bool ProceedFurther() {
@@ -1004,8 +1006,7 @@ public:
             optional<VertexId> next_v = ClosestNeigbour();
 
             if (next_v) {
-                DEBUG(
-                        "Vertex " << g_.str(*next_v) << " was chosen as closest neighbour");
+                DEBUG("Vertex " << g_.str(*next_v) << " was chosen as closest neighbour");
                 interfering_.insert(*next_v);
                 DEBUG("Trying to construct closure");
                 if (!CloseComponent()) {
@@ -1046,19 +1047,52 @@ public:
     }
 
 private:
-    DECL_LOGGER("LocalizedComponentFinder")
-    ;
+    DECL_LOGGER("LocalizedComponentFinder");
+};
+
+template<class Graph>
+class CandidateFinder : public VertexCondition<Graph> {
+    typedef typename Graph::VertexId VertexId;
+    size_t max_length_;
+    size_t length_diff_;
+
+public:
+    CandidateFinder(const Graph& g, size_t max_length, size_t length_diff) : 
+        VertexCondition<Graph>(g), max_length_(max_length), length_diff_(length_diff) {
+    }
+
+    bool Check(VertexId v) const override {
+        const Graph& g = this->g();
+        LocalizedComponentFinder<Graph> comp_finder(g, max_length_,
+                                                    length_diff_, v);
+        while (comp_finder.ProceedFurther()) {
+            DEBUG("Found component candidate start_v " << g.str(v));
+            LocalizedComponent<Graph> component = comp_finder.component();
+            //todo introduce reasonable size bound
+            //if (component.size() > 1000) {
+            //    return false;
+            //}
+            ComponentColoring<Graph> coloring(component);
+            SkeletonTreeFinder<Graph> tree_finder(component, coloring);
+            DEBUG("Looking for a tree");
+            if (tree_finder.FindTree()) {
+                return true;
+            }
+        }
+        return false;
+    }
+private:
+    DECL_LOGGER("CBRCandidateFinder");
 };
 
 template<class Graph>
-class ComplexBulgeRemover {
+class ComplexBulgeRemover : public PersistentProcessingAlgorithm<Graph, typename Graph::VertexId> {
     typedef typename Graph::VertexId VertexId;
     typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentProcessingAlgorithm<Graph, VertexId> base;
 
-    Graph& g_;
     size_t max_length_;
     size_t length_diff_;
-
     string pics_folder_;
 
     bool ProcessComponent(LocalizedComponent<Graph>& component,
@@ -1069,92 +1103,111 @@ class ComplexBulgeRemover {
         DEBUG("Looking for a tree");
         if (tree_finder.FindTree()) {
             DEBUG("Tree found");
-
             SkeletonTree<Graph> tree(component, tree_finder.GetTreeEdges());
 
             if (!pics_folder_.empty()) {
                 PrintComponent(component, tree,
                         pics_folder_ + "success/"
-                                + ToString(g_.int_id(component.start_vertex()))
+                                + ToString(this->g().int_id(component.start_vertex()))
                                 + "_" + ToString(candidate_cnt) + ".dot");
             }
 
-            ComponentProjector<Graph> projector(g_, component, coloring, tree);
-            if(!projector.ProjectComponent()) {
+            ComponentProjector<Graph> projector(this->g(), component, coloring, tree);
+            if (!projector.ProjectComponent()) {
+                //todo think of stopping the whole process
                 DEBUG("Component can't be projected");
                 return false;
             }
-            DEBUG(
-                    "Successfully processed component candidate " << candidate_cnt << " start_v " << g_.str(component.start_vertex()));
+            DEBUG("Successfully processed component candidate " << candidate_cnt << " start_v " << this->g().str(component.start_vertex()));
             return true;
         } else {
-            DEBUG(
-                    "Failed to find skeleton tree for candidate " << candidate_cnt << " start_v " << g_.str(component.start_vertex()));
+            DEBUG("Failed to find skeleton tree for candidate " << candidate_cnt << " start_v " << this->g().str(component.start_vertex()));
             if (!pics_folder_.empty()) {
                 //todo check if we rewrite all of the previous pics!
                 PrintComponent(component,
                         pics_folder_ + "fail/"
-                                + ToString(g_.int_id(component.start_vertex())) //+ "_" + ToString(candidate_cnt)
+                                + ToString(this->g().int_id(component.start_vertex())) //+ "_" + ToString(candidate_cnt)
                                 + ".dot");
             }
             return false;
         }
     }
 
-public:
-    ComplexBulgeRemover(Graph& g, size_t max_length, size_t length_diff,
-            const string& pics_folder = "") :
-            g_(g), max_length_(max_length), length_diff_(length_diff), pics_folder_(
-                    pics_folder) {
+    bool InnerProcess(VertexId v, std::vector<VertexId>& vertices_to_post_process) {
+        size_t candidate_cnt = 0;
+        LocalizedComponentFinder<Graph> comp_finder(this->g(), max_length_,
+                                                    length_diff_, v);
+        while (comp_finder.ProceedFurther()) {
+            candidate_cnt++;
+            DEBUG("Found component candidate " << candidate_cnt << " start_v " << this->g().str(v));
+            LocalizedComponent<Graph> component = comp_finder.component();
+            if (ProcessComponent(component, candidate_cnt)) {
+                GraphComponent<Graph> gc = component.AsGraphComponent();
+                std::copy(gc.v_begin(), gc.v_end(), std::back_inserter(vertices_to_post_process));
+                return true;
+            }
+        }
+        return false;
     }
 
-    bool Run() {
-        size_t cnt = 0;
-        DEBUG("Complex bulge remover started");
+    //todo shrink this set if needed
+    set<VertexId> Neighbours(VertexId v) const {
+        set<VertexId> answer;
+        for (EdgeId e : this->g().IncidentEdges(v)) {
+            answer.insert(this->g().EdgeStart(e));
+            answer.insert(this->g().EdgeEnd(e));
+        }
+        return answer;
+    }
+
+public:
+
+    //track_changes=false leads to every iteration run from scratch
+    ComplexBulgeRemover(Graph& g, size_t max_length, size_t length_diff,
+                        size_t chunk_cnt, const string& pics_folder = "") :
+            base(g, std::make_shared<omnigraph::ParallelInterestingElementFinder<Graph, VertexId>>(
+                CandidateFinder<Graph>(g, max_length, length_diff), chunk_cnt), 
+                false, std::less<VertexId>(), /*track changes*/false),
+            max_length_(max_length), 
+            length_diff_(length_diff), 
+            pics_folder_(pics_folder) {
         if (!pics_folder_.empty()) {
 //            remove_dir(pics_folder_);
             make_dir(pics_folder_);
             make_dir(pics_folder_ + "success/");
             make_dir(pics_folder_ + "fail/");
         }
-        bool something_done_flag = false;
-        for (auto it = g_.SmartVertexBegin(); !it.IsEnd(); ++it) {
-            DEBUG("Processing vertex " << g_.str(*it));
-            size_t candidate_cnt = 0;
-            vector<VertexId> vertices_to_post_process;
-            { //important scope!!!
-                LocalizedComponentFinder<Graph> comp_finder(g_, max_length_,
-                        length_diff_, *it);
-                while (comp_finder.ProceedFurther()) {
-                    candidate_cnt++;
-                    DEBUG(
-                            "Found component candidate " << candidate_cnt << " start_v " << g_.str(*it));
-                    LocalizedComponent<Graph> component =
-                            comp_finder.component();
-                    if (ProcessComponent(component, candidate_cnt)) {
-                        something_done_flag = true;
-                        cnt++;
-                        GraphComponent<Graph> gc = component.AsGraphComponent();
-                        vertices_to_post_process.insert(
-                                vertices_to_post_process.end(), gc.v_begin(),
-                                gc.v_end());
-                        break;
-                    }
+
+    }
+
+    bool Process(VertexId v) override {
+        DEBUG("Processing vertex " << this->g().str(v));
+        vector<VertexId> vertices_to_post_process;
+        //a bit of hacking (look further)
+        SmartSetIterator<Graph, VertexId> added_vertices(this->g(), true);
+
+        if (InnerProcess(v, vertices_to_post_process)) {
+            for (VertexId p_p : vertices_to_post_process) {
+                //Neighbours(p_p) includes p_p
+                for (VertexId n : Neighbours(p_p)) {
+                    this->ReturnForConsideration(n);
                 }
+                this->g().CompressVertex(p_p);
             }
-            for (VertexId v : vertices_to_post_process) {
-                it.HandleAdd(v);
-                g_.CompressVertex(v);
+            return true;
+        } else {
+            //a bit of hacking:
+            //reverting changes resulting from potentially attempted, but failed split
+            Compressor<Graph> compressor(this->g());
+            for (; !added_vertices.IsEnd(); ++added_vertices) {
+                compressor.CompressVertex(*added_vertices);
             }
+            return false;
         }
-        DEBUG("Complex bulge remover finished");
-        DEBUG("Bulges processed " << cnt);
-        return something_done_flag;
     }
 
 private:
-    DECL_LOGGER("ComplexBulgeRemover")
-    ;
+    DECL_LOGGER("ComplexBulgeRemover");
 };
 
 }
diff --git a/src/common/modules/simplification/complex_tip_clipper.hpp b/src/common/modules/simplification/complex_tip_clipper.hpp
new file mode 100644
index 0000000..5da0d68
--- /dev/null
+++ b/src/common/modules/simplification/complex_tip_clipper.hpp
@@ -0,0 +1,178 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include <limits>
+
+#include "visualization/visualization.hpp"
+#include "compressor.hpp"
+#include "dominated_set_finder.hpp"
+#include "assembly_graph/graph_support/parallel_processing.hpp"
+
+
+namespace omnigraph{
+
+template<class Graph>
+class ComplexTipFinder {
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+    const Graph& g_;
+
+    double relative_coverage_treshold_;
+    size_t edge_length_treshold_;
+    size_t max_path_length_;
+
+    double GetTipCoverage(const GraphComponent<Graph>& component) const {
+        double cov = numeric_limits<double>::max();
+        for (auto edge : component.edges()) {
+            cov = std::min(cov, g_.coverage(edge));
+        }
+        return cov;
+    }
+
+    double GetOutwardCoverage(const GraphComponent<Graph>& component) const {
+        double cov = 0.0;
+        for (auto v : component.vertices()) {
+            for (auto edge : g_.IncidentEdges(v)) {
+                if (!component.contains(edge)) {
+                    cov = std::max(cov, g_.coverage(edge));
+                }
+            }
+        }
+        return cov;
+    }
+
+    double GetRelativeTipCoverage(const GraphComponent<Graph>& component) const {
+        return GetTipCoverage(component) / GetOutwardCoverage(component);
+    }
+
+    bool ComponentCheck(const GraphComponent<Graph>& component) const {
+        if (component.empty() || component.e_size() == 0)
+            return false;
+
+        //check if usual tip
+        if (component.vertices().size() == 2) {
+            DEBUG("Component is a tip! Exiting...");
+            return false;
+        }
+
+        //checking edge lengths
+        if (std::any_of(component.e_begin(), component.e_end(), [&](EdgeId e) {return g_.length(e) > edge_length_treshold_;})) {
+            DEBUG("Tip contains too long edges");
+            return false;
+        }
+
+        if (math::ge(GetRelativeTipCoverage(component), relative_coverage_treshold_)) {
+            DEBUG("Tip is too high covered with respect to external edges");
+            return false;
+        }
+
+        return true;
+    }
+
+public:
+    ComplexTipFinder(const Graph& g, double relative_coverage,
+                      size_t max_edge_length, size_t max_path_length)
+            : g_(g),
+              relative_coverage_treshold_(math::ge(relative_coverage, 0.0) ?
+                                          relative_coverage : std::numeric_limits<double>::max()),
+              edge_length_treshold_(max_edge_length), max_path_length_(max_path_length)
+    { }
+
+    GraphComponent<Graph> operator()(VertexId v) const {
+        GraphComponent<Graph> empty(g_);
+        VERIFY(empty.empty());
+        if (g_.IncomingEdgeCount(v) != 0) {
+            return empty;
+        }
+
+        DominatedSetFinder<Graph> finder(g_, v, max_path_length_);
+        if (finder.FillDominated()) {
+            auto ranges = finder.dominated();
+            auto dom_component = finder.AsGraphComponent();
+            std::set<EdgeId> component_edges(dom_component.edges());
+            for (auto v : dom_component.exits()) {
+                size_t current_path_length = ranges[v].end_pos;
+                for (auto e : g_.OutgoingEdges(v)) {
+                    if (current_path_length + g_.length(e) > max_path_length_) {
+                        DEBUG("Component contains too long paths");
+                        return empty;
+                    }
+                    component_edges.insert(e);
+                }
+            }
+            auto extended_component = GraphComponent<Graph>::FromEdges(g_, component_edges);
+            if (ComponentCheck(extended_component))
+                return extended_component;
+            else
+                return empty;
+        } else {
+            DEBUG("Failed to find dominated component");
+            return empty;
+        }
+    }
+
+private:
+    DECL_LOGGER("ComplexTipClipper")
+};
+
+template<class Graph>
+class ComplexTipClipper : public PersistentProcessingAlgorithm<Graph, typename Graph::VertexId> {
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentProcessingAlgorithm<Graph, VertexId> base;
+    typedef typename ComponentRemover<Graph>::HandlerF HandlerF;
+
+    string pics_folder_;
+    ComplexTipFinder<Graph> finder_;
+    ComponentRemover<Graph> component_remover_;
+
+public:
+    //track_changes=false leads to every iteration run from scratch
+    ComplexTipClipper(Graph& g, double relative_coverage,
+                      size_t max_edge_len, size_t max_path_len,
+                      size_t chunk_cnt,
+                      const string& pics_folder = "" ,
+                      HandlerF removal_handler = nullptr) :
+            base(g, nullptr, false, std::less<VertexId>(), /*track changes*/false),
+            pics_folder_(pics_folder),
+            finder_(g, relative_coverage, max_edge_len, max_path_len),
+            component_remover_(g, removal_handler) {
+        if (!pics_folder_.empty()) {
+            make_dir(pics_folder_);
+        }
+        this->interest_el_finder_ = std::make_shared<ParallelInterestingElementFinder<Graph, VertexId>>(
+                [&](VertexId v) {return !finder_(v).empty();}, chunk_cnt);
+    }
+
+    bool Process(VertexId v) override {
+        DEBUG("Processing vertex " << this->g().str(v));
+        auto component = finder_(v);
+        if (component.empty()) {
+            DEBUG("Failed to detect complex tip starting with vertex " << this->g().str(v));
+            return false;
+        }
+
+        if (!pics_folder_.empty()) {
+            visualization::visualization_utils::WriteComponentSinksSources(component,
+                                                      pics_folder_
+                                                      + ToString(this->g().int_id(v)) //+ "_" + ToString(candidate_cnt)
+                                                      + ".dot");
+        }
+
+        VERIFY(component.e_size() && component.v_size());
+        DEBUG("Detected tip component edge cnt: " << component.e_size());
+        component_remover_.DeleteComponent(component.e_begin(), component.e_end());
+        DEBUG("Complex tip removed");
+        return true;
+    }
+
+private:
+    DECL_LOGGER("ComplexTipClipper")
+};
+
+}
diff --git a/src/modules/algorithms/simplification/compressor.hpp b/src/common/modules/simplification/compressor.hpp
similarity index 64%
rename from src/modules/algorithms/simplification/compressor.hpp
rename to src/common/modules/simplification/compressor.hpp
index 27257f0..7d210fd 100644
--- a/src/modules/algorithms/simplification/compressor.hpp
+++ b/src/common/modules/simplification/compressor.hpp
@@ -8,13 +8,10 @@ namespace omnigraph {
 * simple one-by-one compressing has square complexity.
 */
 template<class Graph>
-class Compressor : public PersistentProcessingAlgorithm<Graph, typename Graph::VertexId,
-        ParallelInterestingElementFinder<Graph, typename Graph::VertexId>> {
+class Compressor {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef PersistentProcessingAlgorithm <Graph,
-    VertexId, ParallelInterestingElementFinder<Graph, VertexId>> base;
-    typedef CompressCondition <Graph> ConditionT;
+    typedef CompressCondition<Graph> ConditionT;
 
     Graph &graph_;
     ConditionT compress_condition_;
@@ -40,9 +37,8 @@ class Compressor : public PersistentProcessingAlgorithm<Graph, typename Graph::V
         return true;
     }
 
-//do not use without checks:)
+    //do not use without checks:)
     EdgeId CompressWithoutChecks(VertexId v) {
-
         EdgeId e = graph_.GetUniqueOutgoingEdge(v);
         EdgeId start_edge = e;
         while (GoUniqueWayBackward(e) && e != start_edge
@@ -50,7 +46,6 @@ class Compressor : public PersistentProcessingAlgorithm<Graph, typename Graph::V
                                           graph_.EdgeEnd(e))) {
         }
         vector <EdgeId> mergeList;
-        //        e = graph_.conjugate(e);
         start_edge = e;
         do {
             mergeList.push_back(e);
@@ -64,25 +59,8 @@ class Compressor : public PersistentProcessingAlgorithm<Graph, typename Graph::V
 
     }
 
-//    //todo use graph method!
-//    bool CanCompressVertex(VertexId v) const {
-//        if (!graph_.CheckUniqueOutgoingEdge(v)
-//            || !graph_.CheckUniqueIncomingEdge(v)) {
-//            TRACE(
-//                    "Vertex "
-//                            << graph_.str(v)
-//                            << " judged NOT compressible. Proceeding to the next vertex");
-//            TRACE("Processing vertex " << graph_.str(v) << " finished");
-//            return false;
-//        }
-//        return true;
-//    }
 public:
-    Compressor(Graph &graph, size_t chunk_cnt = 1, bool safe_merging = true) :
-            base(graph,
-                 ParallelInterestingElementFinder<Graph, VertexId>(graph,
-                                                                   ConditionT(graph), chunk_cnt),
-                    /*canonical only*/true),
+    Compressor(Graph& graph, bool safe_merging = true) :
             graph_(graph),
             compress_condition_(graph),
             safe_merging_(safe_merging) {
@@ -94,13 +72,7 @@ public:
      * @return true if vertex can be compressed and false otherwise
      */
     bool CompressVertex(VertexId v) {
-        TRACE("Processing vertex " << graph_.str(v) << " started");
-        if (!compress_condition_.Check(v)) {
-            return false;
-        }
-        TRACE("Vertex " << graph_.str(v) << " judged compressible");
-        CompressWithoutChecks(v);
-        return true;
+        return CompressVertexEdgeId(v) != EdgeId(0);
     }
 
     EdgeId CompressVertexEdgeId(VertexId v) {
@@ -116,18 +88,30 @@ public:
 //        return CanCompressVertex(v);
 //    }
 
+private:
+    DECL_LOGGER("Compressor")
+};
+
+template<class Graph>
+class CompressingProcessor : public PersistentProcessingAlgorithm<Graph, typename Graph::VertexId> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef PersistentProcessingAlgorithm<Graph, VertexId> base;
+    typedef CompressCondition<Graph> ConditionT;
+
+    Compressor<Graph> compressor_;
+public:
+    CompressingProcessor(Graph &graph, size_t chunk_cnt = 1, bool safe_merging = true) :
+            base(graph,
+                 std::make_shared<ParallelInterestingElementFinder<Graph, VertexId>>(ConditionT(graph), chunk_cnt),
+                    /*canonical only*/true),
+            compressor_(graph, safe_merging) {
+    }
+
 protected:
     bool Process(VertexId v) override {
-        if (compress_condition_.Check(v)) {
-            CompressWithoutChecks(v);
-            return true;
-        } else {
-            return false;
-        }
+        return compressor_.CompressVertex(v);
     }
-
-private:
-    DECL_LOGGER("Compressor")
 };
 
 /**
@@ -135,7 +119,7 @@ private:
 */
 template<class Graph>
 bool CompressAllVertices(Graph &g, bool safe_merging = true, size_t chunk_cnt = 1) {
-    Compressor<Graph> compressor(g, chunk_cnt, safe_merging);
+    CompressingProcessor<Graph> compressor(g, chunk_cnt, safe_merging);
     return compressor.Run();
 }
 }
diff --git a/src/modules/algorithms/simplification/dominated_set_finder.hpp b/src/common/modules/simplification/dominated_set_finder.hpp
similarity index 97%
rename from src/modules/algorithms/simplification/dominated_set_finder.hpp
rename to src/common/modules/simplification/dominated_set_finder.hpp
index 050777d..b7e779a 100644
--- a/src/modules/algorithms/simplification/dominated_set_finder.hpp
+++ b/src/common/modules/simplification/dominated_set_finder.hpp
@@ -115,8 +115,7 @@ public:
     }
 
     GraphComponent<Graph> AsGraphComponent() const {
-        set<VertexId> vertices = key_set(dominated_);
-        return GraphComponent<Graph>(g_, vertices.begin(), vertices.end());
+        return GraphComponent<Graph>::FromVertices(g_, key_set(dominated_));
     }
 
     //little meaning if FillDominated returned false
diff --git a/src/modules/algorithms/simplification/ec_threshold_finder.hpp b/src/common/modules/simplification/ec_threshold_finder.hpp
similarity index 97%
rename from src/modules/algorithms/simplification/ec_threshold_finder.hpp
rename to src/common/modules/simplification/ec_threshold_finder.hpp
index 84d7af2..f0e27eb 100644
--- a/src/modules/algorithms/simplification/ec_threshold_finder.hpp
+++ b/src/common/modules/simplification/ec_threshold_finder.hpp
@@ -8,13 +8,13 @@
 #ifndef OMNI_TOOLS_HPP_
 #define OMNI_TOOLS_HPP_
 
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 #include "assembly_graph/graph_support/basic_edge_conditions.hpp"
 #include "assembly_graph/graph_support/parallel_processing.hpp"
 #include "assembly_graph/graph_support/basic_vertex_conditions.hpp"
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
 
 #ifdef USE_GLIBCXX_PARALLEL
 #include "parallel/algorithm"
diff --git a/src/modules/algorithms/simplification/erroneous_connection_remover.hpp b/src/common/modules/simplification/erroneous_connection_remover.hpp
similarity index 50%
rename from src/modules/algorithms/simplification/erroneous_connection_remover.hpp
rename to src/common/modules/simplification/erroneous_connection_remover.hpp
index c755d19..f841913 100644
--- a/src/modules/algorithms/simplification/erroneous_connection_remover.hpp
+++ b/src/common/modules/simplification/erroneous_connection_remover.hpp
@@ -16,21 +16,16 @@
 
 #include "assembly_graph/graph_support/graph_processing_algorithm.hpp"
 #include "assembly_graph/graph_support/basic_edge_conditions.hpp"
-#include "dev_support/func.hpp"
+#include "func/func.hpp"
 #include "math/xmath.h"
-#include "algorithms/dijkstra/dijkstra_helper.hpp"
-#include "assembly_graph/graph_core/coverage.hpp"
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
+#include "assembly_graph/core/coverage.hpp"
+#include "assembly_graph/graph_support/detail_coverage.hpp"
+#include "modules/simplification/topological_edge_conditions.hpp"
 
 namespace omnigraph {
 
-template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId>
-NecessaryECCondition(const Graph& g, size_t max_length, double max_coverage) {
-    return AddAlternativesPresenceCondition(g, pred::And(LengthUpperBound<Graph>(g, max_length),
-                                                        CoverageUpperBound<Graph>(g, max_coverage)));
-}
-
-
+//todo move to rnaSPAdes project
 template<class Graph>
 class RelativeCoverageECCondition: public EdgeCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
@@ -106,12 +101,14 @@ public:
 
 };
 
+//todo move to rnaSPAdes project
 template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId> AddRelativeCoverageECCondition(const Graph &g, double rcec_ratio,
-                                                                            pred::TypedPredicate<typename Graph::EdgeId> condition) {
-    return pred::And(RelativeCoverageECCondition<Graph>(g, rcec_ratio), condition);
+func::TypedPredicate<typename Graph::EdgeId> AddRelativeCoverageECCondition(const Graph &g, double rcec_ratio,
+                                                                            func::TypedPredicate<typename Graph::EdgeId> condition) {
+    return func::And(RelativeCoverageECCondition<Graph>(g, rcec_ratio), condition);
 }
 
+//todo move to rnaSPAdes project
 template<class Graph>
 inline bool IsSimpleBulge(const Graph &g, typename Graph::EdgeId e){
     size_t edge_count = g.GetEdgesBetween(g.EdgeStart(e), g.EdgeEnd(e)).size();
@@ -122,156 +119,132 @@ inline bool IsSimpleBulge(const Graph &g, typename Graph::EdgeId e){
 }
 
 template<class Graph>
-class NotBulgeECCondition : public EdgeCondition<Graph> {
+inline bool IsAlternativePathExist(const Graph &g, typename Graph::EdgeId e){
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef EdgeCondition<Graph> base;
 
-public:
+    MostCoveredSimpleAlternativePathChooser<Graph> path_chooser(g, e);
 
-    NotBulgeECCondition(const Graph &g)
-            : base(g) {
+    VertexId start = g.EdgeStart(e);
+    TRACE("Start " << g.str(start));
+    VertexId end = g.EdgeEnd(e);
+    TRACE("End " << g.str(end));
 
-    }
+    ProcessPaths(g, 0, std::numeric_limits<std::size_t>::max(), start, end, path_chooser, std::numeric_limits<std::size_t>::max());
 
-    bool Check(EdgeId e) const {
-        if (HasAlternatives(this->g(), e) && !IsSimpleBulge(this->g(), e)){
-            DEBUG("edge id = " << this->g().int_id(e)
-                 << " between = " << this->g().GetEdgesBetween(this->g().EdgeStart(e), this->g().EdgeEnd(e)).size()
-                 << " between ids: " << this->g().GetEdgesBetween(this->g().EdgeStart(e), this->g().EdgeEnd(e))
-                 << " outgoing s = " << this->g().OutgoingEdgeCount(this->g().EdgeStart(e))
-                 << " incoming e = " << this->g().IncomingEdgeCount(this->g().EdgeEnd(e)));
-        }
-        return !IsSimpleBulge(this->g(), e);
-    }
+    const vector<EdgeId>& path = path_chooser.most_covered_path();
+    double path_coverage = path_chooser.max_coverage();
+    if (!path.empty() && math::gr(path_coverage, 0.)) {
+        VERIFY(g.EdgeStart(path[0]) == start);
+        VERIFY(g.EdgeEnd(path.back()) == end);
 
-};
-
-template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId> AddNotBulgeECCondition(const Graph &g,
-                                                                    pred::TypedPredicate<typename Graph::EdgeId> condition) {
-    return pred::And(NotBulgeECCondition<Graph>(g), condition);
+        return true;
+    }
+    else
+        return false;
 }
 
 template<class Graph>
-bool RemoveErroneousEdgesInCoverageOrder(Graph &g,
-                                         pred::TypedPredicate<typename Graph::EdgeId> removal_condition,
-                                         double max_coverage,
-                                         std::function<void(typename Graph::EdgeId)> removal_handler) {
-    omnigraph::EdgeRemovingAlgorithm<Graph> erroneous_edge_remover(g,
-                                                                   AddAlternativesPresenceCondition(g, removal_condition),
-                                                                   removal_handler);
-
-    return erroneous_edge_remover.Run(CoverageComparator<Graph>(g),
-                                      CoverageUpperBound<Graph>(g, max_coverage));
+inline bool IsAlternativeInclusivePathExist(const Graph &g, typename Graph::EdgeId forbidden_edge, typename Graph::EdgeId compulsory_edge){
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+
+    MostCoveredSimpleAlternativePathChooser<Graph> path_chooser(g, forbidden_edge);
+
+    VertexId start = g.EdgeStart(forbidden_edge);
+    TRACE("Start " << g.str(start));
+    VertexId end = g.EdgeEnd(forbidden_edge);
+    TRACE("End " << g.str(end));
+
+    ProcessPaths(g, 0, std::numeric_limits<std::size_t>::max(), start, end, path_chooser, std::numeric_limits<std::size_t>::max());
+
+    const vector<EdgeId>& path = path_chooser.most_covered_path();
+    double path_coverage = path_chooser.max_coverage();
+    if (!path.empty() && math::gr(path_coverage, 0.)) {
+        VERIFY(g.EdgeStart(path[0]) == start);
+        VERIFY(g.EdgeEnd(path.back()) == end);
+
+        if(std::find(path.begin(), path.end(), compulsory_edge) != path.end()){
+            return true;
+        }
+    }
+    return false;
 }
 
 template<class Graph>
-bool RemoveErroneousEdgesInLengthOrder(Graph &g,
-                                       pred::TypedPredicate<typename Graph::EdgeId> removal_condition,
-                                       size_t max_length,
-                                       std::function<void(typename Graph::EdgeId)> removal_handler) {
-    omnigraph::EdgeRemovingAlgorithm<Graph> erroneous_edge_remover(g,
-                                                                   AddAlternativesPresenceCondition(g, removal_condition),
-                                                                   removal_handler);
-
-    return erroneous_edge_remover.Run(LengthComparator<Graph>(g),
-                                      LengthUpperBound<Graph>(g, max_length));
+inline bool IsReachableBulge(const Graph &g, typename Graph::EdgeId e){
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+
+    bool res = IsAlternativePathExist(g, e);
+    if(res)
+        return res;
+    else{
+        VertexId start = g.EdgeStart(e), end = g.EdgeEnd(e);
+        vector<EdgeId> incident;
+        push_back_all(incident, g.IncomingEdges(end));
+        push_back_all(incident, g.OutgoingEdges(start));
+        for (auto it = incident.begin(); it != incident.end(); ++it){
+            res = IsAlternativeInclusivePathExist(g, *it, e);
+            if(res){
+                return res;
+            }
+        }
+    }
+    return false;
 }
 
+//todo move to rnaSPAdes project
 template<class Graph>
-class SelfConjugateCondition : public EdgeCondition<Graph> {
+class NotBulgeECCondition : public EdgeCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
     typedef EdgeCondition<Graph> base;
 
- public:
+public:
 
-    SelfConjugateCondition(const Graph& g)
+    NotBulgeECCondition(const Graph &g)
             : base(g) {
+
     }
 
     bool Check(EdgeId e) const {
-        return e == this->g().conjugate(e);
+        if (HasAlternatives(this->g(), e) && !IsSimpleBulge(this->g(), e)){
+            DEBUG("edge id = " << this->g().int_id(e)
+                 << " between = " << this->g().GetEdgesBetween(this->g().EdgeStart(e), this->g().EdgeEnd(e)).size()
+                 << " between ids: " << this->g().GetEdgesBetween(this->g().EdgeStart(e), this->g().EdgeEnd(e))
+                 << " outgoing s = " << this->g().OutgoingEdgeCount(this->g().EdgeStart(e))
+                 << " incoming e = " << this->g().IncomingEdgeCount(this->g().EdgeEnd(e)));
+        }
+//        return !IsSimpleBulge(this->g(), e);
+        return !IsReachableBulge(this->g(), e);
     }
 
- private:
-    DECL_LOGGER("SelfConjugateCondition");
+private:
+    DECL_LOGGER("NotBulgeECCondition");
+
 };
 
-//coverage comparator
-//template<class Graph>
-//class RelativeCoverageCondition : public EdgeCondition<Graph> {
-//    typedef typename Graph::EdgeId EdgeId;
-//    typedef typename Graph::VertexId VertexId;
-//    typedef EdgeCondition<Graph> base;
-//
-//    double min_coverage_gap_;
-//
-//    bool StrongNeighbourCondition(EdgeId neighbour_edge,
-//                                  EdgeId possible_ec) const {
-//        return neighbour_edge == possible_ec
-//                || math::gr(this->g().coverage(neighbour_edge),
-//                            this->g().coverage(possible_ec) * min_coverage_gap_);
-////                  || this->g().length(neighbour_edge)
-////                          >= neighbour_length_threshold_;
-//    }
-//
-//    bool CheckAdjacent(const vector<EdgeId>& edges, EdgeId possible_ec) const {
-//        FOREACH (EdgeId e, edges) {
-//            if (!StrongNeighbourCondition(e, possible_ec))
-//                return false;
-//        }
-//        return true;
-//    }
-//
-// public:
-//
-//    RelativeCoverageCondition(const Graph& g, double min_coverage_gap)
-//            : base(g),
-//              min_coverage_gap_(min_coverage_gap) {
-//
-//    }
-//
-//    bool Check(EdgeId e) const {
-//        const Graph& g = this->g();
-//        return CheckAdjacent(g.IncidentEdges(g.EdgeStart(e)), e)
-//                && CheckAdjacent(g.IncidentEdges(g.EdgeEnd(e)), e);
-//    }
-//
-// private:
-//    DECL_LOGGER("RelativeCoverageCondition")
-//    ;
-//
-//};
-
-//todo refactor
+//todo move to rnaSPAdes project
 template<class Graph>
-class ThornCondition : public EdgeCondition<Graph> {
+func::TypedPredicate<typename Graph::EdgeId> AddNotBulgeECCondition(const Graph &g,
+                                                                    func::TypedPredicate<typename Graph::EdgeId> condition) {
+    return func::And(NotBulgeECCondition<Graph>(g), condition);
+}
+
+template<class Graph>
+class TopologicalThornCondition : public EdgeCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
     typedef EdgeCondition<Graph> base;
+    typedef std::vector<EdgeId> Path;
 
-    size_t uniqueness_length_;
-    size_t dijkstra_depth_;
-
-    bool Unique(const vector<EdgeId>& edges, bool forward) const {
-        return edges.size() == 1 && CheckUniqueness(*edges.begin(), forward);
-    }
+    size_t max_jump_distance_;
+    size_t max_edge_cnt_;
 
-    bool CheckUnique(EdgeId e) const {
-        TRACE("Checking conditions for edge start");
-        return Unique(vector<EdgeId>(this->g().in_begin(this->g().EdgeStart(e)), this->g().in_end(this->g().EdgeStart(e))), false)
-                || Unique(vector<EdgeId>(this->g().out_begin(this->g().EdgeEnd(e)), this->g().out_end(this->g().EdgeEnd(e))), true);
-    }
-
-    bool CheckThorn(EdgeId e) const {
+    bool CheckEdgeCounts(EdgeId e) const {
         if (this->g().EdgeStart(e) == this->g().EdgeEnd(e))
             return false;
-        if (this->g().RelatedVertices(this->g().EdgeStart(e),
-                                      this->g().EdgeEnd(e))) {
-            return true;
-        }
         if (this->g().OutgoingEdgeCount(this->g().EdgeStart(e)) != 2)
             return false;
         if (this->g().IncomingEdgeCount(this->g().EdgeStart(e)) != 1)
@@ -280,185 +253,95 @@ class ThornCondition : public EdgeCondition<Graph> {
             return false;
         if (this->g().IncomingEdgeCount(this->g().EdgeEnd(e)) != 2)
             return false;
-
-        auto dij = DijkstraHelper<Graph>::CreateBoundedDijkstra(this->g(), dijkstra_depth_);
-        dij.Run(this->g().EdgeStart(e));
-        vector<VertexId> reached = dij.ReachedVertices();
-        for (auto it = reached.begin(); it != reached.end(); ++it) {
-            if (*it != this->g().EdgeEnd(e)
-                    && this->g().RelatedVertices(*it, this->g().EdgeEnd(e))) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    template<class EdgeContainer>
-    bool CheckAlternativeCoverage(const EdgeContainer& edges, EdgeId base) const {
-        for (EdgeId e: edges) {
-            if (e != base && this->g().length(e) < 400
-                    && this->g().coverage(e) < 15 * this->g().coverage(base)) {
-                return false;
-            }
-        }
         return true;
     }
 
-    bool CheckCoverageAround(EdgeId e) const {
-        return CheckAlternativeCoverage(
-                this->g().IncidentEdges(this->g().EdgeStart(e)), e)
-                && CheckAlternativeCoverage(
-                        this->g().IncidentEdges(this->g().EdgeEnd(e)), e);
-    }
-
-    bool CheckUniqueness(EdgeId e, bool /*forward*/) const {
-        return this->g().length(e) >= uniqueness_length_;
-    }
-
- public:
+public:
 
-    ThornCondition(Graph& g, size_t uniqueness_length, size_t dijkstra_depth)
+    TopologicalThornCondition(Graph& g,
+                              size_t max_jump_dist,
+                              size_t max_edge_cnt = -1ul)
             : base(g),
-              uniqueness_length_(uniqueness_length),
-              dijkstra_depth_(dijkstra_depth) {
+              max_jump_distance_(max_jump_dist),
+              max_edge_cnt_(max_edge_cnt) {
     }
 
-    bool Check(EdgeId e) const {
-        bool tmp = (CheckUnique(e) || CheckCoverageAround(e));
-        if (tmp)
-            tmp &= CheckThorn(e);
-        return tmp;
-    }
+    bool Check(EdgeId e) const override {
+        const Graph& g = this->g();
+        if (!CheckEdgeCounts(e))
+            return false;
 
- private:
-    DECL_LOGGER("ThornCondition")
-    ;
+        //fixme micro-optimization to be removed
+        if (g.conjugate(g.EdgeStart(e)) == g.EdgeEnd(e)) {
+            return true;
+        }
 
-};
+        auto comparator = [](const Path& a, const Path& b) {return a.size() >= b.size();};
 
+        BestPathStorage<Graph, decltype(comparator)> callback(g, comparator);
+        ProcessPaths(g, 0, max_jump_distance_, g.EdgeStart(e), g.conjugate(g.EdgeEnd(e)), callback, max_edge_cnt_);
+        return (bool) callback.best_path();
+    }
+};
 
 template<class Graph>
-class MultiplicityCounter {
-private:
-    typedef typename Graph::VertexId VertexId;
+class AdditionalMDAThornCondition : public EdgeCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
-    const Graph &graph_;
+    typedef typename Graph::VertexId VertexId;
+    typedef EdgeCondition<Graph> base;
+    typedef std::vector<EdgeId> Path;
+
     size_t uniqueness_length_;
-    size_t max_depth_;
 
-    bool search(VertexId a, VertexId start, EdgeId e, size_t depth,
-                std::set<VertexId> &was, pair<size_t, size_t> &result) const {
-        if (depth > max_depth_)
-            return false;
-        if (was.count(a) == 1)
-            return true;
-        was.insert(a);
-        if (graph_.OutgoingEdgeCount(a) == 0
-            || graph_.IncomingEdgeCount(a) == 0)
-            return false;
-        for (auto I = graph_.out_begin(a), E = graph_.out_end(a); I != E; ++I) {
-            if (*I == e) {
-                if (a != start) {
-                    return false;
-                }
-            } else {
-                if (graph_.length(*I) >= uniqueness_length_) {
-                    result.second++;
-                } else {
-                    if (!search(graph_.EdgeEnd(*I), start, e,
-                                depth + 1 /*graph_.length(*it)*/, was, result))
-                        return false;
-                }
-            }
-        }
-        for (EdgeId in_e : graph_.IncomingEdges(a)) {
-            if (in_e == e) {
-                if (a != start) {
-                    return false;
-                }
-            } else {
-                if (graph_.length(in_e) >= uniqueness_length_) {
-                    result.first++;
-                } else {
-                    if (!search(graph_.EdgeStart(in_e), start, e,
-                                depth + 1 /*graph_.length(*it)*/, was, result))
-                        return false;
-                }
-            }
-        }
-        return true;
+    bool CheckUniqueness(EdgeId e) const {
+        return this->g().length(e) >= uniqueness_length_;
     }
 
-public:
-    MultiplicityCounter(const Graph &graph, size_t uniqueness_length,
-                        size_t max_depth)
-            : graph_(graph),
-              uniqueness_length_(uniqueness_length),
-              max_depth_(max_depth) {
+    bool CheckUnique(VertexId v) const {
+        return this->g().CheckUniqueIncomingEdge(v) &&
+                CheckUniqueness(this->g().GetUniqueIncomingEdge(v));
     }
 
-    size_t count(EdgeId e, VertexId start) const {
-        std::pair<size_t, size_t> result;
-        std::set<VertexId> was;
-        bool valid = search(start, start, e, 0, was, result);
-        if (!valid) {
-            return (size_t) (-1);
-        }
-        if (graph_.EdgeStart(e) == start) {
-            if (result.first < result.second) {
-                return (size_t) (-1);
-            }
-            return result.first - result.second;
-        } else {
-            if (result.first > result.second) {
-                return (size_t) (-1);
+    bool CheckUniqueCondition(EdgeId e) const {
+        TRACE("Checking conditions for edge start");
+        return CheckUnique(this->g().EdgeStart(e)) ||
+                CheckUnique(this->g().conjugate(this->g().EdgeEnd(e)));
+    }
+
+    template<class EdgeContainer>
+    bool CheckAlternativesForEC(const EdgeContainer& edges, EdgeId base) const {
+        for (EdgeId e: edges) {
+            if (e != base && this->g().length(e) < 400
+                    && math::ls(this->g().coverage(e) / this->g().coverage(base), 15.)) {
+                return false;
             }
-            return -result.first + result.second;
         }
+        return true;
     }
-};
-
-template<class Graph>
-class MultiplicityCountingCondition : public UniquenessPlausabilityCondition<Graph> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    typedef pred::TypedPredicate<EdgeId> EdgePredicate;
-    typedef UniquenessPlausabilityCondition<Graph> base;
 
-    MultiplicityCounter<Graph> multiplicity_counter_;
-    EdgePredicate plausiblity_condition_;
-
-public:
-    bool CheckUniqueness(EdgeId e, bool forward) const {
-        TRACE( "Checking " << this->g().int_id(e) << " for uniqueness in " << (forward ? "forward" : "backward") << " direction");
-        VertexId start =
-                forward ? this->g().EdgeEnd(e) : this->g().EdgeStart(e);
-        bool result = multiplicity_counter_.count(e, start) <= 1;
-        TRACE( "Edge " << this->g().int_id(e) << " is" << (result ? "" : " not") << " unique");
-        return result;
+    bool CheckForECAround(EdgeId e) const {
+        return CheckAlternativesForEC(
+                this->g().IncidentEdges(this->g().EdgeStart(e)), e)
+                && CheckAlternativesForEC(
+                this->g().IncidentEdges(this->g().EdgeEnd(e)), e);
     }
 
-    bool CheckPlausibility(EdgeId e, bool) const {
-        return plausiblity_condition_(e);
-    }
+ public:
 
-    MultiplicityCountingCondition(const Graph& g, size_t uniqueness_length,
-                                  EdgePredicate plausiblity_condition)
-            :
-              //todo why 8???
-              base(g),
-              multiplicity_counter_(g, uniqueness_length, 8),
-              plausiblity_condition_(plausiblity_condition) {
+    AdditionalMDAThornCondition(Graph& g, size_t uniqueness_length)
+            : base(g),
+              uniqueness_length_(uniqueness_length) {
+    }
 
+    bool Check(EdgeId e) const override {
+        return CheckUniqueCondition(e) || CheckForECAround(e);
     }
 
  private:
-
-    DECL_LOGGER("MultiplicityCountingCondition")
-    ;
+    DECL_LOGGER("AdditionalMDAThornCondition");
 };
 
-
+//todo move to rnaSPAdes simplification
 template<class Graph>
 class ECLoopRemover : public EdgeProcessingAlgorithm<Graph> {
     typedef std::less<typename Graph::EdgeId> Comparator;
@@ -468,7 +351,7 @@ class ECLoopRemover : public EdgeProcessingAlgorithm<Graph> {
 
     double ec_threshold_;
     double relative_threshold_;
-    const AbstractFlankingCoverage<Graph> &flanking_coverage_;
+    const FlankingCoverage<Graph> &flanking_coverage_;
     EdgeRemover<Graph> edge_remover_;
     size_t coverage_loops_removed = 0;
     size_t dead_loops_removed = 0;
@@ -549,8 +432,8 @@ class ECLoopRemover : public EdgeProcessingAlgorithm<Graph> {
 
 
 public:
-    ECLoopRemover(Graph &g, const AbstractFlankingCoverage<Graph> &flanking_coverage, double ec_threshold, double relative_threshold,
-                  HandlerF<Graph> removal_handler = 0): base(g),ec_threshold_(ec_threshold),
+    ECLoopRemover(Graph &g, const FlankingCoverage<Graph> &flanking_coverage, double ec_threshold, double relative_threshold,
+                  EdgeRemovalHandlerF<Graph> removal_handler = 0): base(g),ec_threshold_(ec_threshold),
                                                                             relative_threshold_(relative_threshold), flanking_coverage_(flanking_coverage),
                                                                             edge_remover_(g, removal_handler){
     }
@@ -562,61 +445,141 @@ private:
     DECL_LOGGER("ECLoopRemover");
 };
 
-
 template<class Graph>
-class HiddenECRemover: public EdgeProcessingAlgorithm<Graph> {
-    typedef EdgeProcessingAlgorithm<Graph> base;
+class MetaHiddenECRemover: public PersistentProcessingAlgorithm<Graph, typename Graph::VertexId> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-private:
+    typedef PersistentProcessingAlgorithm<Graph, VertexId> base;
+    const FlankingCoverage<Graph>& flanking_coverage_;
     size_t uniqueness_length_;
-    double unreliability_threshold_;
-    double ec_threshold_;
     double relative_threshold_;
-    const AbstractFlankingCoverage<Graph> &flanking_coverage_;
-    EdgeRemover<Graph> edge_remover_;
-    MultiplicityCountingCondition<Graph> condition_;
-private:
-    void RemoveHiddenEC(EdgeId edge) {
-        if (this->g().length(edge) <= this->g().k() || (edge == this->g().conjugate(edge) && this->g().length(edge) <= 2 * this->g().k()))
-            edge_remover_.DeleteEdge(edge);
-        else {
-            auto split_result = this->g().SplitEdge(edge, this->g().k());
-            edge_remover_.DeleteEdge(split_result.first);
+
+    EdgeDisconnector<Graph> disconnector_;
+
+    void DisconnectEdges(VertexId v) {
+        while (!this->g().IsDeadEnd(v)) {
+            disconnector_(*(this->g().out_begin(v)), /*compress*/false);
         }
     }
 
-    void RemoveHiddenECWithNoCompression(EdgeId edge) {
-        if (this->g().length(edge) <= this->g().k() || (edge == this->g().conjugate(edge) && this->g().length(edge) <= 2 * this->g().k())) {
-            edge_remover_.DeleteEdgeWithNoCompression(edge);
+    bool CheckUniqueness(EdgeId e) {
+        return UniquePathLengthLowerBound(this->g(), uniqueness_length_)(e);
+    }
+
+    void ProcessHiddenEC(VertexId v) {
+        VERIFY(this->g().OutgoingEdgeCount(v) == 2);
+        vector<EdgeId> edges(this->g().out_begin(v), this->g().out_end(v));
+        if (math::gr(flanking_coverage_.CoverageOfStart(edges.front()),
+                    flanking_coverage_.CoverageOfStart(edges.back()))) {
+            std::swap(edges.front(), edges.back());
+        }
+        double c1 = flanking_coverage_.CoverageOfStart(edges.front());
+        double c2 = flanking_coverage_.CoverageOfStart(edges.back());
+        TRACE("c1 " << c1 << "; c2 " << c2);
+        if (math::ls(c1 * relative_threshold_, c2)) {
+            TRACE("Disconnecting " << this->g().str(edges.front()));
+            disconnector_(edges.front());
         } else {
-            auto split_result = this->g().SplitEdge(edge, this->g().k());
-            edge_remover_.DeleteEdgeWithNoCompression(split_result.first);
+            TRACE("Disconnecting " << this->g().str(edges.front()) << " and " << this->g().str(edges.back()));
+            DisconnectEdges(v);
         }
     }
 
+    bool CheckSuspicious(VertexId v) {
+        if (this->g().IncomingEdgeCount(v) != 1 || this->g().OutgoingEdgeCount(v) != 2) {
+            return false;
+        }
+        vector<EdgeId> edges;
+        push_back_all(edges, this->g().OutgoingEdges(v));
+        VERIFY(edges.size() == 2);
+        if (this->g().conjugate(edges[0]) != edges[1]) {
+            return false;
+        }
+        return CheckUniqueness(this->g().GetUniqueIncomingEdge(v));
+    }
+
+protected:
+
+    bool Process(VertexId v) override {
+        if (CheckSuspicious(v)) {
+            ProcessHiddenEC(v);
+            return true;
+        }
+        return false;
+    }
+
+public:
+    MetaHiddenECRemover(Graph& g, size_t chunk_cnt,
+                    const FlankingCoverage<Graph> &flanking_coverage,
+                    size_t uniqueness_length,
+                    double relative_threshold,
+                    EdgeRemovalHandlerF<Graph> removal_handler = 0)
+            : base(g, nullptr, /*canonical only*/ false, std::less<VertexId>(), /*track changes*/false), 
+              flanking_coverage_(flanking_coverage),
+              uniqueness_length_(uniqueness_length),
+              relative_threshold_(relative_threshold),
+              disconnector_(g, removal_handler, g.k() + 1) {
+        this->interest_el_finder_ = std::make_shared<ParallelInterestingElementFinder<Graph, VertexId>>(
+                [&](VertexId v) {
+                    return CheckSuspicious(v);
+                }, chunk_cnt);
+    }
+
+private:
+    DECL_LOGGER("MetaHiddenECRemover");
+};
+
+//be careful unreliability_threshold_ is dependent on ec_threshold_!
+template<class Graph>
+class HiddenECRemover: public PersistentProcessingAlgorithm<Graph, typename Graph::VertexId> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef PersistentProcessingAlgorithm<Graph, VertexId> base;
+    const FlankingCoverage<Graph>& flanking_coverage_;
+    size_t uniqueness_length_;
+    double unreliability_threshold_;
+    double ec_threshold_;
+    double relative_threshold_;
+
+    EdgeDisconnector<Graph> disconnector_;
+
     void DisconnectEdges(VertexId v) {
-        while(!this->g().IsDeadEnd(v)) {
-            RemoveHiddenECWithNoCompression(*(this->g().out_begin(v)));
+        while (!this->g().IsDeadEnd(v)) {
+            disconnector_(*(this->g().out_begin(v)), /*compress*/false);
         }
     }
 
-    bool FindHiddenEC(VertexId v) {
+    bool CheckUniqueness(EdgeId e) {
+        //todo why 8???
+        omnigraph::MultiplicityCounter<Graph> mult_counter(this->g(), uniqueness_length_, 8);
+
+        vector<EdgeId> edges;
+        push_back_all(edges, this->g().OutgoingEdges(this->g().EdgeEnd(e)));
+        VERIFY(edges.size() == 2);
+        return (this->g().conjugate(edges[0]) == edges[1] && mult_counter.count(e, this->g().EdgeStart(e)) <= 1) ||
+                this->g().length(e) >= uniqueness_length_;
+    }
+
+    bool ProcessHiddenEC(VertexId v) {
+        TRACE("Processing outgoing edges for vertex " << this->g().str(v));
+        VERIFY(this->g().OutgoingEdgeCount(v) == 2)
         vector<EdgeId> edges(this->g().out_begin(v), this->g().out_end(v));
-        if(flanking_coverage_.GetInCov(edges[0]) > flanking_coverage_.GetInCov(edges[1])) {
-            auto tmp = edges[0];
-            edges[0] = edges[1];
-            edges[1] = tmp;
+        if (math::gr(flanking_coverage_.CoverageOfStart(edges.front()),
+                    flanking_coverage_.CoverageOfStart(edges.back()))) {
+            std::swap(edges.front(), edges.back());
         }
-//        cout << flanking_coverage_.GetInCov(edges[0]) << " " << flanking_coverage_.GetInCov(edges[1]) << endl;
-        if(flanking_coverage_.GetInCov(edges[1]) < unreliability_threshold_) {
+        double c1 = flanking_coverage_.CoverageOfStart(edges.front());
+        TRACE("Flank start of e1 " << this->g().str(edges.front()) << ": " << c1);
+        double c2 = flanking_coverage_.CoverageOfStart(edges.back());
+        TRACE("Flank start of e1 " << this->g().str(edges.back()) << ": " << c2);
+        if (math::ls(c2, unreliability_threshold_)) {
+            TRACE("Disconnecting both edges from vertex " << this->g().str(v));
             DisconnectEdges(v);
-//            cout << "disconnected" << endl;
             return true;
         }
-        if(flanking_coverage_.GetInCov(edges[0]) * relative_threshold_ < flanking_coverage_.GetInCov(edges[1]) && flanking_coverage_.GetInCov(edges[0]) < ec_threshold_) {
-            RemoveHiddenEC(edges[0]);
-//            cout << "success" << endl;
+        if (math::ls(c1 * relative_threshold_, c2) && math::ls(c1, ec_threshold_)) {
+            TRACE("Disconnecting edge " << this->g().str(edges.front()) << " from vertex " << this->g().str(v));
+            disconnector_(edges.front());
             return true;
         }
         return false;
@@ -626,30 +589,36 @@ private:
         if (this->g().IncomingEdgeCount(v) != 1 || this->g().OutgoingEdgeCount(v) != 2) {
             return false;
         }
-        vector<EdgeId> edges(this->g().out_begin(v), this->g().out_end(v));
-        return (edges.size() == 2 && this->g().conjugate(edges[0]) == edges[1] && condition_.CheckUniqueness(this->g().GetUniqueIncomingEdge(v), false)) || this->g().length(this->g().GetUniqueIncomingEdge(v)) >= uniqueness_length_;
+        return CheckUniqueness(this->g().GetUniqueIncomingEdge(v));
     }
 
-    bool ProcessEdge(EdgeId e) {
-        VertexId v = this->g().EdgeEnd(e);
-        if(CheckSuspicious(v)) {
-//            cout << "client: " << this->g().int_id(v) << endl;
-            return FindHiddenEC(v);
+protected:
+
+    bool Process(VertexId v) override {
+        if (CheckSuspicious(v)) {
+            return ProcessHiddenEC(v);
         }
         return false;
     }
 
 public:
-    HiddenECRemover(Graph& g, size_t uniqueness_length,
-                    const AbstractFlankingCoverage<Graph> &flanking_coverage,
-                    double unreliability_threshold, double ec_threshold,
-                    double relative_threshold,
-                    std::function<void(EdgeId)> removal_handler = 0)
-            : base(g), uniqueness_length_(uniqueness_length),
-              unreliability_threshold_(unreliability_threshold * ec_threshold), ec_threshold_(ec_threshold),
-              relative_threshold_(relative_threshold), flanking_coverage_(flanking_coverage),
-              edge_remover_(g, removal_handler),
-              condition_(g, uniqueness_length, pred::AlwaysTrue<EdgeId>()) {
+    HiddenECRemover(Graph& g, size_t chunk_cnt,
+                    const FlankingCoverage<Graph> &flanking_coverage,
+                    size_t uniqueness_length,
+                    double unreliability_coeff,
+                    double ec_threshold, double relative_threshold,
+                    EdgeRemovalHandlerF<Graph> removal_handler = 0)
+            : base(g, nullptr, /*canonical only*/ false, std::less<VertexId>(), /*track changes*/false), 
+              flanking_coverage_(flanking_coverage),
+              uniqueness_length_(uniqueness_length),
+              unreliability_threshold_(unreliability_coeff * ec_threshold), ec_threshold_(ec_threshold),
+              relative_threshold_(relative_threshold),
+              disconnector_(g, removal_handler, g.k() + 1) {
+        VERIFY(math::gr(unreliability_coeff, 0.));
+        this->interest_el_finder_ = std::make_shared<ParallelInterestingElementFinder<Graph, VertexId>>(
+                [&](VertexId v) {
+                    return CheckSuspicious(v);
+                }, chunk_cnt);
     }
 
 private:
diff --git a/src/modules/algorithms/simplification/mf_ec_remover.hpp b/src/common/modules/simplification/mf_ec_remover.hpp
similarity index 100%
rename from src/modules/algorithms/simplification/mf_ec_remover.hpp
rename to src/common/modules/simplification/mf_ec_remover.hpp
diff --git a/src/modules/algorithms/simplification/parallel_simplification_algorithms.hpp b/src/common/modules/simplification/parallel_simplification_algorithms.hpp
similarity index 84%
rename from src/modules/algorithms/simplification/parallel_simplification_algorithms.hpp
rename to src/common/modules/simplification/parallel_simplification_algorithms.hpp
index bea146c..f33075b 100644
--- a/src/modules/algorithms/simplification/parallel_simplification_algorithms.hpp
+++ b/src/common/modules/simplification/parallel_simplification_algorithms.hpp
@@ -9,10 +9,11 @@
 
 #include "cleaner.hpp"
 #include "bulge_remover.hpp"
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "assembly_graph/graph_support/graph_processing_algorithm.hpp"
+#include "assembly_graph/graph_support/parallel_processing.hpp"
 #include "assembly_graph/graph_support/basic_edge_conditions.hpp"
-#include "assembly_graph/graph_core/construction_helper.hpp"
+#include "assembly_graph/core/construction_helper.hpp"
 #include "assembly_graph/graph_support/marks_and_locks.hpp"
 #include "compressor.hpp"
 
@@ -20,30 +21,16 @@ namespace debruijn {
 
 namespace simplification {
 
-//    bool EnableParallel() {
-//        if (simplif_cfg_.presimp.parallel) {
-//            INFO("Trying to enable parallel presimplification.");
-//            if (gp_.g.AllHandlersThreadSafe()) {
-//                return true;
-//            } else {
-//                WARN("Not all handlers are threadsafe, switching to non-parallel presimplif");
-//                //gp.g.PrintHandlersNames();
-//            }
-//        }
-//        return false;
-//    }
-
 template<class Graph>
 class ParallelTipClippingFunctor {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef std::function<void(EdgeId)> HandlerF;
     typedef omnigraph::GraphElementLock<VertexId> VertexLockT;
 
     Graph& g_;
     size_t length_bound_;
     double coverage_bound_;
-    HandlerF handler_f_;
+    omnigraph::EdgeRemovalHandlerF<Graph> handler_f_;
 
     size_t LockingIncomingCount(VertexId v) const {
         VertexLockT lock(v);
@@ -69,7 +56,8 @@ class ParallelTipClippingFunctor {
 
 public:
 
-    ParallelTipClippingFunctor(Graph& g, size_t length_bound, double coverage_bound, HandlerF handler_f = 0)
+    ParallelTipClippingFunctor(Graph& g, size_t length_bound, double coverage_bound,
+                               omnigraph::EdgeRemovalHandlerF<Graph> handler_f = nullptr)
             : g_(g),
               length_bound_(length_bound),
               coverage_bound_(coverage_bound),
@@ -145,7 +133,7 @@ class ParallelSimpleBRFunctor {
             if (g_.length(e) <= max_length_ && math::le(g_.coverage(e), max_coverage_)) {
                 EdgeId alt = Alternative(e, edges);
                 if (alt != EdgeId(0) && math::ge(g_.coverage(alt) * max_relative_coverage_, g_.coverage(e))) {
-                    //todo is not work in multiple threads for now :)
+                    //does not work in multiple threads for now...
                     //Reasons: id distribution, kmer-mapping
                     handler_f_(e);
                     g_.GlueEdges(e, alt);
@@ -244,7 +232,6 @@ template<class Graph>
 class CriticalEdgeMarker {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef std::function<void(EdgeId)> HandlerF;
 
     Graph& g_;
     size_t chunk_cnt_;
@@ -298,13 +285,12 @@ template<class Graph>
 class ParallelLowCoverageFunctor {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef std::function<void(EdgeId)> HandlerF;
     typedef omnigraph::GraphElementLock<VertexId> VertexLockT;
 
     Graph& g_;
     typename Graph::HelperT helper_;
-    pred::TypedPredicate<EdgeId> ec_condition_;
-    HandlerF handler_f_;
+    func::TypedPredicate<EdgeId> ec_condition_;
+    omnigraph::EdgeRemovalHandlerF<Graph> handler_f_;
 
     omnigraph::GraphElementMarker<EdgeId> edge_marker_;
     vector<EdgeId> edges_to_remove_;
@@ -324,10 +310,11 @@ class ParallelLowCoverageFunctor {
 public:
 
     //should be launched with conjugate copies filtered
-    ParallelLowCoverageFunctor(Graph& g, size_t max_length, double max_coverage, HandlerF handler_f = 0)
+    ParallelLowCoverageFunctor(Graph& g, size_t max_length, double max_coverage,
+                               omnigraph::EdgeRemovalHandlerF<Graph> handler_f = nullptr)
             : g_(g),
               helper_(g_.GetConstructionHelper()),
-              ec_condition_(pred::And(pred::And(omnigraph::LengthUpperBound<Graph>(g, max_length),
+              ec_condition_(func::And(func::And(omnigraph::LengthUpperBound<Graph>(g, max_length),
                                               omnigraph::CoverageUpperBound<Graph>(g, max_coverage)),
                                      omnigraph::AlternativesPresenceCondition<Graph>(g))),
                             handler_f_(handler_f) {}
@@ -764,8 +751,9 @@ public:
         VERIFY(chunk_iterators.size() > 1);
         omnigraph::SmartSetIterator<Graph, ElementType, Comparator> it(g_, false, comp);
 
-        FillInterestingFromChunkIterators(chunk_iterators, it,
-                                          std::bind(&Algo::IsOfInterest, std::ref(algo), std::placeholders::_1));
+        omnigraph::FindInterestingFromChunkIterators(chunk_iterators,
+                                          [&](ElementType el) {return algo.IsOfInterest(el);},
+                                          [&](ElementType el) {it.push(el);});
 
         bool changed = false;
         for (; !it.IsEnd(); ++it) {
@@ -775,28 +763,26 @@ public:
     }
 
 private:
-    DECL_LOGGER("SemiParallelAlgorithmRunner")
-    ;
+    DECL_LOGGER("SemiParallelAlgorithmRunner");
 };
 
-//todo generalize to use for other algorithms if needed
 template<class Graph>
 class SemiParallelEdgeRemovingAlgorithm {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
     Graph& g_;
-    pred::TypedPredicate<EdgeId> condition_;
+    func::TypedPredicate<EdgeId> condition_;
     omnigraph::EdgeRemover<Graph> edge_remover_;
 
 public:
     SemiParallelEdgeRemovingAlgorithm(Graph& g,
-                                      pred::TypedPredicate<EdgeId> condition,
+                                      func::TypedPredicate<EdgeId> condition,
                                       std::function<void(EdgeId)> removal_handler = 0) :
             g_(g), condition_(condition), edge_remover_(g, removal_handler) {
     }
 
     bool IsOfInterest(EdgeId e) const {
-        return condition_->Check(e);
+        return condition_(e);
     }
 
     bool Process(EdgeId e) {
@@ -815,6 +801,100 @@ bool RunEdgeAlgorithm(Graph& g, AlgoRunner& runner, Algo& algo, size_t chunk_cnt
     return runner.RunFromChunkIterators(algo, omnigraph::IterationHelper<Graph, typename Graph::EdgeId>(g).Chunks(chunk_cnt));
 }
 
+template<class Graph>
+void ParallelCompress(Graph &g, size_t chunk_cnt, bool loop_post_compression = true) {
+    INFO("Parallel compression");
+    debruijn::simplification::ParallelCompressor<Graph> compressor(g);
+    TwoStepAlgorithmRunner<Graph, typename Graph::VertexId> runner(g, false);
+    RunVertexAlgorithm(g, runner, compressor, chunk_cnt);
+
+    //have to call cleaner to get rid of new isolated vertices
+    omnigraph::Cleaner<Graph>(g, chunk_cnt).Run();
+
+    if (loop_post_compression) {
+        INFO("Launching post-compression to compress loops");
+        omnigraph::CompressAllVertices(g, chunk_cnt);
+    }
+}
+
+template<class Graph>
+bool ParallelClipTips(Graph &g,
+                      size_t max_length,
+                      double max_coverage,
+                      size_t chunk_cnt,
+                      omnigraph::EdgeRemovalHandlerF<Graph> removal_handler = nullptr) {
+    INFO("Parallel tip clipping");
+
+    debruijn::simplification::ParallelTipClippingFunctor<Graph> tip_clipper(g,
+                                                                            max_length, max_coverage, removal_handler);
+
+    AlgorithmRunner<Graph, typename Graph::VertexId> runner(g);
+
+    RunVertexAlgorithm(g, runner, tip_clipper, chunk_cnt);
+
+    ParallelCompress(g, chunk_cnt);
+    //Cleaner is launched inside ParallelCompression
+    //CleanGraph(g, info.chunk_cnt());
+
+    return true;
+}
+
+//template<class Graph>
+//bool ParallelRemoveBulges(Graph &g,
+//              const config::debruijn_config::simplification::bulge_remover &br_config,
+//              size_t /*read_length*/,
+//              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
+//    INFO("Parallel bulge remover");
+//
+//    size_t max_length = LengthThresholdFinder::MaxBulgeLength(
+//        g.k(), br_config.max_bulge_length_coefficient,
+//        br_config.max_additive_length_coefficient);
+//
+//    DEBUG("Max bulge length " << max_length);
+//
+//    debruijn::simplification::ParallelSimpleBRFunctor<Graph> bulge_remover(g,
+//                            max_length,
+//                            br_config.max_coverage,
+//                            br_config.max_relative_coverage,
+//                            br_config.max_delta,
+//                            br_config.max_relative_delta,
+//                            removal_handler);
+//    for (VertexId v : g) {
+//        bulge_remover(v);
+//    }
+//
+//    Compress(g);
+//    return true;
+//}
+
+template<class Graph>
+bool ParallelEC(Graph &g,
+                size_t max_length,
+                double max_coverage,
+                size_t chunk_cnt,
+                omnigraph::EdgeRemovalHandlerF<Graph> removal_handler = nullptr) {
+    INFO("Parallel ec remover");
+
+    debruijn::simplification::CriticalEdgeMarker<Graph> critical_marker(g, chunk_cnt);
+    critical_marker.PutMarks();
+
+    debruijn::simplification::ParallelLowCoverageFunctor<Graph> ec_remover(g,
+                                                                           max_length,
+                                                                           max_coverage,
+                                                                           removal_handler);
+
+    TwoStepAlgorithmRunner<Graph, typename Graph::EdgeId> runner(g, true);
+
+    RunEdgeAlgorithm(g, runner, ec_remover, chunk_cnt);
+
+    critical_marker.ClearMarks();
+
+    ParallelCompress(g, chunk_cnt);
+    //called in parallel compress
+    //CleanGraph(g, info.chunk_cnt());
+    return true;
+}
+
 }
 
 }
diff --git a/src/modules/algorithms/simplification/relative_coverage_remover.hpp b/src/common/modules/simplification/relative_coverage_remover.hpp
similarity index 69%
rename from src/modules/algorithms/simplification/relative_coverage_remover.hpp
rename to src/common/modules/simplification/relative_coverage_remover.hpp
index bc6da7e..177f5b6 100644
--- a/src/modules/algorithms/simplification/relative_coverage_remover.hpp
+++ b/src/common/modules/simplification/relative_coverage_remover.hpp
@@ -7,24 +7,20 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "assembly_graph/components/graph_component.hpp"
 #include "visualization/graph_colorer.hpp"
 #include "assembly_graph/graph_support/graph_processing_algorithm.hpp"
+#include "assembly_graph/graph_support/detail_coverage.hpp"
+#include "assembly_graph/graph_support/comparators.hpp"
+#include "assembly_graph/graph_support/basic_edge_conditions.hpp"
+#include "assembly_graph/graph_support/parallel_processing.hpp"
+#include "assembly_graph/components/splitters.hpp"
 
 namespace omnigraph {
 
 namespace simplification {
 
-template<class EdgeContainer>
-void SingleEdgeAdapter(
-        const EdgeContainer& edges,
-        std::function<void(typename EdgeContainer::value_type)> single_edge_handler_f) {
-    for (auto e : edges) {
-        single_edge_handler_f(e);
-    }
-}
-
 namespace relative_coverage {
 
 template<class Graph>
@@ -136,7 +132,7 @@ public:
         }
         return answer;
     }
-    
+
     //terminating edges, going out of the component
     set<EdgeId> terminating_out_edges() const {
         set<EdgeId> answer;
@@ -171,24 +167,25 @@ template<class Graph>
 class RelativeCoverageHelper {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef std::function<double(EdgeId, VertexId)> LocalCoverageFT;
 
     const Graph& g_;
-    LocalCoverageFT local_coverage_f_;
+    const FlankingCoverage<Graph>& flanking_cov_;
     double min_coverage_gap_;
 
 public:
-    RelativeCoverageHelper(const Graph& g, LocalCoverageFT local_coverage_f,
+    RelativeCoverageHelper(const Graph& g,
+                           const FlankingCoverage<Graph>& flanking_cov,
                            double min_coverage_gap)
             : g_(g),
-              local_coverage_f_(local_coverage_f),
+              flanking_cov_(flanking_cov),
               min_coverage_gap_(min_coverage_gap) {
         VERIFY(math::gr(min_coverage_gap, 1.));
     }
 
     double LocalCoverage(EdgeId e, VertexId v) const {
-        DEBUG("Local coverage of edge " << g_.str(e) << " around vertex " << g_.str(v) << " was " << local_coverage_f_(e, v));
-        return local_coverage_f_(e, v);
+        double ans = flanking_cov_.LocalCoverage(e, v);
+        DEBUG("Local coverage of edge " << g_.str(e) << " around vertex " << g_.str(v) << " was " << ans);
+        return ans;
     }
 
     template<class EdgeContainer>
@@ -207,6 +204,16 @@ public:
                         base_coverage * min_coverage_gap_);
     }
 
+    bool AnyHighlyCoveredOnBothSides(VertexId v, double base_coverage) const {
+        return CheckAnyHighlyCovered(g_.IncomingEdges(v), v, base_coverage) &&
+                CheckAnyHighlyCovered(g_.OutgoingEdges(v), v, base_coverage);
+    }
+
+    bool AnyHighlyCoveredOnFourSides(EdgeId e) const {
+        return AnyHighlyCoveredOnBothSides(g_.EdgeStart(e), LocalCoverage(e, g_.EdgeStart(e))) &&
+                AnyHighlyCoveredOnBothSides(g_.EdgeEnd(e), LocalCoverage(e, g_.EdgeEnd(e)));
+    }
+
     double RelativeCoverageToReport(VertexId v, double base_coverage) const {
         return std::min(MaxLocalCoverage(g_.OutgoingEdges(v), v),
                         MaxLocalCoverage(g_.IncomingEdges(v), v))
@@ -218,6 +225,44 @@ private:
 };
 
 template<class Graph>
+class RelativeCovDisconnectionCondition : public EdgeCondition<Graph> {
+    typedef EdgeCondition<Graph> base;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    const RelativeCoverageHelper<Graph> rel_helper_;
+    const double diff_mult_;
+
+    //Total length of highly-covered neighbourhood
+    // We believe that if high-covered component is small it is likely to be repeat or loop
+    const size_t min_neighbourhood_size_;
+public:
+    RelativeCovDisconnectionCondition(const Graph& g,
+                                      const FlankingCoverage<Graph>& flanking_cov,
+                                      double diff_mult,
+                                      size_t min_neighbourhood_size) :
+            base(g),
+            rel_helper_(g, flanking_cov, diff_mult),
+            diff_mult_(diff_mult),
+            min_neighbourhood_size_(min_neighbourhood_size) {
+    }
+
+    bool Check(EdgeId e) const override {
+        VertexId v = this->g().EdgeStart(e);
+        double coverage_edge_around_v = rel_helper_.LocalCoverage(e, v);
+        DEBUG("Local flanking coverage - " << coverage_edge_around_v);
+        DEBUG("Max local coverage incoming  - " << rel_helper_.MaxLocalCoverage(this->g().IncomingEdges(v), v));
+        DEBUG("Max local coverage outgoing  - " << rel_helper_.MaxLocalCoverage(this->g().OutgoingEdges(v), v));
+        return rel_helper_.AnyHighlyCoveredOnBothSides(v, coverage_edge_around_v) &&
+                HighCoverageComponentFinder<Graph>(this->g(), this->g().coverage(e) * diff_mult_)
+                       .EdgeSummaryLength(v) >= min_neighbourhood_size_;
+    }
+
+private:
+    DECL_LOGGER("RelativeCovDisconnectionCondition");
+};
+
+namespace component_remover {
+template<class Graph>
 class LongestPathFinder {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
@@ -276,7 +321,7 @@ class LongestPathFinder {
 
 public:
     LongestPathFinder(const Component<Graph>& component)
-    : component_(component), g_(component.g()), cycle_detected_(false) {
+            : component_(component), g_(component.g()), cycle_detected_(false) {
     }
 
     //-1u if component contains a cycle or no path between terminating vertices
@@ -312,7 +357,9 @@ class ComponentChecker {
         for (EdgeId e : component.edges()) {
             if (math::gr(g_.coverage(e), max_coverage_)) {
                 TRACE("Too high coverage! Component contains highly covered edge " << g_.str(e)
-                     << " of coverage " << g_.coverage(e) << " while threshold was " << max_coverage_);
+                                                                                   << " of coverage " << g_.coverage(e)
+                                                                                   << " while threshold was "
+                                                                                   << max_coverage_);
                 return false;
             }
         }
@@ -344,18 +391,21 @@ public:
         size_t longest_connecting_path = LongestPathFinder<Graph>(component).Find();
         if (longest_connecting_path != -1u) {
             if (longest_connecting_path >= longest_connecting_path_bound_) {
-                TRACE("Length of longest path: " << longest_connecting_path << "; threshold: " << longest_connecting_path_bound_);
+                TRACE("Length of longest path: " << longest_connecting_path << "; threshold: "
+                                                 << longest_connecting_path_bound_);
                 return false;
             }
         } else {
             TRACE("Failed to find longest connecting path (check for cycles)");
         }
         if (!component.contains_deadends()
-                && component.length() > length_bound_) {
-            TRACE("Too long component of length " << component.length() << "! Longer than length bound " << length_bound_);
+            && component.length() > length_bound_) {
+            TRACE("Too long component of length " << component.length() << "! Longer than length bound "
+                                                  << length_bound_);
             return false;
         } else if (component.length() > tip_allowing_length_bound_) {
-            TRACE("Too long component of length " << component.length() << "! Longer than tip allowing length bound " << tip_allowing_length_bound_);
+            TRACE("Too long component of length " << component.length() << "! Longer than tip allowing length bound "
+                                                  << tip_allowing_length_bound_);
             return false;
         }
 
@@ -366,79 +416,8 @@ private:
     DECL_LOGGER("RelativelyLowCoveredComponentChecker");
 };
 
-//Removes last (k+1)-mer of graph edge
-template<class Graph>
-class EdgeDisconnector {
-    typedef typename Graph::EdgeId EdgeId;
-    Graph& g_;
-    EdgeRemover<Graph> edge_remover_;
-
-public:
-    EdgeDisconnector(Graph& g,
-                     HandlerF<Graph> removal_handler = nullptr):
-                                 g_(g), edge_remover_(g, removal_handler) {
-    }
-
-    EdgeId operator()(EdgeId e) {
-        VERIFY(g_.length(e) > 1);
-        pair<EdgeId, EdgeId> split_res = g_.SplitEdge(e, 1);
-        edge_remover_.DeleteEdge(split_res.first);
-        return split_res.first;
-    }
-};
-
-//todo make parallel
-template<class Graph>
-class RelativeCoverageDisconnector: public EdgeProcessingAlgorithm<Graph> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    typedef std::function<double(EdgeId, VertexId)> LocalCoverageFT;
-    typedef EdgeProcessingAlgorithm<Graph> base;
-
-    const RelativeCoverageHelper<Graph> rel_helper_;
-    EdgeDisconnector<Graph> disconnector_;
-    size_t cnt_;
-public:
-    RelativeCoverageDisconnector(Graph& g,
-            LocalCoverageFT local_coverage_f, double diff_mult) :
-            base(g, false),
-            rel_helper_(g, local_coverage_f, diff_mult),
-            disconnector_(g),
-            cnt_(0) {
-    }
-
-    ~RelativeCoverageDisconnector() {
-        DEBUG("Disconnected edge cnt " << cnt_);
-    }
-
-protected:
-    bool ProcessEdge(EdgeId edge) {
-        DEBUG("Processing edge " << this->g().int_id(edge));
-        VertexId v = this->g().EdgeStart(edge);
-        double coverage_edge_around_v = rel_helper_.LocalCoverage(edge, v);
-        DEBUG("Local flanking coverage - " << coverage_edge_around_v);
-        DEBUG("Max local coverage incoming  - " << rel_helper_.MaxLocalCoverage(this->g().IncomingEdges(v), v));
-        DEBUG("Max local coverage outgoing  - " << rel_helper_.MaxLocalCoverage(this->g().OutgoingEdges(v), v));
-        if (this->g().length(edge) > 1 &&
-                rel_helper_.CheckAnyHighlyCovered(this->g().IncomingEdges(v), v, coverage_edge_around_v) &&
-                rel_helper_.CheckAnyHighlyCovered(this->g().OutgoingEdges(v), v, coverage_edge_around_v)) {
-            DEBUG("Disconnecting");
-            disconnector_(edge);
-            cnt_++;
-            return true;
-        } else {
-            DEBUG("No need to disconnect");
-            return false;
-      }
-    }
-
-private:
-
-    DECL_LOGGER("RelativeCoverageDisconnector");
-};
-
 template<class Graph>
-class ComponentSearcher {
+class InnerComponentSearcher {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 
@@ -448,10 +427,10 @@ class ComponentSearcher {
     Component<Graph> component_;
 
 public:
-    ComponentSearcher(const Graph& g,
-                      const RelativeCoverageHelper<Graph>& rel_helper,
-            const ComponentChecker<Graph>& checker,
-            EdgeId first_edge)
+    InnerComponentSearcher(const Graph& g,
+                           const RelativeCoverageHelper<Graph>& rel_helper,
+                           const ComponentChecker<Graph>& checker,
+                           EdgeId first_edge)
             : g_(g), rel_helper_(rel_helper), checker_(checker),
               component_(g_, first_edge) {
     }
@@ -492,8 +471,8 @@ private:
                 RetainEdgesFromComponent(g_.IncidentEdges(v)), v);
         return CheckAnyFilteredHighlyCovered(g_.OutgoingEdges(v),
                                              v, base_coverage)
-                && CheckAnyFilteredHighlyCovered(
-                        g_.IncomingEdges(v), v, base_coverage);
+               && CheckAnyFilteredHighlyCovered(
+                g_.IncomingEdges(v), v, base_coverage);
     }
 
     template<class EdgeContainer>
@@ -528,20 +507,14 @@ private:
         return answer;
     }
 
-    DECL_LOGGER("RelativelyLowCoveredComponentSearcher")
-    ;
+    DECL_LOGGER("InnerComponentSearcher");
 };
 
-//currently works with conjugate graphs only (due to the assumption in the outer cycle)
 template<class Graph>
-class RelativeCoverageComponentRemover : public EdgeProcessingAlgorithm<Graph> {
-    typedef EdgeProcessingAlgorithm<Graph> base;
-    typedef typename Graph::EdgeId EdgeId;
+class RelativeCovComponentFinder {
     typedef typename Graph::VertexId VertexId;
-    typedef std::function<double(EdgeId, VertexId)> LocalCoverageFT;
-    typedef typename ComponentRemover<Graph>::HandlerF HandlerF;
-    typedef pred::TypedPredicate<EdgeId> ProceedConditionT;
-
+    typedef typename Graph::EdgeId EdgeId;
+    const Graph& g_;
     RelativeCoverageHelper<Graph> rel_helper_;
     size_t length_bound_;
     size_t tip_allowing_length_bound_;
@@ -550,57 +523,55 @@ class RelativeCoverageComponentRemover : public EdgeProcessingAlgorithm<Graph> {
     //bound on the number of inner vertices
     size_t vertex_count_limit_;
     std::string vis_dir_;
-    ComponentRemover<Graph> component_remover_;
 
-    size_t fail_cnt_;
-    size_t succ_cnt_;
+    mutable std::atomic_uint fail_cnt_;
+    mutable std::atomic_uint succ_cnt_;
 
-    void VisualizeNontrivialComponent(const set<typename Graph::EdgeId>& edges, bool success) {
-        auto colorer = omnigraph::visualization::DefaultColorer(this->g());
-        auto edge_colorer = make_shared<visualization::CompositeEdgeColorer<Graph>>("black");
+    void VisualizeNontrivialComponent(const set<typename Graph::EdgeId>& edges, bool success) const {
+        auto colorer = visualization::graph_colorer::DefaultColorer(g_);
+        auto edge_colorer = make_shared<visualization::graph_colorer::CompositeEdgeColorer<Graph>>("black");
         edge_colorer->AddColorer(colorer);
-        edge_colorer->AddColorer(make_shared<visualization::SetColorer<Graph>>(this->g(), edges, "green"));
-    //    shared_ptr<visualization::GraphColorer<Graph>>
-        auto resulting_colorer = make_shared<visualization::CompositeGraphColorer<Graph>>(colorer, edge_colorer);
+        edge_colorer->AddColorer(make_shared<visualization::graph_colorer::SetColorer<Graph>>(g_, edges, "green"));
+        //    shared_ptr<visualization::graph_colorer::GraphColorer<Graph>>
+        auto resulting_colorer = make_shared<visualization::graph_colorer::CompositeGraphColorer<Graph>>(colorer, edge_colorer);
 
-        StrGraphLabeler<Graph> str_labeler(this->g());
-        CoverageGraphLabeler<Graph> cov_labler(this->g());
-        CompositeLabeler<Graph> labeler(str_labeler, cov_labler);
+        visualization::graph_labeler::StrGraphLabeler<Graph> str_labeler(g_);
+        visualization::graph_labeler::CoverageGraphLabeler<Graph> cov_labler(g_);
+        visualization::graph_labeler::CompositeLabeler<Graph> labeler(str_labeler, cov_labler);
 
         if (edges.size() > 1) {
             set<typename Graph::VertexId> vertices;
             for (auto e : edges) {
-                vertices.insert(this->g().EdgeStart(e));
-                vertices.insert(this->g().EdgeEnd(e));
+                vertices.insert(g_.EdgeStart(e));
+                vertices.insert(g_.EdgeEnd(e));
             }
-    
-    
+
             auto filename = success ? vis_dir_ + "/success/" + ToString(succ_cnt_++) : vis_dir_ + "/fail/" + ToString(fail_cnt_++);
-            visualization::WriteComponent(
-                    ComponentCloser<Graph>(this->g(), 0).CloseComponent(GraphComponent<Graph>(this->g(), vertices.begin(), vertices.end())),
+            visualization::visualization_utils::WriteComponent(
+                    ComponentCloser<Graph>(g_, 0).CloseComponent(
+                            GraphComponent<Graph>::FromVertices(g_, vertices)),
                     filename + ".dot", colorer, labeler);
         }
     }
 
 public:
-    RelativeCoverageComponentRemover(
-            Graph& g, LocalCoverageFT local_coverage_f,
+    RelativeCovComponentFinder(Graph& g,
+            const FlankingCoverage<Graph>& flanking_cov,
             double min_coverage_gap,
             size_t length_bound,
             size_t tip_allowing_length_bound,
             size_t longest_connecting_path_bound,
-            double max_coverage = std::numeric_limits<double>::max(),
-            HandlerF handler_function = 0, size_t vertex_count_limit = 10, 
-            std::string vis_dir = "")
-            : base(g),
-              rel_helper_(g, local_coverage_f, min_coverage_gap),
+            double max_coverage,
+            size_t vertex_count_limit,
+            const std::string& vis_dir)
+            : g_(g),
+              rel_helper_(g, flanking_cov, min_coverage_gap),
               length_bound_(length_bound),
               tip_allowing_length_bound_(tip_allowing_length_bound),
               longest_connecting_path_bound_(longest_connecting_path_bound),
               max_coverage_(max_coverage),
               vertex_count_limit_(vertex_count_limit),
               vis_dir_(vis_dir),
-              component_remover_(g, handler_function),
               fail_cnt_(0),
               succ_cnt_(0) {
         VERIFY(math::gr(min_coverage_gap, 1.));
@@ -613,43 +584,34 @@ public:
         }
     }
 
-protected:
-
-    bool ProcessEdge(EdgeId e) {
-        TRACE("Processing edge " << this->g().str(e));
+    boost::optional<Component<Graph>> operator()(EdgeId e) const {
+        TRACE("Processing edge " << g_.str(e));
 
         //here we use that the graph is conjugate!
-        VertexId v = this->g().EdgeStart(e);
-        if (this->g().IsDeadEnd(v) && this->g().IsDeadStart(v)) {
-            TRACE("Isolated");
-            return false;
-        }
-        if (this->g().IsDeadEnd(v) || this->g().IsDeadStart(v)) {
+        VertexId v = g_.EdgeStart(e);
+        if (g_.IncomingEdgeCount(v) == 0 || g_.OutgoingEdgeCount(v) < 2/*==1*/) {
             TRACE("Tip");
-            return false;
+            return boost::none;
         }
 
         double local_cov = rel_helper_.LocalCoverage(e, v);
 
-        TRACE("Local coverage around start " << this->g().str(v) << " is " << local_cov);
+        TRACE("Local coverage around start " << g_.str(v) << " is " << local_cov);
 
         //since min_coverage_gap_ > 1, we don't need to think about e here
         TRACE("Checking presence of highly covered edges around start")
-        if (rel_helper_.CheckAnyHighlyCovered(this->g().OutgoingEdges(v), v, local_cov)
-                && rel_helper_.CheckAnyHighlyCovered(this->g().IncomingEdges(v), v,
-                                         local_cov)) {
+        if (rel_helper_.AnyHighlyCoveredOnBothSides(v, local_cov)) {
             TRACE("Looking for component");
-            ComponentChecker<Graph> checker(this->g(), vertex_count_limit_, length_bound_,
+            ComponentChecker<Graph> checker(g_, vertex_count_limit_, length_bound_,
                                             tip_allowing_length_bound_,
                                             longest_connecting_path_bound_, max_coverage_);
             //case of e being loop is handled implicitly!
-            ComponentSearcher<Graph> component_searcher(
-                    this->g(), rel_helper_, checker, e);
+            InnerComponentSearcher<Graph> component_searcher(
+                    g_, rel_helper_, checker, e);
+
             if (component_searcher.FindComponent()) {
                 TRACE("Deleting component");
-                const Component<Graph>& component = component_searcher.component();
-                component_remover_.DeleteComponent(component.edges());
-                return true;
+                return boost::optional<Component<Graph>>(component_searcher.component());
             } else {
                 TRACE("Failed to find component");
                 if (!vis_dir_.empty()) {
@@ -660,8 +622,63 @@ protected:
         } else {
             TRACE("No highly covered edges around");
         }
+        return boost::none;
+    }
+
+private:
+    DECL_LOGGER("RelativeCovComponentFinder")
+};
+} //namespace component_remover
+
+//currently works with conjugate graphs only (due to the assumption in the outer cycle)
+template<class Graph>
+class RelativeCoverageComponentRemover : public PersistentProcessingAlgorithm<Graph,
+        typename Graph::EdgeId, CoverageComparator<Graph>> {
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentProcessingAlgorithm<Graph, EdgeId, CoverageComparator<Graph>> base;
+    typedef typename ComponentRemover<Graph>::HandlerF HandlerF;
+
+    component_remover::RelativeCovComponentFinder<Graph> finder_;
+    ComponentRemover<Graph> component_remover_;
+
+public:
+    RelativeCoverageComponentRemover(
+            Graph& g,
+            size_t chunk_cnt,
+            const FlankingCoverage<Graph>& flanking_cov,
+            double min_coverage_gap,
+            size_t length_bound,
+            size_t tip_allowing_length_bound,
+            size_t longest_connecting_path_bound,
+            double max_coverage = std::numeric_limits<double>::max(),
+            HandlerF handler_function = nullptr, size_t vertex_count_limit = 10,
+            std::string vis_dir = "")
+            : base(g, nullptr, /*canonical only*/ false, 
+                    CoverageComparator<Graph>(g), /*track changes*/ false),
+              finder_(g, flanking_cov,
+                      min_coverage_gap, length_bound,
+                      tip_allowing_length_bound, longest_connecting_path_bound,
+                      max_coverage, vertex_count_limit, vis_dir),
+              component_remover_(g, handler_function) {
+        this->interest_el_finder_ = std::make_shared<ParallelInterestingElementFinder<Graph, EdgeId>>(
+                [&](EdgeId e) { return finder_(e); }, chunk_cnt);
+    }
 
-        return false;
+protected:
+
+    bool Process(EdgeId e) override {
+        DEBUG("Processing edge " << this->g().str(e));
+        auto opt_component = finder_(e);
+        if (!opt_component) {
+            DEBUG("Failed to detect component starting with edge " << this->g().str(e));
+            return false;
+        }
+        VERIFY(opt_component->edges().size());
+        DEBUG("Detected component edge cnt: " << opt_component->edges().size());
+        component_remover_.DeleteComponent(opt_component->edges());
+        DEBUG("Relatively low coverage component removed");
+        return true;
     }
 
 private:
@@ -670,5 +687,4 @@ private:
 
 }
 }
-
 }
diff --git a/src/modules/algorithms/simplification/tip_clipper.hpp b/src/common/modules/simplification/tip_clipper.hpp
similarity index 84%
rename from src/modules/algorithms/simplification/tip_clipper.hpp
rename to src/common/modules/simplification/tip_clipper.hpp
index a4b7db3..7f87d66 100644
--- a/src/modules/algorithms/simplification/tip_clipper.hpp
+++ b/src/common/modules/simplification/tip_clipper.hpp
@@ -8,10 +8,10 @@
 #pragma once
 
 #include "math/xmath.h"
-#include "dev_support/func.hpp"
+#include "func/func.hpp"
 #include "assembly_graph/graph_support/basic_edge_conditions.hpp"
 #include "assembly_graph/graph_support/graph_processing_algorithm.hpp"
-#include "data_structures/sequence/sequence.hpp"
+#include "sequence/sequence.hpp"
 
 #include <set>
 
@@ -195,16 +195,9 @@ private:
 };
 
 template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId> AddTipCondition(const Graph& g,
-                                                            pred::TypedPredicate<typename Graph::EdgeId> condition) {
-    return pred::And(TipCondition<Graph>(g), condition);
-}
-
-template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId>
-NecessaryTipCondition(const Graph& g, size_t max_length, double max_coverage) {
-    return AddTipCondition(g, pred::And(LengthUpperBound<Graph>(g, max_length),
-                                       CoverageUpperBound<Graph>(g, max_coverage)));
+func::TypedPredicate<typename Graph::EdgeId> AddTipCondition(const Graph& g,
+                                                            func::TypedPredicate<typename Graph::EdgeId> condition) {
+    return func::And(TipCondition<Graph>(g), condition);
 }
 
 template<class Graph>
@@ -247,25 +240,9 @@ public:
 };
 
 template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId>AddDeadEndCondition(const Graph& g,
-                                                                pred::TypedPredicate<typename Graph::EdgeId> condition) {
-    return pred::And(DeadEndCondition<Graph>(g), condition);
+func::TypedPredicate<typename Graph::EdgeId>AddDeadEndCondition(const Graph& g,
+                                                                func::TypedPredicate<typename Graph::EdgeId> condition) {
+    return func::And(DeadEndCondition<Graph>(g), condition);
 }
 
-//template<class Graph>
-//bool ClipTips(
-//        Graph& g,
-//        size_t max_length,
-//        shared_ptr<Predicate<typename Graph::EdgeId>> condition
-//            = make_shared<func::AlwaysTrue<typename Graph::EdgeId>>(),
-//        std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-//
-//    omnigraph::EdgeRemovingAlgorithm<Graph> tc(g,
-//                                               AddTipCondition(g, condition),
-//                                               removal_handler);
-//
-//    return tc.Run(LengthComparator<Graph>(g),
-//                      make_shared<LengthUpperBound<Graph>>(g, max_length));
-//}
-
 } // namespace omnigraph
diff --git a/src/modules/assembly_graph/graph_support/basic_edge_conditions.hpp b/src/common/modules/simplification/topological_edge_conditions.hpp
similarity index 50%
rename from src/modules/assembly_graph/graph_support/basic_edge_conditions.hpp
rename to src/common/modules/simplification/topological_edge_conditions.hpp
index f0b72a0..88164a9 100644
--- a/src/modules/assembly_graph/graph_support/basic_edge_conditions.hpp
+++ b/src/common/modules/simplification/topological_edge_conditions.hpp
@@ -1,131 +1,10 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
 #pragma once
 
-#include "dev_support/func.hpp"
-#include "math/pred.hpp"
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
-#include "assembly_graph/graph_core/directions.hpp"
-#include "assembly_graph/paths/path_finders.hpp"
+#include "assembly_graph/graph_support/basic_edge_conditions.hpp"
+#include "assembly_graph/core/directions.hpp"
 
 namespace omnigraph {
 
-using namespace func;
-
-template<class Graph>
-class EdgeCondition : public Predicate<typename Graph::EdgeId> {
-    typedef typename Graph::EdgeId EdgeId;
-
-    const Graph &g_;
-protected:
-
-    EdgeCondition(const Graph &g)
-            : g_(g) {
-    }
-
-    const Graph &g() const {
-        return g_;
-    }
-
-};
-
-template<class Graph>
-class IsolatedEdgeCondition : public EdgeCondition<Graph> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    typedef EdgeCondition<Graph> base;
-
-    bool IsTerminalVertex(VertexId v) const {
-        return this->g().IncomingEdgeCount(v) + this->g().OutgoingEdgeCount(v) == 1;
-    }
-
-public:
-    IsolatedEdgeCondition(const Graph &g) : base(g) {
-    }
-
-    bool Check(EdgeId e) const {
-        return IsTerminalVertex(this->g().EdgeStart(e)) && IsTerminalVertex(this->g().EdgeEnd(e));
-    }
-
-};
-
-template<class Graph>
-inline bool HasAlternatives(const Graph &g, typename Graph::EdgeId e) {
-    return g.OutgoingEdgeCount(g.EdgeStart(e)) > 1
-           && g.IncomingEdgeCount(g.EdgeEnd(e)) > 1;
-}
-
-
-template<class Graph>
-class AlternativesPresenceCondition : public EdgeCondition<Graph> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    typedef EdgeCondition<Graph> base;
-
-public:
-
-    AlternativesPresenceCondition(const Graph &g)
-            : base(g) {
-
-    }
-
-    bool Check(EdgeId e) const {
-        return HasAlternatives(this->g(), e);
-    }
-
-};
-
-template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId> AddAlternativesPresenceCondition(const Graph &g,
-                                                                              pred::TypedPredicate<typename Graph::EdgeId> condition) {
-    return pred::And(AlternativesPresenceCondition<Graph>(g), condition);
-}
-
-
-template<class Graph>
-class CoverageUpperBound : public EdgeCondition<Graph> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef EdgeCondition<Graph> base;
-    const double max_coverage_;
-
-public:
-
-    CoverageUpperBound(const Graph &g, double max_coverage)
-            : base(g),
-              max_coverage_(max_coverage) {
-    }
-
-    bool Check(EdgeId e) const {
-        return math::le(this->g().coverage(e), max_coverage_);
-    }
-
-};
-
-template<class Graph>
-class LengthUpperBound : public EdgeCondition<Graph> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef EdgeCondition<Graph> base;
-
-    const size_t max_length_;
-
-public:
-
-    LengthUpperBound(const Graph &g, size_t max_length)
-            : base(g),
-              max_length_(max_length) {
-    }
-
-    bool Check(EdgeId e) const {
-        return this->g().length(e) <= max_length_;
-    }
-
-};
-
 template<class Graph, class PathFinder>
 class PathLengthLowerBound : public EdgeCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
@@ -163,12 +42,29 @@ public:
 };
 
 template<class Graph, class PathFinder>
-PathLengthLowerBound<Graph, PathFinder>
+EdgePredicate<Graph>
 MakePathLengthLowerBound(const Graph &g, const PathFinder &path_finder, size_t min_length) {
     return PathLengthLowerBound<Graph, PathFinder>(g, path_finder, min_length);
 }
 
 template<class Graph>
+EdgePredicate<Graph>
+UniquePathLengthLowerBound(const Graph &g, size_t min_length) {
+    return MakePathLengthLowerBound(g, UniquePathFinder<Graph>(g), min_length);
+}
+
+template<class Graph>
+EdgePredicate<Graph>
+UniqueIncomingPathLengthLowerBound(const Graph &g, size_t min_length) {
+    return [&] (typename Graph::EdgeId e) {
+        typename Graph::VertexId v = g.EdgeStart(e);
+        return g.CheckUniqueIncomingEdge(v) &&
+                UniquePathLengthLowerBound(g, min_length)(g.GetUniqueIncomingEdge(v));
+    };
+}
+
+//todo can disconnect uniqueness and plausibility conditions, since graph is always conjugate!
+template<class Graph>
 class UniquenessPlausabilityCondition : public EdgeCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
@@ -221,11 +117,10 @@ class PredicateUniquenessPlausabilityCondition :
         public UniquenessPlausabilityCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef pred::TypedPredicate<EdgeId> EdgePredicate;
     typedef UniquenessPlausabilityCondition<Graph> base;
 
-    EdgePredicate uniqueness_condition_;
-    EdgePredicate plausiblity_condition_;
+    EdgePredicate<Graph> uniqueness_condition_;
+    EdgePredicate<Graph> plausiblity_condition_;
 
     bool CheckUniqueness(EdgeId e, bool) const {
         return uniqueness_condition_(e);
@@ -238,8 +133,8 @@ class PredicateUniquenessPlausabilityCondition :
 public:
 
     PredicateUniquenessPlausabilityCondition(
-            const Graph &g, EdgePredicate uniqueness_condition,
-            EdgePredicate plausiblity_condition)
+            const Graph &g, EdgePredicate<Graph> uniqueness_condition,
+            EdgePredicate<Graph> plausiblity_condition)
             : base(g),
               uniqueness_condition_(uniqueness_condition),
               plausiblity_condition_(plausiblity_condition) {
@@ -252,7 +147,6 @@ class DefaultUniquenessPlausabilityCondition :
         public PredicateUniquenessPlausabilityCondition<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef pred::TypedPredicate<EdgeId> EdgePredicate;
     typedef PredicateUniquenessPlausabilityCondition<Graph> base;
 
 public:
@@ -261,8 +155,7 @@ public:
                                            size_t uniqueness_length,
                                            size_t plausibility_length)
             : base(g,
-                   MakePathLengthLowerBound(g,
-                                            UniquePathFinder<Graph>(g), uniqueness_length),
+                   UniquePathLengthLowerBound(g, uniqueness_length),
                    MakePathLengthLowerBound(g,
                                             PlausiblePathFinder<Graph>(g, 2 * plausibility_length),
                                             plausibility_length)) {
@@ -270,4 +163,124 @@ public:
 
 };
 
+template<class Graph>
+class MultiplicityCounter {
+private:
+    typedef typename Graph::VertexId VertexId;
+    typedef typename Graph::EdgeId EdgeId;
+    const Graph &graph_;
+    size_t uniqueness_length_;
+    size_t max_depth_;
+
+    bool search(VertexId a, VertexId start, EdgeId e, size_t depth,
+                std::set<VertexId> &was, pair<size_t, size_t> &result) const {
+        if (depth > max_depth_)
+            return false;
+        if (was.count(a) == 1)
+            return true;
+        was.insert(a);
+        if (graph_.OutgoingEdgeCount(a) == 0
+            || graph_.IncomingEdgeCount(a) == 0)
+            return false;
+        for (auto I = graph_.out_begin(a), E = graph_.out_end(a); I != E; ++I) {
+            if (*I == e) {
+                if (a != start) {
+                    return false;
+                }
+            } else {
+                if (graph_.length(*I) >= uniqueness_length_) {
+                    result.second++;
+                } else {
+                    if (!search(graph_.EdgeEnd(*I), start, e,
+                                depth + 1 /*graph_.length(*it)*/, was, result))
+                        return false;
+                }
+            }
+        }
+        for (EdgeId in_e : graph_.IncomingEdges(a)) {
+            if (in_e == e) {
+                if (a != start) {
+                    return false;
+                }
+            } else {
+                if (graph_.length(in_e) >= uniqueness_length_) {
+                    result.first++;
+                } else {
+                    if (!search(graph_.EdgeStart(in_e), start, e,
+                                depth + 1 /*graph_.length(*it)*/, was, result))
+                        return false;
+                }
+            }
+        }
+        return true;
+    }
+
+public:
+    MultiplicityCounter(const Graph &graph, size_t uniqueness_length,
+                        size_t max_depth)
+            : graph_(graph),
+              uniqueness_length_(uniqueness_length),
+              max_depth_(max_depth) {
+    }
+
+    size_t count(EdgeId e, VertexId start) const {
+        std::pair<size_t, size_t> result;
+        std::set<VertexId> was;
+        bool valid = search(start, start, e, 0, was, result);
+        if (!valid) {
+            return (size_t) (-1);
+        }
+        if (graph_.EdgeStart(e) == start) {
+            if (result.first < result.second) {
+                return (size_t) (-1);
+            }
+            return result.first - result.second;
+        } else {
+            if (result.first > result.second) {
+                return (size_t) (-1);
+            }
+            return -result.first + result.second;
+        }
+    }
+};
+
+template<class Graph>
+class MultiplicityCountingCondition : public UniquenessPlausabilityCondition<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    typedef UniquenessPlausabilityCondition<Graph> base;
+
+    MultiplicityCounter<Graph> multiplicity_counter_;
+    EdgePredicate<Graph> plausiblity_condition_;
+
+public:
+    bool CheckUniqueness(EdgeId e, bool forward) const {
+        TRACE( "Checking " << this->g().int_id(e) << " for uniqueness in " << (forward ? "forward" : "backward") << " direction");
+        VertexId start =
+                forward ? this->g().EdgeEnd(e) : this->g().EdgeStart(e);
+        bool result = multiplicity_counter_.count(e, start) <= 1;
+        TRACE( "Edge " << this->g().int_id(e) << " is" << (result ? "" : " not") << " unique");
+        return result;
+    }
+
+    bool CheckPlausibility(EdgeId e, bool) const {
+        return plausiblity_condition_(e);
+    }
+
+    MultiplicityCountingCondition(const Graph& g, size_t uniqueness_length,
+                                  EdgePredicate<Graph> plausiblity_condition)
+            :
+    //todo why 8???
+            base(g),
+            multiplicity_counter_(g, uniqueness_length, 8),
+            plausiblity_condition_(plausiblity_condition) {
+
+    }
+
+private:
+
+    DECL_LOGGER("MultiplicityCountingCondition");
+};
+
+
 }
diff --git a/src/common/paired_info/concurrent_pair_info_buffer.hpp b/src/common/paired_info/concurrent_pair_info_buffer.hpp
new file mode 100644
index 0000000..5662a32
--- /dev/null
+++ b/src/common/paired_info/concurrent_pair_info_buffer.hpp
@@ -0,0 +1,120 @@
+//***************************************************************************
+//* Copyright (c) 2016 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "histogram.hpp"
+#include "histptr.hpp"
+
+#include <btree/btree_map.h>
+#include <cuckoo/cuckoohash_map.hh>
+
+namespace omnigraph {
+
+namespace de {
+
+template<typename G, typename Traits, template<typename, typename> class Container>
+class ConcurrentPairedBuffer : public PairedBufferBase<ConcurrentPairedBuffer<G, Traits, Container>,
+                                                       G, Traits> {
+    typedef ConcurrentPairedBuffer<G, Traits, Container> self;
+    typedef PairedBufferBase<self, G, Traits> base;
+
+    friend class PairedBufferBase<self, G, Traits>;
+
+  protected:
+    using typename base::InnerPoint;
+    typedef omnigraph::de::Histogram<InnerPoint> InnerHistogram;
+    typedef omnigraph::de::StrongWeakPtr<InnerHistogram> InnerHistPtr;
+
+
+  public:
+    using typename base::Graph;
+    using typename base::EdgeId;
+    using typename base::EdgePair;
+    using typename base::Point;
+
+    typedef Container<EdgeId, InnerHistPtr> InnerMap;
+    typedef cuckoohash_map<EdgeId, InnerMap> StorageMap;
+
+  public:
+    ConcurrentPairedBuffer(const Graph &g)
+            : base(g) {
+        clear();
+    }
+
+    //---------------- Miscellaneous ----------------
+
+    /**
+     * @brief Clears the whole index. Used in merging.
+     */
+    void clear() {
+        storage_.clear();
+        this->size_ = 0;
+    }
+
+    typename StorageMap::locked_table lock_table() {
+        return storage_.lock_table();
+    }
+
+  private:
+    std::pair<typename InnerHistPtr::pointer, size_t> InsertOne(EdgeId e1, EdgeId e2, InnerPoint p) {
+        if (!storage_.contains(e1))
+            storage_.insert(e1, InnerMap()); // We can fail to insert here, it's ok
+
+        size_t added = 0;
+        typename InnerHistPtr::pointer inserted = nullptr;
+        storage_.update_fn(e1,
+                           [&](InnerMap &second) { // Now we will hold lock to the whole "subtree" starting from e1
+                               if (!second.count(e2)) {
+                                   inserted = new InnerHistogram();
+                                   second.insert(std::make_pair(e2, InnerHistPtr(inserted, /* owning */ true)));
+                               }
+                               added = second[e2]->merge_point(p);
+                           });
+
+        return { inserted, added };
+    }
+
+    template<class OtherHist>
+    std::pair<typename InnerHistPtr::pointer, size_t> InsertHist(EdgeId e1, EdgeId e2, const OtherHist &h) {
+        if (!storage_.contains(e1))
+            storage_.insert(e1, InnerMap()); // We can fail to insert here, it's ok
+
+        size_t added = 0;
+        typename InnerHistPtr::pointer inserted = nullptr;
+        storage_.update_fn(e1,
+                           [&](InnerMap &second) { // Now we will hold lock to the whole "subtree" starting from e1
+                               if (!second.count(e2)) {
+                                   inserted = new InnerHistogram();
+                                   second.insert(std::make_pair(e2, InnerHistPtr(inserted, /* owning */ true)));
+                               }
+                               added = second[e2]->merge(h);
+                           });
+
+        return { inserted, added };
+    }
+
+    void InsertHistView(EdgeId e1, EdgeId e2, typename InnerHistPtr::pointer p) {
+        if (!storage_.contains(e1))
+            storage_.insert(e1, InnerMap()); // We can fail to insert here, it's ok
+
+        storage_.update_fn(e1,
+                           [&](InnerMap &second) { // Now we will hold lock to the whole "subtree" starting from e1
+                               auto res = second.insert(std::make_pair(e2, InnerHistPtr(p, /* owning */ false)));
+                               VERIFY_MSG(res.second, "Index insertion inconsistency");
+                           });
+    }
+
+  protected:
+    StorageMap storage_;
+};
+
+template<class Graph>
+using ConcurrentPairedInfoBuffer = ConcurrentPairedBuffer<Graph, RawPointTraits, btree_map>;
+
+} // namespace de
+
+} // namespace omnigraph
diff --git a/src/modules/paired_info/data_divider.hpp b/src/common/paired_info/data_divider.hpp
similarity index 99%
rename from src/modules/paired_info/data_divider.hpp
rename to src/common/paired_info/data_divider.hpp
index 7bd2c7b..c124470 100644
--- a/src/modules/paired_info/data_divider.hpp
+++ b/src/common/paired_info/data_divider.hpp
@@ -18,7 +18,7 @@
 
 #include <iostream>
 #include <math.h>
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 #include <vector>
 #include <utility>
 #include <cstdlib>
diff --git a/src/modules/paired_info/distance_estimation.hpp b/src/common/paired_info/distance_estimation.hpp
similarity index 87%
rename from src/modules/paired_info/distance_estimation.hpp
rename to src/common/paired_info/distance_estimation.hpp
index 7143ef3..97663a4 100644
--- a/src/modules/paired_info/distance_estimation.hpp
+++ b/src/common/paired_info/distance_estimation.hpp
@@ -9,7 +9,7 @@
 #define DISTANCE_ESTIMATION_HPP_
 
 #include "math/xmath.h"
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
 #include "paired_info.hpp"
 #include "assembly_graph/paths/path_processor.hpp"
@@ -44,39 +44,30 @@ public:
 
     // finds all distances from a current edge to a set of edges
     void FillGraphDistancesLengths(EdgeId e1, LengthMap &second_edges) const {
-        vector<VertexId> end_points;
         vector<size_t> path_lower_bounds;
-        for (const auto &entry : second_edges) {
-            EdgeId second_edge = entry.first;
-            end_points.push_back(graph_.EdgeStart(second_edge));
-            path_lower_bounds.push_back(PairInfoPathLengthLowerBound(graph_.k(), graph_.length(e1),
-                                                                     graph_.length(second_edge), gap_, delta_));
-            TRACE("Bounds for paths are " << path_lower_bounds.back());
-        }
 
         size_t path_upper_bound = PairInfoPathLengthUpperBound(graph_.k(), insert_size_, delta_);
 
-        DistancesLengthsCallback<Graph> callback(graph_);
-
         PathProcessor<Graph> paths_proc(graph_, graph_.EdgeEnd(e1), path_upper_bound);
 
-        for (size_t i = 0; i < end_points.size(); ++i) {
-            //FIXME should max dist also depend on the point?
-            paths_proc.Process(end_points[i], path_lower_bounds[i], path_upper_bound, callback);
-        }
+        for (auto &entry : second_edges) {
+            EdgeId e2 = entry.first;
+            size_t path_lower_bound = PairInfoPathLengthLowerBound(graph_.k(), graph_.length(e1),
+                                                                   graph_.length(e2), gap_, delta_);
 
-        vector<GraphLengths> result;
+            TRACE("Bounds for paths are " << path_lower_bound << " " << path_upper_bound);
 
-        size_t i = 0;
-        for (auto &entry : second_edges) {
-            GraphLengths lengths = callback.distances(i++);
+            DistancesLengthsCallback<Graph> callback(graph_);
+            paths_proc.Process(graph_.EdgeStart(e2), path_lower_bound, path_upper_bound, callback);
+            GraphLengths lengths = callback.distances();
             for (size_t j = 0; j < lengths.size(); ++j) {
                 lengths[j] += graph_.length(e1);
-                TRACE("Resulting distance set # " << i <<
-                      " edge " << graph_.int_id(entry.first) << " #" << j << " length " << lengths[j]);
+                TRACE("Resulting distance set for " <<
+                          " edge " << graph_.int_id(e2) <<
+                          " #" << j << " length " << lengths[j]);
             }
 
-            if (e1 == entry.first)
+            if (e1 == e2)
                 lengths.push_back(0);
 
             std::sort(lengths.begin(), lengths.end());
@@ -132,14 +123,14 @@ protected:
         OutHistogram result;
         for (size_t i = 0; i < estimated.size(); ++i) {
             size_t left = i;
-            double weight = estimated[i].second;
+            DEWeight weight = DEWeight(estimated[i].second);
             while (i + 1 < estimated.size() &&
                    (estimated[i + 1].first - estimated[i].first) <= (int) linkage_distance_) {
                 ++i;
                 weight += estimated[i].second;
             }
-            double center = (estimated[left].first + estimated[i].first) * 0.5;
-            double var = (estimated[i].first - estimated[left].first) * 0.5;
+            DEDistance center = DEDistance((estimated[left].first + estimated[i].first) * 0.5);
+            DEVariance var = DEVariance((estimated[i].first - estimated[left].first) * 0.5);
             result.insert(Point(center, weight, var));
         }
         return result;
@@ -204,7 +195,7 @@ public:
 
         for (size_t i = 0; i < nthreads; ++i) {
             result.Merge(buffer[i]);
-            buffer[i].Clear();
+            buffer[i].clear();
         }
     }
 
@@ -222,12 +213,12 @@ protected:
 
         TRACE("Bounds are " << minD << " " << maxD);
         EstimHist result;
-        vector<int> forward;
+        vector<DEDistance> forward;
         forward.reserve(raw_forward.size());
         for (auto raw_length : raw_forward) {
             int length = int(raw_length);
             if (minD - int(max_distance_) <= length && length <= maxD + int(max_distance_))
-                forward.push_back(length);
+                forward.push_back(DEDistance(length));
         }
         if (forward.size() == 0)
             return result;
@@ -235,7 +226,7 @@ protected:
         size_t cur_dist = 0;
         vector<DEWeight> weights(forward.size(), 0);
         for (auto point : histogram) {
-            if (ls(2 * point.d + second_len, DEDistance(first_len)))
+            if (ls(2 * point.d + DEDistance(second_len), DEDistance(first_len)))
                 continue;
             while (cur_dist + 1 < forward.size() && forward[cur_dist + 1] < point.d)
                 ++cur_dist;
diff --git a/src/modules/paired_info/histogram.hpp b/src/common/paired_info/histogram.hpp
similarity index 93%
rename from src/modules/paired_info/histogram.hpp
rename to src/common/paired_info/histogram.hpp
index c326f6e..d8983fc 100644
--- a/src/modules/paired_info/histogram.hpp
+++ b/src/common/paired_info/histogram.hpp
@@ -8,8 +8,8 @@
 #pragma once
 
 #include <btree/btree_set.h>
-#include "utils/adt/flat_set.hpp"
-#include "utils/adt/small_pod_vector.hpp"
+#include "common/adt/flat_set.hpp"
+#include "common/adt/small_pod_vector.hpp"
 #include "index_point.hpp"
 
 namespace omnigraph {
@@ -54,6 +54,10 @@ public:
         insert(b, e);
     }
 
+    Histogram(std::initializer_list<Point> l) {
+        insert(l.begin(), l.end());
+    }
+
     // Iterator routines.
     iterator begin() { return tree_.begin(); }
     const_iterator begin() const { return tree_.begin(); }
@@ -162,11 +166,16 @@ public:
 
     template<class OtherHist>
     size_t merge(const OtherHist &other) {
-        size_t added = 0;
-        for (const auto &new_point : other) {
-            added += merge_point(new_point);
+        // If histogram is empty, we could simply insert everything
+        if (size() == 0) {
+            insert(other.begin(), other.end());
+            return size();
         }
-        return added;
+
+        size_t old_size = size();
+        for (const auto &new_point : other)
+            merge_point(new_point);
+        return size() - old_size;
     }
 };
 
diff --git a/src/common/paired_info/histptr.hpp b/src/common/paired_info/histptr.hpp
new file mode 100644
index 0000000..58f34c7
--- /dev/null
+++ b/src/common/paired_info/histptr.hpp
@@ -0,0 +1,156 @@
+//***************************************************************************
+//* Copyright (c) 2015-2016 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+namespace omnigraph {
+namespace de {
+
+template<class T>
+class StrongWeakPtr {
+  public:
+    typedef T  element_type;
+    typedef T* pointer;
+
+    StrongWeakPtr() noexcept
+            : ptr_(pointer(), false) {}
+
+    StrongWeakPtr(std::nullptr_t) noexcept
+            : ptr_(pointer(), false) {}
+
+    StrongWeakPtr(pointer p, bool owning = true) noexcept
+            : ptr_(std::move(p), owning) { }
+
+    StrongWeakPtr(StrongWeakPtr &&p) noexcept
+            : ptr_(p.release(), p.owning()) {}
+
+    StrongWeakPtr& operator=(StrongWeakPtr &&p) noexcept {
+        reset(p.release(), p.owning());
+        return *this;
+    }
+
+    ~StrongWeakPtr() {
+        reset();
+    }
+
+    StrongWeakPtr &operator=(std::nullptr_t) noexcept {
+        reset();
+        return *this;
+    }
+
+    typename std::add_lvalue_reference<T>::type operator*() const {
+        return *ptr_.getPointer();
+    }
+
+    pointer operator->() const noexcept {
+        return ptr_.getPointer();
+    }
+
+    pointer get() const noexcept {
+        return ptr_.getPointer();
+    }
+    
+    explicit operator bool() const noexcept {
+        return ptr_.getPointer() != nullptr;
+    }
+
+    bool owning() const noexcept {
+        return ptr_.getInt();
+    }
+    
+    pointer release() noexcept {
+        pointer p = ptr_.getPointer();
+        ptr_ = raw_type();
+        return p;
+    }
+    
+    void reset(pointer p = pointer(), bool own = true) {
+        pointer tmp = ptr_.getPointer(); bool is_owning = ptr_.getInt();
+        ptr_ = raw_type(p, own);
+        if (is_owning)
+            delete tmp;
+    }
+
+    void swap(StrongWeakPtr &p) noexcept {
+        std::swap(p.ptr_, ptr_);
+    }
+    
+  private:
+    llvm::PointerIntPair<pointer, 1, bool> ptr_;
+  public:
+    typedef decltype(ptr_) raw_type;
+};
+
+
+template<class T>
+inline void swap(StrongWeakPtr<T> &x, StrongWeakPtr<T> &y) noexcept {
+    x.swap(y);
+}
+
+template<class T>
+inline bool operator==(const StrongWeakPtr<T> &x, const StrongWeakPtr<T> &y) noexcept {
+    return x.get() == y.get();
+}
+
+template<class T>
+inline bool operator!=(const StrongWeakPtr<T> &x, const StrongWeakPtr<T> &y) noexcept {
+    return !(x == y);
+}
+
+template<class T1, class T2>
+inline bool operator<(const StrongWeakPtr<T1> &x, const StrongWeakPtr<T2> &y) noexcept {
+    typedef typename StrongWeakPtr<T1>::pointer P1;
+    typedef typename StrongWeakPtr<T2>::pointer P2;
+    typedef typename std::common_type<P1, P2>::type Common;
+
+    using namespace std;
+    return less<Common>()(x.get(), y.get());
+}
+
+template<class T1, class T2>
+inline bool operator>(const StrongWeakPtr<T1> &x, const StrongWeakPtr<T2> &y) noexcept {
+    return y < x;
+}
+
+template<class T1, class T2>
+inline bool operator<=(const StrongWeakPtr<T1> &x, const StrongWeakPtr<T2> &y) noexcept {
+    return !(y < x);
+}
+
+template<class T1, class T2>
+inline bool operator>=(const StrongWeakPtr<T1> &x, const StrongWeakPtr<T2> &y) noexcept {
+    return !(x < y);
+}
+
+template<class T>
+inline bool operator==(const StrongWeakPtr<T> &x, std::nullptr_t) noexcept {
+    return !x;
+}
+
+template<class T>
+inline bool operator==(std::nullptr_t, const StrongWeakPtr<T> &x) noexcept {
+    return !x;
+}
+
+template<class T>
+inline bool operator!=(const StrongWeakPtr<T> &x, std::nullptr_t) noexcept {
+    return static_cast<bool>(x);
+}
+
+template<class T>
+inline bool operator!=(std::nullptr_t, const StrongWeakPtr<T> &x) noexcept {
+    return static_cast<bool>(x);
+}
+
+template<class T, class... Args>
+StrongWeakPtr<T>
+make_sw(Args&&... args) {
+    return StrongWeakPtr<T>(new T(std::forward<Args>(args)...));
+}
+
+}
+}
+
diff --git a/src/modules/paired_info/index_point.hpp b/src/common/paired_info/index_point.hpp
similarity index 100%
rename from src/modules/paired_info/index_point.hpp
rename to src/common/paired_info/index_point.hpp
diff --git a/src/modules/paired_info/insert_size_refiner.hpp b/src/common/paired_info/insert_size_refiner.hpp
similarity index 98%
rename from src/modules/paired_info/insert_size_refiner.hpp
rename to src/common/paired_info/insert_size_refiner.hpp
index cbaf257..6910899 100644
--- a/src/modules/paired_info/insert_size_refiner.hpp
+++ b/src/common/paired_info/insert_size_refiner.hpp
@@ -7,8 +7,8 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
-#include "dev_support/cpp_utils.hpp"
+#include "utils/standard_base.hpp"
+#include "utils/cpp_utils.hpp"
 #include "assembly_graph/stats/picture_dump.hpp"
 //#include "sequence_mapper.hpp"
 
diff --git a/src/modules/paired_info/is_counter.hpp b/src/common/paired_info/is_counter.hpp
similarity index 68%
rename from src/modules/paired_info/is_counter.hpp
rename to src/common/paired_info/is_counter.hpp
index 678387c..bde7736 100644
--- a/src/modules/paired_info/is_counter.hpp
+++ b/src/common/paired_info/is_counter.hpp
@@ -5,19 +5,12 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-/*
- * is_counter.hpp
- *
- *  Created on: May 25, 2014
- *      Author: andrey
- */
-
 #ifndef IS_COUNTER_HPP_
 #define IS_COUNTER_HPP_
 
 
 #include "paired_info/insert_size_refiner.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper_notifier.hpp"
+#include "modules/alignment/sequence_mapper_notifier.hpp"
 
 namespace debruijn_graph {
 
@@ -41,7 +34,7 @@ public:
     size_t negative() const { return negative_.total_; }
 
 
-    virtual void StartProcessLibrary(size_t threads_count) {
+    void StartProcessLibrary(size_t threads_count) override {
         hist_.clear();
         tmp_hists_ = vector<HistType>(threads_count);
 
@@ -50,42 +43,33 @@ public:
         negative_ = count_data(threads_count);
     }
 
-    virtual void StopProcessLibrary() {
-        for (size_t i = 0; i < tmp_hists_.size(); ++i) {
-            MergeBuffer(i);
-        }
+    void StopProcessLibrary() override {
         tmp_hists_.clear();
         total_.merge();
         counted_.merge();
         negative_.merge();
     }
 
-    virtual void ProcessPairedRead(size_t thread_index,
-                                   const io::PairedRead& r,
-                                   const MappingPath<EdgeId>& read1,
-                                   const MappingPath<EdgeId>& read2) {
+    void ProcessPairedRead(size_t thread_index,
+                           const io::PairedRead& r,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
         ProcessPairedRead(thread_index, read1, read2, (int) r.second().size(),
                           (int) r.first().GetLeftOffset() + (int) r.second().GetRightOffset());
     }
 
-    virtual void ProcessPairedRead(size_t thread_index,
-                                   const io::PairedReadSeq& r,
-                                   const MappingPath<EdgeId>& read1,
-                                   const MappingPath<EdgeId>& read2) {
+    void ProcessPairedRead(size_t thread_index,
+                           const io::PairedReadSeq& r,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
         ProcessPairedRead(thread_index, read1, read2, (int) r.second().size(),
                           (int) r.first().GetLeftOffset() + (int) r.second().GetRightOffset());
     }
 
-    virtual void ProcessSingleRead(size_t /*thread_index*/, const io::SingleRead&, const MappingPath<EdgeId>& /*read*/) {
-    }
-
-    virtual void ProcessSingleRead(size_t /*thread_index*/, const io::SingleReadSeq&, const MappingPath<EdgeId>& /*read*/) {
-    }
-
-    virtual void MergeBuffer(size_t thread_index) {
-        for (const auto& kv: tmp_hists_[thread_index]) {
+    void MergeBuffer(size_t thread_index) override {
+        for (const auto& kv: tmp_hists_[thread_index])
             hist_[kv.first] += kv.second;
-        }
+
         tmp_hists_[thread_index].clear();
     }
 
@@ -98,11 +82,10 @@ public:
     }
 
 private:
-    virtual void ProcessPairedRead(size_t thread_index,
-                                   const MappingPath<EdgeId>& read1,
-                                   const MappingPath<EdgeId>& read2,
-                                   int read2_size,
-                                   int is_delta) {
+    void ProcessPairedRead(size_t thread_index,
+                           const MappingPath<EdgeId>& read1, const MappingPath<EdgeId>& read2,
+                           int read2_size,
+                           int is_delta) {
 
         ++total_.arr_[thread_index];
 
@@ -120,7 +103,7 @@ private:
             int is = read2_start - read1_start + read2_size + is_delta;
             TRACE("IS: " << read2_start << " - " <<  read1_start << " + " << (int) is_delta << " = " << is);
 
-            if (is > 0 || !ignore_negative_) {
+            if (is > 0 || ignore_negative_) {
                 tmp_hists_[thread_index][is] += 1;
                 ++counted_.arr_[thread_index];
             } else {
@@ -133,13 +116,13 @@ private:
     struct count_data {
       size_t total_;
       vector<size_t> arr_;
-      count_data(): total_(0) {
-      }
-      count_data(size_t nthreads): total_(0), arr_(nthreads, 0) {
-      }
-      void inc(size_t i) {
-        ++arr_[i];
-      }
+      count_data()
+              : total_(0) {}
+
+      count_data(size_t nthreads)
+              : total_(0), arr_(nthreads, 0) {}
+
+      void inc(size_t i) { ++arr_[i]; }
       void merge() {
         for (size_t i = 0; i < arr_.size(); ++i) {
           total_ += arr_[i];
@@ -148,7 +131,7 @@ private:
     };
 
 private:
-    const conj_graph_pack& gp_;
+    const conj_graph_pack &gp_;
 
     HistType hist_;
     vector<HistType> tmp_hists_;
diff --git a/src/modules/paired_info/pair_info_bounds.hpp b/src/common/paired_info/pair_info_bounds.hpp
similarity index 96%
rename from src/modules/paired_info/pair_info_bounds.hpp
rename to src/common/paired_info/pair_info_bounds.hpp
index ae0c041..c6c4b0c 100644
--- a/src/modules/paired_info/pair_info_bounds.hpp
+++ b/src/common/paired_info/pair_info_bounds.hpp
@@ -8,7 +8,7 @@
 #ifndef OMNI_UTILS_HPP_
 #define OMNI_UTILS_HPP_
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 
 namespace omnigraph {
 
diff --git a/src/common/paired_info/pair_info_filler.hpp b/src/common/paired_info/pair_info_filler.hpp
new file mode 100644
index 0000000..e0633f0
--- /dev/null
+++ b/src/common/paired_info/pair_info_filler.hpp
@@ -0,0 +1,108 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef PAIR_INFO_FILLER_HPP_
+#define PAIR_INFO_FILLER_HPP_
+
+#include "paired_info/concurrent_pair_info_buffer.hpp"
+#include "modules/alignment/sequence_mapper_notifier.hpp"
+
+namespace debruijn_graph {
+
+/**
+ * As for now it ignores sophisticated case of repeated consecutive
+ * occurrence of edge in path due to gaps in mapping
+ *
+ */
+class LatePairedIndexFiller : public SequenceMapperListener {
+    typedef std::pair<EdgeId, EdgeId> EdgePair;
+public:
+    typedef std::function<double(const EdgePair&, const MappingRange&, const MappingRange&)> WeightF;
+
+    LatePairedIndexFiller(const Graph &graph, WeightF weight_f,
+                          unsigned round_distance,
+                          omnigraph::de::UnclusteredPairedInfoIndexT<Graph>& paired_index)
+            : weight_f_(std::move(weight_f)),
+              paired_index_(paired_index),
+              buffer_pi_(graph),
+              round_distance_(round_distance) {}
+
+    void StartProcessLibrary(size_t) override {
+        DEBUG("Start processing: start");
+        buffer_pi_.clear();
+        DEBUG("Start processing: end");
+    }
+
+    void StopProcessLibrary() override {
+        // paired_index_.Merge(buffer_pi_);
+        paired_index_.MoveAssign(buffer_pi_);
+        buffer_pi_.clear();
+    }
+    
+    void ProcessPairedRead(size_t,
+                           const io::PairedRead& r,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
+        ProcessPairedRead(read1, read2, r.distance());
+    }
+
+    void ProcessPairedRead(size_t,
+                           const io::PairedReadSeq& r,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
+        ProcessPairedRead(read1, read2, r.distance());
+    }
+
+    virtual ~LatePairedIndexFiller() {}
+
+private:
+    void ProcessPairedRead(const MappingPath<EdgeId>& path1,
+                           const MappingPath<EdgeId>& path2, size_t read_distance) {
+        for (size_t i = 0; i < path1.size(); ++i) {
+            std::pair<EdgeId, MappingRange> mapping_edge_1 = path1[i];
+            for (size_t j = 0; j < path2.size(); ++j) {
+                std::pair<EdgeId, MappingRange> mapping_edge_2 = path2[j];
+
+                omnigraph::de::DEWeight weight =
+                        weight_f_({mapping_edge_1.first, mapping_edge_2.first},
+                                  mapping_edge_1.second, mapping_edge_2.second);
+
+                // Add only if weight is non-zero
+                if (math::gr(weight, 0)) {
+                    size_t kmer_distance = read_distance
+                                           + mapping_edge_2.second.initial_range.end_pos
+                                           - mapping_edge_1.second.initial_range.start_pos;
+                    int edge_distance = (int) kmer_distance
+                                        + (int) mapping_edge_1.second.mapped_range.start_pos
+                                        - (int) mapping_edge_2.second.mapped_range.end_pos;
+
+                    // Additionally round, if necessary
+                    if (round_distance_ > 1)
+                        edge_distance = int(std::round(edge_distance / double(round_distance_))) * round_distance_;
+
+                    buffer_pi_.Add(mapping_edge_1.first, mapping_edge_2.first,
+                                   omnigraph::de::RawPoint(edge_distance, weight));
+
+                }
+            }
+        }
+    }
+
+private:
+    WeightF weight_f_;
+    omnigraph::de::UnclusteredPairedInfoIndexT<Graph>& paired_index_;
+    omnigraph::de::ConcurrentPairedInfoBuffer<Graph> buffer_pi_;
+    unsigned round_distance_;
+
+    DECL_LOGGER("LatePairedIndexFiller");
+};
+
+
+}
+
+
+#endif /* PAIR_INFO_FILLER_HPP_ */
diff --git a/src/modules/paired_info/pair_info_filters.hpp b/src/common/paired_info/pair_info_filters.hpp
similarity index 100%
rename from src/modules/paired_info/pair_info_filters.hpp
rename to src/common/paired_info/pair_info_filters.hpp
diff --git a/src/modules/paired_info/pair_info_improver.hpp b/src/common/paired_info/pair_info_improver.hpp
similarity index 97%
rename from src/modules/paired_info/pair_info_improver.hpp
rename to src/common/paired_info/pair_info_improver.hpp
index ac6475c..8b6ccfc 100644
--- a/src/modules/paired_info/pair_info_improver.hpp
+++ b/src/common/paired_info/pair_info_improver.hpp
@@ -12,7 +12,7 @@
 #include "paired_info/paired_info_helpers.hpp"
 #include "assembly_graph/paths/path_utils.hpp"
 #include <math.h>
-#include <io/reads_io/read_processor.hpp>
+#include <io/reads/read_processor.hpp>
 
 namespace debruijn_graph {
 
@@ -140,7 +140,7 @@ class PairInfoImprover {
                 auto paths = GetAllPathsBetweenEdges(graph_, e1, e2, 0, (size_t) ceil(pi_dist - first_length + var));
                 return (paths.size() > 0);
             } else {
-                if (math::gr(p2.d, p1.d + first_length)) {
+                if (math::gr(p2.d, p1.d + omnigraph::de::DEDistance(first_length))) {
                     auto paths = GetAllPathsBetweenEdges(graph_, e1, e2,
                                                          (size_t) floor(pi_dist - first_length - var),
                                                          (size_t)  ceil(pi_dist - first_length + var));
@@ -193,7 +193,7 @@ class PairInfoImprover {
         DEBUG("Merging maps");
         for (size_t i = 1; i < nthreads; ++i) {
             to_remove[0].Merge(to_remove[i]);
-            to_remove[i].Clear();
+            to_remove[i].clear();
         }
         DEBUG("Resulting size " << to_remove[0].size());
 
@@ -202,7 +202,7 @@ class PairInfoImprover {
             I != omnigraph::de::half_pair_end(to_remove[0]); ++I) {
             cnt += DeleteIfExist(I.first(), I.second(), *I);
         }
-        to_remove[0].Clear();
+        to_remove[0].clear();
 
         DEBUG("Size of index " << index_.size());
         DEBUG("ParallelRemoveContraditional: Clean finished");
@@ -249,7 +249,7 @@ class PairInfoImprover {
                 for (auto p : *I)
                     cnt += TryToAddPairInfo(index_, e1, e2, p);
             }
-            to_add[i].Clear();
+            to_add[i].clear();
         }
 
         DEBUG("Size of paired index " << index_.size());
diff --git a/src/modules/paired_info/paired_info.hpp b/src/common/paired_info/paired_info.hpp
similarity index 63%
rename from src/modules/paired_info/paired_info.hpp
rename to src/common/paired_info/paired_info.hpp
index 952617b..0bba662 100644
--- a/src/modules/paired_info/paired_info.hpp
+++ b/src/common/paired_info/paired_info.hpp
@@ -7,59 +7,58 @@
 
 #pragma once
 
-#include "utils/adt/iterator_range.hpp"
+#include "common/adt/iterator_range.hpp"
 #include <boost/iterator/iterator_facade.hpp>
 #include <btree/safe_btree_map.h>
-#include <sparsehash/sparse_hash_map>
 
-#include <type_traits>
+#include "paired_info_buffer.hpp"
 
-#include "histogram.hpp"
+#include <type_traits>
 
 namespace omnigraph {
 
 namespace de {
 
-/**
- * @brief Index of paired reads information. For each pair of edges, we store so-called histogram which is a set
- *        of points with distance between those edges. Index is internally arranged as a map of map of histograms:
- *        edge1 -> (edge2 -> histogram)
- *        When we add a point (a,b)->p into the index, we automatically insert a conjugate point (b',a')->p',
- *        (self-conjugate edge pairs are the sole exception), so the index is always conjugate-symmetrical.
- *        Index provides access for a lot of different information:
- *        - if you need to have a histogram between two edges, use Get(edge1, edge2);
- *        - if you need to get a neighbourhood of some edge (second edges with corresponding histograms), use Get(edge1);
- *        - if you need to skip a symmetrical half of that neighbourhood, use GetHalf(edge1);
- *        Backward information (e.g., (b,a)->-p) is currently inaccessible.
- * @param G graph type
- * @param Traits Policy-like structure with associated types of inner and resulting points, and how to convert between them
- * @param C map-like container type (parameterized by key and value type)
- */
 template<typename G, typename Traits, template<typename, typename> class Container>
-class PairedIndex {
+class PairedIndex : public PairedBuffer<G, Traits, Container> {
+    typedef PairedIndex<G, Traits, Container> self;
+    typedef PairedBuffer<G, Traits, Container> base;
 
-private:
-    typedef typename Traits::Gapped InnerPoint;
-    typedef omnigraph::de::Histogram<InnerPoint> InnerHistogram;
+    typedef typename base::InnerHistogram InnerHistogram;
+    typedef typename base::InnerHistPtr InnerHistPtr;
+    typedef typename base::InnerPoint InnerPoint;
+
+    using typename base::EdgePair;
 
 public:
-    typedef G Graph;
+    using typename base::Graph;
+    using typename base::EdgeId;
+    typedef typename base::InnerMap InnerMap;
+    typedef typename base::StorageMap StorageMap;
+    using typename base::Point;
 
-    typedef typename Traits::Expanded Point;
     typedef omnigraph::de::Histogram<Point> Histogram;
-    typedef typename Graph::EdgeId EdgeId;
-    typedef std::pair<EdgeId, EdgeId> EdgePair;
-
-    typedef Container<EdgeId, InnerHistogram> InnerMap;
-    typedef Container<EdgeId, InnerMap> StorageMap;
-
-    typedef PairedIndex<G, Traits, Container> Self;
 
     //--Data access types--
 
     typedef typename StorageMap::const_iterator ImplIterator;
 
-public:
+    //---------------- Data accessing methods ----------------
+
+    /**
+     * @brief Underlying raw implementation data (for custom iterator helpers).
+     */
+    ImplIterator data_begin() const {
+        return this->storage_.begin();
+    }
+
+    /**
+     * @brief Underlying raw implementation data (for custom iterator helpers).
+     */
+    ImplIterator data_end() const {
+        return this->storage_.end();
+    }
+
     /**
      * @brief Smart proxy set representing a composite histogram of points between two edges.
      * @detail You can work with the proxy just like any constant set.
@@ -126,13 +125,6 @@ public:
             return res;
         }
 
-        /**
-         * @brief Adds a point to the histogram.
-         */
-        //void insert(Point p) {
-        //    hist_.insert(Traits::Shrink(p, offset_));
-        //}
-
         Iterator begin() const {
             return Iterator(back_ ? hist_.end() : hist_.begin(), offset_, back_);
         }
@@ -242,7 +234,7 @@ public:
             }
 
             EdgeHist dereference() const {
-                const auto& hist = iter_->second;
+                const auto& hist = *iter_->second;
                 return std::make_pair(iter_->first, HistProxy(hist, index_.CalcOffset(edge_)));
             }
 
@@ -271,11 +263,6 @@ public:
             return index_.Get(edge_, e2);
         }
 
-        //Currently unused
-        /*HistProxy<true> GetBack(EdgeId e2) const {
-            return index_.GetBack(edge_, e2);
-        }*/
-
         bool empty() const {
             return map_.empty();
         }
@@ -294,99 +281,50 @@ public:
     //---------------- Constructor ----------------
 
     PairedIndex(const Graph &graph)
-        : size_(0), graph_(graph)
+        : base(graph)
     {}
 
-public:
-    /**
-     * @brief Returns a conjugate pair for two edges.
-     */
-    EdgePair ConjugatePair(EdgeId e1, EdgeId e2) const {
-        return std::make_pair(graph_.conjugate(e2), graph_.conjugate(e1));
-    }
-    /**
-     * @brief Returns a conjugate pair for a pair of edges.
-     */
-    EdgePair ConjugatePair(EdgePair ep) const {
-        return ConjugatePair(ep.first, ep.second);
-    }
-
 private:
     bool GreaterPair(EdgeId e1, EdgeId e2) const {
         auto ep = std::make_pair(e1, e2);
-        return ep > ConjugatePair(ep);
-    }
-
-    void SwapConj(EdgeId &e1, EdgeId &e2) const {
-        auto tmp = e1;
-        e1 = graph_.conjugate(e2);
-        e2 = graph_.conjugate(tmp);
-    }
-
-    size_t CalcOffset(EdgeId e) const {
-        return this->graph().length(e);
+        return ep > this->ConjugatePair(ep);
     }
 
 public:
-    //---------------- Data inserting methods ----------------
     /**
-     * @brief Adds a point between two edges to the index,
-     *        merging weights if there's already one with the same distance.
-     */
-    void Add(EdgeId e1, EdgeId e2, Point p) {
-        InnerPoint sp = Traits::Shrink(p, CalcOffset(e1));
-        InsertWithConj(e1, e2, sp);
-    }
-
-    /**
-     * @brief Adds a whole set of points between two edges to the index.
+     * @brief Adds a lot of info from another index, using fast merging strategy.
+     *        Should be used instead of point-by-point index merge.
      */
-    template<typename TH>
-    void AddMany(EdgeId e1, EdgeId e2, const TH& hist) {
-        for (auto p : hist) {
-            InnerPoint sp = Traits::Shrink(p, CalcOffset(e1));
-            InsertWithConj(e1, e2, sp);
-        }
-    }
-
-private:
+    template<class Buffer>
+    void Merge(Buffer& index_to_add) {
+        if (index_to_add.size() == 0)
+            return;
 
-    void InsertWithConj(EdgeId e1, EdgeId e2, InnerPoint p) {
-        size_ += storage_[e1][e2].merge_point(p);
-        //TODO: deal with loops and self-conj
-        SwapConj(e1, e2);
-        size_ += storage_[e1][e2].merge_point(p);
-    }
+        auto locked_table = index_to_add.lock_table();
+        for (auto& kvpair : locked_table) {
+            EdgeId e1_to_add = kvpair.first; auto& map_to_add = kvpair.second;
 
-    bool IsSelfConj(EdgeId e1, EdgeId e2) {
-        return e1 == graph_.conjugate(e2);
-    }
+            for (auto& to_add : map_to_add) {
+                EdgePair ep(e1_to_add, to_add.first), conj = this->ConjugatePair(e1_to_add, to_add.first);
+                if (ep > conj)
+                    continue;
 
-public:
-    /**
-     * @brief Adds a lot of info from another index, using fast merging strategy.
-     *        Should be used instead of point-by-point index merge.
-     */
-    template<class Index>
-    void Merge(const Index& index_to_add) {
-        auto& base_index = storage_;
-        for (auto AddI = index_to_add.data_begin(); AddI != index_to_add.data_end(); ++AddI) {
-            EdgeId e1_to_add = AddI->first;
-            const auto& map_to_add = AddI->second;
-            InnerMap& map_already_exists = base_index[e1_to_add];
-            MergeInnerMaps(map_to_add, map_already_exists);
+                base::Merge(ep.first, ep.second, *to_add.second);
+            }
         }
-        VERIFY(size() >= index_to_add.size());
+        VERIFY(this->size() >= index_to_add.size());
     }
 
-private:
-    template<class OtherMap>
-    void MergeInnerMaps(const OtherMap& map_to_add,
-                        InnerMap& map) {
-        for (const auto& to_add : map_to_add) {
-            InnerHistogram& hist_exists = map[to_add.first];
-            size_ += hist_exists.merge(to_add.second);
+    template<class Buffer>
+    typename std::enable_if<std::is_convertible<typename Buffer::InnerMap, InnerMap>::value,
+        void>::type MoveAssign(Buffer& from) {
+        auto& base_index = this->storage_;
+        base_index.clear();
+        auto locked_table = from.lock_table();
+        for (auto& kvpair : locked_table) {
+            base_index[kvpair.first] = std::move(kvpair.second);
         }
+        this->size_ = from.size();
     }
 
 public:
@@ -398,12 +336,20 @@ public:
      * @return The number of deleted entries (0 if there wasn't such entry)
      */
     size_t Remove(EdgeId e1, EdgeId e2, Point p) {
-        InnerPoint point = Traits::Shrink(p, graph_.length(e1));
-        auto res = RemoveSingle(e1, e2, point);
-        //TODO: deal with loops and self-conj
-        SwapConj(e1, e2);
-        res += RemoveSingle(e1, e2, point);
-        return res;
+        InnerPoint point = Traits::Shrink(p, this->graph_.length(e1));
+
+        // We remove first "non-owning part"
+        EdgePair minep, maxep;
+        std::tie(minep, maxep) = this->MinMaxConjugatePair({ e1, e2 });
+
+        size_t res = RemoveSingle(minep.first, minep.second, point);
+        size_t removed = (this->IsSelfConj(e1, e2) ? res : 2 * res);
+        this->size_ -= removed;
+
+        Prune(maxep.first, maxep.second);
+        Prune(minep.first, minep.second);
+
+        return removed;
     }
 
     /**
@@ -412,51 +358,64 @@ public:
      * @return The number of deleted entries
      */
     size_t Remove(EdgeId e1, EdgeId e2) {
-        auto res = RemoveAll(e1, e2);
-        if (!IsSelfConj(e1, e2)) { //TODO: loops?
-            SwapConj(e1, e2);
-            res += RemoveAll(e1, e2);
-        }
-        return res;
+        EdgePair minep, maxep;
+        std::tie(minep, maxep) = this->MinMaxConjugatePair({ e1, e2 });
+
+        size_t removed = RemoveAll(maxep.first, maxep.second);
+        removed += RemoveAll(minep.first, minep.second);
+        this->size_ -= removed;
+
+        return removed;
     }
 
-private:
+  private:
+    void Prune(EdgeId e1, EdgeId e2) {
+        auto i1 = this->storage_.find(e1);
+        if (i1 == this->storage_.end())
+            return;
+
+        auto& map = i1->second;
+        auto i2 = map.find(e2);
+        if (i2 == map.end())
+            return;
+
+        if (!i2->second->empty())
+            return;
+
+        map.erase(e2);
+        if (map.empty())
+            this->storage_.erase(e1);
+    }
 
-    //TODO: remove duplicode
     size_t RemoveSingle(EdgeId e1, EdgeId e2, InnerPoint point) {
-        auto i1 = storage_.find(e1);
-        if (i1 == storage_.end())
+        auto i1 = this->storage_.find(e1);
+        if (i1 == this->storage_.end())
             return 0;
+
         auto& map = i1->second;
         auto i2 = map.find(e2);
         if (i2 == map.end())
             return 0;
-        InnerHistogram& hist = i2->second;
-        if (!hist.erase(point))
+
+        if (!i2->second->erase(point))
            return 0;
-        --size_;
-        if (hist.empty()) { //Prune empty maps
-            map.erase(e2);
-            if (map.empty())
-                storage_.erase(e1);
-        }
+
         return 1;
     }
 
     size_t RemoveAll(EdgeId e1, EdgeId e2) {
-        auto i1 = storage_.find(e1);
-        if (i1 == storage_.end())
+        auto i1 = this->storage_.find(e1);
+        if (i1 == this->storage_.end())
             return 0;
         auto& map = i1->second;
         auto i2 = map.find(e2);
         if (i2 == map.end())
             return 0;
-        InnerHistogram& hist = i2->second;
-        size_t size_decrease = hist.size();
+
+        size_t size_decrease = i2->second->size();
         map.erase(i2);
-        size_ -= size_decrease;
         if (map.empty()) //Prune empty maps
-            storage_.erase(i1);
+            this->storage_.erase(i1);
         return size_decrease;
     }
 
@@ -468,7 +427,7 @@ public:
      * @return The number of deleted entries
      */
     size_t Remove(EdgeId edge) {
-        InnerMap &inner_map = storage_[edge];
+        InnerMap &inner_map = this->storage_[edge];
         std::vector<EdgeId> to_remove;
         to_remove.reserve(inner_map.size());
         size_t old_size = this->size();
@@ -479,42 +438,22 @@ public:
         return old_size - this->size();
     }
 
-    //---------------- Data accessing methods ----------------
-
-    /**
-     * @brief Underlying raw implementation data (for custom iterator helpers).
-     */
-    ImplIterator data_begin() const {
-        return storage_.begin();
-    }
-
-    /**
-     * @brief Underlying raw implementation data (for custom iterator helpers).
-     */
-    ImplIterator data_end() const {
-        return storage_.end();
-    }
-
-    adt::iterator_range<ImplIterator> data() const {
-        return adt::make_range(data_begin(), data_end());
-    }
-
 private:
     //When there is no such edge, returns a fake empty map for safety
     const InnerMap& GetImpl(EdgeId e) const {
-        auto i = storage_.find(e);
-        if (i != storage_.end())
+        auto i = this->storage_.find(e);
+        if (i != this->storage_.end())
             return i->second;
         return empty_map_;
     }
 
     //When there is no such histogram, returns a fake empty histogram for safety
     const InnerHistogram& GetImpl(EdgeId e1, EdgeId e2) const {
-        auto i = storage_.find(e1);
-        if (i != storage_.end()) {
+        auto i = this->storage_.find(e1);
+        if (i != this->storage_.end()) {
             auto j = i->second.find(e2);
             if (j != i->second.end())
-                return j->second;
+                return *j->second;
         }
         return HistProxy::empty_hist();
     }
@@ -548,7 +487,7 @@ public:
      * @brief Returns a histogram proxy for all points between two edges.
      */
     HistProxy Get(EdgeId e1, EdgeId e2) const {
-        return HistProxy(GetImpl(e1, e2), CalcOffset(e1));
+        return HistProxy(GetImpl(e1, e2), this->CalcOffset(e1));
     }
 
     /**
@@ -558,99 +497,79 @@ public:
         return Get(p.first, p.second);
     }
 
-    //Currently unused
-    /**
-     * @brief Returns a backwards histogram proxy for all points between two edges.
-     */
-    /*HistProxy<true> GetBack(EdgeId e1, EdgeId e2) const {
-        return HistProxy<true>(GetImpl(e2, e1), CalcOffset(e2));
-    }*/
-
     /**
      * @brief Checks if an edge (or its conjugated twin) is consisted in the index.
      */
     bool contains(EdgeId edge) const {
-        return storage_.count(edge) + storage_.count(graph_.conjugate(edge)) > 0;
+        return this->storage_.count(edge) + this->storage_.count(this->graph_.conjugate(edge)) > 0;
     }
 
     /**
      * @brief Checks if there is a histogram for two points (or their conjugated pair).
      */
     bool contains(EdgeId e1, EdgeId e2) const {
-        auto i1 = storage_.find(e1);
-        if (i1 != storage_.end() && i1->second.count(e2))
+        auto i1 = this->storage_.find(e1);
+        if (i1 != this->storage_.end() && i1->second.count(e2))
             return true;
         return false;
     }
 
-    //---------------- Miscellaneous ----------------
-
-    /**
-     * Returns the graph the index is based on. Needed for custom iterators.
-     */
-    const Graph &graph() const { return graph_; }
-
     /**
      * @brief Inits the index with graph data. For each edge, adds a loop with zero weight.
      * @warning Do not call this on non-empty indexes.
      */
     void Init() {
         //VERIFY(size() == 0);
-        for (auto it = graph_.ConstEdgeBegin(); !it.IsEnd(); ++it)
-            Add(*it, *it, Point());
+        for (auto it = this->graph_.ConstEdgeBegin(); !it.IsEnd(); ++it)
+            this->Add(*it, *it, Point());
     }
 
-    /**
-     * @brief Clears the whole index. Used in merging.
-     */
-    void Clear() {
-        storage_.clear();
-        size_ = 0;
-    }
+private:
+    InnerMap empty_map_; //null object
+};
 
-    /**
-     * @brief Returns the physical index size (total count of all histograms).
-     */
-    size_t size() const { return size_; }
+template<class T>
+class NoLockingAdapter : public T {
+  public:
+    class locked_table {
+      public:
+        using iterator = typename T::iterator;
+        using const_iterator = typename T::const_iterator;
 
-private:
-    PairedIndex(size_t size, const Graph& graph, const StorageMap& storage)
-        : size_(size), graph_(graph), storage_(storage) {}
+        locked_table(T& table)
+                : table_(table) {}
 
-public:
-    /**
-     * @brief Returns a copy of sub-index.
-     * @deprecated Needed only in smoothing distance estimator.
-     */
-    Self SubIndex(EdgeId e1, EdgeId e2) const {
-        InnerMap tmp;
-        const auto& h1 = GetImpl(e1, e2);
-        size_t size = h1.size();
-        tmp[e1][e2] = h1;
-        SwapConj(e1, e2);
-        const auto& h2 = GetImpl(e1, e2);
-        size += h2.size();
-        tmp[e1][e2] = h2;
-        return Self(size, graph_, tmp);
+        iterator begin() { return table_.begin();  }
+        const_iterator begin() const { return table_.begin(); }
+        const_iterator cbegin() const { return table_.begin(); }
+
+        iterator end() { return table_.end(); }
+        const_iterator end() const { return table_.end(); }
+        const_iterator cend() const { return table_.end(); }
+
+        size_t size() const { return table_.size(); }
+
+      private:
+        T& table_;
     };
 
-private:
-    size_t size_;
-    const Graph& graph_;
-    StorageMap storage_;
-    InnerMap empty_map_; //null object
+    // Nothing to lock here
+    locked_table lock_table() {
+        return locked_table(*this);
+    }
 };
 
 //Aliases for common graphs
 template<typename K, typename V>
-using safe_btree_map = btree::safe_btree_map<K, V>; //Two-parameters wrapper
+using safe_btree_map = NoLockingAdapter<btree::safe_btree_map<K, V>>; //Two-parameters wrapper
 template<typename Graph>
 using PairedInfoIndexT = PairedIndex<Graph, PointTraits, safe_btree_map>;
 
 template<typename K, typename V>
-using sparse_hash_map = google::sparse_hash_map<K, V>; //Two-parameters wrapper
+using btree_map = NoLockingAdapter<btree::btree_map<K, V>>; //Two-parameters wrapper
+
 template<typename Graph>
-using UnclusteredPairedInfoIndexT = PairedIndex<Graph, RawPointTraits, sparse_hash_map>;
+using UnclusteredPairedInfoIndexT = PairedIndex<Graph, RawPointTraits, btree_map>;
 
 /**
  * @brief A collection of paired indexes which can be manipulated as one.
@@ -665,7 +584,6 @@ public:
     PairedIndices() {}
 
     PairedIndices(const typename Index::Graph& graph, size_t lib_num) {
-        data_.reserve(lib_num);
         for (size_t i = 0; i < lib_num; ++i)
             data_.emplace_back(graph);
     }
@@ -678,7 +596,7 @@ public:
     /**
      * @brief Clears all indexes.
      */
-    void Clear() { for (auto& it : data_) it.Clear(); }
+    void Clear() { for (auto& it : data_) it.clear(); }
 
     Index& operator[](size_t i) { return data_[i]; }
 
@@ -700,9 +618,9 @@ template<class Graph>
 using UnclusteredPairedInfoIndicesT = PairedIndices<UnclusteredPairedInfoIndexT<Graph>>;
 
 template<typename K, typename V>
-using unordered_map = std::unordered_map<K, V>; //Two-parameters wrapper
+using unordered_map = NoLockingAdapter<std::unordered_map<K, V>>; //Two-parameters wrapper
 template<class Graph>
-using PairedInfoBuffer = PairedIndex<Graph, RawPointTraits, unordered_map>;
+using PairedInfoBuffer = PairedBuffer<Graph, RawPointTraits, unordered_map>;
 
 template<class Graph>
 using PairedInfoBuffersT = PairedIndices<PairedInfoBuffer<Graph>>;
diff --git a/src/common/paired_info/paired_info_buffer.hpp b/src/common/paired_info/paired_info_buffer.hpp
new file mode 100644
index 0000000..2c26c7d
--- /dev/null
+++ b/src/common/paired_info/paired_info_buffer.hpp
@@ -0,0 +1,227 @@
+//***************************************************************************
+//* Copyright (c) 2016 Saint Petersburg State University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "histogram.hpp"
+#include "histptr.hpp"
+
+namespace omnigraph {
+
+namespace de {
+
+/**
+ * @brief Index of paired reads information. For each pair of edges, we store so-called histogram which is a set
+ *        of points with distance between those edges. Index is internally arranged as a map of map of histograms:
+ *        edge1 -> (edge2 -> histogram)
+ *        When we add a point (a,b)->p into the index, we automatically insert a conjugate point (b',a')->p',
+ *        (self-conjugate edge pairs are the sole exception), so the index is always conjugate-symmetrical.
+ *        Index provides access for a lot of different information:
+ *        - if you need to have a histogram between two edges, use Get(edge1, edge2);
+ *        - if you need to get a neighbourhood of some edge (second edges with corresponding histograms), use Get(edge1);
+ *        - if you need to skip a symmetrical half of that neighbourhood, use GetHalf(edge1);
+ *        Backward information (e.g., (b,a)->-p) is currently inaccessible.
+ * @param G graph type
+ * @param Traits Policy-like structure with associated types of inner and resulting points, and how to convert between them
+ * @param C map-like container type (parameterized by key and value type)
+ */
+
+template<class Derived, class G, class Traits>
+class PairedBufferBase {
+  protected:
+    typedef typename Traits::Gapped InnerPoint;
+
+  public:
+    typedef G Graph;
+    typedef typename Graph::EdgeId EdgeId;
+    typedef std::pair<EdgeId, EdgeId> EdgePair;
+    typedef typename Traits::Expanded Point;
+
+  public:
+    PairedBufferBase(const Graph &g)
+            : size_(0), graph_(g) {}
+
+    //---------------- Data inserting methods ----------------
+    /**
+     * @brief Adds a point between two edges to the index,
+     *        merging weights if there's already one with the same distance.
+     */
+    void Add(EdgeId e1, EdgeId e2, Point p) {
+        InnerPoint sp = Traits::Shrink(p, CalcOffset(e1));
+        InsertWithConj(e1, e2, sp);
+    }
+
+    /**
+     * @brief Adds a whole set of points between two edges to the index.
+     */
+    template<typename TH>
+    void AddMany(EdgeId e1, EdgeId e2, const TH& hist) {
+        for (auto p : hist) {
+            InnerPoint sp = Traits::Shrink(p, CalcOffset(e1));
+            InsertWithConj(e1, e2, sp);
+        }
+    }
+    //---------------- Miscellaneous ----------------
+
+    /**
+     * Returns the graph the index is based on. Needed for custom iterators.
+     */
+    const Graph &graph() const { return graph_; }
+
+    /**
+     * @brief Returns the physical index size (total count of all histograms).
+     */
+    size_t size() const { return size_; }
+
+  public:
+    /**
+     * @brief Returns a conjugate pair for two edges.
+     */
+    EdgePair ConjugatePair(EdgeId e1, EdgeId e2) const {
+        return std::make_pair(this->graph_.conjugate(e2), this->graph_.conjugate(e1));
+    }
+    /**
+     * @brief Returns a conjugate pair for a pair of edges.
+     */
+    EdgePair ConjugatePair(EdgePair ep) const {
+        return ConjugatePair(ep.first, ep.second);
+    }
+
+  private:
+    void InsertWithConj(EdgeId e1, EdgeId e2, InnerPoint p) {
+        EdgePair minep, maxep;
+        std::tie(minep, maxep) = this->MinMaxConjugatePair({ e1, e2 });
+        bool selfconj = this->IsSelfConj(e1, e2);
+
+        auto res = static_cast<Derived*>(this)->InsertOne(minep.first, minep.second, p);
+        size_t added = (selfconj ? res.second : 2 * res.second);
+#       pragma omp atomic
+        size_ += added;
+        if (res.first && !selfconj)
+            static_cast<Derived*>(this)->InsertHistView(maxep.first, maxep.second, res.first);
+        else if (selfconj) // This would double the weight of self-conjugate pairs
+            static_cast<Derived*>(this)->InsertOne(minep.first, minep.second, p);
+    }
+
+  protected:
+    template<class OtherHist>
+    void Merge(EdgeId e1, EdgeId e2, const OtherHist &h) {
+        EdgePair minep, maxep;
+        std::tie(minep, maxep) = this->MinMaxConjugatePair({ e1, e2 });
+        bool selfconj = this->IsSelfConj(e1, e2);
+
+        auto res = static_cast<Derived*>(this)->InsertHist(minep.first, minep.second, h);
+        size_t added = (selfconj ? res.second : 2 * res.second);
+#       pragma omp atomic
+        size_ += added;
+        if (res.first && !selfconj)
+            static_cast<Derived*>(this)->InsertHistView(maxep.first, maxep.second, res.first);
+        else if (selfconj) // This would double the weight of self-conjugate pairs
+            static_cast<Derived*>(this)->InsertHist(minep.first, minep.second, h);
+    }
+
+    std::pair<EdgePair, EdgePair> MinMaxConjugatePair(EdgePair ep) const {
+        EdgePair conj = ConjugatePair(ep);
+
+        return (ep < conj ? std::make_pair(ep, conj) : std::make_pair(conj, ep));
+    }
+
+    bool IsSelfConj(EdgeId e1, EdgeId e2) const {
+        return e1 == this->graph_.conjugate(e2);
+    }
+
+    size_t CalcOffset(EdgeId e) const {
+        return this->graph().length(e);
+    }
+
+  protected:
+    size_t size_;
+    const Graph& graph_;
+};
+
+
+template<typename G, typename Traits, template<typename, typename> class Container>
+class PairedBuffer : public PairedBufferBase<PairedBuffer<G, Traits, Container>,
+                                             G, Traits> {
+    typedef PairedBuffer<G, Traits, Container> self;
+    typedef PairedBufferBase<self, G, Traits> base;
+
+    friend class PairedBufferBase<self, G, Traits>;
+
+  protected:
+    using typename base::InnerPoint;
+    typedef omnigraph::de::Histogram<InnerPoint> InnerHistogram;
+    typedef omnigraph::de::StrongWeakPtr<InnerHistogram> InnerHistPtr;
+
+  public:
+    using typename base::Graph;
+    using typename base::EdgeId;
+    using typename base::EdgePair;
+    using typename base::Point;
+
+    typedef Container<EdgeId, InnerHistPtr> InnerMap;
+    typedef Container<EdgeId, InnerMap> StorageMap;
+
+  public:
+    PairedBuffer(const Graph &g)
+            : base(g) {
+        clear();
+    }
+
+    //---------------- Miscellaneous ----------------
+
+    /**
+     * @brief Clears the whole index. Used in merging.
+     */
+    void clear() {
+        storage_.clear();
+        this->size_ = 0;
+    }
+
+    typename StorageMap::locked_table lock_table() {
+        return storage_.lock_table();
+    }
+
+  private:
+    std::pair<typename InnerHistPtr::pointer, size_t> InsertOne(EdgeId e1, EdgeId e2, InnerPoint p) {
+        InnerMap& second = storage_[e1];
+        typename InnerHistPtr::pointer inserted = nullptr;
+        if (!second.count(e2)) {
+            inserted = new InnerHistogram();
+            second.insert(std::make_pair(e2, InnerHistPtr(inserted, /* owning */ true)));
+        }
+
+        size_t added = second[e2]->merge_point(p);
+
+        return { inserted, added };
+    }
+
+    template<class OtherHist>
+    std::pair<typename InnerHistPtr::pointer, size_t> InsertHist(EdgeId e1, EdgeId e2, const OtherHist &h) {
+        InnerMap& second = storage_[e1];
+        typename InnerHistPtr::pointer inserted = nullptr;
+        if (!second.count(e2)) {
+            inserted = new InnerHistogram();
+            second.insert(std::make_pair(e2, InnerHistPtr(inserted, /* owning */ true)));
+        }
+
+        size_t added = second[e2]->merge(h);
+
+        return { inserted, added };
+    }
+
+    void InsertHistView(EdgeId e1, EdgeId e2, typename InnerHistPtr::pointer p) {
+        auto res = storage_[e1].insert(std::make_pair(e2, InnerHistPtr(p, /* owning */ false)));
+        VERIFY_MSG(res.second, "Index insertion inconsistency");
+    }
+
+  protected:
+    StorageMap storage_;
+};
+
+} // namespace de
+
+} // namespace omnigraph
diff --git a/src/modules/paired_info/paired_info_helpers.hpp b/src/common/paired_info/paired_info_helpers.hpp
similarity index 100%
rename from src/modules/paired_info/paired_info_helpers.hpp
rename to src/common/paired_info/paired_info_helpers.hpp
diff --git a/src/modules/paired_info/peak_finder.hpp b/src/common/paired_info/peak_finder.hpp
similarity index 99%
rename from src/modules/paired_info/peak_finder.hpp
rename to src/common/paired_info/peak_finder.hpp
index c127108..059c5ea 100644
--- a/src/modules/paired_info/peak_finder.hpp
+++ b/src/common/paired_info/peak_finder.hpp
@@ -15,7 +15,7 @@
 #ifndef PEAKFINDER_HPP_
 #define PEAKFINDER_HPP_
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 #include "data_divider.hpp"
 #include "paired_info.hpp"
 #include <stdio.h>
diff --git a/src/modules/paired_info/smoothing_distance_estimation.hpp b/src/common/paired_info/smoothing_distance_estimation.hpp
similarity index 98%
rename from src/modules/paired_info/smoothing_distance_estimation.hpp
rename to src/common/paired_info/smoothing_distance_estimation.hpp
index 04f9410..c605e00 100644
--- a/src/modules/paired_info/smoothing_distance_estimation.hpp
+++ b/src/common/paired_info/smoothing_distance_estimation.hpp
@@ -228,8 +228,8 @@ private:
 
         // Check, whether two histograms intersect. If not, we can just merge them
         // straightforwardly.
-        if (math::ls(where.rbegin()->d, what.min().d + shift) ||
-            math::gr(where.begin()->d, what.max().d + shift)) {
+        if (math::ls(where.rbegin()->d, what.min().d + float(shift)) ||
+            math::gr(where.begin()->d, what.max().d + float(shift))) {
             for (auto to_be_added : what) {
                 to_be_added.d += shift;
                 where.insert(to_be_added);
@@ -238,7 +238,7 @@ private:
             for (auto to_be_added : what) {
                 to_be_added.d += shift;
                 auto low_bound = std::lower_bound(where.begin(), where.end(), to_be_added);
-                if (to_be_added == *low_bound) {
+                if (low_bound != where.end() && to_be_added == *low_bound) {
                     to_be_added.weight += low_bound->weight;
                     where.erase(to_be_added);
                     where.insert(to_be_added);
diff --git a/src/modules/paired_info/split_path_constructor.hpp b/src/common/paired_info/split_path_constructor.hpp
similarity index 87%
rename from src/modules/paired_info/split_path_constructor.hpp
rename to src/common/paired_info/split_path_constructor.hpp
index 9cf0c2f..d2d23b2 100644
--- a/src/modules/paired_info/split_path_constructor.hpp
+++ b/src/common/paired_info/split_path_constructor.hpp
@@ -14,7 +14,8 @@
 
 #pragma once
 
-#include "dev_support/logger/logger.hpp"
+#include <common/assembly_graph/paths/path_utils.hpp>
+#include "utils/logger/logger.hpp"
 #include "paired_info/paired_info.hpp"
 #include "assembly_graph/paths/path_processor.hpp"
 #include "paired_info/pair_info_bounds.hpp"
@@ -65,7 +66,7 @@ class SplitPathConstructor {
     typedef typename Graph::EdgeId EdgeId;
     typedef PathInfoClass<Graph> PathInfo;
     typedef omnigraph::de::PairInfo<EdgeId> PairInfo;
-
+    static const size_t MAX_DIJKSTRA_DEPTH = 3000;
 public:
     SplitPathConstructor(const Graph &graph) : graph_(graph) { }
 
@@ -75,7 +76,9 @@ public:
         for (auto i : pi.Get(cur_edge))
             for (auto j : i.second)
                 pair_infos.emplace_back(cur_edge, i.first, j);
-
+        std::sort(pair_infos.begin(), pair_infos.end(),[&](const PairInfo p1, const PairInfo p2){
+            return (p1.point.d > p2.point.d || ((p1.point.d == p2.point.d) && (p1.second < p2.second )));
+        });
         vector<PathInfo> result;
         if (pair_infos.empty())
             return result;
@@ -85,12 +88,10 @@ public:
         size_t path_upper_bound = PairInfoPathLengthUpperBound(graph_.k(), (size_t) is, is_var);
 
         //FIXME is path_upper_bound enough?
-        PathProcessor<Graph> path_processor(graph_,
-                                            graph_.EdgeEnd(cur_edge),
-                                            path_upper_bound);
 
-        TRACE("Path_processor is done");
 
+        typename omnigraph::DijkstraHelper<Graph>::BoundedDijkstra dijkstra(omnigraph::DijkstraHelper<Graph>::CreateBoundedDijkstra(graph_, path_upper_bound, MAX_DIJKSTRA_DEPTH));
+        dijkstra.Run(graph_.EdgeEnd(cur_edge));
         for (size_t i = pair_infos.size(); i > 0; --i) {
             const PairInfo &cur_info = pair_infos[i - 1];
             if (math::le(cur_info.d(), 0.))
@@ -98,10 +99,11 @@ public:
             if (pair_info_used[i - 1])
                 continue;
             DEBUG("SPC: pi " << cur_info);
+
             vector<EdgeId> common_part = GetCommonPathsEnd(graph_, cur_edge, cur_info.second,
                                                            (size_t) (cur_info.d() - cur_info.var()),
                                                            (size_t) (cur_info.d() + cur_info.var()),
-                                                           path_processor);
+                                                           dijkstra);
             DEBUG("Found common part of size " << common_part.size());
             PathInfoClass<Graph> sub_res(cur_edge);
             if (common_part.size() > 0) {
diff --git a/src/modules/paired_info/weighted_distance_estimation.hpp b/src/common/paired_info/weighted_distance_estimation.hpp
similarity index 68%
rename from src/modules/paired_info/weighted_distance_estimation.hpp
rename to src/common/paired_info/weighted_distance_estimation.hpp
index 9928ef9..486a608 100644
--- a/src/modules/paired_info/weighted_distance_estimation.hpp
+++ b/src/common/paired_info/weighted_distance_estimation.hpp
@@ -56,7 +56,8 @@ protected:
         EstimHist result;
         int maxD = rounded_d(histogram.max()), minD = rounded_d(histogram.min());
         vector<int> forward;
-        for (auto length : raw_forward) {
+        for (auto len : raw_forward) {
+            int length = (int) len;
             if (minD - (int) this->max_distance_ <= length && length <= maxD + (int) this->max_distance_) {
                 forward.push_back(length);
             }
@@ -65,31 +66,30 @@ protected:
             return result;
 
         DEDistance max_dist = this->max_distance_;
-        size_t cur_dist = 0;
+        size_t i = 0;
         vector<double> weights(forward.size());
         for (auto point : histogram) {
-            if (ls(2. * point.d + (double) second_len, (double) first_len))
+            DEDistance cur_dist(forward[i]), next_dist(forward[i + 1]);
+            if (le(2 * point.d + DEDistance(second_len), DEDistance(first_len)))
                 continue;
-            while (cur_dist + 1 < forward.size() && (double) forward[cur_dist + 1] < point.d) {
-                ++cur_dist;
+            while (i + 1 < forward.size() && next_dist < point.d) {
+                ++i;
             }
-            if (cur_dist + 1 < forward.size() && ls((double) forward[cur_dist + 1] - point.d,
-                                                    point.d - (double) forward[cur_dist])) {
-                ++cur_dist;
-                if (le(abs(forward[cur_dist] - point.d), max_dist))
-                    weights[cur_dist] += point.weight * weight_f_(forward[cur_dist] - rounded_d(point));
+            if (i + 1 < forward.size() && ls(DEDistance(next_dist) - point.d, point.d - DEDistance(cur_dist))) {
+                ++i;
+                if (le(abs(cur_dist - point.d), max_dist))
+                    weights[i] += point.weight * weight_f_(forward[i] - rounded_d(point));
             }
-            else if (cur_dist + 1 < forward.size() && eq(forward[cur_dist + 1] - point.d,
-                                                         point.d - forward[cur_dist])) {
-                if (le(abs(forward[cur_dist] - point.d), max_dist))
-                    weights[cur_dist] += point.weight * 0.5 * weight_f_(forward[cur_dist] - rounded_d(point));
+            else if (i + 1 < forward.size() && eq(next_dist - point.d, point.d - cur_dist)) {
+                if (le(abs(cur_dist - point.d), max_dist))
+                    weights[i] += point.weight * 0.5 * weight_f_(forward[i] - rounded_d(point));
 
-                ++cur_dist;
+                ++i;
 
-                if (le(abs(forward[cur_dist] - point.d), max_dist))
-                    weights[cur_dist] += point.weight * 0.5 * weight_f_(forward[cur_dist] - rounded_d(point));
-            } else if (le(abs(forward[cur_dist] - point.d), max_dist))
-                weights[cur_dist] += point.weight * weight_f_(forward[cur_dist] - rounded_d(point));
+                if (le(abs(cur_dist - point.d), max_dist))
+                    weights[i] += point.weight * 0.5 * weight_f_(forward[i] - rounded_d(point));
+            } else if (le(abs(cur_dist - point.d), max_dist))
+                weights[i] += point.weight * weight_f_(forward[i] - rounded_d(point));
         }
 
         for (size_t i = 0; i < forward.size(); ++i)
diff --git a/src/modules/paired_info/weights.hpp b/src/common/paired_info/weights.hpp
similarity index 86%
rename from src/modules/paired_info/weights.hpp
rename to src/common/paired_info/weights.hpp
index 8812d68..c0e8b43 100644
--- a/src/modules/paired_info/weights.hpp
+++ b/src/common/paired_info/weights.hpp
@@ -6,12 +6,13 @@ using omnigraph::Range;
 using omnigraph::MappingRange;
 
 namespace debruijn_graph {
-inline double PairedReadCountWeight(const MappingRange&, const MappingRange&) {
+inline double PairedReadCountWeight(const std::pair<EdgeId, EdgeId>&,
+                                    const MappingRange&, const MappingRange&) {
     return 1.;
 }
 
-inline double KmerCountProductWeight(const MappingRange& mr1,
-                                     const MappingRange& mr2) {
+inline double KmerCountProductWeight(const std::pair<EdgeId, EdgeId>&,
+                                     const MappingRange& mr1, const MappingRange& mr2) {
     return (double)(mr1.initial_range.size() * mr2.initial_range.size());
 }
 
@@ -79,4 +80,4 @@ public:
 inline double UnityFunction(int /*x*/) {
     return 1.;
 }
-}
\ No newline at end of file
+}
diff --git a/src/modules/pipeline/CMakeLists.txt b/src/common/pipeline/CMakeLists.txt
similarity index 100%
rename from src/modules/pipeline/CMakeLists.txt
rename to src/common/pipeline/CMakeLists.txt
diff --git a/src/modules/pipeline/config_common.hpp b/src/common/pipeline/config_common.hpp
similarity index 97%
rename from src/modules/pipeline/config_common.hpp
rename to src/common/pipeline/config_common.hpp
index e540017..0f38490 100755
--- a/src/modules/pipeline/config_common.hpp
+++ b/src/common/pipeline/config_common.hpp
@@ -7,9 +7,9 @@
 
 #pragma once
 
-#include "dev_support/simple_tools.hpp"
-#include "dev_support/path_helper.hpp"
-#include "dev_support/verify.hpp"
+#include "utils/simple_tools.hpp"
+#include "utils/path_helper.hpp"
+#include "utils/verify.hpp"
 
 // todo: undo dirty fix
 
diff --git a/src/modules/pipeline/config_singl.hpp b/src/common/pipeline/config_singl.hpp
similarity index 97%
rename from src/modules/pipeline/config_singl.hpp
rename to src/common/pipeline/config_singl.hpp
index 9bf726e..93f1767 100644
--- a/src/modules/pipeline/config_singl.hpp
+++ b/src/common/pipeline/config_singl.hpp
@@ -9,7 +9,7 @@
 #define __CONFIG_SINGL_HPP__
 
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 
 #include <string>
 
diff --git a/src/modules/pipeline/config_struct.cpp b/src/common/pipeline/config_struct.cpp
similarity index 92%
rename from src/modules/pipeline/config_struct.cpp
rename to src/common/pipeline/config_struct.cpp
index fecc73b..ad5795c 100644
--- a/src/modules/pipeline/config_struct.cpp
+++ b/src/common/pipeline/config_struct.cpp
@@ -7,12 +7,12 @@
 #include "pipeline/config_struct.hpp"
 
 #include "pipeline/config_common.hpp"
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
-#include "dev_support/logger/logger.hpp"
-#include "dev_support/verify.hpp"
+#include "utils/logger/logger.hpp"
+#include "utils/verify.hpp"
 
-#include "io/reads_io/file_reader.hpp"
+#include "io/reads/file_reader.hpp"
 
 #include <string>
 #include <vector>
@@ -43,6 +43,9 @@ void SequencingLibrary<debruijn_graph::config::DataSetData>::yamlize(llvm::yaml:
   io.mapOptional("pi threshold"               , data_.pi_threshold);
   io.mapOptional("binary converted"           , data_.binary_reads_info.binary_coverted);
   io.mapOptional("single reads mapped"        , data_.single_reads_mapped);
+  io.mapOptional("library index"              , data_.lib_index);
+  io.mapOptional("number of reads"            , data_.read_count);
+  io.mapOptional("total nucleotides"          , data_.total_nucls);
 }
 
 template<>
@@ -90,7 +93,9 @@ vector<string> PipelineTypeNames() {
                     {"moleculo", pipeline_type::moleculo},
                     {"diploid", pipeline_type::diploid},
                     {"rna", pipeline_type::rna},
-                    {"plasmid", pipeline_type::plasmid}}, pipeline_type::total);
+                    {"plasmid", pipeline_type::plasmid},
+                    {"large_genome", pipeline_type::large_genome}
+                    }, pipeline_type::total);
 }
 
 vector<string> ConstructionModeNames() {
@@ -120,6 +125,14 @@ vector<string> SingleReadResolveModeNames() {
                     {"all", single_read_resolving_mode::all}}, single_read_resolving_mode::total);
 }
 
+vector<string> BrokenScaffoldsModeNames() {
+    return CheckedNames<output_broken_scaffolds>({
+                                             {"none", output_broken_scaffolds::none},
+                                             {"break_gaps", output_broken_scaffolds::break_gaps},
+                                             {"break_all", output_broken_scaffolds::break_all}}, output_broken_scaffolds::total);
+}
+
+
 void load_lib_data(const std::string& prefix) {
   // First, load the data into separate libs
   cfg::get_writable().ds.reads.load(prefix + ".lib_data");
@@ -176,6 +189,13 @@ void load(single_read_resolving_mode &rm, boost::property_tree::ptree const &pt,
     }
 }
 
+void load(output_broken_scaffolds &obs, boost::property_tree::ptree const &pt,
+          std::string const &key, bool complete) {
+    if (complete || pt.find(key) != pt.not_found()) {
+        obs = ModeByName<output_broken_scaffolds>(pt.get<std::string>(key), BrokenScaffoldsModeNames());
+    }
+}
+
 void load(construction_mode& con_mode,
           boost::property_tree::ptree const& pt, std::string const& key,
           bool complete) {
@@ -257,6 +277,7 @@ void load(debruijn_config::simplification::relative_coverage_edge_disconnector&
   using config_common::load;
   load(relative_ed.enabled, pt, "enabled", complete);
   load(relative_ed.diff_mult, pt, "diff_mult", complete);
+  load(relative_ed.edge_sum, pt, "edge_sum", complete);
 }
 
 void load(debruijn_config::simplification::relative_coverage_comp_remover& rcc,
@@ -363,13 +384,16 @@ void load(debruijn_config::simplification::hidden_ec_remover& her,
 }
 
 void load(debruijn_config::distance_estimator& de,
-          boost::property_tree::ptree const& pt, bool /*complete*/) {
+          boost::property_tree::ptree const& pt, bool complete) {
   using config_common::load;
 
-  load(de.linkage_distance_coeff, pt, "linkage_distance_coeff");
-  load(de.max_distance_coeff, pt, "max_distance_coeff");
-  load(de.max_distance_coeff_scaff, pt, "max_distance_coeff_scaff");
-  load(de.filter_threshold, pt, "filter_threshold");
+  load(de.linkage_distance_coeff, pt, "linkage_distance_coeff", complete);
+  load(de.max_distance_coeff, pt, "max_distance_coeff", complete);
+  load(de.max_distance_coeff_scaff, pt, "max_distance_coeff_scaff", complete);
+  load(de.clustered_filter_threshold, pt, "clustered_filter_threshold", complete);
+  load(de.raw_filter_threshold, pt, "raw_filter_threshold", complete);
+  load(de.rounding_coeff, pt, "rounding_coeff", complete);
+  load(de.rounding_thr, pt, "rounding_threshold", complete);
 }
 
 void load(debruijn_config::smoothing_distance_estimator& ade,
@@ -387,8 +411,8 @@ void load(debruijn_config::smoothing_distance_estimator& ade,
 }
 
 //FIXME make amb_de optional field
-inline void load(debruijn_config::ambiguous_distance_estimator &amde,
-                 boost::property_tree::ptree const &pt, bool complete) {
+void load(debruijn_config::ambiguous_distance_estimator &amde,
+          boost::property_tree::ptree const &pt, bool complete) {
     using config_common::load;
 
     load(amde.enabled, pt, "enabled", complete);
@@ -431,6 +455,8 @@ void load(debruijn_config::pacbio_processor& pb,
   load(pb.domination_cutoff, pt, "domination_cutoff");
   load(pb.path_limit_stretching, pt, "path_limit_stretching");
   load(pb.path_limit_pressing, pt, "path_limit_pressing");
+  load(pb.max_path_in_dijkstra, pt, "max_path_in_dijkstra");
+  load(pb.max_vertex_in_dijkstra, pt, "max_vertex_in_dijkstra");
   load(pb.ignore_middle_alignment, pt, "ignore_middle_alignment");
   load(pb.long_seq_limit, pt, "long_seq_limit");
   load(pb.pacbio_min_gap_quantity, pt, "pacbio_min_gap_quantity");
@@ -475,6 +501,14 @@ void load(debruijn_config::gap_closer& gc,
   load(gc.weight_threshold, pt, "weight_threshold");
 }
 
+void load(debruijn_config::contig_output& co,
+          boost::property_tree::ptree const& pt, bool complete) {
+    using config_common::load;
+    load(co.contigs_name, pt, "contigs_name", complete);
+    load(co.scaffolds_name, pt, "scaffolds_name", complete);
+    load(co.obs_mode, pt, "output_broken_scaffolds", complete);
+}
+
 void load(debruijn_config::graph_read_corr_cfg& graph_read_corr,
           boost::property_tree::ptree const& pt, bool /*complete*/) {
   using config_common::load;
@@ -534,6 +568,7 @@ void load(debruijn_config::simplification& simp,
   using config_common::load;
 
   load(simp.cycle_iter_count, pt, "cycle_iter_count", complete);
+
   load(simp.post_simplif_enabled, pt, "post_simplif_enabled", complete);
   load(simp.topology_simplif_enabled, pt, "topology_simplif_enabled", complete);
   load(simp.tc, pt, "tc", complete); // tip clipper:
@@ -556,7 +591,7 @@ void load(debruijn_config::simplification& simp,
   load(simp.init_clean, pt, "init_clean", complete); // presimplification
   load(simp.final_tc, pt, "final_tc", complete);
   load(simp.final_br, pt, "final_br", complete);
-  simp.second_final_br = simp.final_br; 
+  simp.second_final_br = simp.final_br;
   load(simp.second_final_br, pt, "second_final_br", false);
 }
 
@@ -688,6 +723,8 @@ void load_cfg(debruijn_config &cfg, boost::property_tree::ptree const &pt,
     //FIXME
     load(cfg.tsa, pt, "tsa", complete);
 
+    load(cfg.co, pt, "contig_output", complete);
+
     load(cfg.use_unipaths, pt, "use_unipaths", complete);
 
     load(cfg.pb, pt, "pacbio_processor", complete);
@@ -730,6 +767,8 @@ void load_cfg(debruijn_config &cfg, boost::property_tree::ptree const &pt,
 
     load(cfg.bwa, pt, "bwa_aligner", complete);
 
+    load(cfg.series_analysis, pt, "series_analysis", complete);
+
     if (pt.count("plasmid")) {
         VERIFY_MSG(!cfg.pd, "Option can be loaded only once");
         cfg.pd.reset(debruijn_config::plasmid());
diff --git a/src/modules/pipeline/config_struct.hpp b/src/common/pipeline/config_struct.hpp
similarity index 96%
rename from src/modules/pipeline/config_struct.hpp
rename to src/common/pipeline/config_struct.hpp
index 70e4e3b..deddf72 100644
--- a/src/modules/pipeline/config_struct.hpp
+++ b/src/common/pipeline/config_struct.hpp
@@ -7,7 +7,7 @@
 #pragma once
 
 #include "pipeline/config_singl.hpp"
-#include "algorithms/path_extend/pe_config_struct.hpp"
+#include "modules/path_extend/pe_config_struct.hpp"
 #include "pipeline/library.hpp"
 
 #include <boost/optional.hpp>
@@ -39,6 +39,7 @@ enum class pipeline_type : char {
     diploid,
     rna,
     plasmid,
+    large_genome,
 
     total
 };
@@ -81,6 +82,14 @@ enum class single_read_resolving_mode : char {
     total
 };
 
+enum class output_broken_scaffolds: char {
+    none = 0,
+    break_gaps,
+    break_all,
+
+    total
+};
+
 std::vector<std::string> SingleReadResolveModeNames();
 
 template<typename mode_t>
@@ -293,6 +302,7 @@ struct debruijn_config {
         struct relative_coverage_edge_disconnector {
             bool enabled;
             double diff_mult;
+            size_t edge_sum;
         };
 
         struct relative_coverage_comp_remover {
@@ -317,6 +327,7 @@ struct debruijn_config {
         };
 
         size_t cycle_iter_count;
+
         bool post_simplif_enabled;
         bool topology_simplif_enabled;
         tip_clipper tc;
@@ -371,7 +382,10 @@ struct debruijn_config {
         double linkage_distance_coeff;
         double max_distance_coeff;
         double max_distance_coeff_scaff;
-        double filter_threshold;
+        double clustered_filter_threshold;
+        unsigned raw_filter_threshold;
+        double rounding_thr;
+        double rounding_coeff;
     };
 
     struct smoothing_distance_estimator {
@@ -411,6 +425,8 @@ struct debruijn_config {
       double path_limit_stretching; //1.3
       double path_limit_pressing;//0.7
       bool ignore_middle_alignment; //true; false for stats and mate_pairs;
+      size_t max_path_in_dijkstra; //15000
+      size_t max_vertex_in_dijkstra; //2000
   //gap_closer
       size_t long_seq_limit; //400
       size_t pacbio_min_gap_quantity; //2
@@ -484,6 +500,7 @@ struct debruijn_config {
     std::string output_saves;
     std::string final_contigs_file;
     std::string log_filename;
+    std::string series_analysis;
 
     bool output_pictures;
     bool output_nonfinal_contigs;
@@ -505,6 +522,14 @@ struct debruijn_config {
         std::string genome_file;
     };
 
+    struct contig_output {
+        std::string contigs_name;
+        std::string scaffolds_name;
+        output_broken_scaffolds obs_mode;
+    };
+
+    contig_output co;
+
     boost::optional<scaffold_correction> sc_cor;
     truseq_analysis tsa;
     std::string load_from;
diff --git a/src/modules/pipeline/genomic_info.hpp b/src/common/pipeline/genomic_info.hpp
similarity index 100%
rename from src/modules/pipeline/genomic_info.hpp
rename to src/common/pipeline/genomic_info.hpp
diff --git a/src/modules/pipeline/genomic_info_filler.cpp b/src/common/pipeline/genomic_info_filler.cpp
similarity index 94%
rename from src/modules/pipeline/genomic_info_filler.cpp
rename to src/common/pipeline/genomic_info_filler.cpp
index 65a8eda..67a91aa 100644
--- a/src/modules/pipeline/genomic_info_filler.cpp
+++ b/src/common/pipeline/genomic_info_filler.cpp
@@ -7,8 +7,8 @@
 
 #include "genomic_info_filler.hpp"
 
-#include "math/kmer_coverage_model.hpp"
-#include "algorithms/simplification/ec_threshold_finder.hpp"
+#include "utils/coverage_model/kmer_coverage_model.hpp"
+#include "modules/simplification/ec_threshold_finder.hpp"
 
 #include "llvm/Support/YAMLTraits.h"
 #include "llvm/Support/Errc.h"
@@ -123,7 +123,7 @@ void GenomicInfoFiller::run(conj_graph_pack &gp, const char*) {
         gp.ginfo.set_cov_histogram(extract(tmp));
 
         // Fit the coverage model and get the threshold
-        cov_model::KMerCoverageModel CovModel(gp.ginfo.cov_histogram(), cfg::get().kcm.probability_threshold, cfg::get().kcm.strong_probability_threshold);
+        utils::coverage_model::KMerCoverageModel CovModel(gp.ginfo.cov_histogram(), cfg::get().kcm.probability_threshold, cfg::get().kcm.strong_probability_threshold);
         CovModel.Fit();
 
         gp.ginfo.set_genome_size(CovModel.GetGenomeSize());
diff --git a/src/modules/pipeline/genomic_info_filler.hpp b/src/common/pipeline/genomic_info_filler.hpp
similarity index 100%
rename from src/modules/pipeline/genomic_info_filler.hpp
rename to src/common/pipeline/genomic_info_filler.hpp
diff --git a/src/modules/pipeline/graph_pack.hpp b/src/common/pipeline/graph_pack.hpp
similarity index 84%
rename from src/modules/pipeline/graph_pack.hpp
rename to src/common/pipeline/graph_pack.hpp
index e445ba0..e05a243 100644
--- a/src/modules/pipeline/graph_pack.hpp
+++ b/src/common/pipeline/graph_pack.hpp
@@ -7,22 +7,24 @@
 
 #pragma once
 
-#include "data_structures/indices/edge_position_index.hpp"
-#include "data_structures/indices/storing_traits.hpp"
-#include "data_structures/sequence/genome_storage.hpp"
+#include "utils/indices/edge_position_index.hpp"
+#include "utils/indices/storing_traits.hpp"
+#include "sequence/genome_storage.hpp"
 #include "assembly_graph/handlers/id_track_handler.hpp"
 #include "assembly_graph/handlers/edges_position_handler.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
 #include "paired_info/paired_info.hpp"
 #include "pipeline/config_struct.hpp"
-#include "assembly_graph/graph_alignment/edge_index.hpp"
+#include "modules/alignment/edge_index.hpp"
 #include "assembly_graph/graph_support/genomic_quality.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
 #include "genomic_info.hpp"
-#include "assembly_graph/graph_alignment/long_read_storage.hpp"
+#include "modules/alignment/long_read_storage.hpp"
 #include "assembly_graph/graph_support/detail_coverage.hpp"
 #include "assembly_graph/components/connected_component.hpp"
-#include "assembly_graph/graph_alignment/kmer_mapper.hpp"
+#include "modules/alignment/kmer_mapper.hpp"
+#include "common/visualization/position_filler.hpp"
+#include "common/assembly_graph/paths/bidirectional_path.hpp"
 
 namespace debruijn_graph {
 
@@ -31,7 +33,7 @@ struct graph_pack: private boost::noncopyable {
     typedef Graph graph_t;
     typedef typename Graph::VertexId VertexId;
     typedef typename Graph::EdgeId EdgeId;
-    typedef runtime_k::RtSeq seq_t;
+    typedef RtSeq seq_t;
     typedef EdgeIndex<graph_t> index_t;
     using PairedInfoIndicesT = omnigraph::de::PairedInfoIndicesT<Graph>;
     //typedef omnigraph::de::PairedInfoIndicesT<Graph> PairedInfoIndicesT;
@@ -54,6 +56,8 @@ struct graph_pack: private boost::noncopyable {
     EdgeQuality<Graph> edge_qual;
     mutable EdgesPositionHandler<graph_t> edge_pos;
     ConnectedComponentCounter components;
+    path_extend::PathContainer contig_paths;
+
     graph_pack(size_t k, const std::string &workdir, size_t lib_count,
                         const std::string &genome = "",
                         size_t flanking_range = 50,
@@ -70,7 +74,8 @@ struct graph_pack: private boost::noncopyable {
               genome(genome),
               edge_qual(g),
               edge_pos(g, max_mapping_gap + k, max_gap_diff),
-              components(g)
+              components(g),
+              contig_paths()
     { 
         if (detach_indices) {
             DetachAll();
@@ -116,8 +121,8 @@ struct graph_pack: private boost::noncopyable {
             edge_pos.Attach();
         }
         edge_pos.clear();
-        FillPos(*this, genome.GetSequence(), "ref0");
-        FillPos(*this, !genome.GetSequence(), "ref1");
+        visualization::position_filler::FillPos(*this, genome.GetSequence(), "ref0");
+        visualization::position_filler::FillPos(*this, !genome.GetSequence(), "ref1");
     }
     
     void EnsureDebugInfo() {
@@ -133,13 +138,17 @@ struct graph_pack: private boost::noncopyable {
 
     void ClearRRIndices() {
         for (auto& pi : paired_indices) {
-            pi.Clear();
+            pi.clear();
         }
         clustered_indices.Clear();
         scaffolding_indices.Clear();
         single_long_reads.Clear();
     }
 
+    void ClearPaths() {
+        contig_paths.DeleteAllPaths();
+    }
+
     void DetachAll() {
         index.Detach();
         kmer_mapper.Detach();
diff --git a/src/modules/pipeline/graphio.hpp b/src/common/pipeline/graphio.hpp
similarity index 97%
rename from src/modules/pipeline/graphio.hpp
rename to src/common/pipeline/graphio.hpp
index d47d00a..118b484 100644
--- a/src/modules/pipeline/graphio.hpp
+++ b/src/common/pipeline/graphio.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 
 #include "assembly_graph/handlers/id_track_handler.hpp"
 #include "assembly_graph/handlers/edges_position_handler.hpp"
@@ -15,11 +15,11 @@
 
 #include "paired_info/paired_info.hpp"
 
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
 #include "assembly_graph/graph_support/detail_coverage.hpp"
-#include "assembly_graph/graph_alignment/long_read_storage.hpp"
+#include "modules/alignment/long_read_storage.hpp"
 
-#include "assembly_graph/graph_core/order_and_law.hpp"
+#include "assembly_graph/core/order_and_law.hpp"
 
 #include <cmath>
 #include <set>
@@ -45,8 +45,8 @@ void SaveKmerMapper(const string& file_name,
     DEBUG("Saving kmer mapper, " << file_name <<" created");
     VERIFY(file.is_open());
 
-    uint32_t k_ = (uint32_t) mapper.get_k();
-    file.write((char *) &k_, sizeof(uint32_t));
+    uint32_t k = (uint32_t) mapper.k();
+    file.write((char *) &k, sizeof(uint32_t));
     mapper.BinWrite(file);
 
     file.close();
@@ -68,7 +68,7 @@ bool LoadKmerMapper(const string& file_name,
     uint32_t k_;
     file.read((char *) &k_, sizeof(uint32_t));
 
-    VERIFY_MSG(k_ == kmer_mapper.get_k(), "Cannot read kmer mapper, different Ks");
+    VERIFY_MSG(k_ == kmer_mapper.k(), "Cannot read kmer mapper, different Ks");
     kmer_mapper.BinRead(file);
 
     file.close();
@@ -309,8 +309,12 @@ class DataPrinter {
   protected:
 
     //todo optimize component copy
-    DataPrinter(const GraphComponent<Graph>& component) :
-            component_(component) {
+//    DataPrinter(const GraphComponent<Graph>& component) :
+//            component_(component) {
+//    }
+
+    DataPrinter(GraphComponent<Graph>&& component) :
+            component_(std::move(component)) {
     }
 
     const GraphComponent<Graph>& component() const {
@@ -329,7 +333,7 @@ class ConjugateDataPrinter: public DataPrinter<Graph> {
     typedef typename Graph::VertexId VertexId;
   public:
     ConjugateDataPrinter(Graph const& g) :
-            base(g) {
+            base(GraphComponent<Graph>::WholeGraph(g)) {
     }
 
     ConjugateDataPrinter(const GraphComponent<Graph>& graph_component) :
@@ -338,7 +342,7 @@ class ConjugateDataPrinter: public DataPrinter<Graph> {
 
     template<class VertexIt>
     ConjugateDataPrinter(const Graph& g, VertexIt begin, VertexIt end) :
-            base(GraphComponent<Graph>(g, begin, end, true)) {
+            base(GraphComponent<Graph>::FromVertices(g, begin, end, true)) {
     }
 
     std::string ToPrint(VertexId v) const {
@@ -885,6 +889,7 @@ void ScanGraphPack(const string& file_name,
             WARN("Cannot load kmer_mapper, information on projected kmers will be missed");
         }
     if (!scanner.LoadFlankingCoverage(file_name, gp.flanking_cov)) {
+        WARN("Cannot load flanking coverage, flanking coverage will be recovered from index");
         gp.flanking_cov.Fill(gp.index.inner_index());
     }
 }
@@ -1003,7 +1008,9 @@ void ScanWithPairedIndices(const string& file_name, graph_pack& gp,
 template<class graph_pack>
 void ScanWithClusteredIndices(const string& file_name, graph_pack& gp,
                               PairedInfoIndicesT<typename graph_pack::graph_t>& paired_indices) {
-    ScanWithPairedIndices(file_name, gp, paired_indices, true);
+    ConjugateDataScanner<typename graph_pack::graph_t> scanner(gp.g);
+    ScanGraphPack(file_name, scanner, gp);
+    ScanClusteredIndices(file_name, scanner, paired_indices, false);
 }
 
 template<class Graph>
diff --git a/src/modules/pipeline/library.cpp b/src/common/pipeline/library.cpp
similarity index 97%
rename from src/modules/pipeline/library.cpp
rename to src/common/pipeline/library.cpp
index 6852156..6ed907d 100644
--- a/src/modules/pipeline/library.cpp
+++ b/src/common/pipeline/library.cpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #include "pipeline/library.hpp"
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 
 #include "llvm/Support/YAMLTraits.h"
 #include "llvm/Support/Errc.h"
@@ -40,6 +40,7 @@ struct ScalarEnumerationTraits<LibraryType> {
         io.enumCase(value, "single",              LibraryType::SingleReads);
         io.enumCase(value, "sanger",              LibraryType::SangerReads);
         io.enumCase(value, "nanopore",            LibraryType::NanoporeReads);
+        io.enumCase(value, "tslr",                LibraryType::TSLReads);
         io.enumCase(value, "trusted-contigs",     LibraryType::TrustedContigs);
         io.enumCase(value, "untrusted-contigs",   LibraryType::UntrustedContigs);
         io.enumCase(value, "path-extend-contigs", LibraryType::PathExtendContigs);
@@ -98,6 +99,7 @@ void SequencingLibraryBase::validate(llvm::yaml::IO &, llvm::StringRef &res) {
     case LibraryType::PacBioReads:
     case LibraryType::SangerReads:
     case LibraryType::NanoporeReads:
+    case LibraryType::TSLReads:
     case LibraryType::TrustedContigs:
     case LibraryType::UntrustedContigs:
     case LibraryType::PathExtendContigs:
diff --git a/src/modules/pipeline/library.hpp b/src/common/pipeline/library.hpp
similarity index 86%
rename from src/modules/pipeline/library.hpp
rename to src/common/pipeline/library.hpp
index a183fe9..4598721 100644
--- a/src/modules/pipeline/library.hpp
+++ b/src/common/pipeline/library.hpp
@@ -8,8 +8,8 @@
 #ifndef __IO_LIBRARY_HPP__
 #define __IO_LIBRARY_HPP__
 
-#include "utils/adt/chained_iterator.hpp"
-#include "utils/adt/iterator_range.hpp"
+#include "common/adt/chained_iterator.hpp"
+#include "common/adt/iterator_range.hpp"
 
 #include <boost/iterator/iterator_facade.hpp>
 
@@ -24,15 +24,17 @@ namespace io {
 
 enum class LibraryType {
     SingleReads,
-    PairedEnd,
-    MatePairs,
-    HQMatePairs,
-    PacBioReads,
     SangerReads,
+    PacBioReads,
     NanoporeReads,
+    PairedEnd,
+    HQMatePairs,
+    MatePairs,
     TrustedContigs,
-    UntrustedContigs,
-    PathExtendContigs
+    TSLReads,
+    PathExtendContigs,
+    UntrustedContigs
+
 };
 
 static std::vector<LibraryType> LibraryPriotity = {
@@ -44,6 +46,7 @@ static std::vector<LibraryType> LibraryPriotity = {
     LibraryType::HQMatePairs,
     LibraryType::MatePairs,
     LibraryType::TrustedContigs,
+    LibraryType::TSLReads,
     LibraryType::PathExtendContigs,
     LibraryType::UntrustedContigs
 };
@@ -157,9 +160,9 @@ public:
     }
 
     bool is_graph_contructable() const {
-        return (type_ == io::LibraryType::PairedEnd ||
-                type_ == io::LibraryType::SingleReads ||
-                type_ == io::LibraryType::HQMatePairs);
+        return type_ == io::LibraryType::PairedEnd ||
+               type_ == io::LibraryType::SingleReads ||
+               type_ == io::LibraryType::HQMatePairs;
     }
 
     bool is_bwa_alignable() const {
@@ -170,26 +173,19 @@ public:
         return is_graph_contructable();
     }
 
-    bool is_binary_covertable() {
-        return is_graph_contructable() || is_mismatch_correctable() || is_paired();
-    }
+//    bool is_binary_covertable() {
+//        return is_graph_contructable() || is_mismatch_correctable() || is_paired();
+//    }
 
     bool is_paired() const {
-        return (type_ == io::LibraryType::PairedEnd ||
-                type_ == io::LibraryType::MatePairs||
-                type_ == io::LibraryType::HQMatePairs);
+        return type_ == io::LibraryType::PairedEnd ||
+               type_ == io::LibraryType::MatePairs ||
+               type_ == io::LibraryType::HQMatePairs;
     }
 
-    bool is_repeat_resolvable() const {
-        return (type_ == io::LibraryType::PairedEnd ||
-                type_ == io::LibraryType::HQMatePairs ||
-                type_ == io::LibraryType::MatePairs ||
-                type_ == io::LibraryType::PacBioReads ||
-                type_ == io::LibraryType::SangerReads ||
-                type_ == io::LibraryType::NanoporeReads ||
-                type_ == io::LibraryType::TrustedContigs ||
-                type_ == io::LibraryType::UntrustedContigs ||
-                type_ == io::LibraryType::PathExtendContigs);
+    bool is_mate_pair() const {
+        return type_ == io::LibraryType::MatePairs ||
+               type_ == io::LibraryType::HQMatePairs;
     }
 
     static bool is_contig_lib(LibraryType type) {
@@ -199,9 +195,10 @@ public:
     }
 
     static bool is_long_read_lib(LibraryType type) {
-        return type == io::LibraryType::PacBioReads || 
-               type == io::LibraryType::SangerReads || 
-               type == io::LibraryType::NanoporeReads;
+        return type == io::LibraryType::PacBioReads ||
+               type == io::LibraryType::SangerReads ||
+               type == io::LibraryType::NanoporeReads ||
+               type == io::LibraryType::TSLReads;
     }
 
     bool is_contig_lib() const {
@@ -212,13 +209,18 @@ public:
         return is_long_read_lib(type_);
     }
 
-    bool is_pacbio_alignable() const {
-        return (type_ == io::LibraryType::PacBioReads ||
-                type_ == io::LibraryType::SangerReads ||
-                type_ == io::LibraryType::NanoporeReads ||
-                //comment next line to switch alignment method for trusted contigs
-                type_ == io::LibraryType::TrustedContigs ||
-                type_ == io::LibraryType::UntrustedContigs);
+    bool is_repeat_resolvable() const {
+        return is_paired() ||
+               is_long_read_lib() ||
+               is_contig_lib();
+    }
+
+    //hybrid libraries are used to close gaps in the graph during their alignment
+    bool is_hybrid_lib() const {
+        return is_long_read_lib() ||
+               //comment next line to switch alignment method for trusted contigs
+               type_ == io::LibraryType::TrustedContigs ||
+               type_ == io::LibraryType::UntrustedContigs;
     }
 
 private:
diff --git a/src/modules/pipeline/library.inl b/src/common/pipeline/library.inl
similarity index 100%
rename from src/modules/pipeline/library.inl
rename to src/common/pipeline/library.inl
diff --git a/src/modules/pipeline/stage.cpp b/src/common/pipeline/stage.cpp
similarity index 98%
rename from src/modules/pipeline/stage.cpp
rename to src/common/pipeline/stage.cpp
index 4477536..3119b0a 100644
--- a/src/modules/pipeline/stage.cpp
+++ b/src/common/pipeline/stage.cpp
@@ -8,7 +8,7 @@
 #include "pipeline/stage.hpp"
 #include "pipeline/graphio.hpp"
 
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 
 #include <algorithm>
 #include <cstring>
diff --git a/src/modules/pipeline/stage.hpp b/src/common/pipeline/stage.hpp
similarity index 100%
rename from src/modules/pipeline/stage.hpp
rename to src/common/pipeline/stage.hpp
diff --git a/src/modules/data_structures/sequence/genome_storage.cpp b/src/common/sequence/genome_storage.hpp
similarity index 64%
rename from src/modules/data_structures/sequence/genome_storage.cpp
rename to src/common/sequence/genome_storage.hpp
index f2f262e..d790386 100644
--- a/src/modules/data_structures/sequence/genome_storage.cpp
+++ b/src/common/sequence/genome_storage.hpp
@@ -4,21 +4,27 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-//
-// Created by lab42 on 8/19/15.
-//
+#pragma once
 
-#include "genome_storage.hpp"
-#include "data_structures/sequence/nucl.hpp"
-using namespace std;
+#include <string>
+#include "sequence.hpp"
+#include "nucl.hpp"
 
-namespace debruijn_graph {
-//TODO exterminate this where possible
-    Sequence GenomeStorage::GetSequence() const{
+class GenomeStorage {
+    std::string s_;
+public:
+    GenomeStorage() {
+    }
+
+    GenomeStorage(const std::string &s): s_(s) {
+    }
+
+    //TODO exterminate this where possible
+    Sequence GetSequence() const {
         stringstream ss;
         size_t l = 0, r = 0;
         for(size_t i = 0; i < s_.size(); i++) {
-            if (! is_nucl(s_[i]) ) {
+            if (!is_nucl(s_[i]) ) {
                 if (r > l) {
                     ss << s_.substr(l, r - l);
                 }
@@ -33,13 +39,17 @@ namespace debruijn_graph {
         }
         return Sequence(ss.str());
     }
-    void GenomeStorage::SetSequence(const Sequence &s) {
+
+    void SetSequence(const Sequence &s) {
         s_ = s.str();
     }
-    string GenomeStorage::str() const{
+
+    std::string str() const {
         return s_;
     }
-    size_t GenomeStorage::size() const {
+
+    size_t size() const {
         return s_.size();
     }
-}
\ No newline at end of file
+};
+
diff --git a/src/modules/data_structures/sequence/nucl.hpp b/src/common/sequence/nucl.hpp
similarity index 98%
rename from src/modules/data_structures/sequence/nucl.hpp
rename to src/common/sequence/nucl.hpp
index 905d8c2..3170593 100755
--- a/src/modules/data_structures/sequence/nucl.hpp
+++ b/src/common/sequence/nucl.hpp
@@ -27,7 +27,7 @@
 #ifndef NUCL_HPP_
 #define NUCL_HPP_
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 #include <iostream>
 
 const char dignucl_map['T' + 1] = {
diff --git a/src/modules/data_structures/sequence/quality.hpp b/src/common/sequence/quality.hpp
similarity index 100%
rename from src/modules/data_structures/sequence/quality.hpp
rename to src/common/sequence/quality.hpp
diff --git a/src/modules/data_structures/sequence/rtseq.hpp b/src/common/sequence/rtseq.hpp
similarity index 97%
rename from src/modules/data_structures/sequence/rtseq.hpp
rename to src/common/sequence/rtseq.hpp
index ea1e279..5bc27e7 100644
--- a/src/modules/data_structures/sequence/rtseq.hpp
+++ b/src/common/sequence/rtseq.hpp
@@ -16,11 +16,11 @@
 #define RTSEQ_HPP_
 
 #include <string>
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 #include <array>
 #include <algorithm>
-#include "data_structures/sequence/nucl.hpp"
-#include "dev_support/log.hpp"
+#include "nucl.hpp"
+#include "utils/log.hpp"
 #include "seq_common.hpp"
 #include "seq.hpp"
 #include "simple_seq.hpp"
@@ -706,6 +706,17 @@ public:
         }
     };
 
+    struct less3 {
+        bool operator()(const RuntimeSeq<max_size_, T> &l, const RuntimeSeq<max_size_, T> &r) const {
+            VERIFY(l.size() == r.size());
+            const T* l_data = l.data();
+            const T* r_data = r.data();
+            for (size_t i = 0; i < l.data_size(); ++i)
+                if (l_data[i] != r_data[i])
+                    return l_data[i] < r_data[i];
+            return false;
+        }
+    };
 };
 
 template<size_t max_size_, typename T = seq_element_type>
@@ -719,7 +730,6 @@ bool operator<(const RuntimeSeq<max_size_, T> &l, const RuntimeSeq<max_size_, T>
     return l.size() < r.size();
 }
 
-
 template<size_t max_size_, typename T>
 std::ostream &operator<<(std::ostream &os, RuntimeSeq<max_size_, T> seq) {
     os << seq.str();
@@ -734,7 +744,8 @@ struct hash<RuntimeSeq<max_size, T>> {
     }
 };
 
-};
+}
 
+typedef RuntimeSeq<UPPER_BOUND> RtSeq;
 
 #endif /* RTSEQ_HPP_ */
diff --git a/src/modules/data_structures/sequence/seq.hpp b/src/common/sequence/seq.hpp
similarity index 99%
rename from src/modules/data_structures/sequence/seq.hpp
rename to src/common/sequence/seq.hpp
index 3753b74..bcaaa72 100755
--- a/src/modules/data_structures/sequence/seq.hpp
+++ b/src/common/sequence/seq.hpp
@@ -34,9 +34,9 @@
 
 #include <city/city.h>
 
-#include "dev_support/verify.hpp"
-#include "data_structures/sequence/nucl.hpp"
-#include "dev_support/log.hpp"
+#include "utils/verify.hpp"
+#include "nucl.hpp"
+#include "utils/log.hpp"
 #include "seq_common.hpp"
 
 
diff --git a/src/modules/data_structures/sequence/runtime_k.hpp b/src/common/sequence/seq_common.hpp
similarity index 55%
rename from src/modules/data_structures/sequence/runtime_k.hpp
rename to src/common/sequence/seq_common.hpp
index bbb28b7..51ceb42 100644
--- a/src/modules/data_structures/sequence/runtime_k.hpp
+++ b/src/common/sequence/seq_common.hpp
@@ -5,17 +5,19 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#ifndef RUNTIME_K_HPP_
-#define RUNTIME_K_HPP_
+/*
+ * seq_common.hpp
+ *
+ *  Created on: Jun 25, 2012
+ *      Author: andrey
+ */
 
-#include "data_structures/sequence/sequence.hpp"
-#include "data_structures/sequence/seq.hpp"
-#include "data_structures/sequence/simple_seq.hpp"
-#include "data_structures/sequence/rtseq.hpp"
+#ifndef SEQ_COMMON_HPP_
+#define SEQ_COMMON_HPP_
 
 #include "k_range.hpp"
 
-namespace runtime_k {
+typedef u_int64_t seq_element_type;
 
 constexpr size_t t_size(void) {
     return sizeof(seq_element_type);
@@ -33,15 +35,10 @@ constexpr size_t get_upper_bound(size_t value) {
     return get_k_by_ts(get_t_elements_number(value));
 }
 
-const size_t UPPER_BOUND = get_upper_bound(MAX_K); //((MAX_K - 1) / (sizeof(seq_element_type) << 2) + 1) * (sizeof(seq_element_type) << 2);
+const size_t UPPER_BOUND = get_upper_bound(runtime_k::MAX_K); //((MAX_K - 1) / (sizeof(seq_element_type) << 2) + 1) * (sizeof(seq_element_type) << 2);
 
-const size_t MAX_TS = get_t_elements_number(MAX_K);
+const size_t MAX_TS = get_t_elements_number(runtime_k::MAX_K);
 
-const size_t MIN_TS = get_t_elements_number(MIN_K);
+const size_t MIN_TS = get_t_elements_number(runtime_k::MIN_K);
 
-
-typedef RuntimeSeq<UPPER_BOUND> RtSeq;
-
-} /* namespace runtime_k */
-
-#endif /* RUNTIME_K_HPP_ */
+#endif /* SEQ_COMMON_HPP_ */
diff --git a/src/modules/data_structures/sequence/sequence.hpp b/src/common/sequence/sequence.hpp
similarity index 99%
rename from src/modules/data_structures/sequence/sequence.hpp
rename to src/common/sequence/sequence.hpp
index b25d217..aaaf21b 100755
--- a/src/modules/data_structures/sequence/sequence.hpp
+++ b/src/common/sequence/sequence.hpp
@@ -13,8 +13,8 @@
 #include <memory>
 #include <cstring>
 
-#include "data_structures/sequence/seq.hpp"
-#include "data_structures/sequence/rtseq.hpp"
+#include "seq.hpp"
+#include "rtseq.hpp"
 
 class Sequence {
     // Type to store Seq in Sequences
diff --git a/src/modules/data_structures/sequence/sequence_tools.hpp b/src/common/sequence/sequence_tools.hpp
similarity index 97%
rename from src/modules/data_structures/sequence/sequence_tools.hpp
rename to src/common/sequence/sequence_tools.hpp
index eea0e65..f2231e2 100644
--- a/src/modules/data_structures/sequence/sequence_tools.hpp
+++ b/src/common/sequence/sequence_tools.hpp
@@ -12,8 +12,8 @@
 #include <string>
 #include <vector>
 
-#include "data_structures/sequence/nucl.hpp"
-#include "data_structures/sequence/sequence.hpp"
+#include "nucl.hpp"
+#include "sequence.hpp"
 #include "utils/levenshtein.hpp"
 
 inline const std::string Reverse(const std::string &s) {
diff --git a/src/modules/data_structures/sequence/simple_seq.hpp b/src/common/sequence/simple_seq.hpp
similarity index 97%
rename from src/modules/data_structures/sequence/simple_seq.hpp
rename to src/common/sequence/simple_seq.hpp
index 77d0fe3..5bc144a 100644
--- a/src/modules/data_structures/sequence/simple_seq.hpp
+++ b/src/common/sequence/simple_seq.hpp
@@ -21,9 +21,9 @@
 #include <cstring>
 #include <iostream>
 
-#include "dev_support/verify.hpp"
-#include "data_structures/sequence/nucl.hpp"
-#include "dev_support/log.hpp"
+#include "utils/verify.hpp"
+#include "nucl.hpp"
+#include "utils/log.hpp"
 #include "seq_common.hpp"
 /**
  * @param T is max number of nucleotides, type for storage
diff --git a/src/modules/stages/CMakeLists.txt b/src/common/stages/CMakeLists.txt
similarity index 100%
rename from src/modules/stages/CMakeLists.txt
rename to src/common/stages/CMakeLists.txt
diff --git a/src/modules/stages/construction.cpp b/src/common/stages/construction.cpp
similarity index 90%
rename from src/modules/stages/construction.cpp
rename to src/common/stages/construction.cpp
index 5702185..6116a62 100644
--- a/src/modules/stages/construction.cpp
+++ b/src/common/stages/construction.cpp
@@ -5,12 +5,12 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "io/reads_io/vector_reader.hpp"
+#include "io/reads/vector_reader.hpp"
 #include "io/dataset_support/dataset_readers.hpp"
 #include "pipeline/graph_pack.hpp"
 #include "io/dataset_support/read_converter.hpp"
 
-#include "algorithms/graph_construction.hpp"
+#include "modules/graph_construction.hpp"
 #include "assembly_graph/stats/picture_dump.hpp"
 #include "construction.hpp"
 
@@ -29,7 +29,7 @@ void construct_graph(io::ReadStreamList<Read>& streams,
     if (!cfg::get().ds.RL()) {
         INFO("Figured out: read length = " << rl);
         cfg::get_writable().ds.set_RL(rl);
-        cfg::get_writable().ds.set_aRL(1.0 * stats.bases_ / stats.reads_);
+        cfg::get_writable().ds.set_aRL((double) stats.bases_ / (double) stats.reads_);
     } else if (cfg::get().ds.RL() != rl)
         WARN("In datasets.info, wrong RL is specified: " << cfg::get().ds.RL() << ", not " << rl);
 }
@@ -63,7 +63,7 @@ void Construction::run(conj_graph_pack &gp, const char*) {
         if (dataset.reads[i].is_graph_contructable())
             libs_for_construction.push_back(i);
 
-    auto streams = single_binary_readers_for_libs(dataset, libs_for_construction, true, true);
+    auto streams = io::single_binary_readers_for_libs(dataset, libs_for_construction, true, true);
     construct_graph<io::SingleReadSeq>(streams, gp, contigs_stream);
 }
 
diff --git a/src/modules/stages/construction.hpp b/src/common/stages/construction.hpp
similarity index 100%
rename from src/modules/stages/construction.hpp
rename to src/common/stages/construction.hpp
diff --git a/src/modules/stages/simplification.cpp b/src/common/stages/simplification.cpp
similarity index 71%
rename from src/modules/stages/simplification.cpp
rename to src/common/stages/simplification.cpp
index cd46d1a..f0cd8a9 100644
--- a/src/modules/stages/simplification.cpp
+++ b/src/common/stages/simplification.cpp
@@ -5,11 +5,12 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
 #include "assembly_graph/graph_support/graph_processing_algorithm.hpp"
 #include "stages/simplification_pipeline/simplification_settings.hpp"
 #include "stages/simplification_pipeline/graph_simplification.hpp"
-#include "algorithms/simplification/parallel_simplification_algorithms.hpp"
+#include "stages/simplification_pipeline/single_cell_simplification.hpp"
+#include "stages/simplification_pipeline/rna_simplification.hpp"
 
 #include "simplification.hpp"
 
@@ -18,14 +19,18 @@ namespace debruijn_graph {
 using namespace debruijn::simplification;
 using namespace config;
 
+template<class graph_pack>
+shared_ptr<visualization::graph_colorer::GraphColorer<typename graph_pack::graph_t>> DefaultGPColorer(
+    const graph_pack& gp) {
+    io::SingleRead genome("ref", gp.genome.str());
+    auto mapper = MapperInstance(gp);
+    auto path1 = mapper->MapRead(genome).path();
+    auto path2 = mapper->MapRead(!genome).path();
+    return visualization::graph_colorer::DefaultColorer(gp.g, path1, path2);
+}
+
 class GraphSimplifier {
     typedef std::function<void(EdgeId)> HandlerF;
-    typedef omnigraph::PersistentEdgeRemovingAlgorithm<Graph,
-            omnigraph::ParallelInterestingElementFinder<Graph, EdgeId>,
-            LengthComparator<Graph>> TipClipperT;
-    typedef omnigraph::PersistentEdgeRemovingAlgorithm<Graph,
-            omnigraph::ParallelInterestingElementFinder<Graph, EdgeId>,
-            CoverageComparator<Graph>> ECRemoverT;
 
     typedef std::vector<std::pair<AlgoPtr<Graph>, std::string>> AlgoStorageT;
 
@@ -38,33 +43,6 @@ class GraphSimplifier {
     HandlerF removal_handler_;
     stats::detail_info_printer& printer_;
 
-//    bool FastModeAvailable(const SimplifInfoContainer& info, double activation_cov_threshold) {
-//        const auto& cfg = cfg::get();
-//
-//        //todo fix logic
-//        //also handles meta case for now
-//        if (cfg.ds.single_cell) {
-//            return !cfg::get().main_iteration;
-//        }
-//
-//        if (math::eq(info.detected_mean_coverage(), 0.) &&
-//            !cfg.kcm.use_coverage_threshold) {
-//            WARN("Mean coverage wasn't reliably estimated");
-//            return false;
-//        }
-//
-//        //todo review logic
-//        if (math::ls(info.detected_mean_coverage(), activation_cov_threshold) &&
-//            !(cfg.kcm.use_coverage_threshold &&
-//              math::ge(cfg.kcm.coverage_threshold, activation_cov_threshold))) {
-//            INFO("Estimated mean coverage " << info.detected_mean_coverage() <<
-//                 " is less than fast mode activation coverage " << activation_cov_threshold);
-//            return false;
-//        }
-//
-//        return true;
-//    }
-
     bool PerformInitCleaning() {
 
         if (simplif_cfg_.init_clean.early_it_only && info_container_.main_iteration()) {
@@ -88,7 +66,7 @@ class GraphSimplifier {
         ATCondition<Graph> condition (g_, 0.8, max_length, false);
         for (auto iter = g_.SmartEdgeBegin(); !iter.IsEnd(); ++iter){
             if (g_.length(*iter) == 1 && condition.Check(*iter)) {
-                er.DeleteEdgeWithNoCompression(*iter);
+                er.DeleteEdgeNoCompress(*iter);
             }
         }
         ParallelCompress(g_, chunk_cnt);
@@ -146,7 +124,6 @@ class GraphSimplifier {
 
         RunAlgos(algos);
 
-        //FIXME why called directly?
         if (info_container_.mode() == config::pipeline_type::rna){
             RemoveHiddenLoopEC(g_, gp_.flanking_cov, info_container_.detected_coverage_bound(), simplif_cfg_.her, removal_handler_);
             cnt_callback_.Report();
@@ -169,37 +146,7 @@ class GraphSimplifier {
     }
 
     bool FinalRemoveErroneousEdges() {
-
-    //    gp.ClearQuality();
-    //    gp.FillQuality();
-    //    auto colorer = debruijn_graph::DefaultGPColorer(gp);
-    //    omnigraph::DefaultLabeler<typename gp_t::graph_t> labeler(gp.g, gp.edge_pos);
-    //    QualityEdgeLocalityPrintingRH<Graph> qual_removal_handler(gp.g, gp.edge_qual, labeler, colorer,
-    //                                   cfg::get().output_dir + "pictures/colored_edges_deleted/");
-    //
-    //    //positive quality edges removed (folder colored_edges_deleted)
-    //    std::function<void(EdgeId)> qual_removal_handler_f = boost::bind(
-    //            //            &QualityLoggingRemovalHandler<Graph>::HandleDelete,
-    //            &QualityEdgeLocalityPrintingRH<Graph>::HandleDelete,
-    //            boost::ref(qual_removal_handler), _1);
-    //
-    //    std::function<void(set<EdgeId>)> set_removal_handler_f = boost::bind(
-    //                &omnigraph::simplification::SingleEdgeAdapter<set<EdgeId>>, _1, qual_removal_handler_f);
-    //
-
-        std::function<void(set<EdgeId>)> set_removal_handler_f(0);
-        if (removal_handler_) {
-            set_removal_handler_f = std::bind(
-                &omnigraph::simplification::SingleEdgeAdapter<set<EdgeId>>, std::placeholders::_1, removal_handler_);
-        }
-
-        bool changed = RemoveRelativelyLowCoverageComponents(gp_.g, gp_.flanking_cov,
-                                              simplif_cfg_.rcc, info_container_, set_removal_handler_f);
-
-        cnt_callback_.Report();
-
-        changed |= DisconnectRelativelyLowCoverageEdges(gp_.g, gp_.flanking_cov, simplif_cfg_.relative_ed);
-
+        bool changed = false;
         if (simplif_cfg_.topology_simplif_enabled && info_container_.main_iteration()) {
             changed |= AllTopology();
             changed |= MaxFlowRemoveErroneousEdges(gp_.g, simplif_cfg_.mfec,
@@ -210,11 +157,69 @@ class GraphSimplifier {
     }
 
     void PostSimplification() {
+        using namespace omnigraph;
+        using namespace func;
         INFO("PROCEDURE == Post simplification");
-        size_t iteration = 0;
 
         AlgoStorageT algos;
 
+        //auto colorer = debruijn_graph::DefaultGPColorer(gp_);
+        //visualization::graph_labeler::DefaultLabeler<Graph> labeler(g_, gp_.edge_pos);
+
+        //    gp.ClearQuality();
+        //    gp.FillQuality();
+        //    QualityEdgeLocalityPrintingRH<Graph> qual_removal_handler(gp.g, gp.edge_qual, labeler, colorer,
+        //                                   cfg::get().output_dir + "pictures/colored_edges_deleted/");
+        //
+        //    //positive quality edges removed (folder colored_edges_deleted)
+        //    std::function<void(EdgeId)> qual_removal_handler_f = boost::bind(
+        //            //            &QualityLoggingRemovalHandler<Graph>::HandleDelete,
+        //            &QualityEdgeLocalityPrintingRH<Graph>::HandleDelete,
+        //            boost::ref(qual_removal_handler), _1);
+        
+        //visualization::visualization_utils::LocalityPrintingRH<Graph> drawing_handler(gp_.g, labeler, colorer, "/home/snurk/pics");
+        //auto printing_handler=[&] (EdgeId e) {
+        //    std::cout << "Edge:" << g_.str(e) << "; cov: " << g_.coverage(e) << "; start " << g_.str(g_.EdgeStart(e)) << "; end " << g_.str(g_.EdgeEnd(e)) << std::endl;
+        //};
+        //auto extensive_handler = [&] (EdgeId e) {removal_handler_(e) ; printing_handler(e); drawing_handler.HandleDelete(e);};
+
+
+        typename ComponentRemover<Graph>::HandlerF set_removal_handler_f;
+        if (removal_handler_) {
+            set_removal_handler_f = [=](const set<EdgeId>& edges) {
+                std::for_each(edges.begin(), edges.end(), removal_handler_);
+            };
+        }
+
+        PushValid(
+                RelativeECRemoverInstance(gp_.g,
+                                          simplif_cfg_.rcec, info_container_, removal_handler_),
+                "Relative coverage component remover",
+                algos);
+
+        PushValid(
+                RelativeCoverageComponentRemoverInstance(gp_.g, gp_.flanking_cov,
+                                                         simplif_cfg_.rcc, info_container_, set_removal_handler_f),
+                "Relative coverage component remover",
+                algos);
+
+
+        PushValid(
+                RelativelyLowCoverageDisconnectorInstance(gp_.g, gp_.flanking_cov,
+                                                          simplif_cfg_.relative_ed, info_container_),
+                "Disconnecting edges with relatively low coverage",
+                algos);
+
+        PushValid(
+                ComplexTipClipperInstance(gp_.g, simplif_cfg_.complex_tc, info_container_, set_removal_handler_f),
+                "Complex tip clipper",
+                algos);
+
+        PushValid(
+                ComplexBRInstance(gp_.g, simplif_cfg_.cbr, info_container_),
+                "Complex bulge remover",
+                algos);
+
         PushValid(
                 TipClipperInstance(g_, simplif_cfg_.tc,
                                    info_container_, removal_handler_),
@@ -255,12 +260,33 @@ class GraphSimplifier {
                                        info_container_, removal_handler_),
                     "Yet another final bulge remover",
                     algos);
+
+            EdgePredicate<Graph> meta_thorn_condition
+                    = And(LengthUpperBound<Graph>(g_, LengthThresholdFinder::MaxErroneousConnectionLength(
+                                                                           g_.k(), simplif_cfg_.isec.max_ec_length_coefficient)),
+
+                      And([&] (EdgeId e) {
+                              //todo configure!
+                              return simplification::relative_coverage::
+                                         RelativeCoverageHelper<Graph>(g_, gp_.flanking_cov, 2).AnyHighlyCoveredOnFourSides(e);
+                          },
+
+                      And(UniqueIncomingPathLengthLowerBound(g_, simplif_cfg_.isec.uniqueness_length),
+
+                          //todo configure!
+                          TopologicalThornCondition<Graph>(g_, simplif_cfg_.isec.span_distance, /*max edge cnt*/5))));
+
+            PushValid(std::make_shared<ParallelEdgeRemovingAlgorithm<Graph>>(g_, meta_thorn_condition, info_container_.chunk_cnt(), 
+                      removal_handler_),
+                      "Thorn remover (meta)",
+                      algos);
         }
 
         if (info_container_.mode() == config::pipeline_type::rna) {
             PushValid(ATTipClipperInstance(g_, removal_handler_, info_container_.chunk_cnt()), "AT Tips", algos);
         }
 
+        size_t iteration = 0;
         bool enable_flag = true;
         while (enable_flag) {
             enable_flag = false;
@@ -270,12 +296,6 @@ class GraphSimplifier {
             enable_flag |= FinalRemoveErroneousEdges();
             cnt_callback_.Report();
 
-            enable_flag |=  ClipComplexTips(gp_.g, simplif_cfg_.complex_tc, info_container_, removal_handler_);
-            cnt_callback_.Report();
-
-            enable_flag |= RemoveComplexBulges(gp_.g, simplif_cfg_.cbr, iteration);
-            cnt_callback_.Report();
-
             enable_flag |= RunAlgos(algos);
 
             iteration++;
@@ -286,13 +306,23 @@ class GraphSimplifier {
             //        printer(ipp_final_bulge_removal, str(format("_%d") % iteration));
         }
 
-        //fixme move to AllTopology?
         if (simplif_cfg_.topology_simplif_enabled) {
             RemoveHiddenEC(gp_.g, gp_.flanking_cov, simplif_cfg_.her, info_container_, removal_handler_);
 
             cnt_callback_.Report();
         }
 
+        if (info_container_.mode() == config::pipeline_type::meta && simplif_cfg_.her.enabled) {
+            VERIFY(math::ls(simplif_cfg_.her.unreliability_threshold, 0.));
+            MetaHiddenECRemover<Graph> algo(g_, info_container_.chunk_cnt(), gp_.flanking_cov,
+                                                      simplif_cfg_.her.uniqueness_length,
+                                                      simplif_cfg_.her.relative_threshold,
+                                                      removal_handler_);
+            INFO("Running Hidden EC remover (meta)");
+            LoopedRun(algo);
+            cnt_callback_.Report();
+        }
+
         INFO("Disrupting self-conjugate edges");
         SelfConjugateDisruptor<Graph>(gp_.g, removal_handler_).Run();
         cnt_callback_.Report();
@@ -309,23 +339,24 @@ class GraphSimplifier {
     //    CompressAllVertices(graph);
     //}
 
-//    std::shared_ptr<Predicate<EdgeId>> ParseCondition(const string& condition) const {
-//        ConditionParser<Graph> parser(g_, condition, info_container_);
-//        return parser();
-//    }
-
     void PushValid(const AlgoPtr<Graph>& algo_ptr, std::string comment, AlgoStorageT& algos) const {
         if (algo_ptr) {
             algos.push_back(std::make_pair(algo_ptr, comment));
         }
     }
 
+    bool RunAlgo(const AlgoPtr<Graph>& algo, const string &comment, bool force_primary_launch = false) {
+        INFO("Running " << comment);
+        size_t triggered = algo->Run(force_primary_launch);
+        INFO("Triggered " << triggered << " times");
+        cnt_callback_.Report();
+        return (triggered > 0);
+    }
+
     bool RunAlgos(AlgoStorageT& algos, bool force_primary_launch = false) {
         bool changed = false;
         for (auto algo_comment : algos) {
-             INFO("Running " << algo_comment.second);
-             changed |= algo_comment.first->Run(force_primary_launch);
-             cnt_callback_.Report();
+             changed |= RunAlgo(algo_comment.first, algo_comment.second, force_primary_launch);
          }
         return changed;
     }
@@ -353,11 +384,11 @@ public:
         AlgoStorageT algos;
 
         PushValid(
-                TipClipperInstance(g_, simplif_cfg_.tc, info_container_, removal_handler_, simplif_cfg_.cycle_iter_count),
+                TipClipperInstance(g_, simplif_cfg_.tc, info_container_, removal_handler_),
                 "Tip clipper",
                 algos);
         PushValid(
-                BRInstance(g_, simplif_cfg_.br, info_container_, removal_handler_, simplif_cfg_.cycle_iter_count),
+                BRInstance(g_, simplif_cfg_.br, info_container_, removal_handler_),
                 "Bulge remover",
                 algos);
         PushValid(
@@ -367,7 +398,7 @@ public:
 
         size_t iteration = 0;
         bool graph_changed = true;
-        //cannot stop simply if nothing changed, since threshold change on every iteration
+        //cannot stop simply if nothing changed, since threshold changes on every iteration
         while (iteration < simplif_cfg_.cycle_iter_count || graph_changed) {
             INFO("PROCEDURE == Simplification cycle, iteration " << iteration + 1);
             graph_changed = RunAlgos(algos);
@@ -383,6 +414,7 @@ public:
         }
     }
 
+    //TODO reduce code duplication
     void SimplifyRNAGraph() {
         printer_(info_printer_pos::before_simplification);
         INFO("Graph simplification started");
@@ -393,40 +425,40 @@ public:
             DEBUG("Reference genome length = " + std::to_string(gp_.genome.GetSequence().size()));
         }
 
-        AlgoStorageT ec_algo;
-
-        PushValid(ECRemoverInstance(g_, simplif_cfg_.ec, info_container_, removal_handler_,
-                                            simplif_cfg_.cycle_iter_count), "Low coverage edge remover", ec_algo);
+        auto ec_algo = ECRemoverInstance(g_, simplif_cfg_.ec, info_container_, removal_handler_,
+                                            simplif_cfg_.cycle_iter_count);
 
         size_t iteration = 0;
         bool graph_changed_ec = true;
-        //TODO: config. Or just graph_changed?
-        size_t tc_max_iteration = 2;
-        //cannot stop simply if nothing changed, since threshold change on every iteration
+
+        //cannot stop simply if nothing changed, since threshold changes on every iteration
         while (iteration < simplif_cfg_.cycle_iter_count || graph_changed_ec) {
+            //FIXME either algos creation can be moved out of the cycle,
+            // or checking graph_changed_ec is not enough for correct behaviour
             AlgoStorageT algos;
             PushValid(
-                    TipClipperInstance(g_, simplif_cfg_.tc, info_container_, removal_handler_, tc_max_iteration),
+                    TipClipperInstance(g_, simplif_cfg_.tc, info_container_, removal_handler_),
                     "Tip clipper",
                     algos);
             PushValid(
-                    DeadEndInstance(g_, simplif_cfg_.dead_end, info_container_, removal_handler_, tc_max_iteration),
+                    DeadEndInstance(g_, simplif_cfg_.dead_end, info_container_, removal_handler_),
                     "Dead end clipper",
                     algos);
             PushValid(
-                    BRInstance(g_, simplif_cfg_.br, info_container_, removal_handler_, tc_max_iteration),
+                    BRInstance(g_, simplif_cfg_.br, info_container_, removal_handler_),
                     "Bulge remover",
                     algos);
-            bool graph_changed = true;
-            size_t tc_iteration = 0;
 
-            while (tc_iteration < tc_max_iteration || graph_changed) {
-                INFO("PROCEDURE == Tip clipper and bulge removal cycle, iteration " << iteration + 1 << "." << tc_iteration);
+            bool graph_changed = true;
+            size_t inner_iteration = 0;
+            while (graph_changed) {
+                INFO("PROCEDURE == Tip clipper and bulge removal cycle, iteration "
+                             << iteration + 1 << "." << inner_iteration);
                 graph_changed = RunAlgos(algos);
-                ++tc_iteration;
+                ++inner_iteration;
             }
             INFO("PROCEDURE == Erroneous connection, iteration " << iteration + 1);
-            graph_changed_ec = RunAlgos(ec_algo);
+            graph_changed_ec = RunAlgo(ec_algo, "Low coverage edge remover");
             ++iteration;
         }
 
@@ -440,6 +472,13 @@ public:
     }
 };
 
+shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> DefaultGPColorer(
+        const conj_graph_pack &gp) {
+    auto mapper = MapperInstance(gp);
+    auto path1 = mapper->MapSequence(gp.genome.GetSequence()).path();
+    auto path2 = mapper->MapSequence(!gp.genome.GetSequence()).path();
+    return visualization::graph_colorer::DefaultColorer(gp.g, path1, path2);
+}
 
 void Simplification::run(conj_graph_pack &gp, const char*) {
     using namespace omnigraph;
@@ -448,7 +487,7 @@ void Simplification::run(conj_graph_pack &gp, const char*) {
     gp.index.Detach();
     gp.index.clear();
 
-    omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
     
     stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
 
@@ -496,7 +535,7 @@ void SimplificationCleanup::run(conj_graph_pack &gp, const char*) {
 
 
     auto isolated_edge_remover =
-        IsolatedEdgeRemoverInstance(gp.g, cfg::get().simp.ier, info_container, (HandlerF<Graph>)nullptr);
+        IsolatedEdgeRemoverInstance(gp.g, cfg::get().simp.ier, info_container, (EdgeRemovalHandlerF<Graph>)nullptr);
     if (isolated_edge_remover != nullptr)
         isolated_edge_remover->Run();
 
@@ -507,13 +546,13 @@ void SimplificationCleanup::run(conj_graph_pack &gp, const char*) {
                 cov_cleaner(gp.g,
                             CoverageUpperBound<Graph>(gp.g, low_threshold),
                             info_container.chunk_cnt(),
-                            (HandlerF<Graph>)nullptr,
+                            (EdgeRemovalHandlerF<Graph>)nullptr,
                             /*canonical_only*/true,
                             CoverageComparator<Graph>(gp.g));
         cov_cleaner.Run();
     }
 
-    omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
     stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
     printer(info_printer_pos::final_simplified);
 
diff --git a/src/modules/stages/simplification.hpp b/src/common/stages/simplification.hpp
similarity index 100%
rename from src/modules/stages/simplification.hpp
rename to src/common/stages/simplification.hpp
diff --git a/src/common/stages/simplification_pipeline/graph_simplification.hpp b/src/common/stages/simplification_pipeline/graph_simplification.hpp
new file mode 100644
index 0000000..99937ed
--- /dev/null
+++ b/src/common/stages/simplification_pipeline/graph_simplification.hpp
@@ -0,0 +1,678 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+/*
+ * graph_simplification.hpp
+ *
+ *  Created on: Aug 12, 2011
+ *      Author: sergey
+ */
+
+#pragma once
+
+#include "pipeline/config_struct.hpp"
+
+#include "modules/simplification/tip_clipper.hpp"
+#include "modules/simplification/complex_tip_clipper.hpp"
+#include "modules/simplification/bulge_remover.hpp"
+#include "modules/simplification/complex_bulge_remover.hpp"
+#include "modules/simplification/erroneous_connection_remover.hpp"
+#include "modules/simplification/relative_coverage_remover.hpp"
+#include "modules/simplification/mf_ec_remover.hpp"
+#include "modules/simplification/parallel_simplification_algorithms.hpp"
+#include "stages/simplification_pipeline/simplification_settings.hpp"
+
+#include "modules/graph_read_correction.hpp"
+
+#include "assembly_graph/graph_support/chimera_stats.hpp"
+#include "assembly_graph/graph_support/basic_edge_conditions.hpp"
+#include "assembly_graph/stats/picture_dump.hpp"
+#include "assembly_graph/graph_support/parallel_processing.hpp"
+#include "assembly_graph/graph_support/detail_coverage.hpp"
+
+#include "assembly_graph/core/graph.hpp"
+
+#include "visualization/graph_colorer.hpp"
+#include "utils/standard_base.hpp"
+
+namespace debruijn {
+
+namespace simplification {
+
+//todo remove this line
+using namespace debruijn_graph;
+
+template<class Graph>
+using AlgoPtr = std::shared_ptr<omnigraph::PersistentAlgorithmBase<Graph>>;
+
+template<class Graph>
+using EdgeConditionT = func::TypedPredicate<typename Graph::EdgeId>;
+
+template<class Graph>
+class ConditionParser {
+private:
+    typedef typename Graph::EdgeId EdgeId;
+
+    const Graph &g_;
+    string next_token_;
+    string input_;
+    const SimplifInfoContainer settings_;
+    size_t curr_iteration_;
+    size_t iteration_cnt_;
+    std::queue<string> tokenized_input_;
+
+    size_t max_length_bound_;
+    double max_coverage_bound_;
+
+    string ReadNext() {
+        if (!tokenized_input_.empty()) {
+            next_token_ = tokenized_input_.front();
+            tokenized_input_.pop();
+        } else {
+            next_token_ = "";
+        }
+        return next_token_;
+    }
+
+    template<typename T>
+    bool RelaxMax(T &cur_max, T t) {
+        if (t > cur_max) {
+            cur_max = t;
+            return true;
+        }
+        return false;
+    }
+
+    template<typename T>
+    bool RelaxMin(T &cur_min, T t) {
+        if (t < cur_min) {
+            cur_min = t;
+            return true;
+        }
+        return false;
+    }
+
+    double GetCoverageBound() {
+        if (next_token_ == "auto") {
+            return settings_.detected_coverage_bound();
+        } else {
+            return std::stod(next_token_);
+        }
+    }
+
+    func::TypedPredicate<EdgeId> ParseCondition(size_t &min_length_bound,
+                                               double &min_coverage_bound) {
+        if (next_token_ == "tc_lb") {
+            double length_coeff = std::stod(ReadNext());
+
+            DEBUG("Creating tip length bound. Coeff " << length_coeff);
+            size_t length_bound = LengthThresholdFinder::MaxTipLength(
+                settings_.read_length(), g_.k(), length_coeff);
+
+            DEBUG("Length bound " << length_bound);
+
+            RelaxMin(min_length_bound, length_bound);
+            DEBUG("Min length bound - " << min_length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+
+        } else if (next_token_ == "rlmk") {
+            //Read length minus k
+            VERIFY_MSG(settings_.read_length() > g_.k(), "Read length was shorter than K");
+            DEBUG("Creating (rl - k) bound");
+            size_t length_bound = settings_.read_length() - g_.k();
+            RelaxMin(min_length_bound, length_bound);
+            DEBUG("Min length bound - " << min_length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+
+        } else if (next_token_ == "to_ec_lb") {
+            double length_coeff = std::stod(ReadNext());
+
+            DEBUG( "Creating length bound for erroneous connections originated from tip merging. Coeff " << length_coeff);
+            size_t length_bound =
+                    LengthThresholdFinder::MaxTipOriginatedECLength(
+                        settings_.read_length(), g_.k(), length_coeff);
+
+            DEBUG("Length bound " << length_bound);
+
+            RelaxMin(min_length_bound, length_bound);
+            DEBUG("Min length bound - " << min_length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+
+        } else if (next_token_ == "ec_lb") {
+            size_t length_coeff = std::stoll(ReadNext());
+
+            DEBUG("Creating ec length bound. Coeff " << length_coeff);
+            size_t length_bound =
+                    LengthThresholdFinder::MaxErroneousConnectionLength(
+                        g_.k(), length_coeff);
+
+            DEBUG("Length bound " << length_bound);
+
+            RelaxMin(min_length_bound, length_bound);
+            DEBUG("Min length bound - " << min_length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+        } else if (next_token_ == "lb") {
+            size_t length_bound = std::stoll(ReadNext());
+
+            DEBUG("Creating length bound. Value " << length_bound);
+
+            RelaxMin(min_length_bound, length_bound);
+            DEBUG("Min length bound - " << min_length_bound);
+            return LengthUpperBound<Graph>(g_, length_bound);
+        } else if (next_token_ == "cb") {
+            ReadNext();
+            double cov_bound = GetCoverageBound();
+            DEBUG("Creating coverage upper bound " << cov_bound);
+            RelaxMin(min_coverage_bound, cov_bound);
+            return CoverageUpperBound<Graph>(g_, cov_bound);
+        } else if (next_token_ == "icb") {
+            VERIFY(iteration_cnt_ != -1ul && curr_iteration_ != -1ul);
+            ReadNext();
+            double cov_bound = GetCoverageBound();
+            cov_bound = cov_bound / (double) iteration_cnt_ * (double) (curr_iteration_ + 1);
+            DEBUG("Creating iterative coverage upper bound " << cov_bound);
+            RelaxMin(min_coverage_bound, cov_bound);
+            return CoverageUpperBound<Graph>(g_, cov_bound);
+        } else if (next_token_ == "nbr") {
+            return NotBulgeECCondition<Graph>(g_);
+        } else if (next_token_ == "rctc") {
+            ReadNext();
+            DEBUG("Creating relative cov tip cond " << next_token_);
+            return RelativeCoverageTipCondition<Graph>(g_, std::stod(next_token_));
+        } else if (next_token_ == "disabled") {
+            DEBUG("Creating disabling condition");
+            return func::AlwaysFalse<EdgeId>();
+        } else if (next_token_ == "mmm") {
+            ReadNext();
+            DEBUG("Creating max mismatches cond " << next_token_);
+            return MismatchTipCondition<Graph>(g_, std::stoll(next_token_));
+        } else {
+            VERIFY(false);
+            return func::AlwaysTrue<EdgeId>();
+        }
+    }
+
+    func::TypedPredicate<EdgeId> ParseConjunction(size_t &min_length_bound,
+                                                  double &min_coverage_bound) {
+        func::TypedPredicate<EdgeId> answer = func::AlwaysTrue<EdgeId>();
+        VERIFY(next_token_ == "{");
+        ReadNext();
+        while (next_token_ != "}") {
+            answer = func::And(answer,
+                              ParseCondition(min_length_bound, min_coverage_bound));
+            ReadNext();
+        }
+        return answer;
+    }
+
+public:
+
+    ConditionParser(const Graph &g, string input, const SimplifInfoContainer &settings,
+                    size_t curr_iteration = -1ul, size_t iteration_cnt = -1ul)
+            : g_(g),
+              input_(input),
+              settings_(settings),
+              curr_iteration_(curr_iteration),
+              iteration_cnt_(iteration_cnt),
+              max_length_bound_(0),
+              max_coverage_bound_(0.) {
+        DEBUG("Creating parser for string " << input);
+        using namespace boost;
+        vector<string> tmp_tokenized_input;
+        boost::split(tmp_tokenized_input, input_, boost::is_any_of(" ,;"), boost::token_compress_on);
+        for (auto it = tmp_tokenized_input.begin();
+             it != tmp_tokenized_input.end(); ++it) {
+            tokenized_input_.push(*it);
+        }
+        ReadNext();
+    }
+
+    func::TypedPredicate<EdgeId> operator()() {
+        DEBUG("Parsing");
+        func::TypedPredicate<EdgeId> answer = func::AlwaysFalse<EdgeId>();
+        VERIFY_MSG(next_token_ == "{", "Expected \"{\", but next token was " << next_token_);
+        while (next_token_ == "{") {
+            size_t min_length_bound = numeric_limits<size_t>::max();
+            double min_coverage_bound = numeric_limits<double>::max();
+            answer = func::Or(answer,
+                             ParseConjunction(min_length_bound, min_coverage_bound));
+            RelaxMax(max_length_bound_, min_length_bound);
+            RelaxMax(max_coverage_bound_, min_coverage_bound);
+            ReadNext();
+        }
+        return answer;
+    }
+
+    size_t max_length_bound() const {
+        return max_length_bound_;
+    }
+
+    double max_coverage_bound() const {
+        return max_coverage_bound_;
+    }
+
+private:
+    DECL_LOGGER("ConditionParser");
+};
+
+template<class Graph>
+class EditDistanceTrackingCallback {
+    typedef typename Graph::EdgeId EdgeId;
+    const Graph &g_;
+
+public:
+    EditDistanceTrackingCallback(const Graph &g)
+            : g_(g) {
+    }
+
+    bool operator()(EdgeId edge, const vector<EdgeId>& path) const {
+        vector<Sequence> path_sequences;
+        for (EdgeId e : path) {
+            path_sequences.push_back(g_.EdgeNucls(e));
+        }
+        Sequence path_sequence(
+            MergeOverlappingSequences(path_sequences, g_.k()));
+        size_t dist = EditDistance(g_.EdgeNucls(edge), path_sequence);
+        TRACE( "Bulge sequences with distance " << dist << " were " << g_.EdgeNucls(edge) << " and " << path_sequence);
+        return true;
+    }
+
+private:
+    DECL_LOGGER("EditDistanceTrackingCallback");
+};
+
+//enabling tip projection
+//Wraps 'removal_handler' so that, in addition to the original handler, every
+//removed edge is passed to TipsProjector::ProjectTip.
+//NOTE(review): 'tip_projector' is copied into std::bind -- assumes
+//TipsProjector<gp_t> is safe/cheap to copy; confirm.
+template<class gp_t>
+EdgeRemovalHandlerF<typename gp_t::graph_t> WrapWithProjectionCallback(
+    gp_t &gp,
+    EdgeRemovalHandlerF<typename gp_t::graph_t> removal_handler) {
+    typedef typename gp_t::graph_t Graph;
+    typedef typename Graph::EdgeId EdgeId;
+    TipsProjector<gp_t> tip_projector(gp);
+
+    EdgeRemovalHandlerF<Graph> projecting_callback = std::bind(&TipsProjector<gp_t>::ProjectTip,
+                                             tip_projector, std::placeholders::_1);
+
+    return func::CombineCallbacks<EdgeId>(std::ref(removal_handler), projecting_callback);
+}
+
+//Persistent algorithm removing low-coverage (erroneous-connection) edges in
+//the order of increasing coverage. The remove/proceed predicates are
+//re-parsed from the textual condition on every iteration, so the bounds can
+//change as the iteration count grows.
+template<class Graph>
+class LowCoverageEdgeRemovingAlgorithm : public PersistentProcessingAlgorithm<Graph,
+                                                                              typename Graph::EdgeId,
+                                                                              omnigraph::CoverageComparator<Graph>> {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef PersistentProcessingAlgorithm<Graph, EdgeId, omnigraph::CoverageComparator<Graph>> base;
+
+    const SimplifInfoContainer simplif_info_;
+    //textual removal condition, re-parsed by ConditionParser each iteration
+    const std::string condition_str_;
+    EdgeRemover<Graph> edge_remover_;
+
+    func::TypedPredicate<EdgeId> remove_condition_;
+    //early-stop bound: processing halts once coverage exceeds the parsed cap
+    func::TypedPredicate<EdgeId> proceed_condition_;
+
+protected:
+
+    //Re-derives both predicates for iteration 'it_cnt' of 'total_it_estimate'.
+    void PrepareIteration(size_t it_cnt, size_t total_it_estimate) override {
+        TRACE("Preparing iteration " << it_cnt << " out of total estimate " << total_it_estimate);
+        ConditionParser<Graph> parser(this->g(), condition_str_,
+                                      simplif_info_, it_cnt, total_it_estimate);
+        remove_condition_ = omnigraph::AddAlternativesPresenceCondition(this->g(), parser());
+        TRACE("Updated remove condition");
+        proceed_condition_ = CoverageUpperBound<Graph>(this->g(), parser.max_coverage_bound());
+        TRACE("Updated proceed condition up to coverage " << parser.max_coverage_bound());
+    }
+
+    bool Proceed(EdgeId e) const override {
+        return proceed_condition_(e);
+    }
+
+    //Deletes 'e' if the removal condition holds; returns whether it did.
+    bool Process(EdgeId e) override {
+        TRACE("Checking edge " << this->g().str(e) << " for the removal condition");
+        if (remove_condition_(e)) {
+            TRACE("Check passed, removing");
+            edge_remover_.DeleteEdge(e);
+            return true;
+        }
+        TRACE("Check not passed");
+        return false;
+    }
+
+public:
+    LowCoverageEdgeRemovingAlgorithm(Graph &g,
+                                     const std::string &condition_str,
+                                     const SimplifInfoContainer &simplif_info,
+                                     std::function<void(EdgeId)> removal_handler = nullptr,
+                                     bool canonical_only = true,
+                                     bool track_changes = true,
+                                     size_t total_iteration_estimate = -1ul)
+            : base(g, nullptr,
+                   canonical_only,
+                   omnigraph::CoverageComparator<Graph>(g),
+                   track_changes,
+                   total_iteration_estimate),
+              simplif_info_(simplif_info),
+              condition_str_(condition_str),
+              edge_remover_(g, removal_handler),
+              remove_condition_(func::AlwaysFalse<EdgeId>()),
+              proceed_condition_(func::AlwaysTrue<EdgeId>()) {
+
+        //parser for the final iteration pre-computes the set of potentially
+        //interesting edges for the whole run
+        //NOTE(review): with the default total_iteration_estimate == -1ul the
+        //subtraction wraps around -- presumably ConditionParser copes; confirm
+        ConditionParser<Graph> parser(g, condition_str, simplif_info,
+                                      total_iteration_estimate - 1, total_iteration_estimate);
+        this->interest_el_finder_ =
+                std::make_shared<omnigraph::ParallelInterestingElementFinder<Graph>>(
+                        AddAlternativesPresenceCondition(g, parser()),
+                        simplif_info.chunk_cnt());
+    }
+
+private:
+    DECL_LOGGER("LowCoverageEdgeRemovingAlgorithm");
+};
+
+//Builds an AlternativesAnalyzer from the bulge-remover config section.
+//The maximal bulge length is derived from k and the configured coefficients.
+template<class Graph>
+AlternativesAnalyzer<Graph> ParseBRConfig(const Graph &g,
+                                          const config::debruijn_config::simplification::bulge_remover &config) {
+    size_t max_length = LengthThresholdFinder::MaxBulgeLength(
+        g.k(), config.max_bulge_length_coefficient,
+        config.max_additive_length_coefficient);
+
+    DEBUG("Length bound " << max_length);
+
+    return AlternativesAnalyzer<Graph>(g, config.max_coverage,
+                                                    max_length,
+                                                    config.max_relative_coverage,
+                                                    config.max_delta,
+                                                    config.max_relative_delta,
+                                                    config.max_number_edges);
+}
+
+//Instance removing self-conjugate edges that additionally satisfy the parsed
+//'condition_str'. Operates on canonical edge representatives only.
+template<class Graph>
+AlgoPtr<Graph> SelfConjugateEdgeRemoverInstance(Graph &g, const string &condition_str,
+                const SimplifInfoContainer &info,
+                EdgeRemovalHandlerF<Graph> removal_handler = 0) {
+    ConditionParser<Graph> parser(g, condition_str, info);
+    auto condition = func::And(SelfConjugateCondition<Graph>(g), parser());
+
+    return std::make_shared<omnigraph::ParallelEdgeRemovingAlgorithm<Graph>>(g,
+                                                                  condition,
+                                                                  info.chunk_cnt(),
+                                                                  removal_handler,
+                                                                  /*canonical_only*/true);
+}
+
+//Creates an algorithm removing graph components whose coverage is low
+//relative to the coverage of the adjacent (flanking) graph parts.
+//Returns nullptr (and logs the fact) when the stage is disabled in config.
+template<class Graph>
+AlgoPtr<Graph> RelativeCoverageComponentRemoverInstance (
+        Graph &g,
+        const FlankingCoverage<Graph> &flanking_cov,
+        const config::debruijn_config::simplification::relative_coverage_comp_remover &rcc_config,
+        const SimplifInfoContainer &info,
+        typename ComponentRemover<Graph>::HandlerF removal_handler = nullptr) {
+    if (!rcc_config.enabled) {
+        //BUGFIX: this INFO used to be placed after 'return' and was
+        //therefore unreachable; log before returning, as sibling
+        //*Instance factories do
+        INFO("Removal of relatively low covered connections disabled");
+        return nullptr;
+    }
+
+    //upper bound on the length of a connection eligible for pruning
+    size_t connecting_path_length_bound = LengthThresholdFinder::MaxErroneousConnectionLength(
+            g.k(), rcc_config.max_ec_length_coefficient);
+
+    //empty string disables picture dumping
+    std::string pics_dir = "";
+
+    //negative max_coverage_coeff means "no coverage cap"
+    double max_coverage = math::ge(rcc_config.max_coverage_coeff, 0.)
+                          ? info.detected_coverage_bound() * rcc_config.max_coverage_coeff
+                          : std::numeric_limits<double>::max();
+
+    return std::make_shared<omnigraph::simplification::relative_coverage::
+        RelativeCoverageComponentRemover<Graph>>(g,
+                                                 info.chunk_cnt(),
+                                                 flanking_cov,
+                                                 rcc_config.coverage_gap,
+                                                 size_t(double(info.read_length()) * rcc_config.length_coeff),
+                                                 size_t(double(info.read_length()) * rcc_config.tip_allowing_length_coeff),
+                                                 connecting_path_length_bound,
+                                                 max_coverage,
+                                                 removal_handler, rcc_config.vertex_count_limit, pics_dir);
+}
+
+//Instance disconnecting edges whose coverage is low relative to neighbours
+//(see RelativeCovDisconnectionCondition). Returns nullptr when disabled.
+template<class Graph>
+AlgoPtr<Graph> RelativelyLowCoverageDisconnectorInstance(Graph &g,
+        const FlankingCoverage<Graph> &flanking_cov,
+        const config::debruijn_config::simplification::relative_coverage_edge_disconnector &rced_config,
+        const SimplifInfoContainer &info) {
+    if (!rced_config.enabled) {
+        INFO("Disconnection of relatively low covered edges disabled");
+        return nullptr;
+    }
+
+    return std::make_shared<omnigraph::DisconnectionAlgorithm<Graph>>(g,
+            omnigraph::simplification::relative_coverage::
+            RelativeCovDisconnectionCondition<Graph>(g, flanking_cov, rced_config.diff_mult, rced_config.edge_sum),
+            info.chunk_cnt(),
+            nullptr);
+}
+
+//Instance of the complex bulge remover; the length bound is expressed as a
+//multiple of k. Returns nullptr when disabled.
+template<class Graph>
+AlgoPtr<Graph> ComplexBRInstance(
+    Graph &g,
+    config::debruijn_config::simplification::complex_bulge_remover cbr_config,
+    const SimplifInfoContainer &info) {
+    if (!cbr_config.enabled)
+        return nullptr;
+    size_t max_length = (size_t) ((double) g.k() * cbr_config.max_relative_length);
+    size_t max_diff = cbr_config.max_length_difference;
+    return std::make_shared<omnigraph::complex_br::ComplexBulgeRemover<Graph>>(g, max_length,
+                                                                               max_diff, info.chunk_cnt());
+}
+
+//Instance of the complex tip clipper; returns nullptr when disabled.
+template<class Graph>
+AlgoPtr<Graph> ComplexTipClipperInstance(Graph &g,
+                     config::debruijn_config::simplification::complex_tip_clipper ctc_conf,
+                     const SimplifInfoContainer &info,
+                     typename ComponentRemover<Graph>::HandlerF removal_handler = 0) {
+    if (!ctc_conf.enabled) {
+        INFO("Complex tip clipping disabled");
+        return nullptr;
+    }
+
+    ConditionParser<Graph> parser(g, ctc_conf.condition, info);
+    //parser() is invoked purely for its side effect: it fills in
+    //parser.max_length_bound() used below; the predicate itself is discarded
+    parser();
+
+    return std::make_shared<omnigraph::ComplexTipClipper<Graph>>(g, ctc_conf.max_relative_coverage,
+                                         ctc_conf.max_edge_len,
+                                         parser.max_length_bound(), info.chunk_cnt(),
+                                         "", removal_handler);
+}
+
+//Removes isolated edges: any isolated edge not longer than
+//max(read_length, ier.max_length_any_cov) regardless of coverage, plus
+//longer ones up to ier.max_length if coverage <= ier.max_coverage.
+//Returns nullptr when disabled.
+template<class Graph>
+AlgoPtr<Graph> IsolatedEdgeRemoverInstance(Graph &g,
+                                           config::debruijn_config::simplification::isolated_edges_remover ier,
+                                           const SimplifInfoContainer &info,
+                                           EdgeRemovalHandlerF<Graph> removal_handler = 0) {
+    if (!ier.enabled) {
+        return nullptr;
+    }
+    size_t max_length_any_cov = std::max(info.read_length(), ier.max_length_any_cov);
+
+    auto condition = func::And(IsolatedEdgeCondition<Graph>(g),
+                              func::Or(LengthUpperBound<Graph>(g, max_length_any_cov),
+                                      func::And(LengthUpperBound<Graph>(g, ier.max_length),
+                                               CoverageUpperBound<Graph>(g, ier.max_coverage))));
+
+    return std::make_shared<omnigraph::ParallelEdgeRemovingAlgorithm<Graph>>(g,
+                                                                  condition,
+                                                                  info.chunk_cnt(),
+                                                                  removal_handler,
+                                                                  /*canonical_only*/true);
+}
+
+//Removes short erroneous connections whose coverage is low relative to
+//alternatives (ratio rcec_ratio); returns nullptr when disabled.
+template<class Graph>
+AlgoPtr<Graph> RelativeECRemoverInstance(Graph &g,
+                                         const config::debruijn_config::simplification::relative_coverage_ec_remover &rcec_config,
+                                         const SimplifInfoContainer &info,
+                                         EdgeRemovalHandlerF<Graph> removal_handler) {
+    if (!rcec_config.enabled)
+        return nullptr;
+
+    return std::make_shared<omnigraph::ParallelEdgeRemovingAlgorithm<Graph>>(g,
+            AddRelativeCoverageECCondition(g, rcec_config.rcec_ratio,
+                                           AddAlternativesPresenceCondition(g, func::TypedPredicate<typename Graph::EdgeId>
+                                                   (LengthUpperBound<Graph>(g, rcec_config.max_ec_length)))),
+            info.chunk_cnt(), removal_handler, /*canonical_only*/true);
+}
+
+//Standard erroneous-connection remover driven by a textual condition;
+//returns nullptr when the condition string is empty.
+template<class Graph>
+AlgoPtr<Graph> ECRemoverInstance(Graph &g,
+                                 const config::debruijn_config::simplification::erroneous_connections_remover &ec_config,
+                                 const SimplifInfoContainer &info,
+                                 EdgeRemovalHandlerF<Graph> removal_handler = nullptr,
+                                 size_t iteration_cnt = 1) {
+    if (ec_config.condition.empty())
+        return nullptr;
+
+    return std::make_shared<LowCoverageEdgeRemovingAlgorithm<Graph>>(
+            g, ec_config.condition, info, removal_handler,
+            /*canonical only*/ true, /*track changes*/ true, iteration_cnt);
+}
+
+//Tip clipper over an explicit edge condition; edges are processed in order
+//of increasing length, canonical representatives only.
+template<class Graph>
+AlgoPtr<Graph> TipClipperInstance(Graph &g,
+                                  const EdgeConditionT<Graph> &condition,
+                                  const SimplifInfoContainer &info,
+                                  EdgeRemovalHandlerF<Graph> removal_handler = nullptr,
+                                  bool track_changes = true) {
+    return make_shared<omnigraph::ParallelEdgeRemovingAlgorithm<Graph, omnigraph::LengthComparator<Graph>>>(g,
+                                                                        AddTipCondition(g, condition),
+                                                                        info.chunk_cnt(),
+                                                                        removal_handler,
+                                                                        /*canonical_only*/true,
+                                                                        LengthComparator<Graph>(g),
+                                                                        track_changes);
+}
+
+//Tip clipper configured from the config section; returns nullptr when the
+//condition string is empty.
+template<class Graph>
+AlgoPtr<Graph> TipClipperInstance(Graph &g,
+                                  const config::debruijn_config::simplification::tip_clipper &tc_config,
+                                  const SimplifInfoContainer &info,
+                                  EdgeRemovalHandlerF<Graph> removal_handler = nullptr) {
+    if (tc_config.condition.empty())
+        return nullptr;
+
+    ConditionParser<Graph> parser(g, tc_config.condition, info);
+    auto condition = parser();
+    return TipClipperInstance(g, condition, info, removal_handler, /*track changes*/true);
+}
+
+//Clips dead-end edges satisfying the parsed condition; returns nullptr when
+//disabled or when the condition string is empty.
+template<class Graph>
+AlgoPtr<Graph> DeadEndInstance(Graph &g,
+                               const config::debruijn_config::simplification::dead_end_clipper &dead_end_config,
+                               const SimplifInfoContainer &info,
+                               EdgeRemovalHandlerF<Graph> removal_handler) {
+    if (!dead_end_config.enabled || dead_end_config.condition.empty())
+        return nullptr;
+
+    ConditionParser<Graph> parser(g, dead_end_config.condition, info);
+    auto condition = parser();
+    return make_shared<omnigraph::ParallelEdgeRemovingAlgorithm<Graph, omnigraph::LengthComparator<Graph>>>(g,
+            AddDeadEndCondition(g, condition), info.chunk_cnt(), removal_handler, /*canonical_only*/true,
+            LengthComparator<Graph>(g), /*track changes*/true);
+}
+
+//Tip clipper whose condition combines a length bound with a topology-based
+//uniqueness/plausibility check; change tracking is disabled.
+template<class Graph>
+AlgoPtr<Graph> TopologyTipClipperInstance(
+    Graph &g,
+    const config::debruijn_config::simplification::topology_tip_clipper &ttc_config,
+    const SimplifInfoContainer &info,
+    EdgeRemovalHandlerF<Graph> removal_handler = nullptr) {
+
+    auto condition
+            = func::And(LengthUpperBound<Graph>(g,
+                                               LengthThresholdFinder::MaxTipLength(info.read_length(), g.k(), ttc_config.length_coeff)),
+                       DefaultUniquenessPlausabilityCondition<Graph>(g,
+                                                                     ttc_config.uniqueness_length, ttc_config.plausibility_length));
+
+    return TipClipperInstance(g,
+                              condition, info, removal_handler, /*track changes*/false);
+}
+
+//Instance of the (optionally parallel) bulge remover. Returns nullptr when
+//disabled, or when restricted to the main iteration and this is not it.
+template<class Graph>
+AlgoPtr<Graph> BRInstance(Graph &g,
+                          const config::debruijn_config::simplification::bulge_remover &br_config,
+                          const SimplifInfoContainer &info,
+                          EdgeRemovalHandlerF<Graph> removal_handler = nullptr) {
+    if (!br_config.enabled || (br_config.main_iteration_only && !info.main_iteration())) {
+        return nullptr;
+    }
+
+    auto alternatives_analyzer = ParseBRConfig(g, br_config);
+
+    //cheap necessary condition narrowing the candidate set before the
+    //full alternatives analysis
+    auto candidate_finder = std::make_shared<omnigraph::ParallelInterestingElementFinder<Graph>>(
+                                                          omnigraph::NecessaryBulgeCondition(g,
+                                                                              alternatives_analyzer.max_length(),
+                                                                              alternatives_analyzer.max_coverage()),
+                                                          info.chunk_cnt());
+    if (br_config.parallel) {
+        INFO("Creating parallel br instance");
+        return make_shared<omnigraph::ParallelBulgeRemover<Graph>>(g,
+                candidate_finder,
+                br_config.buff_size,
+                br_config.buff_cov_diff,
+                br_config.buff_cov_rel_diff,
+                alternatives_analyzer,
+                nullptr,
+                removal_handler,
+                /*track_changes*/true);
+    } else {
+        INFO("Creating br instance");
+        return make_shared<omnigraph::BulgeRemover<Graph>>(g,
+                candidate_finder,
+                alternatives_analyzer,
+                nullptr,
+                removal_handler,
+                /*track_changes*/true);
+    }
+}
+
+//Disconnects edges whose starting flank coverage is at most 'cov_bound',
+//provided the start vertex has other outgoing edges. A negative 'cov_bound'
+//disables the stage (returns nullptr).
+template<class Graph>
+AlgoPtr<Graph> LowFlankDisconnectorInstance(Graph &g,
+                                           const FlankingCoverage<Graph> &flanking_cov,
+                                           double cov_bound,
+                                           const SimplifInfoContainer &info,
+                                           EdgeRemovalHandlerF<Graph> removal_handler) {
+    if (math::ls(cov_bound, 0.)) {
+        INFO("Flanking coverage based disconnection disabled");
+        return nullptr;
+    }
+
+    //captures g and flanking_cov by reference, cov_bound by value; the
+    //referenced objects must outlive the returned algorithm
+    auto condition = [&,cov_bound] (EdgeId e) {
+        return g.OutgoingEdgeCount(g.EdgeStart(e)) > 1
+               && math::le(flanking_cov.CoverageOfStart(e), cov_bound);
+    };
+
+    return make_shared<omnigraph::DisconnectionAlgorithm<Graph>>(g, condition,
+                                                                 info.chunk_cnt(),
+                                                                 removal_handler);
+}
+
+//Removes loops and rc-loops with erroneous connections via ECLoopRemover.
+//Returns whether anything was removed; false when the stage is disabled.
+template<class Graph>
+bool RemoveHiddenLoopEC(Graph &g,
+                        const FlankingCoverage<Graph> &flanking_cov,
+                        double determined_coverage_threshold,
+                        config::debruijn_config::simplification::hidden_ec_remover her_config,
+                        EdgeRemovalHandlerF<Graph> removal_handler) {
+    if (her_config.enabled) {
+        INFO("Removing loops and rc loops with erroneous connections");
+        ECLoopRemover<Graph> hc(g, flanking_cov,
+                                determined_coverage_threshold,
+                                her_config.relative_threshold, removal_handler);
+        bool res = hc.Run();
+        hc.PrintLoopStats();
+        return res;
+    }
+    return false;
+}
+
+}
+}
diff --git a/src/common/stages/simplification_pipeline/rna_simplification.hpp b/src/common/stages/simplification_pipeline/rna_simplification.hpp
new file mode 100644
index 0000000..050fa61
--- /dev/null
+++ b/src/common/stages/simplification_pipeline/rna_simplification.hpp
@@ -0,0 +1,22 @@
+#pragma once
+
+#include "assembly_graph/graph_support/parallel_processing.hpp"
+#include "stages/simplification_pipeline/simplification_settings.hpp"
+
+namespace debruijn {
+namespace simplification {
+
+//Removes very short (length <= 1) edges matching the A/T condition.
+//NOTE(review): 0.8 is presumably the minimal A/T fraction required by
+//ATCondition -- confirm against its definition.
+template<class Graph>
+AlgoPtr<Graph> ShortPolyATEdgesRemoverInstance(Graph &g, size_t max_length, EdgeRemovalHandlerF<Graph> removal_handler = 0, size_t chunk_cnt = 1) {
+    auto condition = func::And(ATCondition<Graph>(g, 0.8, max_length, false), LengthUpperBound<Graph>(g, 1));
+    return std::make_shared<omnigraph::ParallelEdgeRemovingAlgorithm<Graph>>(g, condition, chunk_cnt, removal_handler, true);
+}
+
+//Clips tips that match the A/T condition with hard-coded thresholds.
+template<class Graph>
+AlgoPtr<Graph> ATTipClipperInstance(Graph &g, EdgeRemovalHandlerF<Graph> removal_handler = 0, size_t chunk_cnt = 1) {
+//TODO: review params 0.8, 200?
+    return std::make_shared<omnigraph::ParallelEdgeRemovingAlgorithm<Graph>>(g, ATCondition<Graph>(g, 0.8, 200, true), chunk_cnt, removal_handler, true);
+}
+
+}
+}
diff --git a/src/modules/stages/simplification_pipeline/simplification_settings.hpp b/src/common/stages/simplification_pipeline/simplification_settings.hpp
similarity index 98%
rename from src/modules/stages/simplification_pipeline/simplification_settings.hpp
rename to src/common/stages/simplification_pipeline/simplification_settings.hpp
index efaf4d6..ae0edf7 100644
--- a/src/modules/stages/simplification_pipeline/simplification_settings.hpp
+++ b/src/common/stages/simplification_pipeline/simplification_settings.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "modules/pipeline/config_struct.hpp"
+#include "pipeline/config_struct.hpp"
 
 namespace debruijn {
 
diff --git a/src/modules/stages/simplification_pipeline/single_cell_simplification.hpp b/src/common/stages/simplification_pipeline/single_cell_simplification.hpp
similarity index 55%
rename from src/modules/stages/simplification_pipeline/single_cell_simplification.hpp
rename to src/common/stages/simplification_pipeline/single_cell_simplification.hpp
index 49dbc27..ae5a208 100644
--- a/src/modules/stages/simplification_pipeline/single_cell_simplification.hpp
+++ b/src/common/stages/simplification_pipeline/single_cell_simplification.hpp
@@ -1,14 +1,44 @@
 #pragma once
 
 #include "pipeline/config_struct.hpp"
-#include "algorithms/simplification/erroneous_connection_remover.hpp"
-#include "algorithms/simplification/mf_ec_remover.hpp"
-#include "stages/simplification_pipeline/simplification_settings.hpp"
+#include "assembly_graph/graph_support/comparators.hpp"
+#include "assembly_graph/graph_support/basic_edge_conditions.hpp"
 #include "assembly_graph/graph_support/detail_coverage.hpp"
+#include "modules/simplification/erroneous_connection_remover.hpp"
+#include "modules/simplification/mf_ec_remover.hpp"
+#include "stages/simplification_pipeline/simplification_settings.hpp"
 
 namespace debruijn {
 namespace simplification {
 
+//deprecated
+//Removes edges satisfying 'removal_condition' (with alternatives-presence
+//enforced on top) in order of increasing coverage, stopping at 'max_coverage'.
+template<class Graph>
+bool RemoveErroneousEdgesInCoverageOrder(Graph &g,
+                                         func::TypedPredicate<typename Graph::EdgeId> removal_condition,
+                                         double max_coverage,
+                                         std::function<void(typename Graph::EdgeId)> removal_handler) {
+    omnigraph::EdgeRemovingAlgorithm<Graph> erroneous_edge_remover(g,
+                                                                   AddAlternativesPresenceCondition(g, removal_condition),
+                                                                   removal_handler);
+
+    return erroneous_edge_remover.Run(omnigraph::CoverageComparator<Graph>(g),
+                                      omnigraph::CoverageUpperBound<Graph>(g, max_coverage));
+}
+
+//deprecated
+//Removes edges satisfying 'removal_condition' (with alternatives-presence
+//enforced on top) in order of increasing length, stopping at 'max_length'.
+template<class Graph>
+bool RemoveErroneousEdgesInLengthOrder(Graph &g,
+                                       func::TypedPredicate<typename Graph::EdgeId> removal_condition,
+                                       size_t max_length,
+                                       std::function<void(typename Graph::EdgeId)> removal_handler) {
+    omnigraph::EdgeRemovingAlgorithm<Graph> erroneous_edge_remover(g,
+                                                                   AddAlternativesPresenceCondition(g, removal_condition),
+                                                                   removal_handler);
+
+    return erroneous_edge_remover.Run(omnigraph::LengthComparator<Graph>(g),
+                                      omnigraph::LengthUpperBound<Graph>(g, max_length));
+}
+
 template<class Graph>
 bool TopologyRemoveErroneousEdges(
     Graph &g,
@@ -18,10 +48,10 @@ bool TopologyRemoveErroneousEdges(
     size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
         g.k(), tec_config.max_ec_length_coefficient);
 
-    pred::TypedPredicate<typename Graph::EdgeId>
+    func::TypedPredicate<typename Graph::EdgeId>
             condition(omnigraph::DefaultUniquenessPlausabilityCondition<Graph>(g, tec_config.uniqueness_length, tec_config.plausibility_length));
 
-    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
+    return RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
 }
 
 template<class Graph>
@@ -33,12 +63,12 @@ bool MultiplicityCountingRemoveErroneousEdges(
     size_t max_length = LengthThresholdFinder::MaxErroneousConnectionLength(
         g.k(), tec_config.max_ec_length_coefficient);
 
-    pred::TypedPredicate<typename Graph::EdgeId>
+    func::TypedPredicate<typename Graph::EdgeId>
             condition(omnigraph::MultiplicityCountingCondition<Graph>(g, tec_config.uniqueness_length,
                                           /*plausibility*/ MakePathLengthLowerBound(g,
                                           omnigraph::PlausiblePathFinder<Graph>(g, 2 * tec_config.plausibility_length), tec_config.plausibility_length)));
 
-    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
+    return RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
 }
 
 template<class Graph>
@@ -51,10 +81,11 @@ bool RemoveThorns(
         g.k(), isec_config.max_ec_length_coefficient);
 
     auto condition
-            = pred::And(omnigraph::LengthUpperBound<Graph>(g, max_length),
-                        omnigraph::ThornCondition<Graph>(g, isec_config.uniqueness_length, isec_config.span_distance));
+            = func::And(omnigraph::LengthUpperBound<Graph>(g, max_length),
+                        func::And(omnigraph::AdditionalMDAThornCondition<Graph>(g, isec_config.uniqueness_length),
+                                  omnigraph::TopologicalThornCondition<Graph>(g, isec_config.span_distance)));
 
-    return omnigraph::RemoveErroneousEdgesInCoverageOrder(g, condition, numeric_limits<double>::max(), removal_handler);
+    return RemoveErroneousEdgesInCoverageOrder(g, condition, numeric_limits<double>::max(), removal_handler);
 }
 
 template<class Graph>
@@ -67,19 +98,19 @@ bool TopologyReliabilityRemoveErroneousEdges(
         g.k(), trec_config.max_ec_length_coefficient);
 
     auto condition
-            = pred::And(omnigraph::CoverageUpperBound<Graph>(g, trec_config.unreliable_coverage),
+            = func::And(omnigraph::CoverageUpperBound<Graph>(g, trec_config.unreliable_coverage),
                         omnigraph::PredicateUniquenessPlausabilityCondition<Graph>(g,
                         /*uniqueness*/omnigraph::MakePathLengthLowerBound(g, omnigraph::UniquePathFinder<Graph>(g), trec_config.uniqueness_length),
-                        /*plausibility*/pred::AlwaysTrue<typename Graph::EdgeId>()));
+                        /*plausibility*/func::AlwaysTrue<typename Graph::EdgeId>()));
 
-    return omnigraph::RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
+    return RemoveErroneousEdgesInLengthOrder(g, condition, max_length, removal_handler);
 }
 
 template<class Graph>
 bool MaxFlowRemoveErroneousEdges(
     Graph &g,
     const debruijn_graph::config::debruijn_config::simplification::max_flow_ec_remover& mfec_config,
-    omnigraph::HandlerF<Graph> removal_handler = 0) {
+    omnigraph::EdgeRemovalHandlerF<Graph> removal_handler = 0) {
     if (!mfec_config.enabled)
         return false;
     INFO("Removing connections based on max flow strategy");
@@ -93,15 +124,16 @@ bool MaxFlowRemoveErroneousEdges(
 
 template<class Graph>
 bool RemoveHiddenEC(Graph& g,
-                    const debruijn_graph::FlankingCoverage<Graph>& flanking_cov,
+                    const omnigraph::FlankingCoverage<Graph>& flanking_cov,
                     const debruijn_graph::config::debruijn_config::simplification::hidden_ec_remover& her_config,
                     const SimplifInfoContainer& info,
-                    omnigraph::HandlerF<Graph> removal_handler) {
+                    omnigraph::EdgeRemovalHandlerF<Graph> removal_handler) {
     if (her_config.enabled) {
         INFO("Removing hidden erroneous connections");
-        return omnigraph::HiddenECRemover<Graph>(g, her_config.uniqueness_length, flanking_cov,
+        omnigraph::HiddenECRemover<Graph> remover(g, info.chunk_cnt(), flanking_cov, her_config.uniqueness_length,
                                her_config.unreliability_threshold, info.detected_coverage_bound(),
-                               her_config.relative_threshold, removal_handler).Run();
+                               her_config.relative_threshold, removal_handler);
+        return LoopedRun(remover) > 0;
     }
     return false;
 }
diff --git a/src/modules/math/CMakeLists.txt b/src/common/utils/CMakeLists.txt
similarity index 59%
rename from src/modules/math/CMakeLists.txt
rename to src/common/utils/CMakeLists.txt
index 28cb6c6..40c2d20 100644
--- a/src/modules/math/CMakeLists.txt
+++ b/src/common/utils/CMakeLists.txt
@@ -5,10 +5,16 @@
 # See file LICENSE for details.
 ############################################################################
 
-project(math_module CXX)
+project(utils CXX)
 
-add_library(math_module STATIC
-            kmer_coverage_model.cpp)
+set(utils_src
+    copy_file.cpp
+    path_helper.cpp
+    logger/logger_impl.cpp)
 
-target_link_libraries(math_module nlopt)
+if (READLINE_FOUND)
+  set(utils_src ${utils_src} autocompletion.cpp)
+endif()
 
+add_library(utils STATIC
+            ${utils_src})
diff --git a/src/modules/dev_support/autocompletion.cpp b/src/common/utils/autocompletion.cpp
similarity index 98%
rename from src/modules/dev_support/autocompletion.cpp
rename to src/common/utils/autocompletion.cpp
index 6b5060d..bb79146 100644
--- a/src/modules/dev_support/autocompletion.cpp
+++ b/src/common/utils/autocompletion.cpp
@@ -8,6 +8,7 @@
 #include <vector>
 #include <string>
 #include <queue>
+#include <cstring>
 #include <readline/readline.h>
 
 namespace online_visualization {
diff --git a/src/modules/dev_support/autocompletion.hpp b/src/common/utils/autocompletion.hpp
similarity index 100%
rename from src/modules/dev_support/autocompletion.hpp
rename to src/common/utils/autocompletion.hpp
diff --git a/src/modules/dev_support/copy_file.cpp b/src/common/utils/copy_file.cpp
similarity index 98%
rename from src/modules/dev_support/copy_file.cpp
rename to src/common/utils/copy_file.cpp
index f68d9d2..289ff34 100644
--- a/src/modules/dev_support/copy_file.cpp
+++ b/src/common/utils/copy_file.cpp
@@ -7,8 +7,8 @@
 
 #include "copy_file.hpp"
 
-#include "dev_support/path_helper.hpp"
-#include "dev_support/logger/logger.hpp"
+#include "utils/path_helper.hpp"
+#include "utils/logger/logger.hpp"
 
 #include <boost/algorithm/string.hpp>
 
diff --git a/src/modules/dev_support/copy_file.hpp b/src/common/utils/copy_file.hpp
similarity index 94%
rename from src/modules/dev_support/copy_file.hpp
rename to src/common/utils/copy_file.hpp
index f402772..4f0e4ab 100644
--- a/src/modules/dev_support/copy_file.hpp
+++ b/src/common/utils/copy_file.hpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 #include <string>
 
 namespace path {
diff --git a/src/modules/paired_info/CMakeLists.txt b/src/common/utils/coverage_model/CMakeLists.txt
similarity index 70%
rename from src/modules/paired_info/CMakeLists.txt
rename to src/common/utils/coverage_model/CMakeLists.txt
index 35d1605..4df6767 100644
--- a/src/modules/paired_info/CMakeLists.txt
+++ b/src/common/utils/coverage_model/CMakeLists.txt
@@ -5,10 +5,10 @@
 # See file LICENSE for details.
 ############################################################################
 
-project(paired_info CXX)
+project(coverage_model CXX)
 
-add_library(paired_info STATIC
-            bwa_pair_info_filler.cpp)
+add_library(coverage_model STATIC
+        kmer_coverage_model.cpp)
 
-target_link_libraries(paired_info input)
+target_link_libraries(coverage_model nlopt)
 
diff --git a/src/modules/math/kmer_coverage_model.cpp b/src/common/utils/coverage_model/kmer_coverage_model.cpp
similarity index 83%
rename from src/modules/math/kmer_coverage_model.cpp
rename to src/common/utils/coverage_model/kmer_coverage_model.cpp
index db886d7..ce77e11 100644
--- a/src/modules/math/kmer_coverage_model.cpp
+++ b/src/common/utils/coverage_model/kmer_coverage_model.cpp
@@ -7,10 +7,10 @@
 
 #include "kmer_coverage_model.hpp"
 
+#include "utils/logger/logger.hpp"
+#include "utils/verify.hpp"
 #include "math/xmath.h"
-#include "dev_support/logger/logger.hpp"
 #include "math/smooth.hpp"
-#include "dev_support/verify.hpp"
 
 #include <boost/math/special_functions/zeta.hpp>
 #include <boost/math/distributions/normal.hpp>
@@ -27,7 +27,9 @@
 #include <cstddef>
 #include <cmath>
 
-namespace cov_model {
+namespace utils {
+namespace coverage_model {
+
 using std::isfinite;
 
 static const size_t MaxCopy = 10;
@@ -42,7 +44,7 @@ static double perr(size_t i, double scale, double shape) {
 }
 
 static double pgood(size_t i, double zp, double u, double sd, double shape,
-                    double *mixprobs = NULL) {
+                    double* mixprobs = NULL) {
     double res = 0;
 
     for (unsigned copy = 0; copy < MaxCopy; ++copy) {
@@ -55,17 +57,17 @@ static double pgood(size_t i, double zp, double u, double sd, double shape,
 }
 
 class CovModelLogLike {
-    const std::vector <size_t> &cov;
+    const std::vector<size_t>& cov;
 
 public:
-    CovModelLogLike(const std::vector <size_t> &cov)
-            : cov(cov) { }
+    CovModelLogLike(const std::vector<size_t>& cov)
+            : cov(cov) {}
 
     int getN() const { return 7; };
 
 private:
 
-    double eval_(const double *x) const {
+    double eval_(const double* x) const {
         double zp = x[0], p = x[1], shape = x[2], u = x[3], sd = x[4], scale = x[5], shape2 = x[6];
 
         if (zp <= 1 || shape <= 0 || sd <= 0 || p < 1e-9 || p > 1 - 1e-9 || u <= 0 || scale <= 0 ||
@@ -73,7 +75,7 @@ private:
             !isfinite(scale) || !isfinite(shape2))
             return +std::numeric_limits<double>::infinity();
 
-        std::vector <double> kmer_probs(cov.size());
+        std::vector<double> kmer_probs(cov.size());
 
         // Error
         for (size_t i = 0; i < kmer_probs.size(); ++i)
@@ -92,11 +94,11 @@ private:
 };
 
 struct CovModelLogLikeEMData {
-    const std::vector <size_t> &cov;
-    const std::vector <double> &z;
+    const std::vector<size_t>& cov;
+    const std::vector<double>& z;
 };
 
-static double CovModelLogLikeEM(unsigned, const double *x, double *, void *data) {
+static double CovModelLogLikeEM(unsigned, const double* x, double*, void* data) {
     double zp = x[0], shape = x[1], u = x[2], sd = x[3], scale = x[4], shape2 = x[5];
 
     // INFO("Entry: " << x[0] << " " << x[1] << " " << x[2] << " " << x[3] << " " << x[4]);
@@ -106,10 +108,10 @@ static double CovModelLogLikeEM(unsigned, const double *x, double *, void *data)
         !isfinite(scale) || !isfinite(shape2))
         return -std::numeric_limits<double>::infinity();
 
-    const std::vector <size_t> &cov = static_cast<CovModelLogLikeEMData *>(data)->cov;
-    const std::vector <double> &z = static_cast<CovModelLogLikeEMData *>(data)->z;
+    const std::vector<size_t>& cov = static_cast<CovModelLogLikeEMData*>(data)->cov;
+    const std::vector<double>& z = static_cast<CovModelLogLikeEMData*>(data)->z;
 
-    std::vector <double> kmer_probs(cov.size(), 0);
+    std::vector<double> kmer_probs(cov.size(), 0);
 
     // Error
     for (size_t i = 0; i < kmer_probs.size(); ++i) {
@@ -121,7 +123,7 @@ static double CovModelLogLikeEM(unsigned, const double *x, double *, void *data)
 
     // Good
     // Pre-compute mixing probabilities
-    std::vector <double> mixprobs(MaxCopy, 0);
+    std::vector<double> mixprobs(MaxCopy, 0);
     for (unsigned copy = 0; copy < MaxCopy; ++copy)
         mixprobs[copy] = dzeta(copy + 1, zp);
 
@@ -145,11 +147,11 @@ static double CovModelLogLikeEM(unsigned, const double *x, double *, void *data)
 }
 
 
-static std::vector <double> EStep(const std::vector <double> &x,
-                                  double p, size_t N) {
+static std::vector<double> EStep(const std::vector<double>& x,
+                                 double p, size_t N) {
     double zp = x[0], shape = x[1], u = x[2], sd = x[3], scale = x[4], shape2 = x[5];
 
-    std::vector <double> res(N);
+    std::vector<double> res(N);
     for (size_t i = 0; i < N; ++i) {
         double pe = p * perr(i + 1, scale, shape);
         res[i] = pe / (pe + (1 - p) * pgood(i + 1, zp, u, sd, shape2));
@@ -164,7 +166,7 @@ static std::vector <double> EStep(const std::vector <double> &x,
 // first valley.
 size_t KMerCoverageModel::EstimateValley() const {
     // Smooth the histogram
-    std::vector <size_t> scov;
+    std::vector<size_t> scov;
     math::Smooth3RS3R(scov, cov_);
 
     size_t Valley = scov[0];
@@ -216,7 +218,7 @@ void KMerCoverageModel::Fit() {
     if (MaxCov_ - Valley_ < 3)
         WARN("Too many erroneous kmers, the estimates might be unreliable");
 
-    std::vector <size_t> mvals(1 + MaxCov_ - Valley_);
+    std::vector<size_t> mvals(1 + MaxCov_ - Valley_);
     mvals[0] = cov_[MaxCov_];
     size_t tmadcov = mvals[0];
     for (size_t i = 1; i < std::min(MaxCov_ - Valley_, cov_.size() - MaxCov_); ++i) {
@@ -224,10 +226,10 @@ void KMerCoverageModel::Fit() {
         tmadcov += mvals[i];
     }
     size_t madcov = 0;
-    double CovSd = sqrt(5.0 * (double) MaxCov_);
+    double CovSd = sqrt((double) (5 * MaxCov_));
     for (size_t i = 0; i < MaxCov_ - Valley_; ++i) {
         if (madcov > tmadcov / 2) {
-            CovSd = i;
+            CovSd = (double) i;
             break;
         }
         madcov += mvals[i];
@@ -251,26 +253,9 @@ void KMerCoverageModel::Fit() {
     TRACE("Total: " << Total << ". Before: " << BeforeValley);
     TRACE("p: " << ErrorProb);
 
-    std::vector <double> x(6), lb(6), ub(6);
-
-    x[0] = 3;
-    lb[0] = 0;
-    ub[0] = 2000;
-    x[1] = 3;
-    lb[1] = 0;
-    ub[1] = 2000;
-    x[2] = MaxCov_;
-    lb[2] = 0;
-    ub[2] = 2 * MaxCov_;
-    x[3] = CovSd;
-    lb[3] = MaxCov_ - Valley_;
-    ub[3] = SecondValley;
-    x[4] = 1;
-    lb[4] = 0;
-    ub[4] = 2000;
-    x[5] = 0;
-    lb[5] = -6;
-    ub[5] = 6;
+    std::vector<double> x = {3.0, 3.0, (double) MaxCov_, CovSd, 1.0, 0.0},
+        lb = {0.0, 0.0, 0.0, (double) (MaxCov_ - Valley_), 0.0, -6.0},
+        ub = {2000.0, 2000.0, (double) (2 * MaxCov_), (double) SecondValley, 2000.0, 6.0};
 
     INFO("Fitting coverage model");
     // Ensure that there will be at least 2 iterations.
@@ -282,7 +267,7 @@ void KMerCoverageModel::Fit() {
     unsigned it = 1;
     while (fabs(PrevErrProb - ErrorProb) > ErrProbThr) {
         // Recalculate the vector of posterior error probabilities
-        std::vector <double> z = EStep(x, ErrorProb, GoodCov.size());
+        std::vector<double> z = EStep(x, ErrorProb, GoodCov.size());
 
         // Recalculate the probability of error
         PrevErrProb = ErrorProb;
@@ -305,7 +290,7 @@ void KMerCoverageModel::Fit() {
         nlopt::result Results = nlopt::FAILURE;
         try {
             Results = opt.optimize(x, fMin);
-        } catch (nlopt::roundoff_limited &) {
+        } catch (nlopt::roundoff_limited&) {
         }
 
         VERBOSE_POWER_T2(it, 1, "... iteration " << it);
@@ -314,7 +299,7 @@ void KMerCoverageModel::Fit() {
 
         double zp = x[0], shape = x[1], u = x[2], sd = x[3], scale = x[4], shape2 = x[5];
         TRACE("zp: " << zp << " p: " << ErrorProb << " shape: " << shape << " u: " << u << " sd: " << sd <<
-              " scale: " << scale << " shape2: " << shape2);
+                     " scale: " << scale << " shape2: " << shape2);
 
         it += 1;
     }
@@ -345,7 +330,7 @@ void KMerCoverageModel::Fit() {
 
     // If the model converged, then use it to estimate the thresholds.
     if (converged_) {
-        std::vector <double> z = EStep(x, ErrorProb, GoodCov.size());
+        std::vector<double> z = EStep(x, ErrorProb, GoodCov.size());
 
         INFO("Probability of erroneous kmer at valley: " << z[Valley_]);
         converged_ = false;
@@ -359,13 +344,13 @@ void KMerCoverageModel::Fit() {
             }
 
 #if 0
-for (size_t i = 0; i < z.size(); ++i) {
-    double zp = x[0], shape = x[1], u = x[2], sd = x[3], scale = x[4], shape2 = x[5];
-    double pe = ErrorProb * perr(i + 1, scale, shape);
-    double pg = (1 - ErrorProb) * pgood(i + 1, zp, u, sd, shape2);
+        for (size_t i = 0; i < z.size(); ++i) {
+            double zp = x[0], shape = x[1], u = x[2], sd = x[3], scale = x[4], shape2 = x[5];
+            double pe = ErrorProb * perr(i + 1, scale, shape);
+            double pg = (1 - ErrorProb) * pgood(i + 1, zp, u, sd, shape2);
 
-    fprintf(stderr, "%e %e %e %e\n", pe, pg, z[i], perr(i + 1, scale, shape));
-}
+            fprintf(stderr, "%e %e %e %e\n", pe, pg, z[i], perr(i + 1, scale, shape));
+        }
 #endif
     }
 
@@ -373,7 +358,7 @@ for (size_t i = 0; i < z.size(); ++i) {
     if (converged_) {
         INFO("Preliminary threshold calculated as: " << ErrorThreshold_);
         ErrorThreshold_ = (Valley_ < mean_coverage_ ?
-                           std::min(Valley_ + (size_t) (mean_coverage_ - Valley_) / 2, ErrorThreshold_) :
+                           std::min(Valley_ + (size_t) (mean_coverage_ - (double) Valley_) / 2, ErrorThreshold_) :
                            Valley_);
         INFO("Threshold adjusted to: " << ErrorThreshold_);
     } else {
@@ -391,4 +376,5 @@ for (size_t i = 0; i < z.size(); ++i) {
     INFO("Estimated genome size (ignoring repeats): " << GenomeSize_);
 }
 
-};
+}
+}
diff --git a/src/modules/math/kmer_coverage_model.hpp b/src/common/utils/coverage_model/kmer_coverage_model.hpp
similarity index 84%
rename from src/modules/math/kmer_coverage_model.hpp
rename to src/common/utils/coverage_model/kmer_coverage_model.hpp
index 1e7ec38..2268262 100644
--- a/src/modules/math/kmer_coverage_model.hpp
+++ b/src/common/utils/coverage_model/kmer_coverage_model.hpp
@@ -5,26 +5,26 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#ifndef __KMER_COVERAGE_MODEL_HPP__
-#define __KMER_COVERAGE_MODEL_HPP__
+#pragma once
 
 #include <vector>
 #include <cstddef>
 
-namespace cov_model {
+namespace utils {
+namespace coverage_model {
 
 class KMerCoverageModel {
-    const std::vector <size_t> &cov_;
+    const std::vector<size_t>& cov_;
     size_t MaxCov_, Valley_, ErrorThreshold_, LowThreshold_, GenomeSize_;
     double probability_threshold_, strong_probability_threshold_, mean_coverage_, sd_coverage_;
     bool converged_;
 
 public:
-    KMerCoverageModel(const std::vector <size_t> &cov, double probability_threshold,
+    KMerCoverageModel(const std::vector<size_t>& cov, double probability_threshold,
                       double strong_probability_threshold)
             : cov_(cov), LowThreshold_(0), probability_threshold_(probability_threshold),
               strong_probability_threshold_(strong_probability_threshold),
-              mean_coverage_(0.0), sd_coverage_(0.0), converged_(false) { }
+              mean_coverage_(0.0), sd_coverage_(0.0), converged_(false) {}
 
     void Fit();
 
@@ -44,7 +44,5 @@ private:
     size_t EstimateValley() const;
 };
 
-};
-
-
-#endif
+}
+}
diff --git a/src/modules/dev_support/cpp_utils.hpp b/src/common/utils/cpp_utils.hpp
similarity index 100%
rename from src/modules/dev_support/cpp_utils.hpp
rename to src/common/utils/cpp_utils.hpp
diff --git a/src/modules/data_structures/debruijn_graph/debruijn_graph_constructor.hpp b/src/common/utils/debruijn_graph/debruijn_graph_constructor.hpp
similarity index 93%
rename from src/modules/data_structures/debruijn_graph/debruijn_graph_constructor.hpp
rename to src/common/utils/debruijn_graph/debruijn_graph_constructor.hpp
index 7a293f5..47aed1d 100644
--- a/src/modules/data_structures/debruijn_graph/debruijn_graph_constructor.hpp
+++ b/src/common/utils/debruijn_graph/debruijn_graph_constructor.hpp
@@ -6,12 +6,12 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "assembly_graph/graph_core/graph.hpp"
-#include "assembly_graph/graph_core/construction_helper.hpp"
-#include "dev_support/standard_base.hpp"
-#include "data_structures/indices/kmer_extension_index.hpp"
-#include "dev_support/openmp_wrapper.h"
-#include "dev_support/parallel_wrapper.hpp"
+#include "assembly_graph/core/graph.hpp"
+#include "assembly_graph/core/construction_helper.hpp"
+#include "utils/standard_base.hpp"
+#include "utils/indices/kmer_extension_index.hpp"
+#include "utils/openmp_wrapper.h"
+#include "utils/parallel_wrapper.hpp"
 
 namespace debruijn_graph {
 
@@ -182,15 +182,13 @@ private:
 class UnbranchingPathFinder {
 private:
     typedef DeBruijnExtensionIndex<> Index;
-    typedef runtime_k::RtSeq Kmer;
+    typedef RtSeq Kmer;
     typedef Index::kmer_iterator kmer_iterator;
     typedef Index::KeyWithHash KeyWithHash;
     typedef Index::DeEdge DeEdge;
 
     Index &origin_;
     size_t kmer_size_;
-    bool clean_condensed_;
-
 
 public:
     UnbranchingPathFinder(Index &origin, size_t kmer_size) : origin_(origin), kmer_size_(kmer_size) {
@@ -219,20 +217,31 @@ public:
         return ConstructSeqGoingRight(edge);
     }
 
+    //Loop consists of 4 parts: 2 selfRC k+1-mers and two sequences of arbitrary length RC to each other; pos is a position of one of selfRC edges
+    vector<Sequence> SplitLoop(Sequence s, size_t pos) {
+        return {s.Subseq(pos, pos + kmer_size_ + 1), s.Subseq(pos + 1, s.size() - kmer_size_) + s.Subseq(0, pos + kmer_size_)};
+
+    }
+
 //TODO Think about what happends to self rc perfect loops
-    Sequence ConstructLoopFromVertex(const KeyWithHash &kh) {
+    vector<Sequence> ConstructLoopFromVertex(const KeyWithHash &kh) {
         DeEdge break_point(kh, origin_.GetUniqueOutgoing(kh));
-        Sequence result = ConstructSequenceWithEdge(break_point);
-        if (clean_condensed_)
-            origin_.IsolateVertex(kh);
-        return result;
+        Sequence s = ConstructSequenceWithEdge(break_point);
+        Kmer kmer = s.start<Kmer>(kmer_size_ + 1) >> 'A';
+        for(size_t i = kmer_size_; i < s.size(); i++) {
+            kmer = kmer << s[i];
+            if (kmer == !kmer) {
+                return SplitLoop(s, i - kmer_size_);
+            }
+        }
+        return {s};
     }
 };
 
 class UnbranchingPathExtractor {
 private:
     typedef DeBruijnExtensionIndex<> Index;
-    typedef runtime_k::RtSeq Kmer;
+    typedef RtSeq Kmer;
     typedef Index::kmer_iterator kmer_iterator;
     typedef Index::DeEdge DeEdge;
     typedef Index::KeyWithHash KeyWithHash;
@@ -307,12 +316,13 @@ private:
         for (kmer_iterator it = origin_.kmer_begin(); it.good(); ++it) {
             KeyWithHash kh = origin_.ConstructKWH(Kmer(kmer_size_, *it));
             if (!IsJunction(kh)) {
-                Sequence loop = finder.ConstructLoopFromVertex(kh);
-                result.push_back(loop);
-                CleanCondensed(loop);
-                if(loop != (!loop)) {
-                    CleanCondensed(!loop);
-                    result.push_back(!loop);
+                vector<Sequence> loop = finder.ConstructLoopFromVertex(kh);
+                for(Sequence s: loop) {
+                    result.push_back(s);
+                    CleanCondensed(s);
+                    if(s != (!s)) {
+                        result.push_back(!s);
+                    }
                 }
             }
         }
@@ -368,7 +378,7 @@ class FastGraphFromSequencesConstructor {
 private:
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    typedef runtime_k::RtSeq Kmer;
+    typedef RtSeq Kmer;
     typedef DeBruijnExtensionIndex<> Index;
     size_t kmer_size_;
     Index &origin_;
@@ -508,7 +518,7 @@ private:
     typedef typename Graph::EdgeId EdgeId;
     typedef DeBruijnExtensionIndex<> DeBruijn;
     typedef typename Graph::VertexId VertexId;
-    typedef runtime_k::RtSeq Kmer;
+    typedef RtSeq Kmer;
 
     Graph &graph_;
     DeBruijn &origin_;
diff --git a/src/modules/data_structures/debruijn_graph/early_simplification.hpp b/src/common/utils/debruijn_graph/early_simplification.hpp
similarity index 92%
rename from src/modules/data_structures/debruijn_graph/early_simplification.hpp
rename to src/common/utils/debruijn_graph/early_simplification.hpp
index 3fc9d55..d85649f 100644
--- a/src/modules/data_structures/debruijn_graph/early_simplification.hpp
+++ b/src/common/utils/debruijn_graph/early_simplification.hpp
@@ -6,10 +6,9 @@
 //***************************************************************************
 
 #pragma once
-#include "dev_support/standard_base.hpp"
-#include "data_structures/indices/perfect_hash_map.hpp"
-#include "data_structures/sequence/runtime_k.hpp"
-#include "data_structures/mph_index/kmer_index.hpp"
+#include "utils/standard_base.hpp"
+#include "utils/indices/perfect_hash_map.hpp"
+#include "utils/mph_index/kmer_index.hpp"
 
 namespace debruijn_graph {
 
@@ -47,9 +46,9 @@ public:
 #   pragma omp parallel for schedule(guided)
         for(size_t i = 0; i < iters.size(); i++) {
             for (Index::kmer_iterator &it = iters[i]; it.good(); ++it) {
-                KeyWithHash kh = index_.ConstructKWH(runtime_k::RtSeq(index_.k(), *it));
+                KeyWithHash kh = index_.ConstructKWH(RtSeq(index_.k(), *it));
                 if (kh.is_minimal()) {
-                    KeyWithHash kh = index_.ConstructKWH(runtime_k::RtSeq(index_.k(), *it));
+                    KeyWithHash kh = index_.ConstructKWH(RtSeq(index_.k(), *it));
                     for (char i = 0; i < 4; i++) {
                         CleanForwardLinks(kh, i);
                         CleanBackwardLinks(kh, i);
@@ -153,7 +152,7 @@ private:
 #   pragma omp parallel for schedule(guided)
         for(size_t i = 0; i < iters.size(); i++) {
             for(Index::kmer_iterator &it = iters[i]; it.good(); ++it) {
-                KeyWithHash kh = index_.ConstructKWH(runtime_k::RtSeq(index_.k(), *it));
+                KeyWithHash kh = index_.ConstructKWH(RtSeq(index_.k(), *it));
                 if(kh.is_minimal()) {
                     if (index_.OutgoingEdgeCount(kh) >= 2) {
                         result[i] += RemoveForward(kh);
diff --git a/src/modules/dev_support/file_limit.hpp b/src/common/utils/file_limit.hpp
similarity index 96%
rename from src/modules/dev_support/file_limit.hpp
rename to src/common/utils/file_limit.hpp
index 6990b6f..d97c791 100644
--- a/src/modules/dev_support/file_limit.hpp
+++ b/src/common/utils/file_limit.hpp
@@ -11,7 +11,7 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 
 inline rlim_t limit_file(size_t limit) {
   struct rlimit rl;
diff --git a/src/modules/data_structures/indices/edge_index_builders.hpp b/src/common/utils/indices/edge_index_builders.hpp
similarity index 99%
rename from src/modules/data_structures/indices/edge_index_builders.hpp
rename to src/common/utils/indices/edge_index_builders.hpp
index 5281bbc..95d5831 100644
--- a/src/modules/data_structures/indices/edge_index_builders.hpp
+++ b/src/common/utils/indices/edge_index_builders.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "edge_position_index.hpp"
+#include "edge_info_updater.hpp"
 #include "perfect_hash_map_builder.hpp"
 
 namespace debruijn_graph {
diff --git a/src/modules/data_structures/indices/edge_info_updater.hpp b/src/common/utils/indices/edge_info_updater.hpp
similarity index 68%
rename from src/modules/data_structures/indices/edge_info_updater.hpp
rename to src/common/utils/indices/edge_info_updater.hpp
index ce957f6..3760f00 100644
--- a/src/modules/data_structures/indices/edge_info_updater.hpp
+++ b/src/common/utils/indices/edge_info_updater.hpp
@@ -7,10 +7,11 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
-#include "dev_support/openmp_wrapper.h"
-#include "modules/data_structures/sequence/sequence.hpp"
-#include "modules/assembly_graph/graph_core/graph_iterators.hpp"
+#include "utils/standard_base.hpp"
+#include "utils/openmp_wrapper.h"
+#include "sequence/sequence.hpp"
+#include "assembly_graph/core/graph_iterators.hpp"
+#include "utils/indices/edge_position_index.hpp"
 
 namespace debruijn_graph {
 
@@ -19,39 +20,39 @@ class EdgeInfoUpdater {
     typedef typename Index::KMer Kmer;
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Index::KeyWithHash KeyWithHash;
-    typedef typename Index::Value EdgeInfo;
 
     const Graph &g_;
     Index &index_;
 
-
-    void PutInIndex(const KeyWithHash &kwh, EdgeId id, size_t offset) {
-        if (index_.valid(kwh)) {
-            auto &entry = index_.get_raw_value_reference(kwh);
-            if (!entry.valid() || index_.contains(kwh)) {
-                index_.put_value(kwh, EdgeInfo(id, (unsigned)offset, entry.count));
-            }
+//    void PutInIndex(const KeyWithHash &kwh, EdgeId id, size_t offset) {
+//        if (index_.valid(kwh)) {
+//            auto &entry = index_.get_raw_value_reference(kwh);
+//            if (!entry.valid() || index_.contains(kwh)) {
+//                index_.put_value(kwh, EdgeInfo(id, (unsigned)offset, entry.count));
+//            }
+//        }
+//    }
+
+    //todo why do we need to check equality???!!!
+    bool DeleteIfEqual(const KeyWithHash& kwh, EdgeId e) {
+        if (!index_.contains(kwh))
+            return false;
+        if (index_.get_value(kwh).edge_id == e) {
+            index_.get_raw_value_reference(kwh).clear();
+            return true;
         }
+        return false;
     }
 
-      //todo why do we need to check equality???!!!
-      bool DeleteIfEqual(const KeyWithHash &kwh, EdgeId e) {
-          if (!index_.contains(kwh))
-              return false;
-          if (index_.get_value(kwh).edge_id == e) {
-              index_.get_raw_value_reference(kwh).invalidate();
-              return true;
-          }
-          return false;
-      }
-
     void UpdateKMers(const Sequence &nucls, EdgeId e) {
         VERIFY(nucls.size() >= index_.k());
         KeyWithHash kwh = index_.ConstructKWH(Kmer(index_.k(), nucls));
-        index_.PutInIndex(kwh, e, 0);
+        if (kwh.is_minimal())
+            index_.PutInIndex(kwh, e, 0);
         for (size_t i = index_.k(), n = nucls.size(); i < n; ++i) {
             kwh <<= nucls[i];
-            index_.PutInIndex(kwh, e, i - index_.k() + 1);
+            if (kwh.is_minimal())
+                index_.PutInIndex(kwh, e, i - index_.k() + 1);
         }
     }
 
diff --git a/src/modules/data_structures/indices/edge_multi_index.hpp b/src/common/utils/indices/edge_multi_index.hpp
similarity index 98%
rename from src/modules/data_structures/indices/edge_multi_index.hpp
rename to src/common/utils/indices/edge_multi_index.hpp
index c514e55..763e9a5 100644
--- a/src/modules/data_structures/indices/edge_multi_index.hpp
+++ b/src/common/utils/indices/edge_multi_index.hpp
@@ -90,7 +90,7 @@ public:
 };
 
 //todo it is not handling graph events!!!
-template<class IdType, class Seq = runtime_k::RtSeq,
+template<class IdType, class Seq = RtSeq,
     class traits = kmer_index_traits<Seq>,  class StoringType = SimpleStoring >
 class DeBruijnEdgeMultiIndex : public KeyStoringMap<Seq, EdgeInfoStorage<IdType>, traits, StoringType > {
   typedef KeyStoringMap<Seq, EdgeInfoStorage<IdType>, traits, StoringType > base;
diff --git a/src/modules/data_structures/indices/edge_position_index.hpp b/src/common/utils/indices/edge_position_index.hpp
similarity index 62%
rename from src/modules/data_structures/indices/edge_position_index.hpp
rename to src/common/utils/indices/edge_position_index.hpp
index 76f3502..446fad4 100644
--- a/src/modules/data_structures/indices/edge_position_index.hpp
+++ b/src/common/utils/indices/edge_position_index.hpp
@@ -8,9 +8,7 @@
 #pragma once
 
 #include "perfect_hash_map.hpp"
-#include "edge_info_updater.hpp"
-#include "data_structures/sequence/runtime_k.hpp"
-#include "modules/io/reads/single_read.hpp"
+#include "io/reads/single_read.hpp"
 
 namespace debruijn_graph {
 
@@ -20,8 +18,10 @@ struct EdgeInfo {
     unsigned offset;
     unsigned count;
 
-    EdgeInfo(IdType edge_id_ = IdType(), unsigned offset_ = -1u, unsigned count_ = 0) :
-            edge_id(edge_id_), offset(offset_), count(count_) { }
+    EdgeInfo(IdType edge_id_ = IdType(), unsigned offset_ = unsigned(-1), unsigned count_ = 0) :
+            edge_id(edge_id_), offset(offset_), count(count_) {
+        VERIFY(edge_id != IdType() || clean());
+    }
 
     template<class KWH>
     EdgeInfo conjugate(const KWH &kwh) const {
@@ -36,23 +36,37 @@ struct EdgeInfo {
         }
     }
 
-    void invalidate() {
+    void clear() {
         offset = unsigned(-1);
     }
 
+    bool clean() const {
+        return offset == unsigned(-1);
+    }
+
+    void remove() {
+        offset = unsigned(-2);
+    }
+
+    bool removed() const {
+        return offset == unsigned(-2);
+    }
+
     bool valid() const {
-        return offset != unsigned(-1);
+        return !clean() && !removed();
     }
 };
 
 template<class stream, class IdType>
 stream &operator<<(stream &s, const EdgeInfo<IdType> &info) {
-    return s << "EdgeInfo[" << info.edge_id << ", " << info.offset << ", " << info.count << "]"; 
+    return s << "EdgeInfo[" << info.edge_id.int_id() << ", " << info.offset << ", " << info.count << "]";
 }
 
-template<class Graph, class Seq = runtime_k::RtSeq, class traits = kmer_index_traits<Seq>, class StoringType = DefaultStoring>
-class KmerFreeEdgeIndex : public KeyIteratingMap<Seq, EdgeInfo<typename Graph::EdgeId>, traits, StoringType> {
-    typedef KeyIteratingMap<Seq, EdgeInfo<typename Graph::EdgeId>, traits, StoringType> base;
+template<class Graph, class StoringType = DefaultStoring>
+class KmerFreeEdgeIndex : public KeyIteratingMap<RtSeq, EdgeInfo<typename Graph::EdgeId>,
+        kmer_index_traits<RtSeq>, StoringType> {
+    typedef KeyIteratingMap<RtSeq, EdgeInfo<typename Graph::EdgeId>,
+            kmer_index_traits<RtSeq>, StoringType> base;
     const Graph &graph_;
 
 public:
@@ -63,7 +77,7 @@ public:
     typedef Graph GraphT;
     typedef typename Graph::EdgeId IdType;
     typedef typename base::KeyWithHash KeyWithHash;
-    typedef EdgeInfo<typename Graph::EdgeId> Value;
+    typedef EdgeInfo<typename Graph::EdgeId> KmerPos;
     using base::valid;
     using base::ConstructKWH;
 
@@ -80,10 +94,9 @@ public:
         if (!valid(kwh))
             return false;
 
-        Value entry = base::get_value(kwh);
-        if (entry.offset == -1u)
+        KmerPos entry = base::get_value(kwh);
+        if (!entry.valid())
             return false;
-
         return graph_.EdgeNucls(entry.edge_id).contains(kwh.key(), entry.offset);
     }
 
@@ -91,16 +104,27 @@ public:
         if (!valid(kwh))
             return;
         
-        auto &entry = this->get_raw_value_reference(kwh);
-        if (!entry.valid() || contains(kwh)) {
-            this->put_value(kwh, Value(id, (unsigned)offset, entry.count));
+        KmerPos &entry = this->get_raw_value_reference(kwh);
+        if (entry.removed()) {
+            //VERIFY(false);
+            return;
+        }
+        if (entry.clean()) {
+            //put verify on this conversion!
+            this->put_value(kwh, KmerPos(id, (unsigned)offset, entry.count));
+        } else if (contains(kwh)) {
+            //VERIFY(false);
+            entry.remove();
+        } else {
+            //VERIFY(false);
+            //FIXME bad situation; some other kmer is there; think of putting verify
         }
     }
 
     //Only coverage is loaded
     template<class Writer>
     void BinWrite(Writer &writer) const {
-        this->index_.serialize(writer);
+        this->index_ptr_->serialize(writer);
         size_t sz = this->data_.size();
         writer.write((char*)&sz, sizeof(sz));
         for (size_t i = 0; i < sz; ++i)
@@ -110,7 +134,7 @@ public:
     template<class Reader>
     void BinRead(Reader &reader, const std::string/* &FileName*/) {
         this->clear();
-        this->index_.deserialize(reader);
+        this->index_ptr_->deserialize(reader);
         size_t sz = 0;
         reader.read((char*)&sz, sizeof(sz));
         this->data_.resize(sz);
@@ -119,9 +143,11 @@ public:
     }
 };
 
-template<class Graph, class Seq = runtime_k::RtSeq, class traits = kmer_index_traits<Seq>, class StoringType = DefaultStoring>
-class KmerStoringEdgeIndex : public KeyStoringMap<Seq, EdgeInfo<typename Graph::EdgeId>, traits, StoringType> {
-  typedef KeyStoringMap<Seq, EdgeInfo<typename Graph::EdgeId>, traits, StoringType> base;
+template<class Graph, class StoringType = DefaultStoring>
+class KmerStoringEdgeIndex : public KeyStoringMap<RtSeq, EdgeInfo<typename Graph::EdgeId>,
+        kmer_index_traits<RtSeq>, StoringType> {
+  typedef KeyStoringMap<RtSeq, EdgeInfo<typename Graph::EdgeId>,
+          kmer_index_traits<RtSeq>, StoringType> base;
 
 public:
   typedef typename base::traits_t traits_t;
@@ -131,7 +157,7 @@ public:
   typedef Graph GraphT;
   typedef typename Graph::EdgeId IdType;
   typedef typename base::KeyWithHash KeyWithHash;
-  typedef EdgeInfo<typename Graph::EdgeId> Value;
+  typedef EdgeInfo<typename Graph::EdgeId> KmerPos;
   using base::valid;
   using base::ConstructKWH;
 
@@ -152,7 +178,7 @@ public:
 
   template<class Writer>
   void BinWrite(Writer &writer) const {
-      this->index_.serialize(writer);
+      this->index_ptr_->serialize(writer);
       size_t sz = this->data_.size();
       writer.write((char*)&sz, sizeof(sz));
       for (size_t i = 0; i < sz; ++i)
@@ -163,7 +189,7 @@ public:
   template<class Reader>
   void BinRead(Reader &reader, const std::string &FileName) {
       this->clear();
-      this->index_.deserialize(reader);
+      this->index_ptr_->deserialize(reader);
       size_t sz = 0;
       reader.read((char*)&sz, sizeof(sz));
       this->data_.resize(sz);
@@ -171,11 +197,17 @@ public:
           reader.read((char*)&(this->data_[i].count), sizeof(this->data_[0].count));
       this->BinReadKmers(reader, FileName);
   }
+
   void PutInIndex(KeyWithHash &kwh, IdType id, size_t offset) {
-      if (valid(kwh)) {
-          auto &entry = this->get_raw_value_reference(kwh);
-          if (!entry.valid() || contains(kwh)) {
-              this->put_value(kwh, Value(id, (unsigned)offset, entry.count));
+      //here valid already checks equality of query-kmer and stored-kmer sequences
+      if (base::valid(kwh)) {
+          KmerPos &entry = this->get_raw_value_reference(kwh);
+          if (entry.removed())
+              return;
+          if (!entry.clean()) {
+              this->put_value(kwh, KmerPos(id, (unsigned)offset, entry.count));
+          } else {
+              entry.remove();
           }
       }
   }
diff --git a/src/modules/data_structures/indices/editable_index.hpp b/src/common/utils/indices/editable_index.hpp
similarity index 94%
rename from src/modules/data_structures/indices/editable_index.hpp
rename to src/common/utils/indices/editable_index.hpp
index 204bf3f..60b629e 100644
--- a/src/modules/data_structures/indices/editable_index.hpp
+++ b/src/common/utils/indices/editable_index.hpp
@@ -206,17 +206,17 @@ namespace debruijn_graph {
 //};
 
 //template <>
-//class EditableDeBruijnKMerIndexBuilder<kmer_index_traits<runtime_k::RtSeq>> {
+//class EditableDeBruijnKMerIndexBuilder<kmer_index_traits<RtSeq>> {
 // public:
 //  template <class IdType, class Read>
-//  size_t BuildIndexFromStream(EditableDeBruijnKMerIndex<IdType, kmer_index_traits<runtime_k::RtSeq>> &index,
+//  size_t BuildIndexFromStream(EditableDeBruijnKMerIndex<IdType, kmer_index_traits<RtSeq>> &index,
 //                              io::ReadStreamVector<io::IReader<Read> > &streams,
 //                              SingleReadStream* contigs_stream = 0) const {
 //    DeBruijnReadKMerSplitter<Read> splitter(index.workdir(),
 //                                            index.K(), 0,
 //                                            streams, contigs_stream);
-//    KMerDiskCounter<runtime_k::RtSeq> counter(index.workdir(), splitter);
-//    KMerIndexBuilder<typename DeBruijnKMerIndex<IdType, kmer_index_traits<runtime_k::RtSeq>>::KMerIndexT> builder(index.workdir(), 16, streams.size());
+//    KMerDiskCounter<RtSeq> counter(index.workdir(), splitter);
+//    KMerIndexBuilder<typename DeBruijnKMerIndex<IdType, kmer_index_traits<RtSeq>>::KMerIndexT> builder(index.workdir(), 16, streams.size());
 //    size_t sz = builder.BuildIndex(index.index_, counter, /* save final */ true);
 //    index.data_.resize(sz);
 //
@@ -229,11 +229,11 @@ namespace debruijn_graph {
 //  }
 //
 //  template <class IdType, class Graph>
-//  void BuildIndexFromGraph(EditableDeBruijnKMerIndex<IdType, runtime_k::RtSeq> &index,
+//  void BuildIndexFromGraph(EditableDeBruijnKMerIndex<IdType, RtSeq> &index,
 //                           const Graph &g) const {
 //    DeBruijnGraphKMerSplitter<Graph> splitter(index.workdir(), index.K(), g);
-//    KMerDiskCounter<runtime_k::RtSeq> counter(index.workdir(), splitter);
-//    KMerIndexBuilder<typename DeBruijnKMerIndex<typename Graph::EdgeId, kmer_index_traits<runtime_k::RtSeq>>::KMerIndexT> builder(index.workdir(), 16, 1);
+//    KMerDiskCounter<RtSeq> counter(index.workdir(), splitter);
+//    KMerIndexBuilder<typename DeBruijnKMerIndex<typename Graph::EdgeId, kmer_index_traits<RtSeq>>::KMerIndexT> builder(index.workdir(), 16, 1);
 //    size_t sz = builder.BuildIndex(index.index_, counter, /* save final */ true);
 //    index.data_.resize(sz);
 //
diff --git a/src/modules/data_structures/indices/key_with_hash.hpp b/src/common/utils/indices/key_with_hash.hpp
similarity index 97%
rename from src/modules/data_structures/indices/key_with_hash.hpp
rename to src/common/utils/indices/key_with_hash.hpp
index 81026ae..57e5a5a 100644
--- a/src/modules/data_structures/indices/key_with_hash.hpp
+++ b/src/common/utils/indices/key_with_hash.hpp
@@ -57,6 +57,8 @@ public:
 
     bool operator==(const SimpleKeyWithHash &that) const {
         VERIFY(&this->hash_ == &that.hash_);
+        if (this->ready_ && that.ready_)
+            return this->idx_ == that.idx_ && this->is_minimal_ == that.is_minimal_;
         return this->key_ == that.key_;
     }
 
@@ -90,7 +92,7 @@ public:
     }
 
     bool is_minimal() const {
-        return true;;
+        return true;
     }
 };
 
diff --git a/src/modules/data_structures/indices/kmer_extension_index.hpp b/src/common/utils/indices/kmer_extension_index.hpp
similarity index 97%
rename from src/modules/data_structures/indices/kmer_extension_index.hpp
rename to src/common/utils/indices/kmer_extension_index.hpp
index 9e7cc55..b72be84 100644
--- a/src/modules/data_structures/indices/kmer_extension_index.hpp
+++ b/src/common/utils/indices/kmer_extension_index.hpp
@@ -8,7 +8,7 @@
 #pragma once
 
 #include "perfect_hash_map.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 #include "storing_traits.hpp"
 #include <bitset>
 
@@ -196,7 +196,7 @@ struct AbstractDeEdge {
     }
 
     bool operator==(const AbstractDeEdge &other) {
-        return start.idx() == other.start.idx() && end.idx() == other.end.idx();
+        return start == other.start && end == other.end;
     }
 
     bool operator!=(const AbstractDeEdge &other) {
@@ -209,7 +209,7 @@ stream &operator<<(stream &s, const AbstractDeEdge<KWH> de_edge) {
     return s << "DeEdge[" << de_edge.start << ", " << de_edge.end << "]";
 }
 
-template<class traits = slim_kmer_index_traits<runtime_k::RtSeq>, class StoringType = DefaultStoring>
+template<class traits = slim_kmer_index_traits<RtSeq>, class StoringType = DefaultStoring>
 class DeBruijnExtensionIndex : public KeyIteratingMap<typename traits::SeqType, InOutMask, traits, StoringType> {
     typedef KeyIteratingMap<typename traits::SeqType, InOutMask, traits, StoringType> base;
 
diff --git a/src/modules/data_structures/indices/kmer_extension_index_builder.hpp b/src/common/utils/indices/kmer_extension_index_builder.hpp
similarity index 87%
rename from src/modules/data_structures/indices/kmer_extension_index_builder.hpp
rename to src/common/utils/indices/kmer_extension_index_builder.hpp
index 6f4f9fc..4ca9089 100644
--- a/src/modules/data_structures/indices/kmer_extension_index_builder.hpp
+++ b/src/common/utils/indices/kmer_extension_index_builder.hpp
@@ -25,7 +25,7 @@ public:
             if (seq.size() < k + 1)
                 continue;
 
-            typename Index::KeyWithHash kwh = index.ConstructKWH(seq.start<runtime_k::RtSeq>(k));
+            typename Index::KeyWithHash kwh = index.ConstructKWH(seq.start<RtSeq>(k));
             for (size_t j = k; j < seq.size(); ++j) {
                 char nnucl = seq[j], pnucl = kwh[0];
                 index.AddOutgoing(kwh, nnucl);
@@ -43,16 +43,16 @@ public:
         unsigned KPlusOne = index.k() + 1;
 
         typename Index::kmer_iterator it(KPlusOneMersFilename,
-                                         runtime_k::RtSeq::GetDataSize(KPlusOne));
+                                         RtSeq::GetDataSize(KPlusOne));
         for (; it.good(); ++it) {
-            runtime_k::RtSeq kpomer(KPlusOne, *it);
+            RtSeq kpomer(KPlusOne, *it);
 
             char pnucl = kpomer[0], nnucl = kpomer[KPlusOne - 1];
             TRACE("processing k+1-mer " << kpomer);
-            index.AddOutgoing(index.ConstructKWH(runtime_k::RtSeq(KPlusOne - 1, kpomer)),
+            index.AddOutgoing(index.ConstructKWH(RtSeq(KPlusOne - 1, kpomer)),
                               nnucl);
             // FIXME: This is extremely ugly. Needs to add start / end methods to extract first / last N symbols...
-            index.AddIncoming(index.ConstructKWH(runtime_k::RtSeq(KPlusOne - 1, kpomer << 0)),
+            index.AddIncoming(index.ConstructKWH(RtSeq(KPlusOne - 1, kpomer << 0)),
                               pnucl);
         }
     }
@@ -68,7 +68,7 @@ public:
                                  StoringTypeFilter<typename Index::storing_type>>
                 splitter(index.workdir(), index.k() + 1, 0xDEADBEEF, streams,
                          contigs_stream, read_buffer_size);
-        KMerDiskCounter<runtime_k::RtSeq> counter(index.workdir(), splitter);
+        KMerDiskCounter<RtSeq> counter(index.workdir(), splitter);
         counter.CountAll(nthreads, nthreads, /* merge */false);
 
         // Now, count unique k-mers from k+1-mers
@@ -77,7 +77,7 @@ public:
                           index.k() + 1, Index::storing_type::IsInvertable(), read_buffer_size);
         for (unsigned i = 0; i < nthreads; ++i)
             splitter2.AddKMers(counter.GetMergedKMersFname(i));
-        KMerDiskCounter<runtime_k::RtSeq> counter2(index.workdir(), splitter2);
+        KMerDiskCounter<RtSeq> counter2(index.workdir(), splitter2);
 
         BuildIndex(index, counter2, 16, nthreads);
 
diff --git a/src/modules/data_structures/indices/kmer_splitters.hpp b/src/common/utils/indices/kmer_splitters.hpp
similarity index 94%
rename from src/modules/data_structures/indices/kmer_splitters.hpp
rename to src/common/utils/indices/kmer_splitters.hpp
index 9e35934..4f3b087 100644
--- a/src/modules/data_structures/indices/kmer_splitters.hpp
+++ b/src/common/utils/indices/kmer_splitters.hpp
@@ -7,12 +7,11 @@
 
 #pragma once
 
-#include "io/reads_io/io_helper.hpp"
+#include "io/reads/io_helper.hpp"
 #include "storing_traits.hpp"
 
-#include "dev_support/file_limit.hpp"
-#include "data_structures/sequence/runtime_k.hpp"
-#include "data_structures/mph_index/kmer_index_builder.hpp"
+#include "utils/file_limit.hpp"
+#include "utils/mph_index/kmer_index_builder.hpp"
 
 namespace debruijn_graph {
 
@@ -36,7 +35,7 @@ struct StoringTypeFilter<InvertableStoring> {
     }
 };
 
-using RtSeqKMerSplitter = ::KMerSortingSplitter<runtime_k::RtSeq>;
+using RtSeqKMerSplitter = ::KMerSortingSplitter<RtSeq>;
 
 template<class KmerFilter>
 class DeBruijnKMerSplitter : public RtSeqKMerSplitter {
@@ -50,7 +49,7 @@ class DeBruijnKMerSplitter : public RtSeqKMerSplitter {
       if (seq.size() < this->K_)
         return false;
 
-      runtime_k::RtSeq kmer = seq.start<runtime_k::RtSeq>(this->K_) >> 'A';
+      RtSeq kmer = seq.start<RtSeq>(this->K_) >> 'A';
       bool stop = false;
       for (size_t j = this->K_ - 1; j < seq.size(); ++j) {
         kmer <<= seq[j];
@@ -168,6 +167,8 @@ path::files_t DeBruijnReadKMerSplitter<Read, KmerFilter>::Split(size_t num_files
     }
   }
 
+  this->ClearBuffers();
+
   INFO("Used " << counter << " reads. Maximum read length " << rl);
   INFO("Average read length " << double(bases) / double(counter));
   rs_ = { counter, rl, bases };
@@ -228,13 +229,15 @@ path::files_t DeBruijnGraphKMerSplitter<Graph, KmerFilter>::Split(size_t num_fil
 
   INFO("Used " << counter << " sequences.");
 
+  this->ClearBuffers();
+  
   return out;
 }
 
 
 template<class KmerFilter>
 class DeBruijnKMerKMerSplitter : public DeBruijnKMerSplitter<KmerFilter> {
-  typedef MMappedFileRecordArrayIterator<runtime_k::RtSeq::DataType> kmer_iterator;
+  typedef MMappedFileRecordArrayIterator<RtSeq::DataType> kmer_iterator;
 
   unsigned K_source_;
   std::vector<std::string> kmers_;
@@ -261,7 +264,7 @@ inline size_t DeBruijnKMerKMerSplitter<KmerFilter>::FillBufferFromKMers(kmer_ite
                                                                         unsigned thread_id) {
   size_t seqs = 0;
   for (; kmer.good(); ++kmer) {
-    Sequence nucls(runtime_k::RtSeq(K_source_, *kmer));
+    Sequence nucls(RtSeq(K_source_, *kmer));
     seqs += 1;
 
     bool stop = this->FillBufferFromSequence(nucls, thread_id);
@@ -287,7 +290,7 @@ path::files_t DeBruijnKMerKMerSplitter<KmerFilter>::Split(size_t num_files) {
   std::vector<kmer_iterator> its;
   its.reserve(nthreads);
   for (auto it = kmers_.begin(), et = kmers_.end(); it != et; ++it)
-    its.emplace_back(*it, runtime_k::RtSeq::GetDataSize(K_source_));
+    its.emplace_back(*it, RtSeq::GetDataSize(K_source_));
 
   while (std::any_of(its.begin(), its.end(),
                      [](const kmer_iterator &it) { return it.good(); })) {
@@ -305,6 +308,8 @@ path::files_t DeBruijnKMerKMerSplitter<KmerFilter>::Split(size_t num_files) {
 
   INFO("Used " << counter << " kmers.");
 
+  this->ClearBuffers();
+  
   return out;
 }
 
diff --git a/src/modules/data_structures/indices/perfect_hash_map.hpp b/src/common/utils/indices/perfect_hash_map.hpp
similarity index 86%
rename from src/modules/data_structures/indices/perfect_hash_map.hpp
rename to src/common/utils/indices/perfect_hash_map.hpp
index 941acba..857efc9 100644
--- a/src/modules/data_structures/indices/perfect_hash_map.hpp
+++ b/src/common/utils/indices/perfect_hash_map.hpp
@@ -6,11 +6,11 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/openmp_wrapper.h"
-#include "dev_support/path_helper.hpp"
-#include "io/kmers_io/kmer_iterator.hpp"
+#include "utils/openmp_wrapper.h"
+#include "utils/path_helper.hpp"
+#include "io/kmers/kmer_iterator.hpp"
 
-#include "data_structures/mph_index/kmer_index.hpp"
+#include "utils/mph_index/kmer_index.hpp"
 
 #include "key_with_hash.hpp"
 #include "values.hpp"
@@ -32,31 +32,38 @@ public:
 protected:
     typedef KMerIndex<traits> KMerIndexT;
     //these fields are protected only for reduction of storage in edge indices BinWrite
-    KMerIndexT index_;
+    std::shared_ptr<KMerIndexT> index_ptr_;
 private:
     std::string workdir_;
     unsigned k_;
 
 protected:
     size_t raw_seq_idx(const typename KMerIndexT::KMerRawReference s) const {
-        return index_.raw_seq_idx(s);
+        return index_ptr_->raw_seq_idx(s);
     }
 
     bool valid(const size_t idx) const {
-        return idx != InvalidIdx && idx < index_.size();
+        return idx != InvalidIdx && idx < index_ptr_->size();
     }
 public:
-    IndexWrapper(size_t k, const std::string &workdir) : k_((unsigned) k) {
+    IndexWrapper(size_t k, const std::string &workdir)
+            : index_ptr_(std::make_shared<KMerIndexT>())
+            , k_((unsigned) k) {
         //fixme string literal
         workdir_ = path::make_temp_dir(workdir, "kmeridx");
     }
 
+    IndexWrapper(size_t k, const std::string &workdir, std::shared_ptr<KMerIndexT> index_ptr)
+            : IndexWrapper(k, workdir) {
+        index_ptr_ = index_ptr;
+    }
+
     ~IndexWrapper() {
         path::remove_dir(workdir_);
     }
 
     void clear() {
-        index_.clear();
+        index_ptr_->clear();
     }
 
     unsigned k() const { return k_; }
@@ -64,13 +71,13 @@ public:
 public:
     template<class Writer>
     void BinWrite(Writer &writer) const {
-        index_.serialize(writer);
+        index_ptr_->serialize(writer);
     }
 
     template<class Reader>
     void BinRead(Reader &reader, const std::string &) {
         clear();
-        index_.deserialize(reader);
+        index_ptr_->deserialize(reader);
     }
 
     const std::string &workdir() const {
@@ -85,12 +92,12 @@ public:
     typedef K KeyType;
     typedef ValueArray<V> ValueBase;
     typedef IndexWrapper<KeyType, traits> KeyBase;
-    using KeyBase::index_;
+    using KeyBase::index_ptr_;
     typedef typename KeyBase::KMerIndexT KMerIndexT;
     typedef typename StoringTraits<K, KMerIndexT, StoringType>::KeyWithHash KeyWithHash;
 
     KeyWithHash ConstructKWH(const KeyType &key) const {
-        return KeyWithHash(key, index_);
+        return KeyWithHash(key, *index_ptr_);
     }
 
     bool valid(const KeyWithHash &kwh) const {
@@ -100,6 +107,11 @@ public:
     PerfectHashMap(size_t k, const std::string &workdir) : KeyBase(k, workdir) {
     }
 
+    PerfectHashMap(size_t k, const std::string &workdir, std::shared_ptr<KMerIndexT> index_ptr)
+        : KeyBase(k, workdir, index_ptr) {
+        ValueBase::resize(index_ptr_->size());
+    }
+
     ~PerfectHashMap() {
     }
 
@@ -112,6 +124,11 @@ public:
         return StoringType::get_value(*this, kwh);
     }
 
+    template<typename F>
+    const V get_value(const KeyWithHash &kwh, const F& inverter) const {
+        return StoringType::get_value(*this, kwh, inverter);
+    }
+
     //Think twice or ask AntonB if you want to use it!
     V &get_raw_value_reference(const KeyWithHash &kwh) {
         return ValueBase::operator[](kwh.idx());
@@ -125,10 +142,15 @@ public:
         StoringType::set_value(*this, kwh, value);
     }
 
+    template<typename F>
+    void put_value(const KeyWithHash &kwh, const V &value, const F& inverter) {
+        StoringType::set_value(*this, kwh, value, inverter);
+    }
+
     template<class Writer>
     void BinWrite(Writer &writer) const {
-        ValueBase::BinWrite(writer);
         KeyBase::BinWrite(writer);
+        ValueBase::BinWrite(writer);
     }
 
     template<class Reader>
@@ -186,6 +208,7 @@ protected:
         this->kmers_ = traits_t::raw_deserialize(reader, FileName);
     }
 
+public:
     template<class Writer>
     void BinWrite(Writer &writer) const {
         base::BinWrite(writer);
@@ -198,8 +221,6 @@ protected:
         BinReadKmers(reader, FileName);
     }
 
-public:
-
     KeyStoringMap(size_t k, const std::string &workdir)
             : base(k, workdir), kmers_(nullptr) {}
 
diff --git a/src/modules/data_structures/indices/perfect_hash_map_builder.hpp b/src/common/utils/indices/perfect_hash_map_builder.hpp
similarity index 93%
rename from src/modules/data_structures/indices/perfect_hash_map_builder.hpp
rename to src/common/utils/indices/perfect_hash_map_builder.hpp
index b94a596..c8d6972 100644
--- a/src/modules/data_structures/indices/perfect_hash_map_builder.hpp
+++ b/src/common/utils/indices/perfect_hash_map_builder.hpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "data_structures/mph_index/kmer_index_builder.hpp"
+#include "utils/mph_index/kmer_index_builder.hpp"
 
 #include "perfect_hash_map.hpp"
 #include "kmer_splitters.hpp"
@@ -18,11 +18,11 @@ struct PerfectHashMapBuilder {
                     Counter& counter, size_t bucket_num,
                     size_t thread_num, bool save_final = true) const {
         using KMerIndex = typename PerfectHashMap<K, V, traits, StoringType>::KMerIndexT;
-        
+
         KMerIndexBuilder<KMerIndex> builder(index.workdir(),
                                             (unsigned) bucket_num,
                                             (unsigned) thread_num);
-        size_t sz = builder.BuildIndex(index.index_, counter, save_final);
+        size_t sz = builder.BuildIndex(*index.index_ptr_, counter, save_final);
         index.resize(sz);
     }
 };
@@ -85,7 +85,7 @@ size_t BuildIndexFromStream(Index &index,
     DeBruijnReadKMerSplitter<typename Streams::ReadT,
                              StoringTypeFilter<typename Index::storing_type>>
             splitter(index.workdir(), index.k(), 0, streams, contigs_stream);
-    KMerDiskCounter<runtime_k::RtSeq> counter(index.workdir(), splitter);
+    KMerDiskCounter<RtSeq> counter(index.workdir(), splitter);
     BuildIndex(index, counter, 16, streams.size());
     return 0;
 }
@@ -95,7 +95,7 @@ void BuildIndexFromGraph(Index &index, const Graph &g, size_t read_buffer_size =
     DeBruijnGraphKMerSplitter<Graph,
                               StoringTypeFilter<typename Index::storing_type>>
             splitter(index.workdir(), index.k(), g, read_buffer_size);
-    KMerDiskCounter<runtime_k::RtSeq> counter(index.workdir(), splitter);
+    KMerDiskCounter<RtSeq> counter(index.workdir(), splitter);
     BuildIndex(index, counter, 16, 1);
 }
 
diff --git a/src/modules/data_structures/indices/storing_traits.hpp b/src/common/utils/indices/storing_traits.hpp
similarity index 56%
rename from src/modules/data_structures/indices/storing_traits.hpp
rename to src/common/utils/indices/storing_traits.hpp
index b91406f..0904cd4 100644
--- a/src/modules/data_structures/indices/storing_traits.hpp
+++ b/src/common/utils/indices/storing_traits.hpp
@@ -35,20 +35,40 @@ struct SimpleStoring {
 };
 
 struct InvertableStoring {
-    template<class K, class V>
-    static V get_value(const ValueArray<V> &values, const K& key) {
-        if(key.is_minimal())
+    template<typename V>
+    struct default_inverter {
+        template<typename K>
+        V operator()(const V& v, const K& k) const {
+            return v.conjugate(k);
+        }
+    };
+
+    template<typename V>
+    struct trivial_inverter {
+        template<typename K>
+        V operator()(const V& v, const K& /*k*/) const {
+            return v;
+        }
+    };
+
+    template<class K, class V, class F = default_inverter<V>>
+    static V get_value(const ValueArray<V> &values, const K& key,
+                       const F& inverter = F()) {
+        if (key.is_minimal())
             return values[key.idx()];
         else
-            return values[key.idx()].conjugate(key);
+            return inverter(values[key.idx()], key);
     }
 
-    template<class K, class V>
-    static void set_value(ValueArray<V> &values, const K& key, const V& value) {
-        if(key.is_minimal())
+    template<class K, class V, class F = default_inverter<V>>
+    static void set_value(ValueArray<V>& values, const K& key, const V& value,
+                          const F& inverter = F()) {
+        VERIFY(key.idx() < values.size());
+        if (key.is_minimal()) {
             values[key.idx()] = value;
-        else
-            values[key.idx()] = value.conjugate(key);
+        } else {
+            values[key.idx()] = inverter(value, key);
+        }
     }
 
     static bool IsInvertable() {
diff --git a/src/modules/data_structures/indices/values.hpp b/src/common/utils/indices/values.hpp
similarity index 100%
rename from src/modules/data_structures/indices/values.hpp
rename to src/common/utils/indices/values.hpp
diff --git a/src/utils/levenshtein.hpp b/src/common/utils/levenshtein.hpp
similarity index 99%
rename from src/utils/levenshtein.hpp
rename to src/common/utils/levenshtein.hpp
index 007966a..9fad614 100644
--- a/src/utils/levenshtein.hpp
+++ b/src/common/utils/levenshtein.hpp
@@ -9,7 +9,7 @@
 
 #include <string>
 #include <vector>
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 
 /*
  * Little modified copy-paste from http://www.merriampark.com/ldcpp.htm
diff --git a/src/modules/dev_support/log.hpp b/src/common/utils/log.hpp
similarity index 100%
rename from src/modules/dev_support/log.hpp
rename to src/common/utils/log.hpp
diff --git a/src/modules/dev_support/logger/log_writers.hpp b/src/common/utils/logger/log_writers.hpp
similarity index 97%
rename from src/modules/dev_support/logger/log_writers.hpp
rename to src/common/utils/logger/log_writers.hpp
index 12330f3..666c03f 100644
--- a/src/modules/dev_support/logger/log_writers.hpp
+++ b/src/common/utils/logger/log_writers.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 #include "logger.hpp"
 
 #include <iostream>
diff --git a/src/modules/dev_support/logger/logger.hpp b/src/common/utils/logger/logger.hpp
similarity index 99%
rename from src/modules/dev_support/logger/logger.hpp
rename to src/common/utils/logger/logger.hpp
index e72329a..c088aed 100644
--- a/src/modules/dev_support/logger/logger.hpp
+++ b/src/common/utils/logger/logger.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "dev_support/perfcounter.hpp"
+#include "utils/perfcounter.hpp"
 
 #include <vector>
 #include <unordered_map>
diff --git a/src/modules/dev_support/logger/logger_impl.cpp b/src/common/utils/logger/logger_impl.cpp
similarity index 98%
rename from src/modules/dev_support/logger/logger_impl.cpp
rename to src/common/utils/logger/logger_impl.cpp
index c9d8570..4b8ce6b 100644
--- a/src/modules/dev_support/logger/logger_impl.cpp
+++ b/src/common/utils/logger/logger_impl.cpp
@@ -12,7 +12,7 @@
 #include <fstream>
 #include <vector>
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 #include "config.hpp"
 
diff --git a/src/modules/dev_support/md5.h b/src/common/utils/md5.h
similarity index 100%
rename from src/modules/dev_support/md5.h
rename to src/common/utils/md5.h
diff --git a/src/modules/dev_support/memory.hpp b/src/common/utils/memory.hpp
similarity index 100%
rename from src/modules/dev_support/memory.hpp
rename to src/common/utils/memory.hpp
diff --git a/src/modules/dev_support/memory_limit.hpp b/src/common/utils/memory_limit.hpp
similarity index 100%
rename from src/modules/dev_support/memory_limit.hpp
rename to src/common/utils/memory_limit.hpp
diff --git a/src/modules/data_structures/mph_index/CMakeLists.txt b/src/common/utils/mph_index/CMakeLists.txt
similarity index 100%
rename from src/modules/data_structures/mph_index/CMakeLists.txt
rename to src/common/utils/mph_index/CMakeLists.txt
diff --git a/src/modules/data_structures/mph_index/base_hash.hpp b/src/common/utils/mph_index/base_hash.hpp
similarity index 100%
rename from src/modules/data_structures/mph_index/base_hash.hpp
rename to src/common/utils/mph_index/base_hash.hpp
diff --git a/src/modules/data_structures/mph_index/bitpair_vector.cpp b/src/common/utils/mph_index/bitpair_vector.cpp
similarity index 100%
rename from src/modules/data_structures/mph_index/bitpair_vector.cpp
rename to src/common/utils/mph_index/bitpair_vector.cpp
diff --git a/src/modules/data_structures/mph_index/bitpair_vector.hpp b/src/common/utils/mph_index/bitpair_vector.hpp
similarity index 100%
rename from src/modules/data_structures/mph_index/bitpair_vector.hpp
rename to src/common/utils/mph_index/bitpair_vector.hpp
diff --git a/src/modules/data_structures/mph_index/common.hpp b/src/common/utils/mph_index/common.hpp
similarity index 100%
rename from src/modules/data_structures/mph_index/common.hpp
rename to src/common/utils/mph_index/common.hpp
diff --git a/src/modules/data_structures/mph_index/emphf_config.hpp b/src/common/utils/mph_index/emphf_config.hpp
similarity index 100%
rename from src/modules/data_structures/mph_index/emphf_config.hpp
rename to src/common/utils/mph_index/emphf_config.hpp
diff --git a/src/modules/data_structures/mph_index/hypergraph.hpp b/src/common/utils/mph_index/hypergraph.hpp
similarity index 100%
rename from src/modules/data_structures/mph_index/hypergraph.hpp
rename to src/common/utils/mph_index/hypergraph.hpp
diff --git a/src/modules/data_structures/mph_index/hypergraph_sorter_seq.hpp b/src/common/utils/mph_index/hypergraph_sorter_seq.hpp
similarity index 98%
rename from src/modules/data_structures/mph_index/hypergraph_sorter_seq.hpp
rename to src/common/utils/mph_index/hypergraph_sorter_seq.hpp
index 649be20..9adfdc3 100644
--- a/src/modules/data_structures/mph_index/hypergraph_sorter_seq.hpp
+++ b/src/common/utils/mph_index/hypergraph_sorter_seq.hpp
@@ -12,7 +12,7 @@
 #include "common.hpp"
 #include "hypergraph.hpp"
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 namespace emphf {
 
diff --git a/src/modules/data_structures/mph_index/kmer_index.hpp b/src/common/utils/mph_index/kmer_index.hpp
similarity index 100%
rename from src/modules/data_structures/mph_index/kmer_index.hpp
rename to src/common/utils/mph_index/kmer_index.hpp
diff --git a/src/modules/data_structures/mph_index/kmer_index_builder.hpp b/src/common/utils/mph_index/kmer_index_builder.hpp
similarity index 79%
rename from src/modules/data_structures/mph_index/kmer_index_builder.hpp
rename to src/common/utils/mph_index/kmer_index_builder.hpp
index 9993ba1..1d72db1 100644
--- a/src/modules/data_structures/mph_index/kmer_index_builder.hpp
+++ b/src/common/utils/mph_index/kmer_index_builder.hpp
@@ -8,18 +8,21 @@
 
 #include "kmer_index.hpp"
 
-#include "io/kmers_io/mmapped_reader.hpp"
-#include "io/kmers_io/mmapped_writer.hpp"
-#include "utils/adt/pointer_iterator.hpp"
-#include "utils/adt/kmer_vector.hpp"
+#include "io/kmers/mmapped_reader.hpp"
+#include "io/kmers/mmapped_writer.hpp"
+#include "common/adt/pointer_iterator.hpp"
+#include "common/adt/kmer_vector.hpp"
 
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
-#include "dev_support/logger/logger.hpp"
-#include "dev_support/path_helper.hpp"
+#include "utils/logger/logger.hpp"
+#include "utils/path_helper.hpp"
 
-#include "dev_support/memory_limit.hpp"
-#include "dev_support/file_limit.hpp"
+#include "utils/memory_limit.hpp"
+#include "utils/file_limit.hpp"
+
+#include "adt/iterator_range.hpp"
+#include "adt/loser_tree.hpp"
 
 #include "mphf.hpp"
 #include "base_hash.hpp"
@@ -150,9 +153,18 @@ class KMerSortingSplitter : public KMerSplitter<Seq> {
 
 #     pragma omp critical
       {
+        size_t cnt =  it - SortBuffer.begin();
+
+        // Write k-mers
         FILE *f = fopen(ostreams[k].c_str(), "ab");
         VERIFY_MSG(f, "Cannot open temporary file to write");
-        fwrite(SortBuffer.data(), SortBuffer.el_data_size(), it - SortBuffer.begin(), f);
+        fwrite(SortBuffer.data(), SortBuffer.el_data_size(), cnt, f);
+        fclose(f);
+
+        // Write index
+        f = fopen((ostreams[k] + ".idx").c_str(), "ab");
+        VERIFY_MSG(f, "Cannot open temporary file to write");
+        fwrite(&cnt, sizeof(cnt), 1, f);
         fclose(f);
       }
     }
@@ -162,6 +174,14 @@ class KMerSortingSplitter : public KMerSplitter<Seq> {
         eentry.clear();
   }
 
+  void ClearBuffers() {
+    for (auto & entry : kmer_buffers_)
+      for (auto & eentry : entry) {
+        eentry.clear();
+        eentry.shrink_to_fit();
+      }
+  }
+  
   std::string GetRawKMersFname(unsigned suffix) const {
     return path::append_path(this->work_dir_, "kmers.raw." + std::to_string(suffix));
   }
@@ -300,18 +320,80 @@ private:
                     unsigned K) {
     MMappedRecordArrayReader<typename Seq::DataType> ins(ifname, Seq::GetDataSize(K), /* unlink */ true);
 
-    // Sort the stuff
-    libcxx::sort(ins.begin(), ins.end(), array_less<typename Seq::DataType>());
+    std::string IdxFileName = ifname + ".idx";
+    if (FILE *f = fopen(IdxFileName.c_str(), "rb")) {
+      fclose(f);
+      MMappedRecordReader<size_t> index(ifname + ".idx", true, -1ULL);
+
+      // INFO("Total runs: " << index.size());
+
+      // Prepare runs
+      std::vector<adt::iterator_range<decltype(ins.begin())>> ranges;
+      auto beg = ins.begin();
+      for (size_t sz : index) {
+        auto end = std::next(beg, sz);
+        ranges.push_back(adt::make_range(beg, end));
+        VERIFY(std::is_sorted(beg, end, array_less<typename Seq::DataType>()));
+        beg = end;
+      }
+
+      // Construct tree on top entries of runs
+      adt::loser_tree<decltype(beg),
+                      array_less<typename Seq::DataType>> tree(ranges);
 
-    // FIXME: Use something like parallel version of unique_copy but with explicit
-    // resizing.
-    auto it = std::unique(ins.begin(), ins.end(), array_equal_to<typename Seq::DataType>());
+      if (tree.empty()) {
+        FILE *g = fopen(ofname.c_str(), "ab");
+        VERIFY_MSG(g, "Cannot open temporary file to write");
+        fclose(g);
+        return 0;
+      }
 
-    MMappedRecordArrayWriter<typename Seq::DataType> os(ofname, Seq::GetDataSize(K));
-    os.resize(it - ins.begin());
-    std::copy(ins.begin(), it, os.begin());
+      // Write it down!
+      KMerVector<Seq> buf(K, 1024*1024);
+      auto pval = tree.pop();
+      size_t total = 0;
+      while (!tree.empty()) {
+          buf.clear();
+          for (size_t cnt = 0; cnt < buf.capacity() && !tree.empty(); ) {
+              auto cval = tree.pop();
+              if (!array_equal_to<typename Seq::DataType>()(pval, cval)) {
+                  buf.push_back(pval);
+                  pval = cval;
+                  cnt += 1;
+              }
+          }
+          total += buf.size();
+
+          FILE *g = fopen(ofname.c_str(), "ab");
+          VERIFY_MSG(g, "Cannot open temporary file to write");
+          fwrite(buf.data(), buf.el_data_size(), buf.size(), g);
+          fclose(g);
+      }
 
-    return it - ins.begin();
+      // Handle very last value
+      {
+        FILE *g = fopen(ofname.c_str(), "ab");
+        VERIFY_MSG(g, "Cannot open temporary file to write");
+        fwrite(pval.data(), pval.data_size(), 1, g);
+        fclose(g);
+        total += 1;
+      }
+      
+      return total;
+    } else {
+      // Sort the stuff
+      libcxx::sort(ins.begin(), ins.end(), array_less<typename Seq::DataType>());
+
+      // FIXME: Use something like parallel version of unique_copy but with explicit
+      // resizing.
+      auto it = std::unique(ins.begin(), ins.end(), array_equal_to<typename Seq::DataType>());
+
+      MMappedRecordArrayWriter<typename Seq::DataType> os(ofname, Seq::GetDataSize(K));
+      os.resize(it - ins.begin());
+      std::copy(ins.begin(), it, os.begin());
+
+      return it - ins.begin();
+    }
   }
 };
 
diff --git a/src/modules/data_structures/mph_index/kmer_index_traits.hpp b/src/common/utils/mph_index/kmer_index_traits.hpp
similarity index 94%
rename from src/modules/data_structures/mph_index/kmer_index_traits.hpp
rename to src/common/utils/mph_index/kmer_index_traits.hpp
index c9ef67b..4656720 100644
--- a/src/modules/data_structures/mph_index/kmer_index_traits.hpp
+++ b/src/common/utils/mph_index/kmer_index_traits.hpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "io/kmers_io/mmapped_reader.hpp"
+#include "io/kmers/mmapped_reader.hpp"
 #include "mphf.hpp"
 
 template<class Seq>
@@ -72,6 +72,11 @@ struct kmer_index_traits {
     writer.write((char*)data->data(), data->data_size());
   }
 
+  template<class Writer>
+  static void raw_serialize(Writer &writer, const std::unique_ptr<RawKMerStorage> &data) {
+    raw_serialize(writer, data.get());
+  }
+
   template<class Reader>
   static std::unique_ptr<RawKMerStorage> raw_deserialize(Reader &reader, const std::string &FileName) {
     size_t sz, off, elcnt;
diff --git a/src/modules/data_structures/mph_index/mphf.hpp b/src/common/utils/mph_index/mphf.hpp
similarity index 99%
rename from src/modules/data_structures/mph_index/mphf.hpp
rename to src/common/utils/mph_index/mphf.hpp
index 6c364ca..3327fef 100644
--- a/src/modules/data_structures/mph_index/mphf.hpp
+++ b/src/common/utils/mph_index/mphf.hpp
@@ -5,7 +5,7 @@
 #include "bitpair_vector.hpp"
 #include "ranked_bitpair_vector.hpp"
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 namespace emphf {
 
diff --git a/src/modules/data_structures/mph_index/ranked_bitpair_vector.hpp b/src/common/utils/mph_index/ranked_bitpair_vector.hpp
similarity index 100%
rename from src/modules/data_structures/mph_index/ranked_bitpair_vector.hpp
rename to src/common/utils/mph_index/ranked_bitpair_vector.hpp
diff --git a/src/modules/dev_support/openmp_wrapper.h b/src/common/utils/openmp_wrapper.h
similarity index 100%
rename from src/modules/dev_support/openmp_wrapper.h
rename to src/common/utils/openmp_wrapper.h
diff --git a/src/modules/dev_support/parallel_wrapper.hpp b/src/common/utils/parallel_wrapper.hpp
similarity index 100%
rename from src/modules/dev_support/parallel_wrapper.hpp
rename to src/common/utils/parallel_wrapper.hpp
diff --git a/src/modules/dev_support/path_helper.cpp b/src/common/utils/path_helper.cpp
similarity index 99%
rename from src/modules/dev_support/path_helper.cpp
rename to src/common/utils/path_helper.cpp
index 534d459..4225f7e 100644
--- a/src/modules/dev_support/path_helper.cpp
+++ b/src/common/utils/path_helper.cpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 
 #include <sys/types.h>
 #include <sys/stat.h>
diff --git a/src/modules/dev_support/path_helper.hpp b/src/common/utils/path_helper.hpp
similarity index 96%
rename from src/modules/dev_support/path_helper.hpp
rename to src/common/utils/path_helper.hpp
index 372c6f4..73b2ab5 100644
--- a/src/modules/dev_support/path_helper.hpp
+++ b/src/common/utils/path_helper.hpp
@@ -14,8 +14,8 @@
 
 #include <string>
 #include <vector>
-#include "dev_support/logger/logger.hpp"
-#include "dev_support/verify.hpp"
+#include "utils/logger/logger.hpp"
+#include "utils/verify.hpp"
 
 namespace path {
 //todo review and make names consistent!
diff --git a/src/modules/dev_support/perfcounter.hpp b/src/common/utils/perfcounter.hpp
similarity index 100%
rename from src/modules/dev_support/perfcounter.hpp
rename to src/common/utils/perfcounter.hpp
diff --git a/src/modules/dev_support/range.hpp b/src/common/utils/range.hpp
similarity index 98%
rename from src/modules/dev_support/range.hpp
rename to src/common/utils/range.hpp
index bf2595d..2e05bed 100644
--- a/src/modules/dev_support/range.hpp
+++ b/src/common/utils/range.hpp
@@ -1,6 +1,6 @@
 #pragma once
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 
 namespace omnigraph {
 
diff --git a/src/modules/dev_support/segfault_handler.hpp b/src/common/utils/segfault_handler.hpp
similarity index 97%
rename from src/modules/dev_support/segfault_handler.hpp
rename to src/common/utils/segfault_handler.hpp
index 836e2f2..2512ba5 100644
--- a/src/modules/dev_support/segfault_handler.hpp
+++ b/src/common/utils/segfault_handler.hpp
@@ -8,7 +8,7 @@
 
 #pragma once
 
-#include "dev_support/stacktrace.hpp"
+#include "utils/stacktrace.hpp"
 #include "boost/noncopyable.hpp"
 
 #include <signal.h>
diff --git a/src/modules/dev_support/simple_tools.hpp b/src/common/utils/simple_tools.hpp
similarity index 94%
rename from src/modules/dev_support/simple_tools.hpp
rename to src/common/utils/simple_tools.hpp
index 00690a5..c47f70f 100644
--- a/src/modules/dev_support/simple_tools.hpp
+++ b/src/common/utils/simple_tools.hpp
@@ -19,9 +19,9 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include "dev_support/verify.hpp"
-#include "io/reads_io/ireader.hpp"
-#include "dev_support/path_helper.hpp"
+#include "utils/verify.hpp"
+#include "io/reads/ireader.hpp"
+#include "utils/path_helper.hpp"
 #include <memory>
 #include <string>
 #include <set>
@@ -131,7 +131,7 @@ public:
     TmpFolderFixture(std::string tmp_folder = "tmp") :
         tmp_folder_(tmp_folder)
     {
-        path::make_dir(tmp_folder_);
+        path::make_dirs(tmp_folder_);
     }
 
     ~TmpFolderFixture()
@@ -181,4 +181,9 @@ std::ostream& operator<< (std::ostream& os, const std::set<T>& set)
 
 }
 
+template<typename Base, typename T>
+inline bool instanceof(const T *ptr) {
+    return dynamic_cast<const Base *>(ptr) != nullptr;
+}
+
 #endif /* SIMPLE_TOOLS_HPP_ */
diff --git a/src/modules/dev_support/stacktrace.hpp b/src/common/utils/stacktrace.hpp
similarity index 100%
rename from src/modules/dev_support/stacktrace.hpp
rename to src/common/utils/stacktrace.hpp
diff --git a/src/modules/dev_support/standard_base.hpp b/src/common/utils/standard_base.hpp
similarity index 93%
rename from src/modules/dev_support/standard_base.hpp
rename to src/common/utils/standard_base.hpp
index 9adc83b..fac6fcf 100644
--- a/src/modules/dev_support/standard_base.hpp
+++ b/src/common/utils/standard_base.hpp
@@ -84,10 +84,10 @@ using boost::none;
 using boost::noncopyable;
 
 // err handling
-#include "dev_support/stacktrace.hpp"
+#include "utils/stacktrace.hpp"
 
 // path manipulation instead of boost filesystem
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 using path::make_dir;
 using path::remove_dir;
 
@@ -132,9 +132,9 @@ inline void assertion_failed_msg(char const * expr, char const * msg,
 //our
 //math
 #include "math/xmath.h"
-#include "dev_support/func.hpp"
-#include "dev_support/verify.hpp"
+#include "func/func.hpp"
+#include "utils/verify.hpp"
 // log
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 
diff --git a/src/modules/dev_support/verify.hpp b/src/common/utils/verify.hpp
similarity index 97%
rename from src/modules/dev_support/verify.hpp
rename to src/common/utils/verify.hpp
index 337828e..b677a3e 100644
--- a/src/modules/dev_support/verify.hpp
+++ b/src/common/utils/verify.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "dev_support/stacktrace.hpp"
+#include "utils/stacktrace.hpp"
 #include "boost/current_function.hpp"
 #include <sstream>
 #include <iostream>
diff --git a/src/modules/visualization/graph_colorer.hpp b/src/common/visualization/graph_colorer.hpp
similarity index 65%
rename from src/modules/visualization/graph_colorer.hpp
rename to src/common/visualization/graph_colorer.hpp
index 234e1c1..2a77d89 100644
--- a/src/modules/visualization/graph_colorer.hpp
+++ b/src/common/visualization/graph_colorer.hpp
@@ -12,17 +12,20 @@
 #include "visualization/printing_parameter_storage.hpp"
 //#include "edges_position_handler.hpp"
 
-namespace omnigraph {
+using namespace omnigraph;
+
 namespace visualization {
 
+namespace graph_colorer {
+
 template<typename ElementId>
-class ElementColorer : public virtual ParameterStorage<ElementId, string> {
+class ElementColorer : public virtual printing_parameter_storage::ParameterStorage<ElementId, string> {
 public:
     template<typename Iter>
     set<ElementId> ColoredWith(Iter begin, Iter end, const string &color) {
         set<ElementId> result;
-        for(Iter it = begin; it != end; ++it) {
-            if(this->GetValue(*it) == color)
+        for (Iter it = begin; it != end; ++it) {
+            if (this->GetValue(*it) == color)
                 result.insert(*it);
         }
         return result;
@@ -32,19 +35,21 @@ public:
 //TODO remove all default color parameters!
 
 template<typename ElementId>
-class MapColorer : public ElementColorer<ElementId>, public MapParameterStorage<ElementId, string> {
+class MapColorer : public ElementColorer<ElementId>, public printing_parameter_storage::MapParameterStorage<ElementId, string> {
 public:
-    MapColorer(const string &default_color) : MapParameterStorage<ElementId, string>(default_color) {
+    MapColorer(const string &default_color) : printing_parameter_storage::MapParameterStorage<ElementId, string>(default_color) {
     }
 
-    MapColorer(const map<ElementId, string> &color_map) : MapParameterStorage<ElementId, string>(color_map) {
+    MapColorer(const map<ElementId, string> &color_map) : printing_parameter_storage::MapParameterStorage<ElementId, string>(color_map) {
     }
 
-    MapColorer(const map<ElementId, string> &color_map, const string& default_color) : MapParameterStorage<ElementId, string>(color_map, default_color) {
+    MapColorer(const map<ElementId, string> &color_map, const string &default_color)
+            : printing_parameter_storage::MapParameterStorage<ElementId, string>(color_map, default_color) {
     }
 
     template<class It>
-    MapColorer(It begin, It end, const string& color, const string& default_color) : MapParameterStorage<ElementId, string>(begin, end, color, default_color) {
+    MapColorer(It begin, It end, const string &color, const string &default_color)
+            : printing_parameter_storage::MapParameterStorage<ElementId, string>(begin, end, color, default_color) {
     }
 
     virtual ~MapColorer() {
@@ -52,9 +57,9 @@ public:
 };
 
 template<typename ElementId>
-class FixedColorer: public MapColorer<ElementId> {
+class FixedColorer : public MapColorer<ElementId> {
 public:
-    FixedColorer(const string& default_color): MapColorer<ElementId>(default_color) {
+    FixedColorer(const string &default_color) : MapColorer<ElementId>(default_color) {
     }
 };
 
@@ -77,12 +82,13 @@ private:
 public:
     template<class It>
     SetColorer(const Graph &graph, It begin, It end, const string &color) :
-        MapColorer<typename Graph::EdgeId>(ConstructColorMap(begin, end, color), "black"), graph_(graph) {
+            MapColorer<typename Graph::EdgeId>(ConstructColorMap(begin, end, color), "black"), graph_(graph) {
     }
 
     template<class Collection>
-    SetColorer(const Graph &graph, const Collection& c, const string &color) :
-        MapColorer<typename Graph::EdgeId>(ConstructColorMap(c.begin(), c.end(), color), "black"), graph_(graph) {
+    SetColorer(const Graph &graph, const Collection &c, const string &color) :
+            MapColorer<typename Graph::EdgeId>(ConstructColorMap(c.begin(), c.end(), color), "black"),
+            graph_(graph) {
     }
 
 };
@@ -111,7 +117,7 @@ public:
 
 
 template<class Graph>
-class CompositeEdgeColorer: public ElementColorer<typename Graph::EdgeId> {
+class CompositeEdgeColorer : public ElementColorer<typename Graph::EdgeId> {
 private:
     typedef typename Graph::VertexId VertexId;
     typedef typename Graph::EdgeId EdgeId;
@@ -120,33 +126,35 @@ private:
 
     vector<string> CollectColors(EdgeId edge) const {
         vector<string> result = {default_color_};
-        for(auto it = colorers_.begin(); it != colorers_.end(); ++it) {
+        for (auto it = colorers_.begin(); it != colorers_.end(); ++it) {
             string next_color = (*it)->GetValue(edge);
-            if(std::find(result.begin(), result.end(), next_color) == result.end())
+            if (std::find(result.begin(), result.end(), next_color) == result.end())
                 result.push_back(next_color);
         }
         return result;
     }
 
     string ConstructColorString(const vector<string> &colors) const {
-        if(colors.size() == 1)
+        if (colors.size() == 1)
             return default_color_;
         string result = "";
-        for(size_t i = 1; i < colors.size(); i++)
+        for (size_t i = 1; i < colors.size(); i++)
             result += ":" + colors[i];
         return result.substr(1, result.size());
     }
 
 public:
-    CompositeEdgeColorer(const string &default_color): default_color_(default_color) {
+    CompositeEdgeColorer(const string &default_color) : default_color_(default_color) {
     }
 
-    CompositeEdgeColorer(shared_ptr<ElementColorer<typename Graph::EdgeId>> colorer, const string &default_color): default_color_(default_color) {
+    CompositeEdgeColorer(shared_ptr<ElementColorer<typename Graph::EdgeId>> colorer,
+                         const string &default_color) : default_color_(default_color) {
         AddColorer(colorer);
     }
 
-    CompositeEdgeColorer(shared_ptr<ElementColorer<typename Graph::EdgeId>> colorer1, shared_ptr<ElementColorer<typename Graph::EdgeId>> colorer2,
-            const string &default_color): default_color_(default_color) {
+    CompositeEdgeColorer(shared_ptr<ElementColorer<typename Graph::EdgeId>> colorer1,
+                         shared_ptr<ElementColorer<typename Graph::EdgeId>> colorer2,
+                         const string &default_color) : default_color_(default_color) {
         AddColorer(colorer1);
         AddColorer(colorer2);
     }
@@ -161,9 +169,11 @@ public:
 };
 
 template<class Graph>
-class GraphColorer : public ElementColorer<typename Graph::VertexId>, public ElementColorer<typename Graph::EdgeId>{
+class GraphColorer
+        : public ElementColorer<typename Graph::VertexId>, public ElementColorer<typename Graph::EdgeId> {
 public:
     string GetValue(typename Graph::VertexId) const = 0;
+
     string GetValue(typename Graph::EdgeId) const = 0;
 
     template<typename Iter>
@@ -183,6 +193,7 @@ public:
     string GetValue(typename Graph::VertexId v) const {
         return inner_colorer_.GetValue(v);
     }
+
     string GetValue(typename Graph::EdgeId e) const {
         return inner_colorer_.GetValue(e);
     }
@@ -209,12 +220,13 @@ public:
 //    }
 
     BorderDecorator(const GraphComponent<Graph> &component,
-            const GraphColorer<Graph> &colorer, const string &border_color = "yellow") :
-            component_(component), vertex_colorer_(colorer), edge_colorer_(colorer), border_color_(border_color) {
+                    const GraphColorer<Graph> &colorer, const string &border_color = "yellow") :
+            component_(component), vertex_colorer_(colorer), edge_colorer_(colorer),
+            border_color_(border_color) {
     }
 
     string GetValue(VertexId v) const {
-        if(component_.IsBorder(v)) {
+        if (component_.IsBorder(v)) {
             return border_color_;
         } else {
             return vertex_colorer_.GetValue(v);
@@ -226,7 +238,8 @@ public:
     }
 
     static shared_ptr<BorderDecorator<Graph>> GetInstance(const GraphComponent<Graph> &component,
-            const GraphColorer<Graph> &colorer, const string &border_color = "yellow") {
+                                                          const GraphColorer<Graph> &colorer,
+                                                          const string &border_color = "yellow") {
         return make_shared<BorderDecorator<Graph>>(component, colorer, border_color);
     }
 };
@@ -248,20 +261,20 @@ private:
 public:
 
     SinkSourceDecorator(const GraphComponent<Graph> &component,
-            const GraphColorer<Graph> &colorer, const string &sink_color = "red", const string &source_color = "orange", const string &sinksource_color = "green") :
-            component_(component), vertex_colorer_(colorer), edge_colorer_(colorer), sink_color_(sink_color), source_color_(source_color), sinksource_color_(sinksource_color)  {
+                        const GraphColorer<Graph> &colorer, const string &sink_color = "red",
+                        const string &source_color = "orange", const string &sinksource_color = "green") :
+            component_(component), vertex_colorer_(colorer), edge_colorer_(colorer), sink_color_(sink_color),
+            source_color_(source_color), sinksource_color_(sinksource_color) {
     }
 
     string GetValue(VertexId v) const {
-        if(component_.sinks().count(v) && !component_.sources().count(v)) {
+        if (component_.exits().count(v) && !component_.entrances().count(v)) {
             return sink_color_;
         }
-        if(component_.sources().count(v) && !component_.sinks().count(v))
-        {
+        if (component_.entrances().count(v) && !component_.exits().count(v)) {
             return source_color_;
         }
-        if(component_.sources().count(v) && component_.sinks().count(v))
-        {
+        if (component_.entrances().count(v) && component_.exits().count(v)) {
             return sinksource_color_;
         }
 
@@ -273,13 +286,15 @@ public:
     }
 
     static shared_ptr<SinkSourceDecorator<Graph>> GetInstance(const GraphComponent<Graph> &component,
-            const GraphColorer<Graph> &colorer, const string &sink_color = "red", const string &source_color = "orange") {
+                                                              const GraphColorer<Graph> &colorer,
+                                                              const string &sink_color = "red",
+                                                              const string &source_color = "orange") {
         return make_shared<SinkSourceDecorator<Graph>>(component, colorer, sink_color, source_color);
     }
 };
 
 template<class Graph>
-class CompositeGraphColorer: public GraphColorer<Graph> {
+class CompositeGraphColorer : public GraphColorer<Graph> {
 private:
     typedef typename Graph::VertexId VertexId;
     typedef typename Graph::EdgeId EdgeId;
@@ -287,10 +302,10 @@ private:
     const shared_ptr<ElementColorer<VertexId>> vertex_colorer_;
     const shared_ptr<ElementColorer<EdgeId>> edge_colorer_;
 public:
-    CompositeGraphColorer(shared_ptr<ElementColorer<VertexId>> vertex_colorer
-            , shared_ptr<ElementColorer<EdgeId>> edge_colorer) :
-                vertex_colorer_(vertex_colorer),
-                edge_colorer_(edge_colorer) {
+    CompositeGraphColorer(shared_ptr<ElementColorer<VertexId>> vertex_colorer,
+                          shared_ptr<ElementColorer<EdgeId>> edge_colorer) :
+            vertex_colorer_(vertex_colorer),
+            edge_colorer_(edge_colorer) {
     }
 
 //    explicit CompositeGraphColorer(shared_ptr<ElementColorer<EdgeId>> edge_colorer = make_shared<FixedColorer<EdgeId>>("black")) :
@@ -309,19 +324,20 @@ public:
 };
 
 
-
 // edge_colorer management is passed here
 //TODO check all usages
-template <class Graph>
-shared_ptr<GraphColorer<Graph>> DefaultColorer(const Graph& /*g*/,
-        shared_ptr<ElementColorer<typename Graph::EdgeId>> edge_colorer) {
-    return shared_ptr<GraphColorer<Graph>>(new CompositeGraphColorer<Graph>(make_shared<FixedColorer<typename Graph::VertexId>>("white"), edge_colorer));
+template<class Graph>
+shared_ptr<GraphColorer<Graph>> DefaultColorer(const Graph & /*g*/,
+                                               shared_ptr<ElementColorer<typename Graph::EdgeId>> edge_colorer) {
+    return shared_ptr<GraphColorer<Graph>>(
+            new CompositeGraphColorer<Graph>(make_shared<FixedColorer<typename Graph::VertexId>>("white"),
+                                             edge_colorer));
 }
 
-template <class Graph>
-shared_ptr<GraphColorer<Graph>> DefaultColorer(const Graph& g,
-        const Path<typename Graph::EdgeId>& path1,
-        const Path<typename Graph::EdgeId>& path2) {
+template<class Graph>
+shared_ptr<GraphColorer<Graph>> DefaultColorer(const Graph &g,
+                                               const Path<typename Graph::EdgeId> &path1,
+                                               const Path<typename Graph::EdgeId> &path2) {
     shared_ptr<ElementColorer<typename Graph::EdgeId>> edge_colorer =
             make_shared<CompositeEdgeColorer<Graph>>(
                     make_shared<SetColorer<Graph>>(g, path1.sequence(), "red"),
@@ -330,11 +346,10 @@ shared_ptr<GraphColorer<Graph>> DefaultColorer(const Graph& g,
 }
 
 template<class Graph>
-shared_ptr<GraphColorer<Graph>> DefaultColorer(const Graph& /*g*/) {
+shared_ptr<GraphColorer<Graph>> DefaultColorer(const Graph & /*g*/) {
     return shared_ptr<GraphColorer<Graph>>(new CompositeGraphColorer<Graph>(
-                            make_shared<FixedColorer<typename Graph::VertexId>>("white"),
-                            make_shared<FixedColorer<typename Graph::EdgeId>>("black")));
-}
-
+            make_shared<FixedColorer<typename Graph::VertexId>>("white"),
+            make_shared<FixedColorer<typename Graph::EdgeId>>("black")));
 }
 }
+}
\ No newline at end of file
diff --git a/src/modules/visualization/graph_labeler.hpp b/src/common/visualization/graph_labeler.hpp
similarity index 75%
rename from src/modules/visualization/graph_labeler.hpp
rename to src/common/visualization/graph_labeler.hpp
index 733ca0f..8690af7 100644
--- a/src/modules/visualization/graph_labeler.hpp
+++ b/src/common/visualization/graph_labeler.hpp
@@ -5,20 +5,21 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#ifndef GRAPH_LABELER_HPP_
-#define GRAPH_LABELER_HPP_
+#pragma once
 
-#include "dev_support/simple_tools.hpp"
-#include "dev_support/standard_base.hpp"
-#include "assembly_graph/handlers/edges_position_handler.hpp"
+#include "utils/simple_tools.hpp"
+#include "utils/standard_base.hpp"
+#include "common/assembly_graph/handlers/edges_position_handler.hpp"
 
-namespace omnigraph {
+namespace visualization {
+
+namespace graph_labeler {
 
 /**
- * (Interface)
- * Provides string labels for vertices and edges of some graph.
- * Used with GraphPrinter to visualize graphs.
- */
+* (Interface)
+* Provides string labels for vertices and edges of some graph.
+* Used with GraphPrinter to visualize graphs.
+*/
 template<class Graph>
 class GraphLabeler {
 public:
@@ -62,16 +63,16 @@ public:
 //};
 
 template<class Graph>
-class AbstractGraphLabeler: public GraphLabeler<Graph> {
+class AbstractGraphLabeler : public GraphLabeler<Graph> {
     typedef typename Graph::VertexId VertexId;
     typedef typename Graph::EdgeId EdgeId;
-    const Graph& g_;
+    const Graph &g_;
 protected:
-    AbstractGraphLabeler(const Graph& g): g_(g) {
+    AbstractGraphLabeler(const Graph &g) : g_(g) {
 
     }
 
-    const Graph& graph() const {
+    const Graph &graph() const {
         return g_;
     }
 
@@ -87,9 +88,9 @@ public:
 };
 
 /**
- * Trivial implementation of GraphLabeler.
- * All labels are "".
- */
+* Trivial implementation of GraphLabeler.
+* All labels are "".
+*/
 template<class Graph>
 class EmptyGraphLabeler : public GraphLabeler<Graph> {
     typedef GraphLabeler<Graph> base;
@@ -108,16 +109,16 @@ public:
 };
 
 /**
- * Implementation of GraphLabeler for Graphs that have methods
- * str(VertexId) and str(EdgeId), such as AbstractGraph.
- */
+* Implementation of GraphLabeler for Graphs that have methods
+* str(VertexId) and str(EdgeId), such as AbstractGraph.
+*/
 template<class Graph>
 class StrGraphLabeler : public AbstractGraphLabeler<Graph> {
     typedef AbstractGraphLabeler<Graph> base;
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 public:
-    StrGraphLabeler(const Graph& g) : base(g) {}
+    StrGraphLabeler(const Graph &g) : base(g) {}
 
     /*virtual*/ std::string label(VertexId v) const {
         return this->graph().str(v);
@@ -132,8 +133,8 @@ public:
     }
 };
 
-template <class Graph>
-shared_ptr<GraphLabeler<Graph>> StrGraphLabelerInstance(const Graph& g) {
+template<class Graph>
+shared_ptr<GraphLabeler<Graph>> StrGraphLabelerInstance(const Graph &g) {
     return make_shared<StrGraphLabeler<Graph>>(g);
 }
 
@@ -143,7 +144,7 @@ class LengthIdGraphLabeler : public StrGraphLabeler<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 public:
-    LengthIdGraphLabeler(const Graph& g) : base(g) {}
+    LengthIdGraphLabeler(const Graph &g) : base(g) {}
 
     /*virtual*/ std::string label(EdgeId e) const {
         std::stringstream ss;
@@ -159,7 +160,7 @@ class LengthGraphLabeler : public StrGraphLabeler<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 public:
-    LengthGraphLabeler(const Graph& g) : base(g) {}
+    LengthGraphLabeler(const Graph &g) : base(g) {}
 
     /*virtual*/ std::string label(EdgeId e) const {
         return ToString(this->graph().length(e));
@@ -173,7 +174,7 @@ class CoverageGraphLabeler : public AbstractGraphLabeler<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 public:
-    CoverageGraphLabeler(const Graph& g) : base(g) {}
+    CoverageGraphLabeler(const Graph &g) : base(g) {}
 
     std::string label(EdgeId e) const {
         double coverage = this->graph().coverage(e);
@@ -186,21 +187,21 @@ class CompositeLabeler : public GraphLabeler<Graph> {
 private:
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
-    vector<GraphLabeler<Graph>*> list_;
+    vector<GraphLabeler<Graph> *> list_;
 
     template<typename ElementId>
     string ConstructLabel(ElementId id) const {
         vector<string> to_print;
-        for(size_t i = 0; i < list_.size(); i++) {
+        for (size_t i = 0; i < list_.size(); i++) {
             string next = list_[i]->label(id);
-            if(next.size() != 0) {
+            if (next.size() != 0) {
                 to_print.push_back(next);
             }
         }
         string result = "";
-        for(size_t i = 0; i < to_print.size(); i++) {
+        for (size_t i = 0; i < to_print.size(); i++) {
             result += to_print[i];
-            if(i + 1 < to_print.size())
+            if (i + 1 < to_print.size())
                 result += "\\n";
         }
         return result;
@@ -210,14 +211,17 @@ public:
     CompositeLabeler() {
     }
 
-    CompositeLabeler(GraphLabeler<Graph> &labeler1, GraphLabeler<Graph> &labeler2, GraphLabeler<Graph> &labeler3, GraphLabeler<Graph> &labeler4) {
+    CompositeLabeler(GraphLabeler<Graph> &labeler1, GraphLabeler<Graph> &labeler2,
+                     GraphLabeler<Graph> &labeler3,
+                     GraphLabeler<Graph> &labeler4) {
         AddLabeler(labeler1);
         AddLabeler(labeler2);
         AddLabeler(labeler3);
         AddLabeler(labeler4);
     }
 
-    CompositeLabeler(GraphLabeler<Graph> &labeler1, GraphLabeler<Graph> &labeler2, GraphLabeler<Graph> &labeler3) {
+    CompositeLabeler(GraphLabeler<Graph> &labeler1, GraphLabeler<Graph> &labeler2,
+                     GraphLabeler<Graph> &labeler3) {
         AddLabeler(labeler1);
         AddLabeler(labeler2);
         AddLabeler(labeler3);
@@ -245,14 +249,14 @@ public:
 };
 
 template<class Graph>
-class EdgePosGraphLabeler: public AbstractGraphLabeler<Graph> {
+class EdgePosGraphLabeler : public AbstractGraphLabeler<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 public:
-    const EdgesPositionHandler<Graph>& edge_pos_;
+    const omnigraph::EdgesPositionHandler<Graph> &edge_pos_;
 
-    EdgePosGraphLabeler(const Graph& g, const EdgesPositionHandler<Graph>& edge_pos) :
-        AbstractGraphLabeler<Graph>(g), edge_pos_(edge_pos) {
+    EdgePosGraphLabeler(const Graph &g, const omnigraph::EdgesPositionHandler<Graph> &edge_pos) :
+            AbstractGraphLabeler<Graph>(g), edge_pos_(edge_pos) {
     }
 
     virtual std::string label(EdgeId edgeId) const {
@@ -262,23 +266,24 @@ public:
     virtual ~EdgePosGraphLabeler() {
 //        TRACE("~EdgePosGraphLabeler");
     }
+
 private:
     DECL_LOGGER("EdgePosGraphLabeler")
 };
 
 template<class Graph>
-class DefaultLabeler: public GraphLabeler<Graph> {
+class DefaultLabeler : public GraphLabeler<Graph> {
 private:
-    const Graph& g_;
-    const EdgesPositionHandler<Graph> &edges_positions_;
+    const Graph &g_;
+    const omnigraph::EdgesPositionHandler<Graph> &edges_positions_;
 protected:
     typedef GraphLabeler<Graph> super;
     typedef typename super::EdgeId EdgeId;
     typedef typename super::VertexId VertexId;
 public:
 
-    DefaultLabeler(const Graph &g, const EdgesPositionHandler<Graph> &position_handler) :
-        g_(g), edges_positions_(position_handler) {
+    DefaultLabeler(const Graph &g, const omnigraph::EdgesPositionHandler<Graph> &position_handler) :
+            g_(g), edges_positions_(position_handler) {
     }
 
     virtual std::string label(VertexId vertexId) const {
@@ -288,7 +293,7 @@ public:
     virtual std::string label(EdgeId edgeId) const {
         std::string ret_label;
         ret_label += "Id " + g_.str(edgeId) + "\\n";
-        ret_label += "Positions:\\n"+ edges_positions_.str(edgeId);
+        ret_label += "Positions:\\n" + edges_positions_.str(edgeId);
         size_t len = g_.length(edgeId);
         double cov = g_.coverage(edgeId);
         ret_label += "Len(cov): " + ToString(len) + "(" + ToString(cov) + ")";
@@ -298,7 +303,6 @@ public:
     virtual ~DefaultLabeler() {
     }
 };
-
+}
 }
 
-#endif /* GRAPH_LABELER_HPP_ */
diff --git a/src/modules/io/graph_io/graph_print_utils.hpp b/src/common/visualization/graph_print_utils.hpp
similarity index 98%
rename from src/modules/io/graph_io/graph_print_utils.hpp
rename to src/common/visualization/graph_print_utils.hpp
index abed05f..0c2f978 100755
--- a/src/modules/io/graph_io/graph_print_utils.hpp
+++ b/src/common/visualization/graph_print_utils.hpp
@@ -8,9 +8,9 @@
 #ifndef GRAPH_PRINTER_HPP_
 #define GRAPH_PRINTER_HPP_
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 
-namespace gvis {
+namespace visualization {
 
 template<class VertexId>
 struct BaseVertex {
@@ -129,9 +129,9 @@ protected:
         int bound = approximateLength / 6;
         int num = currentLength / bound;
         double perc = (currentLength % bound) * 1. / bound;
-        for(int i = 0; i < 3; i++) {
+        for (int i = 0; i < 3; i++) {
             ss << getColorParameter(points[num][i], points[num + 1][i], perc);
-            if(i != 2)
+            if (i != 2)
                 ss << ",";
         }
         return ss.str();
@@ -323,6 +323,5 @@ public:
     }
 };
 
-
 }
 #endif //GRAPH_PRINTER_HPP_//
diff --git a/src/modules/visualization/graph_printer.hpp b/src/common/visualization/graph_printer.hpp
similarity index 51%
rename from src/modules/visualization/graph_printer.hpp
rename to src/common/visualization/graph_printer.hpp
index 9a9927f..d1f9b67 100644
--- a/src/modules/visualization/graph_printer.hpp
+++ b/src/common/visualization/graph_printer.hpp
@@ -7,15 +7,18 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
-#include "io/graph_io/graph_print_utils.hpp"
+#include "utils/standard_base.hpp"
+#include "graph_print_utils.hpp"
 #include "graph_labeler.hpp"
 #include "graph_colorer.hpp"
 #include "vertex_linker.hpp"
 
-namespace omnigraph {
+using namespace omnigraph;
+
 namespace visualization {
 
+namespace graph_printer {
+
 template<class Graph>
 class GraphPrinter {
 private:
@@ -24,9 +27,9 @@ private:
 //    ostream& os_;
     const Graph &graph_;
 protected:
-    const GraphLabeler<Graph> &labeler_;
-    const GraphColorer<Graph> &colorer_;
-    const VertexLinker<Graph> &linker_;
+    const graph_labeler::GraphLabeler<Graph> &labeler_;
+    const graph_colorer::GraphColorer<Graph> &colorer_;
+    const vertex_linker::VertexLinker<Graph> &linker_;
 
 protected:
 //    ostream& os() {
@@ -39,13 +42,13 @@ protected:
     }
 
     template<class GvisVertexId>
-    gvis::BaseVertex<GvisVertexId> CreateBaseVertex(GvisVertexId id, VertexId v) {
-        return gvis::BaseVertex<GvisVertexId>(id, labeler_.label(v), linker_.GetValue(v), colorer_.GetValue(v));
+    BaseVertex<GvisVertexId> CreateBaseVertex(GvisVertexId id, VertexId v) {
+        return BaseVertex<GvisVertexId>(id, labeler_.label(v), linker_.GetValue(v), colorer_.GetValue(v));
     }
 
     template<class GvisVertexId>
-    gvis::BaseEdge<GvisVertexId> CreateBaseEdge(GvisVertexId from, GvisVertexId to, EdgeId e){
-        return gvis::BaseEdge<GvisVertexId>(from, to, this->labeler_.label(e), this->colorer_.GetValue(e));
+    BaseEdge<GvisVertexId> CreateBaseEdge(GvisVertexId from, GvisVertexId to, EdgeId e){
+        return BaseEdge<GvisVertexId>(from, to, this->labeler_.label(e), this->colorer_.GetValue(e));
     }
 
     virtual void ManageDrawn(VertexId v, set<VertexId> &visited) {
@@ -54,11 +57,11 @@ protected:
 
 public:
     GraphPrinter(const Graph &graph, /*ostream &os,*/
-            const GraphLabeler<Graph> &labeler,
-            const GraphColorer<Graph> &colorer,
-            const VertexLinker<Graph> &linker) :
-            /*os_(os), */graph_(graph), labeler_(labeler), colorer_(colorer), linker_(
-                    linker) {
+                 const graph_labeler::GraphLabeler<Graph> &labeler,
+                 const graph_colorer::GraphColorer<Graph> &colorer,
+                 const vertex_linker::VertexLinker<Graph> &linker) :
+    /*os_(os), */graph_(graph), labeler_(labeler), colorer_(colorer), linker_(
+            linker) {
     }
 
     virtual void open() = 0;
@@ -70,8 +73,8 @@ public:
     template<class iter>
     void AddVertices(iter vbegin, iter vend) {
         set<VertexId> drawn;
-        for(;vbegin != vend; ++vbegin) {
-            if(drawn.count(*vbegin) == 0) {
+        for (; vbegin != vend; ++vbegin) {
+            if (drawn.count(*vbegin) == 0) {
                 AddVertex(*vbegin);
                 ManageDrawn(*vbegin, drawn);
             }
@@ -82,7 +85,7 @@ public:
 
     template<class iter>
     void AddEdges(iter ebegin, iter eend) {
-        for(;ebegin != eend; ++ebegin) {
+        for (; ebegin != eend; ++ebegin) {
             AddEdge(*ebegin);
         }
     }
@@ -97,13 +100,15 @@ private:
     typedef typename Graph::VertexId VertexId;
     typedef typename Graph::EdgeId EdgeId;
 
-    gvis::DotSingleGraphRecorder<size_t> recorder_;
+    DotSingleGraphRecorder<size_t> recorder_;
 
 public:
     SingleGraphPrinter(const Graph &graph, ostream &os,
-            const GraphLabeler<Graph> &labeler,
-            const GraphColorer<Graph> &colorer,
-            const VertexLinker<Graph> &linker) : GraphPrinter<Graph>(/*os_, */graph, labeler, colorer, linker), recorder_(os){
+                       const graph_labeler::GraphLabeler<Graph> &labeler,
+                       const graph_colorer::GraphColorer<Graph> &colorer,
+                       const vertex_linker::VertexLinker<Graph> &linker) : GraphPrinter<Graph>(/*os_, */graph, labeler,
+                                                                                         colorer, linker),
+                                                            recorder_(os) {
     }
 
     void open() {
@@ -115,11 +120,13 @@ public:
     }
 
     void AddVertex(VertexId v) {
-        recorder_.recordVertex(this->CreateBaseVertex((size_t)this->graph().int_id(v), v));
+        recorder_.recordVertex(this->CreateBaseVertex((size_t) this->graph().int_id(v), v));
     }
 
     void AddEdge(EdgeId edge) {
-        recorder_.recordEdge(this->CreateBaseEdge((size_t)this->graph().int_id(this->graph().EdgeStart(edge)), (size_t)this->graph().int_id(this->graph().EdgeEnd(edge)), edge));
+        recorder_.recordEdge(this->CreateBaseEdge((size_t) this->graph().int_id(this->graph().EdgeStart(edge)),
+                                                  (size_t) this->graph().int_id(this->graph().EdgeEnd(edge)),
+                                                  edge));
     }
 };
 
@@ -129,17 +136,18 @@ private:
     typedef typename Graph::VertexId VertexId;
     typedef typename Graph::EdgeId EdgeId;
 
-    gvis::DotPairedGraphRecorder<size_t> recorder_;
+    DotPairedGraphRecorder<size_t> recorder_;
 
-    pair<gvis::BaseVertex<size_t>, gvis::BaseVertex<size_t>> CreateDoubleVertex(VertexId v) {
-        gvis::BaseVertex<size_t> u1 = this->CreateBaseVertex((size_t)this->graph().int_id(v), v);
-        gvis::BaseVertex<size_t> u2 = this->CreateBaseVertex((size_t)this->graph().int_id(this->graph().conjugate(v)), this->graph().conjugate(v));
+    pair<BaseVertex<size_t>, BaseVertex<size_t>> CreateDoubleVertex(VertexId v) {
+        BaseVertex<size_t> u1 = this->CreateBaseVertex((size_t)this->graph().int_id(v), v);
+        BaseVertex<size_t> u2 = this->CreateBaseVertex((size_t)this->graph().int_id(this->graph().conjugate(v)), this->graph().conjugate(v));
         return make_pair(u1, u2);
     }
 
     pair<size_t, size_t> CreateDoubleVertexId(VertexId v) {
         return make_pair(this->graph().int_id(v), this->graph().int_id(this->graph().conjugate(v)));
     }
+
 protected:
     /*virtual */void ManageDrawn(VertexId v, set<VertexId> &visited) {
         visited.insert(v);
@@ -148,9 +156,11 @@ protected:
 
 public:
     PairedGraphPrinter(const Graph &graph, ostream &os,
-            const GraphLabeler<Graph> &labeler,
-            const GraphColorer<Graph> &colorer,
-            const VertexLinker<Graph> &linker) : GraphPrinter<Graph>(/*os_, */graph, labeler, colorer, linker), recorder_(os) {
+                       const graph_labeler::GraphLabeler<Graph> &labeler,
+                       const graph_colorer::GraphColorer<Graph> &colorer,
+                       const vertex_linker::VertexLinker<Graph> &linker) : GraphPrinter<Graph>(/*os_, */graph, labeler,
+                                                                                         colorer, linker),
+                                                            recorder_(os) {
     }
 
     void open() {
@@ -168,7 +178,7 @@ public:
     void AddEdge(EdgeId edge) {
         auto vid1 = CreateDoubleVertexId(this->graph().EdgeStart(edge));
         auto vid2 = CreateDoubleVertexId(this->graph().EdgeEnd(edge));
-        recorder_.recordEdge(gvis::BaseEdge<pair<size_t, size_t>>(vid1, vid2, this->labeler_.label(edge), this->colorer_.GetValue(edge)));
+        recorder_.recordEdge(BaseEdge<pair<size_t, size_t>>(vid1, vid2, this->labeler_.label(edge), this->colorer_.GetValue(edge)));
     }
 };
 
diff --git a/src/modules/visualization/position_filler.hpp b/src/common/visualization/position_filler.hpp
similarity index 61%
rename from src/modules/visualization/position_filler.hpp
rename to src/common/visualization/position_filler.hpp
index 406d679..e0e61b3 100644
--- a/src/modules/visualization/position_filler.hpp
+++ b/src/common/visualization/position_filler.hpp
@@ -7,25 +7,26 @@
 
 #pragma once
 
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "common/modules/alignment/sequence_mapper.hpp"
 #include "assembly_graph/handlers/edges_position_handler.hpp"
-#include "io/reads_io/wrapper_collection.hpp"
-#include "io/reads_io/easy_reader.hpp"
-#include "io/reads_io/io_helper.hpp"
+#include "io/reads/wrapper_collection.hpp"
+#include "io/reads/io_helper.hpp"
 
-namespace debruijn_graph {
+namespace visualization {
+
+namespace position_filler {
 
 template<class Graph>
 class PosFiller {
     typedef typename Graph::EdgeId EdgeId;
-    typedef std::shared_ptr<SequenceMapper<Graph>> MapperPtr;
+    typedef std::shared_ptr<debruijn_graph::SequenceMapper < Graph>> MapperPtr;
     const Graph &g_;
     MapperPtr mapper_;
-    EdgesPositionHandler<Graph> &edge_pos_;
+    omnigraph::EdgesPositionHandler<Graph> &edge_pos_;
 
 public:
     PosFiller(const Graph &g, MapperPtr mapper,
-              EdgesPositionHandler<Graph> &edge_pos) :
+              omnigraph::EdgesPositionHandler<Graph> &edge_pos) :
             g_(g), mapper_(mapper), edge_pos_(edge_pos) {
 
     }
@@ -36,26 +37,26 @@ public:
     }
 
     void Process(const io::SingleRead &read) const {
-        MappingPath<EdgeId> path = mapper_->MapRead(read);
+        omnigraph::MappingPath<EdgeId> path = mapper_->MapRead(read);
         const string name = read.name();
         int cur_pos = 0;
         TRACE("Contig " << name << " mapped on " << path.size()
-              << " fragments.");
+                        << " fragments.");
         for (size_t i = 0; i < path.size(); i++) {
             EdgeId ei = path[i].first;
-            MappingRange mr = path[i].second;
+            omnigraph::MappingRange mr = path[i].second;
             int len = (int) (mr.mapped_range.end_pos - mr.mapped_range.start_pos);
             if (i > 0) if (path[i - 1].first != ei) if (g_.EdgeStart(ei) != g_.EdgeEnd(path[i - 1].first)) {
                 TRACE(
                         "Contig " << name
-                        << " mapped on not adjacent edge. Position in contig is "
-                        << path[i - 1].second.initial_range.start_pos
-                           + 1
-                        << "--"
-                        << path[i - 1].second.initial_range.end_pos
-                        << " and "
-                        << mr.initial_range.start_pos + 1
-                        << "--" << mr.initial_range.end_pos);
+                                  << " mapped on not adjacent edge. Position in contig is "
+                                  << path[i - 1].second.initial_range.start_pos
+                                     + 1
+                                  << "--"
+                                  << path[i - 1].second.initial_range.end_pos
+                                  << " and "
+                                  << mr.initial_range.start_pos + 1
+                                  << "--" << mr.initial_range.end_pos);
             }
             edge_pos_.AddEdgePosition(ei, name, mr.initial_range.start_pos,
                                       mr.initial_range.end_pos,
@@ -79,15 +80,17 @@ private:
 
 template<class gp_t>
 void FillPos(gp_t &gp, const string &contig_file, string prefix, bool with_rc = false) {
-    PosFiller<typename gp_t::graph_t> pos_filler(gp.g, MapperInstance(gp), gp.edge_pos);
-    auto irs = std::make_shared<io::PrefixAddingReaderWrapper>(io::EasyStream(contig_file, with_rc, false), prefix);
+    PosFiller<typename gp_t::graph_t> pos_filler(gp.g, debruijn_graph::MapperInstance(gp), gp.edge_pos);
+    auto irs = std::make_shared<io::PrefixAddingReaderWrapper>(io::EasyStream(contig_file, with_rc, false),
+                                                               prefix);
     pos_filler.Process(*irs);
 }
 
 template<class gp_t>
 void FillPos(gp_t &gp, const Sequence &s, string name) {
-    PosFiller<typename gp_t::graph_t> pos_filler(gp.g, MapperInstance(gp), gp.edge_pos);
+    PosFiller<typename gp_t::graph_t> pos_filler(gp.g, debruijn_graph::MapperInstance(gp), gp.edge_pos);
     pos_filler.Process(s, name);
 }
 
 }
+}
\ No newline at end of file
diff --git a/src/modules/visualization/printing_parameter_storage.hpp b/src/common/visualization/printing_parameter_storage.hpp
similarity index 79%
rename from src/modules/visualization/printing_parameter_storage.hpp
rename to src/common/visualization/printing_parameter_storage.hpp
index f052733..2d4d500 100644
--- a/src/modules/visualization/printing_parameter_storage.hpp
+++ b/src/common/visualization/printing_parameter_storage.hpp
@@ -8,11 +8,15 @@
 //***************************************************************************
 
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "assembly_graph/components/graph_component.hpp"
-namespace omnigraph {
+
+using namespace omnigraph;
+
 namespace visualization {
 
+namespace printing_parameter_storage {
+
 template<typename ElementId, typename Value>
 class ParameterStorage {
 public:
@@ -27,7 +31,7 @@ class MapParameterStorage : public virtual ParameterStorage<ElementId, Value> {
 private:
 private:
     template<class It>
-    static map<ElementId, string> ConstructMap(It begin, It end, const string& color) {
+    static map<ElementId, string> ConstructMap(It begin, It end, const string &color) {
         map<ElementId, string> result;
         for (auto it = begin; it != end; ++it) {
             result.insert(make_pair(*it, color));
@@ -43,14 +47,16 @@ public:
     MapParameterStorage(const string &default_value) : default_value_(default_value) {
     }
 
-    MapParameterStorage(map<ElementId, Value> storage, Value default_value) : storage_(storage), default_value_(default_value) {
+    MapParameterStorage(map<ElementId, Value> storage, Value default_value) : storage_(storage),
+                                                                              default_value_(default_value) {
     }
 
     MapParameterStorage(map<ElementId, Value> storage) : storage_(storage) {
     }
 
     template<class It>
-    MapParameterStorage(It begin, It end, const Value& value, const string& default_value) : storage_(ConstructMap(begin, end, value)), default_value_(default_value) {
+    MapParameterStorage(It begin, It end, const Value &value, const string &default_value) : storage_(
+            ConstructMap(begin, end, value)), default_value_(default_value) {
     }
 
 
@@ -69,7 +75,8 @@ class DecoratorParameterStorage : public virtual ParameterStorage<ElementId, Val
 private:
     ParameterStorage<ElementId, Value> inner_storage_;
 public:
-    DecoratorParameterStorage(ParameterStorage<ElementId, Value> inner_storage) : inner_storage_(inner_storage) {
+    DecoratorParameterStorage(ParameterStorage<ElementId, Value> inner_storage) : inner_storage_(
+            inner_storage) {
     }
 
     Value GetInnerValue(ElementId element) {
@@ -78,4 +85,4 @@ public:
 };
 
 }
-}
+}
\ No newline at end of file
diff --git a/src/modules/visualization/vertex_linker.hpp b/src/common/visualization/vertex_linker.hpp
similarity index 58%
rename from src/modules/visualization/vertex_linker.hpp
rename to src/common/visualization/vertex_linker.hpp
index f960b20..b85ab76 100644
--- a/src/modules/visualization/vertex_linker.hpp
+++ b/src/common/visualization/vertex_linker.hpp
@@ -7,23 +7,27 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "printing_parameter_storage.hpp"
 
-namespace omnigraph {
 namespace visualization {
 
+namespace vertex_linker {
+
 template<class Graph>
-class VertexLinker : public virtual ParameterStorage<typename Graph::VertexId, string> {
+class VertexLinker
+        : public virtual printing_parameter_storage::ParameterStorage<typename Graph::VertexId, string> {
 };
 
 template<class Graph>
-class MapVertexLinker : public VertexLinker<Graph>, public MapParameterStorage<typename Graph::VertexId, string> {
+class MapVertexLinker : public VertexLinker<Graph>,
+                        public printing_parameter_storage::MapParameterStorage<typename Graph::VertexId, string> {
 public:
-    MapVertexLinker() : MapParameterStorage<typename Graph::VertexId, string>("") {
+    MapVertexLinker() : printing_parameter_storage::MapParameterStorage<typename Graph::VertexId, string>("") {
     }
 
-    MapVertexLinker(const map<typename Graph::VertexId, string> &link_map) : MapParameterStorage<typename Graph::VertexId, string>(link_map, "") {
+    MapVertexLinker(const map<typename Graph::VertexId, string> &link_map) :
+            printing_parameter_storage::MapParameterStorage<typename Graph::VertexId, string>(link_map, "") {
     }
 
     virtual ~MapVertexLinker() {
@@ -38,4 +42,5 @@ public:
 };
 
 }
-}
+
+}
\ No newline at end of file
diff --git a/src/modules/visualization/visualization.hpp b/src/common/visualization/visualization.hpp
similarity index 100%
rename from src/modules/visualization/visualization.hpp
rename to src/common/visualization/visualization.hpp
diff --git a/src/common/visualization/visualization_utils.hpp b/src/common/visualization/visualization_utils.hpp
new file mode 100644
index 0000000..34ec334
--- /dev/null
+++ b/src/common/visualization/visualization_utils.hpp
@@ -0,0 +1,223 @@
+#pragma once
+
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "graph_printer.hpp"
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
+#include "assembly_graph/components/splitters.hpp"
+#include "assembly_graph/components/graph_component.hpp"
+#include "visualizers.hpp"
+#include "vertex_linker.hpp"
+
+#include <fstream>
+
+namespace visualization {
+
+namespace visualization_utils {
+
+template<class Graph>
+void WriteComponents(const Graph &g,
+                     const string &folder_name,
+                     shared_ptr<GraphSplitter<Graph>> inner_splitter,
+                     shared_ptr<graph_colorer::GraphColorer<Graph>> colorer,
+                     const graph_labeler::GraphLabeler<Graph> &labeler) {
+    vertex_linker::EmptyGraphLinker<Graph> linker;
+//  shared_ptr<GraphComponentFilter<Graph>> checker = make_shared<ComponentSizeFilter<Graph>>(g, 1500, 2, 300);
+    auto filter = make_shared<omnigraph::SmallComponentFilter<Graph>>(g, 3);
+    shared_ptr<GraphSplitter<Graph>> splitter = make_shared<omnigraph::CollectingSplitterWrapper<Graph>>(
+            inner_splitter, filter);
+    visualization::visualizers::SplittingGraphVisualizer<Graph>(g, labeler, *colorer, linker).SplitAndVisualize(*splitter,
+                                                                                                   folder_name);
+}
+
+template<class Graph>
+void DrawComponentsOfShortEdges(const Graph &g, const string &output_dir, size_t min_length, size_t sinks,
+                                size_t sources) {
+    vector<typename Graph::EdgeId> short_edges;
+    std::string pics_folder_ =
+            output_dir + ToString(min_length) + "_" + ToString(sinks) + "_" + ToString(sources) + "_" +
+            "pics_polymorphic/";
+    make_dir(pics_folder_);
+    INFO("Writing pics with components consisting of short edges to " + pics_folder_);
+    shared_ptr<GraphSplitter<Graph>> splitter = LongEdgesExclusiveSplitter<Graph>(g, min_length);
+    while (splitter->HasNext()) {
+        GraphComponent<Graph> component = splitter->Next();
+        if (component.v_size() > 3 && component.exits().size() == sinks &&
+                component.entrances().size() == sources) {
+            bool fail = false;
+            for (auto v : component.entrances()) {
+                if (component.g().IncomingEdgeCount(v) != 1) {
+                    fail = true;
+                }
+            }
+            for (auto v : component.exits()) {
+                if (component.g().OutgoingEdgeCount(v) != 1) {
+                    fail = true;
+                }
+            }
+
+            if (fail) {
+                continue;
+            }
+
+            graph_labeler::StrGraphLabeler<Graph> labeler(component.g());
+            graph_labeler::CoverageGraphLabeler<Graph> labeler2(component.g());
+            graph_labeler::CompositeLabeler<Graph> compositeLabeler(labeler, labeler2);
+            WriteComponentSinksSources(component,
+                                       pics_folder_ + ToString(g.int_id(*component.vertices().begin()))
+                                       + ".dot", visualization::graph_colorer::DefaultColorer(g),
+                                       compositeLabeler);
+            INFO("Component is written to " + ToString(g.int_id(*component.vertices().begin())) + ".dot");
+
+            //            PrintComponent(component,
+//                                pics_folder_ + "ShortComponents/"
+//                                        + ToString(gp.g.int_id(component.vertices_[0]))
+//                                         + ".dot");
+        }
+    }
+}
+
+
+template<class Graph>
+void WriteSizeLimitedComponents(const Graph &g,
+                                const string &folder_name,
+                                shared_ptr<GraphSplitter<Graph>> inner_splitter,
+                                shared_ptr<graph_colorer::GraphColorer<Graph>> colorer,
+                                const graph_labeler::GraphLabeler<Graph> &labeler, int min_component_size,
+                                int max_component_size, size_t max_components) {
+    vertex_linker::EmptyGraphLinker<Graph> linker;
+
+    auto filter = make_shared<omnigraph::ComponentSizeFilter<Graph>>(g, 1000000000, (size_t) min_component_size,
+                                                                     (size_t) max_component_size);
+    shared_ptr<GraphSplitter<Graph>> splitter = make_shared<omnigraph::CollectingSplitterWrapper<Graph>>(
+            inner_splitter, filter);
+    visualization::visualizers::SplittingGraphVisualizer<Graph>(g, labeler, *colorer, linker, false,
+                                                   max_components).SplitAndVisualize(*splitter, folder_name);
+}
+
+template<class Graph>
+void WriteComponent(const GraphComponent<Graph> &gc,
+                    const string &file_name, shared_ptr<graph_colorer::GraphColorer<Graph>> colorer,
+                    const graph_labeler::GraphLabeler<Graph> &labeler) {
+    vertex_linker::EmptyGraphLinker<Graph> linker;
+    graph_colorer::BorderDecorator<Graph> component_colorer(gc, *colorer, "yellow");
+    std::ofstream os;
+    os.open(file_name);
+    visualization::visualizers::ComponentVisualizer<Graph>(gc.g(), true).
+            Visualize(gc, os, labeler, component_colorer, linker);
+    os.close();
+}
+
+template<class Graph>
+void WriteComponentSinksSources(const GraphComponent<Graph> &gc,
+                                const string &file_name, shared_ptr<graph_colorer::GraphColorer<Graph>> colorer,
+                                const graph_labeler::GraphLabeler<Graph> &labeler) {
+    vertex_linker::EmptyGraphLinker<Graph> linker;
+    graph_colorer::SinkSourceDecorator<Graph> component_colorer(gc, *colorer);
+    std::ofstream os;
+    os.open(file_name);
+    visualization::visualizers::ComponentVisualizer<Graph>(gc.g(), true).
+            Visualize(gc, os, labeler, component_colorer, linker);
+    os.close();
+}
+
+template<class Graph>
+void WriteComponentSinksSources(const GraphComponent<Graph> &gc,
+                                const string &file_name) {
+
+    graph_labeler::StrGraphLabeler<Graph> labeler(gc.g());
+    graph_labeler::CoverageGraphLabeler<Graph> labeler2(gc.g());
+    graph_labeler::CompositeLabeler<Graph> compositeLabeler(labeler, labeler2);
+    vertex_linker::EmptyGraphLinker<Graph> linker;
+    WriteComponentSinksSources(gc, file_name, graph_colorer::DefaultColorer(gc.g()),
+                               compositeLabeler);
+}
+
+template<class Graph>
+void WriteSimpleComponent(const GraphComponent<Graph> &gc,
+                          const string &file_name, shared_ptr<graph_colorer::GraphColorer<Graph>> colorer,
+                          const graph_labeler::GraphLabeler<Graph> &labeler) {
+    vertex_linker::EmptyGraphLinker<Graph> linker;
+    std::ofstream os;
+    os.open(file_name);
+    visualization::visualizers::ComponentVisualizer<Graph>(gc.g(), false).
+            Visualize(gc, os, labeler, *colorer, linker);
+    os.close();
+}
+
+template<class Graph>
+void WriteComponentsAlongPath(const Graph &g, const vector<typename Graph::EdgeId> &path,
+                              const string &prefix_path, shared_ptr<graph_colorer::GraphColorer<Graph>> colorer,
+                              const graph_labeler::GraphLabeler<Graph> &labeler, bool color_path = true) {
+    auto edge_colorer = make_shared<graph_colorer::CompositeEdgeColorer<Graph>>("black");
+    edge_colorer->AddColorer(colorer);
+    if (color_path) {
+        edge_colorer->AddColorer(make_shared<graph_colorer::SetColorer<Graph>>(g, path, "green"));
+    }
+    shared_ptr<graph_colorer::GraphColorer<Graph>> resulting_colorer = make_shared<graph_colorer::CompositeGraphColorer<Graph>>(
+            colorer, edge_colorer);
+    shared_ptr<GraphSplitter<Graph>> rs = ReliableSplitterAlongPath<Graph>(g, path);
+    auto filter = make_shared<omnigraph::SmallComponentFilter<Graph>>(g, 3);
+    shared_ptr<GraphSplitter<Graph>> splitter = make_shared<omnigraph::CondensingSplitterWrapper<Graph>>(rs,
+                                                                                                         filter);
+    WriteComponents<Graph>(g, prefix_path, splitter, resulting_colorer, labeler);
+}
+
+template<class Graph>
+class LocalityPrintingRH {
+    typedef typename Graph::EdgeId EdgeId;
+    typedef typename Graph::VertexId VertexId;
+    const Graph &g_;
+    const graph_labeler::GraphLabeler<Graph> &labeler_;
+    std::shared_ptr<graph_colorer::GraphColorer<Graph>> colorer_;
+    const string output_folder_;
+public:
+    LocalityPrintingRH(const Graph &g, const graph_labeler::GraphLabeler<Graph> &labeler,
+                       std::shared_ptr<graph_colorer::GraphColorer<Graph>> colorer, const string &output_folder)
+            :
+            g_(g),
+            labeler_(labeler),
+            colorer_(colorer),
+            output_folder_(output_folder) {
+//        path::make_dirs(output_folder_);
+    }
+
+    void HandleDelete(EdgeId e, const string &add_label = "") {
+        //todo magic constant
+//          map<EdgeId, string> empty_coloring;
+        auto edge_colorer = make_shared<graph_colorer::CompositeEdgeColorer<Graph>>("black");
+        edge_colorer->AddColorer(colorer_);
+        edge_colorer->AddColorer(
+                make_shared<graph_colorer::SetColorer<Graph>>(g_, vector<EdgeId>(1, e), "green"));
+        shared_ptr<graph_colorer::GraphColorer<Graph>> resulting_colorer = make_shared<graph_colorer::CompositeGraphColorer<Graph>>(
+                colorer_, edge_colorer);
+
+        string fn = output_folder_ + "/edge_" + ToString(g_.int_id(e)) + add_label + ".dot";
+        visualization::visualization_utils::WriteComponent(omnigraph::EdgeNeighborhood<Graph>(g_, e, 50, 250), fn, resulting_colorer,
+                                      labeler_);
+    }
+
+private:
+    DECL_LOGGER("LocalityPrintingRH");
+};
+
+//static void WriteFilteredComponents(const Graph& g,
+//      const string& folder_name,
+//      shared_ptr<GraphComponentFilter<Graph>> filter,
+//      shared_ptr<GraphSplitter<Graph>> splitter,
+//      shared_ptr<graph_colorer::GraphColorer<Graph>> colorer,
+//      const GraphLabeler<Graph> &labeler) {
+//  vertex_linker::EmptyGraphLinker<Graph> linker;
+////    shared_ptr<GraphComponentFilter<Graph>> checker = make_shared<ComponentSizeFilter<Graph>>(g, 1500, 2, 300);
+//  omnigraph::FilteringSplitterWrapper<Graph> filtered_splitter(splitter, filter);
+//  visualization::visualizers::SplittingGraphVisualizer<Graph>(g, labeler, *colorer, linker).SplitAndVisualize(filtered_splitter, folder_name);
+//}
+
+}
+
+}
\ No newline at end of file
diff --git a/src/modules/visualization/visualizers.hpp b/src/common/visualization/visualizers.hpp
similarity index 50%
rename from src/modules/visualization/visualizers.hpp
rename to src/common/visualization/visualizers.hpp
index 6b35a94..50819fe 100644
--- a/src/modules/visualization/visualizers.hpp
+++ b/src/common/visualization/visualizers.hpp
@@ -8,55 +8,57 @@
 //***************************************************************************
 
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "graph_printer.hpp"
 #include <fstream>
 
-namespace omnigraph {
+using namespace omnigraph;
+
 namespace visualization {
 
+namespace visualizers {
+
 //DECL_LOGGER("omg.gvis")
 
 template<class Graph>
 class ComponentVisualizer {
-    const Graph& graph_;
+    const Graph &graph_;
     const bool paired_;
 
 private:
-    void Visualize(const GraphComponent<Graph>& component, GraphPrinter<Graph> &printer) {
+    void Visualize(const GraphComponent<Graph> &component, graph_printer::GraphPrinter <Graph> &printer) {
         printer.open();
         printer.AddVertices(component.vertices().begin(), component.vertices().end());
         for (auto e_it = component.e_begin(); e_it != component.e_end();
-                ++e_it) {
+             ++e_it) {
             printer.AddEdge(*e_it);
         }
         printer.close();
     }
 
 public:
-    ComponentVisualizer(const Graph& graph, bool paired = true) :
-        graph_(graph), paired_(paired) {
+    ComponentVisualizer(const Graph &graph, bool paired = true) :
+            graph_(graph), paired_(paired) {
     }
 
-    void Visualize(const GraphComponent<Graph>& component, ostream &os,
-            const GraphLabeler<Graph> &labeler,
-            const GraphColorer<Graph> &colorer,
-            const VertexLinker<Graph> &linker) {
-        if(paired_) {
-            PairedGraphPrinter<Graph> printer(graph_, os, labeler, colorer, linker);
+    void Visualize(const GraphComponent<Graph> &component, ostream &os,
+                   const graph_labeler::GraphLabeler<Graph> &labeler,
+                   const graph_colorer::GraphColorer<Graph> &colorer,
+                   const vertex_linker::VertexLinker<Graph> &linker) {
+        if (paired_) {
+            graph_printer::PairedGraphPrinter<Graph> printer(graph_, os, labeler, colorer, linker);
             Visualize(component, printer);
         } else {
-            SingleGraphPrinter<Graph> printer(graph_, os, labeler, colorer, linker);
+            graph_printer::SingleGraphPrinter<Graph> printer(graph_, os, labeler, colorer, linker);
             Visualize(component, printer);
         }
     }
 
     void Visualize(ostream &os,
-            const GraphLabeler<Graph> &labeler,
-            const GraphColorer<Graph> &colorer,
-            const VertexLinker<Graph> &linker) {
-        GraphComponent<Graph> component(graph_, graph_.begin(), graph_.end(), false);
-        Visualize(component, os, labeler, colorer, linker);
+                   const graph_labeler::GraphLabeler<Graph> &labeler,
+                   const graph_colorer::GraphColorer<Graph> &colorer,
+                   const vertex_linker::VertexLinker<Graph> &linker) {
+        Visualize(GraphComponent<Graph>::WholeGraph(graph_), os, labeler, colorer, linker);
     }
 };
 
@@ -64,27 +66,28 @@ public:
 template<class Graph>
 class ComponentNameGenerator {
 public:
-    virtual string ComponentName(const GraphComponent<Graph>& component) = 0;
+    virtual string ComponentName(const GraphComponent<Graph> &component) = 0;
 
     virtual ~ComponentNameGenerator() {
     }
 };
 
 template<class Graph>
-class SimpleCountingComponentNameGenerator: public ComponentNameGenerator<Graph> {
+class SimpleCountingComponentNameGenerator : public ComponentNameGenerator<Graph> {
 private:
     string name_;
     string extension_;
     size_t cnt_;
 public:
-    SimpleCountingComponentNameGenerator(string name, string extension): name_(name), extension_(extension), cnt_(0) {
+    SimpleCountingComponentNameGenerator(string name, string extension) : name_(name), extension_(extension),
+                                                                          cnt_(0) {
     }
 
-    string ComponentName(const GraphComponent<Graph>& component) {
+    string ComponentName(const GraphComponent<Graph> &component) {
         cnt_++;
         stringstream ss;
         ss << name_ << "_" << cnt_;
-        if(component.name().size() > 0)
+        if (component.name().size() > 0)
             ss << "_" << component.name();
         ss << "." << extension_;
         return ss.str();
@@ -92,20 +95,21 @@ public:
 };
 
 template<class Graph>
-class CountingSizeComponentNameGenerator: public ComponentNameGenerator<Graph> {
+class CountingSizeComponentNameGenerator : public ComponentNameGenerator<Graph> {
 private:
     string name_;
     string extension_;
     size_t cnt_;
 public:
-    CountingSizeComponentNameGenerator(string name, string extension): name_(name), extension_(extension), cnt_(0) {
+    CountingSizeComponentNameGenerator(string name, string extension) : name_(name), extension_(extension),
+                                                                        cnt_(0) {
     }
 
-    string ComponentName(const GraphComponent<Graph>& component) {
+    string ComponentName(const GraphComponent<Graph> &component) {
         cnt_++;
         stringstream ss;
         ss << name_ << "_" << cnt_;
-        if(component.name().size() > 0)
+        if (component.name().size() > 0)
             ss << "_" << component.name();
         ss << "_size_" << component.size();
         ss << "." << extension_;
@@ -118,45 +122,47 @@ public:
 template<class Graph>
 class SplittingGraphVisualizer {
 private:
-    const Graph& graph_;
-    const GraphLabeler<Graph> &labeler_;
-    const GraphColorer<Graph> &colorer_;
-    const VertexLinker<Graph> &linker_;
+    const Graph &graph_;
+    const graph_labeler::GraphLabeler <Graph> &labeler_;
+    const graph_colorer::GraphColorer <Graph> &colorer_;
+    const vertex_linker::VertexLinker <Graph> &linker_;
     const bool paired_;
     const size_t max_component_number_;
     static const size_t DEFAULT_MAX_COMPONENT_NUMBER = 500;
 
-    string ComponentFileName(size_t cnt, const string &folder, const GraphComponent<Graph>& component) {
+    string ComponentFileName(size_t cnt, const string &folder, const GraphComponent<Graph> &component) {
         stringstream ss;
         ss << folder << cnt;
-        if(component.name().size() > 0)
+        if (component.name().size() > 0)
             ss << "graph_" << component.name();
         ss << ".dot";
         return ss.str();
     }
 
 public:
-    SplittingGraphVisualizer(const Graph& graph,
-            const GraphLabeler<Graph> &labeler,
-            const GraphColorer<Graph> &colorer,
-            const VertexLinker<Graph> &linker,
-            bool paired = true,
-            size_t max_component_number = DEFAULT_MAX_COMPONENT_NUMBER) :
-            graph_(graph), labeler_(labeler), colorer_(colorer), linker_(linker), paired_(paired), max_component_number_(max_component_number) {
+    SplittingGraphVisualizer(const Graph &graph,
+                             const graph_labeler::GraphLabeler <Graph> &labeler,
+                             const graph_colorer::GraphColorer <Graph> &colorer,
+                             const vertex_linker::VertexLinker <Graph> &linker,
+                             bool paired = true,
+                             size_t max_component_number = DEFAULT_MAX_COMPONENT_NUMBER) :
+            graph_(graph), labeler_(labeler), colorer_(colorer), linker_(linker), paired_(paired),
+            max_component_number_(max_component_number) {
     }
 
     size_t SplitAndVisualize(GraphSplitter<Graph> &splitter, const string &folder) {
         INFO("Writing components to folder " << folder);
         ComponentVisualizer<Graph> visualizer(graph_, paired_);
         size_t cnt = 0;
-        while(splitter.HasNext()) {
-            if(cnt > max_component_number_) {
-                INFO("The number of graph components exceeded " << max_component_number_ << ". Aborting current visualization.");
+        while (splitter.HasNext()) {
+            if (cnt > max_component_number_) {
+                INFO("The number of graph components exceeded " << max_component_number_
+                                                                << ". Aborting current visualization.");
                 break;
             }
             cnt++;
             GraphComponent<Graph> component = splitter.Next();
-            BorderDecorator<Graph> border_colorer(component, colorer_, "yellow");
+            graph_colorer::BorderDecorator<Graph> border_colorer(component, colorer_, "yellow");
             ofstream os(ComponentFileName(cnt, folder, component));
             visualizer.Visualize(component, os, labeler_, border_colorer, linker_);
             os.close();
@@ -171,3 +177,4 @@ private:
 }
 }
 
+
diff --git a/src/modules/CMakeLists.txt b/src/modules/CMakeLists.txt
deleted file mode 100644
index 280629f..0000000
--- a/src/modules/CMakeLists.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-############################################################################
-# Copyright (c) 2015 Saint Petersburg State University
-# Copyright (c) 2011-2014 Saint Petersburg Academic University
-# All Rights Reserved
-# See file LICENSE for details.
-############################################################################
-
-project(spades_modules CXX)
-
-add_subdirectory(pipeline)
-add_subdirectory(assembly_graph)
-add_subdirectory(data_structures/sequence)
-add_subdirectory(math)
-add_subdirectory(algorithms/path_extend)
-add_subdirectory(algorithms)
-add_subdirectory(paired_info)
-add_subdirectory(stages)
-add_subdirectory(dev_support)
-add_subdirectory(io)
-add_subdirectory(data_structures/mph_index)
-
-add_library(spades_modules STATIC empty.cpp)
-
-target_link_libraries(spades_modules graph_support input sequence pipeline math_module path_extend paired_info stages dev_support mph_index algorithms)
diff --git a/src/modules/algorithms/path_extend/next_path_searcher.hpp b/src/modules/algorithms/path_extend/next_path_searcher.hpp
deleted file mode 100644
index e332805..0000000
--- a/src/modules/algorithms/path_extend/next_path_searcher.hpp
+++ /dev/null
@@ -1,1031 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-/*
- * next_path_searcher.hpp
- *
- *  Created on: Sep 27, 2013
- *      Author: ira
- */
-#pragma once
-
-#include <set>
-#include <vector>
-#include <map>
-
-#include "pipeline/graph_pack.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
-#include "assembly_graph/paths/bidirectional_path.hpp"
-#include "pe_utils.hpp"
-
-namespace path_extend {
-using debruijn_graph::Graph;
-using std::set;
-using std::vector;
-using std::multimap;
-
-class Edge {
-public:
-    Edge(const Graph& g, EdgeId id, Edge* prev_e, size_t dist, int gap = 0)
-            : g_(g),
-              id_(id),
-              prev_edge_(prev_e),
-              dist_(dist),
-              gap_(gap) {
-    }
-    ~Edge() {
-        for (size_t i = 0; i < out_edges_.size(); ++i) {
-            delete out_edges_[i];
-        }
-        for (size_t i = 0; i < not_out_edges_.size(); ++i) {
-            delete not_out_edges_[i];
-        }
-    }
-    Edge* AddOutEdge(EdgeId edge, int gap = 0) {
-        return AddIfNotExist(edge, gap, out_edges_);
-    }
-    Edge* AddIncorrectOutEdge(EdgeId edge, int gap = 0) {
-        for (size_t i = 0; i < out_edges_.size(); ++i) {
-            if (out_edges_[i]->GetId() == edge) {
-                not_out_edges_.push_back(out_edges_[i]);
-                out_edges_.erase(out_edges_.begin() + i);
-                break;
-            }
-        }
-        return AddIfNotExist(edge, gap, not_out_edges_);
-    }
-    Edge* AddPath(const BidirectionalPath& path, size_t from) {
-        Edge* e = this;
-        for (size_t i = from; i < path.Size(); ++i) {
-            e = e->AddOutEdge(path.At(i), path.GapAt(i));
-        }
-        return e;
-    }
-
-    int GetOutEdgeIndex(EdgeId edge) const {
-        return GetEdgeIndex(edge, out_edges_);
-    }
-
-    int GetIncorrectEdgeIndex(EdgeId edge) const {
-        return GetEdgeIndex(edge, not_out_edges_);
-    }
-
-    size_t OutSize() const {
-        return out_edges_.size();
-    }
-
-    Edge* GetOutEdge(size_t i) const {
-        return out_edges_[i];
-    }
-
-    BidirectionalPath GetPrevPath(size_t from) const {
-        BidirectionalPath result(g_);
-        vector<pair<EdgeId, int> > edges_wgaps;
-        const Edge* e = this;
-        edges_wgaps.push_back(make_pair(e->GetId(), e->Gap()));
-        while (e->prev_edge_) {
-            e = e->prev_edge_;
-            edges_wgaps.push_back(make_pair(e->GetId(), e->Gap()));
-        }
-        for (int i = (int) edges_wgaps.size() - 1 - (int) from; i >= 0; i--) {
-            result.PushBack(edges_wgaps[i].first, edges_wgaps[i].second);
-        }
-        return result;
-    }
-
-    bool IsCorrect() {
-        Edge* e = this;
-        while (e->prev_edge_) {
-            if (e->prev_edge_->GetOutEdgeIndex(e->GetId()) == -1) {
-                TRACE("after " << g_.int_id(e->prev_edge_->GetId()) << " souldn't go " << g_.int_id(e->GetId()));
-                return false;
-            }
-            e = e->prev_edge_;
-        }
-        return true;
-    }
-
-    bool EqualBegins(const BidirectionalPath& path, int pos) {
-        BidirectionalPath p = this->GetPrevPath(0);
-        return path_extend::EqualBegins(path, (size_t) pos, p, p.Size() - 1, true);
-    }
-    size_t Length() const {
-        return dist_;
-    }
-    set<Edge*> GetPrevEdges(size_t dist) {
-        size_t init_len = Length();
-        Edge* e = this;
-        set<Edge*> result;
-        while (e && init_len - e->Length() < dist) {
-            result.insert(e);
-            e = e->prev_edge_;
-        }
-        return result;
-    }
-    EdgeId GetId() const {
-        return id_;
-    }
-    int Gap() const {
-        return gap_;
-    }
-private:
-    Edge* AddIfNotExist(EdgeId e, int gap, vector<Edge*>& vect) {
-        int i = GetEdgeIndex(e, vect);
-        if (i != -1) {
-            return vect[i];
-        }
-        size_t dist = dist_ + gap + g_.length(e);
-        vect.push_back(new Edge(g_, e, this, dist, gap));
-        return vect.back();
-    }
-    int GetEdgeIndex(EdgeId e, const vector<Edge*>& vect) const {
-        for (size_t i = 0; i < vect.size(); ++i) {
-            if (vect[i]->GetId() == e)
-                return (int) i;
-        }
-        return -1;
-    }
-    const Graph& g_;
-    EdgeId id_;
-    vector<Edge*> out_edges_;
-    vector<Edge*> not_out_edges_;
-    Edge* prev_edge_;
-    size_t dist_;
-    int gap_;
-
-protected:
-    DECL_LOGGER("NextPathSearcher")
-};
-struct PathWithDistance {
-    PathWithDistance(BidirectionalPath p, int dist)
-            : p_(p),
-              dist_(dist) {
-
-    }
-    BidirectionalPath p_;
-    int dist_;
-};
-class NextPathSearcher {
-public:
-    typedef set<EdgeWithDistance, EdgeWithDistance::DistanceComparator> EdgeSet;
-    typedef multimap<EdgeId, PathWithDistance> ConstructedPathT;
-
-    NextPathSearcher(const Graph& g, const GraphCoverageMap& cover_map, size_t search_dist, PathsWeightCounter weight_counter, size_t max_number_of_paths_to_search);
-    BidirectionalPathSet FindNextPaths(const BidirectionalPath& path, EdgeId begin_edge, bool jump = true) const ;
-    vector<BidirectionalPath*> ScaffoldTree(const BidirectionalPath& path) const;
-private:
-    bool IsOutTip(VertexId v) const;
-    bool IsInTip(VertexId v) const;
-    vector<Edge*> GrowPath(const BidirectionalPath& init_path, Edge* e) const;
-    Edge* AddEdge(const BidirectionalPath& init_path, Edge* prev_e, EdgeId e_to_add, int gap) const;
-    bool AnalyzeBubble(const BidirectionalPath& p, EdgeId buldge_edge, size_t gap, Edge* prev_edge) const;
-
-    void ScaffoldTip(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& result_edges, vector<Edge*>& stopped_paths, vector<Edge*>& to_add,
-                     bool jump) const;
-    void ScaffoldChristmasTree(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& to_add, size_t min_length_from) const;
-    void Scaffold(const BidirectionalPath& init_path, Edge* current_path, ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap) const;
-    void FindScaffoldingCandidates(const BidirectionalPath& init_path, Edge* current_path, EdgeSet& candidate_set, size_t min_length_from) const;
-    void FindScaffoldingCandidates(EdgeId e, size_t distance_to_tip, vector<EdgeWithDistance>& jump_edges) const;
-    void OrderScaffoldingCandidates(EdgeSet& candidate_set, const BidirectionalPath& init_path, Edge* current_path, ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap) const;
-    void RemoveRedundant(ConstructedPathT& constructed_paths) const;
-    void ConvertPaths(const ConstructedPathT& constructed_paths, Edge* current_path, vector<Edge*>& to_add) const;
-    void ProcessScaffoldingCandidate(EdgeWithDistance& e, EdgeSet& candidate_set, Edge* current_path, size_t grown_path_len,
-                                     ConstructedPathT& constructed_paths, bool is_gap) const;
-    int EstimateGapForPath(EdgeSet& candidate_set, const BidirectionalPath& p) const;
-    void AddConstructedPath(const BidirectionalPath& cp, size_t from, int gap, ConstructedPathT& constructed_paths) const;
-    void FilterBackPaths(BidirectionalPathSet& back_paths, EdgeId edge_to_reach, BidirectionalPathSet& reached_paths, size_t max_len = -1UL) const;
-    void JoinPathsByGraph(ConstructedPathT& constructed_paths) const;
-    void JoinPathsByPI(ConstructedPathT& constructed_paths) const;
-    void JoinPathsByDejikstra(const BidirectionalPath& init_path, ConstructedPathT& constructed_paths) const;
-    map<PathWithDistance*, size_t> FindDistances(const BidirectionalPath& p, vector<PathWithDistance*>& paths) const;
-    void FindConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) const;
-    vector<vector<PathWithDistance*> > FilterConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) const;
-    void ConnectPaths(const BidirectionalPath& init_path, vector<vector<PathWithDistance*> >& variants) const;
-
-    const Graph& g_;
-    const GraphCoverageMap& cover_map_;
-    size_t search_dist_;
-    PathsWeightCounter weight_counter_;
-    size_t long_edge_len_;
-    size_t max_paths_;
-
-protected:
-    DECL_LOGGER("NextPathSearcher")
-};
-
-inline NextPathSearcher::NextPathSearcher(const Graph& g, const GraphCoverageMap& cover_map, size_t search_dist, PathsWeightCounter weight_counter, size_t max_number_of_paths_to_search)
-        : g_(g),
-          cover_map_(cover_map),
-          search_dist_(search_dist),
-          weight_counter_(weight_counter),
-          long_edge_len_(500),
-          max_paths_(max_number_of_paths_to_search) {
-
-}
-
-inline vector<BidirectionalPath*> NextPathSearcher::ScaffoldTree(const BidirectionalPath& path) const {
-    Edge* start_e = new Edge(g_, path.At(0), NULL, g_.length(path.At(0)) + path.GapAt(0), path.GapAt(0));
-    Edge* e = start_e->AddPath(path, 1);
-    //jump forward when too much paths
-    DEBUG("Scaffolding tree for edge " << g_.int_id(start_e->GetId()));
-    path.Print();
-    vector<Edge*> result_edges;
-    ScaffoldChristmasTree(path, e, result_edges, 0);
-    std::vector<BidirectionalPath*> result_paths;
-    for (size_t i = 0; i < result_edges.size(); ++i) {
-        BidirectionalPath result_path = result_edges[i]->GetPrevPath(path.Size());
-        if (!result_path.Empty())
-            result_paths.push_back(new BidirectionalPath(result_path));
-    }
-    if (result_paths.size() != 1) {
-        for (size_t i = 0; i < result_paths.size(); ++i) {
-            delete result_paths[i];
-        }
-        result_paths.clear();
-        result_edges.clear();
-        ScaffoldChristmasTree(path, e, result_edges, long_edge_len_);
-        for (size_t i = 0; i < result_edges.size(); ++i) {
-            BidirectionalPath result_path = result_edges[i]->GetPrevPath(path.Size());
-            if (!result_path.Empty())
-                result_paths.push_back(new BidirectionalPath(result_path));
-        }
-    }
-    delete start_e;
-    DEBUG( "for path " << path.GetId() << " several extension " << result_paths.size());
-    return result_paths;
-}
-
-inline BidirectionalPathSet NextPathSearcher::FindNextPaths(const BidirectionalPath& path, EdgeId begin_edge, bool jump) const {
-    TRACE("begin find next paths");
-    vector<Edge*> grow_paths;
-    vector<Edge*> result_edges;
-    vector<Edge*> stopped_paths;
-    size_t max_len = search_dist_ + path.Length();
-    std::set<Edge*> used_edges;
-    int count_to_grow = 1;
-
-    Edge* start_e = new Edge(g_, path.At(0), NULL, g_.length(path.At(0)) + path.GapAt(0), path.GapAt(0));
-    Edge* e = start_e->AddPath(path, 1);
-    if (begin_edge != path.Back()) {
-        e = e->AddOutEdge(begin_edge);
-        DEBUG( "Try to find next path for path with edge " << g_.int_id(begin_edge));
-    } else {
-        DEBUG( "Try to search for path with last edge " << g_.int_id(path.Back()) << " Scaffolding: " << jump << ", next edges " << g_.OutgoingEdgeCount(g_.EdgeEnd(path.Back())));
-    }
-    grow_paths.push_back(e);
-
-    size_t ipath = 0;
-    DEBUG("Processing paths");
-    while (ipath < grow_paths.size()) {
-        DEBUG("Processing path " << ipath << " of " << grow_paths.size() << " need to grow " << count_to_grow);
-        Edge* current_path = grow_paths[ipath++];
-        DEBUG(" edge " << g_.int_id(current_path->GetId()));
-        if (used_edges.count(current_path) > 0) {
-            count_to_grow--;
-            continue;
-        }
-        used_edges.insert(current_path);
-        if (current_path->Length() >= max_len && current_path->IsCorrect()) {
-            result_edges.push_back(current_path);
-            count_to_grow--;
-            continue;
-        }
-        DEBUG("Growing path");
-        vector<Edge*> to_add = GrowPath(path, current_path);
-        DEBUG("Path grown");
-        if (to_add.empty() && current_path->IsCorrect()) {
-            DEBUG("scaffold tip");
-            ScaffoldTip(path, current_path, result_edges, stopped_paths, to_add, jump);
-        }
-        count_to_grow--;
-        for (Edge* e_to_add : to_add) {
-            grow_paths.push_back(e_to_add);
-            count_to_grow++;
-        }
-
-        if (count_to_grow > (int) max_paths_ || ipath > max_paths_ * 10) {
-            DEBUG("too many paths");
-            delete start_e;
-            return BidirectionalPathSet();
-        }
-    }
-    DEBUG("Paths processed");
-
-    BidirectionalPathSet result_paths;
-    TRACE("adding paths " << result_edges.size());
-    for (size_t i = 0; i < result_edges.size(); ++i) {
-        BidirectionalPath result_path = result_edges[i]->GetPrevPath(path.Size());
-        if (!result_path.Empty()) {
-            result_paths.insert(new BidirectionalPath(result_path));
-        }
-    }
-    delete start_e;
-    DEBUG( "for path " << path.GetId() << " several extension " << result_paths.size());
-    return result_paths;
-}
-
-inline bool NextPathSearcher::AnalyzeBubble(const BidirectionalPath& p, EdgeId buldge_edge, size_t gap, Edge* prev_edge) const {
-    EdgeId max_edge = buldge_edge;
-    if (prev_edge->GetOutEdgeIndex(buldge_edge) != -1 || prev_edge->GetIncorrectEdgeIndex(buldge_edge) != -1) {
-        return prev_edge->GetOutEdgeIndex(buldge_edge) != -1;
-    }
-    double max_w = 0.0;
-    for (EdgeId e : g_.OutgoingEdges(g_.EdgeStart(buldge_edge))) {
-        double w = weight_counter_.CountPairInfo(p, 0, p.Size(), e, gap);
-        if (math::gr(w, max_w) || (math::eq(w, max_w) && g_.int_id(e) < g_.int_id(max_edge))) {
-            max_w = w;
-            max_edge = e;
-        }
-    }
-    for (EdgeId e : g_.OutgoingEdges(g_.EdgeStart(buldge_edge))) {
-        if (e == max_edge) {
-            prev_edge->AddOutEdge(e);
-        } else {
-            prev_edge->AddIncorrectOutEdge(e);
-        }
-    }
-    return max_edge == buldge_edge;
-}
-
-inline Edge* NextPathSearcher::AddEdge(const BidirectionalPath& init_path, Edge* prev_e, EdgeId e_to_add, int gap) const {
-    Edge* e = prev_e;
-    if (e->GetIncorrectEdgeIndex(e_to_add) != -1) {
-        return e;
-    }
-    int inext = e->GetOutEdgeIndex(e_to_add);
-    if (inext != -1) {
-        return e->GetOutEdge(inext);
-    }
-    if (InBuble(e_to_add, g_)) {
-        if (AnalyzeBubble(init_path, e_to_add, gap, e)) {
-            return e->AddOutEdge(e_to_add);
-        }
-    } else if (e->GetId() != e_to_add) {
-        return e->AddOutEdge(e_to_add);
-    }
-    return e;
-}
-
-inline vector<Edge*> NextPathSearcher::GrowPath(const BidirectionalPath& init_path, Edge* e) const {
-    TRACE("in growing path");
-    vector<Edge*> to_add;
-    if (!e->IsCorrect()) {
-        TRACE("incorrect");
-        return to_add;
-    }
-    for (EdgeId next_edge : g_.OutgoingEdges(g_.EdgeEnd(e->GetId()))) {
-        TRACE("Analyze outgoing edge " << g_.int_id(next_edge));
-        BidirectionalPathSet cov_paths = cover_map_.GetCoveringPaths(next_edge);
-        TRACE("cov_map size " << cov_paths.size());
-        bool already_added = false;
-        for (auto inext_path = cov_paths.begin(); inext_path != cov_paths.end() && !already_added; ++inext_path) {
-            vector<size_t> positions = (*inext_path)->FindAll(next_edge);
-            for (size_t pos : positions) {
-                if (pos == 0 || e->EqualBegins(**inext_path, (int) pos - 1)) {
-                    TRACE("Found equal begin");
-                    Edge* new_edge = AddEdge(init_path, e, (*inext_path)->At(pos), (*inext_path)->GapAt(pos));
-                    if (new_edge && new_edge != e) {
-                        TRACE("Add edge")
-                        to_add.push_back(new_edge);
-                        already_added = true;
-                        break;
-                    }
-                }
-            }
-        }
-    }
-    if (to_add.size() == 0) {
-        for (EdgeId next_edge : g_.OutgoingEdges(g_.EdgeEnd(e->GetId()))) {
-            if (next_edge != e->GetId()) {
-                to_add.push_back(e->AddOutEdge(next_edge));
-            }
-        }
-    }
-    stringstream str;
-    str << " for edge " << g_.int_id(e->GetId()) << " add ";
-    for (Edge* e1 : to_add) {
-        str << " " << g_.int_id(e1->GetId());
-    }
-    TRACE(str.str());
-    return to_add;
-}
-
-inline void NextPathSearcher::ScaffoldTip(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& result_edges, vector<Edge*>& stopped_paths,
-                                          vector<Edge*>& to_add, bool jump) const {
-
-    if (jump) {
-        //jump forward when tip
-        DEBUG("Scaffolding");
-        ConstructedPathT constructed_paths;
-        set<EdgeId> seeds;
-        Scaffold(path, current_path, constructed_paths, seeds, true);
-        if (constructed_paths.empty()) {
-            stopped_paths.push_back(current_path);
-        } else {
-            DEBUG("Jumped! " << to_add.size());
-            ConvertPaths(constructed_paths, current_path, to_add);
-        }
-    } else {
-        DEBUG("Not scaffolding because going back");
-        result_edges.push_back(current_path);
-    }
-}
-
-inline void NextPathSearcher::ScaffoldChristmasTree(const BidirectionalPath& path, Edge * current_path, vector<Edge*>& to_add, size_t min_length_from) const {
-    //jump forward when too much paths
-    DEBUG("========= Scaffolding when too many paths =========");
-    ConstructedPathT constructed_paths;
-    set<EdgeId> seeds;
-    //Scaffold(path, current_path, constructed_paths, seeds, false);
-    EdgeSet candidate_set;
-    FindScaffoldingCandidates(path, current_path, candidate_set, min_length_from);
-    for (EdgeWithDistance e : candidate_set) {
-        constructed_paths.insert(make_pair(e.e_,PathWithDistance(BidirectionalPath(g_, e.e_), e.d_)));
-    }
-    RemoveRedundant(constructed_paths);
-    JoinPathsByDejikstra(path, constructed_paths);
-
-    RemoveRedundant(constructed_paths);
-    DEBUG("Scafolding candidates");
-    for (EdgeWithDistance e : candidate_set) {
-        DEBUG( "Edge " << g_.int_id(e.e_) << " (" << g_.length(e.e_) << ")" << ", distance " << e.d_);
-    }
-
-    DEBUG("scaffolding candidates for tree " << constructed_paths.size());
-    for (auto iter = constructed_paths.begin(); iter != constructed_paths.end(); ++iter){
-        iter->second.p_.Print();
-    }
-
-    if (constructed_paths.size() > 0 && constructed_paths.upper_bound(constructed_paths.begin()->first) == constructed_paths.end()) {
-        DEBUG("All paths from one seed");
-        int first_seed_pos = 0;
-        auto p = constructed_paths.begin();
-        if (constructed_paths.size() > 1) {
-            //Searching for path with max number of seeds
-            DEBUG("Many paths from one seed " << constructed_paths.size());
-            int max_seeds = 0;
-            for (auto it = constructed_paths.begin(); it != constructed_paths.end(); ++it) {
-                int seed_count = 0;
-                for (EdgeId e : seeds) {
-                    if (it->second.p_.Contains(e)) {
-                        ++seed_count;
-                    }
-                }
-                if (seed_count > max_seeds) {
-                    max_seeds = seed_count;
-                    p = it;
-                }
-            }
-            DEBUG("Max seed containing contains " << max_seeds << " seeds");
-            //Looking for first seed in that path
-            PathWithDistance& winner(p->second);
-            first_seed_pos = (int) winner.p_.Size() + 1;
-            for (EdgeId e : seeds) {
-                int pos = winner.p_.FindFirst(e);
-                if (pos != -1)
-                    first_seed_pos = min(pos, first_seed_pos);
-            }
-            VERIFY(first_seed_pos != (int) winner.p_.Size() + 1);
-            DEBUG("First seed position " << first_seed_pos << " seeds");
-        }
-        PathWithDistance& path_to_add(p->second);
-        int distance = path_to_add.dist_ + (int) path_to_add.p_.Length() - (int) path_to_add.p_.LengthAt(first_seed_pos);
-        to_add.push_back(current_path->AddOutEdge(path_to_add.p_[first_seed_pos], distance));
-        to_add.back() = to_add.back()->AddPath(path_to_add.p_, first_seed_pos + 1);
-    }
-    DEBUG("========= Done scaffolding when too many paths =========");
-}
-
-inline void NextPathSearcher::Scaffold(const BidirectionalPath& init_path, Edge* current_path,
-                                       ConstructedPathT& constructed_paths, set<EdgeId>& seeds, bool is_gap) const {
-
-    EdgeSet candidate_set;
-    FindScaffoldingCandidates(init_path, current_path, candidate_set, 0);
-
-    DEBUG("Scafolding candidates");
-    for (EdgeWithDistance e : candidate_set) {
-        DEBUG( "Edge " << g_.int_id(e.e_) << " (" << g_.length(e.e_) << ")" << ", distance " << e.d_);
-    }
-
-    OrderScaffoldingCandidates(candidate_set, init_path, current_path, constructed_paths, seeds, is_gap);
-}
-
-inline void NextPathSearcher::FindScaffoldingCandidates(const BidirectionalPath& init_path, Edge* current_path, EdgeSet& candidate_set, size_t min_length_from) const {
-    set<EdgeId> path_end;
-    set<Edge*> prev_edges = current_path->GetPrevEdges(search_dist_);
-    for (Edge* e : prev_edges) {
-        path_end.insert(e->GetId());
-        path_end.insert(g_.conjugate(e->GetId()));
-    }
-    map<EdgeId, vector<int> > candidates;
-    //current_path->GetPrevPath(0).Print();
-    TRACE(current_path->Length() << " " << init_path.Length());
-    VERIFY(current_path->Length() >= init_path.Length());
-    size_t grown_path_len = current_path->Length() - init_path.Length();
-    TRACE("Path already grown to " << grown_path_len);
-
-    for (size_t i = 0; i < init_path.Size(); ++i) {
-        if (g_.length(init_path[i]) <= min_length_from) {
-            continue;
-        }
-        vector<EdgeWithDistance> jump_edges;
-        size_t distance_to_tip = init_path.LengthAt(i) + grown_path_len;
-        FindScaffoldingCandidates(init_path[i], distance_to_tip, jump_edges);
-        for (EdgeWithDistance e : jump_edges) {
-            if (candidates.find(e.e_) == candidates.end()) {
-                candidates[e.e_] = vector<int>();
-            }
-            DEBUG("ADD JUMP EDGE FROM " << g_.int_id(init_path[i]) << " TO " << g_.int_id(e.e_))
-            candidates[e.e_].push_back(/*max(e.d_ - (int) distance_to_tip, 100)*/100);
-        }
-    }
-
-    for (std::pair<EdgeId, vector<int> > e : candidates) {
-        if (path_end.count(e.first) > 0) {
-            continue;
-        }
-        int avg_distance = 0;
-        TRACE( "All distances for edge " << g_.int_id(e.first) << " (" << g_.length(e.first) << ")");
-        for (int dist : e.second) {
-            TRACE(dist);
-            avg_distance += dist;
-        }
-        avg_distance /= (int) e.second.size();
-        candidate_set.insert(EdgeWithDistance(e.first, avg_distance));
-    }
-}
-
-inline void NextPathSearcher::FindScaffoldingCandidates(EdgeId e, size_t distance_to_tip, vector<EdgeWithDistance>& jump_edges) const {
-    if (g_.length(e) < long_edge_len_ || distance_to_tip - g_.length(e) >= search_dist_)
-        return;
-
-    TRACE("Edge " << g_.int_id(e) << ", length " << g_.length(e));
-    TRACE( distance_to_tip << " " << distance_to_tip - g_.length(e) << " " << search_dist_);
-
-    set<EdgeId> candidate_edges;
-    int min_distance = std::max((int) distance_to_tip - (int) weight_counter_.GetLib()->GetLeftVar(), 0);
-    int max_distance = (int) search_dist_ + (int) g_.length(e);
-    TRACE("Looking in range " << min_distance << " " << max_distance);
-    weight_counter_.FindJumpCandidates(e, min_distance, max_distance, long_edge_len_, candidate_edges);
-    weight_counter_.FindJumpEdges(e, candidate_edges, min_distance, max_distance, jump_edges);
-    TRACE("Found " << jump_edges.size() << " candidate(s) from  this edge");
-}
-
-inline void NextPathSearcher::OrderScaffoldingCandidates(EdgeSet& candidate_set, const BidirectionalPath& init_path,
-                                                         Edge* current_path, ConstructedPathT& constructed_paths,
-                                                         set<EdgeId>& seeds, bool is_gap) const {
-    size_t grown_path_len = current_path->Length() - init_path.Length();
-
-    TRACE("Order Scaffolding Candidates, is gap " << is_gap);
-    for (EdgeWithDistance e : candidate_set) {
-        TRACE("e " << g_.int_id(e.e_));
-        if (constructed_paths.count(e.e_) > 0) {
-            TRACE("visited");
-            continue;
-        }
-        ProcessScaffoldingCandidate(e, candidate_set, current_path, grown_path_len, constructed_paths, is_gap);
-        for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
-            TRACE("current constructed paths " << g_.int_id(p1->first));
-            //p1->second.p_.Print();
-        }
-
-    }
-    RemoveRedundant(constructed_paths);
-    for (auto it = constructed_paths.begin(); it != constructed_paths.end(); ++it) {
-        seeds.insert(it->first);
-    }
-    JoinPathsByGraph(constructed_paths);
-    JoinPathsByPI(constructed_paths);
-
-    RemoveRedundant(constructed_paths);
-}
-
-inline void NextPathSearcher::ConvertPaths(const ConstructedPathT& constructed_paths, Edge* current_path, vector<Edge*>& to_add) const {
-    for (auto edge = constructed_paths.begin(); edge != constructed_paths.end(); ++edge) {
-        to_add.push_back(current_path->AddOutEdge(edge->second.p_[0], edge->second.dist_));
-        to_add.back() = to_add.back()->AddPath(edge->second.p_, 1);
-    }
-}
-
-inline void NextPathSearcher::RemoveRedundant(ConstructedPathT& constructed_paths) const {
-    for (auto edge = constructed_paths.begin(); edge != constructed_paths.end();) {
-        if (edge->second.p_.Empty()) {
-            edge = constructed_paths.erase(edge);
-        } else {
-            ++edge;
-        }
-    }
-}
-
-inline void NextPathSearcher::ProcessScaffoldingCandidate(EdgeWithDistance& e, EdgeSet& candidate_set, Edge* current_path, size_t grown_path_len,
-                                                          ConstructedPathT& constructed_paths, bool is_gap) const {
-    bool looking_for_tip = is_gap;
-    //Search back from e till tip or maximim length back
-    TRACE(" === Searching back === ");
-    TRACE( "Distances: search = " << search_dist_ << ", grown = " << grown_path_len << ", estimated gap = " << e.d_);
-    VERIFY(search_dist_ >= grown_path_len);
-    VERIFY((int) search_dist_ >= e.d_);
-
-    size_t max_length_back = search_dist_ - grown_path_len;
-    TRACE(search_dist_ << " " << grown_path_len);
-    TRACE( "Searchin for edge of length " << g_.length(e.e_) << " to dist " << max_length_back);
-    NextPathSearcher back_searcher(g_, cover_map_, max_length_back, weight_counter_, max_paths_);
-    BidirectionalPath jumped_edge(g_, g_.conjugate(e.e_));
-    BidirectionalPathSet back_paths = back_searcher.FindNextPaths(jumped_edge, jumped_edge.Back(), false);
-    TRACE(" === DONE SEARCHING === ");
-    TRACE("Found " << back_paths.size() << " is tip " << IsInTip(g_.EdgeStart(e.e_)) << " look for tip " << looking_for_tip);
-
-    if (back_paths.empty()) {
-        if (IsInTip(g_.EdgeStart(e.e_)) && looking_for_tip) {
-            TRACE( "Added tip edge " << g_.int_id(e.e_) << " (" << g_.length(e.e_) << ")" << ", distance " << e.d_);
-            constructed_paths.insert(make_pair(e.e_, PathWithDistance(BidirectionalPath(g_, e.e_), e.d_)));
-        } else if (!IsInTip(g_.EdgeStart(e.e_)) && !looking_for_tip) {
-            constructed_paths.insert(make_pair(e.e_, PathWithDistance(BidirectionalPath(g_, e.e_), e.d_)));
-        }
-    } else {
-        TRACE("Found several back paths " << back_paths.size());
-        BidirectionalPathSet reached_paths;
-        FilterBackPaths(back_paths, g_.conjugate(current_path->GetId()), reached_paths, search_dist_ - grown_path_len);
-        //Found a path back to the init path
-        if (reached_paths.size() > 0 && !looking_for_tip) {
-            TRACE("Found " << reached_paths.size() << " direct path(s) back");
-            int i = 0;
-            for (BidirectionalPath* p : reached_paths) {
-                TRACE("Processing reached path " << i++);
-                BidirectionalPath cp = p->Conjugate();
-                //Adding jumped edge since its not included in the path
-                cp.PushBack(e.e_);
-                //cp.Print();
-                int reached_edge_pos = cp.FindLast(current_path->GetId());
-                VERIFY(reached_edge_pos != -1);
-                AddConstructedPath(cp, reached_edge_pos + 1, 0, constructed_paths);
-            }
-        } else if (reached_paths.size() > 0 && looking_for_tip) {
-            DEBUG("Impossible: back path reaches tip");
-        } else if (looking_for_tip) {
-            TRACE( "Found " << back_paths.size() << " path(s) going back to tip");
-            int i = 0;
-            for (BidirectionalPath* p : back_paths) {
-                DEBUG("Processing tip path " << i++);
-                BidirectionalPath cp = p->Conjugate();
-                //Adding jumped edge since its not included in the path
-                cp.PushBack(e.e_);
-                AddConstructedPath(cp, 0, EstimateGapForPath(candidate_set, cp), constructed_paths);
-            }
-        }
-    }
-    for (BidirectionalPath* p : back_paths) {
-        delete p;
-    }
-}
-
-inline int NextPathSearcher::EstimateGapForPath(EdgeSet& candidate_set, const BidirectionalPath& p) const {
-    int gap = 0;
-    int count = 0;
-    for (EdgeWithDistance e : candidate_set) {
-        int pos = p.FindFirst(e.e_);
-        if (pos != -1) {
-            size_t length_to_e = 0;
-            for (int i = 0; i < pos; ++i) {
-                length_to_e += p.LengthAt(i);
-            }
-            gap += e.d_ - (int) length_to_e;
-        }
-        ++count;
-    }
-    gap /= count;
-    return gap > 0 ? gap : 100;
-}
-
-inline void NextPathSearcher::AddConstructedPath(const BidirectionalPath& cp, size_t from, int gap, ConstructedPathT& constructed_paths) const {
-    VERIFY(!cp.Empty());
-
-    //Adding if there is unique (candidate - tip)
-    EdgeId candidate = cp.Back();
-    for (auto it = constructed_paths.lower_bound(candidate); it != constructed_paths.upper_bound(candidate); ++it) {
-        if (it->second.p_.Front() == cp.Front()) {
-            return;
-        }
-    }
-
-    TRACE("Adding path starting from " << from);
-    constructed_paths.insert(make_pair(candidate, PathWithDistance(cp.SubPath(from), gap)));
-    TRACE("add constructed path " << g_.int_id(candidate));
-    //cp.Print();
-
-    for (size_t i = 0; i < cp.Size() - 1; ++i) {
-        EdgeId edge = cp[i];
-        for (auto it = constructed_paths.lower_bound(edge); it != constructed_paths.upper_bound(edge); ++it) {
-            TRACE("found " << g_.int_id(edge));
-            //it->second.p_.Print();
-            TRACE("clear");
-            it->second.p_.Clear();
-        }
-    }
-}
-inline bool NextPathSearcher::IsOutTip(VertexId v) const {
-    if (g_.OutgoingEdgeCount(v) == 0) {
-        return true;
-    }
-    if (g_.OutgoingEdgeCount(v) != 1) {
-        return false;
-    }
-    EdgeId oute = *g_.OutgoingEdges(v).begin();
-    for (EdgeId ine : g_.IncomingEdges(v)) {
-        if (oute == ine) {
-            return true;
-        }
-    }
-    return false;
-}
-inline bool NextPathSearcher::IsInTip(VertexId v) const {
-    if (g_.IncomingEdgeCount(v) == 0) {
-        return true;
-    }
-    if (g_.IncomingEdgeCount(v) != 1) {
-        return false;
-    }
-    EdgeId ine = *g_.IncomingEdges(v).begin();
-    for (EdgeId oute : g_.OutgoingEdges(v)) {
-        if (oute == ine) {
-            return true;
-        }
-    }
-    return false;
-}
-inline void NextPathSearcher::FilterBackPaths(BidirectionalPathSet& back_paths, EdgeId edge_to_reach, BidirectionalPathSet& reached_paths,
-                                              size_t max_len) const {
-    TRACE("Searching for proper back paths");
-
-    int i = 0;
-    for (auto piter = back_paths.begin(); piter != back_paths.end();) {
-        BidirectionalPath* p = *piter;
-        VERIFY(!p->Empty());
-        EdgeId last_e = p->Back();
-        VertexId last_v = g_.EdgeEnd(last_e);
-        TRACE("Processing path " << i++);
-        //p->Print();
-        if (p->FindFirst(edge_to_reach) != -1) {
-            reached_paths.insert(p);
-            ++piter;
-        } else if (IsInTip(last_v) == 0 && p->Length() < max_len) {
-            ++piter;
-        } else {
-            delete p;
-            piter = back_paths.erase(piter);
-        }
-    }
-}
-
-inline void NextPathSearcher::JoinPathsByGraph(ConstructedPathT& constructed_paths) const {
-    TRACE("==  try to join paths using graph ==");
-    for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
-        //p1->second.p_.Print();
-    }
-    TRACE("==  printed ==");
-
-    //Removing edges whose seed is contained in any other path
-    set<EdgeId> to_remove;
-    for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
-        if (to_remove.count(p1->first) > 0) {
-            continue;
-        }
-        for (auto p2 = constructed_paths.begin(); p2 != constructed_paths.end(); ++p2) {
-            if (p1->first == p2->first || to_remove.count(p2->first) > 0) {
-                continue;
-            }
-            if (p1->second.p_.Contains(p2->first)) {
-                to_remove.insert(p2->first);
-            }
-        }
-    }
-    for (auto p = constructed_paths.begin(); p != constructed_paths.end(); ) {
-        if (to_remove.count(p->first) > 0) {
-            p = constructed_paths.erase(p);
-        } else {
-            ++p;
-        }
-    }
-}
-
-inline void NextPathSearcher::JoinPathsByPI(ConstructedPathT& constructed_paths) const {
-    DEBUG("==  try to join paths ===");
-    for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
-        p1->second.p_.Print();
-    }
-    DEBUG("==  printed ===");
-
-    //Checking paired info
-    set<EdgeId> visited;
-    for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
-        if (visited.count(p1->first) > 0) {
-            continue;
-        }
-        for (auto p2 = constructed_paths.begin(); p2 != constructed_paths.end(); ++p2) {
-            if (p1->first == p2->first) {
-                continue;
-            }
-            BidirectionalPath& path1 = p1->second.p_;
-            BidirectionalPath& path2 = p2->second.p_;
-            bool has_pi = false;
-            for (size_t i = 0; i < path1.Size(); ++i) {
-
-                for (size_t j = 0; j < path2.Size(); ++j) {
-                    size_t len_to_e2 = path2.Length() - path2.LengthAt(j);
-                    size_t dist = path1.LengthAt(i) + len_to_e2;
-                    size_t min_dist = (size_t) max(0, (int) dist - (int) weight_counter_.GetLib()->GetLeftVar());
-                    size_t max_dist = dist + search_dist_;
-                    DEBUG("try to find pair info between " << g_.int_id(path1[i]) << " and  " << g_.int_id(path2[j])
-                          << " distance from " << min_dist
-                          <<" to " << max_dist);
-                    if (path1[i] != path2[j] &&
-                            weight_counter_.HasPI(path1[i], path2[j], min_dist, max_dist)) {
-                        has_pi = true;
-                        break;
-                    }
-                }
-                if (has_pi) {
-                    break;
-                }
-            }
-
-            set<EdgeId> edges_path1;
-            for (size_t i = 0; i < path1.Size(); ++i) {
-                edges_path1.insert(path1.At(i));
-            }
-            for (size_t i = 0; i < path2.Size(); ++i) {
-                if (edges_path1.count(path2.At(i)) > 0 || edges_path1.count(g_.conjugate(path2.At(i))) > 0) {
-                    has_pi = false;
-                }
-            }
-            if (has_pi) {
-                DEBUG("has pi from ");
-                path1.Print();
-                DEBUG("to");
-                path2.Print();
-                path1.PushBack(path2.Front(), 100);
-                for (int i = 1; i < (int) path2.Size(); ++i) {
-                    path1.PushBack(path2[i], path2.GapAt(i), path2.TrashPreviousAt(i), path2.TrashCurrentAt(i));
-                }
-                DEBUG("new path");
-                path1.Print();
-                path2.Clear();
-                visited.insert(p2->first);
-            }
-        }
-    }
-}
-inline void Generate(size_t l, size_t r, vector<size_t> a,
-        vector<vector<size_t> >& res, vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) {
-    if (l == r) {
-        DEBUG("result " << a.size())
-        res.push_back(a);
-    } else {
-        for (size_t i = l; i < r; ++i) {
-            if (l > 0 && connections[all_paths[a[l - 1]]].count(all_paths[a[i]]) == 0) {
-                DEBUG(" not connected " << a[l-1] << " and " << a[i])
-                continue;
-            }
-            DEBUG("  connected " << l-1 << " and " << i)
-            size_t v = a[l];
-            a[l] = a[i];
-            a[i] = v;
-            Generate(l + 1, r, a, res, all_paths, connections);
-            v = a[l];
-            a[l] = a[i];
-            a[i] = v;
-        }
-    }
-}
-
-inline vector<vector<size_t> > Generate(size_t n, vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) {
-    vector<vector<size_t> > result;
-    if (n > 5) {
-        return result;
-    }
-    vector<size_t> a;
-    for (size_t i = 0; i < n; ++i) {
-        a.push_back(i);
-    }
-    Generate(0, n, a, result, all_paths, connections);
-    return result;
-}
-
-inline map<PathWithDistance*, size_t> NextPathSearcher::FindDistances(const BidirectionalPath& p, vector<PathWithDistance*>& paths) const {
-    DEBUG("find distances from e " << g_.int_id(p.Back()))
-    map<PathWithDistance*, size_t> result;
-    DijkstraHelper<Graph>::BoundedDijkstra dijkstra(DijkstraHelper<Graph>::CreateBoundedDijkstra(g_, search_dist_, 3000));
-    dijkstra.Run(g_.EdgeEnd(p.Back()));
-    DEBUG("paths size " << paths.size());
-    for (auto ipath = paths.begin(); ipath != paths.end(); ++ipath) {
-        vector<EdgeId> shortest_path = dijkstra.GetShortestPathTo(g_.EdgeStart((*ipath)->p_.Front()));
-        if (shortest_path.size() != 0) {
-            int gap = 0;
-            for (size_t i = 0; i < shortest_path.size(); ++i) {
-                gap += (int) g_.length(shortest_path[i]);
-            }
-            gap += (int) g_.k();
-            result[*ipath] = gap;
-        }
-    }
-    DEBUG("return result " << result.size());
-    return result;
-}
-
-inline void NextPathSearcher::FindConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) const {
-    for (auto p1 = all_paths.begin(); p1 != all_paths.end(); ++p1) {
-        map<PathWithDistance*, size_t> distances = FindDistances((*p1)->p_, all_paths);
-        connections[*p1] = set<PathWithDistance*>();
-        for (auto iter = distances.begin(); iter != distances.end(); ++iter) {
-            if ((*p1)->p_.Length() + iter->second < search_dist_){
-                connections[*p1].insert(iter->first);
-            }
-        }
-    }
-}
-
-inline void NextPathSearcher::ConnectPaths(const BidirectionalPath& init_path, vector<vector<PathWithDistance*> >& variants) const {
-    if (variants.size() == 1 && variants[0].size() > 0) {
-        vector<PathWithDistance*> res = variants[0];
-        vector<PathWithDistance*> for_dijkstra;
-        BidirectionalPath& path1 = res[0]->p_;
-        for_dijkstra.push_back(res[0]);
-        map<PathWithDistance*, size_t> distances = FindDistances(init_path, for_dijkstra);
-        size_t gap = distances.count(res[0]) > 0 ? distances[res[0]] : 100 + g_.k();
-        BidirectionalPath p(path1);
-        path1.Clear();
-        path1.PushBack(p.Front(), (int)gap);
-        path1.PushBack(p.SubPath(1));
-        for (size_t i = 1; i < res.size(); ++i) {
-            for_dijkstra.clear();
-            for_dijkstra.push_back(res[i]);
-            BidirectionalPath& path2 = res[i]->p_;
-            distances = FindDistances(path1, for_dijkstra);
-            gap = distances.count(res[i]) > 0 ? distances[res[i]] : 100 + g_.k();
-            path1.PushBack(path2.Front(), (int)gap);
-            for (int i = 1; i < (int) path2.Size(); ++i) {
-                path1.PushBack(path2[i], path2.GapAt(i), path2.TrashPreviousAt(i), path2.TrashCurrentAt(i));
-            }
-            path2.Clear();
-        }
-    } else if (variants.size() > 1) {
-        vector<PathWithDistance*> res = variants[0];
-        EdgeId last = res.back()->p_.Back();
-        for (size_t i = 1; i < variants.size(); ++i) {
-            if (last != variants[i].back()->p_.Back()) {
-                return;
-            }
-        }
-        for (size_t i = 0; i < res.size(); ++i) {
-            res[i]->p_.Clear();
-        }
-        int gap = (int) 1000 + (int) g_.k();
-        res[0]->p_.PushBack(last, gap);
-    }
-}
-
-inline vector<vector<PathWithDistance*> > NextPathSearcher::FilterConnections(vector<PathWithDistance*>& all_paths, map<PathWithDistance*, set<PathWithDistance*> >& connections) const {
-    vector<vector<PathWithDistance*> > variants;
-    DEBUG("filter connections " << connections.size() << " all paths size " << all_paths.size())
-    vector<vector<size_t> > permutations = Generate(all_paths.size(), all_paths, connections);
-    DEBUG("generated all permutations " << permutations.size());
-    for (size_t i = 0; i < permutations.size(); ++i) {
-        vector<PathWithDistance*> variant;
-        for (size_t j = 0; j < permutations[i].size(); ++j) {
-            variant.push_back(all_paths[permutations[i][j]]);
-        }
-        variants.push_back(variant);
-    }
-    return variants;
-}
-
-inline void NextPathSearcher::JoinPathsByDejikstra(const BidirectionalPath& init_path, ConstructedPathT& constructed_paths) const {
-    DEBUG("==  try to join paths by dejikstra ===");
-    for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
-        p1->second.p_.Print();
-    }
-    DEBUG("==  printed ===");
-
-    vector<PathWithDistance*> all_paths;
-    for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
-        if (p1->second.p_.Size() != 0) {
-            all_paths.push_back(&p1->second);
-        }
-    }
-    map<PathWithDistance*, set<PathWithDistance*> > connections;
-    FindConnections(all_paths, connections);
-    vector<vector<PathWithDistance*> > variants = FilterConnections(all_paths, connections);
-    ConnectPaths(init_path, variants);
-
-    DEBUG("==  after to join paths ===");
-    for (auto p1 = constructed_paths.begin(); p1 != constructed_paths.end(); ++p1) {
-        p1->second.p_.Print();
-    }
-    DEBUG("==  printed ===");
-}
-
-}  // namespace path_extend
diff --git a/src/modules/algorithms/path_extend/path_extend_launch.hpp b/src/modules/algorithms/path_extend/path_extend_launch.hpp
deleted file mode 100644
index ba1d4e3..0000000
--- a/src/modules/algorithms/path_extend/path_extend_launch.hpp
+++ /dev/null
@@ -1,1257 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-/*
- * lc_launch.hpp
- *
- *  Created on: Dec 1, 2011
- *      Author: andrey
- */
-
-#ifndef PATH_EXTEND_LAUNCH_HPP_
-#define PATH_EXTEND_LAUNCH_HPP_
-
-#include "scaffolder2015/scaffold_graph_constructor.hpp"
-#include "pe_config_struct.hpp"
-#include "pe_resolver.hpp"
-#include "path_extender.hpp"
-#include "pe_io.hpp"
-#include "path_visualizer.hpp"
-#include "loop_traverser.hpp"
-#include "assembly_graph/graph_alignment/long_read_storage.hpp"
-#include "next_path_searcher.hpp"
-#include "scaffolder2015/extension_chooser2015.hpp"
-#include "algorithms/genome_consistance_checker.hpp"
-#include "scaffolder2015/scaffold_graph.hpp"
-#include "scaffolder2015/scaffold_graph_visualizer.hpp"
-
-namespace path_extend {
-
-using namespace debruijn_graph;
-
-struct PathExtendParamsContainer {
-
-    PathExtendParamsContainer(const pe_config::MainPEParamsT& pe_cfg_,
-                              const std::string& output_dir_,
-                              const std::string& contigs_name_,
-                              const std::string& scf_name_,
-                              config::pipeline_type mode_,
-                              bool uneven_depth_,
-                              bool avoid_rc_connections_,
-                              bool use_scaffolder_,
-                              bool output_broken_scaffolds_ = true):
-        pe_cfg(pe_cfg_),
-        pset(pe_cfg_.param_set),
-        output_dir(output_dir_),
-        etc_dir(output_dir + pe_cfg_.etc_dir + "/"),
-        contigs_name(scf_name_),
-        broken_contigs(contigs_name_),
-        mode(mode_),
-        uneven_depth(uneven_depth_),
-        avoid_rc_connections(avoid_rc_connections_),
-        use_scaffolder(use_scaffolder_),
-        traverse_loops(true),
-        output_broken_scaffolds(output_broken_scaffolds_)
-    {
-        if (!(use_scaffolder && pset.scaffolder_options.enabled)) {
-            contigs_name = contigs_name_;
-            traverse_loops = false;
-            output_broken_scaffolds = false;
-        }
-    }
-
-    const pe_config::MainPEParamsT& pe_cfg;
-    const pe_config::ParamSetT& pset;
-
-    std::string output_dir;
-    std::string etc_dir;
-
-    std::string contigs_name;
-    std::string broken_contigs;
-
-    config::pipeline_type mode;
-    bool uneven_depth;
-
-    bool avoid_rc_connections;
-    bool use_scaffolder;
-    bool traverse_loops;
-    bool output_broken_scaffolds;
-};
-
-inline void DebugOutputPaths(const conj_graph_pack& gp,
-                             const PathExtendParamsContainer& params,
-                             const PathContainer& paths,
-                             const string& name) {
-    PathInfoWriter path_writer;
-    PathVisualizer visualizer;
-
-    DefaultContigCorrector<ConjugateDeBruijnGraph> corrector(gp.g);
-    DefaultContigConstructor<ConjugateDeBruijnGraph> constructor(gp.g, corrector);
-    ContigWriter writer(gp.g, constructor, gp.components, params.mode == config::pipeline_type::plasmid);
-
-    if (!params.pe_cfg.debug_output) {
-        return;
-    }
-    writer.OutputPaths(paths, params.etc_dir + name);
-    if (params.pe_cfg.output.write_paths) {
-        path_writer.WritePaths(paths, params.etc_dir + name + ".dat");
-    }
-    if (params.pe_cfg.viz.print_paths) {
-        visualizer.writeGraphWithPathsSimple(gp, params.etc_dir + name + ".dot", name, paths);
-    }
-}
-
-inline double GetWeightThreshold(shared_ptr<PairedInfoLibrary> lib, const pe_config::ParamSetT& pset) {
-    return lib->IsMp() ? pset.mate_pair_options.weight_threshold : pset.extension_options.weight_threshold;
-}
-
-inline double GetPriorityCoeff(shared_ptr<PairedInfoLibrary> lib, const pe_config::ParamSetT& pset) {
-    return lib->IsMp() ? pset.mate_pair_options.priority_coeff : pset.extension_options.priority_coeff;
-}
-
-inline void SetSingleThresholdForLib(shared_ptr<PairedInfoLibrary> lib, const pe_config::ParamSetT &pset, double threshold, double correction_coeff = 1.0) {
-    if  (lib->IsMp()) {
-        lib->SetSingleThreshold(pset.mate_pair_options.use_default_single_threshold || math::le(threshold, 0.0) ?
-                                pset.mate_pair_options.single_threshold : threshold);
-    }
-    else {
-        double t = pset.extension_options.use_default_single_threshold || math::le(threshold, 0.0) ?
-                   pset.extension_options.single_threshold : threshold;
-        t = correction_coeff * t;
-        lib->SetSingleThreshold(t);
-    }
-}
-
-
-inline void OutputBrokenScaffolds(PathContainer& paths,
-                                  const PathExtendParamsContainer& params,
-                                  int k,
-                                  const ContigWriter& writer,
-                                  const std::string& filename) {
-    if (!params.pset.scaffolder_options.enabled
-        || !params.use_scaffolder
-        || params.pe_cfg.obs == obs_none) {
-        return;
-    }
-
-    int min_gap = params.pe_cfg.obs == obs_break_all ? k / 2 : k;
-
-    ScaffoldBreaker breaker(min_gap, paths);
-    breaker.container().SortByLength();
-    writer.OutputPaths(breaker.container(), filename);
-}
-
-inline void AddPathsToContainer(const conj_graph_pack& gp,
-                         const std::vector<PathInfo<Graph> > paths,
-                         size_t size_threshold, PathContainer& result) {
-    for (size_t i = 0; i < paths.size(); ++i) {
-        auto path = paths.at(i);
-        vector<EdgeId> edges = path.getPath();
-        if (edges.size() <= size_threshold) {
-            continue;
-        }
-        BidirectionalPath* new_path = new BidirectionalPath(gp.g, edges);
-        BidirectionalPath* conj_path = new BidirectionalPath(new_path->Conjugate());
-        new_path->SetWeight((float) path.getWeight());
-        conj_path->SetWeight((float) path.getWeight());
-        result.AddPair(new_path, conj_path);
-    }
-    DEBUG("Long reads paths " << result.size() << " == ");
-}
-
-bool HasOnlyMPLibs(const config::dataset& dataset_info) {
-    for (const auto& lib : dataset_info.reads) {
-        if (!((lib.type() == io::LibraryType::MatePairs || lib.type() == io::LibraryType::HQMatePairs) &&
-              lib.data().mean_insert_size > 0.0)) {
-            return false;
-        }
-    }
-    return true;
-}
-
-bool UseCoverageResolverForSingleReads(const config::dataset& dataset_info,
-                                       const io::LibraryType& type) {
-    return HasOnlyMPLibs(dataset_info) && (type == io::LibraryType::HQMatePairs);
-}
-
-inline size_t CountEdgesInGraph(const Graph& g) {
-    size_t count = 0;
-    for (auto iter = g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
-        count++;
-    }
-    return count;
-}
-
-inline size_t GetNumberMPPaths(const Graph& g) {
-    size_t count_edge = CountEdgesInGraph(g);
-    if (count_edge < 1000) {
-        return 1000;
-    }
-    if (count_edge < 10000) {
-        return 100;
-    }
-    return 50;
-}
-
-inline string LibStr(size_t count) {
-    return count == 1 ? "library" : "libraries";
-}
-
-inline void ClonePathContainer(PathContainer& spaths, PathContainer& tpaths, GraphCoverageMap& tmap) {
-    tpaths.clear();
-    tmap.Clear();
-
-    for (auto iter = spaths.begin(); iter != spaths.end(); ++iter) {
-        BidirectionalPath& path = *iter.get();
-        BidirectionalPath* new_path = new BidirectionalPath(path.graph());
-        new_path->Subscribe(&tmap);
-        new_path->PushBack(path);
-
-        BidirectionalPath& cpath = *iter.getConjugate();
-        BidirectionalPath* new_cpath = new BidirectionalPath(cpath.graph());
-        new_cpath->Subscribe(&tmap);
-        new_cpath->PushBack(cpath);
-
-        tpaths.AddPair(new_path, new_cpath);
-    }
-}
-
-inline void FinalizePaths(const PathExtendParamsContainer& params,
-                          PathContainer& paths,
-                          const Graph& g,
-                          GraphCoverageMap& cover_map,
-                          size_t min_edge_len,
-                          size_t max_path_diff,
-                          bool mate_pairs = false) {
-    PathExtendResolver resolver(cover_map.graph());
-
-    if (params.pset.remove_overlaps) {
-        resolver.removeOverlaps(paths, cover_map, min_edge_len, max_path_diff,
-                                params.pset.cut_all_overlaps,
-                                (params.mode == config::pipeline_type::moleculo));
-    }
-    else {
-        resolver.removeEqualPaths(paths, cover_map, min_edge_len);
-    }
-    if (mate_pairs) {
-        resolver.RemoveMatePairEnds(paths, min_edge_len);
-    }
-    if (params.avoid_rc_connections) {
-        paths.FilterInterstandBulges();
-    }
-    paths.FilterEmptyPaths();
-    if (!mate_pairs) {
-        resolver.addUncoveredEdges(paths, cover_map);
-    }
-    if (params.pset.path_filtration.enabled) {
-        LengthPathFilter(g, params.pset.path_filtration.min_length).filter(paths);;
-        IsolatedPathFilter(g, params.pset.path_filtration.min_length_for_low_covered, params.pset.path_filtration.min_coverage).filter(paths);
-        IsolatedPathFilter(g, params.pset.path_filtration.isolated_min_length).filter(paths);
-    }
-    paths.SortByLength();
-    for(auto& path : paths) {
-        path.first->ResetOverlaps();
-    }
-
-}
-
-inline void TraverseLoops(PathContainer& paths, GraphCoverageMap& cover_map, shared_ptr<ContigsMaker> extender) {
-    INFO("Traversing tandem repeats");
-    LoopTraverser loopTraverser(cover_map.graph(), cover_map, extender);
-    loopTraverser.TraverseAllLoops();
-    paths.SortByLength();
-}
-
-inline bool IsForSingleReadExtender(const io::SequencingLibrary<config::DataSetData> &lib) {
-    io::LibraryType lt = lib.type();
-    return (lib.data().single_reads_mapped ||
-            lt == io::LibraryType::PacBioReads ||
-            lt == io::LibraryType::SangerReads ||
-            lt == io::LibraryType::NanoporeReads ||
-            lib.is_contig_lib());
-}
-
-inline bool IsForPEExtender(const io::SequencingLibrary<config::DataSetData> &lib) {
-    return (lib.type() == io::LibraryType::PairedEnd &&
-            lib.data().mean_insert_size > 0.0);
-}
-
-inline bool IsForShortLoopExtender(const io::SequencingLibrary<config::DataSetData> &lib) {
-    return (lib.type() == io::LibraryType::PairedEnd &&
-            lib.data().mean_insert_size > 0.0);
-}
-
-inline bool IsForScaffoldingExtender(const io::SequencingLibrary<config::DataSetData> &lib) {
-    return (lib.type() == io::LibraryType::PairedEnd &&
-            lib.data().mean_insert_size > 0.0);
-}
-
-inline bool IsForMPExtender(const io::SequencingLibrary<config::DataSetData> &lib) {
-    return lib.data().mean_insert_size > 0.0 &&
-            (lib.type() == io::LibraryType::HQMatePairs ||
-             lib.type() == io::LibraryType::MatePairs);
-}
-
-enum class PathExtendStage {
-    PEStage,
-    PEPolishing,
-    MPStage,
-    FinalizingPEStage,
-    FinalPolishing,
-};
-
-inline bool IsPEStage(PathExtendStage stage) {
-    return stage == PathExtendStage::PEPolishing || stage == PathExtendStage::PEStage;
-}
-
-inline bool IsMPStage(PathExtendStage stage) {
-    return stage == PathExtendStage::MPStage;
-}
-
-inline bool IsFinalStage(PathExtendStage stage) {
-    return stage == PathExtendStage::FinalizingPEStage || stage == PathExtendStage::FinalPolishing;
-}
-
-inline bool IsPolishingStage(PathExtendStage stage) {
-    return stage == PathExtendStage::PEPolishing || stage == PathExtendStage::FinalPolishing;
-}
-
-
-template<class Index>
-inline shared_ptr<PairedInfoLibrary> MakeNewLib(const config::dataset::Library& lib,
-                                                const conj_graph_pack::graph_t& g,
-                                                const Index& paired_index) {
-    size_t read_length = lib.data().read_length;
-    size_t is = (size_t) lib.data().mean_insert_size;
-    int is_min = (int) lib.data().insert_size_left_quantile;
-    int is_max = (int) lib.data().insert_size_right_quantile;
-    int var = (int) lib.data().insert_size_deviation;
-    bool is_mp = lib.type() == io::LibraryType::MatePairs ||  lib.type() == io::LibraryType::HQMatePairs ;
-    return make_shared< PairedInfoLibraryWithIndex<decltype(paired_index)> >(g.k(), g, read_length,
-                                                                                    is, is_min > 0.0 ? size_t(is_min) : 0, is_max > 0.0 ? size_t(is_max) : 0,
-                                                                                    size_t(var),
-                                                                                    paired_index, is_mp,
-                                                                                    lib.data().insert_size_distribution);
-}
-
-pe_config::LongReads GetLongReadsConfig(const PathExtendParamsContainer& params,
-                                        const io::LibraryType& type) {
-    if (io::SequencingLibraryBase::is_long_read_lib(type)) {
-        return params.pe_cfg.long_reads.pacbio_reads;
-    } else if (type == io::LibraryType::PathExtendContigs){
-        return params.pe_cfg.long_reads.meta_contigs;
-    } else if (io::SequencingLibraryBase::is_contig_lib(type)) {
-        return params.pe_cfg.long_reads.contigs;
-    }
-    return params.pe_cfg.long_reads.single_reads;
-}
-
-
-inline shared_ptr<ExtensionChooser> MakeLongReadsExtensionChooser(const config::dataset::Library& lib,
-                                                                  size_t lib_index,
-                                                                  const PathExtendParamsContainer& params,
-                                                                  const conj_graph_pack& gp) {
-    PathContainer paths;
-    AddPathsToContainer(gp, gp.single_long_reads[lib_index].GetAllPaths(), 1, paths);
-
-    auto long_reads_config = GetLongReadsConfig(params, lib.type());
-    return make_shared<LongReadsExtensionChooser>(gp.g, paths, long_reads_config.filtering,
-                                                  long_reads_config.weight_priority,
-                                                  long_reads_config.unique_edge_priority,
-                                                  long_reads_config.min_significant_overlap,
-                                                  params.pset.extension_options.max_repeat_length,
-                                                  params.uneven_depth);
-}
-
-
-inline shared_ptr<SimpleExtender> MakeLongReadsExtender(const config::dataset& dataset_info,
-                                                        size_t lib_index,
-                                                        const PathExtendParamsContainer& params,
-                                                        const conj_graph_pack& gp,
-                                                        const GraphCoverageMap& cov_map) {
-    const auto& lib = dataset_info.reads[lib_index];
-    size_t resolvable_repeat_length_bound = 10000ul;
-    if (!lib.is_contig_lib()) {
-        resolvable_repeat_length_bound = std::max(resolvable_repeat_length_bound, lib.data().read_length);
-    }
-    INFO("resolvable_repeat_length_bound set to " << resolvable_repeat_length_bound);
-
-
-    auto long_read_ec = MakeLongReadsExtensionChooser(lib, lib_index, params, gp);
-    return make_shared<SimpleExtender>(gp, cov_map,
-                                       long_read_ec,
-                                       resolvable_repeat_length_bound,
-                                       params.pset.loop_removal.max_loops,
-                                       true, /* investigate short loops */
-                                       UseCoverageResolverForSingleReads(dataset_info, lib.type()));
-}
-
-inline shared_ptr<SimpleExtender> MakeLongEdgePEExtender(const config::dataset& dataset_info,
-                                                         size_t lib_index,
-                                                         const PathExtendParamsContainer& params,
-                                                         const conj_graph_pack& gp,
-                                                         const GraphCoverageMap& cov_map,
-                                                         bool investigate_loops) {
-
-    const auto& lib = dataset_info.reads[lib_index];
-    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(lib, gp.g, gp.clustered_indices[lib_index]);
-    SetSingleThresholdForLib(paired_lib, params.pset, lib.data().pi_threshold);
-    INFO("Threshold for lib #" << lib_index << ": " << paired_lib->GetSingleThreshold());
-
-    shared_ptr<WeightCounter> wc =
-        make_shared<PathCoverWeightCounter>(gp.g, paired_lib, params.pset.normalize_weight);
-    shared_ptr<ExtensionChooser> extension =
-        make_shared<LongEdgeExtensionChooser>(gp.g, wc,
-                                              GetWeightThreshold(paired_lib, params.pset),
-                                              GetPriorityCoeff(paired_lib, params.pset));
-
-    return make_shared<SimpleExtender>(gp, cov_map,
-                                       extension,
-                                       paired_lib->GetISMax(),
-                                       params.pset.loop_removal.max_loops,
-                                       investigate_loops,
-                                       false /*use short loop coverage resolver*/);
-}
-
-inline shared_ptr<SimpleExtensionChooser> MakeMetaExtensionChooser(shared_ptr<PairedInfoLibrary> lib,
-                                                                   const PathExtendParamsContainer& params,
-                                                                   const conj_graph_pack& gp,
-                                                                   size_t read_length) {
-    VERIFY(params.mode == config::pipeline_type::meta);
-    VERIFY(!lib->IsMp());
-    shared_ptr<WeightCounter> wc = make_shared<MetagenomicWeightCounter>(gp.g,
-                                                                         lib,
-                                                                         read_length, //read_length
-                                                                         0.3, //normalized_threshold
-                                                                         3, //raw_threshold
-                                                                         0 /*estimation_edge_length*/ );
-    return make_shared<SimpleExtensionChooser>(gp.g, wc,
-                                               params.pset.extension_options.weight_threshold,
-                                               params.pset.extension_options.priority_coeff);
-}
-
-inline shared_ptr<SimpleExtender> MakeMetaExtender(const config::dataset& dataset_info,
-                                                   size_t lib_index,
-                                                   const PathExtendParamsContainer& params,
-                                                   const conj_graph_pack& gp,
-                                                   const GraphCoverageMap& cov_map,
-                                                   bool investigate_loops) {
-
-    const auto& lib = dataset_info.reads[lib_index];
-    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(lib, gp.g, gp.clustered_indices[lib_index]);
-
-    return make_shared<SimpleExtender>(gp, cov_map,
-                                       MakeMetaExtensionChooser(paired_lib, params, gp, dataset_info.RL()),
-                                       paired_lib->GetISMax(),
-                                       params.pset.loop_removal.max_loops,
-                                       investigate_loops,
-                                       false /*use short loop coverage resolver*/);
-}
-
-inline shared_ptr<SimpleExtender> MakePEExtender(const config::dataset& dataset_info,
-                                                 size_t lib_index,
-                                                 const PathExtendParamsContainer& params,
-                                                 const conj_graph_pack& gp,
-                                                 const GraphCoverageMap& cov_map,
-                                                 bool investigate_loops) {
-
-    const auto& lib = dataset_info.reads[lib_index];
-    shared_ptr<PairedInfoLibrary>  paired_lib = MakeNewLib(lib, gp.g, gp.clustered_indices[lib_index]);
-    SetSingleThresholdForLib(paired_lib, params.pset, lib.data().pi_threshold);
-    INFO("Threshold for lib #" << lib_index << ": " << paired_lib->GetSingleThreshold());
-
-    shared_ptr<WeightCounter> wc = make_shared<PathCoverWeightCounter>(gp.g, paired_lib, params.pset.normalize_weight);
-    auto extension = make_shared<SimpleExtensionChooser>(gp.g, wc,
-                                                         GetWeightThreshold(paired_lib, params.pset),
-                                                         GetPriorityCoeff(paired_lib, params.pset));
-
-    return make_shared<SimpleExtender>(gp, cov_map,
-                                       extension,
-                                       paired_lib->GetISMax(),
-                                       params.pset.loop_removal.max_loops,
-                                       investigate_loops,
-                                       false /*use short loop coverage resolver*/);
-}
-
-
-inline shared_ptr<PathExtender> MakeScaffoldingExtender(const config::dataset& dataset_info,
-                                                        size_t lib_index,
-                                                        const PathExtendParamsContainer& params,
-                                                        const conj_graph_pack& gp,
-                                                        const GraphCoverageMap& cov_map) {
-    const auto& lib = dataset_info.reads[lib_index];
-    const auto& pset = params.pset;
-    shared_ptr<PairedInfoLibrary>  paired_lib = MakeNewLib(lib, gp.g, gp.scaffolding_indices[lib_index]);
-
-    shared_ptr<WeightCounter> counter = make_shared<ReadCountWeightCounter>(gp.g, paired_lib);
-
-    auto scaff_chooser = std::make_shared<ScaffoldingExtensionChooser>(gp.g, counter,
-                                                                       pset.scaffolder_options.cl_threshold,
-                                                                       pset.scaffolder_options.var_coeff);
-
-    vector<shared_ptr<GapJoiner>> joiners;
-    if (params.pset.scaffolder_options.use_la_gap_joiner)
-        joiners.push_back(std::make_shared<LAGapJoiner>(gp.g, pset.scaffolder_options.min_overlap_length,
-                                                        pset.scaffolder_options.flank_multiplication_coefficient,
-                                                        pset.scaffolder_options.flank_addition_coefficient));
-
-
-    joiners.push_back(std::make_shared<HammingGapJoiner>(gp.g,
-                                                         pset.scaffolder_options.min_gap_score,
-                                                         pset.scaffolder_options.short_overlap,
-                                                         (int) pset.scaffolder_options.basic_overlap_coeff * dataset_info.RL()));
-
-    auto composite_gap_joiner = std::make_shared<CompositeGapJoiner>(gp.g, 
-                                                joiners, 
-                                                size_t(pset.scaffolder_options.max_can_overlap * (double) gp.g.k()), /* may overlap threshold */
-                                                int(math::round((double) gp.g.k() - pset.scaffolder_options.var_coeff * (double) paired_lib->GetIsVar())),  /* must overlap threshold */
-                                                pset.scaffolder_options.artificial_gap);
-
-    return make_shared<ScaffoldingPathExtender>(gp, cov_map, scaff_chooser,
-                                                composite_gap_joiner,
-                                                paired_lib->GetISMax(),
-                                                pset.loop_removal.max_loops,
-                                                false, /* investigate short loops */
-                                                params.avoid_rc_connections);
-}
-
-
-inline shared_ptr<PathExtender> MakeRNAScaffoldingExtender(const config::dataset& dataset_info,
-                                                            size_t lib_index,
-                                                            const PathExtendParamsContainer& params,
-                                                            const conj_graph_pack& gp,
-                                                            const GraphCoverageMap& cov_map) {
-
-    const auto& lib = dataset_info.reads[lib_index];
-    const auto& pset = params.pset;
-    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(lib, gp.g, gp.paired_indices[lib_index]);
-
-    shared_ptr<WeightCounter> counter = make_shared<ReadCountWeightCounter>(gp.g, paired_lib);
-
-    auto scaff_chooser = std::make_shared<ScaffoldingExtensionChooser>(gp.g, counter, pset.scaffolder_options.cutoff, pset.scaffolder_options.var_coeff);
-    auto scaff_chooser2 = std::make_shared<ScaffoldingExtensionChooser>(gp.g, counter, pset.scaffolder_options.hard_cutoff, pset.scaffolder_options.var_coeff);
-
-    vector<shared_ptr<GapJoiner>> joiners;
-    if (params.pset.scaffolder_options.use_la_gap_joiner)
-        joiners.push_back(std::make_shared<LAGapJoiner>(gp.g, pset.scaffolder_options.min_overlap_length,
-                                                        pset.scaffolder_options.flank_multiplication_coefficient,
-                                                        pset.scaffolder_options.flank_addition_coefficient));
-
-
-    joiners.push_back(std::make_shared<HammingGapJoiner>(gp.g,
-                                                         pset.scaffolder_options.min_gap_score,
-                                                         pset.scaffolder_options.short_overlap,
-                                                         (int) pset.scaffolder_options.basic_overlap_coeff * dataset_info.RL()));
-
-    auto composite_gap_joiner = std::make_shared<CompositeGapJoiner>(gp.g,
-                                                                     joiners,
-                                                                     size_t(pset.scaffolder_options.max_can_overlap * (double) gp.g.k()), /* may overlap threshold */
-                                                                     int(math::round((double) gp.g.k() - pset.scaffolder_options.var_coeff * (double) paired_lib->GetIsVar())),  /* must overlap threshold */
-                                                                     pset.scaffolder_options.artificial_gap);
-
-    VERIFY(pset.scaffolder_options.min_overlap_for_rna_scaffolding.is_initialized());
-    return make_shared<RNAScaffoldingPathExtender>(gp, cov_map,
-                                                   scaff_chooser,
-                                                   scaff_chooser2,
-                                                   composite_gap_joiner,
-                                                   paired_lib->GetISMax(),
-                                                   pset.loop_removal.max_loops,
-                                                   false  /* investigate short loops */,
-                                                   *pset.scaffolder_options.min_overlap_for_rna_scaffolding);
-}
-
-
-inline shared_ptr<PathExtender> MakeScaffolding2015Extender(const config::dataset& dataset_info,
-                                                            size_t lib_index,
-                                                            const PathExtendParamsContainer& params,
-                                                            const conj_graph_pack& gp,
-                                                            const GraphCoverageMap& cov_map,
-                                                            const ScaffoldingUniqueEdgeStorage& storage) {
-
-    const auto& lib = dataset_info.reads[lib_index];
-    const auto& pset = params.pset;
-    shared_ptr<PairedInfoLibrary> paired_lib;
-    INFO("Creating Scaffolding 2015 extender for lib #" << lib_index);
-
-    //TODO:: temporary solution
-    if (gp.paired_indices[lib_index].size() > gp.clustered_indices[lib_index].size()) {
-        INFO("Paired unclustered indices not empty, using them");
-        paired_lib = MakeNewLib(lib, gp.g, gp.paired_indices[lib_index]);
-    } else if (gp.clustered_indices[lib_index].size() != 0 ) {
-        INFO("clustered indices not empty, using them");
-        paired_lib = MakeNewLib(lib, gp.g, gp.clustered_indices[lib_index]);
-    } else {
-        ERROR("All paired indices are empty!");
-    }
-
-    shared_ptr<WeightCounter> counter = make_shared<ReadCountWeightCounter>(gp.g, paired_lib);
-//TODO::was copypasted from MakeScaffoldingExtender, refactor 2015 extension chhoser
-    DEBUG("creating extchooser");
-
-    auto scaff_chooser = std::make_shared<ExtensionChooser2015>(gp.g,
-                                                                counter,
-                                                                lib_index,
-                                                                storage,
-                                                                pset.scaffolder_options.cl_threshold,
-                                                                pset.scaffolder_options.var_coeff,
-                                                                pset.scaffolding2015.relative_weight_cutoff);
-
-    auto gap_joiner = std::make_shared<HammingGapJoiner>(gp.g, pset.scaffolder_options.min_gap_score,
-                                                         pset.scaffolder_options.short_overlap,
-                                                         (int) pset.scaffolder_options.basic_overlap_coeff * dataset_info.RL());
-
-    return make_shared<ScaffoldingPathExtender>(gp, cov_map,
-                                                scaff_chooser,
-                                                gap_joiner,
-                                                paired_lib->GetISMax(),
-                                                pset.loop_removal.max_loops,
-                                                false, /* investigate short loops */
-                                                params.avoid_rc_connections,
-                                                false /* jump only from tips */);
-}
-
-
-inline shared_ptr<SimpleExtender> MakeMPExtender(const config::dataset& dataset_info,
-                                                 size_t lib_index,
-                                                 const PathExtendParamsContainer& params,
-                                                 const conj_graph_pack& gp,
-                                                 const GraphCoverageMap& cov_map,
-                                                 const PathContainer& paths) {
-
-    const auto& lib = dataset_info.reads[lib_index];
-    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(lib, gp.g, gp.paired_indices[lib_index]);
-
-    SetSingleThresholdForLib(paired_lib, params.pset, lib.data().pi_threshold);
-    INFO("Threshold for lib #" << lib_index << ": " << paired_lib->GetSingleThreshold());
-
-    size_t max_number_of_paths_to_search = GetNumberMPPaths(gp.g);
-    DEBUG("max number of mp paths " << max_number_of_paths_to_search);
-
-    shared_ptr<MatePairExtensionChooser> chooser =
-        make_shared<MatePairExtensionChooser>(gp.g,
-                                              paired_lib,
-                                              paths,
-                                              max_number_of_paths_to_search,
-                                              params.uneven_depth);
-
-    return make_shared<SimpleExtender>(gp, cov_map,
-                                       chooser,
-                                       paired_lib->GetISMax(),
-                                       params.pset.loop_removal.mp_max_loops,
-                                       true, /* investigate short loops */
-                                       false /*use short loop coverage resolver*/);
-}
-
-
-inline shared_ptr<SimpleExtender> MakeCoordCoverageExtender(const config::dataset& dataset_info,
-                                                            size_t lib_index,
-                                                            const PathExtendParamsContainer& params,
-                                                            const conj_graph_pack& gp,
-                                                            const GraphCoverageMap& cov_map) {
-
-    const auto& lib = dataset_info.reads[lib_index];
-    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(lib, gp.g, gp.clustered_indices[lib_index]);
-
-    CoverageAwareIdealInfoProvider provider(gp.g, paired_lib, -1ul, 0);
-    auto coord_chooser = make_shared<CoordinatedCoverageExtensionChooser>(gp.g, provider,
-                                                                          params.pset.coordinated_coverage.max_edge_length_in_repeat,
-                                                                          params.pset.coordinated_coverage.delta,
-                                                                          params.pset.coordinated_coverage.min_path_len);
-    auto chooser = make_shared<JointExtensionChooser>(gp.g, MakeMetaExtensionChooser(paired_lib, params, gp, dataset_info.RL()), coord_chooser);
-
-    return make_shared<SimpleExtender>(gp, cov_map, chooser,
-                                       -1ul /* insert size */,
-                                       params.pset.loop_removal.mp_max_loops,
-                                       true, /* investigate short loops */
-                                       false /*use short loop coverage resolver*/);
-}
-
-
-inline shared_ptr<SimpleExtender> MakeRNAExtender(const config::dataset& dataset_info,
-                                                  size_t lib_index,
-                                                  const PathExtendParamsContainer& params,
-                                                  const conj_graph_pack& gp,
-                                                  const GraphCoverageMap& cov_map,
-                                                  bool investigate_loops) {
-
-    const auto& lib = dataset_info.reads[lib_index];
-    shared_ptr<PairedInfoLibrary> paired_lib = MakeNewLib(lib, gp.g, gp.clustered_indices[lib_index]);
-    SetSingleThresholdForLib(paired_lib, params.pset, lib.data().pi_threshold);
-    INFO("Threshold for lib #" << lib_index << ": " << paired_lib->GetSingleThreshold());
-
-    shared_ptr<WeightCounter> wc = make_shared<PathCoverWeightCounter>(gp.g, paired_lib, params.pset.normalize_weight);
-    shared_ptr<RNAExtensionChooser> extension =
-        make_shared<RNAExtensionChooser>(gp.g, wc,
-                                         GetWeightThreshold(paired_lib, params.pset),
-                                         GetPriorityCoeff(paired_lib, params.pset));
-
-    return make_shared<MultiExtender>(gp, cov_map,
-                                      extension,
-                                      paired_lib->GetISMax(),
-                                      params.pset.loop_removal.max_loops,
-                                      investigate_loops,
-                                      false /*use short loop coverage resolver*/);
-}
-
-
-inline shared_ptr<SimpleExtender> MakeRNALongReadsExtender(const config::dataset& dataset_info,
-                                                           size_t lib_index,
-                                                           const PathExtendParamsContainer& params,
-                                                           const conj_graph_pack& gp,
-                                                           const GraphCoverageMap& cov_map) {
-
-    VERIFY_MSG(false, "Long reads rna extender is not implemented yet")
-
-    const auto& lib = dataset_info.reads[lib_index];
-    size_t resolvable_repeat_length_bound = 10000ul;
-    if (!lib.is_contig_lib()) {
-        resolvable_repeat_length_bound = std::max(resolvable_repeat_length_bound, lib.data().read_length);
-    }
-    INFO("resolvable_repeat_length_bound set to " << resolvable_repeat_length_bound);
-
-    auto long_reads_ec = MakeLongReadsExtensionChooser(lib, lib_index, params, gp);
-
-    return make_shared<SimpleExtender>(gp, cov_map,
-                                       long_reads_ec,
-                                       resolvable_repeat_length_bound,
-                                       params.pset.loop_removal.max_loops,
-                                       true, /* investigate short loops */
-                                       UseCoverageResolverForSingleReads(dataset_info, lib.type()));
-}
-
-
-template<typename Base, typename T>
-inline bool instanceof(const T *ptr) {
-    return dynamic_cast<const Base*>(ptr) != nullptr;
-}
-
-
-//Used for debug purpose only
-inline void PrintExtenders(vector<shared_ptr<PathExtender> >& extenders) {
-    DEBUG("Extenders in vector:");
-    for(size_t i = 0; i < extenders.size(); ++i) {
-        string type = typeid(*extenders[i]).name();
-        DEBUG("Extender #i" << type);
-        if (instanceof<SimpleExtender>(extenders[i].get())) {
-            auto ec = ((SimpleExtender *) extenders[i].get())->GetExtensionChooser();
-            string chooser_type = typeid(*ec).name();
-            DEBUG("    Extender #i" << chooser_type);
-        }
-        else if (instanceof<ScaffoldingPathExtender>(extenders[i].get())) {
-            auto ec = ((ScaffoldingPathExtender *) extenders[i].get())->GetExtensionChooser();
-            string chooser_type = typeid(*ec).name();
-            DEBUG("    Extender #i" << chooser_type);
-        }
-    }
-}
-
-inline vector<shared_ptr<PathExtender> > MakeAllExtenders(PathExtendStage stage,
-                                                          const config::dataset& dataset_info,
-                                                          const PathExtendParamsContainer& params,
-                                                          const conj_graph_pack& gp,
-                                                          const GraphCoverageMap& cov_map,
-                                                          const ScaffoldingUniqueEdgeStorage& storage,
-                                                          const PathContainer& paths_for_mp = PathContainer()) {
-
-    vector<shared_ptr<PathExtender> > result;
-    vector<shared_ptr<PathExtender> > pes;
-    vector<shared_ptr<PathExtender> > pes2015;
-    vector<shared_ptr<PathExtender> > pe_loops;
-    vector<shared_ptr<PathExtender> > pe_scafs;
-    vector<shared_ptr<PathExtender> > mps;
-
-    size_t single_read_libs = 0;
-    size_t pe_libs = 0;
-    size_t scf_pe_libs = 0;
-    size_t mp_libs = 0;
-
-    const auto& pset = params.pset;
-
-    for (io::LibraryType lt : io::LibraryPriotity) {
-        for (size_t lib_index = 0; lib_index < dataset_info.reads.lib_count(); ++lib_index) {
-            const auto& lib = dataset_info.reads[lib_index];
-            if (lib.type() != lt)
-                continue;
-
-            //TODO: scaff2015 does not need any single read libs?
-            if (IsForSingleReadExtender(lib) && pset.sm != sm_2015) {
-                result.push_back(MakeLongReadsExtender(dataset_info, lib_index, params, gp, cov_map));
-                ++single_read_libs;
-            }
-            if (IsForPEExtender(lib)) {
-                ++pe_libs;
-                if (IsPEStage(stage) && IsOldPEEnabled(pset.sm)) {
-                    if (params.mode == config::pipeline_type::meta)
-                        //TODO proper configuration via config
-                        pes.push_back(MakeMetaExtender(dataset_info, lib_index, params, gp, cov_map, false));
-                    else if (params.mode == config::pipeline_type::moleculo)
-                        pes.push_back(MakeLongEdgePEExtender(dataset_info, lib_index, params, gp, cov_map, false));
-                    else if (pset.multi_path_extend  && !IsPolishingStage(stage))
-                        pes.push_back(MakeRNAExtender(dataset_info, lib_index, params, gp, cov_map, false));
-                    else
-                        pes.push_back(MakePEExtender(dataset_info, lib_index, params, gp, cov_map, false));
-                }
-                else if (pset.sm == sm_2015) {
-                    pes2015.push_back(MakeScaffolding2015Extender(dataset_info, lib_index, params, gp, cov_map, storage));
-                }
-            }
-            //FIXME logic is very cryptic!
-            if (IsForShortLoopExtender(lib) && IsOldPEEnabled(pset.sm)) {
-                if (params.mode == config::pipeline_type::meta)
-                    pes.push_back(MakeMetaExtender(dataset_info, lib_index, params, gp, cov_map, true));
-                else if (pset.multi_path_extend && !IsPolishingStage(stage))
-                    pes.push_back(MakeRNAExtender(dataset_info, lib_index, params, gp, cov_map, true));
-                else
-                    pe_loops.push_back(MakePEExtender(dataset_info, lib_index, params, gp, cov_map, true));
-            }
-            if (IsForScaffoldingExtender(lib) && params.use_scaffolder && pset.scaffolder_options.enabled) {
-                ++scf_pe_libs;
-                if (params.mode == config::pipeline_type::rna) {
-                    pe_scafs.push_back(MakeRNAScaffoldingExtender(dataset_info, lib_index, params, gp, cov_map));
-                }
-                else {
-                    switch (pset.sm) {
-                        case sm_old: {
-                            pe_scafs.push_back(MakeScaffoldingExtender(dataset_info, lib_index, params, gp, cov_map));
-                            break;
-                        }
-                        case sm_old_pe_2015: {
-                            pe_scafs.push_back(MakeScaffolding2015Extender(dataset_info, lib_index, params, gp, cov_map, storage));
-                            break;
-                        }
-                        case sm_combined: {
-                            pe_scafs.push_back(MakeScaffoldingExtender(dataset_info, lib_index, params, gp, cov_map));
-                            pe_scafs.push_back(MakeScaffolding2015Extender(dataset_info, lib_index, params, gp, cov_map, storage));
-                            break;
-                        }
-                        default:
-                            break;
-                    }
-                }
-            }
-            if (IsForMPExtender(lib) && IsMPStage(stage)) {
-                ++mp_libs;
-                switch (pset.sm) {
-                    case sm_old: {
-                        mps.push_back(MakeMPExtender(dataset_info, lib_index, params, gp, cov_map, paths_for_mp));
-                        break;
-                    }
-                    case sm_old_pe_2015: {
-                        mps.push_back(MakeScaffolding2015Extender(dataset_info, lib_index, params, gp, cov_map, storage));
-                        break;
-                    }
-                    case sm_2015: {
-                        mps.push_back(MakeScaffolding2015Extender(dataset_info, lib_index, params, gp, cov_map, storage));
-                        break;
-                    }
-                    case sm_combined: {
-                        mps.push_back(MakeMPExtender(dataset_info, lib_index, params, gp, cov_map, paths_for_mp));
-                        mps.push_back(MakeScaffolding2015Extender(dataset_info, lib_index, params, gp, cov_map, storage));
-                        break;
-                    }
-                    default:
-                        break;
-                }
-            }
-        }
-
-        result.insert(result.end(), pes.begin(), pes.end());
-        result.insert(result.end(), pes2015.begin(), pes2015.end());
-        result.insert(result.end(), pe_loops.begin(), pe_loops.end());
-        result.insert(result.end(), pe_scafs.begin(), pe_scafs.end());
-        result.insert(result.end(), mps.begin(), mps.end());
-        pes.clear();
-        pe_loops.clear();
-        pe_scafs.clear();
-        pes2015.clear();
-        mps.clear();
-    }
-
-    INFO("Using " << pe_libs << " paired-end " << LibStr(pe_libs));
-    INFO("Using " << scf_pe_libs << " paired-end scaffolding " << LibStr(scf_pe_libs));
-    INFO("Using " << mp_libs << " mate-pair " << LibStr(mp_libs));
-    INFO("Using " << single_read_libs << " single read " << LibStr(single_read_libs));
-    INFO("Scaffolder is " << (pset.scaffolder_options.enabled ? "on" : "off"));
-
-    if (pset.use_coordinated_coverage) {
-        INFO("Using additional coordinated coverage extender");
-        result.push_back(MakeCoordCoverageExtender(dataset_info, 0 /* lib index */, params, gp, cov_map));
-    }
-
-    PrintExtenders(result);
-    return result;
-}
-
-inline shared_ptr<scaffold_graph::ScaffoldGraph> ConstructScaffoldGraph(const config::dataset& dataset_info,
-                                                                        const pe_config::ParamSetT::ScaffoldGraphParamsT& params,
-                                                                        const conj_graph_pack& gp,
-                                                                        const ScaffoldingUniqueEdgeStorage& edge_storage) {
-    using namespace scaffold_graph;
-    vector<shared_ptr<ConnectionCondition>> conditions;
-
-    INFO("Constructing connections");
-    LengthEdgeCondition edge_condition(gp.g, edge_storage.GetMinLength());
-
-    for (size_t lib_index = 0; lib_index < dataset_info.reads.lib_count(); ++lib_index) {
-        const auto& lib = dataset_info.reads[lib_index];
-        if (lib.is_paired()) {
-            shared_ptr<PairedInfoLibrary> paired_lib;
-            if (IsForMPExtender(lib))
-                paired_lib = MakeNewLib(lib, gp.g, gp.paired_indices[lib_index]);
-            else if (IsForPEExtender(lib))
-                paired_lib = MakeNewLib(lib, gp.g, gp.clustered_indices[lib_index]);
-            else
-                INFO("Unusable paired lib #" << lib_index);
-            conditions.push_back(make_shared<AdvancedPairedConnectionCondition>(gp.g, paired_lib, lib_index,
-                                                                                params.always_add,
-                                                                                params.never_add,
-                                                                                params.relative_threshold));
-        }
-    }
-    if (params.graph_connectivity) {
-        auto as_con = make_shared<AssemblyGraphConnectionCondition>(gp.g, params.max_path_length, edge_storage);
-        for (auto e_iter = gp.g.ConstEdgeBegin(); !e_iter.IsEnd(); ++e_iter) {
-            if (edge_condition.IsSuitable(*e_iter))
-                as_con->AddInterestingEdge(*e_iter);
-        }
-        conditions.push_back(as_con);
-    }
-    INFO("Total conditions " << conditions.size());
-
-    INFO("Constructing scaffold graph from set of size " << edge_storage.GetSet().size());
-
-    DefaultScaffoldGraphConstructor constructor(gp.g, edge_storage.GetSet(), conditions, edge_condition);
-    auto scaffoldGraph = constructor.Construct();
-
-    INFO("Scaffold graph contains " << scaffoldGraph->VertexCount() << " vertices and " << scaffoldGraph->EdgeCount() << " edges");
-    return scaffoldGraph;
-}
-
-
-inline void PrintScaffoldGraph(shared_ptr<scaffold_graph::ScaffoldGraph> scaffoldGraph,
-                               const set<EdgeId>& main_edge_set,
-                               const string& filename) {
-    using namespace scaffold_graph;
-
-    auto vcolorer = make_shared<ScaffoldVertexSetColorer>(main_edge_set);
-    auto ecolorer = make_shared<ScaffoldEdgeColorer>();
-    CompositeGraphColorer <ScaffoldGraph> colorer(vcolorer, ecolorer);
-
-    INFO("Visualizing single grpah");
-    ScaffoldGraphVisualizer singleVisualizer(*scaffoldGraph, false);
-    std::ofstream single_dot;
-    single_dot.open((filename + "_single.dot").c_str());
-    singleVisualizer.Visualize(single_dot, colorer);
-    single_dot.close();
-
-    INFO("Visualizing paired grpah");
-    ScaffoldGraphVisualizer pairedVisualizer(*scaffoldGraph, true);
-    std::ofstream paired_dot;
-    paired_dot.open((filename + "_paired.dot").c_str());
-    pairedVisualizer.Visualize(paired_dot, colorer);
-    paired_dot.close();
-
-    INFO("Printing scaffold grpah");
-    std::ofstream data_stream;
-    data_stream.open((filename + ".data").c_str());
-    scaffoldGraph->Print(data_stream);
-    data_stream.close();
-}
-
-
-inline size_t FindOverlapLenForStage(PathExtendStage stage, const config::dataset& dataset_info) {
-    size_t res = 0;
-    for (const auto& lib : dataset_info.reads) {
-        if (IsForPEExtender(lib) && IsPEStage(stage)) {
-            res = max(res, (size_t) lib.data().insert_size_right_quantile);
-        } else if (IsForShortLoopExtender(lib)) {
-            res = max(res, (size_t) lib.data().insert_size_right_quantile);
-        } else if (IsForMPExtender(lib) && IsMPStage(stage)) {
-            res = max(res, (size_t) lib.data().insert_size_right_quantile);
-        }
-    }
-    return res;
-}
-
-inline bool MPLibsExist(const config::dataset& dataset_info) {
-    for (const auto& lib : dataset_info.reads)
-        if (IsForMPExtender(lib))
-            return true;
-
-    return false;
-}
-
-inline void CountMisassembliesWithReference(debruijn_graph::GenomeConsistenceChecker& genome_checker, const PathContainer& paths) {
-    size_t total_mis = 0 , gap_mis = 0;
-    genome_checker.SpellGenome();
-    for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-        BidirectionalPath *path = iter.get();
-        auto map_res = genome_checker.CountMisassemblies(*path);
-        if (map_res.misassemblies > 0) {
-            INFO ("there are " << map_res.misassemblies << " misassemblies in path: ");
-            path->PrintInfo();
-            total_mis += map_res.misassemblies;
-        }
-        if (map_res.wrong_gap_size > 0) {
-            INFO ("there are " << map_res.wrong_gap_size << " wrong gaps in path: ");
-            path->PrintInfo();
-            gap_mis += map_res.wrong_gap_size;
-        }
-    }
-    INFO ("In total found " << total_mis << " misassemblies " << " and " << gap_mis << " gaps.");
-}
-
-inline ScaffoldingUniqueEdgeStorage FillUniqueEdgeStorage(const conj_graph_pack& gp,
-                                                          const config::dataset& dataset_info,
-                                                          size_t& min_unique_length,
-                                                          double& unique_variation,
-                                                          bool autodetect) {
-
-    ScaffoldingUniqueEdgeStorage main_unique_storage;
-    //Setting scaffolding2015 parameters
-    if (autodetect) {
-        INFO("Autodetecting unique edge set parameters...");
-        bool pe_found = false;
-        //TODO constants
-        size_t min_MP_IS = 10000;
-        for (size_t i = 0; i < dataset_info.reads.lib_count(); ++i) {
-
-            if (IsForPEExtender(dataset_info.reads[i])) {
-                pe_found = true;
-            }
-            if (IsForMPExtender(dataset_info.reads[i])) {
-                min_MP_IS = min(min_MP_IS, (size_t) dataset_info.reads[i].data().mean_insert_size);
-            }
-        }
-        if (pe_found) {
-            //TODO constants
-            unique_variation = 0.5;
-            INFO("PE lib found, we believe in coverage");
-        } else {
-            unique_variation = 50;
-            INFO("No paired libs found, we do not believe in coverage");
-        }
-        min_unique_length = min_MP_IS;
-        INFO("Minimal unique edge length set to the smallest MP library IS: " << min_unique_length);
-
-    } else {
-        INFO("Unique edge set constructed with parameters from config : length " << min_unique_length
-                 << " variation " << unique_variation);
-    }
-    ScaffoldingUniqueEdgeAnalyzer unique_edge_analyzer(gp, min_unique_length, unique_variation);
-    unique_edge_analyzer.FillUniqueEdgeStorage(main_unique_storage);
-
-    return main_unique_storage;
-}
-
-
-inline void ResolveRepeatsPe(const config::dataset& dataset_info,
-                             const PathExtendParamsContainer& params,
-                             conj_graph_pack& gp) {
-
-    INFO("ExSPAnder repeat resolving tool started");
-    const pe_config::ParamSetT &pset = params.pset;
-
-    ScaffoldingUniqueEdgeStorage main_unique_storage;
-    auto sc_mode = pset.sm;
-    auto min_unique_length = pset.scaffolding2015.min_unique_length;
-    auto unique_variaton = pset.scaffolding2015.unique_coverage_variation;
-    bool detect_repeats_online = !(IsScaffolder2015Enabled(sc_mode) || params.mode == config::pipeline_type::meta);
-
-    //Fill the storage to enable unique edge check
-    if (IsScaffolder2015Enabled(sc_mode)) {
-        main_unique_storage = FillUniqueEdgeStorage(gp, dataset_info,
-                                                    min_unique_length,
-                                                    unique_variaton,
-                                                    pset.scaffolding2015.autodetect);
-    }
-
-    make_dir(params.output_dir);
-    make_dir(params.etc_dir);
-
-
-    //Scaffold graph
-    shared_ptr<scaffold_graph::ScaffoldGraph> scaffoldGraph;
-    if (pset.scaffold_graph_params.construct) {
-        scaffoldGraph = ConstructScaffoldGraph(dataset_info, params.pset.scaffold_graph_params, gp, main_unique_storage);
-        if (pset.scaffold_graph_params.output) {
-            PrintScaffoldGraph(scaffoldGraph, main_unique_storage.GetSet(), params.etc_dir + "scaffold_graph");
-        }
-    }
-
-
-    DefaultContigCorrector<ConjugateDeBruijnGraph> corrector(gp.g);
-    DefaultContigConstructor<ConjugateDeBruijnGraph> constructor(gp.g, corrector);
-    ContigWriter writer(gp.g, constructor, gp.components, params.mode == config::pipeline_type::plasmid);
-
-
-//make pe + long reads extenders
-    GraphCoverageMap cover_map(gp.g);
-    INFO("SUBSTAGE = paired-end libraries")
-    PathExtendStage exspander_stage = PathExtendStage::PEStage;
-    vector<shared_ptr<PathExtender> > all_libs =
-        MakeAllExtenders(exspander_stage, dataset_info, params, gp, cover_map, main_unique_storage);
-
-    //Parameters are subject to change
-    size_t max_is_right_quantile = max(FindOverlapLenForStage(exspander_stage, dataset_info), gp.g.k() + 100);
-    size_t min_edge_len = 100;
-    size_t max_edge_diff_pe = /*cfg::get().mode == config::pipeline_type::rna ? 0 :*/ max_is_right_quantile;
-
-    shared_ptr<CompositeExtender> mainPE = make_shared<CompositeExtender>(gp.g, cover_map, all_libs,
-                                                                          main_unique_storage,
-                                                                          max_is_right_quantile,
-                                                                          pset.extension_options.max_repeat_length,
-                                                                          detect_repeats_online);
-
-//extend pe + long reads
-    PathExtendResolver resolver(gp.g);
-    auto seeds = resolver.makeSimpleSeeds();
-    DebugOutputPaths(gp, params, seeds, "init_paths");
-    seeds.SortByLength();
-    INFO("Growing paths using paired-end and long single reads");
-    INFO("Multi path extend is " << (cfg::get().pe_params.param_set.multi_path_extend ? "on" : "off"))
-    INFO("Overlap removal is " << (cfg::get().pe_params.param_set.remove_overlaps ? "on" : "off"))
-    auto paths = resolver.extendSeeds(seeds, *mainPE);
-    paths.SortByLength();
-    DebugOutputPaths(gp, params, paths, "pe_before_overlap");
-
-    PathContainer clone_paths;
-    GraphCoverageMap clone_map(gp.g);
-    bool mp_exist = MPLibsExist(dataset_info);
-
-    if (mp_exist) {
-        ClonePathContainer(paths, clone_paths, clone_map);
-    }
-
-    exspander_stage = PathExtendStage::PEPolishing;
-    all_libs = MakeAllExtenders(exspander_stage, dataset_info, params, gp, cover_map, main_unique_storage);
-    mainPE = make_shared<CompositeExtender>(gp.g, cover_map, all_libs,
-                                            main_unique_storage,
-                                            max_is_right_quantile,
-                                            pset.extension_options.max_repeat_length,
-                                            detect_repeats_online);
-
-    //We do not run overlap removal in 2015 mode
-    if (!IsScaffolder2015Enabled(sc_mode))
-        FinalizePaths(params, paths, gp.g, cover_map, min_edge_len, max_edge_diff_pe);
-    if (params.output_broken_scaffolds) {
-        OutputBrokenScaffolds(paths, params, (int) gp.g.k(), writer,
-                              params.output_dir + (mp_exist ? "pe_contigs" : params.broken_contigs));
-    }
-    DebugOutputPaths(gp, params, paths, "pe_before_traverse");
-    if (params.traverse_loops) {
-        TraverseLoops(paths, cover_map, mainPE);
-        FinalizePaths(params, paths, gp.g, cover_map, min_edge_len, max_edge_diff_pe);
-    }
-    DebugOutputPaths(gp, params, paths, (mp_exist ? "pe_final_paths" : "final_paths"));
-    writer.OutputPaths(paths, params.output_dir + (mp_exist ? "pe_scaffolds" : params.contigs_name));
-
-    cover_map.Clear();
-    seeds.DeleteAllPaths();
-    paths.DeleteAllPaths();
-    if (!mp_exist) {
-        return;
-    }
-
-//MP
-    DebugOutputPaths(gp, params, clone_paths, "mp_before_extend");
-
-    INFO("SUBSTAGE = mate-pair libraries ")
-    exspander_stage = PathExtendStage::MPStage;
-    all_libs.clear();
-    max_is_right_quantile = FindOverlapLenForStage(exspander_stage, dataset_info);
-    PathContainer mp_paths(clone_paths);
-
-    if (IsScaffolder2015Enabled(sc_mode)) {
-        //TODO: constants
-        for (auto cur_length = min_unique_length; cur_length > 500; cur_length -= 500) {
-            ScaffoldingUniqueEdgeStorage current_unique_storage;
-            ScaffoldingUniqueEdgeAnalyzer unique_edge_analyzer(gp, cur_length, unique_variaton);
-            unique_edge_analyzer.FillUniqueEdgeStorage(current_unique_storage);
-            all_libs = MakeAllExtenders(exspander_stage, dataset_info, params, gp, clone_map, current_unique_storage, clone_paths);
-            shared_ptr<CompositeExtender> mp_main_pe = make_shared<CompositeExtender>(gp.g, clone_map, all_libs,
-                                                                                      main_unique_storage,
-                                                                                      max_is_right_quantile,
-                                                                                      pset.extension_options.max_repeat_length,
-                                                                                      detect_repeats_online);
-            INFO("Growing paths using mate-pairs unique length " << cur_length);
-            mp_paths = resolver.extendSeeds(mp_paths, *mp_main_pe);
-            DebugOutputPaths(gp, params, mp_paths, "mp_before_overlap_" + std::to_string(cur_length));
-        }
-    } else {
-        all_libs = MakeAllExtenders(exspander_stage, dataset_info, params, gp, clone_map, main_unique_storage, clone_paths);
-        shared_ptr<CompositeExtender> mp_main_pe = make_shared<CompositeExtender>(gp.g, clone_map, all_libs,
-                                                                                  main_unique_storage,
-                                                                                  max_is_right_quantile,
-                                                                                  pset.extension_options.max_repeat_length,
-                                                                                  detect_repeats_online);
-        INFO("Growing paths using mate-pairs");
-        mp_paths = resolver.extendSeeds(clone_paths, *mp_main_pe);
-
-        DebugOutputPaths(gp, params, mp_paths, "mp_before_overlap");
-        FinalizePaths(params, mp_paths, gp.g, clone_map, max_is_right_quantile, max_is_right_quantile, true);
-    }
-    DebugOutputPaths(gp, params, mp_paths, "mp_final_paths");
-    DEBUG("Paths are grown with mate-pairs");
-
-//MP end
-
-//pe again
-    INFO("SUBSTAGE = polishing paths")
-    exspander_stage = PathExtendStage::FinalizingPEStage;
-    all_libs.clear();
-    all_libs = MakeAllExtenders(exspander_stage, dataset_info, params, gp, clone_map, main_unique_storage);
-    max_is_right_quantile = FindOverlapLenForStage(exspander_stage, dataset_info);
-    shared_ptr<CompositeExtender> last_extender = make_shared<CompositeExtender>(gp.g, clone_map, all_libs,
-                                                                                 main_unique_storage,
-                                                                                 max_is_right_quantile,
-                                                                                 pset.extension_options.max_repeat_length,
-                                                                                 detect_repeats_online);
-
-    auto last_paths = resolver.extendSeeds(mp_paths, *last_extender);
-    DebugOutputPaths(gp, params, last_paths, "mp2_before_overlap");
-
-    exspander_stage = PathExtendStage::FinalPolishing;
-    all_libs = MakeAllExtenders(exspander_stage, dataset_info, params, gp, clone_map, main_unique_storage);
-    last_extender = make_shared<CompositeExtender>(gp.g, clone_map, all_libs,
-                                                   main_unique_storage,
-                                                   max_is_right_quantile,
-                                                   pset.extension_options.max_repeat_length,
-                                                   detect_repeats_online);
-    if (!IsScaffolder2015Enabled(sc_mode)) {
-        FinalizePaths(params, last_paths, gp.g, clone_map, min_edge_len, max_is_right_quantile);
-        DebugOutputPaths(gp, params, last_paths, "mp2_before_traverse");
-    }
-
-    TraverseLoops(last_paths, clone_map, last_extender);
-    FinalizePaths(params, last_paths, gp.g, clone_map, min_edge_len, max_is_right_quantile);
-
-//result
-    if (params.output_broken_scaffolds) {
-        OutputBrokenScaffolds(last_paths, params, (int) gp.g.k(), writer, params.output_dir + params.broken_contigs);
-    }
-    debruijn_graph::GenomeConsistenceChecker genome_checker (gp, main_unique_storage, 1000, 0.2);
-    DebugOutputPaths(gp, params, last_paths, "mp2_final_paths");
-    writer.OutputPaths(last_paths, params.output_dir + params.contigs_name);
-    if (gp.genome.size() > 0)
-        CountMisassembliesWithReference(genome_checker, last_paths);
-    //FinalizeUniquenessPaths();
-
-//TODO:: destructor?
-    last_paths.DeleteAllPaths();
-    seeds.DeleteAllPaths();
-    mp_paths.DeleteAllPaths();
-    clone_paths.DeleteAllPaths();
-
-    INFO("ExSPAnder repeat resolving tool finished");
-}
-
-} /* path_extend */
-
-
-
-#endif /* PATH_EXTEND_LAUNCH_HPP_ */
diff --git a/src/modules/algorithms/path_extend/pe_io.hpp b/src/modules/algorithms/path_extend/pe_io.hpp
deleted file mode 100644
index a31623c..0000000
--- a/src/modules/algorithms/path_extend/pe_io.hpp
+++ /dev/null
@@ -1,290 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#ifndef PE_IO_HPP_
-#define PE_IO_HPP_
-
-
-#include "algorithms/genome_consistance_checker.hpp"
-#include "assembly_graph/paths/bidirectional_path.hpp"
-#include "assembly_graph/graph_support/contig_output.hpp"
-#include "assembly_graph/components/connected_component.hpp"
-#include "io/reads_io/osequencestream.hpp"
-#include <stack>
-#include <algorithm>
-
-namespace path_extend {
-
-using namespace debruijn_graph;
-
-class ContigWriter {
-protected:
-    DECL_LOGGER("PathExtendIO")
-
-protected:
-    const Graph& g_;
-    ContigConstructor<Graph> &constructor_;
-    size_t k_;
-    map<EdgeId, ExtendedContigIdT> ids_;
-    const ConnectedComponentCounter &c_counter_;
-    bool plasmid_contig_naming_;
-
-    //TODO: add constructor
-    string ToString(const BidirectionalPath& path) const {
-        stringstream ss;
-        if (path.IsInterstrandBulge() && path.Size() == 1) {
-            ss << constructor_.construct(path.Back()).first.substr(k_, g_.length(path.Back()) - k_);
-            return ss.str();
-        }
-
-        if (!path.Empty()) {
-            ss << constructor_.construct(path[0]).first.substr(0, k_);
-        }
-
-
-        size_t i = 0;
-        while (i < path.Size()) {
-            int gap = i == 0 ? 0 : path.GapAt(i);
-            if (gap > (int) k_) {
-                for (size_t j = 0; j < gap - k_; ++j) {
-                    ss << "N";
-                }
-                ss << constructor_.construct(path[i]).first;
-            }
-            else {
-                int overlapLen = (int) k_ - gap;
-                if (overlapLen >= (int) g_.length(path[i]) + (int) k_) {
-                    overlapLen -= (int) g_.length(path[i]) + (int) k_;
-                    ++i;
-                    //skipping overlapping edges
-                    while (i < path.Size() && overlapLen >= (int) g_.length(path[i]) + path.GapAt(i)) {
-                        overlapLen -= (int) g_.length(path[i]) + path.GapAt(i);
-                        ++i;
-                    }
-                    if (i == path.Size()) {
-                        break;
-                    }
-
-                    overlapLen = overlapLen + (int) k_ - path.GapAt(i);
-                    if(overlapLen < 0) {
-                        for (size_t j = 0; j < abs(overlapLen); ++j) {
-                            ss << "N";
-                        }
-                        overlapLen = 0;
-                    }
-                }
-                auto temp_str = g_.EdgeNucls(path[i]).Subseq(overlapLen).str();
-                if(i != path.Size() - 1) {
-                    for(size_t j = 0 ; j < path.TrashPreviousAt(i + 1); ++j) {
-                        temp_str.pop_back();
-                        if(temp_str.size() == 0) {
-                            break;
-                        }
-                    }
-                }
-                ss << temp_str;
-            }
-            ++i;
-        }
-        return ss.str();
-    }
-
-    string ToFASTGString(const BidirectionalPath& path) const {
-        if (path.Empty())
-            return "";
-        string res = ids_.at(path.Front()).short_id_;
-        for (size_t i = 1; i < path.Size(); ++i) {
-            if (g_.EdgeEnd(path[i - 1]) != g_.EdgeStart(path[i])) {
-                res += ";\n" + ids_.at(path[i]).short_id_;
-            }
-            else {
-                res += "," + ids_.at(path[i]).short_id_;
-            }
-        }
-        return res;
-    }
-
-
-public:
-    ContigWriter(const Graph& g,
-                 ContigConstructor<Graph> &constructor,
-                 const ConnectedComponentCounter &c_counter,
-                 bool plasmid_contig_naming = false):
-        g_(g), constructor_(constructor), k_(g.k()),
-        ids_(), c_counter_(c_counter),
-        plasmid_contig_naming_(plasmid_contig_naming)
-    {
-        MakeContigIdMap(g_, ids_, c_counter, "NODE");
-    }
-
-
-    void WriteEdges(const string &filename) const {
-        INFO("Outputting edges to " << filename);
-        io::osequencestream_with_id oss(filename);
-
-        set<EdgeId> included;
-        for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
-            if (included.count(*iter) == 0) {
-                oss.setCoverage(g_.coverage(*iter));
-                oss.setID((int) g_.int_id(*iter));
-                oss << g_.EdgeNucls(*iter);
-
-                included.insert(*iter);
-                included.insert(g_.conjugate(*iter));
-            }
-        }
-        DEBUG("Contigs written");
-    }
-
-
-    void WritePaths(const PathContainer &paths, const string &filename) const {
-        INFO("Outputting path data to " << filename);
-        std::ofstream oss;
-        oss.open(filename.c_str());
-        int i = 1;
-        oss << paths.size() << endl;
-        for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-            //oss << i << endl;
-            i++;
-            BidirectionalPath* path = iter.get();
-            if (path->GetId() % 2 != 0) {
-                path = path->GetConjPath();
-            }
-            oss << "PATH " << path->GetId() << " " << path->Size() << " " << path->Length() + k_ << endl;
-            for (size_t j = 0; j < path->Size(); ++j) {
-                oss << g_.int_id(path->At(j)) << " " << g_.length(path->At(j)) <<  " " << path->GapAt(j) <<  " " << path->TrashPreviousAt(j) <<  " " << path->TrashCurrentAt(j) << endl;
-            }
-            //oss << endl;
-        }
-        oss.close();
-        DEBUG("Edges written");
-    }
-
-    void LoadPaths(PathContainer &paths, GraphCoverageMap &cover_map, const string &filename) const {
-        paths.clear();
-        map<size_t, EdgeId> int_ids;
-        for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
-            int_ids.insert(make_pair(g_.int_id(*iter), *iter));
-        }
-
-        std::ifstream iss;
-        iss.open(filename);
-        size_t psize;
-        iss >> psize;
-        for(size_t i = 0; i < psize && !iss.eof(); ++i) {
-            string s;
-            size_t id;
-            size_t size;
-            size_t len;
-            iss >> s >> id >> size >> len;
-            VERIFY(s == "PATH");
-
-            BidirectionalPath * path = new BidirectionalPath(g_);
-            BidirectionalPath * conjugatePath = new BidirectionalPath(g_);
-            paths.AddPair(path, conjugatePath);
-            path->Subscribe(&cover_map);
-            conjugatePath->Subscribe(&cover_map);
-            for (size_t j = 0; !iss.eof() && j < size; ++j) {
-                size_t eid;
-                size_t elen;
-                int gap;
-                uint32_t trash_prev;
-                uint32_t trash_current;
-
-                iss >> eid >> elen >> gap >> trash_prev >> trash_current;
-                Gap gap_struct(gap, trash_prev, trash_current);
-                EdgeId edge = int_ids[eid];
-                conjugatePath->PushBack(edge, gap_struct);
-                VERIFY(g_.length(edge) == elen);
-            }
-            VERIFY(path->Length() + k_ == len);
-        }
-        VERIFY(psize == paths.size());
-        iss.close();
-    }
-
-    void WritePathsToFASTA(const PathContainer &paths,
-                           const string &filename_base,
-                           bool write_fastg = true) const {
-
-        INFO("Writing contigs to " << filename_base);
-        io::osequencestream_simple oss(filename_base + ".fasta");
-
-        std::ofstream os_fastg;
-        if (write_fastg)
-            os_fastg.open((filename_base + ".paths").c_str());
-
-        int i = 0;
-        for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-            if (iter.get()->Length() <= 0)
-                continue;
-            i++;
-            DEBUG("NODE " << i);
-            BidirectionalPath* path = iter.get();
-            path->Print();
-            string contig_id;
-            string path_string = ToString(*path);
-            if (plasmid_contig_naming_) {
-                EdgeId e = path->At(0);
-                size_t component = c_counter_.GetComponent(e);
-                contig_id = io::MakeContigComponentId(i, path_string.length(), path->Coverage(), component);
-            } else {
-                contig_id = io::MakeContigId(i, path_string.length(), path->Coverage());
-            }
-            oss.set_header(contig_id);
-            if (write_fastg) {
-                os_fastg << contig_id<< endl;
-                os_fastg << ToFASTGString(*iter.get()) << endl;
-                os_fastg << contig_id << "'" << endl;
-                os_fastg << ToFASTGString(*iter.getConjugate()) << endl;
-            }
-            oss << path_string;
-        }
-        if (write_fastg)
-            os_fastg.close();
-        DEBUG("Contigs written");
-    }
-
-    void WriteFASTGPaths(const PathContainer& paths, const string& filename) const {
-        INFO("Writing FASTG paths to " << filename);
-        std::ofstream oss(filename.c_str());
-        for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-            if (iter.get()->Length() <= 0)
-                continue;
-            oss << ToFASTGString(*iter.get()) << endl;
-            oss << ToFASTGString(*iter.getConjugate()) << endl;
-        }
-        oss.close();
-    }
-
-    void OutputPaths(const PathContainer& paths, const string& filename_base) const {
-        WritePathsToFASTA(paths, filename_base);
-    }
-
-};
-
-
-class PathInfoWriter {
-protected:
-    DECL_LOGGER("PathExtendIO")
-
-public:
-
-    void WritePaths(const PathContainer &paths, const string &filename){
-        std::ofstream oss(filename.c_str());
-
-        for (auto iter = paths.begin(); iter != paths.end(); ++iter) {
-            iter.get()->Print(oss);
-        }
-
-        oss.close();
-    }
-};
-
-}
-
-#endif /* PE_IO_HPP_ */
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/connection_condition2015.cpp b/src/modules/algorithms/path_extend/scaffolder2015/connection_condition2015.cpp
deleted file mode 100644
index 14ba367..0000000
--- a/src/modules/algorithms/path_extend/scaffolder2015/connection_condition2015.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-#include "connection_condition2015.hpp"
-namespace path_extend {
-
-PairedLibConnectionCondition::PairedLibConnectionCondition(const debruijn_graph::Graph &graph,
-                             shared_ptr <PairedInfoLibrary> lib,
-                             size_t lib_index,
-                             size_t min_read_count) :
-        graph_(graph),
-        lib_(lib),
-        lib_index_(lib_index),
-        min_read_count_(min_read_count),
-//TODO reconsider condition
-        left_dist_delta_(5 * (int) lib_->GetISMax()),
-        right_dist_delta_(max(5 * (int) lib_->GetIsVar(), int(lib_->is_))) {
-}
-
-size_t PairedLibConnectionCondition::GetLibIndex() const {
-    return lib_index_;
-}
-
-set <debruijn_graph::EdgeId> PairedLibConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e) const {
-    set <debruijn_graph::EdgeId> all_edges;
-    int e_length = (int) graph_.length(e);
-    lib_->FindJumpEdges(e, all_edges,  e_length - left_dist_delta_, e_length + right_dist_delta_);
-
-    set <debruijn_graph::EdgeId> result;
-    for (auto edge : all_edges) {
-        if (edge != e && edge != graph_.conjugate(e) &&
-            math::ge(GetWeight(e, edge), (double) min_read_count_)) {
-            result.insert(edge);
-        }
-    }
-    return result;
-}
-
-double PairedLibConnectionCondition::GetWeight(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const {
-    int e_length = (int) graph_.length(e1);
-    return lib_->CountPairedInfo(e1, e2, e_length - left_dist_delta_, e_length + right_dist_delta_);
-}
-
-AdvancedPairedConnectionCondition::AdvancedPairedConnectionCondition(const debruijn_graph::Graph &graph,
-                                  shared_ptr <PairedInfoLibrary> lib,
-                                  size_t lib_index,
-                                  size_t always_add,
-                                  size_t never_add,
-                                  double relative_threshold):
-    PairedLibConnectionCondition(graph, lib, lib_index, never_add),
-    always_add_(always_add),
-    never_add_(never_add),
-    relative_threshold_(relative_threshold) {}
-
-set <debruijn_graph::EdgeId> AdvancedPairedConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e) const {
-    set <debruijn_graph::EdgeId> all_edges;
-    int e_length = (int) graph_.length(e);
-    lib_->FindJumpEdges(e, all_edges,  e_length - left_dist_delta_, e_length + right_dist_delta_);
-
-    double max_weight = 0;
-    for (auto edge : all_edges) {
-        if (edge != e && edge != graph_.conjugate(e)) {
-            double w = GetWeight(e, edge);
-            if (math::gr(w, max_weight))
-                max_weight = w;
-        }
-    }
-    double threshold = std::max((double) never_add_, std::min((double) always_add_, max_weight * relative_threshold_));
-
-    set <debruijn_graph::EdgeId> result;
-    for (auto edge : all_edges) {
-        if (edge != e && edge != graph_.conjugate(e) &&
-            math::ge(GetWeight(e, edge), threshold)) {
-            result.insert(edge);
-        }
-    }
-    return result;
-}
-
-
-//TODO: We use same part of index twice, is it necessary?
-int PairedLibConnectionCondition::GetMedianGap(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const {
-    std::vector<int> distances;
-    std::vector<double> weights;
-    int e_length = (int) graph_.length(e1);
-    lib_->CountDistances(e1, e2, distances, weights);
-    std::vector<pair<int, double> >h(distances.size());
-    for (size_t i = 0; i< distances.size(); i++) {
-        if (distances[i] >= e_length - left_dist_delta_ && distances[i] <= e_length + right_dist_delta_)
-            h.push_back(std::make_pair(distances[i], weights[i]));
-    }
-//TODO: is it really necessary?
-    std::sort(h.begin(), h.end());
-    double sum = 0.0;
-    double sum2 = 0.0;
-    for (size_t j = 0; j< h.size(); ++j) {
-        sum += h[j].second;
-    }
-    size_t i = 0;
-    for (; i < h.size(); ++i) {
-        sum2 += h[i].second;
-        if (sum2 * 2 > sum)
-            break;
-    }
-    return (int) round(h[i].first - e_length);
-}
-
-AssemblyGraphConnectionCondition::AssemblyGraphConnectionCondition(const debruijn_graph::Graph &g,
-                    size_t max_connection_length, const ScaffoldingUniqueEdgeStorage & unique_edges) :
-        g_(g), max_connection_length_(max_connection_length), interesting_edge_set_(unique_edges.GetSet()), stored_distances_() {
-}
-
-set <debruijn_graph::EdgeId> AssemblyGraphConnectionCondition::ConnectedWith(debruijn_graph::EdgeId e) const {
-    VERIFY_MSG(interesting_edge_set_.find(e)!= interesting_edge_set_.end(), " edge "<< e.int_id() << " not applicable for connection condition");
-    if (stored_distances_.find(e) != stored_distances_.end()) {
-        return stored_distances_[e];
-    }
-    stored_distances_.insert(make_pair(e, set<debruijn_graph::EdgeId>()));
-    for (auto connected: g_.OutgoingEdges(g_.EdgeEnd(e))) {
-        if (interesting_edge_set_.find(connected) != interesting_edge_set_.end()) {
-            stored_distances_[e].insert(connected);
-        }
-    }
-    DijkstraHelper<debruijn_graph::Graph>::BoundedDijkstra dijkstra(
-            DijkstraHelper<debruijn_graph::Graph>::CreateBoundedDijkstra(g_, max_connection_length_));
-    dijkstra.Run(g_.EdgeEnd(e));
-    for (auto v: dijkstra.ReachedVertices()) {
-        for (auto connected: g_.OutgoingEdges(v)) {
-            if (interesting_edge_set_.find(connected) != interesting_edge_set_.end() && dijkstra.GetDistance(v) < max_connection_length_) {
-                stored_distances_[e].insert(connected);
-            }
-        }
-    }
-    return stored_distances_[e];
-}
-void AssemblyGraphConnectionCondition::AddInterestingEdge(debruijn_graph::EdgeId e) {
-    interesting_edge_set_.insert(e);
-}
-double AssemblyGraphConnectionCondition::GetWeight(debruijn_graph::EdgeId, debruijn_graph::EdgeId) const {
-    return 1.0;
-}
-
-size_t AssemblyGraphConnectionCondition::GetLibIndex() const {
-    return (size_t) - 1;
-}
-
-}
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/connection_condition2015.hpp b/src/modules/algorithms/path_extend/scaffolder2015/connection_condition2015.hpp
deleted file mode 100644
index 0cfe58e..0000000
--- a/src/modules/algorithms/path_extend/scaffolder2015/connection_condition2015.hpp
+++ /dev/null
@@ -1,90 +0,0 @@
-
-#ifndef CONNECTION_CONDITION2015_HPP
-#define CONNECTION_CONDITION2015_HPP
-#include "algorithms/genome_consistance_checker.hpp"
-#include "dev_support/logger/logger.hpp"
-#include "algorithms/path_extend/paired_library.hpp"
-#include "assembly_graph/graph_support/scaff_supplementary.hpp"
-#include <map>
-#include <set>
-
-namespace path_extend {
-
-/* Connection condition are used by both scaffolder's extension chooser and scaffold graph */
-
-class ConnectionCondition {
-public:
-// Outputs the edges e is connected with.
-//TODO  performance issue: think about inside filtering. Return only unique connected edges?
-    virtual set <debruijn_graph::EdgeId> ConnectedWith(debruijn_graph::EdgeId e) const = 0;
-// Outputs the weight of the pair e1 and e2
-    virtual double GetWeight(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const = 0;
-    virtual size_t GetLibIndex() const = 0;
-    virtual ~ConnectionCondition() {
-    }
-};
-
-// Main (mate pair library) connection condition.
-class PairedLibConnectionCondition : public ConnectionCondition {
-protected:
-    const debruijn_graph::Graph &graph_;
-    shared_ptr <PairedInfoLibrary> lib_;
-    size_t lib_index_;
-//Minimal number of mate pairs to call connection sound
-    size_t min_read_count_;
-public:
-//Only paired info with gap between e1 and e2 between -left_dist_delta_ and right_dist_delta_ taken in account
-    int left_dist_delta_;
-    int right_dist_delta_;
-
-    PairedLibConnectionCondition(const debruijn_graph::Graph &graph,
-                                 shared_ptr <PairedInfoLibrary> lib,
-                                 size_t lib_index,
-                                 size_t min_read_count);
-    size_t GetLibIndex() const override;
-    set <debruijn_graph::EdgeId> ConnectedWith(debruijn_graph::EdgeId e) const override;
-    double GetWeight(debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const override;
-//Returns median gap size
-    int GetMedianGap (debruijn_graph::EdgeId e1, debruijn_graph::EdgeId e2) const;
-};
-
-//Advanced mate-pair connection condition
-class AdvancedPairedConnectionCondition: public PairedLibConnectionCondition {
-protected:
-    size_t always_add_;
-    size_t never_add_;
-    double relative_threshold_;
-
-public:
-    AdvancedPairedConnectionCondition(const debruijn_graph::Graph &graph,
-                                      shared_ptr <PairedInfoLibrary> lib,
-                                      size_t lib_index,
-                                      size_t always_add,
-                                      size_t never_add,
-                                      double relative_threshold);
-
-    set <debruijn_graph::EdgeId> ConnectedWith(debruijn_graph::EdgeId e) const override;
-
-};
-
-/*  Condition used to find connected in graph edges.
-*
-*/
-class AssemblyGraphConnectionCondition : public ConnectionCondition {
-protected:
-    const debruijn_graph::Graph &g_;
-//Maximal gap to the connection.
-    size_t max_connection_length_;
-    set<EdgeId> interesting_edge_set_;
-    mutable map <debruijn_graph::Graph::EdgeId, set<debruijn_graph::Graph::EdgeId>> stored_distances_;
-public:
-    AssemblyGraphConnectionCondition(const debruijn_graph::Graph &g, size_t max_connection_length,
-                                     const ScaffoldingUniqueEdgeStorage& unique_edges);
-    void AddInterestingEdge(debruijn_graph::EdgeId e);
-    set <debruijn_graph::EdgeId> ConnectedWith(debruijn_graph::EdgeId e) const override;
-    double GetWeight(debruijn_graph::EdgeId, debruijn_graph::EdgeId) const override;
-    size_t GetLibIndex() const override;
-};
-}
-
-#endif //PROJECT_CONNECTION_CONDITION2015_HPP
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/extension_chooser2015.hpp b/src/modules/algorithms/path_extend/scaffolder2015/extension_chooser2015.hpp
deleted file mode 100644
index f4ba49c..0000000
--- a/src/modules/algorithms/path_extend/scaffolder2015/extension_chooser2015.hpp
+++ /dev/null
@@ -1,59 +0,0 @@
-//
-// Created by lab42 on 8/26/15.
-//
-#pragma once
-
-#include "algorithms/path_extend/extension_chooser.hpp"
-#include "connection_condition2015.hpp"
-#include "algorithms/genome_consistance_checker.hpp"
-#include "dev_support/logger/logger.hpp"
-#include <map>
-#include <set>
-namespace path_extend {
-class ExtensionChooser2015: public ScaffoldingExtensionChooser {
-private:
-    const ScaffoldingUniqueEdgeStorage& unique_edges_;
-// for possible connections e1 and e2 if weight(e1) > relative_weight_threshold_ * weight(e2) then e2 will be ignored
-    double relative_weight_threshold_;
-    PairedLibConnectionCondition paired_connection_condition_;
-    AssemblyGraphConnectionCondition graph_connection_condition_;
-// weight < absolute_weight_threshold_ will be ignored
-    size_t absolute_weight_threshold_;
-// multiplicator for the pairs which are connected in graph.
-    double graph_connection_bonus_;
-
-protected:
-//If path contains no unique edges return -1
-    pair<EdgeId, int> FindLastUniqueInPath(const BidirectionalPath& path) const;
-//Find all possible next unique edges confirmed with mate-pair information. (absolute/relative)_weight_threshold_ used for filtering
-    EdgeContainer FindNextUniqueEdge(const EdgeId from) const;
-        DECL_LOGGER("ExtensionChooser2015")
-public:
-    ExtensionChooser2015(const Graph& g,
-                         shared_ptr<WeightCounter> wc,
-                         size_t lib_index,
-                         const ScaffoldingUniqueEdgeStorage& unique_edges,
-                         double cl_weight_threshold,
-                         double is_scatter_coeff,
-                         double relative_threshold):
-            //TODO: constants are subject to reconsider
-            ScaffoldingExtensionChooser(g, wc, cl_weight_threshold, is_scatter_coeff),
-            unique_edges_(unique_edges),
-            relative_weight_threshold_(relative_threshold),
-            paired_connection_condition_(g, wc->get_libptr(), lib_index, 0),
-            graph_connection_condition_(g, 2 * unique_edges_.GetMinLength(), unique_edges),
-            //TODO to congif!
-            absolute_weight_threshold_(2),
-            graph_connection_bonus_(2) {
-        INFO("ExtensionChooser2015 created");
-    }
-/* @param edges are really not used and left for compatibility
- * @returns possible next edge if there is unique one, else returns empty container
- *
- */
-
-    EdgeContainer Filter(const BidirectionalPath& path, const EdgeContainer& edges) const override;
-};
-
-
-}
diff --git a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp b/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp
deleted file mode 100644
index 2ed651c..0000000
--- a/src/modules/algorithms/path_extend/scaffolder2015/scaffold_graph_visualizer.hpp
+++ /dev/null
@@ -1,73 +0,0 @@
-//
-// Created by andrey on 21.09.15.
-//
-
-#ifndef PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
-#define PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
-
-#include "pipeline/graphio.hpp"
-#include "scaffold_graph.hpp"
-
-namespace path_extend { namespace scaffold_graph {
-
-using namespace omnigraph::visualization;
-
-
-class ScaffoldGraphLabeler : public GraphLabeler<ScaffoldGraph> {
-
-private:
-    const ScaffoldGraph &graph_;
-
-public:
-    ScaffoldGraphLabeler(const ScaffoldGraph &graph) : graph_(graph) {
-    }
-
-    string label(VertexId v) const;
-
-    string label(EdgeId e) const;
-};
-
-
-class ScaffoldEdgeColorer : public ElementColorer<ScaffoldGraph::EdgeId> {
-private:
-    static const map<size_t, string> color_map;
-
-    static const string default_color;
-
-public:
-    string GetValue(ScaffoldGraph::EdgeId e) const;
-};
-
-
-class ScaffoldVertexSetColorer : public ElementColorer<ScaffoldGraph::VertexId> {
- private:
-  set<ScaffoldGraph::VertexId> vertex_set_;
-
- public:
-  ScaffoldVertexSetColorer(const set<ScaffoldGraph::VertexId>& vertex_set): vertex_set_(vertex_set) {
-  }
-
-    string GetValue(ScaffoldGraph::VertexId v) const;
-};
-
-class ScaffoldGraphVisualizer {
-
-    const ScaffoldGraph &graph_;
-    const bool paired_;
-
-private:
-    void Visualize(GraphPrinter<ScaffoldGraph> &printer);
-
-public:
-    ScaffoldGraphVisualizer(const ScaffoldGraph &graph, bool paired = true) :
-            graph_(graph), paired_(paired) {
-    }
-
-    void Visualize(ostream &os, CompositeGraphColorer<ScaffoldGraph>& colorer);
-};
-
-} //scaffold_graph
-} //path_extend
-
-
-#endif //PROJECT_SCAFFOLD_GRAPH_VISUALIZER_HPP
diff --git a/src/modules/algorithms/path_extend/weight_counter.hpp b/src/modules/algorithms/path_extend/weight_counter.hpp
deleted file mode 100644
index a2d224b..0000000
--- a/src/modules/algorithms/path_extend/weight_counter.hpp
+++ /dev/null
@@ -1,544 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-/*
- * weight_counter.hpp
- *
- *  Created on: Feb 19, 2012
- *      Author: andrey
- */
-
-#ifndef WEIGHT_COUNTER_HPP_
-#define WEIGHT_COUNTER_HPP_
-
-#include "assembly_graph/paths/bidirectional_path.hpp"
-#include "paired_library.hpp"
-#include <algorithm>
-#include <boost/math/special_functions/fpclassify.hpp>
-
-namespace path_extend {
-
-inline int median(const vector<int>& dist, const vector<double>& w, int min, int max) {
-    VERIFY(dist.size() == w.size());
-    double S = 0;
-    for (size_t i = 0; i < w.size(); ++i) {
-        if (dist[i] >= min && dist[i] <= max)
-            S += w[i];
-    }
-    if (S == 0) {
-        DEBUG("Empty histogram");
-        return 0;
-    }
-
-    double sum = S;
-    for (size_t i = 0; i < w.size(); ++i) {
-        if (dist[i] >= min && dist[i] <= max) {
-            sum -= w[i];
-            if (sum <= S / 2) {
-                return dist[i];
-            }
-        }
-    }
-    VERIFY(false);
-    return -1;
-}
-
-struct EdgeWithPairedInfo {
-    size_t e_;
-    double pi_;
-
-    EdgeWithPairedInfo(size_t e_, double pi) :
-            e_(e_), pi_(pi) {
-
-    }
-};
-
-struct EdgeWithDistance {
-    EdgeId e_;
-    int d_;
-
-    EdgeWithDistance(EdgeId e, size_t d) :
-            e_(e), d_((int) d) {
-    }
-
-    struct DistanceComparator {
-        bool operator()(const EdgeWithDistance& e1, const EdgeWithDistance& e2) {
-            if (e1.d_ == e2.d_)
-                return e1.e_ < e2.e_;
-            return e1.d_ > e2.d_;
-        }
-    };
-
-    //static DistanceComparator comparator;
-};
-
-class IdealInfoProvider {
-public:
-    virtual ~IdealInfoProvider() {}
-
-    virtual std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const = 0;
-};
-
-class BasicIdealInfoProvider : public IdealInfoProvider {
-    const shared_ptr<PairedInfoLibrary> lib_;
-public:
-    BasicIdealInfoProvider(const shared_ptr<PairedInfoLibrary>& lib) : lib_(lib) {
-    }
-
-    std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const override {
-        std::vector<EdgeWithPairedInfo> covered;
-        for (int i = (int) path.Size() - 1; i >= 0; --i) {
-            double w = lib_->IdealPairedInfo(path[i], candidate,
-                                            (int) path.LengthAt(i));
-            //FIXME think if we need extremely low ideal weights
-            if (math::gr(w, 0.)) {
-                covered.push_back(EdgeWithPairedInfo(i, w));
-            }
-        }
-        return covered;
-    }
-};
-
-class WeightCounter {
-
-protected:
-    const Graph& g_;
-    const shared_ptr<PairedInfoLibrary> lib_;
-    bool normalize_weight_;
-    shared_ptr<IdealInfoProvider> ideal_provider_;
-
-public:
-
-    WeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib, 
-                  bool normalize_weight = true, 
-                  shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
-            g_(g), lib_(lib), normalize_weight_(normalize_weight), ideal_provider_(ideal_provider) {
-       if (!ideal_provider_) {
-           ideal_provider_ = make_shared<BasicIdealInfoProvider>(lib);
-       }
-    }
-
-    virtual std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
-                                    int gap = 0) const = 0;
-
-    virtual double CountWeight(const BidirectionalPath& path, EdgeId e,
-            const std::set<size_t>& excluded_edges = std::set<size_t>(), int gapLength = 0) const = 0;
-
-    const PairedInfoLibrary& lib() const {
-        return *lib_;
-    }
-
-    const shared_ptr<PairedInfoLibrary> get_libptr() const {
-        return lib_;
-    };
-
-private:
-    DECL_LOGGER("WeightCounter");
-};
-
-class ReadCountWeightCounter: public WeightCounter {
-
-    std::vector<EdgeWithPairedInfo> CountLib(const BidirectionalPath& path, EdgeId e,
-            int add_gap = 0) const {
-        std::vector<EdgeWithPairedInfo> answer;
-
-        for (const EdgeWithPairedInfo& e_w_pi : ideal_provider_->FindCoveredEdges(path, e)) {
-            double w = lib_->CountPairedInfo(path[e_w_pi.e_], e,
-                    (int) path.LengthAt(e_w_pi.e_) + add_gap);
-
-            if (normalize_weight_) {
-                w /= e_w_pi.pi_;
-            }
-            answer.push_back(EdgeWithPairedInfo(e_w_pi.e_, w));
-        }
-
-        return answer;
-    }
-
-public:
-
-    ReadCountWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
-                            bool normalize_weight = true, 
-                            shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
-            WeightCounter(g, lib, normalize_weight, ideal_provider) {
-    }
-
-    double CountWeight(const BidirectionalPath& path, EdgeId e, 
-                        const std::set<size_t>& excluded_edges, int gap) const override {
-        double weight = 0.0;
-
-        for (const auto& e_w_pi : CountLib(path, e, gap)) {
-            if (!excluded_edges.count(e_w_pi.e_)) {
-                weight += e_w_pi.pi_;
-            }
-        }
-
-        return weight;
-    }
-
-    std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
-                                    int gap = 0) const override {
-        std::set<size_t> answer;
-        for (const auto& e_w_pi : CountLib(path, e, gap)) {
-            if (math::gr(e_w_pi.pi_, 0.)) {
-                answer.insert(e_w_pi.e_);
-            }
-        }
-        
-        return answer;
-    }
-
-};
-
-class PathCoverWeightCounter: public WeightCounter {
-    double single_threshold_;
-
-    double TotalIdealNonExcluded(const std::vector<EdgeWithPairedInfo>& ideally_covered_edges, 
-                        const std::set<size_t>& excluded_edges) const {
-        double ideal_total = 0.0;
-
-        for (const EdgeWithPairedInfo& e_w_pi : ideally_covered_edges) {
-            if (!excluded_edges.count(e_w_pi.e_))
-                ideal_total += e_w_pi.pi_;
-        }
-
-        return ideal_total;
-    }
-
-    std::vector<EdgeWithPairedInfo> CountLib(const BidirectionalPath& path, EdgeId e,
-            const std::vector<EdgeWithPairedInfo>& ideally_covered_edges, int add_gap = 0) const {
-        std::vector<EdgeWithPairedInfo> answer;
-
-        for (const EdgeWithPairedInfo& e_w_pi : ideally_covered_edges) {
-            double ideal_weight = e_w_pi.pi_;
-
-            double weight = lib_->CountPairedInfo(
-                    path[e_w_pi.e_], e,
-                    (int) path.LengthAt(e_w_pi.e_) + add_gap);
-
-            if (normalize_weight_) {
-                weight /= ideal_weight;
-            }
-
-            if (math::ge(weight, single_threshold_)) {
-                answer.push_back(EdgeWithPairedInfo(e_w_pi.e_, ideal_weight));
-            }
-        }
-
-        return answer;
-    }
-
-public:
-
-    PathCoverWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
-                            bool normalize_weight = true, 
-                            double single_threshold = -1.,
-                            shared_ptr<IdealInfoProvider> ideal_provider = nullptr) :
-            WeightCounter(g, lib, normalize_weight, ideal_provider), single_threshold_(single_threshold) {
-        if (math::ls(single_threshold_, 0.)) {
-            single_threshold_ = lib_->GetSingleThreshold();
-        }
-    }
-
-    double CountWeight(const BidirectionalPath& path, EdgeId e,
-            const std::set<size_t>& excluded_edges, int gap) const override {
-        double lib_weight = 0.;
-        const auto ideal_coverage = ideal_provider_->FindCoveredEdges(path, e);
-
-        for (const auto& e_w_pi : CountLib(path, e, ideal_coverage, gap)) {
-            if (!excluded_edges.count(e_w_pi.e_)) {
-                lib_weight += e_w_pi.pi_;
-            }
-        }
-
-        double total_ideal_coverage = TotalIdealNonExcluded(ideal_coverage, excluded_edges);
-        return math::eq(total_ideal_coverage, 0.) ? 0. : lib_weight / total_ideal_coverage;
-    }
-
-    std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
-                                    int gap = 0) const override {
-        std::set<size_t> answer;
-        for (const auto& e_w_pi : CountLib(path, e, ideal_provider_->FindCoveredEdges(path, e), gap)) {
-            if (math::gr(e_w_pi.pi_, 0.)) {
-                answer.insert(e_w_pi.e_);
-            }
-        }
-        return answer;
-    }
-};
-
-class CoverageAwareIdealInfoProvider : public BasicIdealInfoProvider {
-    static constexpr double MAGIC_COEFF = 2.;
-    const Graph& g_;
-    size_t read_length_; 
-    size_t estimation_edge_length_;
-
-public:
-    //works for single lib only!!!
-    double EstimatePathCoverage(const BidirectionalPath& path) const  {
-        double answer = -1.0;
-        for (int i = (int) path.Size() - 1; i >= 0; --i) {
-            EdgeId e = path.At(i);
-            if (g_.length(e) > estimation_edge_length_) {
-                if (answer < 0 || g_.coverage(e) < answer) {
-                    answer = g_.coverage(e);
-                }
-            }
-        }
-        return answer;
-    }
-
-    CoverageAwareIdealInfoProvider(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
-                                    size_t read_length, size_t estimation_edge_length) : 
-                BasicIdealInfoProvider(lib), g_(g), read_length_(read_length), 
-                estimation_edge_length_(estimation_edge_length) {
-        VERIFY(read_length_ > g_.k());
-    }
-
-    std::vector<EdgeWithPairedInfo> FindCoveredEdges(const BidirectionalPath& path, EdgeId candidate) const override {
-        VERIFY(read_length_ != -1ul);
-        double estimated_coverage = EstimatePathCoverage(path);
-        VERIFY(math::gr(estimated_coverage, 0.));
-
-        double correction_coeff = estimated_coverage / ((double(read_length_) - double(g_.k())) * MAGIC_COEFF);
-
-        std::vector<EdgeWithPairedInfo> answer = BasicIdealInfoProvider::FindCoveredEdges(path, candidate);
-        for (auto& e_w_pi : answer) {
-            e_w_pi.pi_ *= correction_coeff;
-        }
-        return answer;
-    }
-};
-
-//FIXME optimize number of calls of EstimatePathCoverage(path)
-class MetagenomicWeightCounter: public WeightCounter {
-    static const size_t LENGTH_BOUND = 500;
-    shared_ptr<CoverageAwareIdealInfoProvider> cov_info_provider_;
-    shared_ptr<WeightCounter> normalizing_wc_;
-    shared_ptr<WeightCounter> raw_wc_;
-
-public:
-
-    //negative raw_threshold leads to the halt if no sufficiently long edges are in the path
-    MetagenomicWeightCounter(const Graph& g, const shared_ptr<PairedInfoLibrary>& lib,
-                             size_t read_length, double normalized_threshold, double raw_threshold,
-                             size_t estimation_edge_length = LENGTH_BOUND) :
-            WeightCounter(g, lib) {
-        cov_info_provider_ = make_shared<CoverageAwareIdealInfoProvider>(g, lib, read_length, estimation_edge_length);
-        normalizing_wc_ = make_shared<PathCoverWeightCounter>(g, lib, true, normalized_threshold, cov_info_provider_);
-        if (math::ge(raw_threshold, 0.)) {
-            raw_wc_ = make_shared<PathCoverWeightCounter>(g, lib, false, raw_threshold);
-        }
-    }
-
-    double CountWeight(const BidirectionalPath& path, EdgeId e,
-            const std::set<size_t>& excluded_edges, int gap = 0) const override {
-        if (math::gr(cov_info_provider_->EstimatePathCoverage(path), 0.)) {
-            return normalizing_wc_->CountWeight(path, e, excluded_edges, gap);
-        } else if (raw_wc_) {
-            return raw_wc_->CountWeight(path, e, excluded_edges, gap);
-        } else {
-            return 0.;
-        }
-    }
-
-    std::set<size_t> PairInfoExist(const BidirectionalPath& path, EdgeId e, 
-                                    int gap = 0) const override {
-        static std::set<size_t> empty;
-        if (math::gr(cov_info_provider_->EstimatePathCoverage(path), 0.)) {
-            return normalizing_wc_->PairInfoExist(path, e, gap);
-        } else if (raw_wc_) {
-            return raw_wc_->PairInfoExist(path, e, gap);
-        } else {
-            return empty;
-        }
-    }
-};
-
-class PathsWeightCounter {
-public:
-    PathsWeightCounter(const Graph& g, shared_ptr<PairedInfoLibrary> lib, size_t min_read_count);
-    PathsWeightCounter(const PathsWeightCounter& w);
-    map<size_t, double> FindPairInfoFromPath(
-            const BidirectionalPath& path1, size_t from1, size_t to1,
-            const BidirectionalPath& path2, size_t from2, size_t to2) const;
-    double CountPairInfo(const BidirectionalPath& path1, size_t from1,
-                         size_t to1, const BidirectionalPath& path2,
-                         size_t from2, size_t to2, bool normalize = true) const;
-    double CountPairInfo(const BidirectionalPath& path1, size_t from1,
-                         size_t to1, EdgeId edge, size_t gap) const;
-    void SetCommonWeightFrom(size_t iedge, double weight);
-    void ClearCommonWeight();
-    void FindJumpCandidates(EdgeId e, int min_dist, int max_dist, size_t min_len, set<EdgeId>& result) const;
-    void FindJumpEdges(EdgeId e, set<EdgeId>& candidates, int min_dist, int max_dist, vector<EdgeWithDistance>& result) const;
-    const shared_ptr<PairedInfoLibrary> GetLib() const {
-        return lib_;
-    }
-    bool HasPI(EdgeId e1, EdgeId e2, int dist) const;
-    bool HasPI(EdgeId e1, EdgeId e2, size_t dist_min, size_t dist_max) const;
-    double PI(EdgeId e1, EdgeId e2, int dist) const;
-    bool HasIdealPI(EdgeId e1, EdgeId e2, int dist) const;
-    double IdealPI(EdgeId e1, EdgeId e2, int dist) const;
-
-private:
-    void FindPairInfo(const BidirectionalPath& path1, size_t from1, size_t to1,
-                      const BidirectionalPath& path2, size_t from2, size_t to2,
-                      map<size_t, double>& pi, double& ideal_pi) const;
-    void FindPairInfo(EdgeId e1, EdgeId e2, size_t dist, double& ideal_w,
-                      double& result_w) const;
-
-    const Graph& g_;
-    shared_ptr<PairedInfoLibrary> lib_;
-    std::map<size_t, double> common_w_;
-    size_t min_read_count_;
-    DECL_LOGGER("WeightCounter");
-};
-
-inline PathsWeightCounter::PathsWeightCounter(const Graph& g, shared_ptr<PairedInfoLibrary>lib, size_t min_read_count):g_(g), lib_(lib), min_read_count_(min_read_count){
-
-}
-
-inline PathsWeightCounter::PathsWeightCounter(const PathsWeightCounter& w): g_(w.g_), lib_(w.lib_), min_read_count_(w.min_read_count_) {
-
-}
-
-inline double PathsWeightCounter::CountPairInfo(const BidirectionalPath& path1,
-                                         size_t from1, size_t to1,
-                                         const BidirectionalPath& path2,
-                                         size_t from2, size_t to2, bool normalize) const {
-    map<size_t, double> pi;
-    double ideal_pi = 0.0;
-    FindPairInfo(path1, from1, to1, path2, from2, to2,
-                                          pi, ideal_pi);
-    double result = 0.0;
-    double all_common = 0.0;
-    for (size_t i = from1; i < to1; ++i) {
-        if (common_w_.find(i) != common_w_.end()) {
-            all_common += common_w_.at(i);
-        }
-        result += pi[i];
-    }
-    DEBUG("ideal _pi " << ideal_pi << " common " << all_common << " result " << result);
-    ideal_pi -= all_common;
-    result -= all_common;
-    double total_result = math::gr(ideal_pi, 0.0) ? result / ideal_pi : 0.0;
-    total_result = math::gr(total_result, 0.0) ? total_result : 0.0;
-    DEBUG("ideal _pi " << ideal_pi << " result " << result << " total_result " << total_result);
-    return normalize ? total_result : result;
-}
-
-inline double PathsWeightCounter::CountPairInfo(const BidirectionalPath& path1,
-                                         size_t from1, size_t to1, EdgeId edge,
-                                         size_t gap) const {
-    double result = 0.0;
-    for (size_t i1 = from1; i1 < to1; ++i1) {
-        double ideal_w, w;
-        FindPairInfo(path1.At(i1), edge, gap + path1.LengthAt(i1), ideal_w, w);
-        result += w;
-    }
-    return result;
-}
-
-inline void PathsWeightCounter::FindPairInfo(const BidirectionalPath& path1,
-                                      size_t from1, size_t to1,
-                                      const BidirectionalPath& path2,
-                                      size_t from2, size_t to2,
-                                      map<size_t, double>& pi,
-                                      double& ideal_pi) const {
-    stringstream str;
-    for (size_t i = 0; i < path2.Size(); ++i) {
-        str << g_.int_id(path2.At(i)) << " ";
-    }
-    DEBUG("pair info for path " << str.str());
-    for (size_t i1 = from1; i1 < to1; ++i1) {
-        for (size_t i2 = from2; i2 < to2; ++i2) {
-            size_t dist = path1.LengthAt(i1) + path2.Length()
-                    - path2.LengthAt(i2);
-            double ideal_w = 0.0;
-            double w = 0.0;
-            FindPairInfo(path1.At(i1), path2.At(i2), dist, ideal_w, w);
-            ideal_pi += ideal_w;
-            if (pi.find(i1) == pi.end()) {
-                pi[i1] = 0;
-            }
-            pi[i1] += w;
-        }
-    }
-}
-
-inline void PathsWeightCounter::FindPairInfo(EdgeId e1, EdgeId e2, size_t dist,
-                                      double& ideal_w, double& result_w) const {
-    ideal_w = lib_->IdealPairedInfo(e1, e2, (int) dist, true);
-    result_w = 0.0;
-    if (ideal_w == 0.0) {
-        return;
-    }
-    if (HasPI(e1, e2, (int) dist)) {
-        result_w = ideal_w;
-    }
-}
-
-inline map<size_t, double> PathsWeightCounter::FindPairInfoFromPath(
-        const BidirectionalPath& path1, size_t from1, size_t to1,
-        const BidirectionalPath& path2, size_t from2, size_t to2) const {
-    map<size_t, double> pi;
-    double ideal_pi = 0;
-    FindPairInfo(path1, from1, to1, path2, from2, to2, pi, ideal_pi);
-    return pi;
-}
-
-inline void PathsWeightCounter::FindJumpCandidates(EdgeId e, int min_dist, int max_dist, size_t min_len, set<EdgeId>& result) const {
-    result.clear();
-    lib_->FindJumpEdges(e, result, min_dist, max_dist, min_len);
-}
-
-inline void PathsWeightCounter::FindJumpEdges(EdgeId e, set<EdgeId>& edges, int min_dist, int max_dist, vector<EdgeWithDistance>& result) const {
-    result.clear();
-
-    for (auto e2 = edges.begin(); e2 != edges.end(); ++e2) {
-        vector<int> distances;
-        vector<double> weights;
-        lib_->CountDistances(e, *e2, distances, weights);
-        int median_distance = median(distances, weights, min_dist, max_dist);
-
-        if (HasPI(e, *e2, median_distance)) {
-            result.push_back(EdgeWithDistance(*e2, median_distance));
-        }
-    }
-}
-
-inline void PathsWeightCounter::SetCommonWeightFrom(size_t iedge, double weight) {
-    common_w_[iedge] = weight;
-}
-
-inline void PathsWeightCounter::ClearCommonWeight() {
-    common_w_.clear();
-}
-
-inline double PathsWeightCounter::PI(EdgeId e1, EdgeId e2, int dist) const {
-    double w = lib_->CountPairedInfo(e1, e2, dist, true);
-    return w > (double) min_read_count_ ? w : 0.0;
-}
-
-inline bool PathsWeightCounter::HasPI(EdgeId e1, EdgeId e2, int dist) const {
-    return lib_->CountPairedInfo(e1, e2, dist, true) > (double)  min_read_count_;
-}
-
-inline bool PathsWeightCounter::HasIdealPI(EdgeId e1, EdgeId e2, int dist) const {
-    return lib_->IdealPairedInfo(e1, e2, dist, true) > 0.0;
-}
-
-inline double PathsWeightCounter::IdealPI(EdgeId e1, EdgeId e2, int dist) const {
-    return lib_->IdealPairedInfo(e1, e2, dist, true);
-}
-
-inline bool PathsWeightCounter::HasPI(EdgeId e1, EdgeId e2, size_t dist_min, size_t dist_max) const {
-    return lib_->CountPairedInfo(e1, e2, (int) dist_min, (int) dist_max) > min_read_count_;
-}
-};
-
-#endif /* WEIGHT_COUNTER_HPP_ */
diff --git a/src/modules/algorithms/simplification/complex_tip_clipper.hpp b/src/modules/algorithms/simplification/complex_tip_clipper.hpp
deleted file mode 100644
index 984cfd5..0000000
--- a/src/modules/algorithms/simplification/complex_tip_clipper.hpp
+++ /dev/null
@@ -1,158 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#pragma once
-
-#include <limits>
-
-#include "visualization/visualization.hpp"
-#include "compressor.hpp"
-#include "dominated_set_finder.hpp"
-
-
-namespace omnigraph{
-
-
-template<class Graph>
-class ComplexTipClipper {
-    typedef typename Graph::VertexId VertexId;
-    typedef typename Graph::EdgeId EdgeId;
-
-    Graph& g_;
-    double relative_coverage_treshold_;
-    size_t edge_length_treshold_;
-    size_t max_path_length_;
-    string pics_folder_;
-    std::function<void(const set<EdgeId>&)> removal_handler_;
-
-    bool CheckEdgeLenghts(const GraphComponent<Graph>& component) const {
-        for(auto e : component.edges()) {
-            if(g_.length(e) > edge_length_treshold_) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-
-    bool CheckSize(const GraphComponent<Graph> & component) const {
-        return (component.vertices().size() > 1);
-    }
-
-    void RemoveComplexTip(GraphComponent<Graph>& component) {
-        ComponentRemover<Graph> remover(g_, removal_handler_);
-        remover.DeleteComponent(component.edges().begin(), component.edges().end());
-    }
-
-
-    bool CheckPathLengths(const map<VertexId, Range>& ranges) const {
-        for(auto r : ranges) {
-            if(r.second.start_pos > max_path_length_) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    double GetTipCoverage(const GraphComponent<Graph> & component) const {
-        double cov = numeric_limits<double>::max();
-        for(auto edge : component.edges()) {
-            cov = std::min(cov, g_.coverage(edge));
-        }
-        return cov;
-    }
-
-    double GetOutwardCoverage(const GraphComponent<Graph> & component) const {
-        double cov = 0.0;
-        for(auto v : component.vertices()) {
-            for(auto edge : g_.OutgoingEdges(v)) {
-                if(component.contains(edge)) {
-                    cov = max(cov, g_.coverage(edge));
-                }
-            }
-
-            for(auto edge : g_.IncomingEdges(v)) {
-                if(component.contains(edge)) {
-                    cov = max(cov, g_.coverage(edge));
-                }
-            }
-        }
-        return cov;
-    }
-
-    double GetRelativeTipCoverage(const GraphComponent<Graph> & component) const {
-        return GetTipCoverage(component) / GetOutwardCoverage(component);
-    }
-
-public:
-    ComplexTipClipper(Graph& g, double relative_coverage, size_t max_edge_len, size_t max_path_len, const string& pics_folder = "", std::function<void(const set<EdgeId>&)> removal_handler = 0) :
-            g_(g), relative_coverage_treshold_(math::ge(relative_coverage, 0.0) ? relative_coverage : std::numeric_limits<double>::max()), edge_length_treshold_(max_edge_len) ,max_path_length_(max_path_len), pics_folder_(pics_folder), removal_handler_(removal_handler)
-    { }
-
-    bool Run() {
-        size_t cnt = 0;
-        INFO("Complex tip clipper started");
-        if (!pics_folder_.empty()) {
-            make_dir(pics_folder_);
-        }
-
-        bool something_done_flag = false;
-        for (auto it = g_.SmartVertexBegin(); !it.IsEnd(); ++it) {
-            if(g_.IncomingEdgeCount(*it) != 0) {
-                continue;
-            }
-            DEBUG("Processing vertex " << g_.str(*it));
-
-            DominatedSetFinder<Graph> dom_finder(g_, *it, max_path_length_ * 2);
-
-            if(!dom_finder.FillDominated()) {
-                DEBUG("Tip contains too long paths");
-                continue;
-            }
-
-            auto component = dom_finder.AsGraphComponent();
-
-            if(!CheckEdgeLenghts(component)) {
-                DEBUG("Tip contains too long edges");
-                continue;
-            }
-
-            if(!CheckSize(component)) {
-                DEBUG("Component doesn't meet size requirements");
-                continue;
-            }
-            auto dominated = dom_finder.dominated();
-            if(!CheckPathLengths(dominated)) {
-                DEBUG("Tip contains too long paths");
-                continue;
-            }
-
-            if(math::ge(GetRelativeTipCoverage(component), relative_coverage_treshold_)) {
-                DEBUG("Tip is too high covered with respect to external edges");
-                continue;
-            }
-
-            if (!pics_folder_.empty()) {
-                visualization::WriteComponentSinksSources(component,
-                        pics_folder_
-                                + ToString(g_.int_id(*it)) //+ "_" + ToString(candidate_cnt)
-                                + ".dot");
-            }
-
-            something_done_flag = true;
-            cnt++;
-            RemoveComplexTip(component);
-        }
-        CompressAllVertices(g_);
-        DEBUG("Complex tip clipper finished");
-        DEBUG("Tips processed " << cnt);
-        return something_done_flag;
-    }
-private:
-    DECL_LOGGER("ComplexTipClipper")
-};
-
-}
diff --git a/src/modules/assembly_graph/components/graph_component.hpp b/src/modules/assembly_graph/components/graph_component.hpp
deleted file mode 100644
index e92831b..0000000
--- a/src/modules/assembly_graph/components/graph_component.hpp
+++ /dev/null
@@ -1,198 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#pragma once
-
-#include "dev_support/standard_base.hpp"
-
-namespace omnigraph {
-//todo make handler!!!
-template<class Graph>
-class GraphComponent {
-    typedef typename Graph::VertexId VertexId;
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename std::set<VertexId>::const_iterator vertex_iterator;
-    typedef typename std::set<EdgeId>::const_iterator edge_iterator;
-    const Graph& graph_;
-    std::set<VertexId> vertices_;
-    std::set<EdgeId> edges_;
-    std::set<VertexId> sinks_;
-    std::set<VertexId> sources_;
-    std::string name_;
-
-
-    template<class VertexIt>
-    void FillVertices(VertexIt begin, VertexIt end) {
-        for (auto it = begin; it != end; ++it) {
-            vertices_.insert(*it);
-        }
-    }
-
-    template<class VertexIt>
-    void FillVertices(VertexIt begin, VertexIt end, bool add_conjugate) {
-        for (auto it = begin; it != end; ++it) {
-            vertices_.insert(*it);
-            if (add_conjugate)
-                vertices_.insert(graph_.conjugate(*it));
-        }
-    }
-
-    void FillEdges() {
-        for (auto v_it = vertices_.begin(); v_it != vertices_.end(); ++v_it) {
-            TRACE("working with vertex " << graph_.str(*v_it));
-            for (EdgeId e : graph_.OutgoingEdges(*v_it)) {
-                VertexId edge_end = graph_.EdgeEnd(e);
-                TRACE(graph_.coverage(e) << " " << graph_.length(e));
-                if (vertices_.count(edge_end) > 0) {
-                    edges_.insert(e);
-                    TRACE("Edge added");
-                }
-            }
-        }
-    }
-
-    template<class VertexIt>
-    void Fill(VertexIt begin, VertexIt end) {
-        FillVertices(begin, end);
-        FillEdges();
-        FindSinksAndSources();
-    }
-
-    template<class VertexIt>
-    void Fill(VertexIt begin, VertexIt end, bool add_conjugate) {
-        FillVertices(begin, end, add_conjugate);
-        FillEdges();
-        FindSinksAndSources();
-    }
-
-    void FindSinksAndSources() {
-        for(auto v : vertices_) {
-            for(auto e : graph_.IncomingEdges(v)) {
-                if(!contains(e) && !(contains(graph_.EdgeStart(e)))) {
-                    sources_.insert(v);
-                    break;
-                }
-            }
-
-            for(auto e : graph_.OutgoingEdges(v)) {
-                if(!contains(e) && !(contains(graph_.EdgeEnd(e)))) {
-                    sinks_.insert(v);
-                    break;
-                }
-            }
-        }
-    }
-
-public:
-    template<class VertexIt>
-    GraphComponent(const Graph &g, VertexIt begin, VertexIt end, const string &name = "") :
-        graph_(g), name_(name) {
-        Fill(begin, end);
-    }
-
-    //todo refactor and get rid of hack
-    template<class VertexIt>
-    GraphComponent(const Graph &g, VertexIt begin, VertexIt end,
-            bool add_conjugate, const string &name = "") : graph_(g), name_(name) {
-        Fill(begin, end, add_conjugate);
-    }
-
-    //Full graph component
-    GraphComponent(const Graph &g, bool fill = true, const string &name = "") : graph_(g), name_(name) {
-        if(fill) {
-            Fill(g.begin(), g.end());
-        }
-    }
-
-    //may be used for conjugate closure
-    GraphComponent(const GraphComponent& component, bool add_conjugate, const string &name = "") : graph_(component.graph_), name_(name)
-//        vertices_(component.vertices_.begin(), component.vertices_.end()),
-//        edges_(component.edges_.begin(), component.edges_.end())
-    {
-        Fill(component.v_begin(), component.v_end(), add_conjugate);
-    }
-
-    GraphComponent<Graph> &operator=(const GraphComponent<Graph> &that) {
-        VERIFY(&this->graph_ == &that.graph_);
-        this->vertices_ = that.vertices_;
-        this->edges_ = that.edges_;
-        this->name_ = that.name_;
-        return *this;
-    }
-
-    const Graph& g() const {
-        return graph_;
-    }
-
-    string name() const {
-        return name_;
-    }
-
-    size_t v_size() const {
-        return vertices_.size();
-    }
-
-    size_t e_size() const {
-        return edges_.size();
-    }
-
-    bool contains(EdgeId e) const {
-        return edges_.count(e) > 0;
-    }
-
-    bool contains(VertexId v) const {
-        return vertices_.count(v) > 0;
-    }
-
-    edge_iterator e_begin() const {
-        return edges_.begin();
-    }
-    edge_iterator e_end() const {
-        return edges_.end();
-    }
-
-    const std::set<EdgeId>& edges() const {
-        return edges_;
-    }
-
-    const std::set<VertexId>& vertices() const{
-        return vertices_;
-    }
-
-    vertex_iterator v_begin() const {
-        return vertices_.begin();
-    }
-    vertex_iterator v_end() const {
-        return vertices_.end();
-    }
-
-    const std::set<VertexId>& sinks() const {
-        return sinks_;
-    }
-
-    const std::set<VertexId>& sources() const {
-        return sources_;
-    }
-
-    bool IsBorder(VertexId v) const {
-        if(vertices_.count(v) == 0)
-            return false;
-        for (EdgeId e : graph_.IncidentEdges(v)) {
-            if (vertices_.count(graph_.EdgeStart(e)) == 0
-                    || vertices_.count(graph_.EdgeEnd(e)) == 0) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-};
-
-}
-
-
-
diff --git a/src/modules/assembly_graph/graph_alignment/pacbio/pacbio_gap_closer.hpp b/src/modules/assembly_graph/graph_alignment/pacbio/pacbio_gap_closer.hpp
deleted file mode 100644
index 2d3a0f0..0000000
--- a/src/modules/assembly_graph/graph_alignment/pacbio/pacbio_gap_closer.hpp
+++ /dev/null
@@ -1,396 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#pragma once
-
-#include "pacbio_read_structures.hpp"
-
-#include "ConsensusCore/Poa/PoaConfig.hpp"
-#include "ConsensusCore/Poa/PoaConsensus.hpp"
-
-#include <algorithm>
-#include <fstream>
-
-namespace pacbio {
-template<class Graph>
-class PacbioGapCloser;
-
-template<class Graph>
-class GapStorage {
-    friend class PacbioGapCloser<Graph> ;
-    typedef typename Graph::EdgeId EdgeId;
-private:
-    DECL_LOGGER("PacbioGaps")
-    ;
-    Graph &g_;
-    map<EdgeId, vector<GapDescription<Graph> > > inner_index;
-    void HiddenAddGap(const GapDescription<Graph> &p) {
-        inner_index[p.start].push_back(p);
-    }
-    vector<EdgeId> index;
-    set<pair<EdgeId, EdgeId> > nonempty_pairs;
-    set<pair<EdgeId, EdgeId> > transitively_ignored_pairs;
-    set<pair<EdgeId, EdgeId> > symmetrically_ignored_pairs;
-
-public:
-    size_t min_gap_quantity;
-    size_t long_seq_limit_;
-    GapStorage(Graph &g, size_t min_gap_quantity, size_t long_seq_limit)
-            : g_(g),
-              inner_index(), min_gap_quantity(min_gap_quantity), long_seq_limit_(long_seq_limit){
-    }
-
-    size_t FillIndex() {
-        index.resize(0);
-        set<EdgeId> tmp;
-        for (auto iter = inner_index.begin(); iter != inner_index.end(); iter++) {
-            index.push_back(iter->first);
-        }
-        return index.size();
-    }
-
-    EdgeId operator[](size_t i) {
-        return index.at(i);
-    }
-
-    size_t size() const {
-        return index.size();
-    }
-
-    bool IsTransitivelyIgnored(pair<EdgeId, EdgeId> p) {
-        return (transitively_ignored_pairs.find(p) != transitively_ignored_pairs.end());
-    }
-    bool IsSymmetricallyIgnored(pair<EdgeId, EdgeId> p) {
-        return (symmetrically_ignored_pairs.find(p) != symmetrically_ignored_pairs.end());
-    }
-
-    bool IsIgnored(pair<EdgeId, EdgeId> p) {
-        return (IsTransitivelyIgnored(p) || IsSymmetricallyIgnored(p));
-    }
-    void AddGap(const GapDescription<Graph> &p, bool add_rc = false) {
-        HiddenAddGap(p);
-        if (add_rc) {
-            TRACE("Adding conjugate");
-            HiddenAddGap(p.conjugate(g_, (int) g_.k() ));
-        }
-    }
-
-    void AddStorage(const GapStorage<Graph> & to_add) {
-        const auto& idx = to_add.inner_index;
-        for (auto iter = idx.begin(); iter != idx.end(); ++iter)
-            inner_index[iter->first].insert(inner_index[iter->first].end(), iter->second.begin(), iter->second.end());
-    }
-
-    void PostProcess() {
-        FillIndex();
-
-        for (auto j_iter = index.begin(); j_iter != index.end(); j_iter++) {
-            EdgeId e = *j_iter;
-            auto cl_start = inner_index[e].begin();
-            auto iter = inner_index[e].begin();
-            vector<GapDescription<Graph> > padded_gaps;
-            while (iter != inner_index[e].end()) {
-                auto next_iter = ++iter;
-                if (next_iter == inner_index[e].end() || next_iter->end != cl_start->end) {
-                    size_t len = next_iter - cl_start;
-                    if (len >= min_gap_quantity) {
-                        nonempty_pairs.insert(make_pair(cl_start->start, cl_start->end));
-                    }
-                    cl_start = next_iter;
-                }
-            }
-        }
-
-        set<pair<EdgeId, EdgeId> > used_rc_pairs;
-        for (auto iter = nonempty_pairs.begin(); iter != nonempty_pairs.end(); ++iter) {
-            if (used_rc_pairs.find(*iter) != used_rc_pairs.end()) {
-                DEBUG("skipping pair " << g_.int_id(iter->first) << "," << g_.int_id(iter->second));
-                symmetrically_ignored_pairs.insert(make_pair(iter->first, iter->second));
-            } else {
-                DEBUG("Using pair" << g_.int_id(iter->first) << "," << g_.int_id(iter->second));
-            }
-
-            for (size_t i = 0; i < index.size(); i++) {
-                if (nonempty_pairs.find(make_pair(iter->first, index[i])) != nonempty_pairs.end()
-                        && nonempty_pairs.find(make_pair(index[i], iter->second)) != nonempty_pairs.end()) {
-                    DEBUG("pair " << g_.int_id(iter->first) << "," << g_.int_id(iter->second) << " is ignored because of edge between " << g_.int_id(index[i]));
-                    transitively_ignored_pairs.insert(make_pair(iter->first, iter->second));
-                }
-            }
-            used_rc_pairs.insert(make_pair(g_.conjugate(iter->second), g_.conjugate(iter->first)));
-        }
-    }
-
-    void DumpToFile(const string filename) {
-        ofstream filestr(filename);
-        for (auto iter = inner_index.begin(); iter != inner_index.end(); ++iter) {
-            DEBUG( g_.int_id(iter->first)<< " " <<iter->second.size());
-            filestr << g_.int_id(iter->first) << " " << iter->second.size() << endl;
-            sort(iter->second.begin(), iter->second.end());
-            for (auto j_iter = iter->second.begin(); j_iter != iter->second.end(); ++j_iter) {
-                filestr << j_iter->str(g_);
-            }
-            filestr << endl;
-        }
-    }
-
-    void LoadFromFile(const string s) {
-        FILE* file = fopen((s).c_str(), "r");
-        int res;
-        char ss[5000];
-        map<int, EdgeId> tmp_map;
-        for (auto iter = g_.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
-            tmp_map[g_.int_id(*iter)] = *iter;
-        }
-        while (!feof(file)) {
-            int first_id, second_id, first_ind, second_ind;
-            int size;
-            res = fscanf(file, "%d %d\n", &first_id, &size);
-            VERIFY(res == 2);
-            for (int i = 0; i < size; i++) {
-                res = fscanf(file, "%d %d\n", &first_id, &first_ind);
-                VERIFY(res == 2);
-                res = fscanf(file, "%d %d\n", &second_id, &second_ind);
-                VERIFY(res == 2);
-                res = fscanf(file, "%s\n", ss);
-                VERIFY(res == 1);
-                GapDescription<Graph> gap(tmp_map[first_id], tmp_map[second_id], Sequence(ss), first_ind, second_ind);
-                this->AddGap(gap);
-            }
-        }
-    }
-
-    void PadGapStrings(EdgeId e) {
-        sort(inner_index[e].begin(), inner_index[e].end());
-        auto cl_start = inner_index[e].begin();
-        auto iter = inner_index[e].begin();
-        vector<GapDescription<Graph> > padded_gaps;
-        while (iter != inner_index[e].end()) {
-            auto next_iter = ++iter;
-            if (next_iter == inner_index[e].end() || next_iter->end != cl_start->end) {
-                int start_min = 1000000000;
-                int end_max = 0;
-                size_t long_seqs = 0;
-                size_t short_seqs = 0;
-                bool exclude_long_seqs = false;
-                for (auto j_iter = cl_start; j_iter != next_iter; j_iter++) {
-                    if (g_.length(j_iter->start) - j_iter->edge_gap_start_position > 500 || j_iter->edge_gap_end_position > 500) {
-                        DEBUG("ignoring alingment to the middle of edge");
-                        continue;
-                    }
-                    if (j_iter->gap_seq.size() > long_seq_limit_)
-                        long_seqs++;
-                    else
-                        short_seqs++;
-
-                    if (j_iter->edge_gap_start_position < start_min)
-                        start_min = j_iter->edge_gap_start_position;
-                    if (j_iter->edge_gap_end_position > end_max)
-                        end_max = j_iter->edge_gap_end_position;
-                }
-
-                if (short_seqs >= min_gap_quantity && short_seqs > long_seqs)
-                    exclude_long_seqs = true;
-
-                for (auto j_iter = cl_start; j_iter != next_iter; j_iter++) {
-                    if (g_.length(j_iter->start) - j_iter->edge_gap_start_position > 500 || j_iter->edge_gap_end_position > 500)
-                        continue;
-
-                    if (exclude_long_seqs && j_iter->gap_seq.size() > long_seq_limit_)
-                        continue;
-
-                    string s = g_.EdgeNucls(j_iter->start).Subseq(start_min, j_iter->edge_gap_start_position).str();
-                    s += j_iter->gap_seq.str();
-                    s += g_.EdgeNucls(j_iter->end).Subseq(j_iter->edge_gap_end_position, end_max).str();
-                    padded_gaps.push_back(GapDescription<Graph>(j_iter->start, j_iter->end, Sequence(s), start_min, end_max));
-                }
-                cl_start = next_iter;
-            }
-        }
-        inner_index[e] = padded_gaps;
-    }
-
-    void PadGapStrings() {
-        for (auto iter = inner_index.begin(); iter != inner_index.end(); ++iter) {
-            DEBUG("Padding gaps for first edge " << g_.int_id(iter->first));
-            PadGapStrings(iter->first);
-        }
-        PostProcess();
-    }
-};
-
-template<class Graph>
-class PacbioGapCloser {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef runtime_k::RtSeq Kmer;
-    typedef vector<map<Kmer, int> > KmerStorage;
-private:
-    DECL_LOGGER("PacbioGaps")
-    ;
-    Graph &g_;
-    //first edge, second edge, weight, seq
-    map<EdgeId, map<EdgeId, pair<size_t, string> > > new_edges_;
-    int closed_gaps;
-    int not_unique_gaps;
-    int chained_gaps;
-    bool consensus_gap_closing;
-    size_t max_contigs_gap_length_;
-public:
-    void CloseGapsInGraph(map<EdgeId, EdgeId> &replacement) {
-        for (auto iter = new_edges_.begin(); iter != new_edges_.end(); ++iter) {
-            if (iter->second.size() != 1) {
-                DEBUG("non-unique gap!!");
-                not_unique_gaps ++;
-                continue;
-            }
-            EdgeId first = iter->first;
-            EdgeId second = (iter->second.begin()->first);
-            if (replacement.find(first) != replacement.end() || replacement.find(second) != replacement.end()) {
-                DEBUG("sorry, gap chains are not supported yet");
-                chained_gaps++;
-                continue;
-            }
-
-            EdgeId first_conj = g_.conjugate(first);
-            EdgeId second_conj = g_.conjugate(second);
-            size_t first_id = g_.int_id(first);
-            size_t second_id = g_.int_id(second);
-            size_t first_id_conj = g_.int_id(g_.conjugate(first));
-            size_t second_id_conj = g_.int_id(g_.conjugate(second));
-            DEBUG("closing gaps between "<< first_id << " " << second_id);
-            size_t len_f = g_.length(first);
-            size_t len_s = g_.length(second);
-            size_t len_sum = iter->second.begin()->second.second.length();
-            double cov = (double)g_.length(first) * g_.coverage(first) +  (double)g_.length(second) * g_.coverage(second);
-
-            DEBUG("coverage was " << g_.coverage(first) << " " << g_.coverage(second));
-
-            EdgeId newEdge = g_.AddEdge(g_.EdgeStart(first), g_.EdgeEnd(second), Sequence(iter->second.begin()->second.second));
-            if (cov > UINT_MAX * 0.75 ) cov = UINT_MAX*0.75;
-            cov /= (double) g_.length(newEdge);
-            TRACE(g_.int_id(newEdge));
-            int len_split = int(((double) len_f * (double) len_sum) / ((double)len_s + (double)len_f));
-            if (len_split == 0) {
-                DEBUG(" zero split length, length are:" << len_f <<" " << len_sum <<" " << len_s);
-                len_split = 1;
-            }
-            g_.DeleteEdge(first);
-            g_.DeleteEdge(second);
-            g_.coverage_index().SetAvgCoverage(newEdge, cov);
-            g_.coverage_index().SetAvgCoverage(g_.conjugate(newEdge), cov);
-            size_t next_id = g_.int_id(newEdge);
-            DEBUG("and new coverage is " << g_.coverage(newEdge));
-            closed_gaps ++;
-            size_t next_id_conj = g_.int_id(g_.conjugate(newEdge));
-            TRACE(first_id << " " << second_id << " " << next_id << " " << first_id_conj << " " << second_id_conj << " " << next_id_conj << " ");
-            replacement[first] = newEdge;
-            replacement[second] = newEdge;
-            replacement[first_conj] = g_.conjugate(newEdge);
-            replacement[second_conj] = g_.conjugate(newEdge);
-        }
-        INFO("Closed " << closed_gaps << " gaps");
-        INFO("Total " << not_unique_gaps << " were not closed due to more than one possible pairing");
-        INFO("Total " << chained_gaps << " were skipped because of gap chains");
-        //TODO: chains of gaps!
-    }
-private:
-
-    void ConstructConsensus(EdgeId e, GapStorage<Graph> &storage, map<EdgeId, map<EdgeId, pair<size_t, string> > > & new_edges) {
-        auto cl_start = storage.inner_index[e].begin();
-        auto iter = storage.inner_index[e].begin();
-        size_t cur_len = 0;
-        while (iter != storage.inner_index[e].end()) {
-            auto next_iter = ++iter;
-            cur_len++;
-            if (next_iter == storage.inner_index[e].end() || next_iter->end != cl_start->end) {
-                if (cur_len >= storage.min_gap_quantity && !storage.IsIgnored(make_pair(cl_start->start, cl_start->end))) {
-                    vector<string> gap_variants;
-
-                    for (auto j_iter = cl_start; j_iter != next_iter; j_iter++) {
-                        string s = j_iter->gap_seq.str();
-                        transform(s.begin(), s.end(), s.begin(), ::toupper);
-                        gap_variants.push_back(s);
-                    }
-                    if (consensus_gap_closing || (gap_variants.size() > 0 && gap_variants[0].length() < max_contigs_gap_length_)) {
-                        map <EdgeId, pair<size_t, string>> tmp;
-                        string tmp_string;
-                        string s = g_.EdgeNucls(cl_start->start).Subseq(0, cl_start->edge_gap_start_position).str();
-                        if (consensus_gap_closing) {
-                            const ConsensusCore::PoaConsensus *pc = ConsensusCore::PoaConsensus::FindConsensus(
-                                    gap_variants,
-                                    ConsensusCore::PoaConfig::GLOBAL_ALIGNMENT);
-                            tmp_string = pc->Sequence();
-                        } else {
-                            tmp_string = gap_variants[0];
-                            if (gap_variants.size() > 1) {
-
-                                stringstream ss;
-                                for (size_t i = 0; i < gap_variants.size(); i++)
-                                    ss << gap_variants[i].length() << " ";
-                                INFO(gap_variants.size() << " gap closing variant for contigs, lengths: " << ss.str());
-                            }
-                        }
-
-                        DEBUG("consenus for " << g_.int_id(cl_start->start) << " and " << g_.int_id(cl_start->end) <<
-                                                                                          "found: ");
-                        DEBUG(tmp_string);
-                        s += tmp_string;
-                        s += g_.EdgeNucls(cl_start->end).Subseq(cl_start->edge_gap_end_position,
-                                                                g_.length(cl_start->end) + g_.k()).str();
-                        tmp.insert(make_pair(cl_start->end, make_pair(cur_len, s)));
-                        new_edges[cl_start->start] = tmp;
-                    } else {
-                        INFO ("Skipping gap of size " << gap_variants[0].length() << " multiplicity " << gap_variants.size());
-                    }
-                }
-                cl_start = next_iter;
-                cur_len = 0;
-            }
-        }
-    }
-
-public:
-    PacbioGapCloser(Graph &g, bool consensus_gap, size_t max_contigs_gap_length )
-            : g_(g), consensus_gap_closing(consensus_gap), max_contigs_gap_length_(max_contigs_gap_length) {
-        closed_gaps = 0;
-        not_unique_gaps = 0;
-        chained_gaps = 0;
-    }
-
-    void ConstructConsensus(size_t nthreads, GapStorage<Graph> &storage) {
-        vector<map<EdgeId, map<EdgeId, pair<size_t, string> > > > new_edges_by_thread;
-        new_edges_by_thread.resize(nthreads);
-        size_t storage_size = storage.size();
-# pragma omp parallel for shared(storage, new_edges_by_thread) num_threads(nthreads)
-        for (size_t i = 0; i < storage_size; i++) {
-            EdgeId e = storage[i];
-            size_t thread_num = omp_get_thread_num();
-            DEBUG("constructing consenus for first edge " << g_.int_id(e) << " in thread " <<thread_num);
-            ConstructConsensus(e, storage, new_edges_by_thread[thread_num]);
-        }
-        for (size_t i = 0; i < nthreads; i++) {
-            for (auto iter = new_edges_by_thread[i].begin(); iter != new_edges_by_thread[i].end(); ++iter) {
-                new_edges_.insert(*iter);
-            }
-        }
-    }
-    void DumpToFile(const string filename) {
-        ofstream filestr(filename);
-        for (auto iter = new_edges_.begin(); iter != new_edges_.end(); ++iter) {
-            if (iter->second.size() > 1) {
-                DEBUG("nontrivial gap closing for edge" <<g_.int_id(iter->first));
-            }
-            for (auto j_iter = iter->second.begin(); j_iter != iter->second.end(); ++j_iter) {
-                filestr << ">" << g_.int_id(iter->first) << "_" << iter->second.size() << "_" << g_.int_id(j_iter->first) << "_" << j_iter->second.first << endl;
-                filestr << j_iter->second.second << endl;
-            }
-        }
-    }
-
-};
-
-}
diff --git a/src/modules/assembly_graph/graph_support/graph_processing_algorithm.hpp b/src/modules/assembly_graph/graph_support/graph_processing_algorithm.hpp
deleted file mode 100644
index cce6c20..0000000
--- a/src/modules/assembly_graph/graph_support/graph_processing_algorithm.hpp
+++ /dev/null
@@ -1,262 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#pragma once
-
-#include "dev_support/func.hpp"
-#include <boost/none.hpp>
-#include <atomic>
-#include "assembly_graph/graph_core/graph_iterators.hpp"
-#include "assembly_graph/components/graph_component.hpp"
-#include "math/pred.hpp"
-#include "dev_support/logger/logger.hpp"
-
-namespace omnigraph {
-
-template<class Graph>
-using HandlerF = std::function<void(typename Graph::EdgeId)>;
-
-template<class Graph>
-class EdgeProcessingAlgorithm {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef pred::TypedPredicate<EdgeId> ProceedConditionT;
-
-    Graph& g_;
-    bool conjugate_symmetry_;
- protected:
-
-    Graph& g() {
-        return g_;
-    }
-
-    const Graph& g() const {
-        return g_;
-    }
-
-    virtual bool ProcessEdge(EdgeId e) = 0;
-
- public:
-    EdgeProcessingAlgorithm(Graph& g,
-                             bool conjugate_symmetry = false)
-            : g_(g), conjugate_symmetry_(conjugate_symmetry) {
-
-    }
-
-    virtual ~EdgeProcessingAlgorithm() {
-    }
-
-//    bool conjugate_symmetry() const {
-//        return conjugate_symmetry_;
-//    }
-
-    template<class Comparator = std::less<EdgeId>>
-    bool Run(const Comparator& comp = Comparator(), ProceedConditionT proceed_condition = pred::AlwaysTrue<EdgeId>()) {
-        bool triggered = false;
-        for (auto it = g_.SmartEdgeBegin(comp, conjugate_symmetry_); !it.IsEnd(); ++it) {
-            EdgeId e = *it;
-            TRACE("Current edge " << g_.str(e));
-            if (!proceed_condition(e)) {
-                TRACE("Stop condition was reached.");
-                break;
-            }
-
-            TRACE("Processing edge " << this->g().str(e));
-            triggered |= ProcessEdge(e);
-        };
-        return triggered;
-    }
-
- private:
-    DECL_LOGGER("EdgeProcessingAlgorithm");
-};
-
-template<class Graph>
-class CountingCallback {
-    typedef typename Graph::EdgeId EdgeId;
-    bool report_on_destruction_;
-    std::atomic<size_t> cnt_;
-
-public:
-    CountingCallback(bool report_on_destruction = false) :
-            report_on_destruction_(report_on_destruction), cnt_(0) {
-    }
-
-    ~CountingCallback() {
-        if (report_on_destruction_)
-            Report();
-    }
-
-    void HandleDelete(EdgeId /*e*/) {
-        cnt_++;
-    }
-
-    void Report() {
-        TRACE(cnt_ << " edges were removed.")
-        cnt_ = 0;
-    }
-
-private:
-    DECL_LOGGER("CountingCallback");
-};
-
-template<class Graph>
-std::function<void(typename Graph::EdgeId)> AddCountingCallback(CountingCallback<Graph>& cnt_callback, std::function<void(typename Graph::EdgeId)> handler) {
-    std::function<void(typename Graph::EdgeId)> cnt_handler = std::bind(&CountingCallback<Graph>::HandleDelete, std::ref(cnt_callback), std::placeholders::_1);
-    return func::Composition<typename Graph::EdgeId>(handler, cnt_handler);
-}
-template<class Graph>
-void RemoveIsolatedOrCompress(Graph& g, typename Graph::VertexId v) {
-    if (g.IsDeadStart(v) && g.IsDeadEnd(v)) {
-        g.DeleteVertex(v);
-    } else {
-        g.CompressVertex(v);
-    }
-}
-
-template<class Graph>
-class EdgeRemover {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    typedef std::function<void(EdgeId)> HandlerF;
-
-    Graph& g_;
-    HandlerF removal_handler_;
-
- public:
-    EdgeRemover(Graph& g, HandlerF removal_handler = nullptr)
-            : g_(g),
-              removal_handler_(removal_handler) {
-    }
-
-    void DeleteEdge(EdgeId e) {
-        VertexId start = g_.EdgeStart(e);
-        VertexId end = g_.EdgeEnd(e);
-        DeleteEdgeWithNoCompression(e);
-        // NOTE: e here is already dead!
-        TRACE("Compressing locality");
-        if (!g_.RelatedVertices(start, end)) {
-            TRACE("Vertices not related");
-            TRACE("Processing end");
-            RemoveIsolatedOrCompress(g_, end);
-            TRACE("End processed");
-        }
-        TRACE("Processing start");
-        RemoveIsolatedOrCompress(g_, start);
-        TRACE("Start processed");
-    }
-
-    void DeleteEdgeWithNoCompression(EdgeId e) {
-        TRACE("Deletion of edge " << g_.str(e));
-        TRACE("Start " << g_.str(g_.EdgeStart(e)));
-        TRACE("End " << g_.str(g_.EdgeEnd(e)));
-        if (removal_handler_) {
-            TRACE("Calling handler");
-            removal_handler_(e);
-        }
-        TRACE("Deleting edge");
-        g_.DeleteEdge(e);
-    }
-
- private:
-    DECL_LOGGER("EdgeRemover");
-};
-
-template<class Graph>
-class EdgeRemovingAlgorithm : public EdgeProcessingAlgorithm<Graph> {
-    typedef EdgeProcessingAlgorithm<Graph> base;
-    typedef typename Graph::EdgeId EdgeId;
-
-    pred::TypedPredicate<EdgeId> remove_condition_;
-    EdgeRemover<Graph> edge_remover_;
-
- protected:
-    bool ProcessEdge(EdgeId e) {
-        TRACE("Checking edge " << this->g().str(e) << " for the removal condition");
-        if (remove_condition_(e)) {
-            TRACE("Check passed, removing");
-            edge_remover_.DeleteEdge(e);
-            return true;
-        }
-        TRACE("Check not passed");
-        return false;
-    }
-
- public:
-    EdgeRemovingAlgorithm(Graph& g,
-                          pred::TypedPredicate<EdgeId> remove_condition,
-                          std::function<void (EdgeId)> removal_handler = boost::none,
-                          bool conjugate_symmetry = false)
-            : base(g, conjugate_symmetry),
-              remove_condition_(remove_condition),
-              edge_remover_(g, removal_handler) {}
-
- private:
-    DECL_LOGGER("EdgeRemovingAlgorithm");
-};
-
-//todo rewrite with SmartSetIterator
-template<class Graph>
-class ComponentRemover {
- public:
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    typedef std::function<void(const std::set<EdgeId>&)> HandlerF;
-
- private:
-    Graph& g_;
-    HandlerF removal_handler_;
-
-    template<class ElemType>
-    void InsertIfNotConjugate(std::set<ElemType>& elems, ElemType elem) {
-        if (elems.count(g_.conjugate(elem)) == 0) {
-            elems.insert(elem);
-        }
-    }
-
- public:
-    ComponentRemover(Graph& g, HandlerF removal_handler = 0)
-            : g_(g),
-              removal_handler_(removal_handler) {
-    }
-
-    template<class EdgeIt>
-    void DeleteComponent(EdgeIt begin, EdgeIt end, bool alter_vertices = true) {
-        using std::set;
-        set<EdgeId> edges;
-        set<VertexId> vertices;
-
-        //cleaning conjugates and gathering vertices
-        for (EdgeIt it = begin; it != end; ++it) {
-            EdgeId e = *it;
-            InsertIfNotConjugate(edges, e);
-            InsertIfNotConjugate(vertices, g_.EdgeStart(e));
-            InsertIfNotConjugate(vertices, g_.EdgeEnd(e));
-        }
-
-        if (removal_handler_) {
-            removal_handler_(edges);
-        }
-
-        for (EdgeId e: edges) {
-            g_.DeleteEdge(e);
-        }
-
-        if (alter_vertices) {
-            for (VertexId v: vertices) {
-                RemoveIsolatedOrCompress(g_, v);
-            }
-        }
-    }
-
-    template<class Container>
-    void DeleteComponent(const Container& container, bool alter_vertices = true) {
-        DeleteComponent(container.begin(), container.end(), alter_vertices);
-    }
-
-};
-
-}
diff --git a/src/modules/assembly_graph/graph_support/parallel_processing.hpp b/src/modules/assembly_graph/graph_support/parallel_processing.hpp
deleted file mode 100644
index 9b5084b..0000000
--- a/src/modules/assembly_graph/graph_support/parallel_processing.hpp
+++ /dev/null
@@ -1,290 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#pragma once
-
-#include "dev_support/logger/logger.hpp"
-#include "assembly_graph/graph_core/graph_iterators.hpp"
-#include "assembly_graph/graph_support/graph_processing_algorithm.hpp"
-#include "dev_support/openmp_wrapper.h"
-
-namespace omnigraph {
-
-template<class ItVec, class SmartIt, class Predicate>
-void FillInterestingFromChunkIterators(const ItVec& chunk_iterators,
-                                       SmartIt& smart_it,
-                                       const Predicate& predicate) {
-    VERIFY(chunk_iterators.size() > 1);
-    typedef typename Predicate::checked_type ElementType;
-    std::vector<std::vector<ElementType>> of_interest(omp_get_max_threads());
-
-    #pragma omp parallel for schedule(guided)
-    for (size_t i = 0; i < chunk_iterators.size() - 1; ++i) {
-        for (auto it = chunk_iterators[i], end = chunk_iterators[i + 1]; it != end; ++it) {
-            ElementType t = *it;
-            if (predicate(t)) {
-                of_interest[omp_get_thread_num()].push_back(t);
-            }
-        }
-    }
-
-    for (auto& chunk : of_interest) {
-        smart_it.insert(chunk.begin(), chunk.end());
-        chunk.clear();
-    }
-}
-
-template<class Graph, class ElementId = typename Graph::EdgeId>
-class TrivialInterestingElementFinder {
-public:
-
-    TrivialInterestingElementFinder() {
-    }
-
-    template<class SmartIt>
-    bool Run(SmartIt& /*it*/) const {
-        return false;
-    }
-};
-
-template<class Graph, class ElementId = typename Graph::EdgeId>
-class SimpleInterestingElementFinder {
-    typedef GraphEdgeIterator<Graph> EdgeIt;
-
-    const Graph& g_;
-    pred::TypedPredicate<ElementId> condition_;
-public:
-
-    SimpleInterestingElementFinder(const Graph& g,
-                                   pred::TypedPredicate<ElementId> condition = pred::AlwaysTrue<ElementId>())
-            :  g_(g), condition_(condition) {}
-
-    template<class SmartIt>
-    bool Run(SmartIt& interest) const {
-        for (EdgeIt it = EdgeIt(g_, g_.begin()), end = EdgeIt(g_, g_.end()); it != end; ++it) {
-            if (condition_(*it)) {
-                interest.push(*it);
-            }
-        }
-        return false;
-    }
-};
-
-template<class Graph, class ElementId = typename Graph::EdgeId>
-class ParallelInterestingElementFinder {
-    typedef GraphEdgeIterator<Graph> EdgeIt;
-
-    const Graph& g_;
-    pred::TypedPredicate<ElementId> condition_;
-    const size_t chunk_cnt_;
-public:
-
-    ParallelInterestingElementFinder(const Graph& g,
-                                     pred::TypedPredicate<ElementId> condition,
-                                     size_t chunk_cnt)
-            : g_(g), condition_(condition), chunk_cnt_(chunk_cnt) {}
-
-    template<class SmartIt>
-    bool Run(SmartIt& it) const {
-        TRACE("Looking for interesting elements");
-        TRACE("Splitting graph into " << chunk_cnt_ << " chunks");
-        FillInterestingFromChunkIterators(IterationHelper<Graph, ElementId>(g_).Chunks(chunk_cnt_), it, condition_);
-        TRACE("Found " << it.size() << " interesting elements");
-        return false;
-    }
-private:
-    DECL_LOGGER("ParallelInterestingElementFinder");
-};
-
-template<class Graph>
-class PersistentAlgorithmBase {
-    Graph& g_;
-protected:
-
-    PersistentAlgorithmBase(Graph& g) : g_(g) {}
-
-    Graph& g() { return g_; }
-    const Graph& g() const { return g_; }
-public:
-    virtual ~PersistentAlgorithmBase() {}
-    virtual bool Run(bool force_primary_launch = false) = 0;
-};
-
-//todo use add_condition in it_
-template<class Graph, class ElementId, class InterestingElementFinder,
-         class Comparator = std::less<ElementId>>
-class PersistentProcessingAlgorithm : public PersistentAlgorithmBase<Graph> {
-    InterestingElementFinder interest_el_finder_;
-
-    SmartSetIterator<Graph, ElementId, Comparator> it_;
-    //todo remove
-    bool tracking_;
-    size_t total_iteration_estimate_;
-
-    size_t curr_iteration_;
-
-protected:
-
-    virtual bool Process(ElementId el) = 0;
-    virtual bool Proceed(ElementId /*el*/) const { return true; }
-
-    virtual void PrepareIteration(size_t /*it_cnt*/, size_t /*total_it_estimate*/) {}
-
-public:
-
-    PersistentProcessingAlgorithm(Graph& g,
-                                      const InterestingElementFinder& interest_el_finder,
-                                      bool canonical_only = false,
-                                      const Comparator& comp = Comparator(),
-                                      bool track_changes = true,
-                                      size_t total_iteration_estimate = -1ul) :
-                                      PersistentAlgorithmBase<Graph>(g),
-                                      interest_el_finder_(interest_el_finder),
-                                      it_(g, true, comp, canonical_only),
-                                      tracking_(track_changes),
-                                      total_iteration_estimate_(total_iteration_estimate),
-                                      curr_iteration_(0) {
-        it_.Detach();
-    }
-
-    bool Run(bool force_primary_launch = false) {
-        bool primary_launch = !tracking_ || (curr_iteration_ == 0) || force_primary_launch ;
-        if (!it_.IsAttached()) {
-            it_.Attach();
-        }
-        if (primary_launch) {
-            it_.clear();
-            TRACE("Primary launch.");
-            TRACE("Start preprocessing");
-            interest_el_finder_.Run(it_);
-            TRACE(it_.size() << " edges to process after preprocessing");
-        } else {
-            TRACE(it_.size() << " edges to process");
-            VERIFY(tracking_);
-        }
-
-        if (curr_iteration_ >= total_iteration_estimate_) {
-            PrepareIteration(total_iteration_estimate_ - 1, total_iteration_estimate_);
-        } else {
-            PrepareIteration(curr_iteration_, total_iteration_estimate_);
-        }
-
-        bool triggered = false;
-        TRACE("Start processing");
-        for (; !it_.IsEnd(); ++it_) {
-            ElementId el = *it_;
-            if (!Proceed(el)) {
-                TRACE("Proceed condition turned false on element " << this->g().str(el));
-                it_.ReleaseCurrent();
-                break;
-            }
-            TRACE("Processing edge " << this->g().str(el));
-            triggered |= Process(el);
-        }
-        TRACE("Finished processing. Triggered = " << triggered);
-        if (!tracking_)
-            it_.Detach();
-
-        curr_iteration_++;
-        return triggered;
-    }
-
-};
-
-template<class Graph, class InterestingEdgeFinder,
-         class Comparator = std::less<typename Graph::EdgeId>>
-class PersistentEdgeRemovingAlgorithm : public PersistentProcessingAlgorithm<Graph,
-                                                                            typename Graph::EdgeId,
-                                                                            InterestingEdgeFinder, Comparator> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef PersistentProcessingAlgorithm<Graph, EdgeId, InterestingEdgeFinder, Comparator> base;
-    EdgeRemover<Graph> edge_remover_;
-public:
-    PersistentEdgeRemovingAlgorithm(Graph& g,
-                                    const InterestingEdgeFinder& interest_edge_finder,
-                                    std::function<void(EdgeId)> removal_handler = boost::none,
-                                    bool canonical_only = false,
-                                    const Comparator& comp = Comparator(),
-                                    bool track_changes = true,
-                                    size_t total_iteration_estimate = -1ul)
-            : base(g, interest_edge_finder,
-                   canonical_only, comp, track_changes,
-                   total_iteration_estimate),
-                   edge_remover_(g, removal_handler) {
-
-    }
-
-protected:
-
-    virtual bool ShouldRemove(EdgeId e) const = 0;
-
-    bool Process(EdgeId e) override {
-        TRACE("Checking edge " << this->g().str(e) << " for the removal condition");
-        if (ShouldRemove(e)) {
-            TRACE("Check passed, removing");
-            edge_remover_.DeleteEdge(e);
-            return true;
-        }
-        TRACE("Check not passed");
-        return false;
-    }
-
-};
-
-template<class Graph, class InterestingEdgeFinder,
-         class Comparator = std::less<typename Graph::EdgeId>>
-class ConditionEdgeRemovingAlgorithm : public PersistentEdgeRemovingAlgorithm<Graph,
-                                                                              InterestingEdgeFinder, Comparator> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef PersistentEdgeRemovingAlgorithm<Graph, InterestingEdgeFinder, Comparator> base;
-    pred::TypedPredicate<EdgeId> remove_condition_;
-protected:
-
-    bool ShouldRemove(EdgeId e) const override {
-        return remove_condition_(e);
-    }
-
-public:
-    ConditionEdgeRemovingAlgorithm(Graph& g,
-                                   const InterestingEdgeFinder& interest_edge_finder,
-                                   pred::TypedPredicate<EdgeId> remove_condition,
-                                   std::function<void(EdgeId)> removal_handler = boost::none,
-                                   bool canonical_only = false,
-                                   const Comparator& comp = Comparator(),
-                                   bool track_changes = true)
-            : base(g, interest_edge_finder,
-                   removal_handler,
-                   canonical_only, comp, track_changes),
-                   remove_condition_(remove_condition) {
-
-    }
-};
-
-template<class Graph, class Comparator = std::less<typename Graph::EdgeId>>
-class ParallelEdgeRemovingAlgorithm : public ConditionEdgeRemovingAlgorithm<Graph,
-                                                ParallelInterestingElementFinder<Graph>, Comparator> {
-    typedef ConditionEdgeRemovingAlgorithm<Graph,
-            ParallelInterestingElementFinder<Graph>, Comparator> base;
-    typedef typename Graph::EdgeId EdgeId;
-
-public:
-    ParallelEdgeRemovingAlgorithm(Graph& g,
-                                  pred::TypedPredicate<EdgeId> remove_condition,
-                                  size_t chunk_cnt,
-                                  std::function<void(EdgeId)> removal_handler = boost::none,
-                                  bool canonical_only = false,
-                                  const Comparator& comp = Comparator(),
-                                  bool track_changes = true)
-            : base(g,
-                   ParallelInterestingElementFinder<Graph>(g, remove_condition, chunk_cnt),
-                   remove_condition, removal_handler,
-                   canonical_only, comp, track_changes) {
-    }
-
-};
-
-}
diff --git a/src/modules/assembly_graph/graph_support/scaff_supplementary.cpp b/src/modules/assembly_graph/graph_support/scaff_supplementary.cpp
deleted file mode 100644
index afb3779..0000000
--- a/src/modules/assembly_graph/graph_support/scaff_supplementary.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-#include "scaff_supplementary.hpp"
-#include <algorithm>
-
-using namespace std;
-namespace path_extend {
-
-
-void ScaffoldingUniqueEdgeAnalyzer::SetCoverageBasedCutoff() {
-    vector <pair<double, size_t>> coverages;
-    map <EdgeId, size_t> long_component;
-    size_t total_len = 0, short_len = 0, cur_len = 0;
-
-    for (auto iter = gp_.g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
-        if (gp_.g.length(*iter) > length_cutoff_) {
-            coverages.push_back(make_pair(gp_.g.coverage(*iter), gp_.g.length(*iter)));
-            total_len += gp_.g.length(*iter);
-            long_component[*iter] = 0;
-        } else {
-            short_len += gp_.g.length(*iter);
-        }
-    }
-    if (total_len == 0) {
-        WARN("not enough edges longer than "<< length_cutoff_);
-        return;
-    }
-    sort(coverages.begin(), coverages.end());
-    size_t i = 0;
-    while (cur_len < total_len / 2 && i < coverages.size()) {
-        cur_len += coverages[i].second;
-        i++;
-    }
-    median_coverage_ = coverages[i].first;
-}
-
-
-void ScaffoldingUniqueEdgeAnalyzer::FillUniqueEdgeStorage(ScaffoldingUniqueEdgeStorage &storage_) {
-    storage_.unique_edges_.clear();
-    size_t total_len = 0;
-    size_t unique_len = 0;
-    size_t unique_num = 0;
-    storage_.SetMinLength(length_cutoff_);
-    for (auto iter = gp_.g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
-        size_t tlen = gp_.g.length(*iter);
-        total_len += tlen;
-        if (gp_.g.length(*iter) >= length_cutoff_ && gp_.g.coverage(*iter) > median_coverage_ * (1 - relative_coverage_variation_)
-                && gp_.g.coverage(*iter) < median_coverage_ * (1 + relative_coverage_variation_) ) {
-            storage_.unique_edges_.insert(*iter);
-            unique_len += tlen;
-            unique_num ++;
-        }
-    }
-    for (auto iter = storage_.begin(); iter != storage_.end(); ++iter) {
-        DEBUG (gp_.g.int_id(*iter) << " " << gp_.g.coverage(*iter) << " " << gp_.g.length(*iter) );
-    }
-    INFO ("With length cutoff: " << length_cutoff_ <<", median long edge coverage: " << median_coverage_ << ", and maximal unique coverage: " <<
-                                                                                                            relative_coverage_variation_);
-    INFO("Unique edges quantity: " << unique_num << ", unique edges length " << unique_len <<", total edges length" << total_len);
-    if (unique_len * 2 < total_len) {
-        WARN("Less than half of genome in unique edges!");
-    }
-
-}
-
-
-
-}
diff --git a/src/modules/assembly_graph/graph_support/scaff_supplementary.hpp b/src/modules/assembly_graph/graph_support/scaff_supplementary.hpp
deleted file mode 100644
index 71522f6..0000000
--- a/src/modules/assembly_graph/graph_support/scaff_supplementary.hpp
+++ /dev/null
@@ -1,77 +0,0 @@
-#pragma once
-
-#include "assembly_graph/graph_core/graph.hpp"
-#include "pipeline/graph_pack.hpp"
-#include "dev_support/logger/logger.hpp"
-
-namespace path_extend {
-    typedef debruijn_graph::EdgeId EdgeId;
-
-/* Storage of presumably unique, relatively long edges. Filled by ScaffoldingUniqueEdgeAnalyzer
- *
- */
-    class ScaffoldingUniqueEdgeStorage {
-        friend class ScaffoldingUniqueEdgeAnalyzer;
-    private:
-        set <EdgeId> unique_edges_;
-        size_t min_unique_length_;
-    public:
-        ScaffoldingUniqueEdgeStorage(): unique_edges_(){
-            DEBUG("storage created, empty");
-        }
-
-        bool IsUnique(EdgeId e) const {
-            return (unique_edges_.find(e) != unique_edges_.end());
-        }
-
-        decltype(unique_edges_.begin()) begin() const {
-            return unique_edges_.begin();
-        }
-
-        decltype(unique_edges_.end()) end() const {
-            return unique_edges_.end();
-        }
-
-        size_t size() const {
-            return unique_edges_.size();
-        }
-        size_t GetMinLength() const {
-            return min_unique_length_;
-        }
-        void SetMinLength(size_t min_length)  {
-            min_unique_length_ = min_length;
-        }
-
-        const set<EdgeId>& GetSet() const {
-            return unique_edges_;
-        }
-   protected:
-        DECL_LOGGER("ScaffoldingUniqueEdgeStorage")
-
-    };
-
-/* Auxillary class required to fillin the unique edge storage.
- *
- */
-    class ScaffoldingUniqueEdgeAnalyzer {
-
-    ;
-    private:
-        const debruijn_graph::conj_graph_pack &gp_;
-        size_t length_cutoff_;
-        double median_coverage_;
-        double relative_coverage_variation_;
-    protected:
-        DECL_LOGGER("ScaffoldingUniqueEdgeAnalyzer")
-
-
-        void SetCoverageBasedCutoff();
-    public:
-        ScaffoldingUniqueEdgeAnalyzer(const debruijn_graph::conj_graph_pack &gp, size_t apriori_length_cutoff, double max_relative_coverage):gp_(gp), length_cutoff_(apriori_length_cutoff), relative_coverage_variation_(max_relative_coverage){
-            SetCoverageBasedCutoff();
-        }
-        void FillUniqueEdgeStorage(ScaffoldingUniqueEdgeStorage &storage_);
-    };
-}
-
-
diff --git a/src/modules/assembly_graph/paths/path_utils.hpp b/src/modules/assembly_graph/paths/path_utils.hpp
deleted file mode 100644
index 212c81c..0000000
--- a/src/modules/assembly_graph/paths/path_utils.hpp
+++ /dev/null
@@ -1,128 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-/*
- * path_utils.hpp
- *
- */
-
-#pragma once
-
-#include "assembly_graph/paths/path_processor.hpp"
-
-namespace debruijn_graph {
-
-  // TODO: rewrite this function
-  template<class Graph>
-    vector<typename Graph::EdgeId> GetCommonPathsEnd(
-        const Graph& g,
-        typename Graph::EdgeId e1,
-        typename Graph::EdgeId e2,
-        size_t min_dist,
-        size_t max_dist,
-        const omnigraph::PathProcessor<Graph>& path_processor)
-  {
-      typedef typename Graph::EdgeId EdgeId;
-      typedef vector<EdgeId> Path;
-
-      //PathProcessor<Graph> path_processor(g,
-                                          //min_dist - g.length(e1),
-                                          //max_dist - g.length(e1),
-          //g.EdgeEnd(e1), g.EdgeStart(e2), callback);
-
-      omnigraph::PathStorageCallback<Graph> callback(g);
-      int error_code = path_processor.Process(g.EdgeStart(e2), min_dist - g.length(e1),
-                                              max_dist - g.length(e1), callback);
-      vector<Path> paths = callback.paths();
-
-      vector<EdgeId> result;
-      if (error_code != 0) {
-        DEBUG("Edge " << g.int_id(e1) << " path_processor problem")
-        return result;
-      }
-      if (paths.size() == 0)
-        return result;
-      if (paths.size() == 1)
-        return paths[0];
-      size_t j = 0;
-      while (j < paths[0].size()) {
-        for (size_t i = 1;  i < paths.size(); ++i) {
-          if (j == paths[i].size()) {
-            vector<EdgeId> result(paths[0].begin()+(paths[0].size() - j), paths[0].end());
-            return result;
-          } else {
-            if (paths[0][paths[0].size()-1-j] != paths[i][paths[i].size()-1-j]) {
-              vector<EdgeId> result(paths[0].begin()+(paths[0].size() - j), paths[0].end());
-              return result;
-            }
-          }
-        }
-        ++j;
-      }
-      return paths[0];
-    }
-
-
-
-  template<class Graph>
-    vector<vector<typename Graph::EdgeId> > GetAllPathsBetweenEdges(
-        const Graph& g,
-        typename Graph::EdgeId& e1,
-        typename Graph::EdgeId& e2, size_t min_dist,
-        size_t max_dist) {
-      omnigraph::PathStorageCallback<Graph> callback(g);
-      ProcessPaths(g,
-          min_dist,
-          max_dist, //0, *cfg::get().ds.IS - K + size_t(*cfg::get().ds.is_var),
-          g.EdgeEnd(e1), g.EdgeStart(e2),
-          callback);
-      auto paths = callback.paths();
-      return paths;
-    }
-
-template<class graph_pack>
-size_t GetAllPathsQuantity(const graph_pack& origin_gp,
-                           const typename graph_pack::graph_t::EdgeId& e1,
-                           const typename graph_pack::graph_t::EdgeId& e2, double d, double is_var) {
-  omnigraph::PathStorageCallback<typename graph_pack::graph_t> callback(origin_gp.g);
-  omnigraph::PathProcessor<typename graph_pack::graph_t>
-      path_processor(origin_gp.g,
-                     (size_t) d - origin_gp.g.length(e1) - size_t(is_var),
-                     (size_t) d - origin_gp.g.length(e1) + size_t(is_var),
-                     origin_gp.g.EdgeEnd(e1), 
-                     origin_gp.g.EdgeStart(e2),
-                     callback);
-  path_processor.Process();
-  auto paths = callback.paths();
-  TRACE(e1.ind_id() << " " << e2.int_id() << " " << paths.size());
-  return paths.size();
-}
-
-template<class Graph>
-Sequence MergeSequences(const Graph& g,
-                        const vector<typename Graph::EdgeId>& continuous_path) {
-    vector < Sequence > path_sequences;
-    path_sequences.push_back(g.EdgeNucls(continuous_path[0]));
-    for (size_t i = 1; i < continuous_path.size(); ++i) {
-        VERIFY(
-                g.EdgeEnd(continuous_path[i - 1])
-                == g.EdgeStart(continuous_path[i]));
-        path_sequences.push_back(g.EdgeNucls(continuous_path[i]));
-    }
-    return MergeOverlappingSequences(path_sequences, g.k());
-}
-
-template<class Graph>
-Sequence PathSequence(const Graph& g, const omnigraph::Path<typename Graph::EdgeId>& path) {
-    Sequence path_sequence = MergeSequences(g, path.sequence());
-    size_t start = path.start_pos();
-    size_t end = path_sequence.size()
-                 - g.length(path[path.size() - 1]) + path.end_pos();
-    return path_sequence.Subseq(start, end);
-}
-
-}
diff --git a/src/modules/data_structures/sequence/CMakeLists.txt b/src/modules/data_structures/sequence/CMakeLists.txt
deleted file mode 100644
index f465519..0000000
--- a/src/modules/data_structures/sequence/CMakeLists.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-############################################################################
-# Copyright (c) 2015 Saint Petersburg State University
-# Copyright (c) 2011-2014 Saint Petersburg Academic University
-# All Rights Reserved
-# See file LICENSE for details.
-############################################################################
-
-project(sequence CXX)
-
-add_library(sequence STATIC genome_storage.cpp)
diff --git a/src/modules/data_structures/sequence/genome_storage.hpp b/src/modules/data_structures/sequence/genome_storage.hpp
deleted file mode 100644
index 401576d..0000000
--- a/src/modules/data_structures/sequence/genome_storage.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-//
-// Created by lab42 on 8/19/15.
-//
-
-#ifndef GENOME_STORAGE_HPP_
-#define GENOME_STORAGE_HPP_
-
-#include <string>
-#include "data_structures/sequence/sequence.hpp"
-namespace debruijn_graph {
-    class GenomeStorage {
-    private:
-        std::string s_;
-    public:
-        GenomeStorage():s_(""){
-        }
-
-        GenomeStorage(const std::string &s): s_(s){
-        }
-
-        Sequence GetSequence() const;
-        void SetSequence(const Sequence &s);
-        std::string str() const;
-        size_t size() const;
-    };
-}
-#endif //PROJECT_GENOME_STORAGE_HPP
diff --git a/src/modules/dev_support/CMakeLists.txt b/src/modules/dev_support/CMakeLists.txt
deleted file mode 100644
index d719227..0000000
--- a/src/modules/dev_support/CMakeLists.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-############################################################################
-# Copyright (c) 2015 Saint Petersburg State University
-# Copyright (c) 2011-2014 Saint Petersburg Academic University
-# All Rights Reserved
-# See file LICENSE for details.
-############################################################################
-
-project(dev_support CXX)
-
-add_library(dev_support STATIC
-            copy_file.cpp
-            path_helper.cpp
-            logger/logger_impl.cpp)
diff --git a/src/modules/dev_support/func.hpp b/src/modules/dev_support/func.hpp
deleted file mode 100644
index 5a8343c..0000000
--- a/src/modules/dev_support/func.hpp
+++ /dev/null
@@ -1,69 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#pragma once
-
-#include <functional>
-
-namespace func {
-
-//to use with std::function-s
-template<class T>
-void Compose(T t, std::function<void(T)> f1,
-        std::function<void(T)> f2) {
-    if (f1)
-        f1(t);
-    if (f2)
-        f2(t);
-}
-
-template<class T>
-std::function<void(T)> Composition(std::function<void(T)> f1,
-                                     std::function<void(T)> f2) {
-    return std::bind(func::Compose<T>, std::placeholders::_1, f1, f2);
-}
-
-template<class A, class B>
-class Func {
-public:
-    typedef std::function<B(A)> function_t;
-
-    virtual B Apply(A a) const = 0;
-
-    virtual ~Func() {
-    }
-};
-
-template<class T>
-class AndOperator;
-
-template<class T>
-class OrOperator;
-
-template<class T>
-class NotOperator;
-
-template<class T>
-class Predicate: public Func<T, bool> {
-public:
-    typedef T checked_type;
-
-    bool Apply(T t) const {
-        return Check(t);
-    }
-
-    virtual bool Check(T t) const = 0;
-
-    bool operator()(T t) const { return Check(t); }
-    
-
-    virtual ~Predicate() {
-    }
-};
-
-
-}
diff --git a/src/modules/io/dataset_support/dataset_readers.hpp b/src/modules/io/dataset_support/dataset_readers.hpp
deleted file mode 100644
index 5d56151..0000000
--- a/src/modules/io/dataset_support/dataset_readers.hpp
+++ /dev/null
@@ -1,122 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#pragma once
-
-#include "dev_support/logger/logger.hpp"
-#include "dev_support/simple_tools.hpp"
-#include "io/reads_io/io_helper.hpp"
-#include "pipeline/library.hpp"
-
-#include "pipeline/config_struct.hpp"
-
-namespace debruijn_graph {
-
-inline
-io::PairedStreamPtr paired_easy_reader(const io::SequencingLibrary<config::DataSetData> &lib,
-                                       bool followed_by_rc,
-                                       size_t insert_size,
-                                       bool change_read_order = false,
-                                       bool use_orientation = true,
-                                       io::OffsetType offset_type = io::PhredOffset) {
-  io::ReadStreamList<io::PairedRead> streams;
-  for (auto read_pair : lib.paired_reads()) {
-      streams.push_back(io::PairedEasyStream(read_pair.first, read_pair.second, followed_by_rc, insert_size, change_read_order,
-                                             use_orientation, lib.orientation(), offset_type));
-  }
-  return io::MultifileWrap<io::PairedRead>(streams);
-}
-
-inline
-io::ReadStreamList<io::SingleRead> single_easy_readers(const io::SequencingLibrary<config::DataSetData> &lib,
-                                       bool followed_by_rc,
-                                       bool including_paired_reads,
-                                       bool handle_Ns = true,
-                                       io::OffsetType offset_type = io::PhredOffset) {
-  io::ReadStreamList<io::SingleRead> streams;
-  if (including_paired_reads) {
-    for (const auto& read : lib.reads()) {
-      //do we need input_file function here?
-      streams.push_back(io::EasyStream(read, followed_by_rc, handle_Ns, offset_type));
-    }
-  } else {
-    for (const auto& read : lib.single_reads()) {
-      streams.push_back(io::EasyStream(read, followed_by_rc, handle_Ns, offset_type));
-    }
-  }
-  return streams;
-}
-
-inline
-io::SingleStreamPtr single_easy_reader(const io::SequencingLibrary<config::DataSetData> &lib,
-                                       bool followed_by_rc,
-                                       bool including_paired_reads,
-                                       bool handle_Ns = true,
-                                       io::OffsetType offset_type = io::PhredOffset) {
-  return io::MultifileWrap<io::SingleRead>(
-          single_easy_readers(lib, followed_by_rc, including_paired_reads, handle_Ns, offset_type));
-}
-
-inline
-io::PairedStreamPtr paired_easy_reader_for_libs(std::vector<size_t> libs,
-                                                bool followed_by_rc,
-                                                size_t insert_size,
-                                                bool change_read_order = false,
-                                                bool use_orientation = true,
-                                                io::OffsetType offset_type = io::PhredOffset) {
-  io::ReadStreamList<io::PairedRead> streams;
-  for (size_t i = 0; i < libs.size(); ++i) {
-    streams.push_back(paired_easy_reader(cfg::get().ds.reads[libs[i]],
-                                         followed_by_rc, insert_size, change_read_order, use_orientation, offset_type));
-  }
-  return io::MultifileWrap<io::PairedRead>(streams);
-}
-
-
-inline
-io::PairedStreamPtr paired_easy_reader(bool followed_by_rc,
-                                       size_t insert_size,
-                                       bool change_read_order = false,
-                                       bool use_orientation = true,
-                                       io::OffsetType offset_type = io::PhredOffset) {
-
-  std::vector<size_t> all_libs(cfg::get().ds.reads.lib_count());
-  for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i)
-    all_libs[i] = i;
-
-  // FIXME: Should we use only first library?
-  // No, this one is for all libs together
-  return paired_easy_reader_for_libs(all_libs, followed_by_rc, insert_size, change_read_order, use_orientation, offset_type);
-}
-
-
-inline
-io::SingleStreamPtr single_easy_reader_for_libs(vector<size_t> libs,
-                                                bool followed_by_rc,
-                                                bool including_paired_reads,
-                                                io::OffsetType offset_type = io::PhredOffset) {
-  io::ReadStreamList<io::SingleRead> streams;
-  for (size_t i = 0; i < libs.size(); ++i) {
-    streams.push_back(single_easy_reader(cfg::get().ds.reads[libs[i]],
-                                         followed_by_rc, including_paired_reads, offset_type));
-  }
-  return io::MultifileWrap<io::SingleRead>(streams);
-}
-
-inline
-io::SingleStreamPtr single_easy_reader(bool followed_by_rc,
-                                       bool including_paired_reads,
-                                       io::OffsetType offset_type = io::PhredOffset) {
-
-  std::vector<size_t> all_libs(cfg::get().ds.reads.lib_count());
-  for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i)
-    all_libs[i] = i;
-
-  return single_easy_reader_for_libs(all_libs, followed_by_rc, including_paired_reads, offset_type);
-}
-
-}
diff --git a/src/modules/io/reads_io/binary_streams.hpp b/src/modules/io/reads_io/binary_streams.hpp
deleted file mode 100644
index d7679f2..0000000
--- a/src/modules/io/reads_io/binary_streams.hpp
+++ /dev/null
@@ -1,357 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#pragma once
-
-#include <fstream>
-
-#include "dev_support/verify.hpp"
-#include "ireader.hpp"
-#include "io/reads/single_read.hpp"
-#include "io/reads/paired_read.hpp"
-
-namespace io {
-
-// == Deprecated classes ==
-// Use FileReadStream and InsertSizeModyfing instead
-
-class BinaryFileSingleStream: public PredictableReadStream<SingleReadSeq> {
-private:
-    std::ifstream stream_;
-    ReadStreamStat read_stat_;
-    size_t current_;
-
-public:
-
-    BinaryFileSingleStream(const std::string& file_name_prefix, size_t file_num) {
-        std::string fname;
-        fname = file_name_prefix + "_" + ToString(file_num) + ".seq";
-        stream_.open(fname.c_str(), std::ios_base::binary | std::ios_base::in);
-
-        reset();
-    }
-
-    virtual bool is_open() {
-        return stream_.is_open();
-    }
-
-    virtual bool eof() {
-        return current_ == read_stat_.read_count_;
-    }
-
-    virtual BinaryFileSingleStream& operator>>(SingleReadSeq& read) {
-        read.BinRead(stream_);
-        VERIFY(current_ < read_stat_.read_count_);
-
-        ++current_;
-        return *this;
-    }
-
-    virtual void close() {
-        current_ = 0;
-        stream_.close();
-    }
-
-    virtual void reset() {
-        stream_.clear();
-        stream_.seekg(0);
-        VERIFY(stream_.good());
-        read_stat_.read(stream_);
-        current_ = 0;
-    }
-
-    virtual size_t size() const {
-        return read_stat_.read_count_;
-    }
-
-    virtual ReadStreamStat get_stat() const {
-        return read_stat_;
-    }
-
-};
-
-class BinaryFilePairedStream: public PredictableReadStream<PairedReadSeq> {
-
-private:
-    std::ifstream stream_;
-
-    size_t insert_size_;
-
-    ReadStreamStat read_stat_;
-
-    size_t current_;
-
-
-public:
-
-    BinaryFilePairedStream(const std::string& file_name_prefix, size_t file_num, size_t insert_szie): stream_(), insert_size_ (insert_szie) {
-        std::string fname;
-        fname = file_name_prefix + "_" + ToString(file_num) + ".seq";
-        stream_.open(fname.c_str(), std::ios_base::binary | std::ios_base::in);
-
-        reset();
-    }
-
-    virtual bool is_open() {
-        return stream_.is_open();
-    }
-
-    virtual bool eof() {
-        return current_ >= read_stat_.read_count_;
-    }
-
-    virtual BinaryFilePairedStream& operator>>(PairedReadSeq& read) {
-        read.BinRead(stream_, insert_size_);
-        VERIFY(current_ < read_stat_.read_count_);
-
-        ++current_;
-        return *this;
-    }
-
-    virtual void close() {
-        current_ = 0;
-        stream_.close();
-    }
-
-
-    virtual void reset() {
-        stream_.clear();
-        stream_.seekg(0);
-        VERIFY(stream_.good());
-        read_stat_.read(stream_);
-        current_ = 0;
-    }
-
-    virtual size_t size() const {
-        return read_stat_.read_count_;
-    }
-
-    ReadStreamStat get_stat() const {
-        ReadStreamStat stat = read_stat_;
-        stat.read_count_ *= 2;
-        return stat;
-    }
-};
-
-
-//template <class Read>
-//class FileReadStream: public io::PredictableIReader<Read> {
-//
-//private:
-//    std::ifstream stream_;
-//
-//    ReadStat read_stat_;
-//
-//    size_t current_;
-//
-//public:
-//
-//    FileReadStream(const std::string& file_name_prefix, size_t file_num) {
-//        std::string fname;
-//        fname = file_name_prefix + "_" + ToString(file_num) + ".seq";
-//        stream_.open(fname.c_str(), std::ios_base::binary | std::ios_base::in);
-//
-//        reset();
-//    }
-//
-//    virtual ~FileReadStream() {
-//        if (stream_.is_open()) {
-//            stream_.close();
-//        }
-//    }
-//
-//    virtual bool is_open() {
-//        return stream_.is_open();
-//    }
-//
-//    virtual bool eof() {
-//        return current_ == read_stat_.read_count_;
-//    }
-//
-//    virtual FileReadStream& operator>>(Read& read) {
-//        read.BinRead(stream_);
-//        VERIFY(current_ < read_stat_.read_count_);
-//
-//        ++current_;
-//        return *this;
-//    }
-//
-//    virtual void close() {
-//        current_ = 0;
-//        stream_.close();
-//    }
-//
-//    virtual void reset() {
-//        stream_.clear();
-//        stream_.seekg(0);
-//        VERIFY(stream_.good());
-//        read_stat_.read(stream_);
-//        current_ = 0;
-//    }
-//
-//    virtual size_t size() const {
-//        return read_stat_.read_count_;
-//    }
-//
-//    virtual ReadStat get_stat() const {
-//        return read_stat_;
-//    }
-//};
-
-//template <class Read>
-//class ReadBufferedStream: public io::PredictableIReader<Read> {
-//
-//private:
-//    std::vector<Read> * data_;
-//
-//    ReadStat read_stat_;
-//
-//    size_t current_;
-//
-//public:
-//
-//    ReadBufferedStream(io::PredictableIReader<Read>& stream) {
-//        read_stat_ = stream.get_stat();
-//        data_ = new std::vector<Read>(read_stat_.read_count_);
-//
-//        size_t i = 0;
-//        while (!stream.eof()) {
-//            stream >> (*data_)[i++];
-//        }
-//
-//        reset();
-//    }
-//
-//    virtual ~ReadBufferedStream() {
-//        delete data_;
-//    }
-//
-//    virtual bool is_open() {
-//        return true;
-//    }
-//
-//    virtual bool eof() {
-//        return current_ == read_stat_.read_count_;
-//    }
-//
-//    virtual ReadBufferedStream& operator>>(Read& read) {
-//        read = (*data_)[current_];
-//        VERIFY(current_ < read_stat_.read_count_);
-//
-//        ++current_;
-//        return *this;
-//    }
-//
-//    virtual void close() {
-//        current_ = 0;
-//    }
-//
-//    virtual void reset() {
-//        current_ = 0;
-//    }
-//
-//    virtual size_t size() const {
-//        return read_stat_.read_count_;
-//    }
-//
-//    virtual ReadStat get_stat() const {
-//        return read_stat_;
-//    }
-//};
-
-//class SeqSingleReadStreamWrapper: public Reader<SingleReadSeq> {
-//
-//private:
-//    io::IReader<io::PairedReadSeq>& stream_;
-//
-//    PairedReadSeq current_read_;
-//
-//    bool is_read_;
-//
-//public:
-//
-//    SeqSingleReadStreamWrapper(io::IReader<io::PairedReadSeq>& stream): stream_(stream), current_read_(), is_read_(false)  {
-//    }
-//
-//    virtual ~SeqSingleReadStreamWrapper() {}
-//
-//    virtual bool is_open() {
-//        return stream_.is_open();
-//    }
-//
-//    virtual bool eof() {
-//        return stream_.eof() && !is_read_;
-//    }
-//
-//    virtual SeqSingleReadStreamWrapper& operator>>(io::SingleReadSeq& read) {
-//        if (!is_read_) {
-//            stream_ >> current_read_;
-//            read = current_read_.first();
-//        } else {
-//            read = current_read_.second();
-//        }
-//        is_read_ = !is_read_;
-//        return *this;
-//    }
-//
-//    virtual void close() {
-//        stream_.close();
-//    }
-//
-//    virtual void reset() {
-//        stream_.reset();
-//        is_read_ = false;
-//    }
-//
-//    virtual ReadStat get_stat() const {
-//        return stream_.get_stat();
-//    }
-//};
-
-//class InsertSizeModifyingWrapper: public io::IReader<io::PairedReadSeq> {
-//
-//private:
-//    io::IReader<io::PairedReadSeq>& stream_;
-//
-//    size_t insert_size_;
-//
-//public:
-//
-//    InsertSizeModifyingWrapper(io::IReader<io::PairedReadSeq>& stream, size_t insert_szie): stream_(stream), insert_size_ (insert_szie) {
-//    }
-//
-//    virtual ~InsertSizeModifyingWrapper() {
-//    }
-//
-//    virtual bool is_open() {
-//        return stream_.is_open();
-//    }
-//
-//    virtual bool eof() {
-//        return stream_.eof();
-//    }
-//
-//    virtual InsertSizeModifyingWrapper& operator>>(io::PairedReadSeq& read) {
-//        stream_ >> read;
-//        read.inc_insert_size(insert_size_);
-//        return *this;
-//    }
-//
-//    virtual void close() {
-//        stream_.close();
-//    }
-//
-//    virtual void reset() {
-//        stream_.reset();
-//    }
-//
-//    virtual ReadStat get_stat() const {
-//        return stream_.get_stat();
-//    }
-//};
-
-}
diff --git a/src/modules/io/reads_io/cutting_reader_wrapper.hpp b/src/modules/io/reads_io/cutting_reader_wrapper.hpp
deleted file mode 100644
index 596329a..0000000
--- a/src/modules/io/reads_io/cutting_reader_wrapper.hpp
+++ /dev/null
@@ -1,135 +0,0 @@
-////***************************************************************************
-////* Copyright (c) 2011-2014 Saint-Petersburg Academic University
-////* All Rights Reserved
-////* See file LICENSE for details.
-////****************************************************************************
-//  todo remove!!!
-///**
-// * @file    cutting_reader_wrapper.hpp
-// * @author  Mariya Fomkina
-// * @version 1.0
-// *
-// * @section LICENSE
-// *
-// * This program is free software; you can redistribute it and/or
-// * modify it under the terms of the GNU General Public License as
-// * published by the Free Software Foundation; either version 2 of
-// * the License, or (at your option) any later version.
-// *
-// * @section DESCRIPTION
-// *
-// * CuttingReaderWrapper is the class-wrapper that reads only set
-// * number of reads from another reader.
-// */
-//
-//#ifndef COMMON_IO_CUTTINGREADERWRAPPER_HPP_
-//#define COMMON_IO_CUTTINGREADERWRAPPER_HPP_
-//
-//#include "io/ireader.hpp"
-//
-//namespace io {
-//
-//template<typename ReadType>
-//class CuttingReaderWrapper : public IReader<ReadType> {
-// public:
-//  /*
-//   * Default constructor.
-//   *
-//   * @param reader Reference to any other reader (child of IReader).
-//   * @param cut Number of reads to be read (-1 by default, i.e. all).
-//   */
-//  explicit CuttingReaderWrapper(IReader<ReadType>& reader,
-//                                size_t cut = -1)
-//      : reader_(reader), cut_(cut), read_(0) {
-//  }
-//
-//  /*
-//   * Default destructor.
-//   */
-//  /* virtual */ ~CuttingReaderWrapper() {
-//    close();
-//  }
-//
-//  /*
-//   * Check whether the stream is opened.
-//   *
-//   * @return true of the stream is opened and false otherwise.
-//   */
-//  /* virtual */ bool is_open() {
-//    return reader_.is_open();
-//  }
-//
-//  /*
-//   * Check whether we've reached the end of stream.
-//   *
-//   * @return true if the end of stream is reached and false
-//   * otherwise.
-//   */
-//  /* virtual */ bool eof() {
-//    return (read_ == cut_) || (reader_.eof());
-//  }
-//
-//  /*
-//   * Read SingleRead or PairedRead from stream (according to ReadType).
-//   *
-//   * @param read The SingleRead or PairedRead that will store read
-//   * data.
-//   *
-//   * @return Reference to this stream.
-//   */
-//  /* virtual */ CuttingReaderWrapper& operator>>(ReadType& read) {
-//    if (read_ < cut_) {
-//      reader_ >> read;
-//      ++read_;
-//    }
-//    return (*this);
-//  }
-//
-//  /*
-//   * Close the stream.
-//   */
-//  /* virtual */ void close() {
-//    reader_.close();
-//  }
-//
-//  /*
-//   * Close the stream and open it again.
-//   */
-//  /* virtual */ void reset() {
-//    read_ = 0;
-//    reader_.reset();
-//  }
-//
-//  ReadStat get_stat() const {
-//      return reader_.get_stat();
-//  }
-//
-// private:
-//  /*
-//   * @variable Internal stream readers.
-//   */
-//  IReader<ReadType>& reader_;
-//  /*
-//   * @variable Number of reads that are allowed to read (if it is less
-//   * than 0, all the reads in stream are allowed to be read).
-//   */
-//  size_t cut_;
-//  /*
-//   * @variable Number of reads that are read till the moment.
-//   */
-//  size_t read_;
-//
-//  /*
-//   * Hidden copy constructor.
-//   */
-//  explicit CuttingReaderWrapper(const CuttingReaderWrapper<ReadType>&
-//                                reader);
-//  /*
-//   * Hidden assign operator.
-//   */
-//  void operator=(const CuttingReaderWrapper<ReadType>& reader);
-//};
-//
-//}
-//
-//#endif /* COMMON_IO_CUTTINGREADERWRAPPER_HPP_ */
diff --git a/src/modules/io/reads_io/easy_reader.hpp b/src/modules/io/reads_io/easy_reader.hpp
deleted file mode 100644
index 98df7fb..0000000
--- a/src/modules/io/reads_io/easy_reader.hpp
+++ /dev/null
@@ -1,122 +0,0 @@
-////***************************************************************************
-////* Copyright (c) 2011-2014 Saint-Petersburg Academic University
-////* All Rights Reserved
-////* See file LICENSE for details.
-////****************************************************************************
-//
-//#pragma once
-//
-//#include "ireader.hpp"
-//#include "paired_readers.hpp"
-//#include "delegating_reader_wrapper.hpp"
-//#include "splitting_wrapper.hpp"
-//#include "rc_reader_wrapper.hpp"
-//#include "filtering_reader_wrapper.hpp"
-//#include "careful_filtering_reader_wrapper.hpp"
-//#include "single_read.hpp"
-//#include "io_helper.hpp"
-//
-//#include <memory>
-//
-//namespace io {
-//
-//////todo refactor, and maybe merge them once again
-////class EasyReader: public DelegatingReaderWrapper<SingleRead> {
-////    explicit EasyReader(const EasyReader& reader);
-////    void operator=(const EasyReader& reader);
-////
-////    Reader raw_reader_;
-//////    FilteringReaderWrapper<ReadType> filtered_reader_;
-////    CarefulFilteringReaderWrapper<SingleRead> filtered_reader_;
-////    RCReaderWrapper<SingleRead> rc_reader_;
-////
-////public:
-////    explicit EasyReader(const string& filename,
-////            bool followed_by_rc, OffsetType offset_type = PhredOffset) :
-////            raw_reader_(filename, offset_type), filtered_reader_(raw_reader_), rc_reader_(
-////                    filtered_reader_) {
-////        if (followed_by_rc) {
-////            Init(rc_reader_);
-////        } else {
-////            Init(filtered_reader_);
-////        }
-////    }
-////
-////    /*
-////     * Default destructor.
-////     */
-////    /* virtual */
-////    ~EasyReader() {
-////    }
-////
-////};
-////
-//////todo refactor, and maybe merge them once again
-////class EasySplittingReader: public DelegatingReaderWrapper<io::SingleRead> {
-////    explicit EasySplittingReader(const EasySplittingReader& reader);
-////    void operator=(const EasySplittingReader& reader);
-////
-////    Reader raw_reader_;
-//////    FilteringReaderWrapper<ReadType> filtered_reader_;
-////    SplittingWrapper splitting_reader_;
-////    RCReaderWrapper<io::SingleRead> rc_reader_;
-////
-////public:
-////    explicit EasySplittingReader(const io::SingleRead::FilenameType& filename,
-////            bool followed_by_rc, OffsetType offset_type = PhredOffset) :
-////            raw_reader_(filename, offset_type), splitting_reader_(raw_reader_), rc_reader_(
-////                    splitting_reader_) {
-////        if (followed_by_rc) {
-////            Init(rc_reader_);
-////        } else {
-////            Init(splitting_reader_);
-////        }
-////    }
-////
-////    /*
-////     * Default destructor.
-////     */
-////    /* virtual */
-////    ~EasySplittingReader() {
-////    }
-////
-////};
-//
-////class PairedEasyReader: public DelegatingReaderWrapper<io::PairedRead> {
-////    std::unique_ptr<IReader<io::PairedRead>> raw_reader_;
-////    CarefulFilteringReaderWrapper<io::PairedRead> filtered_reader_;
-////    RCReaderWrapper<io::PairedRead> rc_reader_;
-////
-////public:
-////    PairedEasyReader(const io::PairedRead::FilenamesType& filenames,
-////            bool followed_by_rc, size_t insert_size, bool change_read_order =
-////                    false, bool use_orientation = true, LibraryOrientation orientation = LibraryOrientation::FR,
-////                    OffsetType offset_type = PhredOffset) :
-////            raw_reader_(
-////                    new SeparateReader(filenames, insert_size,
-////                            change_read_order, use_orientation, orientation, offset_type)), filtered_reader_(
-////                    *raw_reader_), rc_reader_(filtered_reader_) {
-////        if (followed_by_rc) {
-////            Init(rc_reader_);
-////        } else {
-////            Init(filtered_reader_);
-////        }
-////    }
-////
-////    PairedEasyReader(const std::string& filename, bool followed_by_rc,
-////            size_t insert_size, bool change_read_order = false,
-////            bool use_orientation = true, LibraryOrientation orientation = LibraryOrientation::FR,
-////            OffsetType offset_type = PhredOffset) :
-////            raw_reader_(
-////                    new MixedReader(filename, insert_size, change_read_order,
-////                            use_orientation, orientation, offset_type)), filtered_reader_(
-////                    *raw_reader_), rc_reader_(filtered_reader_) {
-////        if (followed_by_rc) {
-////            Init(rc_reader_);
-////        } else {
-////            Init(filtered_reader_);
-////        }
-////    }
-////};
-//
-//}
diff --git a/src/modules/io/reads_io/is_corrupting_wrapper.hpp b/src/modules/io/reads_io/is_corrupting_wrapper.hpp
deleted file mode 100644
index f2993f3..0000000
--- a/src/modules/io/reads_io/is_corrupting_wrapper.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-////***************************************************************************
-////* Copyright (c) 2011-2014 Saint-Petersburg Academic University
-////* All Rights Reserved
-////* See file LICENSE for details.
-////****************************************************************************
-// todo remove!!!
-//#ifndef IS_CORRUPTING_WRAPPER_HPP_
-//#define IS_CORRUPTING_WRAPPER_HPP_
-//
-//namespace io {
-//
-//class ISCorruptingWrapper: public DelegatingReaderWrapper<PairedRead> {
-//private:
-//    const size_t is_;
-//public:
-//    typedef PairedRead ReadType;
-//
-//    explicit ISCorruptingWrapper(IReader<ReadType>& reader, size_t is) :
-//            DelegatingReaderWrapper<PairedRead>(reader), is_(is) {
-//    }
-//
-//    /* virtual */
-//    ISCorruptingWrapper& operator>>(ReadType& read) {
-//        (this->reader()) >> read;
-//        read = PairedRead(read.first(), read.second(), is_);
-//        return *this;
-//    }
-//
-//};
-//
-//}
-//
-//#endif /* IS_CORRUPTING_WRAPPER_HPP_ */
diff --git a/src/modules/paired_info/bwa_pair_info_filler.cpp b/src/modules/paired_info/bwa_pair_info_filler.cpp
deleted file mode 100644
index 6855138..0000000
--- a/src/modules/paired_info/bwa_pair_info_filler.cpp
+++ /dev/null
@@ -1,408 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#include "bwa_pair_info_filler.hpp"
-
-
-namespace bwa_pair_info {
-
-void MapperReadT::ParseCigar(const string& cigar) {
-    string num = "";
-    bool left_side = true;
-    for (size_t i = 0; i < cigar.length(); ++i) {
-        if (isdigit(cigar[i])) {
-            num += cigar[i];
-        }
-        else {
-            if (cigar[i] == 'H') {
-                if (left_side)
-                    left_hard_clip_ = (uint16_t) std::stoi(num);
-                else
-                    right_hard_clip_ = (uint16_t) std::stoi(num);
-                num = "";
-            }
-            else if (cigar[i] == 'S') {
-                if (left_side)
-                    left_soft_clip_ = (uint16_t) std::stoi(num);
-                else
-                    right_soft_clip_ = (uint16_t) std::stoi(num);
-                num = "";
-            }
-            else {
-                left_side = false;
-                num = "";
-            }
-        }
-    }
-}
-
-//Correct read algnment according to orientation and clippings
-void BWACorrectingProcessor::ProcessPairedRead(const MapperReadT& l, const MapperReadT& r) {
-    using io::LibraryOrientation;
-
-    if (!l.IsValid() || !r.IsValid()) {
-        return;
-    }
-    ++count_;
-
-    MappedPositionT left_pos(edge_id_map_.at(stoi(l.get_contig_id())), l.pos());
-    MappedPositionT right_pos(edge_id_map_.at(stoi(r.get_contig_id())), r.pos());
-
-    //This function if overloaded in BWAISCounter and BWAIndexFiller
-    if (!CheckAlignments(left_pos, right_pos)) {
-        return;
-    }
-
-    int r_from_pos_to_right_end = r.len() + r.right_hard_clip() - r.left_soft_clip();
-    int l_from_pos_to_left_end = l.left_soft_clip() + l.left_hard_clip();
-
-    if ((!l.is_forward() && (lib_.orientation() == LibraryOrientation::FF || lib_.orientation() == LibraryOrientation::FR)) ||
-        (l.is_forward() && (lib_.orientation() == LibraryOrientation::RF || lib_.orientation() == LibraryOrientation::RR))) {
-        left_pos.e = g_.conjugate(left_pos.e);
-        left_pos.pos = (int) g_.length(left_pos.e) - left_pos.pos - (l.len() - l.left_soft_clip() - l.right_soft_clip()) + (int) g_.k();
-        l_from_pos_to_left_end = l.right_soft_clip() + l.right_hard_clip();
-    }
-    if ((!r.is_forward() && (lib_.orientation() == LibraryOrientation::FF || lib_.orientation() == LibraryOrientation::RF)) ||
-        (r.is_forward() && (lib_.orientation() == LibraryOrientation::FR || lib_.orientation() == LibraryOrientation::RR))) {
-        right_pos.e = g_.conjugate(right_pos.e);
-        right_pos.pos = (int) g_.length(right_pos.e) - right_pos.pos - (r.len() - r.left_soft_clip() - r.right_soft_clip()) + (int) g_.k();
-        r_from_pos_to_right_end = r.len() + r.left_hard_clip() - r.right_soft_clip();
-    }
-
-    right_pos.pos = right_pos.pos + r_from_pos_to_right_end;
-    left_pos.pos = left_pos.pos - l_from_pos_to_left_end;
-
-    //This function if overloaded in BWAISCounter and BWAIndexFiller
-    ProcessAlignments(left_pos, right_pos);
-}
-
-// ==== insert size counter overloads ====
-bool BWAISCounter::CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) {
-    return l.e == r.e && g_.length(l.e) >= min_contig_len_;
-}
-
-void BWAISCounter::ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) {
-    ++mapped_count_;
-
-    int is = r.pos - l.pos;
-    if (is > 0 || !ignore_negative_) {
-        hist_[is] += 1;
-    } else {
-        ++negative_count_;
-    }
-}
-
-bool BWAISCounter::RefineInsertSize(SequencingLibraryT& reads) const {
-    using namespace omnigraph;
-    size_t correctly_mapped = mapped_count_ - negative_count_;
-    INFO(correctly_mapped << " paired reads (" << ((double) correctly_mapped * 100.0 / (double) count_) << "% of all) aligned to long edges");
-
-    if (negative_count_ > 3 * correctly_mapped)
-        WARN("Too much reads aligned with negative insert size. Is the library orientation set properly?");
-    if (mapped_count_ == 0)
-        return false;
-
-    std::map<size_t, size_t> percentiles;
-    find_mean(hist_, reads.data().mean_insert_size, reads.data().insert_size_deviation, percentiles);
-    find_median(hist_, reads.data().median_insert_size, reads.data().insert_size_mad, reads.data().insert_size_distribution);
-    if (reads.data().median_insert_size < reads.data().read_length) {
-        return false;
-    }
-
-    std::tie(reads.data().insert_size_left_quantile, reads.data().insert_size_right_quantile) =
-        GetISInterval(0.8, reads.data().insert_size_distribution);
-
-    return !reads.data().insert_size_distribution.empty();
-}
-
-// ==== pair info index filler overloads ====
-EdgePair BWAIndexFiller::ConjugatePair(EdgePair ep) const {
-    return make_pair(g_.conjugate(ep.second), g_.conjugate(ep.first));
-}
-
-void BWAIndexFiller::ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) {
-    EdgePair ep{l.e, r.e};
-    TRACE("Lpos " << l.pos << ", Rpos " << r.pos);
-    int edge_distance = (int) lib_.data().mean_insert_size  - r.pos + l.pos;
-    TRACE("Distance " << edge_distance);
-
-    paired_index_.Add(ep.first, ep.second, omnigraph::de::RawPoint(edge_distance, 1.0));
-}
-
-bool BWAIndexFiller::CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) {
-    return g_.length(l.e) >= min_contig_len_ && g_.length(r.e) >= min_contig_len_;
-}
-
-
-//Main class realization
-void BWAPairInfoFiller::OutputEdges(const string &filename) const {
-    io::osequencestream_simple oss(filename);
-    for (auto it = g_.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
-        debruijn_graph::EdgeId e = *it;
-        oss.set_header(ToString(g_.int_id(e)));
-        oss << g_.EdgeNucls(e);
-    }
-}
-void BWAPairInfoFiller::FillEdgeIdMap() {
-    for (auto it = g_.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
-        debruijn_graph::EdgeId e = *it;
-        edge_id_map_.insert(make_pair(g_.int_id(e), e));
-    }
-}
-
-bool BWAPairInfoFiller::CreateIndex(const string& contigs) {
-    int run_res = 0;
-    string err_log = path::append_path(work_dir_, "index.err");
-    string index_line = bwa_path_ + string(" index ") + "-a is " + contigs + " 2>" + err_log;
-    index_line = path::screen_whitespaces(index_line);
-    INFO("Running bwa index ... ");
-    INFO("Command line: " << index_line);
-    run_res = system(index_line.c_str());
-    if (run_res != 0) {
-        ERROR("bwa index failed, cannot align reads");
-        return false;
-    }
-    return true;
-}
-
-
-bool BWAPairInfoFiller::RunBWA(const string& reads_file, const string& out_sam_file) const {
-    string run_command = bwa_path_ + " mem -t " + ToString(nthreads_) + " " + index_base_ + " "  + reads_file + "  > " + out_sam_file + " 2>"
-        + out_sam_file + ".txt";
-    run_command = path::screen_whitespaces(run_command);
-    INFO("Running bwa mem ...");
-    INFO("Command line: " << run_command);
-
-    int run_res = system(run_command.c_str());
-    if (run_res != 0) {
-        ERROR("bwa mem failed, cannot align reads");
-        return false;
-    }
-    return true;
-}
-
-bool BWAPairInfoFiller::AlignLib(const SequencingLibraryT& lib,
-                                 const string& sam_file_base,
-                                 vector<pair<string, string>>& resulting_sam_files) {
-
-    VERIFY_MSG(Init(), "BWA index was not constructed properly");
-    resulting_sam_files.clear();
-    size_t file_index = 0;
-    bool any_aligned = false;
-
-    for (auto iter = lib.paired_begin(); iter != lib.paired_end(); iter++) {
-        string left_reads = iter->first;
-        string left_sam = sam_file_base + "_1_" + ToString(file_index) + ".sam";
-        bool res = RunBWA(left_reads, left_sam);
-        if (!res) {
-            WARN("Failed to align left reads " << left_reads);
-            continue;
-        }
-        string right_reads = iter->second;
-        string right_sam = sam_file_base + "_2_" + ToString(file_index) + ".sam";
-        res = RunBWA(right_reads, right_sam);
-        if (!res) {
-            WARN("Failed to align right reads " << right_reads);
-            continue;
-        }
-
-        resulting_sam_files.push_back(make_pair(left_sam, right_sam));
-        any_aligned = true;
-    }
-    return any_aligned;
-}
-
-
-void BWAPairInfoFiller::ProcessSAMFiles(const string &left_sam, const string &right_sam,
-                                        BWAPairedReadProcessor& processor) {
-
-    //Left and right reads are stored in maps until pair is detected
-    unordered_map<string, MapperReadT> left_reads;
-    unordered_map<string, MapperReadT> right_reads;
-    size_t counter = 0;
-    //Check for duplicating read IDs
-    bool left_duplicated = false;
-    bool right_duplicated = false;
-
-    INFO("Reading SAM files " << left_sam << " and " << right_sam);
-    MappedSamStream lf(left_sam);
-    MappedSamStream rf(right_sam);
-    while (!lf.eof() || !rf.eof()) {
-        SingleSamRead left_read;
-        MapperReadT left_data;
-        string l_name = "";
-
-        SingleSamRead right_read;
-        MapperReadT right_data;
-        string r_name = "";
-
-        if (!lf.eof()) {
-            lf >> left_read;
-            l_name = left_read.name();
-            if (left_read.is_properly_aligned()) {
-                TRACE("Left read " << l_name);
-                left_data = MapperReadT(string(lf.get_contig_name(left_read.contig_id())),
-                                        left_read.pos(),
-                                        left_read.data_len(),
-                                        left_read.strand(),
-                                        left_read.cigar());
-            }
-            else if (!left_read.is_main_alignment()) {
-                //If not primary alignment ignore mapping
-                TRACE("Ignoring left read");
-                l_name = "";
-            }
-        }
-        if (!rf.eof()) {
-            rf >> right_read;
-            r_name = right_read.name();
-            if (right_read.is_properly_aligned()) {
-                TRACE("Right read " << r_name);
-                right_data = MapperReadT(string(rf.get_contig_name(right_read.contig_id())),
-                                         right_read.pos(),
-                                         right_read.data_len(),
-                                         right_read.strand(),
-                                         right_read.cigar());
-            }
-            else if (!right_read.is_main_alignment()) {
-                //If not primary alignment ignore mapping
-                TRACE("Ignoring right read");
-                r_name = "";
-            }
-        }
-
-        //Think about custom read names
-        if (l_name == r_name) {
-            TRACE("Equal processing");
-            //Process immideately if ids are equal in both SAM entries
-            processor.ProcessPairedRead(left_data, right_data);
-            VERBOSE_POWER2(++counter, "Processed " << counter << " paired reads");
-            continue;
-        }
-
-        if (r_name != "") {
-            auto it = left_reads.find(r_name);
-            if (it != left_reads.end())  {
-                //Right read's mate found in map
-                TRACE("Right read's mate found, processing");
-                processor.ProcessPairedRead(it->second, right_data);
-                VERBOSE_POWER2(++counter, "Processed " << counter << " paired reads");
-                //Remove mate as used
-                left_reads.erase(it);
-            }
-            else {
-                TRACE("Right read's mate not found, adding to map");
-                if (right_reads.count(r_name) == 0) {
-                    //Insert read without mate for further analysis
-                    //TODO inspect map size and performance
-                    right_reads.emplace(r_name, right_data);
-                } else {
-                    DEBUG("Right read " << r_name << " is duplicated!");
-                    //Report duplication
-                    right_duplicated = true;
-                }
-            }
-        }
-
-        if (l_name != "") {
-            auto it = right_reads.find(l_name);
-            if (it != right_reads.end()) {
-                //Left read's mate found in map
-                TRACE("Left read's mate found, processing");
-                processor.ProcessPairedRead(left_data, it->second);
-                VERBOSE_POWER2(++counter, "Processed " << counter << " paired reads");
-                //Remove mate as used
-                right_reads.erase(it);
-            }
-            else {
-                TRACE("Left read's mate not found, adding to map");
-                if (left_reads.count(l_name) == 0) {
-                    //Insert read without mate for further analysis
-                    //TODO inspect map size and performance
-                    left_reads.emplace(l_name, left_data);
-                } else {
-                    DEBUG("Left read " << r_name << " is duplicated!");
-                    //Report duplication
-                    left_duplicated = true;
-                }
-
-            }
-        }
-    }
-
-    if (left_duplicated)
-        WARN("SAM file " << left_sam << " contains duplicated read ids");
-    if (right_duplicated)
-        WARN("SAM file " << right_sam << " contains duplicated read ids");
-}
-
-bool BWAPairInfoFiller::Init() {
-    if (!index_constructed_) {
-        INFO("Initializing bwa pair info counter, working dir " << work_dir_);
-        path::make_dir(base_dir_);
-        work_dir_ = path::make_temp_dir(base_dir_, "");
-        index_base_= path::append_path(work_dir_, "long_edges.fasta");
-        INFO("Saving edges to " << index_base_);
-        OutputEdges(index_base_);
-        FillEdgeIdMap();
-        index_constructed_ = CreateIndex(index_base_);
-    }
-    return index_constructed_;
-}
-
-bool BWAPairInfoFiller::ProcessLib(size_t lib_index,
-                                   SequencingLibraryT& lib,
-                                   PairedInfoIndexT& paired_index,
-                                   size_t counter_edge_len,
-                                   size_t index_filler_edge_len) {
-    //Initialize if needed
-    Init();
-    string lib_dir =  path::append_path(work_dir_, ToString(lib_index));
-    path::make_dir(lib_dir);
-    vector<pair<string, string>> sam_files;
-    bool result = false;
-
-    INFO("Mapping lib #" << lib_index << " using BWA");
-    if (!AlignLib(lib, path::append_path(lib_dir, "single"), sam_files)) {
-        WARN("Failed to align lib #" << lib_index);
-        return false;
-    }
-
-    INFO("Estimating insert size for library #" << lib_index);
-    BWAISCounter counter(lib, edge_id_map_, g_, counter_edge_len);
-    for (const auto& sam_pair : sam_files) {
-        ProcessSAMFiles(sam_pair.first, sam_pair.second, counter);
-    }
-
-    if (!counter.RefineInsertSize(lib)) {
-        lib.data().mean_insert_size = 0.0;
-        WARN("Unable to estimate insert size paired library #" << lib_index);
-    }
-    else {
-        INFO("  Estimated insert size for paired library #" << lib_index);
-        INFO("  Insert size = " << lib.data().mean_insert_size <<
-            ", deviation = " << lib.data().insert_size_deviation <<
-            ", left quantile = " << lib.data().insert_size_left_quantile <<
-            ", right quantile = " << lib.data().insert_size_right_quantile <<
-            ", read length = " << lib.data().read_length);
-
-        INFO("Collecting paired information for library #" << lib_index);
-        paired_index.Init();
-
-        BWAIndexFiller filler(lib, edge_id_map_, g_, paired_index, index_filler_edge_len);
-        for (const auto& sam_pair : sam_files) {
-            ProcessSAMFiles(sam_pair.first, sam_pair.second, filler);
-        }
-        result = true;
-    }
-    if (remove_tmp_files_)
-        path::remove_dir(lib_dir);
-    return result;
-}
-
-
-}
diff --git a/src/modules/paired_info/bwa_pair_info_filler.hpp b/src/modules/paired_info/bwa_pair_info_filler.hpp
deleted file mode 100644
index 438fafe..0000000
--- a/src/modules/paired_info/bwa_pair_info_filler.hpp
+++ /dev/null
@@ -1,253 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#include "assembly_graph/graph_core/graph.hpp"
-#include "pipeline/config_struct.hpp"
-
-#include <io/sam_io/sam_reader.hpp>
-#include <io/sam_io/read.hpp>
-
-#include <io/reads_io/osequencestream.hpp>
-#include <paired_info/paired_info.hpp>
-#include <paired_info/insert_size_refiner.hpp>
-
-#ifndef PROJECT_BWA_PAIR_INFO_FILLER_HPP_H
-#define PROJECT_BWA_PAIR_INFO_FILLER_HPP_H
-
-namespace bwa_pair_info {
-
-using namespace sam_reader;
-using debruijn_graph::EdgeId;
-
-typedef omnigraph::de::UnclusteredPairedInfoIndexT<debruijn_graph::Graph> PairedInfoIndexT;
-typedef io::SequencingLibrary<debruijn_graph::config::DataSetData> SequencingLibraryT;
-typedef std::pair<debruijn_graph::EdgeId, debruijn_graph::EdgeId> EdgePair;
-typedef unordered_map<size_t, debruijn_graph::EdgeId> EdgeIdMap;
-
-//More compact representation of aligned read for storing in map
-class MapperReadT {
-public:
-    MapperReadT(): contig_id_(""), pos_(-1), len_(-1), is_forward_(true),
-                   left_hard_clip_(0), right_hard_clip_(0), left_soft_clip_(0), right_soft_clip_(0){}
-
-    MapperReadT(const string& ctg_id, int32_t pos, int32_t len, bool is_forward, const string& cigar):
-        contig_id_(ctg_id), pos_(pos), len_(len), is_forward_(is_forward),
-        left_hard_clip_(0), right_hard_clip_(0), left_soft_clip_(0), right_soft_clip_(0) {
-
-        ParseCigar(cigar);
-    }
-
-    bool IsValid() const {
-        return contig_id_ != "";
-    }
-
-private:
-
-    void ParseCigar(const string& cigar);
-
-public:
-    const string &get_contig_id() const {
-        return contig_id_;
-    }
-    int32_t pos() const {
-        return pos_;
-    }
-    int32_t len() const {
-        return len_;
-    }
-    bool is_forward() const {
-        return is_forward_;
-    }
-    uint32_t left_soft_clip() const {
-        return left_soft_clip_;
-    }
-    uint32_t right_soft_clip() const {
-        return right_soft_clip_;
-    }
-    uint32_t left_hard_clip() const {
-        return left_hard_clip_;
-    }
-    uint32_t right_hard_clip() const {
-        return right_hard_clip_;
-    }
-
-private:
-    string contig_id_;
-    int32_t pos_;
-    int32_t len_;
-    bool is_forward_;
-    uint32_t left_hard_clip_:16, right_hard_clip_:16;
-    uint32_t left_soft_clip_:16, right_soft_clip_:16;
-};
-
-//Base class for aligned read processor (simple analog of SequenceMapperListener)
-class BWAPairedReadProcessor {
-public:
-    virtual void ProcessPairedRead(const MapperReadT& l, const MapperReadT& r) = 0;
-
-    virtual ~BWAPairedReadProcessor() {
-
-    }
-};
-
-//Class that corrects mapping positions according to lib orientation and clippings
-class BWACorrectingProcessor: public BWAPairedReadProcessor {
-protected:
-    const SequencingLibraryT& lib_;
-
-    const EdgeIdMap& edge_id_map_;
-
-    const debruijn_graph::Graph& g_;
-
-    size_t count_;
-
-public:
-
-    struct MappedPositionT {
-        EdgeId e;
-        int pos;
-
-        MappedPositionT(EdgeId e_, int pos_): e(e_), pos(pos_) {
-
-        }
-    };
-
-    BWACorrectingProcessor(const SequencingLibraryT& lib, const EdgeIdMap& edge_id_map, const debruijn_graph::Graph& g):
-        lib_(lib), edge_id_map_(edge_id_map), g_(g), count_(0) {
-    }
-
-    virtual bool CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) = 0;
-
-    virtual void ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) = 0;
-//Correct read algnment according to orientation and clippings
-    virtual void ProcessPairedRead(const MapperReadT& l, const MapperReadT& r);
-};
-
-//Insert size counter
-class BWAISCounter: public BWACorrectingProcessor {
-private:
-    HistType hist_;
-    size_t min_contig_len_;
-    bool ignore_negative_;
-    size_t mapped_count_;
-    size_t negative_count_;
-
-public:
-    BWAISCounter(const SequencingLibraryT& lib, const EdgeIdMap& edge_id_map, const debruijn_graph::Graph& g,
-                 size_t min_contig_len, bool ignore_negative = false):
-        BWACorrectingProcessor(lib, edge_id_map, g), hist_(), min_contig_len_(min_contig_len),
-        ignore_negative_(ignore_negative), mapped_count_(0), negative_count_(0) {
-    }
-
-    bool CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) override;
-
-    void ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) override;
-
-    bool RefineInsertSize(SequencingLibraryT& reads) const ;
-
-};
-
-//Pair info filler
-class BWAIndexFiller: public BWACorrectingProcessor {
-
-private:
-    PairedInfoIndexT& paired_index_;
-
-    size_t min_contig_len_;
-
-    EdgePair ConjugatePair(EdgePair ep) const;
-
-public:
-    BWAIndexFiller(const SequencingLibraryT& lib, const EdgeIdMap& edge_id_map, const debruijn_graph::Graph& g,
-                   PairedInfoIndexT& paired_index, size_t min_contig_len = 0):
-        BWACorrectingProcessor(lib, edge_id_map, g), paired_index_(paired_index), min_contig_len_(min_contig_len) {
-    }
-
-    bool CheckAlignments(const MappedPositionT& l, const MappedPositionT& r) override;
-
-    void ProcessAlignments(const MappedPositionT& l, const MappedPositionT& r) override;
-};
-
-//Class for running BWA, managing and parsing SAM files
-class BWAPairInfoFiller {
-public:
-    DECL_LOGGER("BWAPairInfo");
-
-private:
-    const debruijn_graph::Graph& g_;
-
-    string bwa_path_;
-
-    string base_dir_;
-
-    string work_dir_;
-
-    size_t nthreads_;
-
-    string index_base_;
-
-    bool index_constructed_;
-
-    bool remove_tmp_files_;
-
-    unordered_map<size_t, debruijn_graph::EdgeId> edge_id_map_;
-
-private:
-
-    //Save graph in fasta format
-    void OutputEdges(const string& filename) const;
-
-    //Construct int_id -> EdgeId map
-    void FillEdgeIdMap();
-
-    //Run bwa index
-    bool CreateIndex(const string& contigs);
-
-    //Initialize for read aligment (includes all above)
-    bool Init();
-
-    //Run bwa mem on single file
-    bool RunBWA(const string& reads_file, const string& out_sam_file) const;
-
-    //Process single read library
-    bool AlignLib(const SequencingLibraryT& lib,
-                      const string& sam_file_base,
-                      vector<pair<string, string>>& resulting_sam_files);
-
-    //Parse a pair of same files and analyze alignments with processor
-    void ProcessSAMFiles(const string &left_sam, const string &right_sam,
-                         BWAPairedReadProcessor& processor);
-
-public:
-
-    BWAPairInfoFiller(const debruijn_graph::Graph& g,
-                      const string& bwa_path,
-                      const string& work_dir,
-                      size_t nthreads = 1,
-                      bool remove_tmp = true):
-        g_(g), bwa_path_(bwa_path), base_dir_(work_dir), work_dir_(""),
-        nthreads_(nthreads), index_base_(""), index_constructed_(false),
-        remove_tmp_files_(remove_tmp),
-        edge_id_map_() {
-    }
-
-    ~BWAPairInfoFiller() {
-        if (remove_tmp_files_)
-            path::remove_if_exists(work_dir_);
-    }
-
-    //Count IS and fill pair info index for the given lib
-    bool ProcessLib(size_t lib_index,
-                    SequencingLibraryT& lib,
-                    PairedInfoIndexT& paired_index,
-                    size_t counter_edge_len,
-                    size_t index_filler_edge_len);
-};
-
-}
-
-#endif //PROJECT_BWA_PAIR_INFO_FILLER_HPP_H
diff --git a/src/modules/paired_info/pair_info_filler.hpp b/src/modules/paired_info/pair_info_filler.hpp
deleted file mode 100644
index 3d2ef1b..0000000
--- a/src/modules/paired_info/pair_info_filler.hpp
+++ /dev/null
@@ -1,119 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-/*
- * pair_info_filler.hpp
- *
- *  Created on: Oct 3, 2013
- *      Author: andrey
- */
-
-#ifndef PAIR_INFO_FILLER_HPP_
-#define PAIR_INFO_FILLER_HPP_
-
-#include "assembly_graph/graph_alignment/sequence_mapper_notifier.hpp"
-
-namespace debruijn_graph {
-
-/**
- * As for now it ignores sophisticated case of repeated consecutive
- * occurrence of edge in path due to gaps in mapping
- *
- * todo talk with Anton about simplification and speed-up of procedure with little quality loss
- */
-class LatePairedIndexFiller : public SequenceMapperListener {
-    typedef std::function<double(MappingRange, MappingRange)> WeightF;
-    typedef std::pair<EdgeId, EdgeId> EdgePair;
-public:
-    LatePairedIndexFiller(const Graph &graph, WeightF weight_f, omnigraph::de::UnclusteredPairedInfoIndexT<Graph>& paired_index)
-            : graph_(graph),
-              weight_f_(weight_f),
-              paired_index_(paired_index) {
-    }
-
-    virtual void StartProcessLibrary(size_t threads_count) {
-        paired_index_.Init();
-        buffer_pi_ = {graph_, threads_count};
-    }
-
-    virtual void StopProcessLibrary() {
-        for (size_t i = 0; i < buffer_pi_.size(); ++i)
-            MergeBuffer(i);
-
-        buffer_pi_.Clear();
-    }
-
-    virtual void ProcessPairedRead(size_t thread_index,
-                                   const io::PairedRead& r,
-                                   const MappingPath<EdgeId>& read1,
-                                   const MappingPath<EdgeId>& read2) {
-        ProcessPairedRead(buffer_pi_[thread_index], read1, read2, r.distance());
-    }
-
-    virtual void ProcessPairedRead(size_t thread_index,
-                                   const io::PairedReadSeq& r,
-                                   const MappingPath<EdgeId>& read1,
-                                   const MappingPath<EdgeId>& read2) {
-        ProcessPairedRead(buffer_pi_[thread_index], read1, read2, r.distance());
-    }
-
-    virtual void ProcessSingleRead(size_t,
-                                   const io::SingleReadSeq&,
-                                   const MappingPath<EdgeId>&) {}
-
-    virtual void ProcessSingleRead(size_t,
-                                   const io::SingleRead&,
-                                   const MappingPath<EdgeId>&) {}
-
-    virtual void MergeBuffer(size_t thread_index) {
-        paired_index_.Merge(buffer_pi_[thread_index]);
-        buffer_pi_[thread_index].Clear();
-    }
-
-    virtual ~LatePairedIndexFiller() {}
-
-private:
-    void ProcessPairedRead(omnigraph::de::PairedInfoBuffer<Graph>& paired_index,
-                           const MappingPath<EdgeId>& path1,
-                           const MappingPath<EdgeId>& path2, size_t read_distance) const {
-        for (size_t i = 0; i < path1.size(); ++i) {
-            std::pair<EdgeId, MappingRange> mapping_edge_1 = path1[i];
-            for (size_t j = 0; j < path2.size(); ++j) {
-                std::pair<EdgeId, MappingRange> mapping_edge_2 = path2[j];
-
-                EdgePair ep{mapping_edge_1.first, mapping_edge_2.first};
-
-
-                omnigraph::de::DEWeight weight =
-                        weight_f_(mapping_edge_1.second, mapping_edge_2.second);
-                size_t kmer_distance = read_distance
-                        + mapping_edge_2.second.initial_range.end_pos
-                        - mapping_edge_1.second.initial_range.start_pos;
-                int edge_distance = (int) kmer_distance
-                        + (int) mapping_edge_1.second.mapped_range.start_pos
-                        - (int) mapping_edge_2.second.mapped_range.end_pos;
-
-                paired_index.Add(mapping_edge_1.first, mapping_edge_2.first,
-                                         omnigraph::de::RawPoint(edge_distance, weight));
-            }
-        }
-    }
-
-private:
-    const Graph& graph_;
-    WeightF weight_f_;
-    omnigraph::de::UnclusteredPairedInfoIndexT<Graph>& paired_index_;
-    omnigraph::de::PairedInfoBuffersT<Graph> buffer_pi_;
-
-    DECL_LOGGER("LatePairedIndexFiller");
-};
-
-
-}
-
-
-#endif /* PAIR_INFO_FILLER_HPP_ */
diff --git a/src/modules/stages/simplification_pipeline/graph_simplification.hpp b/src/modules/stages/simplification_pipeline/graph_simplification.hpp
deleted file mode 100644
index 013443e..0000000
--- a/src/modules/stages/simplification_pipeline/graph_simplification.hpp
+++ /dev/null
@@ -1,1034 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-/*
- * graph_simplification.hpp
- *
- *  Created on: Aug 12, 2011
- *      Author: sergey
- */
-
-#pragma once
-
-#include "pipeline/config_struct.hpp"
-
-#include "algorithms/simplification/tip_clipper.hpp"
-#include "algorithms/simplification/complex_tip_clipper.hpp"
-#include "algorithms/simplification/bulge_remover.hpp"
-#include "algorithms/simplification/complex_bulge_remover.hpp"
-#include "algorithms/simplification/erroneous_connection_remover.hpp"
-#include "algorithms/simplification/relative_coverage_remover.hpp"
-#include "algorithms/simplification/mf_ec_remover.hpp"
-#include "algorithms/simplification/parallel_simplification_algorithms.hpp"
-#include "stages/simplification_pipeline/simplification_settings.hpp"
-#include "stages/simplification_pipeline/single_cell_simplification.hpp"
-
-#include "algorithms/graph_read_correction.hpp"
-
-#include "assembly_graph/graph_support/chimera_stats.hpp"
-#include "assembly_graph/graph_support/basic_edge_conditions.hpp"
-#include "assembly_graph/stats/picture_dump.hpp"
-#include "assembly_graph/graph_support/parallel_processing.hpp"
-#include "assembly_graph/graph_support/detail_coverage.hpp"
-
-#include "assembly_graph/graph_core/graph.hpp"
-
-#include "visualization/graph_colorer.hpp"
-#include "dev_support/standard_base.hpp"
-
-namespace debruijn {
-
-namespace simplification {
-
-//todo remove this line
-using namespace debruijn_graph;
-
-template<class Graph>
-using AlgoPtr = std::shared_ptr<omnigraph::PersistentAlgorithmBase<Graph>>;
-
-template<class Graph>
-using EdgeConditionT = pred::TypedPredicate<typename Graph::EdgeId>;
-
-template<class Graph>
-class ConditionParser {
-private:
-    typedef typename Graph::EdgeId EdgeId;
-
-    const Graph &g_;
-    string next_token_;
-    string input_;
-    const SimplifInfoContainer settings_;
-    size_t curr_iteration_;
-    size_t iteration_cnt_;
-    std::queue<string> tokenized_input_;
-
-    size_t max_length_bound_;
-    double max_coverage_bound_;
-
-    string ReadNext() {
-        if (!tokenized_input_.empty()) {
-            next_token_ = tokenized_input_.front();
-            tokenized_input_.pop();
-        } else {
-            next_token_ = "";
-        }
-        return next_token_;
-    }
-
-    template<typename T>
-    bool RelaxMax(T &cur_max, T t) {
-        if (t > cur_max) {
-            cur_max = t;
-            return true;
-        }
-        return false;
-    }
-
-    template<typename T>
-    bool RelaxMin(T &cur_min, T t) {
-        if (t < cur_min) {
-            cur_min = t;
-            return true;
-        }
-        return false;
-    }
-
-    double GetCoverageBound() {
-        if (next_token_ == "auto") {
-            return settings_.detected_coverage_bound();
-        } else {
-            return std::stod(next_token_);
-        }
-    }
-
-    pred::TypedPredicate<EdgeId> ParseCondition(size_t &min_length_bound,
-                                               double &min_coverage_bound) {
-        if (next_token_ == "tc_lb") {
-            double length_coeff = std::stod(ReadNext());
-
-            DEBUG("Creating tip length bound. Coeff " << length_coeff);
-            size_t length_bound = LengthThresholdFinder::MaxTipLength(
-                settings_.read_length(), g_.k(), length_coeff);
-
-            DEBUG("Length bound " << length_bound);
-
-            RelaxMin(min_length_bound, length_bound);
-            DEBUG("Min length bound - " << min_length_bound);
-            return LengthUpperBound<Graph>(g_, length_bound);
-
-        } else if (next_token_ == "rlmk") {
-            //Read length minus k
-            VERIFY_MSG(settings_.read_length() > g_.k(), "Read length was shorter than K");
-            DEBUG("Creating (rl - k) bound");
-            size_t length_bound = settings_.read_length() - g_.k();
-            RelaxMin(min_length_bound, length_bound);
-            DEBUG("Min length bound - " << min_length_bound);
-            return LengthUpperBound<Graph>(g_, length_bound);
-
-        } else if (next_token_ == "to_ec_lb") {
-            double length_coeff = std::stod(ReadNext());
-
-            DEBUG( "Creating length bound for erroneous connections originated from tip merging. Coeff " << length_coeff);
-            size_t length_bound =
-                    LengthThresholdFinder::MaxTipOriginatedECLength(
-                        settings_.read_length(), g_.k(), length_coeff);
-
-            DEBUG("Length bound " << length_bound);
-
-            RelaxMin(min_length_bound, length_bound);
-            DEBUG("Min length bound - " << min_length_bound);
-            return LengthUpperBound<Graph>(g_, length_bound);
-
-        } else if (next_token_ == "ec_lb") {
-            size_t length_coeff = std::stoll(ReadNext());
-
-            DEBUG("Creating ec length bound. Coeff " << length_coeff);
-            size_t length_bound =
-                    LengthThresholdFinder::MaxErroneousConnectionLength(
-                        g_.k(), length_coeff);
-
-            DEBUG("Length bound " << length_bound);
-
-            RelaxMin(min_length_bound, length_bound);
-            DEBUG("Min length bound - " << min_length_bound);
-            return LengthUpperBound<Graph>(g_, length_bound);
-        } else if (next_token_ == "lb") {
-            size_t length_bound = std::stoll(ReadNext());
-
-            DEBUG("Creating length bound. Value " << length_bound);
-
-            RelaxMin(min_length_bound, length_bound);
-            DEBUG("Min length bound - " << min_length_bound);
-            return LengthUpperBound<Graph>(g_, length_bound);
-        } else if (next_token_ == "cb") {
-            ReadNext();
-            double cov_bound = GetCoverageBound();
-            DEBUG("Creating coverage upper bound " << cov_bound);
-            RelaxMin(min_coverage_bound, cov_bound);
-            return CoverageUpperBound<Graph>(g_, cov_bound);
-        } else if (next_token_ == "icb") {
-            VERIFY(iteration_cnt_ != -1ul && curr_iteration_ != -1ul);
-            ReadNext();
-            double cov_bound = GetCoverageBound();
-            cov_bound = cov_bound / (double) iteration_cnt_ * (double) (curr_iteration_ + 1);
-            DEBUG("Creating iterative coverage upper bound " << cov_bound);
-            RelaxMin(min_coverage_bound, cov_bound);
-            return CoverageUpperBound<Graph>(g_, cov_bound);
-        } else if (next_token_ == "rctc") {
-            ReadNext();
-            DEBUG("Creating relative cov tip cond " << next_token_);
-            return RelativeCoverageTipCondition<Graph>(g_, std::stod(next_token_));
-        } else if (next_token_ == "disabled") {
-            DEBUG("Creating disabling condition");
-            return pred::AlwaysFalse<EdgeId>();
-        } else if (next_token_ == "mmm") {
-            ReadNext();
-            DEBUG("Creating max mismatches cond " << next_token_);
-            return MismatchTipCondition<Graph>(g_, std::stoll(next_token_));
-        } else {
-            VERIFY(false);
-            return pred::AlwaysTrue<EdgeId>();
-        }
-    }
-
-    pred::TypedPredicate<EdgeId> ParseConjunction(size_t &min_length_bound,
-                                                  double &min_coverage_bound) {
-        pred::TypedPredicate<EdgeId> answer = pred::AlwaysTrue<EdgeId>();
-        VERIFY(next_token_ == "{");
-        ReadNext();
-        while (next_token_ != "}") {
-            answer = pred::And(answer,
-                              ParseCondition(min_length_bound, min_coverage_bound));
-            ReadNext();
-        }
-        return answer;
-    }
-
-public:
-
-    ConditionParser(const Graph &g, string input, const SimplifInfoContainer &settings,
-                    size_t curr_iteration = -1ul, size_t iteration_cnt = -1ul)
-            : g_(g),
-              input_(input),
-              settings_(settings),
-              curr_iteration_(curr_iteration),
-              iteration_cnt_(iteration_cnt),
-              max_length_bound_(0),
-              max_coverage_bound_(0.) {
-        DEBUG("Creating parser for string " << input);
-        using namespace boost;
-        vector<string> tmp_tokenized_input;
-        boost::split(tmp_tokenized_input, input_, boost::is_any_of(" ,;"), boost::token_compress_on);
-        for (auto it = tmp_tokenized_input.begin();
-             it != tmp_tokenized_input.end(); ++it) {
-            tokenized_input_.push(*it);
-        }
-        ReadNext();
-    }
-
-    pred::TypedPredicate<EdgeId> operator()() {
-        DEBUG("Parsing");
-        pred::TypedPredicate<EdgeId> answer = pred::AlwaysFalse<EdgeId>();
-        VERIFY_MSG(next_token_ == "{", "Expected \"{\", but next token was " << next_token_);
-        while (next_token_ == "{") {
-            size_t min_length_bound = numeric_limits<size_t>::max();
-            double min_coverage_bound = numeric_limits<double>::max();
-            answer = pred::Or(answer,
-                             ParseConjunction(min_length_bound, min_coverage_bound));
-            RelaxMax(max_length_bound_, min_length_bound);
-            RelaxMax(max_coverage_bound_, min_coverage_bound);
-            ReadNext();
-        }
-        return answer;
-    }
-
-    size_t max_length_bound() const {
-        return max_length_bound_;
-    }
-
-    double max_coverage_bound() const {
-        return max_coverage_bound_;
-    }
-
-private:
-    DECL_LOGGER("ConditionParser");
-};
-
-//todo move to visualization
-template<class graph_pack>
-shared_ptr<omnigraph::visualization::GraphColorer<typename graph_pack::graph_t>> DefaultGPColorer(
-        const graph_pack &gp) {
-    auto mapper = MapperInstance(gp);
-    auto path1 = mapper->MapSequence(gp.genome.GetSequence()).path();
-    auto path2 = mapper->MapSequence(!gp.genome.GetSequence()).path();
-    return omnigraph::visualization::DefaultColorer(gp.g, path1, path2);
-}
-
-template<class Graph>
-class EditDistanceTrackingCallback {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::EdgeData EdgeData;
-    const Graph &g_;
-
-public:
-    EditDistanceTrackingCallback(const Graph &g)
-            : g_(g) {
-    }
-
-    bool operator()(EdgeId edge, const vector<EdgeId> &path) const {
-        vector<Sequence> path_sequences;
-        for (auto it = path.begin(); it != path.end(); ++it) {
-            path_sequences.push_back(g_.EdgeNucls(*it));
-        }
-        Sequence path_sequence(
-            MergeOverlappingSequences(path_sequences, g_.k()));
-        size_t dist = EditDistance(g_.EdgeNucls(edge), path_sequence);
-        TRACE( "Bulge sequences with distance " << dist << " were " << g_.EdgeNucls(edge) << " and " << path_sequence);
-        return true;
-    }
-
-private:
-    DECL_LOGGER("EditDistanceTrackingCallback");
-};
-
-//template<class Graph, class SmartEdgeIt>
-//bool ClipTips(
-//    Graph &g,
-//    SmartEdgeIt &it,
-//    const config::debruijn_config::simplification::tip_clipper &tc_config,
-//    const SimplifInfoContainer &info,
-//    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-//
-//    INFO("Clipping tips");
-//
-//    string condition_str = tc_config.condition;
-//
-//    ConditionParser<Graph> parser(g, condition_str, info);
-//    auto condition = parser();
-//
-//    omnigraph::EdgeRemovingAlgorithm<Graph> tc(g,
-//                                               omnigraph::AddTipCondition(g, condition),
-//                                               removal_handler, true);
-//
-//    TRACE("Tip length bound " << parser.max_length_bound());
-//    return tc.RunFromIterator(it,
-//                      make_shared<LengthUpperBound<Graph>>(g, parser.max_length_bound()));
-//}
-
-//template<class Graph>
-//bool ClipTips(
-//    Graph &g,
-//    const config::debruijn_config::simplification::tip_clipper &tc_config,
-//    const SimplifInfoContainer &info,
-//    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-//
-//    auto it = g.SmartEdgeBegin(LengthComparator<Graph>(g), true);
-//    return ClipTips(g, it, tc_config, info, removal_handler);
-//}
-
-//enabling tip projection, todo optimize if hotspot
-template<class gp_t>
-HandlerF<typename gp_t::graph_t> WrapWithProjectionCallback(
-    gp_t &gp,
-    HandlerF<typename gp_t::graph_t> removal_handler) {
-    typedef typename gp_t::graph_t Graph;
-    typedef typename Graph::EdgeId EdgeId;
-    TipsProjector<gp_t> tip_projector(gp);
-
-    HandlerF<Graph> projecting_callback = std::bind(&TipsProjector<gp_t>::ProjectTip,
-                                             tip_projector, std::placeholders::_1);
-
-    return func::Composition<EdgeId>(std::ref(removal_handler), projecting_callback);
-}
-
-template<class Graph, class InterestingEdgeFinder>
-class LowCoverageEdgeRemovingAlgorithm : public PersistentEdgeRemovingAlgorithm<Graph,
-                                                                                InterestingEdgeFinder, CoverageComparator<Graph>> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef PersistentEdgeRemovingAlgorithm<Graph, InterestingEdgeFinder, CoverageComparator<Graph>> base;
-    SimplifInfoContainer simplif_info_;
-    std::string condition_str_;
-    pred::TypedPredicate<EdgeId> remove_condition_;
-    pred::TypedPredicate<EdgeId> proceed_condition_;
-
-protected:
-
-    void PrepareIteration(size_t it_cnt, size_t total_it_estimate) override {
-        TRACE("Preparing iteration " << it_cnt << " out of total estimate " << total_it_estimate);
-        ConditionParser<Graph> parser(this->g(), condition_str_,
-                                      simplif_info_, it_cnt, total_it_estimate);
-        remove_condition_ = omnigraph::AddAlternativesPresenceCondition(this->g(), parser());
-        TRACE("Updated remove condition");
-        proceed_condition_ = CoverageUpperBound<Graph>(this->g(), parser.max_coverage_bound());
-        TRACE("Updated proceed condition up to coverage " << parser.max_coverage_bound());
-    }
-
-    bool Proceed(EdgeId e) const override {
-        return proceed_condition_(e);
-    }
-
-    bool ShouldRemove(EdgeId e) const override {
-        return remove_condition_(e);
-    }
-
-public:
-    LowCoverageEdgeRemovingAlgorithm(Graph &g,
-                                    const InterestingEdgeFinder &interest_edge_finder,
-                                    const SimplifInfoContainer &simplif_info,
-                                    const std::string &condition_str,
-                                    std::function<void(EdgeId)> removal_handler = nullptr,
-                                    bool canonical_only = false,
-                                    bool track_changes = true,
-                                    size_t total_iteration_estimate = -1ul)
-            : base(g, interest_edge_finder,
-                   removal_handler,
-                   canonical_only,
-                   CoverageComparator<Graph>(g),
-                   track_changes,
-                   total_iteration_estimate),
-            simplif_info_(simplif_info),
-            condition_str_(condition_str),
-            remove_condition_(pred::AlwaysFalse<EdgeId>()),
-            proceed_condition_(pred::AlwaysTrue<EdgeId>()) {}
-
-private:
-    DECL_LOGGER("LowCoverageEdgeRemovingAlgorithm");
-};
-
-template<class Graph>
-AlternativesAnalyzer<Graph> ParseBRConfig(const Graph &g,
-                                          const config::debruijn_config::simplification::bulge_remover &config) {
-    size_t max_length = LengthThresholdFinder::MaxBulgeLength(
-        g.k(), config.max_bulge_length_coefficient,
-        config.max_additive_length_coefficient);
-
-    DEBUG("Length bound " << max_length);
-
-    return AlternativesAnalyzer<Graph>(g, config.max_coverage,
-                                                    max_length,
-                                                    config.max_relative_coverage,
-                                                    config.max_delta,
-                                                    config.max_relative_delta,
-                                                    config.max_number_edges);
-}
-
-template<class Graph>
-AlgoPtr<Graph> SelfConjugateEdgeRemoverInstance(Graph &g, const string &condition_str,
-                const SimplifInfoContainer &info,
-                HandlerF<Graph> removal_handler = 0) {
-    ConditionParser<Graph> parser(g, condition_str, info);
-    auto condition = pred::And(SelfConjugateCondition<Graph>(g), parser());
-
-    return std::make_shared<ParallelEdgeRemovingAlgorithm<Graph>>(g,
-                                                                  condition,
-                                                                  info.chunk_cnt(),
-                                                                  removal_handler,
-                                                                  /*canonical_only*/true);
-}
-
-template<class Graph>
-bool RemoveRelativelyLowCoverageComponents(
-        Graph &g,
-        const FlankingCoverage<Graph> &flanking_cov,
-        const config::debruijn_config::simplification::relative_coverage_comp_remover &rcc_config,
-        const SimplifInfoContainer &info,
-        typename ComponentRemover<Graph>::HandlerF removal_handler = 0) {
-    if (rcc_config.enabled) {
-        INFO("Removing relatively low covered connections");
-        size_t connecting_path_length_bound = LengthThresholdFinder::MaxErroneousConnectionLength(
-            g.k(), rcc_config.max_ec_length_coefficient);
-
-        std::string pics_dir = "";
-
-        double max_coverage = math::ge(rcc_config.max_coverage_coeff, 0.)
-                                ? info.detected_coverage_bound() * rcc_config.max_coverage_coeff
-                                : std::numeric_limits<double>::max();
-
-        omnigraph::simplification::relative_coverage::
-            RelativeCoverageComponentRemover<Graph> rel_rem(
-                g,
-                std::bind(&FlankingCoverage<Graph>::LocalCoverage,
-                          std::cref(flanking_cov), std::placeholders::_1, std::placeholders::_2),
-                rcc_config.coverage_gap, size_t(double(info.read_length()) * rcc_config.length_coeff),
-                size_t(double(info.read_length()) * rcc_config.tip_allowing_length_coeff),
-                connecting_path_length_bound,
-                max_coverage,
-                removal_handler, rcc_config.vertex_count_limit, pics_dir);
-        return rel_rem.Run();
-    } else {
-        INFO("Removal of relatively low covered connections disabled");
-        return false;
-    }
-}
-
-template<class Graph>
-bool DisconnectRelativelyLowCoverageEdges(Graph &g,
-        const FlankingCoverage<Graph> &flanking_cov,
-        const config::debruijn_config::simplification::relative_coverage_edge_disconnector &rced_config) {
-    if (rced_config.enabled) {
-        INFO("Disconnecting edges with relatively low coverage");
-        omnigraph::simplification::relative_coverage::RelativeCoverageDisconnector<
-                Graph> disconnector(g, std::bind(&FlankingCoverage<Graph>::LocalCoverage,
-                                std::cref(flanking_cov), std::placeholders::_1,
-                                std::placeholders::_2), rced_config.diff_mult);
-        return disconnector.Run();
-    } else {
-        INFO("Disconnection of relatively low covered edges disabled");
-        return false;
-    }
-}
-
-template<class Graph>
-bool RemoveComplexBulges(
-    Graph &g,
-    config::debruijn_config::simplification::complex_bulge_remover cbr_config,
-    size_t /*iteration*/ = 0) {
-    if (!cbr_config.enabled)
-        return false;
-    INFO("Removing complex bulges");
-    size_t max_length = (size_t) ((double) g.k() * cbr_config.max_relative_length);
-    size_t max_diff = cbr_config.max_length_difference;
-    omnigraph::complex_br::ComplexBulgeRemover<Graph> complex_bulge_remover(
-        g, max_length, max_diff);
-    return complex_bulge_remover.Run();
-}
-
-//template<class Graph>
-//bool RemoveIsolatedEdges(Graph &g, size_t max_length, double max_coverage, size_t max_length_any_cov,
-//                 std::function<void(typename Graph::EdgeId)> removal_handler = 0, size_t chunk_cnt = 1) {
-//    typedef typename Graph::EdgeId EdgeId;
-//
-//    //todo add info that some other edges might be removed =)
-//    INFO("Removing isolated edges");
-//    INFO("All edges shorter than " << max_length_any_cov << " will be removed");
-//    INFO("Also edges shorter than " << max_length << " and coverage smaller than " << max_coverage << " will be removed");
-//    //todo add warn on max_length_any_cov > max_length
-//
-//    auto condition = func::And<EdgeId>(
-//            make_shared<IsolatedEdgeCondition<Graph>>(g),
-//            func::Or<EdgeId>(
-//                make_shared<LengthUpperBound<Graph>>(g, max_length_any_cov),
-//                func::And<EdgeId>(
-//                    make_shared<LengthUpperBound<Graph>>(g, max_length),
-//                    make_shared<CoverageUpperBound<Graph>>(g, max_coverage)
-//                )));
-//
-//    if (chunk_cnt == 1) {
-//        omnigraph::EdgeRemovingAlgorithm<Graph> removing_algo(g, condition, removal_handler);
-//
-//        return removing_algo.Run(LengthComparator<Graph>(g),
-//                                         make_shared<LengthUpperBound<Graph>>(g, std::max(max_length, max_length_any_cov)));
-//    } else {
-//        SemiParallelAlgorithmRunner<Graph, EdgeId> runner(g);
-//        SemiParallelEdgeRemovingAlgorithm<Graph> removing_algo(g, condition, removal_handler);
-//
-//        return RunEdgeAlgorithm(g, runner, removing_algo, chunk_cnt);
-//    }
-//}
-
-template<class Graph>
-bool ClipComplexTips(Graph &g, config::debruijn_config::simplification::complex_tip_clipper ctc_conf, const SimplifInfoContainer &info, HandlerF<Graph> removal_handler = 0) {
-    if (!ctc_conf.enabled) {
-        INFO("Complex tip clipping disabled");
-        return false;
-    }
-
-    std::function<void(set<EdgeId>)> set_removal_handler_f(0);
-    if (removal_handler) {
-        set_removal_handler_f = std::bind(
-            &omnigraph::simplification::SingleEdgeAdapter<set<EdgeId>>, std::placeholders::_1, removal_handler);
-    }
-
-    INFO("Complex tip clipping");
-
-    ConditionParser<Graph> parser(g, ctc_conf.condition, info);
-    parser();
-
-    ComplexTipClipper<Graph> tip_clipper(g, ctc_conf.max_relative_coverage, ctc_conf.max_edge_len, parser.max_length_bound(), "", set_removal_handler_f);
-    return tip_clipper.Run();
-}
-
-template<class Graph>
-AlgoPtr<Graph> ShortPolyATEdgesRemoverInstance(Graph &g, size_t max_length, HandlerF<Graph> removal_handler = 0, size_t chunk_cnt = 1) {
-    auto condition = pred::And(ATCondition<Graph>(g, 0.8, max_length, false), LengthUpperBound<Graph>(g, 1));
-    return std::make_shared<ParallelEdgeRemovingAlgorithm<Graph>>(g, condition, chunk_cnt, removal_handler, true);
-}
-
-template<class Graph>
-AlgoPtr<Graph> ATTipClipperInstance(Graph &g, HandlerF<Graph> removal_handler = 0, size_t chunk_cnt = 1) {
-//TODO: review params 0.8, 200?
-    return std::make_shared<ParallelEdgeRemovingAlgorithm<Graph>>(g, ATCondition<Graph>(g, 0.8, 200, true), chunk_cnt, removal_handler, true);
-}
-
-template<class Graph>
-AlgoPtr<Graph> IsolatedEdgeRemoverInstance(Graph &g,
-                                           config::debruijn_config::simplification::isolated_edges_remover ier,
-                                           const SimplifInfoContainer &info,
-                                           HandlerF<Graph> removal_handler = 0) {
-    if (!ier.enabled) {
-        return nullptr;
-    }
-    size_t max_length_any_cov = std::max(info.read_length(), ier.max_length_any_cov);
-
-    INFO("Removing isolated edges");
-    INFO("All isolated edges shorter than " << max_length_any_cov << " will be removed");
-    INFO("Also isolated edges shorter than " << ier.max_length << " and coverage smaller than " << ier.max_coverage << " will be removed");
-
-    auto condition = pred::And(IsolatedEdgeCondition<Graph>(g),
-                              pred::Or(LengthUpperBound<Graph>(g, max_length_any_cov),
-                                      pred::And(LengthUpperBound<Graph>(g, ier.max_length),
-                                               CoverageUpperBound<Graph>(g, ier.max_coverage))));
-
-    return std::make_shared<ParallelEdgeRemovingAlgorithm<Graph>>(g,
-                                                                  condition,
-                                                                  info.chunk_cnt(),
-                                                                  removal_handler,
-                                                                  /*canonical_only*/true);
-}
-
-template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId> NecessaryBulgeCondition(const Graph &g,
-                                                                    const config::debruijn_config::simplification::bulge_remover &br_config,
-                                                                    const SimplifInfoContainer&) {
-    auto analyzer = ParseBRConfig(g, br_config);
-    return omnigraph::NecessaryBulgeCondition(g, analyzer.max_length(), analyzer.max_coverage());
-}
-
-template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId> NecessaryTipCondition(const Graph &g,
-                                                                  const config::debruijn_config::simplification::tip_clipper &tc_config,
-                                                                  const SimplifInfoContainer &info) {
-    ConditionParser<Graph> parser(g, tc_config.condition, info);
-    auto condition = parser();
-    return omnigraph::NecessaryTipCondition(g, parser.max_length_bound(),
-                                            parser.max_coverage_bound());
-}
-
-template<class Graph>
-pred::TypedPredicate<typename Graph::EdgeId> NecessaryECCondition(const Graph &g,
-                                                                 const config::debruijn_config::simplification::erroneous_connections_remover &ec_config,
-                                                                 const SimplifInfoContainer &info,
-                                                                 size_t current_iteration = 0,
-                                                                 size_t iteration_cnt = 1) {
-    ConditionParser<Graph> parser(g, ec_config.condition, info, current_iteration, iteration_cnt);
-    auto condition = parser();
-    return omnigraph::NecessaryECCondition(g, parser.max_length_bound(),
-                                           parser.max_coverage_bound());
-}
-
-template<class Graph>
-AlgoPtr<Graph> ECRemoverInstance(Graph &g,
-                                 const config::debruijn_config::simplification::erroneous_connections_remover &ec_config,
-                                 const SimplifInfoContainer &info,
-                                 HandlerF<Graph> removal_handler,
-                                 size_t iteration_cnt = 1) {
-    if (ec_config.condition.empty())
-        return nullptr;
-
-    typedef omnigraph::ParallelInterestingElementFinder<Graph> InterestingFinderT;
-    InterestingFinderT interesting_finder(g,
-                                          NecessaryECCondition(g, ec_config, info, iteration_cnt - 1, iteration_cnt),
-                                          info.chunk_cnt());
-    return make_shared<LowCoverageEdgeRemovingAlgorithm<Graph, InterestingFinderT>>(
-            g, interesting_finder, info, ec_config.condition, removal_handler,
-            /*canonical only*/ true, /*track changes*/ true, iteration_cnt);
-}
-
-template<class Graph>
-AlgoPtr<Graph> RelativeECRemoverInstance(Graph &g,
-                                         const config::debruijn_config::simplification::relative_coverage_ec_remover &rcec_config,
-                                         const SimplifInfoContainer &info,
-                                         HandlerF<Graph> removal_handler,
-                                         size_t iteration_cnt = 1) {
-    if (!rcec_config.enabled)
-        return nullptr;
-
-    return make_shared<ParallelEdgeRemovingAlgorithm<Graph>>(g,
-            AddRelativeCoverageECCondition(g, rcec_config.rcec_ratio,
-                                           AddAlternativesPresenceCondition(g, pred::TypedPredicate<typename Graph::EdgeId>
-                                                   (LengthUpperBound<Graph>(g, rcec_config.max_ec_length)))),
-            info.chunk_cnt(), removal_handler, /*canonical_only*/true);
-}
-
-template<class Graph>
-AlgoPtr<Graph> NotBulgeECRemoverInstance(Graph &g,
-                                         const config::debruijn_config::simplification::erroneous_connections_remover &ec_config,
-                                         const SimplifInfoContainer &info, HandlerF<Graph> removal_handler,
-                                         size_t iteration_cnt = 1) {
-    if (ec_config.condition.empty())
-        return nullptr;
-
-    std::string curr_condition = ec_config.condition;
-    ConditionParser<Graph> parser(g, curr_condition, info, iteration_cnt - 1, iteration_cnt);
-    auto condition = parser();
-
-    typedef omnigraph::ParallelInterestingElementFinder<Graph> InterestingFinderT;
-    InterestingFinderT interesting_finder(g, AddNotBulgeECCondition(g, AddAlternativesPresenceCondition(g, pred::And(
-                                                  LengthUpperBound<Graph>(g, parser.max_length_bound()),
-                                                  CoverageUpperBound<Graph>(g, parser.max_coverage_bound())))),
-                                          info.chunk_cnt());
-    return make_shared<LowCoverageEdgeRemovingAlgorithm<Graph, InterestingFinderT>>(
-            g, interesting_finder, info, ec_config.condition, removal_handler,
-            /*canonical only*/ true, /*track changes*/ true, iteration_cnt);
-}
-
-template<class Graph>
-AlgoPtr<Graph> TipClipperInstance(Graph &g,
-                                  const EdgeConditionT<Graph> &condition,
-                                  const SimplifInfoContainer &info,
-                                  HandlerF<Graph> removal_handler,
-                                  bool track_changes = true,
-                                  size_t /*iteration_cnt*/ = 1) {
-    return make_shared<ParallelEdgeRemovingAlgorithm<Graph, LengthComparator<Graph>>>(g,
-                                                                        AddTipCondition(g, condition),
-                                                                        info.chunk_cnt(),
-                                                                        removal_handler,
-                                                                        /*canonical_only*/true,
-                                                                        LengthComparator<Graph>(g),
-                                                                        track_changes);
-}
-
-template<class Graph>
-AlgoPtr<Graph> TipClipperInstance(Graph &g,
-                                           const config::debruijn_config::simplification::tip_clipper &tc_config,
-                                           const SimplifInfoContainer &info,
-                                           HandlerF<Graph> removal_handler,
-                                           size_t iteration_cnt = 1) {
-    if (tc_config.condition.empty())
-        return nullptr;
-
-    ConditionParser<Graph> parser(g, tc_config.condition, info);
-    auto condition = parser();
-    return TipClipperInstance(g, condition, info, removal_handler, /*track changes*/true, iteration_cnt);
-}
-
-template<class Graph>
-AlgoPtr<Graph> DeadEndInstance(Graph &g,
-                               const config::debruijn_config::simplification::dead_end_clipper &dead_end_config,
-                               const SimplifInfoContainer &info,
-                               HandlerF<Graph> removal_handler,
-                               size_t /*iteration_cnt*/ = 1) {
-    if (!dead_end_config.enabled || dead_end_config.condition.empty())
-        return nullptr;
-
-    ConditionParser<Graph> parser(g, dead_end_config.condition, info);
-    auto condition = parser();
-    return make_shared<ParallelEdgeRemovingAlgorithm<Graph, LengthComparator<Graph>>>(g,
-            AddDeadEndCondition(g, condition), info.chunk_cnt(), removal_handler, /*canonical_only*/true,
-            LengthComparator<Graph>(g), /*track changes*/true);
-}
-
-template<class Graph>
-AlgoPtr<Graph> TopologyTipClipperInstance(
-    Graph &g,
-    const config::debruijn_config::simplification::topology_tip_clipper &ttc_config,
-    const SimplifInfoContainer &info,
-    HandlerF<Graph> removal_handler) {
-
-    auto condition
-            = pred::And(LengthUpperBound<Graph>(g,
-                                               LengthThresholdFinder::MaxTipLength(info.read_length(), g.k(), ttc_config.length_coeff)),
-                       DefaultUniquenessPlausabilityCondition<Graph>(g,
-                                                                     ttc_config.uniqueness_length, ttc_config.plausibility_length));
-
-    return TipClipperInstance(g,
-                              condition, info, removal_handler, /*track changes*/false);
-}
-
-template<class Graph>
-AlgoPtr<Graph> BRInstance(Graph &g,
-                          const config::debruijn_config::simplification::bulge_remover &br_config,
-                          const SimplifInfoContainer &info,
-                          HandlerF<Graph> removal_handler,
-                          size_t /*iteration_cnt*/ = 1) {
-    typedef ParallelInterestingElementFinder<Graph,
-                                    typename Graph::EdgeId> InterestingEdgeFinder;
-    if (!br_config.enabled || (br_config.main_iteration_only && !info.main_iteration())) {
-        return nullptr;
-    }
-
-    auto alternatives_analyzer = ParseBRConfig(g, br_config);
-
-
-    InterestingEdgeFinder interesting_edge_finder(g,
-                                                  NecessaryBulgeCondition(g,
-                                                                          alternatives_analyzer.max_length(),
-                                                                          alternatives_analyzer.max_coverage()),
-                                                  info.chunk_cnt());
-    if (br_config.parallel) {
-        INFO("Creating parallel br instance");
-        return make_shared<ParallelBulgeRemover<Graph, InterestingEdgeFinder>>(g,
-                interesting_edge_finder,
-                br_config.buff_size,
-                br_config.buff_cov_diff,
-                br_config.buff_cov_rel_diff,
-                alternatives_analyzer,
-                nullptr,
-                removal_handler,
-                /*track_changes*/true);
-    } else {
-        INFO("Creating br instance");
-        return make_shared<BulgeRemover<Graph, InterestingEdgeFinder>>(g,
-                interesting_edge_finder,
-                alternatives_analyzer,
-                nullptr,
-                removal_handler,
-                /*track_changes*/true);
-    }
-}
-
-//todo make this all work for end of the edges also? switch to canonical iteration?
-//todo rename, since checking topology also
-template<class Graph>
-class FlankingCovBound : public EdgeCondition<Graph> {
-    typedef EdgeCondition<Graph> base;
-    typedef typename Graph::EdgeId EdgeId;
-    const FlankingCoverage<Graph> &flanking_cov_;
-    double max_coverage_;
-public:
-    FlankingCovBound(const Graph &g,
-                     const FlankingCoverage<Graph> &flanking_cov,
-                     double max_coverage)
-        : base(g),
-          flanking_cov_(flanking_cov),
-          max_coverage_(max_coverage) {
-    }
-
-    bool Check(EdgeId e) const override {
-        return this->g().length(e) > 1
-                    && this->g().OutgoingEdgeCount(this->g().EdgeStart(e)) > 1
-                    && math::le(flanking_cov_.CoverageOfStart(e), max_coverage_);
-    }
-
-};
-
-template<class Graph, class Comparator = std::less<typename Graph::EdgeId>>
-class ParallelDisconnectionAlgorithm : public PersistentProcessingAlgorithm<Graph,
-                                                typename Graph::EdgeId,
-                                                ParallelInterestingElementFinder<Graph>, Comparator> {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef PersistentProcessingAlgorithm<Graph, EdgeId,
-            ParallelInterestingElementFinder<Graph>, Comparator> base;
-    pred::TypedPredicate<EdgeId> condition_;
-    omnigraph::simplification::relative_coverage::EdgeDisconnector<Graph> disconnector_;
-
-public:
-    ParallelDisconnectionAlgorithm(Graph &g,
-                                    pred::TypedPredicate<EdgeId> condition,
-                                    size_t chunk_cnt,
-                                    HandlerF<Graph> removal_handler,
-                                    const Comparator &comp = Comparator(),
-                                    bool track_changes = true)
-            : base(g,
-                   ParallelInterestingElementFinder<Graph>(g, condition, chunk_cnt),
-                           /*canonical_only*/false, comp, track_changes),
-                   condition_(condition),
-                   disconnector_(g, removal_handler) {
-    }
-
-    bool Process(EdgeId e) override {
-        if (condition_(e)) {
-            disconnector_(e);
-            return true;
-        }
-        return false;
-    }
-
-};
-
-template<class Graph>
-AlgoPtr<Graph> LowFlankDisconnectorInstance(Graph &g,
-                                           const FlankingCoverage<Graph> &flanking_cov,
-                                           double cov_bound,
-                                           const SimplifInfoContainer &info,
-                                           HandlerF<Graph> removal_handler) {
-    if (math::ls(cov_bound, 0.)) {
-        INFO("Flanking coverage based disconnection disabled");
-        return nullptr;
-    }
-
-    return make_shared<ParallelDisconnectionAlgorithm<Graph>>(g,
-                                                              FlankingCovBound<Graph>(g, flanking_cov, cov_bound),
-                                                              info.chunk_cnt(),
-                                                              removal_handler);
-}
-
-template<class Graph>
-bool RemoveHiddenLoopEC(Graph &g,
-                        const FlankingCoverage<Graph> &flanking_cov,
-                        double determined_coverage_threshold,
-                        config::debruijn_config::simplification::hidden_ec_remover her_config,
-                        HandlerF<Graph> removal_handler) {
-    if (her_config.enabled) {
-        INFO("Removing loops and rc loops with erroneous connections");
-        ECLoopRemover<Graph> hc(g, flanking_cov,
-                                determined_coverage_threshold,
-                                her_config.relative_threshold, removal_handler);
-        bool res = hc.Run();
-        hc.PrintLoopStats();
-        return res;
-    }
-    return false;
-}
-
-
-////todo add chunk_cnt
-//template<class Graph>
-//bool ClipTips(
-//    Graph &g,
-//    const std::string &condition,
-//    const SimplifInfoContainer &info,
-//    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-//
-//    if (condition != "") {
-//        ConditionParser<Graph> parser(g, condition, info);
-//        auto condition = parser();
-//        ParallelEdgeRemovingAlgorithm<Graph, LengthComparator<Graph>> algo(g,
-//                                                                           AddTipCondition(g, condition),
-//                                            info.chunk_cnt(),
-//                                            removal_handler,
-//                                            /*canonical_only*/true,
-//                                            LengthComparator<Graph>(g));
-//        return algo.Run();
-//    } else {
-//        return false;
-//    }
-//}
-
-//template<class Graph>
-//bool RemoveLowCoverageEdges(
-//    Graph &g,
-//    const std::string &condition,
-//    const SimplifInfoContainer &info,
-//    std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-//
-//    if (condition != "") {
-//        ConditionParser<Graph> parser(g, condition, info);
-//         auto condition = parser();
-//         blahblahblah
-//         ParallelEdgeRemovingAlgorithm<Graph, CoverageComparator<Graph>> algo(g,
-//                                             condition,
-//                                             info.chunk_cnt(),
-//                                             removal_handler,
-//                                             /*canonical_only*/true,
-//                                             CoverageComparator<Graph>(g));
-//        return algo.Run();
-//    } else {
-//        return false;
-//    }
-//}
-
-
-//Parallel algo launch
-
-template<class Graph>
-void ParallelCompress(Graph &g, size_t chunk_cnt, bool loop_post_compression = true) {
-    INFO("Parallel compression");
-    debruijn::simplification::ParallelCompressor<Graph> compressor(g);
-    TwoStepAlgorithmRunner<Graph, typename Graph::VertexId> runner(g, false);
-    RunVertexAlgorithm(g, runner, compressor, chunk_cnt);
-
-    //have to call cleaner to get rid of new isolated vertices
-    omnigraph::Cleaner<Graph>(g, chunk_cnt).Run();
-
-    if (loop_post_compression) {
-        INFO("Launching post-compression to compress loops");
-        CompressAllVertices(g, chunk_cnt);
-    }
-}
-
-template<class Graph>
-bool ParallelClipTips(Graph &g,
-              const string &tip_condition,
-              const SimplifInfoContainer &info,
-              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-    INFO("Parallel tip clipping");
-
-    string condition_str = tip_condition;
-
-    ConditionParser<Graph> parser(g, condition_str, info);
-
-    parser();
-
-    debruijn::simplification::ParallelTipClippingFunctor<Graph> tip_clipper(g,
-        parser.max_length_bound(), parser.max_coverage_bound(), removal_handler);
-
-    AlgorithmRunner<Graph, typename Graph::VertexId> runner(g);
-
-    RunVertexAlgorithm(g, runner, tip_clipper, info.chunk_cnt());
-
-    ParallelCompress(g, info.chunk_cnt());
-    //Cleaner is launched inside ParallelCompression
-    //CleanGraph(g, info.chunk_cnt());
-
-    return true;
-}
-
-//template<class Graph>
-//bool ParallelRemoveBulges(Graph &g,
-//              const config::debruijn_config::simplification::bulge_remover &br_config,
-//              size_t /*read_length*/,
-//              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-//    INFO("Parallel bulge remover");
-//
-//    size_t max_length = LengthThresholdFinder::MaxBulgeLength(
-//        g.k(), br_config.max_bulge_length_coefficient,
-//        br_config.max_additive_length_coefficient);
-//
-//    DEBUG("Max bulge length " << max_length);
-//
-//    debruijn::simplification::ParallelSimpleBRFunctor<Graph> bulge_remover(g,
-//                            max_length,
-//                            br_config.max_coverage,
-//                            br_config.max_relative_coverage,
-//                            br_config.max_delta,
-//                            br_config.max_relative_delta,
-//                            removal_handler);
-//    for (VertexId v : g) {
-//        bulge_remover(v);
-//    }
-//
-//    Compress(g);
-//    return true;
-//}
-
-template<class Graph>
-bool ParallelEC(Graph &g,
-              const string &ec_condition,
-              const SimplifInfoContainer &info,
-              std::function<void(typename Graph::EdgeId)> removal_handler = 0) {
-    INFO("Parallel ec remover");
-
-    ConditionParser<Graph> parser(g, ec_condition, info);
-
-    auto condition = parser();
-
-    size_t max_length = parser.max_length_bound();
-    double max_coverage = parser.max_coverage_bound();
-
-    debruijn::simplification::CriticalEdgeMarker<Graph> critical_marker(g, info.chunk_cnt());
-    critical_marker.PutMarks();
-
-    debruijn::simplification::ParallelLowCoverageFunctor<Graph> ec_remover(g,
-                            max_length,
-                            max_coverage,
-                            removal_handler);
-
-    TwoStepAlgorithmRunner<Graph, typename Graph::EdgeId> runner(g, true);
-
-    RunEdgeAlgorithm(g, runner, ec_remover, info.chunk_cnt());
-
-    critical_marker.ClearMarks();
-
-    ParallelCompress(g, info.chunk_cnt());
-    //called in parallel compress
-    //CleanGraph(g, info.chunk_cnt());
-    return true;
-}
-
-}
-}
diff --git a/src/modules/visualization/visualization_utils.hpp b/src/modules/visualization/visualization_utils.hpp
deleted file mode 100644
index 72d4f74..0000000
--- a/src/modules/visualization/visualization_utils.hpp
+++ /dev/null
@@ -1,210 +0,0 @@
-#pragma once
-
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#include "graph_printer.hpp"
-#include "algorithms/dijkstra/dijkstra_helper.hpp"
-#include "assembly_graph/components/splitters.hpp"
-#include "assembly_graph/components/graph_component.hpp"
-#include "visualizers.hpp"
-#include "vertex_linker.hpp"
-
-#include <fstream>
-
-namespace omnigraph {
-namespace visualization {
-
-
-template<class Graph>
-void WriteComponents(const Graph& g,
-        const string& folder_name,
-        shared_ptr<GraphSplitter<Graph>> inner_splitter,
-        shared_ptr<GraphColorer<Graph>> colorer,
-        const GraphLabeler<Graph> &labeler) {
-    EmptyGraphLinker<Graph> linker;
-//  shared_ptr<GraphComponentFilter<Graph>> checker = make_shared<ComponentSizeFilter<Graph>>(g, 1500, 2, 300);
-    auto filter = make_shared<omnigraph::SmallComponentFilter<Graph>>(g, 3);
-    shared_ptr<GraphSplitter<Graph>> splitter = make_shared<omnigraph::CollectingSplitterWrapper<Graph>>(inner_splitter, filter);
-    omnigraph::visualization::SplittingGraphVisualizer<Graph>(g, labeler, *colorer, linker).SplitAndVisualize(*splitter, folder_name);
-}
-
-template<class Graph>
-void DrawComponentsOfShortEdges(const Graph& g, const string &output_dir, size_t min_length, size_t sinks, size_t sources)
-{
-    vector<typename Graph::EdgeId> short_edges;
-    std::string pics_folder_ = output_dir + ToString(min_length) + "_" + ToString(sinks) + "_" + ToString(sources) + "_"+ "pics_polymorphic/";
-    make_dir(pics_folder_);
-    INFO("Writing pics with components consisting of short edges to " + pics_folder_);
-    shared_ptr<GraphSplitter<Graph>> splitter = LongEdgesExclusiveSplitter<Graph>(g, min_length);
-    while (splitter->HasNext()) {
-        GraphComponent<Graph> component = splitter->Next();
-        if(component.v_size() > 3 && component.sinks().size() == sinks && component.sources().size() == sources)
-        {
-            bool fail = false;
-            for(auto v : component.sources()) {
-                if(component.g().IncomingEdgeCount(v) != 1) {
-                    fail = true;
-                }
-            }
-            for(auto v : component.sinks()) {
-                if(component.g().OutgoingEdgeCount(v) != 1) {
-                    fail = true;
-                }
-            }
-
-            if(fail)
-            {
-                continue;
-            }
-
-            StrGraphLabeler<Graph> labeler(component.g());
-            CoverageGraphLabeler<Graph> labeler2(component.g());
-            CompositeLabeler<Graph> compositeLabeler(labeler, labeler2);
-            WriteComponentSinksSources(component, pics_folder_ + ToString(g.int_id(*component.vertices().begin()))
-                                                                                   + ".dot", visualization::DefaultColorer(g),
-                                                                                   compositeLabeler);
-            INFO("Component is written to " + ToString(g.int_id(*component.vertices().begin())) + ".dot");
-
-            //            PrintComponent(component,
-//                                pics_folder_ + "ShortComponents/"
-//                                        + ToString(gp.g.int_id(component.vertices_[0]))
-//                                         + ".dot");
-        }
-    }
-}
-
-
-template<class Graph>
-void WriteSizeLimitedComponents(const Graph& g,
-        const string& folder_name,
-        shared_ptr<GraphSplitter<Graph>> inner_splitter,
-        shared_ptr<GraphColorer<Graph>> colorer,
-        const GraphLabeler<Graph> &labeler, int min_component_size, int max_component_size, size_t max_components) {
-    EmptyGraphLinker<Graph> linker;
-
-    auto filter = make_shared<omnigraph::ComponentSizeFilter<Graph>>(g, 1000000000, (size_t) min_component_size, (size_t) max_component_size);
-    shared_ptr<GraphSplitter<Graph>> splitter = make_shared<omnigraph::CollectingSplitterWrapper<Graph>>(inner_splitter, filter);
-    omnigraph::visualization::SplittingGraphVisualizer<Graph>(g, labeler, *colorer, linker, false, max_components).SplitAndVisualize(*splitter, folder_name);
-}
-
-template<class Graph>
-void WriteComponent(const GraphComponent<Graph>& gc,
-        const string& file_name, shared_ptr<GraphColorer<Graph>> colorer,
-        const GraphLabeler<Graph> &labeler) {
-    EmptyGraphLinker<Graph> linker;
-    BorderDecorator<Graph> component_colorer(gc, *colorer, "yellow");
-    std::ofstream os;
-    os.open(file_name);
-    omnigraph::visualization::ComponentVisualizer<Graph>(gc.g(), true).Visualize(gc, os, labeler, component_colorer, linker);
-    os.close();
-}
-
-template<class Graph>
-void WriteComponentSinksSources(const GraphComponent<Graph>& gc,
-        const string& file_name, shared_ptr<GraphColorer<Graph>> colorer,
-        const GraphLabeler<Graph> &labeler) {
-    EmptyGraphLinker<Graph> linker;
-    SinkSourceDecorator<Graph> component_colorer(gc, *colorer);
-    std::ofstream os;
-    os.open(file_name);
-    omnigraph::visualization::ComponentVisualizer<Graph>(gc.g(), true).Visualize(gc, os, labeler, component_colorer, linker);
-    os.close();
-}
-
-template<class Graph>
-void WriteComponentSinksSources(const GraphComponent<Graph>& gc,
-        const string& file_name) {
-
-    StrGraphLabeler<Graph> labeler(gc.g());
-    CoverageGraphLabeler<Graph> labeler2(gc.g());
-    CompositeLabeler<Graph> compositeLabeler(labeler, labeler2);
-    EmptyGraphLinker<Graph> linker;
-    WriteComponentSinksSources(gc, file_name, DefaultColorer(gc.g()),
-            compositeLabeler);
-}
-
-template<class Graph>
-void WriteSimpleComponent(const GraphComponent<Graph>& gc,
-        const string& file_name, shared_ptr<GraphColorer<Graph>> colorer,
-        const GraphLabeler<Graph> &labeler) {
-    EmptyGraphLinker<Graph> linker;
-    std::ofstream os;
-    os.open(file_name);
-    omnigraph::visualization::ComponentVisualizer<Graph>(gc.g(), false).Visualize(gc, os, labeler, *colorer, linker);
-    os.close();
-}
-
-template<class Graph>
-void WriteComponentsAlongPath(const Graph& g, vector<typename Graph::EdgeId> path,
-        const string& prefix_path, shared_ptr<GraphColorer<Graph>> colorer,
-        const GraphLabeler<Graph> &labeler, bool color_path = true) {
-    auto edge_colorer = make_shared<CompositeEdgeColorer<Graph>>("black");
-    edge_colorer->AddColorer(colorer);
-    if (color_path) {
-        edge_colorer->AddColorer(make_shared<SetColorer<Graph>>(g, path, "green"));
-    }
-    shared_ptr<GraphColorer<Graph>> resulting_colorer =  make_shared<CompositeGraphColorer<Graph>>(colorer, edge_colorer);
-    shared_ptr<GraphSplitter<Graph>> rs = ReliableSplitterAlongPath<Graph>(g, path);
-    auto filter = make_shared<omnigraph::SmallComponentFilter<Graph>>(g, 3);
-    shared_ptr<GraphSplitter<Graph>> splitter = make_shared<omnigraph::CondensingSplitterWrapper<Graph>>(rs, filter);
-    WriteComponents<Graph>(g, prefix_path, splitter, resulting_colorer, labeler);
-}
-
-template<class Graph>
-class LocalityPrintingRH {
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
-    const Graph& g_;
-    const GraphLabeler<Graph>& labeler_;
-    std::shared_ptr<visualization::GraphColorer<Graph>> colorer_;
-    const string output_folder_;
-public:
-    LocalityPrintingRH(const Graph& g
-            , const GraphLabeler<Graph>& labeler
-            , std::shared_ptr<visualization::GraphColorer<Graph>> colorer
-            , const string& output_folder) :
-            g_(g),
-            labeler_(labeler),
-            colorer_(colorer),
-            output_folder_(output_folder) {
-//        path::make_dirs(output_folder_);
-    }
-
-    void HandleDelete(EdgeId e, const string& add_label = "") {
-        //todo magic constant
-//          map<EdgeId, string> empty_coloring;
-        auto edge_colorer = make_shared<visualization::CompositeEdgeColorer<Graph>>("black");
-        edge_colorer->AddColorer(colorer_);
-        edge_colorer->AddColorer(make_shared<visualization::SetColorer<Graph>>(g_, vector<EdgeId>(1, e), "green"));
-        shared_ptr<visualization::GraphColorer<Graph>> resulting_colorer = make_shared<visualization::CompositeGraphColorer<Graph>>(colorer_, edge_colorer);
-
-        string fn = output_folder_ + "/edge_" + ToString(g_.int_id(e)) + add_label + ".dot";
-        omnigraph::visualization::WriteComponent(omnigraph::EdgeNeighborhood<Graph>(g_, e, 50, 250)
-                , fn
-                , resulting_colorer, labeler_);
-    }
-
-private:
-    DECL_LOGGER("LocalityPrintingRH")
-    ;
-};
-
-//static void WriteFilteredComponents(const Graph& g,
-//      const string& folder_name,
-//      shared_ptr<GraphComponentFilter<Graph>> filter,
-//      shared_ptr<GraphSplitter<Graph>> splitter,
-//      shared_ptr<GraphColorer<Graph>> colorer,
-//      const GraphLabeler<Graph> &labeler) {
-//  EmptyGraphLinker<Graph> linker;
-////    shared_ptr<GraphComponentFilter<Graph>> checker = make_shared<ComponentSizeFilter<Graph>>(g, 1500, 2, 300);
-//  omnigraph::FilteringSplitterWrapper<Graph> filtered_splitter(splitter, filter);
-//  omnigraph::visualization::SplittingGraphVisualizer<Graph>(g, labeler, *colorer, linker).SplitAndVisualize(filtered_splitter, folder_name);
-//}
-
-}
-}
diff --git a/src/projects/CMakeLists.txt b/src/projects/CMakeLists.txt
index 4fd1f77..02eca68 100644
--- a/src/projects/CMakeLists.txt
+++ b/src/projects/CMakeLists.txt
@@ -19,4 +19,4 @@ if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
   endif()
 else()
   add_subdirectory(mph_test)
-endif()
\ No newline at end of file
+endif()
diff --git a/src/projects/cap/assembly_compare.hpp b/src/projects/cap/assembly_compare.hpp
index e879bc0..ec86be5 100644
--- a/src/projects/cap/assembly_compare.hpp
+++ b/src/projects/cap/assembly_compare.hpp
@@ -9,10 +9,10 @@
 
 #include "pipeline/graph_pack.hpp"
 #include "pipeline/graphio.hpp"
-#include "dev_support/simple_tools.hpp"
-#include "algorithms/simplification/cleaner.hpp"
-#include "io/reads_io/splitting_wrapper.hpp"
-#include "io/reads_io/multifile_reader.hpp"
+#include "utils/simple_tools.hpp"
+#include "modules/simplification/cleaner.hpp"
+#include "io/reads/splitting_wrapper.hpp"
+#include "io/reads/multifile_reader.hpp"
 #include <boost/algorithm/string/predicate.hpp>
 
 #include "coloring.hpp"
@@ -113,7 +113,7 @@ public:
 //    typedef typename gp_t::graph_t Graph;
 //    typedef typename Graph::EdgeId EdgeId;
 //    typedef typename Graph::VertexId VertexId;
-//    typedef NewExtendedSequenceMapper<Graph, typename gp_t::seq_t> Mapper; // gp_t::k_value + 1
+//    typedef BasicSequenceMapper<Graph, typename gp_t::seq_t> Mapper; // gp_t::k_value + 1
 //
 //    gp_t gp_;
 //    ColorHandler<Graph> coloring_;
@@ -137,7 +137,7 @@ public:
 //
 //    template<class gp_t2>
 //    void UniversalSaveGP(
-//            const gp_t2& gp/*, const omnigraph::visualization::GraphColorer<typename gp_t2::graph_t> coloring*/,
+//            const gp_t2& gp/*, const visualization::graph_colorer::GraphColorer<typename gp_t2::graph_t> coloring*/,
 //            const string& filename) {
 //        typename PrinterTraits<Graph>::Printer printer(gp.g);
 //        INFO("Saving graph to " << filename);
@@ -464,12 +464,12 @@ void ThreadAssemblies(const string& base_saves, ContigStream& base_assembly,
 //        ConstructGraph<gp_t::k_value, Graph>(gp.g, gp.index, base_assembly);
     ScanGraphPack(base_saves, gp);
     base_assembly.reset();
-    FillPos(gp, base_assembly, base_prefix);
-    FillPos(gp, assembly_to_thread, to_thread_prefix);
+    visualization::position_filler::FillPos(gp, base_assembly, base_prefix);
+    visualization::position_filler::FillPos(gp, assembly_to_thread, to_thread_prefix);
 
-    EdgePosGraphLabeler<Graph> pos_labeler(gp.g, gp.edge_pos);
-    StrGraphLabeler<Graph> str_labeler(gp.g);
-    CompositeLabeler<Graph> labeler(pos_labeler, str_labeler);
+    visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(gp.g, gp.edge_pos);
+    visualization::graph_labeler::StrGraphLabeler<Graph> str_labeler(gp.g);
+    visualization::graph_labeler::CompositeLabeler<Graph> labeler(pos_labeler, str_labeler);
 
     auto mapper = MapperInstance(gp);
 
diff --git a/src/projects/cap/assembly_problem_detection.hpp b/src/projects/cap/assembly_problem_detection.hpp
index 7040caf..6ad4075 100644
--- a/src/projects/cap/assembly_problem_detection.hpp
+++ b/src/projects/cap/assembly_problem_detection.hpp
@@ -47,7 +47,7 @@
 //    typedef io::SingleRead Contig;
 //    typedef io::IReader<io::SingleRead> ContigStream;
 //    typedef    io::MultifileReader<io::SingleRead> CompositeStream;
-//    typedef debruijn_graph::NewExtendedSequenceMapper<Graph, Index> Mapper;
+//    typedef debruijn_graph::BasicSequenceMapper<Graph, Index> Mapper;
 //
 //    const gp_t& gp_;
 //    const ColorHandler<Graph>& coloring_;
@@ -204,7 +204,7 @@
 //    }
 //
 //    void ReportLocality(VertexId v, const vector<EdgeId>& good_contig_path, const string& best_contig, const Contig& c, const string& folder) {
-//        using namespace omnigraph::visualization;
+//        using namespace visualization;
 //        make_dir(folder);
 //        LengthIdGraphLabeler<Graph> basic_labeler(gp_.g);
 //        EdgePosGraphLabeler<Graph> pos_labeler(gp_.g, gp_.edge_pos);
@@ -398,7 +398,7 @@
 //    }
 //
 //    void ReportEdge(EdgeId e, const string& folder) {
-//        using namespace omnigraph::visualization;
+//        using namespace visualization;
 //        INFO(
 //                "Can close gap between edges " << g_.str(g_.GetUniqueIncomingEdge(g_.EdgeStart(e))) << " and " << g_.str(g_.GetUniqueOutgoingEdge(g_.EdgeEnd(e))) << " with edge " << g_.str(e));
 //        LengthIdGraphLabeler<Graph> basic_labeler(g_);
@@ -407,7 +407,7 @@
 //        CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
 //        GraphComponent<Graph> component = omnigraph::EdgeNeighborhood(g_, e);
 //        auto colorer = coloring_.ConstructColorer(component);
-//        omnigraph::visualization::WriteComponent(component, folder + ToString(g_.int_id(e)) + "_loc.dot", colorer, labeler);
+//        visualization::visualization_utils::WriteComponent(component, folder + ToString(g_.int_id(e)) + "_loc.dot", colorer, labeler);
 //    }
 //
 ////    bool CheckEdges(const vector<EdgeId>& edges) {
diff --git a/src/projects/cap/cap_commands.hpp b/src/projects/cap/cap_commands.hpp
index c4c637f..1c0c945 100644
--- a/src/projects/cap/cap_commands.hpp
+++ b/src/projects/cap/cap_commands.hpp
@@ -10,8 +10,8 @@
 #include "cap_environment.hpp"
 #include "cap_environment_manager.hpp"
 #include "mosaic.hpp"
-#include "io/reads_io/sequence_reader.hpp"
-#include "dev_support/path_helper.hpp"
+#include "io/reads/sequence_reader.hpp"
+#include "utils/path_helper.hpp"
 
 namespace online_visualization {
 
diff --git a/src/projects/cap/cap_environment.hpp b/src/projects/cap/cap_environment.hpp
index f0f24d4..fa41558 100644
--- a/src/projects/cap/cap_environment.hpp
+++ b/src/projects/cap/cap_environment.hpp
@@ -29,8 +29,8 @@ class CapEnvironment : public Environment {
   typedef Graph::VertexId VertexId;
   typedef Graph::EdgeId EdgeId;
 
-  typedef debruijn_graph::KmerStoringEdgeIndex<Graph, runtime_k::RtSeq, kmer_index_traits<runtime_k::RtSeq>, debruijn_graph::SimpleStoring> RtSetIndex;
-  typedef debruijn_graph::graph_pack<Graph, runtime_k::RtSeq, RtSetIndex> RtSeqGraphPack;
+  typedef debruijn_graph::KmerStoringEdgeIndex<Graph, RtSeq, kmer_index_traits<RtSeq>, debruijn_graph::SimpleStoring> RtSetIndex;
+  typedef debruijn_graph::graph_pack<Graph, RtSeq, RtSetIndex> RtSeqGraphPack;
   typedef debruijn_graph::KmerStoringEdgeIndex<Graph, cap::LSeq, kmer_index_traits<cap::LSeq>, debruijn_graph::SimpleStoring> LSeqIndex;
   typedef debruijn_graph::graph_pack<Graph, cap::LSeq, LSeqIndex> LSeqGraphPack;
 
diff --git a/src/projects/cap/cap_environment_manager.hpp b/src/projects/cap/cap_environment_manager.hpp
index 9628fdb..33a39f3 100644
--- a/src/projects/cap/cap_environment_manager.hpp
+++ b/src/projects/cap/cap_environment_manager.hpp
@@ -22,7 +22,7 @@
 #include "test_utils.hpp"
 
 #include "cap_environment.hpp"
-#include "io/reads_io/sequence_reader.hpp"
+#include "io/reads/sequence_reader.hpp"
 #include "pipeline/config_struct.hpp"
 #include "junk_cropping_reader.hpp"
 
diff --git a/src/projects/cap/cap_kmer_index.hpp b/src/projects/cap/cap_kmer_index.hpp
index feab11a..5b7414b 100644
--- a/src/projects/cap/cap_kmer_index.hpp
+++ b/src/projects/cap/cap_kmer_index.hpp
@@ -10,11 +10,11 @@
 #include "compare_standard.hpp"
 #include "longseq.hpp"
 #include "polynomial_hash.hpp"
-#include "utils/adt/kmer_map.hpp"
-#include "data_structures/indices/edge_position_index.hpp"
+#include "common/adt/kmer_map.hpp"
+#include "utils/indices/edge_position_index.hpp"
 
-#include "io/reads_io/sequence_reader.hpp"
-#include "data_structures/mph_index/base_hash.hpp"
+#include "io/reads/sequence_reader.hpp"
+#include "utils/mph_index/base_hash.hpp"
 
 template<>
 struct kmer_index_traits<cap::LSeq> {
diff --git a/src/projects/cap/cap_logger.hpp b/src/projects/cap/cap_logger.hpp
index b54bc48..c8bf020 100644
--- a/src/projects/cap/cap_logger.hpp
+++ b/src/projects/cap/cap_logger.hpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 
 /*
 #undef INFO
diff --git a/src/projects/cap/colored_graph_construction.hpp b/src/projects/cap/colored_graph_construction.hpp
index bfceb8c..091c41c 100644
--- a/src/projects/cap/colored_graph_construction.hpp
+++ b/src/projects/cap/colored_graph_construction.hpp
@@ -7,10 +7,10 @@
 
 #pragma once
 
-#include "data_structures/sequence/runtime_k.hpp"
+#include "sequence/runtime_k.hpp"
 #include "compare_standard.hpp"
 #include "cap_kmer_index.hpp"
-#include "algorithms/graph_construction.hpp"
+#include "modules/graph_construction.hpp"
 
 namespace cap {
 
@@ -76,7 +76,7 @@ public:
     void FindCoveredRanges(CoveredRanges& crs, ContigStream& stream) const {
         io::SingleRead read;
         stream.reset();
-//        NewExtendedSequenceMapper<gp_t::k_value + 1, Graph> mapper(gp_.g,
+//        BasicSequenceMapper<gp_t::k_value + 1, Graph> mapper(gp_.g,
 //                gp_.index, gp_.kmer_mapper);
         while (!stream.eof()) {
             stream >> read;
@@ -366,7 +366,7 @@ void ConstructColoredGraph(gp_t& gp,
 //        vector<ContigStream*>& streams, const string& reference, bool fill_pos = true, int br_delta = -1) {
 //    typedef typename gp_t::graph_t Graph;
 //    const size_t k = gp_t::k_value;
-//    typedef NewExtendedSequenceMapper<k + 1, Graph> Mapper;
+//    typedef BasicSequenceMapper<k + 1, Graph> Mapper;
 //
 //    INFO("Constructing de Bruijn graph for k=" << k);
 //
@@ -389,7 +389,7 @@ void ConstructColoredGraph(gp_t& gp,
 //        for (auto it = streams.begin(); it != streams.end(); ++it) {
 //            ContigStream& stream = **it;
 //            stream.reset();
-//            FillPos(gp, stream);
+//            visualization::position_filler::FillPos(gp, stream);
 //        }
 //    }
 //}
diff --git a/src/projects/cap/coloring.hpp b/src/projects/cap/coloring.hpp
index 3916129..2e33e92 100644
--- a/src/projects/cap/coloring.hpp
+++ b/src/projects/cap/coloring.hpp
@@ -8,6 +8,7 @@
 #pragma once
 
 #include <boost/format/format_fwd.hpp>
+#include <common/visualization/graph_colorer.hpp>
 
 namespace cap {
 
@@ -195,7 +196,7 @@ public:
 };
 
 template<class Graph, class Element>
-class ElementColorHandler: public GraphActionHandler<Graph>, public visualization::ElementColorer<Element> {
+class ElementColorHandler: public GraphActionHandler<Graph>, public visualization::graph_colorer::ElementColorer<Element> {
     typedef GraphActionHandler<Graph> base;
 
     // For each element will store a bitmask of used there colors.
@@ -273,7 +274,7 @@ public:
 };
 
 template<class Graph>
-class ColorHandler: public visualization::GraphColorer<Graph>, public GraphActionHandler<Graph> {
+class ColorHandler: public visualization::graph_colorer::GraphColorer<Graph>, public GraphActionHandler<Graph> {
     typedef GraphActionHandler<Graph> base;
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
@@ -369,14 +370,14 @@ public:
 
     //This is a bad unsafe code! The right way is to use shared_ptr of this class in all interfaces.
     //Then one can easily draw with this colorer without any delegation
-    shared_ptr<omnigraph::visualization::GraphColorer<Graph>> ConstructColorer() const {
-        using namespace omnigraph::visualization;
-        return shared_ptr<GraphColorer<Graph>>(new omnigraph::visualization::DelegatingGraphColorer<Graph>(*this));
+    shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> ConstructColorer() const {
+        using namespace visualization;
+        return shared_ptr<GraphColorer<Graph>>(new visualization::DelegatingGraphColorer<Graph>(*this));
     }
 
-    shared_ptr<omnigraph::visualization::GraphColorer<Graph>> ConstructColorer(GraphComponent<Graph> gc) const {
-        shared_ptr<omnigraph::visualization::GraphColorer<Graph>> colorer = ConstructColorer();
-        return omnigraph::visualization::BorderDecorator<Graph>::GetInstance(gc, colorer);
+    shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> ConstructColorer(GraphComponent<Graph> gc) const {
+        shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> colorer = ConstructColorer();
+        return visualization::BorderDecorator<Graph>::GetInstance(gc, colorer);
     }
 
   size_t max_colors() const {
@@ -430,9 +431,9 @@ void LoadColoring(const Graph& /*g*/
 
 
 template<class Graph>
-std::auto_ptr<omnigraph::visualization::GraphColorer<Graph>> ConstructColorer(
+std::auto_ptr<visualization::graph_colorer::GraphColorer<Graph>> ConstructColorer(
         const ColorHandler<Graph>& coloring) {
-    using namespace omnigraph::visualization;
+    using namespace visualization;
     return std::auto_ptr<GraphColorer<Graph>>(
             new CompositeGraphColorer<Graph>(
                     make_shared<MapColorer<typename Graph::VertexId>>(coloring.VertexColorMap()),
@@ -440,9 +441,9 @@ std::auto_ptr<omnigraph::visualization::GraphColorer<Graph>> ConstructColorer(
 }
 
 template<class Graph>
-std::auto_ptr<omnigraph::visualization::GraphColorer<Graph>> ConstructBorderColorer(const Graph& /*g*/,
+std::auto_ptr<visualization::graph_colorer::GraphColorer<Graph>> ConstructBorderColorer(const Graph& /*g*/,
         const ColorHandler<Graph>& coloring) {
-    using namespace omnigraph::visualization;
+    using namespace visualization;
     return std::auto_ptr<GraphColorer<Graph>>(
             new CompositeGraphColorer<Graph>(
                     make_shared<FixedColorer<Graph>>("white"),
diff --git a/src/projects/cap/compare_standard.hpp b/src/projects/cap/compare_standard.hpp
index 7e0f85e..426b3f5 100644
--- a/src/projects/cap/compare_standard.hpp
+++ b/src/projects/cap/compare_standard.hpp
@@ -7,16 +7,16 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 
 // log
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 // utils
-#include "dev_support/cpp_utils.hpp"
-#include "dev_support/path_helper.hpp"
+#include "utils/cpp_utils.hpp"
+#include "utils/path_helper.hpp"
 
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 
 // longseq
 #include "longseq.hpp"
@@ -25,12 +25,12 @@
 #include "cap_config_struct.hpp"
 
 // io
-#include "io/reads_io/ireader.hpp"
-#include "io/reads_io/converting_reader_wrapper.hpp"
-#include "io/reads_io/vector_reader.hpp"
-#include "io/reads_io/multifile_reader.hpp"
-#include "io/reads_io/rc_reader_wrapper.hpp"
-#include "io/reads_io/osequencestream.hpp"
+#include "io/reads/ireader.hpp"
+#include "io/reads/converting_reader_wrapper.hpp"
+#include "io/reads/vector_reader.hpp"
+#include "io/reads/multifile_reader.hpp"
+#include "io/reads/rc_reader_wrapper.hpp"
+#include "io/reads/osequencestream.hpp"
 
 namespace cap {
 typedef io::SingleRead Contig;
@@ -42,6 +42,6 @@ typedef io::ReadStreamList<Contig> ContigStreams;
 }
 
 // debruijn
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
 #include "pipeline/graph_pack.hpp"
-#include "algorithms/graph_construction.hpp"
+#include "modules/graph_construction.hpp"
diff --git a/src/projects/cap/comparison_utils.hpp b/src/projects/cap/comparison_utils.hpp
index eefe93d..2dddb7a 100644
--- a/src/projects/cap/comparison_utils.hpp
+++ b/src/projects/cap/comparison_utils.hpp
@@ -8,17 +8,17 @@
 #pragma once
 
 #include "pipeline/graphio.hpp"
-#include "dev_support/simple_tools.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
+#include "utils/simple_tools.hpp"
+#include "assembly_graph/core/graph.hpp"
 #include "coordinates_handler.hpp"
 #include "math/xmath.h"
 #include <iostream>
 #include <vector>
-#include "dev_support/logger/logger.hpp"
-#include "io/reads_io/multifile_reader.hpp"
-#include "io/reads_io/splitting_wrapper.hpp"
-#include "io/reads_io/modifying_reader_wrapper.hpp"
-#include "io/reads_io/vector_reader.hpp"
+#include "utils/logger/logger.hpp"
+#include "io/reads/multifile_reader.hpp"
+#include "io/reads/splitting_wrapper.hpp"
+#include "io/reads/modifying_reader_wrapper.hpp"
+#include "io/reads/vector_reader.hpp"
 #include <boost/property_tree/ptree.hpp>
 #include <boost/property_tree/xml_parser.hpp>
 
@@ -137,7 +137,7 @@ inline void PrintGraphComponentContainingEdge(const string& file_name, const Gra
 }
 
 template<class Graph>
-class EdgeCoordinatesGraphLabeler: public AbstractGraphLabeler<Graph> {
+class EdgeCoordinatesGraphLabeler: public visualization::graph_labeler::AbstractGraphLabeler<Graph> {
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 public:
diff --git a/src/projects/cap/coordinates_handler.hpp b/src/projects/cap/coordinates_handler.hpp
index 3caeb4c..a5a7ad1 100644
--- a/src/projects/cap/coordinates_handler.hpp
+++ b/src/projects/cap/coordinates_handler.hpp
@@ -10,8 +10,8 @@
 #include <cstring>
 #include <vector>
 #include <algorithm>
-#include "data_structures/sequence/sequence.hpp"
-#include "data_structures/sequence/sequence_tools.hpp"
+#include "sequence/sequence.hpp"
+#include "sequence/sequence_tools.hpp"
 
 namespace cap {
 
diff --git a/src/projects/cap/deprecated/tools_deprecated.cpp b/src/projects/cap/deprecated/tools_deprecated.cpp
index 63883cf..6f13424 100644
--- a/src/projects/cap/deprecated/tools_deprecated.cpp
+++ b/src/projects/cap/deprecated/tools_deprecated.cpp
@@ -64,8 +64,8 @@
 //     ConstructColoredGraph(gp, coloring, streams, false, br_delta);
 
 // //    INFO("Filling ref pos " << gp.genome.size());
-// //            FillPos(gp_, gp_.genome, "ref_0");
-// //            FillPos(gp_, !gp_.genome, "ref_1");
+// //            visualization::position_filler::FillPos(gp_, gp_.genome, "ref_0");
+// //            visualization::position_filler::FillPos(gp_, !gp_.genome, "ref_1");
 
 // //Indels
 // //    make_dir(output_folder + "indels/");
diff --git a/src/projects/cap/diff_masking.hpp b/src/projects/cap/diff_masking.hpp
index b4027be..67ef45e 100644
--- a/src/projects/cap/diff_masking.hpp
+++ b/src/projects/cap/diff_masking.hpp
@@ -7,10 +7,10 @@
 
 #pragma once
 
-#include "io/reads_io/read_stream_vector.hpp"
-#include "algorithms/graph_construction.hpp"
+#include "io/reads/read_stream_vector.hpp"
+#include "modules/graph_construction.hpp"
 #include "stages/simplification_pipeline/graph_simplification.hpp"
-#include "algorithms/graph_read_correction.hpp"
+#include "modules/graph_read_correction.hpp"
 #include "test_utils.hpp"
 
 #include "coloring.hpp"
@@ -135,7 +135,7 @@ ContigStreams RefineStreams(ContigStreams& streams,
                                size_t k,
                                size_t delta = 5,
                                const std::string &workdir = "tmp") {
-    typedef debruijn_graph::KmerStoringEdgeIndex<Graph, Seq, kmer_index_traits<runtime_k::RtSeq>, debruijn_graph::SimpleStoring> RefiningIndex;
+    typedef debruijn_graph::KmerStoringEdgeIndex<Graph, Seq, kmer_index_traits<RtSeq>, debruijn_graph::SimpleStoring> RefiningIndex;
     typedef graph_pack<ConjugateDeBruijnGraph, Seq, RefiningIndex> refining_gp_t;
     refining_gp_t gp(k, workdir);
 
@@ -281,7 +281,7 @@ inline void PerformIterativeRefinement(ContigStreams& streams,
                 gene_collection);
     } else {
         omp_set_num_threads(8);
-        PerformRefinement<runtime_k::RtSeq>(streams, root, suffixes, current_k,
+        PerformRefinement<RtSeq>(streams, root, suffixes, current_k,
                 gene_root, gene_collection);
     }
 
diff --git a/src/projects/cap/gene_analysis.hpp b/src/projects/cap/gene_analysis.hpp
index a174024..07f99fe 100644
--- a/src/projects/cap/gene_analysis.hpp
+++ b/src/projects/cap/gene_analysis.hpp
@@ -7,8 +7,8 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/standard_base.hpp"
+#include "utils/simple_tools.hpp"
 #include "comparison_utils.hpp"
 #include "boost/tokenizer.hpp"
 #include "coloring.hpp"
diff --git a/src/projects/cap/genome_correction.hpp b/src/projects/cap/genome_correction.hpp
index e9ba688..52ba5c4 100644
--- a/src/projects/cap/genome_correction.hpp
+++ b/src/projects/cap/genome_correction.hpp
@@ -11,7 +11,8 @@
 
 #include <vector>
 #include <map>
-#include "utils/adt/bag.hpp"
+#include <common/visualization/graph_labeler.hpp>
+#include "common/adt/bag.hpp"
 
 namespace cap {
 
@@ -376,7 +377,7 @@ class SimpleInDelCorrector {
 
     void GenPicAlongPath(const vector<EdgeId> path, size_t cnt) {
     utils::MakeDirPath("ref_correction");
-        WriteComponentsAlongPath(g_, StrGraphLabeler<Graph>(g_),
+        WriteComponentsAlongPath(g_, visualization::graph_labeler::StrGraphLabeler<Graph>(g_),
                 "ref_correction/" + ToString(cnt) + ".dot", 100000, 10,
                 TrivialMappingPath(g_, path), *ConstructColorer(coloring_));
     }
@@ -384,7 +385,8 @@ class SimpleInDelCorrector {
     void GenPicAroundEdge(EdgeId e, size_t cnt) {
         utils::MakeDirPath("ref_correction");
         GraphComponent<Graph> component = omnigraph::EdgeNeighborhood(g_, e, 10, 100000);
-        omnigraph::visualization::WriteComponent(g_, "ref_correction/" + ToString(cnt) + ".dot", component, coloring_.GetInstance(), StrGraphLabeler<Graph>(g_));
+        visualization::visualization_utils::WriteComponent(g_, "ref_correction/" + ToString(cnt) + ".dot", component, coloring_.GetInstance(),
+                                      visualization::graph_labeler::StrGraphLabeler<Graph>(g_));
     }
 
     void CorrectGenomePath(size_t genome_start, size_t genome_end,
diff --git a/src/projects/cap/junk_cropping_reader.hpp b/src/projects/cap/junk_cropping_reader.hpp
index 5927d75..9c21196 100644
--- a/src/projects/cap/junk_cropping_reader.hpp
+++ b/src/projects/cap/junk_cropping_reader.hpp
@@ -6,8 +6,8 @@
 //***************************************************************************
 
 #pragma once
-#include "dev_support/standard_base.hpp"
-#include "io/reads_io/delegating_reader_wrapper.hpp"
+#include "utils/standard_base.hpp"
+#include "io/reads/delegating_reader_wrapper.hpp"
 
 namespace cap {
 
diff --git a/src/projects/cap/longseq.hpp b/src/projects/cap/longseq.hpp
index 571e69e..7a454ed 100644
--- a/src/projects/cap/longseq.hpp
+++ b/src/projects/cap/longseq.hpp
@@ -10,9 +10,9 @@
 #include <cstdlib>
 #include <cstdint>
 #include "polynomial_hash.hpp"
-#include "dev_support/log.hpp"
-#include "data_structures/sequence/sequence.hpp"
-#include "dev_support/openmp_wrapper.h"
+#include "utils/log.hpp"
+#include "sequence/sequence.hpp"
+#include "utils/openmp_wrapper.h"
 
 namespace cap {
 
diff --git a/src/projects/cap/main.cpp b/src/projects/cap/main.cpp
index 7228aab..2b646c2 100644
--- a/src/projects/cap/main.cpp
+++ b/src/projects/cap/main.cpp
@@ -9,14 +9,14 @@
 #include "cap_kmer_index.hpp"
 #include "cap_logger.hpp"
 
-#include "dev_support/segfault_handler.hpp"
-#include "dev_support/stacktrace.hpp"
+#include "utils/segfault_handler.hpp"
+#include "utils/stacktrace.hpp"
 #include "pipeline/config_struct.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
-#include "dev_support/memory_limit.hpp"
+#include "utils/memory_limit.hpp"
 #include "io/dataset_support/read_converter.hpp"
 
 #include "cap_online_visualizer.hpp"
diff --git a/src/projects/cap/mosaic.hpp b/src/projects/cap/mosaic.hpp
index 1939a3f..ac75fbb 100644
--- a/src/projects/cap/mosaic.hpp
+++ b/src/projects/cap/mosaic.hpp
@@ -5,12 +5,12 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/standard_base.hpp"
-#include "io/reads_io/rc_reader_wrapper.hpp"
-#include "io/reads_io/sequence_reader.hpp"
+#include "utils/standard_base.hpp"
+#include "io/reads/rc_reader_wrapper.hpp"
+#include "io/reads/sequence_reader.hpp"
 #include "diff_masking.hpp"
-#include "utils/adt/bag.hpp"
-#include "io/reads_io/vector_reader.hpp"
+#include "common/adt/bag.hpp"
+#include "io/reads/vector_reader.hpp"
 #include "visualization/graph_colorer.hpp"
 
 namespace cap {
@@ -950,7 +950,7 @@ void DrawGraph(const vector<StrandRange>& all_ranges,
                const vector<StrandRange>& full_mosaic_ranges,
                const GenomeBlockComposition& block_composition) {
     make_dir("tmp");
-    graph_pack<Graph, runtime_k::RtSeq> gp(block_composition.block_info().g().k(), "tmp", 0);
+    graph_pack<Graph, RtSeq> gp(block_composition.block_info().g().k(), "tmp", 0);
 
     auto stream = io::RCWrap(StreamInstance(ExtractSequences(all_ranges, block_composition)));
     auto streams = io::ReadStreamList<io::SingleRead>(stream);
@@ -959,9 +959,9 @@ void DrawGraph(const vector<StrandRange>& all_ranges,
 
     auto full_mosaic_pos_stream = io::RCWrap(StreamInstance(ExtractSequences(full_mosaic_ranges, block_composition), mosaic_names(full_mosaic_ranges.size())));
     INFO("Threading " << full_mosaic_ranges.size() << " full mosaics");
-    FillPos(gp, *full_mosaic_pos_stream);
+    visualization::position_filler::FillPos(gp, *full_mosaic_pos_stream);
 
-    omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
 
     shared_ptr<GraphSplitter<Graph>> splitter = omnigraph::ReliableSplitter(gp.g,
             numeric_limits<size_t>::max(),
@@ -971,8 +971,8 @@ void DrawGraph(const vector<StrandRange>& all_ranges,
     path::remove_if_exists("mosaic_pics");
     path::make_dir("mosaic_pics");
     INFO("Writing components");
-    omnigraph::visualization::WriteComponents(gp.g, "mosaic_pics/", splitter,
-            omnigraph::visualization::DefaultColorer(gp.g), labeler);
+    visualization::visualization_utils::WriteComponents(gp.g, "mosaic_pics/", splitter,
+            visualization::graph_colorer::DefaultColorer(gp.g), labeler);
     INFO("Components written");
 }
 
diff --git a/src/projects/cap/repeat_masking.hpp b/src/projects/cap/repeat_masking.hpp
index 5928bb8..ad1e19c 100644
--- a/src/projects/cap/repeat_masking.hpp
+++ b/src/projects/cap/repeat_masking.hpp
@@ -7,9 +7,9 @@
 
 #pragma once
 
-#include "data_structures/sequence/nucl.hpp"
-#include "io/reads_io/modifying_reader_wrapper.hpp"
-#include "utils/adt/bag.hpp"
+#include "sequence/nucl.hpp"
+#include "io/reads/modifying_reader_wrapper.hpp"
+#include "common/adt/bag.hpp"
 #include <boost/random/mersenne_twister.hpp>
 #include <boost/random/uniform_01.hpp>
 #include <boost/random/uniform_int.hpp>
@@ -132,7 +132,7 @@ public:
 
 class RepeatMasker : public io::SequenceModifier {
 private:
-    typedef runtime_k::RtSeq Kmer;
+    typedef RtSeq Kmer;
     typedef KeyIteratingMap<Kmer, Count, kmer_index_traits<Kmer>, SimpleStoring> KmerCountIndex;
     typedef typename KmerCountIndex::KeyWithHash KeyWithHash;
     typedef KmerCountIndex::KMerIdx KmerIdx;
diff --git a/src/projects/cap/serialization.hpp b/src/projects/cap/serialization.hpp
index 7fb38f2..1a94ce2 100644
--- a/src/projects/cap/serialization.hpp
+++ b/src/projects/cap/serialization.hpp
@@ -13,7 +13,7 @@
 #include <string>
 #include <vector>
 
-#include "data_structures/sequence/sequence.hpp"
+#include "sequence/sequence.hpp"
 
 namespace cap {
 
diff --git a/src/projects/cap/simple_inversion_finder.hpp b/src/projects/cap/simple_inversion_finder.hpp
index 3088a0a..d29a272 100644
--- a/src/projects/cap/simple_inversion_finder.hpp
+++ b/src/projects/cap/simple_inversion_finder.hpp
@@ -12,7 +12,7 @@
 #include "coordinates_handler.hpp"
 #include "compare_standard.hpp"
 #include "comparison_utils.hpp"
-#include "algorithms/dijkstra/dijkstra_helper.hpp"
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
 
 namespace cap {
 
@@ -299,9 +299,9 @@ class SimpleInversionFinder {
     MappingPath<EdgeId> mpath = TrivialMappingPath(g_, path);
     //Path<EdgeId> cpath(path, mpath.start_pos(), mpath.end_pos());
 
-    LengthIdGraphLabeler<Graph> basic_labeler(g_);
-    EdgePosGraphLabeler<Graph> pos_labeler(g_, gp_.edge_pos);
-    CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+    visualization::graph_labeler::LengthIdGraphLabeler<Graph> basic_labeler(g_);
+    visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(g_, gp_.edge_pos);
+    visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
 
     WriteComponentsAlongPath(g_, labeler, out_file, edge_length, max_vertices,
       mpath, *ConstructBorderColorer(g_, coloring_));
diff --git a/src/projects/cap/stats.hpp b/src/projects/cap/stats.hpp
index 4d7f1ef..abb916e 100644
--- a/src/projects/cap/stats.hpp
+++ b/src/projects/cap/stats.hpp
@@ -11,7 +11,7 @@
 #include "assembly_graph/components/graph_component.hpp"
 #include "assembly_graph/components/splitters.hpp"
 #include "utils.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "utils/simple_tools.hpp"
 #include "comparison_utils.hpp"
 #include "assembly_graph/graph_support/basic_graph_stats.hpp"
 #include "coloring.hpp"
@@ -377,12 +377,12 @@ public:
     }
 
     void CountStats() {
-        EmptyGraphLabeler<Graph> labeler;
+        visualization::graph_labeler::EmptyGraphLabeler<Graph> labeler;
         make_dir("assembly_compare");
         shared_ptr<GraphSplitter<Graph>> splitter = LongEdgesExclusiveSplitter<Graph>(this->graph(), 1000000000);
         WriteComponents(this->graph(), *splitter, *this,
                 "assembly_compare/breakpoint_graph.dot",
-                *ConstructColorer(coloring_), labeler);
+                *visualization::ConstructColorer(coloring_), labeler);
         ready_ = true;
         for (size_t i = 0; i < component_type::size; ++i) {
             INFO("Number of components of type " << ComponentClassifier<Graph>::info_printer_pos_name(i) << " is " << GetComponentNumber(i));
@@ -419,7 +419,7 @@ public:
     }
 
     void PrintComponents(component_type c_type,
-            const GraphLabeler<Graph>& labeler,
+            const visualization::graph_labeler::GraphLabeler<Graph>& labeler,
             bool create_subdir = true) const {
         string filename;
         if (create_subdir) {
@@ -460,7 +460,7 @@ public:
         }
     }
 
-    void CountStats(const GraphLabeler<Graph>& labeler, bool detailed_output =
+    void CountStats(const visualization::graph_labeler::GraphLabeler<Graph>& labeler, bool detailed_output =
             true) const {
         make_dir(output_folder_);
         BreakPointGraphStatistics<Graph> stats(graph_, coloring_);
@@ -524,14 +524,14 @@ class TrivialBreakpointFinder: public AbstractFilter<
     void ReportBreakpoint(VertexId v, const string& folder,
             const string& prefix) {
         TRACE("Vertex " << g_.str(v) << " identified as breakpoint");
-        LengthIdGraphLabeler<Graph> basic_labeler(g_);
-        EdgePosGraphLabeler<Graph> pos_labeler(g_, pos_);
+        visualization::graph_labeler::LengthIdGraphLabeler<Graph> basic_labeler(g_);
+        visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(g_, pos_);
 
-        CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+        visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
         VERIFY(g_.OutgoingEdgeCount(v) > 0);
         EdgeId e = g_.OutgoingEdges(v).front();
         GraphComponent<Graph> component = omnigraph::EdgeNeighborhood(g_, e);
-        visualization::WriteComponent(
+        visualization::visualization_utils::WriteComponent(
                 component,
                 folder + prefix + ToString(g_.int_id(v)) + "_loc.dot",
                 coloring_.ConstructColorer(component), labeler);
@@ -695,10 +695,10 @@ class SimpleInDelAnalyzer {
     }
 
     void WriteAltPath(EdgeId e, const vector<EdgeId>& genome_path) {
-        LengthIdGraphLabeler<Graph> basic_labeler(g_);
-        EdgePosGraphLabeler<Graph> pos_labeler(g_, edge_pos_);
+        visualization::graph_labeler::LengthIdGraphLabeler<Graph> basic_labeler(g_);
+        visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(g_, edge_pos_);
 
-        CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+        visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
 
         string alt_path_folder = folder_ + ToString(g_.int_id(e)) + "/";
         make_dir(alt_path_folder);
@@ -843,10 +843,10 @@ private:
         INFO(
                 "Edge " << gp_.g.str(e)
                         << " identified as rearrangement connection");
-        LengthIdGraphLabeler<Graph> basic_labeler(gp_.g);
-        EdgePosGraphLabeler<Graph> pos_labeler(gp_.g, gp_.edge_pos);
+        visualization::graph_labeler::LengthIdGraphLabeler<Graph> basic_labeler(gp_.g);
+        visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(gp_.g, gp_.edge_pos);
 
-        CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+        visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
 
         INFO(
                 count_ << " example start_ref_pos: " << start_ref_pos
@@ -855,7 +855,7 @@ private:
                 boost::format("%s%d_%d_%d_%d.dot") % folder % count_
                         % gp_.g.int_id(e) % start_ref_pos % end_ref_pos);
         GraphComponent<Graph> component = omnigraph::EdgeNeighborhood(gp_.g, e);
-        omnigraph::visualization::WriteComponent(component, filename, coloring_.ConstructColorer(component), labeler);
+        visualization::visualization_utils::WriteComponent(component, filename, coloring_.ConstructColorer(component), labeler);
         count_++;
     }
 
@@ -1467,10 +1467,10 @@ class MissingGenesAnalyser {
     const string output_dir_;
 
     void ReportLocality(const Sequence& s, const string& out_file) {
-        LengthIdGraphLabeler<Graph> basic_labeler(g_);
-        EdgePosGraphLabeler<Graph> pos_labeler(g_, edge_pos_);
+        visualization::graph_labeler::LengthIdGraphLabeler<Graph> basic_labeler(g_);
+        visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(g_, edge_pos_);
 
-        CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+        visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
 
         WriteComponentsAlongPath(g_, labeler, out_file, /*split_length*/1000, /*vertex_number*/15
                 , mapper_.MapSequence(s), *ConstructBorderColorer(g_, coloring_));
diff --git a/src/projects/cap/tools.cpp b/src/projects/cap/tools.cpp
index 41cd674..9a63635 100755
--- a/src/projects/cap/tools.cpp
+++ b/src/projects/cap/tools.cpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #include "compare_standard.hpp"
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 #include "pipeline/graphio.hpp"
 #include <boost/test/unit_test.hpp>
 
diff --git a/src/projects/cap/untangling.hpp b/src/projects/cap/untangling.hpp
index dc8737b..0c2cca7 100644
--- a/src/projects/cap/untangling.hpp
+++ b/src/projects/cap/untangling.hpp
@@ -242,8 +242,8 @@
 //        Untangle(stream2, 1);
 //
 //        UntangledGraphContigMapper<bp_graph_pack<Graph>> contig_mapper(new_gp_);
-//        FillPos(new_gp_.g, contig_mapper, new_gp_.edge_pos, stream1);
-//        FillPos(new_gp_.g, contig_mapper, new_gp_.edge_pos, stream2);
+//        visualization::position_filler::FillPos(new_gp_.g, contig_mapper, new_gp_.edge_pos, stream1);
+//        visualization::position_filler::FillPos(new_gp_.g, contig_mapper, new_gp_.edge_pos, stream2);
 //    }
 //private:
 //    DECL_LOGGER("UntangledGraphConstructor")
diff --git a/src/projects/cap/visualization.hpp b/src/projects/cap/visualization.hpp
index 7b862bb..c4105b2 100644
--- a/src/projects/cap/visualization.hpp
+++ b/src/projects/cap/visualization.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
 #include "visualization/visualization_utils.hpp"
 
 namespace cap {
@@ -75,10 +75,10 @@ template<class Graph>
 void PrintColoredGraph(const Graph& g, const ColorHandler<Graph>& coloring,
         const EdgesPositionHandler<Graph>& pos, const string& output_filename) {
     shared_ptr<GraphSplitter<Graph>> splitter = ReliableSplitter<Graph>(g, 1000000, 30);
-    LengthIdGraphLabeler<Graph> basic_labeler(g);
-    EdgePosGraphLabeler<Graph> pos_labeler(g, pos);
+    visualization::graph_labeler::LengthIdGraphLabeler<Graph> basic_labeler(g);
+    visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(g, pos);
 
-    CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+    visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
     WriteComponents(g, splitter, output_filename,
 //                *ConstructColorer(coloring),
             *ConstructBorderColorer(g, coloring), labeler);
@@ -89,12 +89,12 @@ void PrintColoredGraphAroundEdge(const Graph& g,
     const ColorHandler<Graph>& coloring, const EdgeId edge,
     const EdgesPositionHandler<Graph>& pos, const string& output_filename) {
   INFO(output_filename);
-    LengthIdGraphLabeler<Graph> basic_labeler(g);
-    EdgePosGraphLabeler<Graph> pos_labeler(g, pos);
+    visualization::graph_labeler::LengthIdGraphLabeler<Graph> basic_labeler(g);
+    visualization::graph_labeler::EdgePosGraphLabeler<Graph> pos_labeler(g, pos);
 
-    CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+    visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
     GraphComponent<Graph> component = omnigraph::EdgeNeighborhood(g, edge);
-    omnigraph::visualization::WriteComponent(component, output_filename, coloring.ConstructColorer(component), labeler);
+    visualization::visualization_utils::WriteComponent(component, output_filename, coloring.ConstructColorer(component), labeler);
 }
 
 template<class Graph>
@@ -111,8 +111,8 @@ void PrintColoredGraphWithColorFilter(const Graph &g, const ColorHandler<Graph>
     LengthIdGraphLabeler<Graph> basic_labeler(g);
     EdgeCoordinatesGraphLabeler<Graph> pos_labeler(g, pos, genome_names);
 
-    CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
-    omnigraph::visualization::WriteComponents(g, output_folder, fs, coloring.ConstructColorer(), labeler);
+    visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+    visualization::visualization_utils::WriteComponents(g, output_folder, fs, coloring.ConstructColorer(), labeler);
 }
 
 //fixme code duplication
@@ -130,8 +130,8 @@ void PrintColoredGraphWithColorFilter(const Graph &g, const ColorHandler<Graph>
     LengthIdGraphLabeler<Graph> basic_labeler(g);
     EdgePosGraphLabeler<Graph> pos_labeler(g, pos);
 
-    CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
-    omnigraph::visualization::WriteComponents(g, output_folder, fs, coloring.ConstructColorer(), labeler);
+    visualization::graph_labeler::CompositeLabeler<Graph> labeler(basic_labeler, pos_labeler);
+    visualization::visualization_utils::WriteComponents(g, output_folder, fs, coloring.ConstructColorer(), labeler);
 }
 
 //todo alert!!! magic constants!!!
@@ -146,7 +146,7 @@ void WriteComponentsAlongSequence(
     typedef typename gp_t::graph_t Graph;
     LengthIdGraphLabeler < Graph > basic_labeler(gp.g);
     EdgePosGraphLabeler < Graph > pos_labeler(gp.g, gp.edge_pos);
-    CompositeLabeler < Graph > labeler(basic_labeler, pos_labeler);
+    visualization::graph_labeler::CompositeLabeler < Graph > labeler(basic_labeler, pos_labeler);
 }
 
 template<class gp_t>
@@ -156,7 +156,7 @@ void PrintColoredGraphAlongRef(const gp_t& gp,
     LengthIdGraphLabeler < Graph > basic_labeler(gp.g);
     EdgePosGraphLabeler < Graph > pos_labeler(gp.g, gp.edge_pos);
 
-    CompositeLabeler < Graph > labeler(basic_labeler, pos_labeler);
+    visualization::graph_labeler::CompositeLabeler < Graph > labeler(basic_labeler, pos_labeler);
 
 //      only breakpoints
     TrivialBreakpointFinder<Graph> bp_f(gp.g, coloring, gp.edge_pos);
diff --git a/src/projects/cclean/CMakeLists.txt b/src/projects/cclean/CMakeLists.txt
new file mode 100644
index 0000000..24ce7b9
--- /dev/null
+++ b/src/projects/cclean/CMakeLists.txt
@@ -0,0 +1,30 @@
+############################################################################
+# Copyright (c) 2015 Saint Petersburg State University
+# Copyright (c) 2011-2014 Saint-Petersburg Academic University
+# All Rights Reserved
+# See file LICENSE for details.
+############################################################################
+
+project(cclean CXX)
+aux_source_directory(. SRC_LIST)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+file(GLOB ${CMAKE_CURRENT_SOURCE_DIR}
+    "*.hh"
+    "*.h"
+    "*.hpp"
+    "*.cpp"
+)
+add_executable(${PROJECT_NAME} ${SRC_LIST})
+
+target_link_libraries(cclean ssw input cityhash ${COMMON_LIBRARIES})
+
+if (SPADES_STATIC_BUILD)
+  set_target_properties(cclean PROPERTIES LINK_SEARCH_END_STATIC 1)
+endif()
+
+install(TARGETS cclean
+        DESTINATION bin
+        COMPONENT runtime)
+install(DIRECTORY "${SPADES_CFG_DIR}/cclean"
+        DESTINATION share/spades/configs
+        FILES_MATCHING PATTERN "*.info")
diff --git a/src/projects/cclean/adapter_index.cpp b/src/projects/cclean/adapter_index.cpp
new file mode 100644
index 0000000..29d7f3a
--- /dev/null
+++ b/src/projects/cclean/adapter_index.cpp
@@ -0,0 +1,50 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "adapter_index.hpp"
+#include "io/read_processor.hpp"
+#include "valid_kmer_generator.hpp"
+
+#include "io/ireadstream.hpp"
+#include "config_struct_cclean.hpp"
+
+#include <libcxx/sort.hpp>
+
+using namespace cclean;
+
+void AdapterIndexBuilder::FillAdapterIndex(const std::string &db, AdapterIndex &data) {
+  data.clear();
+
+  INFO("Reading adapter database from " << db);
+  ireadstream irs(db);
+  while (!irs.eof()) {
+    Read r;
+    irs >> r;
+    const std::string &seq = r.getSequenceString();
+
+    data.seqs_.push_back(seq);
+    data.seqs_.push_back(ReverseComplement(seq));
+  }
+
+  INFO("Filling adapter index");
+  for (size_t i = 0, e = data.seqs_.size(); i !=e; ++i) {
+    const std::string &seq = data.seqs_[i];
+    ValidKMerGenerator<cclean::K> gen(seq.c_str(), NULL, seq.size());
+
+    while (gen.HasMore()) {
+      KMer kmer = gen.kmer();
+
+      auto& entry = data.index_[kmer];
+      entry.insert(i);
+
+      gen.Next();
+    }
+  }
+
+  INFO("Done. Total " << data.seqs_.size() << " adapters processed. Total "
+                      << data.index_.size() << " unique k-mers.");
+}
diff --git a/src/projects/cclean/adapter_index.hpp b/src/projects/cclean/adapter_index.hpp
new file mode 100644
index 0000000..1bcc21f
--- /dev/null
+++ b/src/projects/cclean/adapter_index.hpp
@@ -0,0 +1,61 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef CCLEAN_ADAPTERINDEX_HPP
+#define CCLEAN_ADAPTERINDEX_HPP
+
+#include "sequence/seq.hpp"
+#include "utils/mph_index/kmer_index.hpp"
+
+#include <string>
+#include <set>
+#include <unordered_map>
+
+namespace cclean {
+const unsigned K = 10;
+typedef Seq<K> KMer;
+
+class AdapterIndex {
+  typedef std::set<std::size_t> IndexValueType;
+  std::unordered_map<KMer, IndexValueType, KMer::hash> index_;
+
+ public:
+  AdapterIndex() {}
+
+  void clear() {
+    index_.clear();
+    seqs_.clear();
+  }
+  IndexValueType& operator[](cclean::KMer s) { return index_[s]; }
+  auto find(cclean::KMer s) const -> decltype(index_.find(s)) { return index_.find(s); }
+  auto end() const -> decltype(index_.end()) { return index_.end(); }
+
+  bool contains(cclean::KMer s) const {
+    return index_.find(s) != index_.end();
+  }
+  const std::string& seq(size_t idx) const { return seqs_[idx]; }
+
+ private:
+  std::vector<std::string> seqs_;
+
+  friend class AdapterIndexBuilder;
+};
+
+class AdapterIndexBuilder {
+ public:
+  AdapterIndexBuilder() {}
+
+  void FillAdapterIndex(const std::string &db, AdapterIndex &index);
+
+ private:
+  DECL_LOGGER("Index Building");
+};
+
+  // end of namespace
+}
+
+#endif // CCLEAN_ADAPTERINDEX_HPP
diff --git a/src/projects/cclean/additional.cpp b/src/projects/cclean/additional.cpp
new file mode 100644
index 0000000..ed0065f
--- /dev/null
+++ b/src/projects/cclean/additional.cpp
@@ -0,0 +1,69 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef ADDITIONAL_CPP
+#define ADDITIONAL_CPP
+
+#include "output.hpp"
+#include "config_struct_cclean.hpp"
+#include "io/read_processor.hpp"
+
+  enum WorkModeType {
+    NONE = 0,
+    SINGLE_END = 1,
+    SINGLE_END_Q = 2,
+    BRUTE_SIMPLE = 3,
+    BRUTE_WITH_Q = 4
+  };
+
+  constexpr double MatchScore = 0.6;
+  constexpr double MismatchScore = 100;
+
+  class AbstractCclean {
+      // Abstract base class for cclean functors
+    public:
+      AbstractCclean(std::ostream &aligned_output, std::ostream &bed,
+                     const std::string &db,
+                     const WorkModeType &mode,
+                     const unsigned mlen,
+                     const bool full_inform = false)
+                :aligned_(0), full_inform_(full_inform), read_mlen_(mlen),
+                 mismatch_threshold_(cfg::get().mismatch_threshold),
+                 score_threshold_(cfg::get().score_treshold),
+                 aligned_part_fraction_(cfg::get().aligned_part_fraction),
+                 db_name_(db), mode_(mode), aligned_output_stream_(aligned_output),
+                 bad_stream_(bed)  {}
+      virtual Read operator()(const Read &read, bool *ok) = 0;
+      inline size_t aligned() { return aligned_; }
+      virtual ~AbstractCclean() {}
+
+    protected:
+      size_t aligned_;
+
+      const bool full_inform_;
+      const uint read_mlen_;
+      const double mismatch_threshold_;  // for nonquality mode
+      const double score_threshold_;  // for quality mode
+
+      const double aligned_part_fraction_;
+      const std::string &db_name_;
+      const WorkModeType mode_;
+
+      std::ostream &aligned_output_stream_;
+      std::ostream &bad_stream_;
+      // Abstract for clean functors
+      class AbstractCleanFunctor {
+        public:
+          inline virtual bool operator()(const Read &r,
+                          const StripedSmithWaterman::Alignment &a,
+                          double aligned_part, const std::string &adapter,
+                          double *best_score) = 0;
+          virtual ~AbstractCleanFunctor() {}
+      };
+  };
+
+#endif // ADDITIONAL_CPP
diff --git a/src/projects/cclean/brute_force_clean.cpp b/src/projects/cclean/brute_force_clean.cpp
new file mode 100644
index 0000000..de35bb3
--- /dev/null
+++ b/src/projects/cclean/brute_force_clean.cpp
@@ -0,0 +1,97 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "brute_force_clean.hpp"
+
+#include <string>
+#include <vector>
+#include <iostream>
+
+#include "adapter_index.hpp"
+#include <ssw/ssw_cpp.h> // Striped Smith-Waterman aligner
+#include "additional.cpp"
+#include "output.hpp"
+
+using std::string;
+using std::vector;
+using StripedSmithWaterman::Filter;
+using StripedSmithWaterman::Aligner;
+using StripedSmithWaterman::Alignment;
+using cclean_output::print_alignment;
+using cclean_output::print_bad;
+using cclean_output::print_match;
+using cclean_output::print_read;
+
+static inline bool is_alignment_good(const StripedSmithWaterman::Alignment& a,
+                              const std::string& sequence,
+                              const std::string& query,
+                              double aligned_part_fraction) {
+  // Check that the query adjoins or even overlaps the sequence edge
+  return (std::min(a.query_end - a.query_begin + 1, a.ref_end - a.ref_begin + 1)
+         / (double) query.size() > aligned_part_fraction) &&
+         (a.ref_begin == 0 || a.ref_end == sequence.size() - 1);
+}
+
+Read BruteForceClean::operator()(const Read &read, bool *ok) {
+  const string &read_name = read.getName();
+  const string &seq_string = read.getSequenceString();
+  Filter filter; // SSW filter
+  Aligner aligner; // SSW aligner
+  aligner.SetReferenceSequence(seq_string.c_str(),
+                               static_cast<int>(seq_string.size()));
+  Alignment alignment;
+
+  //  There can be many candidate adapter alignments, so we search for the most probable one
+  double best_score;
+  if (mode_ == BRUTE_SIMPLE)  // so in both mode first overlap will initialize as best
+    best_score = mismatch_threshold_;
+  if (mode_ == BRUTE_WITH_Q)
+    best_score = score_threshold_;
+  std::string best_adapter = "";
+
+  //  For each adapter align read and adapter
+  for (std::string adapt_string: adap_seqs_) {
+
+    aligner.Align(adapt_string.c_str(), filter, &alignment);
+    if((*checker)(read, alignment, aligned_part_fraction_, adapt_string,
+                  &best_score)) {
+      best_adapter = adapt_string;
+    }
+  }
+
+  if (!best_adapter.empty())  {
+      aligner.Align(best_adapter.c_str(), filter, &alignment);
+      aligned_ += 1;
+      Read cuted_read = cclean_utils::CutRead(read, alignment.ref_begin,
+                                              alignment.ref_end);
+      if (full_inform_)  // If user want full output
+#       pragma omp critical
+        print_alignment(aligned_output_stream_, alignment, seq_string,
+                        best_adapter, read_name, db_name_);
+
+      // Cut read must be >= minimum length specified by arg
+      if (cuted_read.getSequenceString().size() >= read_mlen_) {
+        if (full_inform_)  // If user want full output
+#         pragma omp critical
+          print_bad(bad_stream_, read_name, alignment.ref_begin, alignment.ref_end);
+        (*ok) = true;
+        return cuted_read;
+      }
+      else {
+        if (full_inform_)
+#         pragma omp critical
+          print_bad(bad_stream_, read_name, 0, alignment.ref_end);
+        (*ok) = false;
+        return cuted_read;
+      }
+    }
+  else {
+    // Read was not aligned with any adapter
+    (*ok) = true;
+    return read;
+  }
+}
diff --git a/src/projects/cclean/brute_force_clean.hpp b/src/projects/cclean/brute_force_clean.hpp
new file mode 100644
index 0000000..daeabe5
--- /dev/null
+++ b/src/projects/cclean/brute_force_clean.hpp
@@ -0,0 +1,72 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef BRUTE_FORCE_CLEAN_HPP
+#define BRUTE_FORCE_CLEAN_HPP
+
+#include "utils.hpp"
+#include "additional.cpp"
+
+class BruteForceClean: public AbstractCclean {
+  // Class that receives a read via operator() and cleans it, if possible
+  public:
+    BruteForceClean(std::ostream& aligned_output,
+                    std::ostream& bed,const std::string &db,
+                    const WorkModeType &mode,
+                    const uint mlen,
+                    const std::vector<std::string> &gen,
+                    const bool full_inform = false)
+      : AbstractCclean(aligned_output, bed, db, mode, mlen, full_inform),
+        adap_seqs_(gen)  {
+      if(mode == BRUTE_SIMPLE) checker = new BruteCleanFunctor;
+      if(mode == BRUTE_WITH_Q) checker = new BruteQualityCleanFunctor;
+    }
+    virtual ~BruteForceClean() { delete checker; }
+    // ReadProcessor class put each read in this operator
+    virtual Read operator()(const Read &read, bool *ok);
+
+  private:
+    const std::vector<std::string> &adap_seqs_;
+    std::string best_adapter_;
+    AbstractCleanFunctor *checker; // Checks whether an adapter is present in the read
+
+    // Here goes functors for clean in different modes
+    class BruteCleanFunctor: public AbstractCleanFunctor {
+        virtual inline bool operator()(const Read &r,
+                                       const StripedSmithWaterman::Alignment &a,
+                                       double aligned_part, const std::string &adapter,
+                                       double *best_score) {
+          double cur_score = cclean_utils::
+                             GetMismatches(r.getSequenceString(), adapter, a);
+          if (cur_score < (*best_score) &&
+              cclean_utils::is_alignment_good(a, r.getSequenceString(), adapter,
+                                              aligned_part)) {
+            (*best_score) = cur_score;
+            return true;
+          }
+          return false;
+        }
+    };
+    class BruteQualityCleanFunctor: public AbstractCleanFunctor {
+        virtual inline bool operator()(const Read &r,
+                                       const StripedSmithWaterman::Alignment &a,
+                                       double aligned_part, const std::string &adapter,
+                                       double *best_score) {
+          double cur_score = cclean_utils::
+                             GetScoreWithQuality(a, r.getQuality().str());
+          if (cur_score >= (*best_score) &&
+              cclean_utils::is_alignment_good(a, r.getSequenceString(), adapter,
+                                              aligned_part)) {
+            (*best_score) = cur_score;
+            return true;
+          }
+          return false;
+        }
+    };
+};
+
+#endif // BRUTE_FORCE_CLEAN_HPP
diff --git a/src/modules/data_structures/sequence/seq_common.hpp b/src/projects/cclean/comparator.hpp
similarity index 61%
rename from src/modules/data_structures/sequence/seq_common.hpp
rename to src/projects/cclean/comparator.hpp
index eb987d5..355431e 100644
--- a/src/modules/data_structures/sequence/seq_common.hpp
+++ b/src/projects/cclean/comparator.hpp
@@ -5,16 +5,14 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-/*
- * seq_common.hpp
- *
- *  Created on: Jun 25, 2012
- *      Author: andrey
- */
+#ifndef COMPARATOR_H_
+#define COMPARATOR_H_
 
-#ifndef SEQ_COMMON_HPP_
-#define SEQ_COMMON_HPP_
+class Compare {
+   public:
+      bool operator() (std::string * lhs, std::string * rhs) const {
+          return *lhs < *rhs;
+      }
+};
 
-typedef u_int64_t seq_element_type;
-
-#endif /* SEQ_COMMON_HPP_ */
+#endif /* COMPARATOR_H_ */
diff --git a/src/projects/cclean/config_struct_cclean.cpp b/src/projects/cclean/config_struct_cclean.cpp
new file mode 100644
index 0000000..c9e9eda
--- /dev/null
+++ b/src/projects/cclean/config_struct_cclean.cpp
@@ -0,0 +1,44 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "config_struct_cclean.hpp"
+#include "pipeline/config_common.hpp"
+#include "utils/openmp_wrapper.h"
+
+void load(cclean_config& cfg, const std::string &filename) {
+  boost::property_tree::ptree pt;
+  boost::property_tree::read_info(filename, pt);
+
+  load(cfg, pt);
+}
+
+void load(cclean_config& cfg, boost::property_tree::ptree const& pt) {
+  using config_common::load;
+  load(cfg.use_quality, pt, "use_quality");
+  load(cfg.use_bruteforce, pt, "use_bruteforce");
+  load(cfg.debug_information, pt, "debug_information");
+
+  load(cfg.score_treshold, pt, "score_treshold");
+  load(cfg.mismatch_threshold, pt, "mismatch_threshold");
+  load(cfg.minimum_lenght, pt, "minimum_lenght");
+  load(cfg.nthreads, pt, "nthreads");
+  load(cfg.aligned_part_fraction, pt, "aligned_part_fraction");
+  load(cfg.buffer_size, pt, "buffer_size");
+
+  load(cfg.dataset_file_name, pt, "dataset");
+  load(cfg.database, pt, "database");
+  load(cfg.input_working_dir, pt, "input_working_dir");
+  load(cfg.output_working_dir, pt, "output_working_dir");
+
+  std::string file_name = cfg.dataset_file_name;
+  cfg.dataset.load(file_name);
+
+  // Fix number of threads according to OMP capabilities.
+  cfg.nthreads = std::min(cfg.nthreads, (unsigned)omp_get_max_threads());
+  // Inform OpenMP runtime about this :)
+  omp_set_num_threads(cfg.nthreads);
+}
diff --git a/src/projects/cclean/config_struct_cclean.hpp b/src/projects/cclean/config_struct_cclean.hpp
new file mode 100644
index 0000000..e56cc92
--- /dev/null
+++ b/src/projects/cclean/config_struct_cclean.hpp
@@ -0,0 +1,42 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef CONFIG_STRUCT_CCLEAN_HPP
+#define CONFIG_STRUCT_CCLEAN_HPP
+
+#include "pipeline/config_singl.hpp"
+#include <boost/property_tree/ptree_fwd.hpp>
+#include "pipeline/library.hpp"
+
+struct cclean_config {
+
+  bool use_quality;
+  bool use_bruteforce;
+  bool debug_information;
+
+  unsigned score_treshold;
+  unsigned mismatch_threshold;
+  unsigned minimum_lenght;
+  unsigned nthreads;
+  unsigned buffer_size;
+  double aligned_part_fraction;
+
+  std::string dataset_file_name;
+  std::string database;
+  std::string input_working_dir;
+  std::string output_working_dir;
+
+  io::DataSet<> dataset;
+};
+
+// main config load function
+void load(cclean_config& cfg, const std::string &filename);
+void load(cclean_config& cfg, boost::property_tree::ptree const& pt);
+
+typedef config_common::config<cclean_config> cfg;
+
+#endif
diff --git a/src/projects/cclean/job_wrappers.cpp b/src/projects/cclean/job_wrappers.cpp
new file mode 100644
index 0000000..3ea37c3
--- /dev/null
+++ b/src/projects/cclean/job_wrappers.cpp
@@ -0,0 +1,97 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include <set>
+
+#include "job_wrappers.hpp"
+#include "utils/logger/log_writers.hpp"
+#include "adapter_index.hpp"
+#include "valid_kmer_generator.hpp"
+#include "adapter_index.hpp"
+#include "output.hpp"
+#include "ssw/ssw_cpp.h"
+#include "utils.hpp"
+
+using cclean_output::print_alignment;
+using cclean_output::print_bad;
+using cclean_output::print_match;
+using cclean_output::print_read;
+
+// Process one read: collect candidate adapters via the k-mer index, pick the
+// best-scoring one with Striped Smith-Waterman alignment and cut it out.
+// *ok is set to false only when the trimmed read ends up shorter than
+// read_mlen_; untouched or acceptably trimmed reads report true.
+Read SimpleClean::operator()(const Read &read, bool *ok)
+{
+  const std::string& name = read.getName();
+  const std::string& sequence = read.getSequenceString();
+
+  // Candidate adapters: every index entry sharing a k-mer with the read.
+  std::set<size_t> to_check;
+  ValidKMerGenerator<cclean::K> gen(sequence.c_str(), NULL, sequence.size());
+  while (gen.HasMore()) {
+    cclean::KMer kmer = gen.kmer();
+
+    auto it = index_.find(kmer);
+    if (it != index_.end())
+      to_check.insert(it->second.begin(), it->second.end());
+
+    gen.Next();
+  }
+
+  //  Try to align the artifacts for corresponding kmers
+  StripedSmithWaterman::Aligner aligner;
+  StripedSmithWaterman::Filter filter;
+  StripedSmithWaterman::Alignment alignment;
+  aligner.SetReferenceSequence(sequence.c_str(), sequence.size());
+
+  //  Pointer to the best matching adapter, if any.
+  const std::string *best_adapter = nullptr;
+  // FIX: initialize best_score. The original declared it uninitialized and
+  // only assigned it under the two mode checks below, so any other mode read
+  // an indeterminate value (undefined behaviour). The redundant second
+  // "best_adapter = nullptr;" assignment was dropped as well.
+  double best_score = 0.0;
+  if (mode_ == SINGLE_END)  // in both modes the first overlap seeds the best
+    best_score = mismatch_threshold_;
+  if (mode_ == SINGLE_END_Q)
+    best_score = score_threshold_;
+
+  for (auto it = to_check.begin(), et = to_check.end(); it != et; ++it) {
+    const std::string &query = index_.seq(*it);
+    aligner.Align(query.c_str(), filter, &alignment);
+    // Check whether this adapter beats the previous best (checker updates
+    // best_score in place when it does).
+    if((*checker)(read, alignment, aligned_part_fraction_, query,
+                  &best_score)) {
+      best_adapter = &query;
+    }
+  }
+
+  if (best_adapter != nullptr)  {
+      // Re-align the winner to recover its alignment coordinates.
+      aligner.Align(best_adapter->c_str(), filter, &alignment);
+      aligned_ += 1;
+      Read cuted_read = cclean_utils::CutRead(read, alignment.ref_begin,
+                                              alignment.ref_end);
+      if (full_inform_)  // If the user requested full output
+#       pragma omp critical
+        print_alignment(aligned_output_stream_, alignment, sequence,
+                        *best_adapter, name, db_name_);
+
+      // The trimmed read must be >= the configured minimum length.
+      if (cuted_read.getSequenceString().size() >= read_mlen_) {
+        if (full_inform_)
+#         pragma omp critical
+          print_bad(bad_stream_, name, alignment.ref_begin, alignment.ref_end);
+        (*ok) = true;
+        return cuted_read;
+      }
+      else {
+        if (full_inform_)
+#         pragma omp critical
+          print_bad(bad_stream_, name, 0, alignment.ref_end);
+        (*ok) = false;
+        return cuted_read;
+      }
+  }
+  else {
+    // Read was not aligned with any adapter; keep it untouched.
+    (*ok) = true;
+    return read;
+  }
+}
diff --git a/src/projects/cclean/job_wrappers.hpp b/src/projects/cclean/job_wrappers.hpp
new file mode 100644
index 0000000..7adccb1
--- /dev/null
+++ b/src/projects/cclean/job_wrappers.hpp
@@ -0,0 +1,73 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef JOB_WRAPERS_HPP
+#define JOB_WRAPERS_HPP
+
+#include "additional.cpp"
+#include "utils.hpp"
+
+namespace cclean {
+  class AdapterIndex;
+}
+
+// Cleaner that pre-filters candidate adapters through a k-mer index and then
+// aligns only those candidates (see SimpleClean::operator() in the .cpp).
+class SimpleClean: public AbstractCclean {
+  public:
+    SimpleClean(std::ostream &aligned_output,
+                std::ostream &bed, const std::string &db,
+                const WorkModeType &mode,
+                const unsigned mlen,
+                const cclean::AdapterIndex &index,
+                const bool full_inform = false)
+      : AbstractCclean(aligned_output, bed, db, mode, mlen, full_inform),
+        index_(index)  {
+      if(mode_ == SINGLE_END) checker = new SimpleCleanFunctor;
+      if(mode_ == SINGLE_END_Q) checker = new SimpleQualityCleanFunctor;
+    }
+    virtual ~SimpleClean() { delete checker; }
+    virtual Read operator()(const Read &read, bool *ok);
+
+  private:
+    const cclean::AdapterIndex &index_;
+    // Checks whether an adapter matches a read. FIX: initialized to nullptr;
+    // the original left the pointer indeterminate when mode_ matched neither
+    // SINGLE_END nor SINGLE_END_Q, making "delete checker" in the destructor
+    // undefined behaviour. Deleting a null pointer is a safe no-op.
+    AbstractCleanFunctor *checker = nullptr;
+
+    // Per-mode "is this adapter a better hit than the current best" tests.
+    // Mismatch mode: lower score (fewer mismatches) wins, and the alignment
+    // must cover a sufficient part of the adapter.
+    class SimpleCleanFunctor: public AbstractCleanFunctor {
+        virtual inline bool operator()(const Read &r,
+                                       const StripedSmithWaterman::Alignment &a,
+                                       double aligned_part, const std::string &adapter,
+                                       double *best_score) {
+          double cur_score = cclean_utils::
+                             GetMismatches(r.getSequenceString(), adapter, a);
+          if (cur_score < (*best_score) &&
+              cclean_utils::is_alignment_good(a, r.getSequenceString(), adapter,
+                                aligned_part)) {
+              (*best_score) = cur_score;
+              return true;
+          }
+          return false;
+        }
+    };
+    // Quality mode: higher quality-weighted alignment score wins.
+    class SimpleQualityCleanFunctor: public AbstractCleanFunctor {
+        virtual inline bool operator()(const Read &r,
+                                       const StripedSmithWaterman::Alignment &a,
+                                       double aligned_part, const std::string &adapter,
+                                       double *best_score) {
+          double cur_score = cclean_utils::
+                             GetScoreWithQuality(a, r.getQuality().str());
+          if (cur_score >= (*best_score) &&
+              cclean_utils::is_alignment_good(a, r.getSequenceString(), adapter,
+                                aligned_part)) {
+              (*best_score) = cur_score;
+              return true;
+          }
+          return false;
+        }
+    };
+};
+
+#endif /* JOB_WRAPERS_HPP */
diff --git a/src/projects/cclean/main.cpp b/src/projects/cclean/main.cpp
new file mode 100644
index 0000000..4d50785
--- /dev/null
+++ b/src/projects/cclean/main.cpp
@@ -0,0 +1,86 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include <iostream>
+#include <string>
+#include <map>
+#include <exception>
+
+#include "sequence/seq.hpp"
+#include "utils/logger/log_writers.hpp"
+#include "utils/memory_limit.hpp"
+#include "running_modes.hpp"
+#include "config_struct_cclean.hpp"
+#include "utils/simple_tools.hpp"
+#include "adapter_index.hpp"
+#include "utils.hpp"
+
+#include "valid_kmer_generator.hpp"
+#include "io/read_processor.hpp"
+#include "modules/ssw_cpp.h"
+#include "additional.cpp"
+
+#include "job_wrappers.hpp"
+#include "brute_force_clean.hpp"
+
+using logging::logger;
+using logging::create_logger;
+using logging::console_writer;
+using std::string;
+
+constexpr int CONFIG_FILE_ARG = 1;
+
+// Print command-line usage help to stdout.
+void usage() {
+  std::cout << "usage: cclean [program config file]" << std::endl;
+}
+
+// Set up a global logger that writes to the console; must run before any
+// INFO/ERROR macro is used.
+void create_console_logger() {
+  logger *lg = create_logger("");
+  lg->add_writer(std::make_shared<console_writer>());
+  attach_logger(lg);
+}
+
+// Entry point: load the config, verify that the input files exist, run the
+// whole cleaning pipeline and report the CPU time spent.
+int main(int argc, char *argv[]) {
+
+  create_console_logger();
+
+  if (argc < 2) {
+    usage();
+    return EXIT_FAILURE;
+  }
+
+  std::string config_file = argv[CONFIG_FILE_ARG];
+  INFO("Loading config from " << config_file.c_str());
+  // FIX: error messages read "doesn't exists" (bad grammar); corrected to
+  // "doesn't exist" in all three checks below.
+  if (!path::FileExists(config_file)) {
+      ERROR("File " + config_file + " doesn't exist.");
+      return EXIT_FAILURE;
+  }
+  cfg::create_instance(config_file);
+
+  // Both the adapter database and the dataset description must be present.
+  const std::string &database = cfg::get().database;
+  if (!path::FileExists(database)) {
+      ERROR("File " + database + " doesn't exist.");
+      return EXIT_FAILURE;
+  }
+  const std::string &dataset = cfg::get().dataset_file_name;
+  if (!path::FileExists(dataset)) {
+      ERROR("File " + dataset + " doesn't exist.");
+      return EXIT_FAILURE;
+  }
+
+  clock_t start = clock();
+
+  Cleaner::ProcessDataset();  // Main work here
+
+  INFO("DONE");
+  clock_t ends = clock();
+  // clock() measures CPU time, not wall-clock time.
+  INFO("Processor Time Spent: " << (double) (ends - start) / CLOCKS_PER_SEC
+       << " seconds.");
+  INFO("Goodbye!");
+
+  return EXIT_SUCCESS;
+}
diff --git a/src/projects/cclean/output.cpp b/src/projects/cclean/output.cpp
new file mode 100644
index 0000000..ff85f99
--- /dev/null
+++ b/src/projects/cclean/output.cpp
@@ -0,0 +1,82 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include <iostream>
+#include <algorithm>
+#include <iostream>
+#include <fstream>
+#include "output.hpp"
+#include "utils.hpp"
+
+namespace cclean_output {
+
+// Write character c to output exactly n times; a non-positive n is a no-op.
+void print_n_times(std::ostream& output, char c, int n) {
+  for (int remaining = n; remaining > 0; --remaining)
+    output << c;
+}
+
+// Pretty-print one read/adapter alignment as a four-line block: the full
+// read, the aligned read fragment, a |/* match-mismatch ruler, and the
+// aligned adapter fragment over the full adapter, padded so the columns
+// line up.
+void print_alignment(std::ostream& output, const StripedSmithWaterman::Alignment &data,
+        const std::string& ref, const std::string& query,
+        const std::string& name, const std::string& database_name) {
+
+  output << "Alignment: input sequence (first line) " << name << " alignes "
+         << std::endl
+         << "sequence from database (last line) " << database_name << std::endl;
+
+  std::string aligned_query, aligned_ref;
+  cclean_utils::RestoreFromCigar(ref, query, aligned_ref, aligned_query, data);
+
+  // case when pattern's start pos is less than text one
+  int text_offset = data.ref_begin - data.query_begin < 0 ? data.query_begin
+                                                            - data.ref_begin : 0;
+
+  // ref = read
+  print_n_times(output, ' ', text_offset);
+  output << ref << std::endl;
+  print_n_times(output, ' ', text_offset + data.ref_begin);
+  output << aligned_ref << std::endl;
+
+  // vertical dashes: '|' marks a match column, '*' a mismatch
+  print_n_times(output, ' ', text_offset + data.ref_begin);
+  for (int i = 0; i < (int)std::min(aligned_query.length(), aligned_ref.length()); ++i) {
+   aligned_query.at(i) == aligned_ref.at(i) ? output << "|" : output << "*";
+  }
+  output << std::endl;
+
+  // query = contamination
+  print_n_times(output, ' ', text_offset + data.ref_begin);
+  output << aligned_query << std::endl;
+  // a negative count here is silently ignored by print_n_times
+  print_n_times(output, ' ', data.ref_begin - data.query_begin);
+  output << query << std::endl;
+  output << std::endl;
+ }
+
+// Report exact (non-gapped) matches: for every adapter and every position it
+// was found at, print the read with the adapter aligned beneath it, and log
+// the matched interval to the BED-style `bed` stream via print_bad.
+// `res` maps adapter sequence -> list of match start positions in `seq`.
+void print_match(std::ostream& output, std::ostream& bed, std::map<std::string*,
+                  std::vector<int>, Compare>& res, const std::string& name,
+                  const std::string& seq, const std::string &db_name) {
+  for (auto it = res.begin(); it != res.end(); ++it) {
+   for (auto it_pos = it->second.begin(); it_pos != it->second.end(); ++it_pos) {
+
+    output << "Match: input sequence (first line) " << name << " matches "
+           << std::endl
+           << "sequence from database (2nd line) " << db_name << std::endl;
+
+    output << seq << std::endl;
+    print_n_times(output, ' ', *it_pos);
+    print_n_times(output, '|', it->first->length());
+    output << std::endl;
+    print_n_times(output, ' ', *it_pos);
+    output << *(it->first) << std::endl;
+    output << std::endl;
+
+    print_bad(bed, name, *it_pos, *it_pos + it->first->size());
+   }
+  }
+}
+//end of namespace
+}
diff --git a/src/projects/cclean/output.hpp b/src/projects/cclean/output.hpp
new file mode 100644
index 0000000..8266a45
--- /dev/null
+++ b/src/projects/cclean/output.hpp
@@ -0,0 +1,49 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef OUTPUT_HPP
+#define OUTPUT_HPP
+
+#include <string>
+#include <vector>
+#include <map>
+#include <io/read.hpp>
+#include <ostream>
+#include "comparator.hpp"
+#include "modules/ssw_cpp.h"
+
+// Helpers for the human-readable and BED-style report output of cclean.
+// NOTE(review): std::ofstream is used below but <fstream> is not included
+// directly — presumably pulled in transitively; confirm.
+namespace cclean_output {
+
+// Write character c to output n times.
+void print_n_times(std::ostream& output, char c, int n);
+
+// Pretty-print one read/adapter alignment (see output.cpp).
+void print_alignment(std::ostream& output,
+                     const StripedSmithWaterman::Alignment & data,
+                     const std::string& ref,
+                     const std::string& query, const std::string& name,
+                     const std::string& database_name);
+
+// Report exact adapter matches and log matched intervals via print_bad.
+void print_match(std::ostream& output, std::ostream& bed, std::map<std::string*,
+                 std::vector<int>, Compare>& res, const std::string& name,
+                 const std::string& seq, const std::string &db_name);
+
+void print_bad(std::ostream& output, const std::string & name,
+               int start, int stop);
+
+// Write a read in FASTQ form.
+// NOTE(review): the reinterpret_cast is only valid when `output` really is a
+// std::ofstream; it exists because Read::print takes an ofstream&. Passing
+// any other ostream here is undefined behaviour — confirm all call sites.
+inline void print_read(std::ostream& output, const Read &read) {
+    std::ofstream &stream =
+    reinterpret_cast<std::ofstream&>(output);
+    read.print(stream, Read::PHRED_OFFSET);
+}
+
+// Emit one BED-like line: "<name>\t<start>\t<stop>".
+inline void print_bad(std::ostream& output, const std::string & name,
+                      int start, int stop) {
+         output << name << "\t" << start << "\t" << stop << std::endl;
+}
+
+// end of namespace
+}
+#endif /* OUTPUT_H_ */
diff --git a/src/projects/cclean/running_modes.cpp b/src/projects/cclean/running_modes.cpp
new file mode 100644
index 0000000..73dcdfb
--- /dev/null
+++ b/src/projects/cclean/running_modes.cpp
@@ -0,0 +1,268 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "running_modes.hpp"
+
+#include <string>
+#include <unordered_map>
+#include <algorithm>
+
+#include "adapter_index.hpp"
+#include "output.hpp"
+#include "io/read_processor.hpp"
+#include "pipeline/library.hpp"
+#include "utils/logger/log_writers.hpp"
+#include "job_wrappers.hpp"
+#include "brute_force_clean.hpp"
+
+// Factory: build the cleaner implementation matching the work mode.
+// Single-end modes use the k-mer-filtered SimpleClean; brute modes align
+// every adapter. The caller owns the returned object and must delete it.
+AbstractCclean *Cleaner::getCleaner(std::ofstream *outf_alig_debug,
+                                    std::ofstream *outf_bad_deb,
+                                    const std::string &db, WorkModeType mode,
+                                    unsigned mlen,
+                                    const cclean::AdapterIndex &index,
+                                    bool deb_info) {
+  // FIX: initialize to nullptr — the original returned an indeterminate
+  // pointer (undefined behaviour) when mode matched neither branch below.
+  AbstractCclean *cleaner = nullptr;
+  if (mode == SINGLE_END || mode == SINGLE_END_Q)
+    cleaner = new SimpleClean(*outf_alig_debug, *outf_bad_deb, db,
+                              mode, mlen, index, deb_info);
+  if (mode == BRUTE_SIMPLE || mode == BRUTE_WITH_Q)
+    cleaner = new BruteForceClean(*outf_alig_debug, *outf_bad_deb, db,
+                                  mode, mlen, index.GetSeqs(), deb_info);
+  return cleaner;
+}
+
+// Top-level driver: build the adapter index from the database, then walk
+// every library of the configured dataset, cleaning paired files (into
+// corrected-left/right plus an unpaired file for half-surviving pairs) and
+// single files. Finally replaces the global dataset with the corrected one.
+void Cleaner::ProcessDataset() {
+  // Options proceed
+  const std::string db = cfg::get().database;
+  const WorkModeType mode = getMode();
+
+  cclean::AdapterIndex index;
+  cclean::AdapterIndexBuilder().FillAdapterIndex(db, index);
+
+  const io::DataSet<> &dataset = cfg::get().dataset;
+  io::DataSet<> outdataset;
+  // Proccessing dataset. Iterating through libraries
+  for (auto it = dataset.library_begin(), et = dataset.library_end(); it != et; ++it) {
+    const io::SequencingLibrary<> &lib = *it;
+    io::SequencingLibrary<> outlib = lib;
+    outlib.clear();
+    // Iterating through paired reads in current library lib
+    for (auto I = lib.paired_begin(), E = lib.paired_end(); I != E; ++I) {
+      INFO("Correcting pair reads from " << I->first << " and " << I->second);
+
+      const std::string &file_name_l = I->first;
+      const std::string &file_name_r = I->second;
+      const std::string outcorl = getReadsFilename(cfg::get().output_working_dir,
+                                             file_name_l, "correct_l");
+      const std::string outcorr = getReadsFilename(cfg::get().output_working_dir,
+                                             file_name_r, "correct_r");
+      // Unpaired output is named after both inputs so pairs from different
+      // files never collide.
+      const std::string unpaired = getPureFilename(file_name_l) + "_" +
+                                   getPureFilename(file_name_r);
+      const std::string outcoru = getReadsFilename(cfg::get().output_working_dir,
+                                             unpaired, "correct_u");
+      const std::string outbadl = getReadsFilename(cfg::get().output_working_dir,
+                                                   file_name_l, "bad");
+      const std::string outbadr = getReadsFilename(cfg::get().output_working_dir,
+                                                   file_name_r, "bad");
+
+      std::ofstream ofcorl(outcorl.c_str());
+      std::ofstream ofbadl(outbadl.c_str());
+      std::ofstream ofcorr(outcorr.c_str());
+      std::ofstream ofbadr(outbadr.c_str());
+      std::ofstream ofunp (outcoru.c_str());
+
+      CorrectPairedReadFiles(index, file_name_l, file_name_r, &ofbadl, &ofcorl,
+                             &ofbadr, &ofcorr, &ofunp, mode);
+      outlib.push_back_paired(outcorl, outcorr);
+      outlib.push_back_single(outcoru);
+    }
+
+    for (auto I = lib.single_begin(), E = lib.single_end(); I != E; ++I) {
+      INFO("Correcting single reads from " << *I);
+
+      const std::string reads_file_name = *I;
+      const std::string outcor = getReadsFilename(cfg::get().output_working_dir,
+                                                  reads_file_name, "correct");
+      const std::string outbad = getReadsFilename(cfg::get().output_working_dir,
+                                                  reads_file_name, "bad");
+
+      std::ofstream ofgood(outcor.c_str());
+      std::ofstream ofbad(outbad.c_str());
+
+      CorrectReadFile(index, reads_file_name, &ofgood, &ofbad, mode);
+      outlib.push_back_single(outcor);
+    }
+    outdataset.push_back(outlib);
+  }
+
+  // Downstream stages see only the corrected files.
+  cfg::get_writable().dataset = outdataset;
+}
+
+// Clean one single-end file in batches of nthreads * buffer_size reads:
+// each batch is corrected in parallel (CorrectReadsBatch), then reads are
+// written to *outf_good or *outf_bad depending on the per-read verdict.
+// Finishes by logging the fraction of bad reads.
+void Cleaner::CorrectReadFile(const cclean::AdapterIndex &index,
+                              const std::string &fname, std::ofstream *outf_good,
+                              std::ofstream *outf_bad, WorkModeType mode) {
+  const unsigned nthreads = cfg::get().nthreads;
+  const std::string db = cfg::get().database;
+  const unsigned mlen = cfg::get().minimum_lenght;
+  const size_t read_buffer_size = nthreads * cfg::get().buffer_size;
+  std::vector<Read> reads(read_buffer_size);
+  std::vector<bool> res(read_buffer_size, false);
+
+  const bool deb_info = cfg::get().debug_information;
+  std::string bad_out_debug = "";
+  std::string aligned_out_debug = "";
+  if (deb_info) {
+    // Else ofstreams will be not used, so there is no sense to create empty files
+    // So ofstreams will be created with empty strings
+    // NOTE(review): opening an ofstream with an empty path puts it in a fail
+    // state, so writes are silently discarded in non-debug mode — confirm
+    // this is the intended mechanism.
+    bad_out_debug = getReadsFilename(cfg::get().output_working_dir,
+                                     fname, "debug.bad");
+    aligned_out_debug = getReadsFilename(cfg::get().output_working_dir,
+                                       fname, "debug.alig");
+  }
+  std::ofstream ofbad_deb(bad_out_debug.c_str());
+  std::ofstream ofalig_deb(aligned_out_debug.c_str());
+
+  unsigned buffer_no = 0;
+  unsigned count_bad = 0;
+  unsigned count_total = 0;
+
+  ireadstream irs(fname);
+  VERIFY(irs.is_open());
+
+  AbstractCclean *cleaner = getCleaner(&ofalig_deb, &ofbad_deb, db, mode, mlen,
+                                       index, deb_info);
+
+  while (!irs.eof()) {
+    // Fill the buffer; the last batch may be shorter than read_buffer_size.
+    unsigned buf_size = 0;
+    for (; buf_size < read_buffer_size && !irs.eof(); ++buf_size) {
+      irs >> reads[buf_size];
+    }
+    if(deb_info) INFO("Prepared batch " << buffer_no << " of "
+                      << buf_size << " reads.");
+    count_bad += CorrectReadsBatch(cleaner, &res, &reads, buf_size, nthreads);
+    count_total += buf_size;
+    if (deb_info) INFO("Processed batch " << buffer_no);
+    for (size_t i = 0; i < buf_size; ++i) { // Here output reads in files
+      reads[i].print(*(res[i] ? outf_good : outf_bad), Read::PHRED_OFFSET);
+    }
+    if(deb_info) INFO("Written batch " << buffer_no);
+    ++buffer_no;
+  }
+
+  delete cleaner;
+  // Process info about results
+  const double percent_val = static_cast<double>(count_total) / 100.0;
+  std::ostringstream percent_bad;
+  percent_bad << std::fixed << std::setprecision(2) <<
+                   (static_cast<double>(count_bad) / percent_val);
+  INFO("Total proceed " + std::to_string(count_total) + ", " +
+       std::to_string(count_bad) + " reads (" + percent_bad.str() +
+       " percents of total) is bad.");
+}
+
+// Clean a pair of files in lock-step batches. Each side gets its own cleaner
+// and debug streams. Output routing per pair: both survive -> corrected
+// left/right files; exactly one survives -> the unpaired file; a failed read
+// goes to its side's bad file.
+void Cleaner::CorrectPairedReadFiles(const cclean::AdapterIndex &index,
+                                     const std::string &fnamel,
+                                     const std::string &fnamer, std::ofstream *ofbadl,
+                                     std::ofstream *ofcorl, std::ofstream *ofbadr,
+                                     std::ofstream *ofcorr, std::ofstream *ofunp,
+                                     WorkModeType mode) {
+  const unsigned nthreads = cfg::get().nthreads;
+  const std::string db = cfg::get().database;
+  const unsigned mlen = cfg::get().minimum_lenght;
+  const size_t read_buffer_size = nthreads * cfg::get().buffer_size;
+
+  std::vector<Read> left_reads(read_buffer_size);
+  std::vector<Read> right_reads(read_buffer_size);
+  std::vector<bool> left_res(read_buffer_size, false);
+  std::vector<bool> right_res(read_buffer_size, false);
+
+  ireadstream irsl(fnamel);
+  ireadstream irsr(fnamer);
+  VERIFY(irsl.is_open());
+  VERIFY(irsr.is_open());
+
+  const bool deb_info = cfg::get().debug_information;
+  std::string bad_out_deb_l = "";
+  std::string aligned_out_deb_l = "";
+  std::string bad_out_deb_r = "";
+  std::string aligned_out_deb_r = "";
+  if (deb_info) {
+    // Else ofstreams will be not used, so there is no sense to create empty files
+    // So ofstreams will be created with empty strings
+    bad_out_deb_l = getReadsFilename(cfg::get().output_working_dir,
+                                     fnamel, "debug.bad");
+    aligned_out_deb_l = getReadsFilename(cfg::get().output_working_dir,
+                                       fnamel, "debug.alig");
+    bad_out_deb_r = getReadsFilename(cfg::get().output_working_dir,
+                                     fnamer, "debug.bad");
+    aligned_out_deb_r = getReadsFilename(cfg::get().output_working_dir,
+                                       fnamer, "debug.alig");
+  }
+  std::ofstream ofbad_deb_l(bad_out_deb_l.c_str());
+  std::ofstream ofalig_deb_l(aligned_out_deb_l.c_str());
+  std::ofstream ofbad_deb_r(bad_out_deb_r.c_str());
+  std::ofstream ofalig_deb_r(aligned_out_deb_r.c_str());
+
+  AbstractCclean *cleaner_l = getCleaner(&ofalig_deb_l, &ofbad_deb_l, db, mode,
+                                         mlen, index, deb_info);
+  AbstractCclean *cleaner_r = getCleaner(&ofalig_deb_r, &ofbad_deb_r, db, mode,
+                                         mlen, index, deb_info);
+  unsigned buffer_no = 0;
+  unsigned count_bad_l = 0;
+  unsigned count_bad_r = 0;
+  unsigned count_total = 0;
+
+  while (!irsl.eof() && !irsr.eof()) {
+    // Fill both buffers in lock-step so index i is the same pair on both sides.
+    unsigned buf_size = 0;
+    for (; buf_size < read_buffer_size && !irsl.eof() &&
+         !irsr.eof(); ++buf_size) {
+      irsl >> left_reads[buf_size];
+      irsr >> right_reads[buf_size];
+    }
+    if(deb_info) INFO("Prepared batch " << buffer_no << " of " << buf_size
+                       << " reads.");
+
+    count_bad_l += CorrectReadsBatch(cleaner_l, &left_res, &left_reads,
+                                     buf_size, nthreads);
+    count_bad_r += CorrectReadsBatch(cleaner_r, &right_res, &right_reads,
+                                     buf_size, nthreads);
+    count_total += buf_size;
+
+    if(deb_info) INFO("Processed batch " << buffer_no);
+    for (size_t i = 0; i < buf_size; ++i) {
+      if (left_res[i] && right_res[i]) {
+        left_reads[i].print(*ofcorl, Read::PHRED_OFFSET);
+        right_reads[i].print(*ofcorr, Read::PHRED_OFFSET);
+      }
+      else {
+        // Broken pair: a surviving mate goes to the unpaired file.
+        left_reads[i].print(*(left_res[i] ? ofunp : ofbadl),
+                            Read::PHRED_OFFSET);
+        right_reads[i].print(*(right_res[i] ? ofunp : ofbadr),
+                             Read::PHRED_OFFSET);
+      }
+    }
+    if(deb_info) INFO("Written batch " << buffer_no);
+    ++buffer_no;
+  }
+
+  delete cleaner_l;
+  delete cleaner_r;
+
+  // Process info about results
+  const double percent_val = static_cast<double>(count_total) / 100.0;
+  std::ostringstream percent_bad_l;
+  std::ostringstream percent_bad_r;
+  percent_bad_l << std::fixed << std::setprecision(2) <<
+                   (static_cast<double>(count_bad_l) / percent_val);
+  percent_bad_r << std::fixed << std::setprecision(2) <<
+                   (static_cast<double>(count_bad_r) / percent_val);
+  INFO("Total proceed " + std::to_string(count_total) + ", " +
+       std::to_string(count_bad_l) + " left reads (" +
+       percent_bad_l.str() + " percents of total) is bad" + ", " +
+       std::to_string(count_bad_r) + " right reads (" +
+       percent_bad_r.str() + " percents of total) is bad.");
+}
diff --git a/src/projects/cclean/running_modes.hpp b/src/projects/cclean/running_modes.hpp
new file mode 100644
index 0000000..c2709db
--- /dev/null
+++ b/src/projects/cclean/running_modes.hpp
@@ -0,0 +1,93 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef RUNNING_MODES_HPP
+#define RUNNING_MODES_HPP
+
+#include <unordered_map>
+#include <string>
+#include <iostream>
+#include <iomanip>
+#include "additional.cpp"
+#include "adapter_index.hpp"
+
+// Static facade for the cclean pipeline: dataset driver, per-file correction
+// routines (defined in running_modes.cpp) and small inline helpers.
+class Cleaner {
+
+  public:
+    static void ProcessDataset();
+    // Correct reads in a given file
+    static void CorrectReadFile(const cclean::AdapterIndex &index,
+                                const std::string &fname,
+                                std::ofstream *outf_good, std::ofstream *outf_bad,
+                                WorkModeType mode);
+    // Correct reads in a given pair of files
+    static void CorrectPairedReadFiles(const cclean::AdapterIndex &index,
+                                       const std::string &fnamel,
+                                       const std::string &fnamer,
+                                       std::ofstream *ofbadl,
+                                       std::ofstream *ofcorl,
+                                       std::ofstream *ofbadr,
+                                       std::ofstream *ofcorr,
+                                       std::ofstream *ofunp,
+                                       WorkModeType mode);
+    // Parallel correction of a batch of reads; returns how many were bad.
+    static inline unsigned CorrectReadsBatch(AbstractCclean *cleaner,
+                                             std::vector<bool> *results,
+                                             std::vector<Read> *reads,
+                                             size_t buf_size, unsigned nthreads) {
+      unsigned bad = 0;
+      // FIX: the original pragma lacked reduction(+:bad), so the shared
+      // counter was incremented concurrently by all threads — a data race
+      // that could under-count bad reads.
+#     pragma omp parallel for shared(reads, results) num_threads(nthreads) reduction(+:bad)
+      for (size_t i = 0; i < buf_size; ++i) {
+        bool ok;
+        (*reads)[i] = (*cleaner)((*reads)[i], &ok);
+        (*results)[i] = ok;
+        if (!ok) ++bad;
+      }
+      return bad;
+    }
+    // Get pure file name without extension
+    inline static std::string getPureFilename(const std::string &fname) {
+      std::string tmp = path::filename(fname);
+      std::string pure_file_name = "";
+      size_t pos = tmp.find(".fastq");
+      if (pos == std::string::npos)
+        pure_file_name = tmp;
+      else
+        pure_file_name = tmp.substr(0, pos);
+      return pure_file_name;
+    }
+    // Build "<dirprefix>/<basename>.<suffix>.fastq" for an output file.
+    inline static std::string getReadsFilename(const std::string &dirprefix,
+                                               const std::string &fname,
+                                               const std::string &suffix) {
+      const std::string &pure_file_name = getPureFilename(fname);
+      return (dirprefix + "/" + pure_file_name + "." + suffix + ".fastq");
+    }
+    // Define mode depends on config file data
+    inline static WorkModeType getMode() {
+        WorkModeType mode;
+        if (cfg::get().use_bruteforce) {
+          if (cfg::get().use_quality) mode = BRUTE_WITH_Q;
+          else                        mode = BRUTE_SIMPLE;
+        }
+        else {
+          if (cfg::get().use_quality) mode = SINGLE_END_Q;
+          else                        mode = SINGLE_END;
+        }
+        return mode;
+    }
+    // Create and return cleaner depends on mode; caller owns the result.
+    inline static AbstractCclean* getCleaner(std::ofstream *outf_alig_debug,
+                                             std::ofstream *outf_bad_deb,
+                                             const std::string &db,
+                                             WorkModeType mode, unsigned mlen,
+                                             const cclean::AdapterIndex &index,
+                                             bool deb_info);
+
+};
+
+#endif /* RUNNING_MODES_H_ */
diff --git a/src/projects/cclean/utils.cpp b/src/projects/cclean/utils.cpp
new file mode 100644
index 0000000..a5f0fc1
--- /dev/null
+++ b/src/projects/cclean/utils.cpp
@@ -0,0 +1,136 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include <iostream>
+#include <algorithm>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "utils.hpp"
+#include <ssw/ssw_cpp.h>
+#include <ssw/ssw_cpp.h> // Striped Smith-Waterman aligner
+#include <io/read.hpp>
+#include "additional.cpp"
+
+namespace cclean_utils {
+
+inline std::string ReverseComplement(const std::string& read) {
+  std::map<char, char> reverse;
+  reverse['C'] = 'G';
+  reverse['G'] = 'C';
+  reverse['T'] = 'A';
+  reverse['A'] = 'T';
+  reverse['N'] = 'N';
+
+  std::vector<char> res;
+  for(int i = 0; i < (int) read.length(); ++i) {
+   res.push_back(reverse[read[i]]);
+  }
+
+  std::reverse(res.begin(), res.end());
+  return std::string(res.begin(), res.end());
+}
+
+double GetScoreWithQuality(const StripedSmithWaterman::Alignment &a,
+                                            const Quality &qual)
+{ // Try to get a more realistic alignment score that depends on read quality
+  // Matches and mismatches are taken from the CIGAR alignment string below
+  double score = 0.0;
+  int ref_pos = 0, query_pos = 0;
+  for (std::vector<uint32_t>::const_iterator it = a.cigar.begin();
+       it != a.cigar.end(); ++it) {
+
+    int num = (*it & 0xFFFFFFF0) >> 4;
+    int op_code = *it & 0x0000000F;
+
+    switch (op_code) {
+      case 0: { //match
+        for (int i = 0; i < num; ++i, ++ref_pos, ++query_pos)
+          score += MatchScore;
+        break;
+      }
+      case 1: { //insert
+        for (int i = 0; i < num; ++i, ++query_pos)
+          score -= (double)qual[query_pos] / MismatchScore;
+        break;
+      }
+      case 2: { //del
+        for (int i = 0; i < num; ++i, ++ref_pos)
+          score -= (double)qual[query_pos] / MismatchScore;
+        break;
+      }
+      default:
+        break;
+    }
+  }
+  return score;
+}
+
+Read CutRead(const Read &r, int start_pos, int end_pos) {
+  if(start_pos > end_pos)  return r;
+  //  Step 1: cutting read sequence
+  Read read = r;
+  std::string read_seq = read.getSequenceString();
+  std::string cuted_read_seq(std::string(read_seq, 0, start_pos) +
+                             std::string(read_seq, end_pos + 1));
+  read.setSequence(cuted_read_seq.c_str());
+
+  //  Step 2: cutting read quality string
+  std::string qual_string = read.getQuality().str();
+  if(qual_string.empty())  return read;
+  std::string cuted_qual_string(std::string(qual_string, 0, start_pos) +
+                                std::string(qual_string, end_pos + 1));
+  read.setQuality(cuted_qual_string.c_str(), 0);
+  return read;
+}
+
+void RestoreFromCigar(const std::string& ref, const std::string& query,
+                      std::string& out_ref, std::string& out_query,
+                      const StripedSmithWaterman::Alignment& a) {
+
+  std::vector<char> aligned_ref, aligned_query;
+  int ref_pos = 0, query_pos = 0;
+  for (std::vector<uint32_t>::const_iterator it = a.cigar.begin();
+       it != a.cigar.end(); ++it) {
+    int num = (*it & 0xFFFFFFF0) >> 4;
+    int op_code = *it & 0x0000000F;
+
+    switch (op_code) {
+      case 0: { //match
+        for (int i = 0; i < num; ++i) {
+          aligned_ref.push_back(ref[a.ref_begin + ref_pos++]);
+          aligned_query.push_back(query[a.query_begin + query_pos++]);
+        }
+        break;
+      }
+      case 1: { //insert
+        for (int i = 0; i < num; ++i) {
+          aligned_ref.push_back('-');
+          aligned_query.push_back(query[a.query_begin + query_pos++]);
+        }
+        break;
+      }
+      case 2: { //del
+        for (int i = 0; i < num; ++i) {
+          aligned_ref.push_back(ref[a.ref_begin + ref_pos++]);
+          aligned_query.push_back('-');
+        }
+        break;
+     }
+      default:
+        break;
+    }
+
+  }
+
+  out_ref = std::string(aligned_ref.begin(), aligned_ref.end());
+  out_query = std::string(aligned_query.begin(), aligned_query.end());
+}
+
+  // end of namespace cclean_utils
+}
diff --git a/src/projects/cclean/utils.hpp b/src/projects/cclean/utils.hpp
new file mode 100644
index 0000000..a71a200
--- /dev/null
+++ b/src/projects/cclean/utils.hpp
@@ -0,0 +1,58 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#ifndef UTILS_HPP
+#define UTILS_HPP
+
+#include <ssw/ssw_cpp.h> // Striped Smith-Waterman aligner
+#include <io/read.hpp>
+#include "additional.cpp"
+#include "running_modes.hpp"
+#include "adapter_index.hpp"
+
+namespace cclean_utils {
+
+std::string ReverseComplement(const std::string& read);
+
+std::unordered_map<std::string, std::string> ProcessArgs(int argc, char *argv[],
+                                                         bool *ok, std::string *error);
+
+double GetScoreWithQuality(const StripedSmithWaterman::Alignment &a,
+                                            const Quality &qual);
+
+inline bool is_alignment_good(const StripedSmithWaterman::Alignment& a,
+                              const std::string& sequence,
+                              const std::string& query,
+                              double aligned_part_fraction) {
+  //  Check that the query adjoins or even overlaps the sequence edge
+  return (std::min(a.query_end - a.query_begin + 1, a.ref_end - a.ref_begin + 1)
+         / (double) query.size() > aligned_part_fraction) /*&&
+         (a.ref_begin == 0 || a.ref_end == sequence.size() - 1)*/;
+}
+
+// Cut read from start to end position of the best alignment with adapter
+Read CutRead(const Read &r, int start_pos, int end_pos);
+void RestoreFromCigar(const std::string& ref, const std::string& query,
+                      std::string& out_ref, std::string& out_query,
+                      const StripedSmithWaterman::Alignment& a);
+
+inline double GetMismatches(const std::string &read, const std::string &adapter,
+                         const StripedSmithWaterman::Alignment &a)  {
+  std::string aligned_read;
+  std::string aligned_adapter;
+  RestoreFromCigar(read, adapter, aligned_read, aligned_adapter, a);
+  int size = (int)std::min(aligned_read.length(), aligned_adapter.length());
+  int mismatched_score = 0;
+  for (int i = 0; i < size; ++i)  {
+    if (aligned_read[i] != aligned_adapter[i])
+      ++mismatched_score;
+  }
+  return static_cast<double>(mismatched_score);
+}
+// end of namespace
+}
+#endif /* UTILS_HPP */
diff --git a/src/projects/hammer/valid_kmer_generator.hpp b/src/projects/cclean/valid_kmer_generator.hpp
similarity index 85%
copy from src/projects/hammer/valid_kmer_generator.hpp
copy to src/projects/cclean/valid_kmer_generator.hpp
index c4128c4..a03a9b3 100644
--- a/src/projects/hammer/valid_kmer_generator.hpp
+++ b/src/projects/cclean/valid_kmer_generator.hpp
@@ -8,10 +8,8 @@
 #ifndef HAMMER_VALIDKMERGENERATOR_HPP_
 #define HAMMER_VALIDKMERGENERATOR_HPP_
 
-#include "globals.hpp"
-
-#include "io/reads/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "io/read.hpp"
+#include "sequence/seq.hpp"
 
 #include <string>
 #include <vector>
@@ -43,7 +41,7 @@ class ValidKMerGenerator {
    * read.
    */
   explicit ValidKMerGenerator(const Read &read,
-                              uint8_t bad_quality_threshold = 2) {
+                              uint32_t bad_quality_threshold = 2) {
     Reset(read.getSequenceString().data(),
           read.getQualityString().data(),
           read.getSequenceString().size(),
@@ -58,7 +56,7 @@ class ValidKMerGenerator {
    */
   explicit ValidKMerGenerator(const char *seq, const char *qual,
                               size_t len,
-                              uint8_t bad_quality_threshold = 2) {
+                              uint32_t bad_quality_threshold = 2) {
     Reset(seq, qual, len, bad_quality_threshold);
   }
 
@@ -70,7 +68,7 @@ class ValidKMerGenerator {
 
   void Reset(const char *seq, const char *qual,
              size_t len,
-             uint8_t bad_quality_threshold = 2) {
+             uint32_t bad_quality_threshold = 2) {
     kmer_ = Seq<kK>();
     seq_ = seq;
     qual_ = qual;
@@ -102,7 +100,7 @@ class ValidKMerGenerator {
   /**
    * @result last k-mer position in initial read.
    */
-  size_t pos() const {
+  int pos() const {
     return pos_;
   }
   /**
@@ -119,9 +117,9 @@ class ValidKMerGenerator {
  private:
   void TrimBadQuality();
   double Prob(uint8_t qual) {
-    return Globals::quality_probs[qual];
+    return 1 - (qual < 3 ? 0.75 : pow(10.0, -(int)qual / 10.0));
   }
-  uint8_t GetQual(uint32_t pos) {
+  uint32_t GetQual(uint32_t pos) {
     if (pos >= len_) {
       return 2;
     } else {
@@ -135,7 +133,7 @@ class ValidKMerGenerator {
   size_t end_;
   size_t len_;
   double correct_probability_;
-  uint8_t bad_quality_threshold_;
+  uint32_t bad_quality_threshold_;
   bool has_more_;
   bool first;
 
@@ -149,13 +147,13 @@ void ValidKMerGenerator<kK>::TrimBadQuality() {
   pos_ = 0;
   if (qual_)
     for (; pos_ < len_; ++pos_) {
-      if (GetQual((uint32_t)pos_) >= bad_quality_threshold_)
+      if (GetQual(pos_) >= bad_quality_threshold_)
         break;
     }
   end_ = len_;
   if (qual_)
     for (; end_ > pos_; --end_) {
-      if (GetQual((uint32_t)(end_ - 1)) >= bad_quality_threshold_)
+      if (GetQual(end_ - 1) >= bad_quality_threshold_)
         break;
   }
 }
@@ -167,8 +165,8 @@ void ValidKMerGenerator<kK>::Next() {
   } else if (first || !is_nucl(seq_[pos_ + kK - 1])) {
     // in this case we have to look for new k-mer
     correct_probability_ = 1.0;
-    uint32_t start_hypothesis = (uint32_t)pos_;
-    uint32_t i = (uint32_t)pos_;
+    uint32_t start_hypothesis = pos_;
+    uint32_t i = pos_;
     for (; i < len_; ++i) {
       if (i == kK + start_hypothesis) {
         break;
@@ -190,8 +188,8 @@ void ValidKMerGenerator<kK>::Next() {
     // good case we can just shift our previous answer
     kmer_ = kmer_ << seq_[pos_ + kK - 1];
     if (qual_) {
-      correct_probability_ *= Prob(GetQual((uint32_t)pos_ + kK - 1));
-      correct_probability_ /= Prob(GetQual((uint32_t)pos_ - 1));
+      correct_probability_ *= Prob(GetQual(pos_ + kK - 1));
+      correct_probability_ /= Prob(GetQual(pos_ - 1));
     }
     ++pos_;
   }
diff --git a/src/projects/corrector/CMakeLists.txt b/src/projects/corrector/CMakeLists.txt
index 0434323..4678d70 100644
--- a/src/projects/corrector/CMakeLists.txt
+++ b/src/projects/corrector/CMakeLists.txt
@@ -18,7 +18,7 @@ add_executable(corrector
         config_struct.cpp
         main.cpp)
 
-target_link_libraries(corrector input spades_modules ${COMMON_LIBRARIES})
+target_link_libraries(corrector input common_modules ${COMMON_LIBRARIES})
 
 
 
diff --git a/src/projects/corrector/config_struct.cpp b/src/projects/corrector/config_struct.cpp
index d799b7a..594bae8 100644
--- a/src/projects/corrector/config_struct.cpp
+++ b/src/projects/corrector/config_struct.cpp
@@ -7,7 +7,7 @@
 
 #include "config_struct.hpp"
 
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
 #include "llvm/Support/YAMLParser.h"
 #include "llvm/Support/YAMLTraits.h"
diff --git a/src/projects/corrector/contig_processor.cpp b/src/projects/corrector/contig_processor.cpp
index 7a90b62..8564d17 100644
--- a/src/projects/corrector/contig_processor.cpp
+++ b/src/projects/corrector/contig_processor.cpp
@@ -9,11 +9,11 @@
 #include "config_struct.hpp"
 #include "variants_table.hpp"
 
-#include "io/reads_io/ireader.hpp"
-#include "io/reads_io/osequencestream.hpp"
-#include "io/reads_io/file_reader.hpp"
+#include "io/reads/ireader.hpp"
+#include "io/reads/osequencestream.hpp"
+#include "io/reads/file_reader.hpp"
 #include "io/reads/single_read.hpp"
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 
 #include <boost/algorithm/string.hpp>
 
diff --git a/src/projects/corrector/contig_processor.hpp b/src/projects/corrector/contig_processor.hpp
index 0a46be4..a35db3b 100644
--- a/src/projects/corrector/contig_processor.hpp
+++ b/src/projects/corrector/contig_processor.hpp
@@ -15,10 +15,10 @@
 #pragma once
 #include "interesting_pos_processor.hpp"
 #include "positional_read.hpp"
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
-#include <io/sam_io/sam_reader.hpp>
-#include <io/sam_io/read.hpp>
+#include <io/sam/sam_reader.hpp>
+#include <io/sam/read.hpp>
 #include "pipeline/library.hpp"
 
 #include <string>
diff --git a/src/projects/corrector/dataset_processor.cpp b/src/projects/corrector/dataset_processor.cpp
index 15fe997..20f3e1e 100644
--- a/src/projects/corrector/dataset_processor.cpp
+++ b/src/projects/corrector/dataset_processor.cpp
@@ -10,10 +10,10 @@
 #include "contig_processor.hpp"
 #include "config_struct.hpp"
 
-#include "io/reads_io/file_reader.hpp"
-#include "dev_support/path_helper.hpp"
-#include "io/reads_io/osequencestream.hpp"
-#include "dev_support/openmp_wrapper.h"
+#include "io/reads/file_reader.hpp"
+#include "utils/path_helper.hpp"
+#include "io/reads/osequencestream.hpp"
+#include "utils/openmp_wrapper.h"
 
 #include <boost/algorithm/string.hpp>
 
@@ -169,7 +169,7 @@ string DatasetProcessor::RunSingleBwa(const string &single, const size_t lib)  {
         return "";
     }
     string nthreads_str = to_string(nthreads_);
-    string last_line = bwa_string + " mem "+ " -v 1 -t " + nthreads_str + " " + genome_screened + " "  + single  + "  > " + path::screen_whitespaces(tmp_sam_filename);
+    string last_line = bwa_string + " mem "+ " -v 1 -t " + nthreads_str + " " + genome_screened + " "  + path::screen_whitespaces(single)  + "  > " + path::screen_whitespaces(tmp_sam_filename);
     INFO("Running bwa mem ...:" << last_line);
     run_res = system(last_line.c_str());
     if (run_res != 0) {
diff --git a/src/projects/corrector/dataset_processor.hpp b/src/projects/corrector/dataset_processor.hpp
index 397f5ed..2edf657 100644
--- a/src/projects/corrector/dataset_processor.hpp
+++ b/src/projects/corrector/dataset_processor.hpp
@@ -7,10 +7,10 @@
 
 #pragma once
 
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 
-#include "io/reads_io/file_reader.hpp"
-#include "dev_support/path_helper.hpp"
+#include "io/reads/file_reader.hpp"
+#include "utils/path_helper.hpp"
 
 #include "pipeline/library.hpp"
 
diff --git a/src/projects/corrector/interesting_pos_processor.cpp b/src/projects/corrector/interesting_pos_processor.cpp
index 160f4a1..12358ef 100644
--- a/src/projects/corrector/interesting_pos_processor.cpp
+++ b/src/projects/corrector/interesting_pos_processor.cpp
@@ -8,7 +8,7 @@
 #include "interesting_pos_processor.hpp"
 #include "config_struct.hpp"
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 using namespace std;
 
diff --git a/src/projects/corrector/main.cpp b/src/projects/corrector/main.cpp
index 07f0ee0..ff6afa8 100644
--- a/src/projects/corrector/main.cpp
+++ b/src/projects/corrector/main.cpp
@@ -8,9 +8,9 @@
 #include "dataset_processor.hpp"
 #include "pipeline/config_struct.hpp"
 
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 #include "config_struct.hpp"
-#include "dev_support/segfault_handler.hpp"
+#include "utils/segfault_handler.hpp"
 
 #include "version.hpp"
 
diff --git a/src/projects/dipspades/CMakeLists.txt b/src/projects/dipspades/CMakeLists.txt
index b60d4b8..cecc0b8 100644
--- a/src/projects/dipspades/CMakeLists.txt
+++ b/src/projects/dipspades/CMakeLists.txt
@@ -8,11 +8,11 @@
 project(dipspades CXX)
 
 add_executable(dipspades
-	       dipspades_config.cpp
-	       utils/files_utils.cpp
+               dipspades_config.cpp
+               utils/files_utils.cpp
                main.cpp)
 
-target_link_libraries(dipspades spades_modules ${COMMON_LIBRARIES})
+target_link_libraries(dipspades common_modules ${COMMON_LIBRARIES})
 
 if (SPADES_STATIC_BUILD)
   set_target_properties(dipspades PROPERTIES LINK_SEARCH_END_STATIC 1)
diff --git a/src/projects/dipspades/consensus_contigs_constructor/consensus_contigs_constructor.hpp b/src/projects/dipspades/consensus_contigs_constructor/consensus_contigs_constructor.hpp
index 4623fa0..445641f 100644
--- a/src/projects/dipspades/consensus_contigs_constructor/consensus_contigs_constructor.hpp
+++ b/src/projects/dipspades/consensus_contigs_constructor/consensus_contigs_constructor.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "io/reads_io/io_helper.hpp"
+#include "io/reads/io_helper.hpp"
 
 #include "utils/element_printers.hpp"
 #include "utils/files_utils.hpp"
@@ -25,7 +25,7 @@ namespace dipspades{
 class ConsensusContigsConstructor {
     conj_graph_pack &graph_pack_;
     BaseHistogram<size_t> &bulge_len_hist_;
-    NewExtendedSequenceMapper<conj_graph_pack::graph_t, conj_graph_pack::index_t> seq_mapper_;
+    BasicSequenceMapper<conj_graph_pack::graph_t, conj_graph_pack::index_t> seq_mapper_;
     VertexPathIndex path_index_;
 
     CorrectionResult correction_result_;
diff --git a/src/projects/dipspades/consensus_contigs_constructor/contig_correctors/close_gaps_corrector.hpp b/src/projects/dipspades/consensus_contigs_constructor/contig_correctors/close_gaps_corrector.hpp
index aa5047c..46b3080 100644
--- a/src/projects/dipspades/consensus_contigs_constructor/contig_correctors/close_gaps_corrector.hpp
+++ b/src/projects/dipspades/consensus_contigs_constructor/contig_correctors/close_gaps_corrector.hpp
@@ -30,7 +30,7 @@ class CloseGapsCorrector : public AbstractContigCorrector{
             if(i == gap_index[current_gap]){
                 VertexId start = g_.EdgeEnd(cur_edge);
                 VertexId end = g_.EdgeStart(path[i + 1]);
-                auto dijkstra = DijkstraHelper<Graph>::CreateTargeredBoundedDijkstra(g_,
+                auto dijkstra = DijkstraHelper<Graph>::CreateTargetedBoundedDijkstra(g_,
                         end, dsp_cfg::get().pbr.max_bulge_nucls_len); //DijkstraHelper<Graph>::CreateBoundedDijkstra(g_, dsp_cfg::get().pbr.max_bulge_nucls_len);
                 dijkstra.Run(start);
                 if(dijkstra.DistanceCounted(end)){
diff --git a/src/projects/dipspades/dipspades.hpp b/src/projects/dipspades/dipspades.hpp
index 08c3ad9..2163350 100644
--- a/src/projects/dipspades/dipspades.hpp
+++ b/src/projects/dipspades/dipspades.hpp
@@ -6,8 +6,8 @@
 //***************************************************************************
 
 
-#include "io/reads_io/splitting_wrapper.hpp"
-#include "algorithms/graph_construction.hpp"
+#include "io/reads/splitting_wrapper.hpp"
+#include "modules/graph_construction.hpp"
 #include "pipeline/stage.hpp"
 
 #include "dipspades_config.hpp"
diff --git a/src/projects/dipspades/dipspades_config.cpp b/src/projects/dipspades/dipspades_config.cpp
index 88545e8..deafb99 100644
--- a/src/projects/dipspades/dipspades_config.cpp
+++ b/src/projects/dipspades/dipspades_config.cpp
@@ -8,7 +8,7 @@
 #include "dipspades_config.hpp"
 #include "pipeline/config_common.hpp"
 #include "utils/files_utils.hpp"
-#include "dev_support/path_helper.hpp"
+#include "utils/path_helper.hpp"
 
 using namespace dipspades;
 
diff --git a/src/projects/dipspades/haplotype_assembly/conservative_regions_searcher.hpp b/src/projects/dipspades/haplotype_assembly/conservative_regions_searcher.hpp
index c5c5f91..f064ede 100644
--- a/src/projects/dipspades/haplotype_assembly/conservative_regions_searcher.hpp
+++ b/src/projects/dipspades/haplotype_assembly/conservative_regions_searcher.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
 #include "contig_separation_utils.hpp"
 
 using namespace debruijn_graph;
@@ -20,7 +20,7 @@ class ConservativeRegionsSearcher{
     SignedLabels signed_labels_;
     ConservativeRegionStorage cons_reg_storage_;
 
-    NewExtendedSequenceMapper<conj_graph_pack::graph_t, conj_graph_pack::index_t> mapper_;
+    BasicSequenceMapper<conj_graph_pack::graph_t, conj_graph_pack::index_t> mapper_;
     map<int, MappingPath<EdgeId> > contig_map_path_;
 
     typedef map<int, vector<int> > diff_labeled_contigs;
diff --git a/src/projects/dipspades/kmer_gluing/equal_sequence_gluer.hpp b/src/projects/dipspades/kmer_gluing/equal_sequence_gluer.hpp
index 487e6fa..0969365 100644
--- a/src/projects/dipspades/kmer_gluing/equal_sequence_gluer.hpp
+++ b/src/projects/dipspades/kmer_gluing/equal_sequence_gluer.hpp
@@ -122,7 +122,7 @@ public:
         size_t cnt = 0;
         for(auto it = graph_.SmartEdgeBegin(); !it.IsEnd(); ++it) {
             Sequence nucls = graph_.EdgeNucls(*it);
-            runtime_k::RtSeq kmer = nucls.start<runtime_k::RtSeq>(graph_.k() + 1) >> 'A';
+            RtSeq kmer = nucls.start<RtSeq>(graph_.k() + 1) >> 'A';
             for(size_t i = graph_.k(); i < graph_.length(*it); i++) {
                 kmer = kmer << graph_.EdgeNucls(*it)[i];
                 if(!index_.contains(kmer)) {
diff --git a/src/projects/dipspades/main.cpp b/src/projects/dipspades/main.cpp
index 7c63a75..018283f 100644
--- a/src/projects/dipspades/main.cpp
+++ b/src/projects/dipspades/main.cpp
@@ -8,13 +8,11 @@
 /*
  * Assembler Main
  */
-#include "dev_support/logger/log_writers.hpp"
-
-#include "dev_support/segfault_handler.hpp"
-#include "dev_support/memory_limit.hpp"
-#include "dev_support/copy_file.hpp"
-#include "data_structures/sequence/runtime_k.hpp"
+#include "utils/logger/log_writers.hpp"
 
+#include "utils/segfault_handler.hpp"
+#include "utils/memory_limit.hpp"
+#include "utils/copy_file.hpp"
 
 #include "pipeline/graph_pack.hpp"
 #include "stages/construction.hpp"
diff --git a/src/projects/dipspades/polymorphic_bulge_remover/bulge_paths_searcher.hpp b/src/projects/dipspades/polymorphic_bulge_remover/bulge_paths_searcher.hpp
index ac97830..b16cefb 100644
--- a/src/projects/dipspades/polymorphic_bulge_remover/bulge_paths_searcher.hpp
+++ b/src/projects/dipspades/polymorphic_bulge_remover/bulge_paths_searcher.hpp
@@ -8,7 +8,7 @@
 #pragma once
 
 #include <vector>
-#include "algorithms/dijkstra/dijkstra_helper.hpp"
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
 #include "assembly_graph/paths/path_processor.hpp"
 #include "dipspades_config.hpp"
 
diff --git a/src/projects/dipspades/polymorphic_bulge_remover/complex_bulge_remover.hpp b/src/projects/dipspades/polymorphic_bulge_remover/complex_bulge_remover.hpp
index debe5e3..1466ed6 100644
--- a/src/projects/dipspades/polymorphic_bulge_remover/complex_bulge_remover.hpp
+++ b/src/projects/dipspades/polymorphic_bulge_remover/complex_bulge_remover.hpp
@@ -19,7 +19,7 @@
 #include "bulge_gluer.hpp"
 #include "diploid_bulge_finder.hpp"
 
-#include "io/reads_io/splitting_wrapper.hpp"
+#include "io/reads/splitting_wrapper.hpp"
 
 #include <stdlib.h>
 #include <memory.h>
diff --git a/src/projects/dipspades/polymorphic_bulge_remover/polymorphic_bulge_remover.hpp b/src/projects/dipspades/polymorphic_bulge_remover/polymorphic_bulge_remover.hpp
index 3b481b7..ccdb009 100644
--- a/src/projects/dipspades/polymorphic_bulge_remover/polymorphic_bulge_remover.hpp
+++ b/src/projects/dipspades/polymorphic_bulge_remover/polymorphic_bulge_remover.hpp
@@ -15,7 +15,7 @@
 #include "visualization/visualization.hpp"
 #include "assembly_graph/handlers/edges_position_handler.hpp"
 #include "assembly_graph/components/graph_component.hpp"
-#include "algorithms/simplification/compressor.hpp"
+#include "modules/simplification/compressor.hpp"
 
 using namespace debruijn_graph;
 
@@ -75,12 +75,12 @@ class PolymorphicBulgeRemover {
 
         graph_pack_.EnsureDebugInfo();
         make_dir(dsp_cfg::get().io.output_dir + "components/");
-        omnigraph::DefaultLabeler<Graph> labeler(graph_pack_.g, graph_pack_.edge_pos);
+        visualization::graph_labeler::DefaultLabeler<Graph> labeler(graph_pack_.g, graph_pack_.edge_pos);
         make_dir(dsp_cfg::get().io.output_dir + "components/" + component_dir + "/");
-        omnigraph::visualization::WriteComponents(graph_pack_.g,
+        visualization::visualization_utils::WriteComponents(graph_pack_.g,
                 dsp_cfg::get().io.output_dir + "components/" + component_dir + "/",
                 omnigraph::ReliableSplitter<Graph>(graph_pack_.g),
-                omnigraph::visualization::DefaultColorer(graph_pack_.g, Path<EdgeId>(), Path<EdgeId>()),
+                visualization::graph_colorer::DefaultColorer(graph_pack_.g, Path<EdgeId>(), Path<EdgeId>()),
                 labeler);
     }
 
diff --git a/src/projects/dipspades/utils/edge_gluer.hpp b/src/projects/dipspades/utils/edge_gluer.hpp
index 7cc1e50..8fdd1aa 100644
--- a/src/projects/dipspades/utils/edge_gluer.hpp
+++ b/src/projects/dipspades/utils/edge_gluer.hpp
@@ -6,7 +6,7 @@
 
 #pragma once
 
-#include "algorithms/dijkstra/neighbours_iterator.hpp"
+#include "assembly_graph/dijkstra/neighbours_iterator.hpp"
 
 using namespace debruijn_graph;
 
diff --git a/src/projects/dipspades/utils/path_routines.hpp b/src/projects/dipspades/utils/path_routines.hpp
index a251496..9ad261b 100644
--- a/src/projects/dipspades/utils/path_routines.hpp
+++ b/src/projects/dipspades/utils/path_routines.hpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #pragma once
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
 #include "pipeline/graph_pack.hpp"
 
 using namespace debruijn_graph;
@@ -238,13 +238,14 @@ bool PathAdjacentRelatedEdges(Graph &g, vector<EdgeId> path, bool check_start =
     for(auto e = path.begin(); e != path.end() - 1; e++)
         if(VertexAdjacentRelatedEdges(g, g.EdgeEnd(*e)))
             return true;
-    if(path.size() != 0)
+    if(path.size() != 0) {
         if(check_start)
             if(VertexAdjacentRelatedEdges(g, g.EdgeStart(path[0])))
                 return true;
         if(check_end)
             if(VertexAdjacentRelatedEdges(g, g.EdgeEnd(path[path.size() - 1])))
                 return true;
+    }
     return false;
 }
 
diff --git a/src/projects/hammer/CMakeLists.txt b/src/projects/hammer/CMakeLists.txt
index 5f5277a..c0fe9c1 100644
--- a/src/projects/hammer/CMakeLists.txt
+++ b/src/projects/hammer/CMakeLists.txt
@@ -22,7 +22,7 @@ add_executable(hammer
 #  add_subdirectory(quake_count)
 #  add_subdirectory(gen_test_data)
 
-target_link_libraries(hammer input dev_support mph_index pipeline BamTools format ${COMMON_LIBRARIES})
+target_link_libraries(hammer input utils mph_index pipeline BamTools format ${COMMON_LIBRARIES})
 
 if (SPADES_STATIC_BUILD)
   set_target_properties(hammer PROPERTIES LINK_SEARCH_END_STATIC 1)
diff --git a/src/projects/hammer/config_struct_hammer.cpp b/src/projects/hammer/config_struct_hammer.cpp
index 37cd8ac..ba056b9 100644
--- a/src/projects/hammer/config_struct_hammer.cpp
+++ b/src/projects/hammer/config_struct_hammer.cpp
@@ -14,7 +14,7 @@
 
 #include "config_struct_hammer.hpp"
 #include "pipeline/config_common.hpp"
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
 #include <boost/property_tree/ptree.hpp>
 #include <string>
diff --git a/src/projects/hammer/hamcluster.cpp b/src/projects/hammer/hamcluster.cpp
index d1d2ff2..997ebd5 100644
--- a/src/projects/hammer/hamcluster.cpp
+++ b/src/projects/hammer/hamcluster.cpp
@@ -7,8 +7,8 @@
 
 #include "hamcluster.hpp"
 
-#include "utils/adt/concurrent_dsu.hpp"
-#include "io/kmers_io/mmapped_reader.hpp"
+#include "common/adt/concurrent_dsu.hpp"
+#include "io/kmers/mmapped_reader.hpp"
 #include "parallel_radix_sort.hpp"
 
 #include "config_struct_hammer.hpp"
diff --git a/src/projects/hammer/hamcluster.hpp b/src/projects/hammer/hamcluster.hpp
index 30f5356..0db51f6 100644
--- a/src/projects/hammer/hamcluster.hpp
+++ b/src/projects/hammer/hamcluster.hpp
@@ -10,10 +10,10 @@
 
 #include "kmer_stat.hpp"
 #include "kmer_data.hpp"
-#include "io/kmers_io/mmapped_reader.hpp"
+#include "io/kmers/mmapped_reader.hpp"
 
-#include "dev_support/logger/logger.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "utils/logger/logger.hpp"
+#include "sequence/seq.hpp"
 
 #include <iostream>
 #include <vector>
diff --git a/src/projects/hammer/hammer_tools.cpp b/src/projects/hammer/hammer_tools.cpp
index 1fa2461..3a14777 100644
--- a/src/projects/hammer/hammer_tools.cpp
+++ b/src/projects/hammer/hammer_tools.cpp
@@ -5,13 +5,13 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "io/reads_io/ireadstream.hpp"
+#include "io/reads/ireadstream.hpp"
 #include "valid_kmer_generator.hpp"
 #include "globals.hpp"
 #include "kmer_data.hpp"
 #include "read_corrector.hpp"
 
-#include "io/kmers_io/mmapped_writer.hpp"
+#include "io/kmers/mmapped_writer.hpp"
 
 #include <iostream>
 #include <fstream>
diff --git a/src/projects/hammer/hammer_tools.hpp b/src/projects/hammer/hammer_tools.hpp
index 3ef9a6a..caac46d 100644
--- a/src/projects/hammer/hammer_tools.hpp
+++ b/src/projects/hammer/hammer_tools.hpp
@@ -14,11 +14,11 @@
 #include <iomanip>
 #include <fstream>
 #include "io/reads/read.hpp"
-#include "io/reads_io/ireadstream.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "io/reads/ireadstream.hpp"
+#include "sequence/seq.hpp"
 #include "globals.hpp"
 #include "kmer_stat.hpp"
-#include "io/kmers_io/mmapped_reader.hpp"
+#include "io/kmers/mmapped_reader.hpp"
 
 namespace hammer {
 
diff --git a/src/projects/hammer/kmer_cluster.cpp b/src/projects/hammer/kmer_cluster.cpp
index ff153c9..d6944b2 100644
--- a/src/projects/hammer/kmer_cluster.cpp
+++ b/src/projects/hammer/kmer_cluster.cpp
@@ -5,8 +5,8 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "io/reads_io/ireadstream.hpp"
-#include "dev_support/openmp_wrapper.h"
+#include "io/reads/ireadstream.hpp"
+#include "utils/openmp_wrapper.h"
 
 #include "hammer_tools.hpp"
 #include "hamcluster.hpp"
diff --git a/src/projects/hammer/kmer_data.cpp b/src/projects/hammer/kmer_data.cpp
index 22b2aac..dd730bd 100644
--- a/src/projects/hammer/kmer_data.cpp
+++ b/src/projects/hammer/kmer_data.cpp
@@ -6,17 +6,17 @@
 //***************************************************************************
 
 #include "kmer_data.hpp"
-#include "io/reads_io/read_processor.hpp"
+#include "io/reads/read_processor.hpp"
 #include "valid_kmer_generator.hpp"
 
-#include "io/reads_io/ireadstream.hpp"
+#include "io/reads/ireadstream.hpp"
 #include "config_struct_hammer.hpp"
 
-#include "data_structures/mph_index/kmer_index_builder.hpp"
+#include "utils/mph_index/kmer_index_builder.hpp"
 
-#include "io/kmers_io/kmer_iterator.hpp"
-#include "utils/adt/bf.hpp"
-#include "utils/adt/hll.hpp"
+#include "io/kmers/kmer_iterator.hpp"
+#include "common/adt/bf.hpp"
+#include "common/adt/hll.hpp"
 
 using namespace hammer;
 
@@ -112,6 +112,8 @@ path::files_t HammerFilteringKMerSplitter::Split(size_t num_files) {
   }
   INFO("Total " << processed << " reads processed");
 
+  this->ClearBuffers();
+
   return out;
 }
 
diff --git a/src/projects/hammer/kmer_data.hpp b/src/projects/hammer/kmer_data.hpp
index 57fd1d2..ece0e53 100644
--- a/src/projects/hammer/kmer_data.hpp
+++ b/src/projects/hammer/kmer_data.hpp
@@ -9,8 +9,8 @@
 #define __HAMMER_KMER_DATA_HPP__
 
 #include "kmer_stat.hpp"
-#include "utils/adt/array_vector.hpp"
-#include "data_structures/mph_index/kmer_index.hpp"
+#include "common/adt/array_vector.hpp"
+#include "utils/mph_index/kmer_index.hpp"
 #include <vector>
 
 typedef KMerIndex<kmer_index_traits<hammer::KMer> > HammerKMerIndex;
diff --git a/src/projects/hammer/kmer_stat.hpp b/src/projects/hammer/kmer_stat.hpp
index 9501e5f..1c7284a 100644
--- a/src/projects/hammer/kmer_stat.hpp
+++ b/src/projects/hammer/kmer_stat.hpp
@@ -8,9 +8,9 @@
 #ifndef HAMMER_KMERSTAT_HPP_
 #define HAMMER_KMERSTAT_HPP_
 
-#include "dev_support/verify.hpp"
+#include "utils/verify.hpp"
 
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 
 #include <folly/SmallLocks.h>
 
diff --git a/src/projects/hammer/main.cpp b/src/projects/hammer/main.cpp
index 18077e4..1bb0dca 100644
--- a/src/projects/hammer/main.cpp
+++ b/src/projects/hammer/main.cpp
@@ -20,15 +20,15 @@
 #include "kmer_data.hpp"
 #include "expander.hpp"
 
-#include "utils/adt/concurrent_dsu.hpp"
-#include "dev_support/segfault_handler.hpp"
-#include "io/reads_io/read_processor.hpp"
-#include "io/reads_io/ireadstream.hpp"
+#include "common/adt/concurrent_dsu.hpp"
+#include "utils/segfault_handler.hpp"
+#include "io/reads/read_processor.hpp"
+#include "io/reads/ireadstream.hpp"
 
-#include "dev_support/memory_limit.hpp"
+#include "utils/memory_limit.hpp"
 
-#include "dev_support/logger/logger.hpp"
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/logger.hpp"
+#include "utils/logger/log_writers.hpp"
 
 #include "version.hpp"
 
diff --git a/src/projects/hammer/parallel_radix_sort.hpp b/src/projects/hammer/parallel_radix_sort.hpp
index 6a99911..2765afb 100644
--- a/src/projects/hammer/parallel_radix_sort.hpp
+++ b/src/projects/hammer/parallel_radix_sort.hpp
@@ -36,7 +36,7 @@
 #ifndef PARALLEL_RADIX_SORT_H_
 #define PARALLEL_RADIX_SORT_H_
 
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
 #include <stdint.h>
 #include <cstring>
diff --git a/src/projects/hammer/quake_correct/bithash.cpp b/src/projects/hammer/quake_correct/bithash.cpp
index 65d8203..a3b6f9b 100644
--- a/src/projects/hammer/quake_correct/bithash.cpp
+++ b/src/projects/hammer/quake_correct/bithash.cpp
@@ -6,7 +6,7 @@
 //***************************************************************************
 
 #include "bithash.h"
-#include "data_structures/sequence/nucl.hpp"
+#include "sequence/nucl.hpp"
 #include <iostream>
 #include <fstream>
 #include <cstdlib>
diff --git a/src/projects/hammer/quake_count/quake_count.cpp b/src/projects/hammer/quake_count/quake_count.cpp
index 244e650..e04aa4e 100644
--- a/src/projects/hammer/quake_count/quake_count.cpp
+++ b/src/projects/hammer/quake_count/quake_count.cpp
@@ -38,7 +38,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_17.cpp b/src/projects/hammer/quake_count/quake_count_17.cpp
index 2771ea8..1a84fc9 100644
--- a/src/projects/hammer/quake_count/quake_count_17.cpp
+++ b/src/projects/hammer/quake_count/quake_count_17.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_19.cpp b/src/projects/hammer/quake_count/quake_count_19.cpp
index 8bc22ba..b23c711 100644
--- a/src/projects/hammer/quake_count/quake_count_19.cpp
+++ b/src/projects/hammer/quake_count/quake_count_19.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_21.cpp b/src/projects/hammer/quake_count/quake_count_21.cpp
index 24ed7f2..e3bf9b1 100644
--- a/src/projects/hammer/quake_count/quake_count_21.cpp
+++ b/src/projects/hammer/quake_count/quake_count_21.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_25.cpp b/src/projects/hammer/quake_count/quake_count_25.cpp
index 2160242..f52814c 100644
--- a/src/projects/hammer/quake_count/quake_count_25.cpp
+++ b/src/projects/hammer/quake_count/quake_count_25.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_29.cpp b/src/projects/hammer/quake_count/quake_count_29.cpp
index cdbd7cd..182910c 100644
--- a/src/projects/hammer/quake_count/quake_count_29.cpp
+++ b/src/projects/hammer/quake_count/quake_count_29.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_33.cpp b/src/projects/hammer/quake_count/quake_count_33.cpp
index 7e8cde1..ce44f6d 100644
--- a/src/projects/hammer/quake_count/quake_count_33.cpp
+++ b/src/projects/hammer/quake_count/quake_count_33.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 
diff --git a/src/projects/hammer/quake_count/quake_count_37.cpp b/src/projects/hammer/quake_count/quake_count_37.cpp
index 2780c3e..529aae7 100644
--- a/src/projects/hammer/quake_count/quake_count_37.cpp
+++ b/src/projects/hammer/quake_count/quake_count_37.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_45.cpp b/src/projects/hammer/quake_count/quake_count_45.cpp
index 663bba3..3fab3bc 100644
--- a/src/projects/hammer/quake_count/quake_count_45.cpp
+++ b/src/projects/hammer/quake_count/quake_count_45.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_55.cpp b/src/projects/hammer/quake_count/quake_count_55.cpp
index c096b19..036a639 100644
--- a/src/projects/hammer/quake_count/quake_count_55.cpp
+++ b/src/projects/hammer/quake_count/quake_count_55.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_65.cpp b/src/projects/hammer/quake_count/quake_count_65.cpp
index 0ac0017..53b34b0 100644
--- a/src/projects/hammer/quake_count/quake_count_65.cpp
+++ b/src/projects/hammer/quake_count/quake_count_65.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/quake_count_75.cpp b/src/projects/hammer/quake_count/quake_count_75.cpp
index fb8de1d..3c32f6a 100644
--- a/src/projects/hammer/quake_count/quake_count_75.cpp
+++ b/src/projects/hammer/quake_count/quake_count_75.cpp
@@ -36,7 +36,7 @@
 #include <iomanip>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "kmer_freq_info.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_count/valid_kmer_generator.hpp b/src/projects/hammer/quake_count/valid_kmer_generator.hpp
index 270c6e0..be42726 100644
--- a/src/projects/hammer/quake_count/valid_kmer_generator.hpp
+++ b/src/projects/hammer/quake_count/valid_kmer_generator.hpp
@@ -12,7 +12,7 @@
 #include <string>
 #include <vector>
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 /**
  * This class is designed to iterate through valid k-mers in read.
  * @example
diff --git a/src/projects/hammer/quake_enhanced/count.cpp b/src/projects/hammer/quake_enhanced/count.cpp
index 32b6ecd..8d6bede 100644
--- a/src/projects/hammer/quake_enhanced/count.cpp
+++ b/src/projects/hammer/quake_enhanced/count.cpp
@@ -14,7 +14,7 @@
 #include <unordered_map>
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "valid_kmer_generator.hpp"
 #include "quake_enhanced/quake.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
diff --git a/src/projects/hammer/quake_enhanced/count/count.cpp b/src/projects/hammer/quake_enhanced/count/count.cpp
index eafe3cd..2ea1a8d 100644
--- a/src/projects/hammer/quake_enhanced/count/count.cpp
+++ b/src/projects/hammer/quake_enhanced/count/count.cpp
@@ -34,7 +34,7 @@
 #include "logging.hpp"
 #include "io/ireadstream.hpp"
 #include "io/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 #include "valid_kmer_generator.hpp"
 #define SUPPRESS_UNUSED(X) ((void) (X))
 
diff --git a/src/projects/hammer/quake_enhanced/filter_trusted_enh/main.cpp b/src/projects/hammer/quake_enhanced/filter_trusted_enh/main.cpp
index cbe54e7..80a8a68 100644
--- a/src/projects/hammer/quake_enhanced/filter_trusted_enh/main.cpp
+++ b/src/projects/hammer/quake_enhanced/filter_trusted_enh/main.cpp
@@ -10,7 +10,7 @@
 #include <cstdio>
 #include <string>
 #include <unordered_map>
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 using std::string;
 using std::unordered_map;
diff --git a/src/projects/hammer/valid_kmer_generator.hpp b/src/projects/hammer/valid_kmer_generator.hpp
index c4128c4..49cf71f 100644
--- a/src/projects/hammer/valid_kmer_generator.hpp
+++ b/src/projects/hammer/valid_kmer_generator.hpp
@@ -11,7 +11,7 @@
 #include "globals.hpp"
 
 #include "io/reads/read.hpp"
-#include "data_structures/sequence/seq.hpp"
+#include "sequence/seq.hpp"
 
 #include <string>
 #include <vector>
diff --git a/src/projects/ionhammer/CMakeLists.txt b/src/projects/ionhammer/CMakeLists.txt
index c78cbca..66a26ed 100644
--- a/src/projects/ionhammer/CMakeLists.txt
+++ b/src/projects/ionhammer/CMakeLists.txt
@@ -20,7 +20,7 @@ add_executable(ionhammer
                seqeval/TreephaserLite.cpp
                main.cpp)
 
-target_link_libraries(ionhammer input dev_support pipeline mph_index BamTools ${COMMON_LIBRARIES})
+target_link_libraries(ionhammer input utils pipeline mph_index BamTools ${COMMON_LIBRARIES})
 
 if (SPADES_STATIC_BUILD)
   set_target_properties(ionhammer PROPERTIES LINK_SEARCH_END_STATIC 1)
diff --git a/src/projects/ionhammer/HSeq.hpp b/src/projects/ionhammer/HSeq.hpp
index b6a3ad6..567f84f 100644
--- a/src/projects/ionhammer/HSeq.hpp
+++ b/src/projects/ionhammer/HSeq.hpp
@@ -8,7 +8,7 @@
 #ifndef __HAMMER_HSEQ_HPP__
 #define __HAMMER_HSEQ_HPP__
 
-#include "data_structures/sequence/nucl.hpp"
+#include "sequence/nucl.hpp"
 #include <city/city.h>
 
 #include <array>
diff --git a/src/projects/ionhammer/config_struct.cpp b/src/projects/ionhammer/config_struct.cpp
index d821d99..7701eef 100644
--- a/src/projects/ionhammer/config_struct.cpp
+++ b/src/projects/ionhammer/config_struct.cpp
@@ -7,7 +7,7 @@
 
 #include "config_struct.hpp"
 
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
 #include "llvm/Support/YAMLParser.h"
 #include "llvm/Support/YAMLTraits.h"
diff --git a/src/projects/ionhammer/err_helper_table.cpp b/src/projects/ionhammer/err_helper_table.cpp
index c283a5b..8e20c63 100644
--- a/src/projects/ionhammer/err_helper_table.cpp
+++ b/src/projects/ionhammer/err_helper_table.cpp
@@ -10,7 +10,7 @@
 #include <fstream>
 #include <istream>
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 namespace hammer {
 namespace errHelper {
diff --git a/src/projects/ionhammer/err_helper_table.hpp b/src/projects/ionhammer/err_helper_table.hpp
index e24494d..342ff82 100644
--- a/src/projects/ionhammer/err_helper_table.hpp
+++ b/src/projects/ionhammer/err_helper_table.hpp
@@ -16,7 +16,7 @@
 #include <cstdlib>
 #include <cassert>
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 
 namespace hammer {
 
diff --git a/src/projects/ionhammer/expander.cpp b/src/projects/ionhammer/expander.cpp
index 14f4d98..acc7d3e 100644
--- a/src/projects/ionhammer/expander.cpp
+++ b/src/projects/ionhammer/expander.cpp
@@ -11,7 +11,7 @@
 #include "kmer_data.hpp"
 #include "valid_hkmer_generator.hpp"
 
-#include "io/reads_io/file_reader.hpp"
+#include "io/reads/file_reader.hpp"
 
 #include <vector>
 #include <cstring>
diff --git a/src/projects/ionhammer/hamcluster.cpp b/src/projects/ionhammer/hamcluster.cpp
index a54a66b..a905ddf 100644
--- a/src/projects/ionhammer/hamcluster.cpp
+++ b/src/projects/ionhammer/hamcluster.cpp
@@ -8,8 +8,8 @@
 #include "hamcluster.hpp"
 
 #include "hkmer_distance.hpp"
-#include "utils/adt/concurrent_dsu.hpp"
-#include "io/kmers_io/mmapped_reader.hpp"
+#include "common/adt/concurrent_dsu.hpp"
+#include "io/kmers/mmapped_reader.hpp"
 
 #include <iostream>
 #include <sstream>
diff --git a/src/projects/ionhammer/hamcluster.hpp b/src/projects/ionhammer/hamcluster.hpp
index 23b7015..17d9b60 100644
--- a/src/projects/ionhammer/hamcluster.hpp
+++ b/src/projects/ionhammer/hamcluster.hpp
@@ -9,9 +9,9 @@
 #define HAMMER_SUBKMER_SORTER_HPP
 
 #include "kmer_data.hpp"
-#include "io/kmers_io/mmapped_reader.hpp"
+#include "io/kmers/mmapped_reader.hpp"
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 #include "HSeq.hpp"
 
 #include <iostream>
diff --git a/src/projects/ionhammer/kmer_data.cpp b/src/projects/ionhammer/kmer_data.cpp
index 9b82792..3ba9779 100644
--- a/src/projects/ionhammer/kmer_data.cpp
+++ b/src/projects/ionhammer/kmer_data.cpp
@@ -9,11 +9,11 @@
 #include "config_struct.hpp"
 #include "valid_hkmer_generator.hpp"
 
-#include "data_structures/mph_index/kmer_index_builder.hpp"
+#include "utils/mph_index/kmer_index_builder.hpp"
 
-#include "io/kmers_io/mmapped_writer.hpp"
-#include "io/reads_io/file_reader.hpp"
-#include "io/reads_io/read_processor.hpp"
+#include "io/kmers/mmapped_writer.hpp"
+#include "io/reads/file_reader.hpp"
+#include "io/reads/read_processor.hpp"
 
 using namespace hammer;
 
@@ -87,6 +87,8 @@ path::files_t HammerKMerSplitter::Split(size_t num_files) {
   }
   INFO("Processed " << filler.processed() << " reads");
 
+  this->ClearBuffers();
+
   return out;
 }
 
diff --git a/src/projects/ionhammer/kmer_data.hpp b/src/projects/ionhammer/kmer_data.hpp
index 8afd216..e27458a 100644
--- a/src/projects/ionhammer/kmer_data.hpp
+++ b/src/projects/ionhammer/kmer_data.hpp
@@ -8,7 +8,7 @@
 #ifndef __HAMMER_KMER_DATA_HPP__
 #define __HAMMER_KMER_DATA_HPP__
 
-#include "data_structures/mph_index/kmer_index.hpp"
+#include "utils/mph_index/kmer_index.hpp"
 #include "hkmer.hpp"
 
 #include <vector>
diff --git a/src/projects/ionhammer/main.cpp b/src/projects/ionhammer/main.cpp
index cb3f35b..ab6fd5b 100644
--- a/src/projects/ionhammer/main.cpp
+++ b/src/projects/ionhammer/main.cpp
@@ -5,18 +5,18 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 
-#include "io/reads_io/file_reader.hpp"
-#include "io/sam_io/bam_reader.hpp"
-#include "io/reads_io/paired_readers.hpp"
-#include "io/reads_io/osequencestream.hpp"
-#include "io/reads_io/read_processor.hpp"
+#include "io/reads/file_reader.hpp"
+#include "io/sam/bam_reader.hpp"
+#include "io/reads/paired_readers.hpp"
+#include "io/reads/osequencestream.hpp"
+#include "io/reads/read_processor.hpp"
 
-#include "utils/adt/concurrent_dsu.hpp"
+#include "common/adt/concurrent_dsu.hpp"
 
-#include "dev_support/segfault_handler.hpp"
-#include "dev_support/memory_limit.hpp"
+#include "utils/segfault_handler.hpp"
+#include "utils/memory_limit.hpp"
 
 #include "HSeq.hpp"
 #include "kmer_data.hpp"
@@ -27,7 +27,7 @@
 #include "expander.hpp"
 #include "config_struct.hpp"
 
-#include "dev_support/openmp_wrapper.h"
+#include "utils/openmp_wrapper.h"
 
 #include "version.hpp"
 
diff --git a/src/projects/ionhammer/read_corrector.hpp b/src/projects/ionhammer/read_corrector.hpp
index def12aa..e06df5b 100644
--- a/src/projects/ionhammer/read_corrector.hpp
+++ b/src/projects/ionhammer/read_corrector.hpp
@@ -35,7 +35,7 @@
 #include <fstream>
 
 #if 1
-#include "data_structures/sequence/nucl.hpp"
+#include "sequence/nucl.hpp"
 #include <iostream>
 #include <iomanip>
 #endif
diff --git a/src/projects/ionhammer/subcluster.cpp b/src/projects/ionhammer/subcluster.cpp
index d5dc0a2..1b27e2f 100644
--- a/src/projects/ionhammer/subcluster.cpp
+++ b/src/projects/ionhammer/subcluster.cpp
@@ -10,7 +10,7 @@
 #include "consensus.hpp"
 #include "hkmer_distance.hpp"
 #include "kmer_data.hpp"
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 
 #include <boost/numeric/ublas/matrix.hpp>
 
diff --git a/src/projects/mph_test/CMakeLists.txt b/src/projects/mph_test/CMakeLists.txt
index 7338861..270854f 100644
--- a/src/projects/mph_test/CMakeLists.txt
+++ b/src/projects/mph_test/CMakeLists.txt
@@ -12,7 +12,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 add_executable(spades-kmercount
                main.cpp)
 
-target_link_libraries(spades-kmercount spades_modules ${COMMON_LIBRARIES})
+target_link_libraries(spades-kmercount common_modules ${COMMON_LIBRARIES})
 
 if (SPADES_STATIC_BUILD)
   set_target_properties(spades-kmercount PROPERTIES LINK_SEARCH_END_STATIC 1)
diff --git a/src/projects/mph_test/main.cpp b/src/projects/mph_test/main.cpp
index c638f77..e5421c1 100644
--- a/src/projects/mph_test/main.cpp
+++ b/src/projects/mph_test/main.cpp
@@ -5,14 +5,13 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/logger/log_writers.hpp"
-#include "dev_support/segfault_handler.hpp"
-#include "data_structures/indices/perfect_hash_map.hpp"
-#include "data_structures/sequence/runtime_k.hpp"
-#include "data_structures/mph_index/kmer_index_builder.hpp"
+#include "utils/logger/log_writers.hpp"
+#include "utils/segfault_handler.hpp"
+#include "utils/indices/perfect_hash_map.hpp"
+#include "utils/mph_index/kmer_index_builder.hpp"
 
-#include "io/reads_io/read_processor.hpp"
-#include "io/reads_io/io_helper.hpp"
+#include "io/reads/read_processor.hpp"
+#include "io/reads/io_helper.hpp"
 
 #include "version.hpp"
 
@@ -31,15 +30,15 @@ void create_console_logger() {
     attach_logger(lg);
 }
 
-class SimplePerfectHashMap : public debruijn_graph::KeyIteratingMap<runtime_k::RtSeq, uint32_t> {
-    using base = debruijn_graph::KeyIteratingMap<runtime_k::RtSeq, uint32_t>;
+class SimplePerfectHashMap : public debruijn_graph::KeyIteratingMap<RtSeq, uint32_t> {
+    using base = debruijn_graph::KeyIteratingMap<RtSeq, uint32_t>;
   public:
     SimplePerfectHashMap(size_t k, const std::string &workdir)
             : base(k, workdir) {}
 };
 
-class ParallelSortingSplitter : public KMerSortingSplitter<runtime_k::RtSeq> {
-  using Seq = runtime_k::RtSeq;
+class ParallelSortingSplitter : public KMerSortingSplitter<RtSeq> {
+  using Seq = RtSeq;
 
   std::vector<std::string> files_;
   unsigned nthreads_;
@@ -67,7 +66,7 @@ class ParallelSortingSplitter : public KMerSortingSplitter<runtime_k::RtSeq> {
 
           unsigned thread_id = omp_get_thread_num();
           bool stop = false;
-          runtime_k::RtSeq kmer = seq.start<runtime_k::RtSeq>(this->K_) >> 'A';
+          RtSeq kmer = seq.start<RtSeq>(this->K_) >> 'A';
           for (size_t j = this->K_ - 1; j < seq.size(); ++j) {
               kmer <<= seq[j];
               stop |= splitter_.push_back_internal(kmer, thread_id);
@@ -110,6 +109,8 @@ class ParallelSortingSplitter : public KMerSortingSplitter<runtime_k::RtSeq> {
         }
         INFO("Total " << filler.processed() << " reads processed");
 
+        this->ClearBuffers();
+
         return out;
     }
 };
@@ -169,7 +170,7 @@ int main(int argc, char* argv[]) {
             for (const auto& s : input)
                 splitter.push_back(s);
         }
-        KMerDiskCounter<runtime_k::RtSeq> counter(workdir, splitter);
+        KMerDiskCounter<RtSeq> counter(workdir, splitter);
         counter.CountAll(16, nthreads);
         INFO("K-mer counting done, kmers saved to " << counter.GetFinalKMersFname());
     } catch (std::string const &s) {
diff --git a/src/projects/mts/CMakeLists.txt b/src/projects/mts/CMakeLists.txt
new file mode 100644
index 0000000..1e06d4b
--- /dev/null
+++ b/src/projects/mts/CMakeLists.txt
@@ -0,0 +1,57 @@
+############################################################################
+# Copyright (c) 2015-2016 Saint Petersburg State University
+# Copyright (c) 2011-2014 Saint Petersburg Academic University
+# All Rights Reserved
+# See file LICENSE for details.
+############################################################################
+
+cmake_minimum_required(VERSION 2.8)
+
+project(kmer_count_filter CXX)
+
+include_directories(kmc_api)
+include_directories(${CMAKE_SOURCE_DIR}/include)
+include_directories(${EXT_DIR}/include)
+include_directories(${CMAKE_SOURCE_DIR}/debruijn)
+
+add_executable(kmer_multiplicity_counter
+        kmc_api/kmc_file.cpp
+        kmc_api/kmer_api.cpp
+        kmc_api/mmer.cpp
+        kmer_multiplicity_counter.cpp)
+
+target_link_libraries(kmer_multiplicity_counter common_modules utils input getopt_pp ${COMMON_LIBRARIES})
+
+if (SPADES_STATIC_BUILD)
+  set_target_properties(kmer_multiplicity_counter PROPERTIES LINK_SEARCH_END_STATIC 1)
+endif()
+
+add_executable(prop_binning
+               propagate.cpp
+               read_binning.cpp
+               prop_binning.cpp)
+
+target_link_libraries(prop_binning common_modules nlopt BamTools ssw getopt_pp ${COMMON_LIBRARIES})
+
+if (SPADES_STATIC_BUILD)
+  set_target_properties(prop_binning PROPERTIES LINK_SEARCH_END_STATIC 1)
+endif()
+
+add_executable(stats
+               stats.cpp)
+
+target_link_libraries(stats common_modules nlopt BamTools ssw getopt_pp ${COMMON_LIBRARIES})
+
+if (SPADES_STATIC_BUILD)
+  set_target_properties(stats PROPERTIES LINK_SEARCH_END_STATIC 1)
+endif()
+
+add_executable(contig_abundance_counter
+               contig_abundance_counter.cpp
+               contig_abundance.cpp)
+
+target_link_libraries(contig_abundance_counter common_modules nlopt BamTools ssw getopt_pp ${COMMON_LIBRARIES})
+
+if (SPADES_STATIC_BUILD)
+  set_target_properties(contig_abundance_counter PROPERTIES LINK_SEARCH_END_STATIC 1)
+endif()
diff --git a/src/projects/mts/Common.snake b/src/projects/mts/Common.snake
new file mode 100644
index 0000000..6cd6a50
--- /dev/null
+++ b/src/projects/mts/Common.snake
@@ -0,0 +1,69 @@
+configfile: "config.yaml"
+
+from itertools import chain
+from functools import partial
+import os.path
+
+from scripts.common import detect_reads
+
+#Config parameters
+IN = config["IN"]
+LOCAL_DIR = config["LOCAL_DIR"]
+SPADES = config.get("SPADES", LOCAL_DIR)
+SPADES_REASSEMBLY = config.get("SPADES_REASSEMBLY", LOCAL_DIR)
+BIN = config.get("BIN", os.path.join(LOCAL_DIR, "build/release/bin"))
+SCRIPTS = config.get("SCRIPTS", os.path.join(LOCAL_DIR, "src/projects/mts/scripts"))
+SOFT = config["SOFT"]
+K = int(config.get("K", 55))
+SMALL_K = int(config.get("SMALL_K", 21))
+MIN_CONTIG_LENGTH = int(config.get("MIN_CONTIG_LENGTH", 2000))
+THREADS = config.get("THREADS", 16)
+BINNER = config.get("BINNER", "canopy")
+
+#Autodetect samples and their reads
+SAMPLE_DIRS = set(glob_wildcards(os.path.join(IN, "{sample,sample\d+}"))[0])
+SAMPLE_COUNT = len(SAMPLE_DIRS)
+SAMPLES = list()
+for i in range(1, SAMPLE_COUNT + 1):
+    sample_name = "sample" + str(i)
+    if sample_name not in SAMPLE_DIRS:
+        raise WorkflowError("Samples must be consecutive; missing " + sample_name)
+    SAMPLES.append(sample_name)
+
+SAMPLE_READS = dict(map(lambda sample: (sample, detect_reads(os.path.join(IN, sample))), SAMPLES))
+
+#Group samples
+GROUP_SAMPLES = config.get("GROUPS", [])
+USED_SAMPLES = set(chain(*GROUP_SAMPLES))
+#TODO: double-check
+#Replace the wildcard group with unused samples
+if GROUP_SAMPLES and GROUP_SAMPLES[-1] == "*":
+    GROUP_SAMPLES[-1] = [sample for sample in SAMPLES if sample not in USED_SAMPLES]
+#Otherwise, add a single-sample group from the rest of the samples
+else:
+    for sample in SAMPLES:
+        if sample not in USED_SAMPLES:
+            GROUP_SAMPLES.append([sample])
+
+GROUPS = dict()
+group_id = 1
+for group in GROUP_SAMPLES:
+    if len(group) == 1:
+        key = group[0]
+    else:
+        key = "group" + str(group_id)
+        #SAMPLE_READS[key] = ["reads/{}/{}.fastq".format(key, dir) for dir in ["left", "right"]]
+        SAMPLE_READS[key] = ([SAMPLE_READS[s][0] for s in group], [SAMPLE_READS[s][1] for s in group])
+        group_id += 1
+    GROUPS[key] = group
+
+#Helpers for locating input files
+def sample_reads(dir, wildcards):
+    res = SAMPLE_READS[wildcards.sample][dir]
+    if res is str:
+        return [res]
+    else:
+        return res
+
+left_reads  = partial(sample_reads, 0)
+right_reads = partial(sample_reads, 1)
diff --git a/src/projects/mts/README b/src/projects/mts/README
new file mode 100644
index 0000000..5e8e6d3
--- /dev/null
+++ b/src/projects/mts/README
@@ -0,0 +1,21 @@
+1. Installing Snakemake
+If you have properly installed Python 3.3+, just
+> easy_install3 snakemake
+or
+> pip3 install snakemake
+In case you have to install Python 3 yourself, we recommend using the Miniconda Python 3 distribution.
+With Miniconda installed, you can issue
+> conda install -c bioconda snakemake
+
+2. Running MTS
+Make a directory for output, place config.yaml there, and configure it. Then run
+> snakemake --directory <output directory>  --cores XX
+
+3. Gathering stats
+To render some interesting info, you need to specify some references in config:
+REFS: path
+or
+REFS: [path1, path2, ...]
+where path can be either a single reference or a folder with references.
+Then run the stats target manually:
+> snakemake --directory <output directory> stats_all
diff --git a/src/projects/mts/Snakefile b/src/projects/mts/Snakefile
new file mode 100644
index 0000000..25553b4
--- /dev/null
+++ b/src/projects/mts/Snakefile
@@ -0,0 +1,175 @@
+include: "Common.snake"
+
+import os
+import os.path
+
+from scripts.common import dump_dict
+
+#Path to saves of necessary assembly stage
+SAVES = "K{0}/saves/01_before_repeat_resolution/graph_pack".format(K)
+
+onstart:
+    try:
+        os.mkdir("tmp")
+    except:
+        pass
+    print("Detected", SAMPLE_COUNT, "samples in", IN)
+    print("They form: ", GROUPS)
+
+# ---- Main pipeline -----------------------------------------------------------
+
+rule all:
+    input:   dynamic("reassembly/{cag}.fasta")
+    message: "Dataset of {SAMPLE_COUNT} samples from {IN} has been processed."
+
+rule assemble:
+    input:   left=left_reads, right=right_reads
+    output:  "assembly/{sample}.fasta"
+    #TODO: remove this boilerplate
+    params:  left=lambda w: " ".join(expand("-1 {r}", r=left_reads(w))),
+             right=lambda w: " ".join(expand("-2 {r}", r=right_reads(w))),
+             dir="assembly/{sample}"
+    log:     "assembly/{sample}.log"
+    threads: THREADS
+    message: "Assembling {wildcards.sample} with SPAdes"
+    shell:   "{SPADES}/spades.py --meta -m 400 -t {threads} {params.left} {params.right}"
+             " -o {params.dir} >{log} 2>&1 && "
+             "cp {params.dir}/scaffolds.fasta {output}"
+
+rule assemble_all:
+    input:   expand("assembly/{sample}.fasta", sample=GROUPS)
+    message: "Assembled all samples"
+
+rule descriptions:
+    output:  expand("profile/{sample}.desc", sample=SAMPLES)
+    message: "Generating sample descriptions"
+    run:
+        for sample in SAMPLES:
+            with open("profile/{}.desc".format(sample), "w") as out:
+                wildcards.sample = sample
+                print(left_reads(wildcards),  file=out)
+                print(right_reads(wildcards), file=out)
+
+rule kmc:
+    input:   "profile/{sample}.desc"
+    output:  temp("tmp/{sample}.kmc_pre"), temp("tmp/{sample}.kmc_suf")
+    params:  min_mult=2, tmp="tmp/{sample}_kmc", out="tmp/{sample}"
+    log:     "profile/kmc_{sample}.log"
+    threads: THREADS
+    message: "Running kmc for {wildcards.sample}"
+    shell:   "mkdir {params.tmp} && "
+             "{SOFT}/kmc -k{SMALL_K} -t{threads} -ci{params.min_mult} -cs65535"
+             " @{input} {params.out} {params.tmp} >{log} 2>&1 && "
+             "rm -rf {params.tmp}"
+
+rule multiplicities:
+    input:   expand("tmp/{sample}.kmc_pre", sample=SAMPLES), expand("tmp/{sample}.kmc_suf", sample=SAMPLES)
+    output:  "profile/kmers.kmm"
+    params:  kmc_files=" ".join(expand("tmp/{sample}", sample=SAMPLES)), out="profile/kmers"
+    log:     "profile/kmers.log"
+    message: "Gathering {SMALL_K}-mer multiplicities from all samples"
+    shell:   "{BIN}/kmer_multiplicity_counter -n {SAMPLE_COUNT} -k {SMALL_K} -s 3"
+             " -f tmp -t {threads} -o {params.out} >{log} 2>&1 && "
+             "rm tmp/*.sorted"
+
+rule profile:
+    input:   contigs="assembly/{sample,\w+\d+}.fasta", mpl="profile/kmers.kmm"
+    output:  id="profile/{sample}.id", mpl="profile/{sample}.mpl", splits= "assembly/{sample}_splits.fasta"
+    log:     "profile/{sample}.log"
+    message: "Counting contig abundancies for {wildcards.sample}"
+    shell:   "{BIN}/contig_abundance_counter -k {SMALL_K} -w tmp -c {input.contigs}"
+             " -n {SAMPLE_COUNT} -m profile/kmers -o profile/{wildcards.sample}"
+             " -f {output.splits} -l {MIN_CONTIG_LENGTH} >{log} 2>&1"
+
+rule binning_pre:
+    input:   expand("profile/{sample}.id", sample=GROUPS)
+    output:  "binning/{binner}/profiles.in"
+    params:  " ".join(list(GROUPS.keys()))
+    message: "Preparing input for {wildcards.binner}"
+    shell:   "{SCRIPTS}/make_input.py -t {wildcards.binner} -d profile -o {output} {params}"
+
+rule canopy:
+    input:   "binning/canopy/profiles.in"
+    output:  out="binning/canopy/binning.out", prof="binning/canopy/bins.prof"
+    threads: THREADS
+    message: "Running canopy clustering"
+    shell:   "{SOFT}/cc.bin -n {threads} -i {input} -o {output.out} -c {output.prof} >binning/canopy/canopy.log 2>&1"
+
+rule combine_splits:
+    input:   expand("assembly/{sample}_splits.fasta", sample=GROUPS)
+    output:  "assembly/samples_splits.fasta"
+    message: "Combine splitted contigs"
+    shell:   "{SCRIPTS}/combine_contigs.py -r {input} > {output}"
+
+#FIXME what does gt1000 mean?
+rule concoct:
+    input:   contigs=rules.combine_splits.output[0], profiles="binning/concoct/profiles.in"
+    output:  out="binning/concoct/clustering_gt1000.csv"
+    params:  "binning/concoct"
+    message: "Running CONCOCT clustering"
+    shell:   "mkdir -p {params} && "
+             "set +u; source activate concoct_env; set -u && "
+             "concoct --composition_file {input.contigs} --coverage_file {input.profiles} -b {params}"
+
+binning_inputs = {"canopy": rules.canopy.output.out, "concoct": rules.concoct.output.out}
+
+rule binning_post:
+    input:   binning_inputs[BINNER]
+    output:  expand("annotation/{sample}.ann", sample=GROUPS)
+    message: "Preparing raw annotations"
+    shell:   "{SCRIPTS}/parse_output.py -t {BINNER} -o annotation {input}"
+
+#Post-clustering pipeline
+rule read_binning:
+    input:   contigs="assembly/{sample}.fasta", ann="annotation/{sample}.ann",
+             left=left_reads, right=right_reads
+    output:  "propagation/{sample}_edges.ann"
+    params:  saves=os.path.join("assembly/{sample}/", SAVES),
+             splits="assembly/{sample}_splits.fasta",
+             out="propagation/{sample}_edges",
+             group=lambda wildcards: GROUPS[wildcards.sample]
+             #left=" ".join(input.left), right=" ".join(input.right)
+    log:     "binning/{sample}.log"
+    message: "Propagating annotation & binning reads for {wildcards.sample}"
+    shell:
+          "{BIN}/prop_binning -k {K} -s {params.saves} -c {input.contigs}"
+          " -n {params.group} -l {input.left} -r {input.right}"
+          " -a {input.ann} -f {params.splits} -o binning -d {params.out} >{log} 2>&1"
+
+#TODO: bin profiles for CONCOCT
+rule choose_samples:
+    input:   binned=expand("propagation/{sample}_edges.ann", sample=GROUPS),
+             prof=rules.canopy.output.prof
+    output:  dynamic("binning/{cag}/left.fastq"),
+             dynamic("binning/{cag}/right.fastq")
+    log:     "binning/choose_samples.log"
+    message: "Choosing samples for all CAGs"
+    shell:   "{SCRIPTS}/choose_samples.py {input.prof} binning/ >{log} 2>&1"
+
+rule reassembly_config:
+    input:   "binning/{cag}/left.fastq"
+    output:  "reassembly/{cag}.yaml"
+    message: "Generated config file for reassembly of {wildcards.cag}"
+    run:
+        with open(output[0], "w") as outfile:
+            conf = {"k": SMALL_K, "sample_cnt": SAMPLE_COUNT,
+                    "kmer_mult": str(rules.multiplicities.params.out),
+                    "bin": wildcards.cag, "bin_prof": str(rules.canopy.output.prof),
+                    "edges_sqn": "profile/{}_edges.fasta".format(wildcards.cag),
+                    "edges_mpl": "profile/{}_edges.mpl".format(wildcards.cag),
+                    "edge_fragments_mpl": "profile/{}_edges_frag.mpl".format(wildcards.cag),
+                    "frag_size": 10000, "min_len": 100}
+            dump_dict(conf, outfile)
+
+rule reassemble:
+    input:   left="binning/{cag}/left.fastq", right="binning/{cag}/right.fastq",
+             config="reassembly/{cag}.yaml"
+    output:  "reassembly/{cag}.fasta"
+    params:  "reassembly/reassembly_{cag}"
+    log:     "reassembly/reassembly_{cag}.log"
+    threads: THREADS
+    message: "Reassembling reads for {wildcards.cag}"
+    shell:   "{SPADES_REASSEMBLY}/spades.py --meta -t {threads}"
+             " --pe1-1 {input.left} --pe1-2 {input.right} --pe1-ff"
+             " -o {params} --series-analysis {input.config} >{log} 2>&1 && "
+             "cp {params}/scaffolds.fasta {output}"
diff --git a/src/projects/mts/Stats.snake b/src/projects/mts/Stats.snake
new file mode 100644
index 0000000..5019433
--- /dev/null
+++ b/src/projects/mts/Stats.snake
@@ -0,0 +1,270 @@
+include: "Common.snake"
+
+import os
+import os.path
+
+import pandas
+from pandas import DataFrame
+
+from scripts.common import gather_refs, dump_dict
+
+#Additional config parameters
+# NOTE(review): config["QUAST"] is treated here as a directory to join
+# with quast.py/metaquast.py, but the sample config.yaml in this project
+# sets QUAST to a full command line -- confirm which form is expected.
+try:
+    QUAST_DIR = config["QUAST"]
+    QUAST = os.path.join(QUAST_DIR, "quast.py")
+    METAQUAST = os.path.join(QUAST_DIR, "metaquast.py")
+except KeyError:
+    # Fall back to quast/metaquast found on PATH.
+    QUAST = "quast"
+    METAQUAST = "metaquast"
+
+#Autodetect bins
+# Only "good" CAGs are considered: those for which reads were chosen
+# (a binning/CAG*/left.fastq exists).
+CAGS, = glob_wildcards("binning/{cag,CAG\d+}/left.fastq")
+CAGS.sort()
+
+CAG_EDGES = [c + "_edges" for c in CAGS]
+
+#Detect references
+REFS = dict(gather_refs(config.get("REFS", [])))
+ALL_REFS = ",".join(path for path in REFS.values())
+
+# Fragment name sets used by combine_fragments_info, keyed by stage.
+FRAGMENT_NAMES_BY_TYPE = {"reassembly": CAG_EDGES,
+                          "initial_assembly": list(GROUPS.keys())}
+
+# Input function: resolves the {ref} wildcard to its FASTA path.
+def ref_path(wildcards):
+    return REFS[wildcards.ref]
+
+onstart:
+    try:
+        os.mkdir("tmp")
+    # NOTE(review): bare except swallows every error, not just "dir exists"
+    except:
+        pass
+    print("Detected", SAMPLE_COUNT, "samples in", IN)
+    if CAGS:
+        print("Detected good (abundant) CAGs:", " ".join(CAGS))
+    if REFS:
+        print("Detected references:", " ".join(REFS))
+
+#===============================================================================
+#---- Statistics section -------------------------------------------------------
+#===============================================================================
+
+#---- Single alignments for samples per reference -------------------------------
+#TODO: use alignments from meta version instead
+# Aligns every sample's initial assembly against one reference with QUAST.
+# NOTE(review): a log: file is declared but QUAST output is redirected to
+# /dev/null, so the log stays empty.
+rule quast_all_samples:
+    input:   ref_fn=ref_path, contigs=expand("assembly/{sample}.fasta", sample=GROUPS)
+    output:  summary_tsv="stats/summary/q_{ref}.tsv", report="stats/initial_assembly/{ref}/report.txt"
+    params:  "stats/initial_assembly/{ref}"
+    log:     "stats/initial_assembly/{ref}/quast.log"
+    threads: THREADS
+    message: "Aligning all samples on {wildcards.ref}"
+    shell:   "{QUAST} -t {threads} -R {input.ref_fn} {input.contigs} -o {params} >/dev/null 2>&1 && "
+             "cp {params}/report.tsv {output.summary_tsv}"
+
+# Same QUAST alignment for the per-CAG reassembled edge fragments.
+# NOTE(review): message says "samples" -- likely copied from the rule above.
+rule quast_all_reassemblies:
+    input:   ref=ref_path, fragments=expand("profile/{cag_edges}.fasta", cag_edges=CAG_EDGES)
+    output:  "stats/reassembly/{ref}/report.txt"
+    params:  "stats/reassembly/{ref}"
+    log:     "stats/reassembly/{ref}/quast.log"
+    threads: THREADS
+    message: "Aligning all samples on {wildcards.ref}"
+    shell:   "{QUAST} -t {threads} -R {input.ref} {input.fragments} -o {params} >/dev/null 2>&1 && "
+             "cp {params}/report.tsv {output}"
+
+#---- Contigs of interest ------------------------------------------------------
+# Filters the nucmer alignments QUAST produced for one fragment set; if
+# QUAST emitted no filtered coords file, an empty .info is created instead.
+rule filter_ref_alignments:
+    input:   "{path}/report.txt"
+    output:  "{path}/{fragments}.info"
+    params:  "{path}/contigs_reports/nucmer_output/{fragments}.coords.filtered"
+    shell:   "if [ -f {params} ] ; then {SCRIPTS}/filter_nucmer.py {params} {output} {MIN_CONTIG_LENGTH} 70 ; else touch {output} ; fi"
+
+#---- GF of combined sample ----------------------------------------------------
+#rule combine_filtered:
+#    input:   contigs=expand("assembly/{sample}.fasta", sample=GROUPS),
+#             filters=expand("stats/{{ref}}/{sample}.cont", sample=GROUPS)
+#    output:  "stats/{ref}.fasta"
+#    message: "Gathering all interesting contigs for {wildcards.ref} into a single assembly"
+#    shell:   "{SCRIPTS}/filter_contigs.py {SAMPLE_COUNT} {output} {input.contigs} {input.filters}"
+
+# Aligns the combined filtered sample (stats/{ref}.fasta) on its reference.
+# NOTE(review): stats/{ref}.fasta is produced by the combine_filtered rule
+# that is commented out above -- confirm how this input is generated now.
+rule quast_combined:
+    input:   ref=ref_path, contigs="stats/{ref}.fasta"
+    output:  "stats/q_{ref}_all/report.tsv"
+    params:  "stats/q_{ref}_all"
+    log:     "stats/q_{ref}_all.log"
+    threads: THREADS
+    message: "Aligning combined sample on {wildcards.ref}"
+    shell:   "{QUAST} -t {threads} -R {input.ref} {input.contigs} -o {params} >{log} 2>&1"
+
+# Run this
+# Aggregate target: QUAST on the combined sample for every reference.
+rule quast_combined_all:
+    input:   expand("stats/q_{ref}_all/report.tsv", ref=REFS)
+    message: "Calculated QUAST metrics on all combined samples"
+
+#---- Bins of interest ---------------------------------------------------------
+# Selects bins whose contigs align well to the reference for one sample.
+rule int_bins:
+    input:   "annotation/{sample}.ann", "stats/{ref}/{sample}.info"
+    output:  "stats/{ref}/{sample}.bin"
+    message: "Filtering interesting bins for {wildcards.sample} aligned to {wildcards.ref}"
+    shell:   "{SCRIPTS}/filter_bins.py {input} > {output}"
+
+# Union of interesting bins over all samples for one reference.
+rule int_bins_all_samples:
+    input:   expand("stats/{{ref}}/{sample}.bin", sample=GROUPS)
+    output:  "stats/{ref}/total.bin"
+    message: "Gathering interesting bins for {wildcards.ref} from all samples"
+    run:
+        bins = set()
+        for in_fn in input:
+            with open(in_fn) as infile:
+                for line in infile:
+                    # NOTE(review): lines keep their trailing '\n', and
+                    # print() below adds another -- output has blank lines
+                    # between entries; also 'bin' shadows the builtin.
+                    bins.add(line)
+        with open(output[0], "w") as outfile:
+            for bin in bins:
+                print(bin, file=outfile)
+
+# Run this
+# Aggregate target over all references.
+rule int_bins_all:
+    input:   expand("stats/{ref}/total.bin", ref=REFS)
+    message: "Gathered all interesting bins"
+
+#---- GF per bin per reference -------------------------------------------------
+#Helper formatters for determining input files from different stages
+# Maps stage name -> (contigs FASTA pattern, annotation pattern) for the
+# two annotation stages: before ("prelim") and after ("prop") propagation.
+PROP = {"prelim": ("assembly/{}_splits.fasta",   "annotation/{}.ann"),
+        "prop":   ("propagation/{}_edges.fasta", "propagation/{}_edges.ann")}
+
+#TODO: split into different directories per sample
+# Splits one sample's assembly into per-bin FASTA files. The declared
+# output is only a touch()-ed marker; the per-bin FASTAs are side effects
+# written into the params directory by split_bins.py.
+rule split_bins:
+    input:   lambda w: PROP[w.prop][0].format(w.sample),
+             lambda w: PROP[w.prop][1].format(w.sample)
+    output:  touch("binning/{prop}/{sample}.log")
+    log:     "binning/{prop}/split_{sample}.log"
+    params:  "binning/{prop}"
+    message: "Splitting assembly of {wildcards.sample} between {wildcards.prop} bins"
+    shell:   "{SCRIPTS}/split_bins.py {input} {params} >{log}"
+
+# Concatenates every sample's contigs for one CAG into a single FASTA.
+# params is a backtick glob expanded by the shell at run time, since the
+# per-sample file names are not known to Snakemake.
+rule cat_binned_contigs:
+    input:   expand("binning/{{prop}}/{sample}.log", sample=SAMPLES)
+    output:  "binning/{prop}/{cag,CAG\d+}.fasta"
+    params:  "`ls binning/{prop}/*-{cag}.fasta`"
+    message: "Combine binned contigs ({wildcards.prop}) for {wildcards.cag}"
+    shell:   "cat {params} > {output}"
+
+#Two helpers for determining dependencies of QUAST targets.
+#For split contigs and reassemblies, we need only corresponding FASTA.
+#For combined contigs, we need to glue their split pieces first.
+def stats_input(wildcards):
+    # Returns the Snakemake dependencies for a QUAST stats target.
+    # "reassembly" needs the per-CAG reassemblies; "split_*" needs the
+    # split markers; "bin_*" needs the glued per-CAG FASTAs.
+    if wildcards.stage == "reassembly":
+        return expand("reassembly/{cag}.fasta", cag=CAGS)
+    # NOTE(review): maxsplit=2 can yield three fields and break this
+    # two-way unpack; the stages used here are two-part, so maxsplit=1
+    # was probably intended -- confirm.
+    w_bin, w_prop = wildcards.stage.split("_", 2)
+    if w_bin == "split":
+        return expand("binning/{prop}/{sample}.log", prop=w_prop, sample=GROUPS)
+    elif w_bin == "bin":
+        return expand("binning/{prop}/{cag}.fasta", prop=w_prop, cag=CAGS)
+
+def stats_data(wildcards):
+    # Returns a shell backtick glob naming the FASTA files QUAST should
+    # align for the given stage (evaluated by the shell at run time).
+    if wildcards.stage == "reassembly":
+        return "`ls reassembly/CAG*.fasta`"
+    # See the maxsplit note in stats_input above.
+    w_bin, w_prop = wildcards.stage.split("_", 2)
+    masks = {"bin": "CAG*", "split": "*-CAG*"}
+    return "`ls binning/{}/{}.fasta`".format(w_prop, masks[w_bin])
+
+# Runs metaQUAST for one stage's assemblies against all references and
+# extracts the genome-fraction summary table.
+rule quast_stats:
+    input:   stats_input
+    output:  "stats/summary/gf_{stage}.tsv"
+    params:  data=stats_data, out="stats/q_{stage}"
+    log:     "stats/q_{stage}.log"
+    threads: THREADS
+    message: "Aligning {wildcards.stage} assemblies on all references"
+    shell:   "{METAQUAST} -t {threads} -R {ALL_REFS} {params.data} -o {params.out} >{log} 2>&1 && "
+             "cp '{params.out}/summary/TSV/Genome_fraction_(%).tsv' {output}"
+
+# Run this AFTER 'all'
+# Aggregate target: GF tables for both binning stages plus the combined
+# contig-alignment table for the initial assembly.
+rule stats_all:
+    input:   expand("stats/summary/gf_{bin}_{prop}.tsv", bin=["bin"], prop=["prelim", "prop"]), 
+             "stats/initial_assembly/total.cont"
+    message: "Gathered some numbers, deal with them."
+
+#---- Reassembly statistics ----------------------------------------------------
+
+# Run this AFTER 'reassembly_all'
+# Summarizes reassembly quality from the GF table and contig alignments.
+rule stats_reassembly:
+    input:   "stats/summary/gf_reassembly.tsv",
+             "stats/reassembly/total.cont"
+    output:  "stats/summary/reassembly.tsv"
+    params:  "stats/q_reassembly"
+    message: "Gathered bins stats"
+    shell:   "{SCRIPTS}/gather_stats.py {params} > {output}"
+
+#---- Propagator statistics ----------------------------------------------------
+# Compares pre- and post-propagation annotations for one sample/CAG via
+# the compiled stats binary.
+# NOTE(review): {cag} appears only in output/log and the -b flag -- the
+# declared inputs are identical for every CAG, so all CAG targets share
+# the same dependencies.
+rule prop_stats:
+    input:   prelim="annotation/{sample}.ann", prop="annotation/{sample}_edges.ann",
+             contigs="assembly/{sample}.fasta", edges="assembly/{sample}_edges.fasta",
+             ref=REFS.values() #, bins="{sample}/{ref}.bin"
+    output:  "stats/prop_{cag}/{sample}.tsv"
+    log:     "stats/prop_{cag}/{sample}.log"
+    message: "Calculating propagation statistics for {wildcards.sample}"
+    shell:   "{BIN}/stats -k {K} -s {wildcards.sample}/assembly/{SAVES} -r {input.ref}"
+             " -c {input.contigs} -a {input.prelim} -e {input.edges} -p {input.prop}"
+             " -b {wildcards.cag} -o {output} >{log}"
+
+# Run this
+# Aggregate target over every sample x CAG combination.
+rule prop_stats_all:
+    input:   expand("stats/prop_{cag}/{sample}.tsv", sample=GROUPS, cag=CAGS)
+    message: "Calculated propagation statistics"
+
+#---- CheckM stats -------------------------------------------------------------
+# Runs the full CheckM lineage workflow on the reassembled bins inside
+# the 'concoct_env' conda environment. set +u/-u brackets the 'source
+# activate', which is not safe under nounset.
+rule checkm:
+    input:   expand("reassembly/{cag}.fasta", cag=CAGS)
+    output:  qa="stats/checkm/qa.tsv", tree_qa="stats/checkm/tree_qa.tsv"
+    params:  dir="stats/checkm"
+    threads: THREADS
+    shell:   "set +u; source activate concoct_env; set -u \n"
+             "checkm tree -x fasta reassembly {params.dir} \n"
+             "checkm tree_qa -o 2 --tab_table -f {output.tree_qa} {params.dir}\n"
+             "checkm lineage_set {params.dir} {params.dir}/lineage.ms\n"
+             "checkm analyze -x fasta {params.dir}/lineage.ms reassembly {params.dir}\n"
+             "checkm qa -o 2 --tab_table -f {output.qa} {params.dir}/lineage.ms {params.dir}"
+
+# Joins the CheckM qa and tree_qa tables on bin id and keeps the taxonomy,
+# size, completeness and contamination columns, trimming each taxonomy
+# string down to its last ';'-separated rank.
+rule parse_checkm:
+    input:   qa=rules.checkm.output.qa, tree_qa=rules.checkm.output.tree_qa
+    output:  "stats/summary/checkm.tsv"
+    #shell:   "{SCRIPTS}/parse_checkm.py {input.qa} {input.tree_qa} > {output}"
+    run:
+        table = pandas.read_table(input.qa, dtype="str")
+        tree_table = pandas.read_table(input.tree_qa, dtype="str", na_filter=False)
+        all_table = pandas.merge(table, tree_table, on="Bin Id")
+        res_table = all_table[["Bin Id", "Taxonomy (contained)", "Taxonomy (sister lineage)", "Genome size (Mbp)", "Completeness", "Contamination"]].copy()
+        # Keep only the most specific rank of the ';'-separated lineage.
+        def extract_taxon(taxonomy):
+            return str(taxonomy).split(";")[-1]
+        for column in ["Taxonomy (contained)", "Taxonomy (sister lineage)"]:
+            res_table[column] = res_table[column].apply(extract_taxon)
+        res_table.to_csv(output[0], index=False, sep="\t")
+
+#---- PCA ----------------------------------------------------------------------
+# Plots a PCA of canopy profiles for one sample via an R script.
+rule pca:
+    input:   "profile/canopy.in", "profile/canopy.out", "{sample}.cont"
+    output:  "stats/{sample}.png"
+    message: "Doing some visualization"
+    shell:
+        "Rscript {SCRIPTS}/pca.R {input} {output}"
+
+# Input function: all .info files for one assembly type / reference pair,
+# using the fragment name sets declared in FRAGMENT_NAMES_BY_TYPE.
+def fragments_info_by_assembly_type(wildcards):
+    frags=FRAGMENT_NAMES_BY_TYPE[wildcards.assembly_type]
+    return expand("stats/{assembly_type}/{ref}/{fragments}.info", assembly_type=wildcards.assembly_type, ref=wildcards.ref, fragments=frags)
+
+# Concatenates per-fragment .info files, prefixing each line with the
+# fragment name ("<fragment>-<line>").
+rule combine_fragments_info:
+    input:  fragments_info_by_assembly_type 
+    output: "stats/{assembly_type}/{ref}/ref.cont"
+    shell: "rm -rf {output}; for f in {input}; do name=$(basename $f .info); cat $f | sed 's/^/'$name'-/g' >> {output} ; done"
+
+# Concatenates all per-reference ref.cont files, appending the reference
+# name as a tab-separated final column.
+rule combine_refs_info:
+    input:  expand("stats/{{assembly_type}}/{ref}/ref.cont", ref=list(REFS.keys()))
+    output:  "stats/{assembly_type}/total.cont"
+    run:
+        shell("rm -rf {output}")
+        for ref in REFS.keys():
+            shell("awk '{{print $0 \"\t{ref}\"}}' stats/{wildcards.assembly_type}/{ref}/ref.cont >> {output}")
+
+# Run this
+# Overall PCA over the full binning.
+# NOTE(review): "stats/total.cont" does not match the
+# "stats/{assembly_type}/total.cont" pattern produced above -- confirm
+# which rule provides it.
+rule pca_total:
+    input:   "binning/canopy/profiles.in", "binning/canopy/binning.out", "stats/total.cont"
+    output:  "stats/summary/pca.png"
+    shell:   "Rscript {SCRIPTS}/pca.R {input} {output}"
diff --git a/src/projects/mts/annotation.hpp b/src/projects/mts/annotation.hpp
new file mode 100644
index 0000000..fa9ccf8
--- /dev/null
+++ b/src/projects/mts/annotation.hpp
@@ -0,0 +1,310 @@
+//***************************************************************************
+//* Copyright (c) 2015-2016 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+#pragma once
+
+#include "utils/standard_base.hpp"
+#include "pipeline/graph_pack.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
+#include "io/reads/io_helper.hpp"
+#include "formats.hpp"
+
+namespace debruijn_graph {
+
+// Line-oriented reader of contig annotation files.
+// Expected record format: "<contig id> : <bin id> [<bin id> ...]".
+// The stream reads one line ahead so eof() means "no more records".
+class AnnotationStream {
+    std::ifstream inner_stream_;
+    std::string line_;       // look-ahead buffer holding the next record
+
+    // Parses one "<id> : <bins...>" line; reads bins until extraction fails.
+    ContigAnnotation Parse(const std::string& s) const {
+        ContigAnnotation annotation;
+        stringstream ss(s);
+        ss >> annotation.first;
+        string delim;
+        ss >> delim;
+        VERIFY(delim == ":");
+        while (true) {
+            bin_id bin;
+            ss >> bin;
+            if (ss.fail())
+                break;
+            annotation.second.push_back(bin);
+        }
+        return annotation;
+    }
+
+public:
+
+    // Opens the file and primes the look-ahead buffer.
+    AnnotationStream(const std::string& fn) : inner_stream_(fn) {
+        std::getline(inner_stream_, line_);
+    }
+
+    bool eof() const {
+        return inner_stream_.eof();
+    }
+
+    // Yields the buffered record and advances the look-ahead.
+    AnnotationStream& operator >>(ContigAnnotation& annotation) {
+        // NOTE(review): no ';' after VERIFY -- this relies on the macro
+        // expansion terminating the statement; confirm it compiles as-is.
+        VERIFY(!inner_stream_.eof())
+
+        annotation = Parse(line_);
+        std::getline(inner_stream_, line_);
+        return *this;
+    }
+
+    void close() {
+        inner_stream_.close();
+    }
+};
+
+// Writer counterpart of AnnotationStream: emits one
+// "<contig id> : <bin id> [<bin id> ...]" line per annotation.
+class AnnotationOutStream {
+    std::ofstream inner_stream_;
+public:
+
+    AnnotationOutStream(const std::string& fn) : inner_stream_(fn) {
+    }
+
+    AnnotationOutStream& operator <<(const ContigAnnotation& annotation) {
+        inner_stream_ << annotation.first;
+        // First separator is " : ", subsequent bins are space-separated.
+        string delim = " : ";
+        for (bin_id bin : annotation.second) {
+            inner_stream_ << delim << bin;
+            delim = " ";
+        }
+        inner_stream_ << endl;
+        return *this;
+    }
+
+    void close() {
+        inner_stream_.close();
+    }
+};
+
+// Per-edge bin annotation of an assembly graph. Annotations are kept in
+// sync for conjugate edge pairs (StickAnnotation always marks both).
+class EdgeAnnotation {
+    const conj_graph_pack& gp_;
+    set<bin_id> bins_of_interest_;
+    map<EdgeId, set<bin_id>> edge_annotation_;
+
+    // Adds bins to a single edge (no conjugate handling).
+    template<class BinCollection>
+    void InnerStickAnnotation(EdgeId e, const BinCollection& bins) {
+        edge_annotation_[e].insert(bins.begin(), bins.end());
+    }
+
+public:
+
+    EdgeAnnotation(const conj_graph_pack& gp,
+                   const set<bin_id>& bins_of_interest) :
+                       gp_(gp),
+                       bins_of_interest_(bins_of_interest)
+    {
+    }
+
+    // Annotates an edge and its conjugate with the given bins.
+    template<class BinCollection>
+    void StickAnnotation(EdgeId e, const BinCollection& bins) {
+        InnerStickAnnotation(e, bins);
+        InnerStickAnnotation(gp_.g.conjugate(e), bins);
+    }
+
+    void StickAnnotation(EdgeId e, const bin_id& bin) {
+        StickAnnotation(e, vector<bin_id>{bin});
+    }
+
+    template<class EdgeCollection>
+    void StickAnnotation(const EdgeCollection& edges, const bin_id& bin) {
+        for (EdgeId e : edges) {
+            StickAnnotation(e, bin);
+        }
+    }
+
+    // Bins recorded for one edge; empty vector when unannotated.
+    vector<bin_id> Annotation(EdgeId e) const {
+        if (!edge_annotation_.count(e)) {
+            return {};
+        }
+        const auto& annotation = get(edge_annotation_, e);
+        return vector<bin_id>(annotation.begin(), annotation.end());
+    }
+
+    // Union of bins over all edges of a path.
+    set<bin_id> RelevantBins(const vector<EdgeId>& path) const {
+        set<bin_id> answer;
+        for (EdgeId e : path) {
+            insert_all(answer, Annotation(e));
+        }
+        return answer;
+    }
+
+    // All edges annotated with the given bin and strictly longer than
+    // min_length.
+    set<EdgeId> EdgesOfBin(bin_id bin, size_t min_length = 0) const {
+        set<EdgeId> answer;
+        for (auto ann_pair : edge_annotation_) {
+            if (ann_pair.second.count(bin) &&
+                    gp_.g.length(ann_pair.first) > min_length) {
+                answer.insert(ann_pair.first);
+            }
+        }
+        return answer;
+    }
+
+    // Number of annotated edges (conjugates counted separately).
+    size_t size() const {
+        return edge_annotation_.size();
+    }
+
+    const set<bin_id>& interesting_bins() const {
+        return bins_of_interest_;
+    }
+
+};
+
+// Builds an EdgeAnnotation from split-contig annotations: maps splits back
+// onto graph edges, accumulates per-bin covered length, filters weak
+// colors, then annotates whole-contig paths by majority coverage.
+class AnnotationFiller {
+    const conj_graph_pack& gp_;
+    set<bin_id> interesting_bins_;
+    shared_ptr<SequenceMapper<Graph>> mapper_;
+
+    // Maps a contig onto the graph and returns its simple edge path.
+    vector<EdgeId> EdgesOfContig(const io::SingleRead& contig) const {
+        return mapper_->MapRead(contig).simple_path();
+    }
+
+    // Keeps only bins from interesting_bins_; an empty filter keeps all.
+    Bins FilterInteresting(const Bins& bins) const {
+        if (interesting_bins_.empty()) {
+            return bins;
+        } else {
+            Bins answer;
+            for (const bin_id& bin : bins) {
+                if (interesting_bins_.count(bin)) {
+                    answer.push_back(bin);
+                } 
+            }
+            return answer;
+        }
+    }
+
+    // Reads the whole annotation stream into contig id -> bin set,
+    // dropping records left empty after filtering.
+    map<contig_id, std::set<bin_id>> LoadAnnotation(AnnotationStream& splits_annotation_stream) const {
+        map<contig_id, std::set<bin_id>> annotation_map;
+        INFO("Reading (split) contigs annotation");
+        ContigAnnotation contig_annotation;
+        size_t cnt = 0;
+        while (!splits_annotation_stream.eof()) {
+            splits_annotation_stream >> contig_annotation;
+            auto bins = FilterInteresting(contig_annotation.second);
+            if (!bins.empty()) {
+                insert_all(annotation_map[contig_annotation.first], bins);
+            }
+            ++cnt;
+        }
+        INFO(cnt << " records read; annotation available for " << annotation_map.size() << " splits");
+        return annotation_map;
+    };
+
+    // Adds the split's mapped range length to every (edge, bin) counter
+    // along its mapping path.
+    void ProcessSplit(const io::SingleRead& split, std::set<bin_id> bins,
+                      map<EdgeId, map<bin_id, size_t>>& coloring) const {
+        auto mapping_path = mapper_->MapRead(split);
+        for (size_t i = 0; i < mapping_path.size(); ++i) {
+            auto map_info = mapping_path[i];
+            MappingRange mr = map_info.second;
+            auto& bin_lens = coloring[map_info.first];
+            for (bin_id b : bins) {
+                bin_lens[b] += mr.mapped_range.size();
+            }
+        }
+    }
+
+    // Accumulates edge -> (bin -> covered length) over all annotated
+    // splits; each split is processed forward and reverse-complemented.
+    map<EdgeId, map<bin_id, size_t>> FillColorInfo(io::SingleStream& splits_stream,
+                                               const map<contig_id, std::set<bin_id>>& split_annotation) const {
+        INFO("Sticking annotation to edges");
+        map<EdgeId, map<bin_id, size_t>> answer;
+        io::SingleRead split;
+        while (!splits_stream.eof()) {
+            splits_stream >> split;
+            auto id = GetId(split);
+            auto bins = split_annotation.find(id);
+            if (bins != split_annotation.end() && !(bins->second.empty())) {
+                ProcessSplit(split, bins->second, answer);
+                //TODO think if it is overkill
+                ProcessSplit(!split, bins->second, answer);
+            }
+        }
+        INFO("Color info available for " << answer.size() << " edges");
+        return answer;
+    };
+
+    // Erases bin colors covering less than 30% of the edge length.
+    // NOTE(review): this 0.3 threshold is duplicated in DetermineBins --
+    // consider a named constant.
+    void FilterSpuriousInfo(map<EdgeId, map<bin_id, size_t>>& coloring) const {
+        for (auto& edge_info : coloring) {
+            size_t edge_len = gp_.g.length(edge_info.first);
+            for (auto color_it = edge_info.second.begin(); color_it != edge_info.second.end(); ) {
+                if (math::ls(double(color_it->second) / double(edge_len), 0.3)) {
+                    edge_info.second.erase(color_it++);
+                } else {
+                    ++color_it;
+                }
+            }
+        }
+    }
+
+    // Union of all bins that survived filtering; used when no explicit
+    // bins of interest were supplied.
+    set<bin_id> GatherAllBins(const map<EdgeId, map<bin_id, size_t>>& coloring) const {
+        set<bin_id> answer;
+        for (const auto& edge_info : coloring) {
+            for (const auto& bin_info : edge_info.second) {
+                answer.insert(bin_info.first);
+            }
+        }
+        return answer;
+    }
+
+    // Votes per bin by edge length along the path; keeps bins whose
+    // annotated edges cover more than 30% of the total path length.
+    set<bin_id> DetermineBins(const vector<EdgeId>& path,
+                              const map<EdgeId, map<bin_id, size_t>>& coloring) const {
+        map<bin_id, size_t> path_colors;
+        size_t total_len = 0;
+        for (EdgeId e : path) {
+            size_t edge_len = gp_.g.length(e);
+            total_len += edge_len;
+            auto it = coloring.find(e);
+            if (it != coloring.end()) {
+                for (auto color_info : it->second) {
+                    //TODO think carefully
+                    path_colors[color_info.first] += edge_len; //color_info.second;
+                }
+            }
+        }
+        set<bin_id> answer;
+        for (auto color_info : path_colors) {
+            if (math::gr(double(color_info.second) / double(total_len), 0.3)) {
+                answer.insert(color_info.first);
+            }
+        }
+        return answer;
+    }
+
+public:
+
+    AnnotationFiller(const conj_graph_pack& gp,
+                     const vector<bin_id>& interesting_bins) :
+        gp_(gp),
+        interesting_bins_(interesting_bins.begin(), interesting_bins.end()),
+        mapper_(MapperInstance(gp)) {
+    }
+
+    // Full pipeline: load split annotation, color edges, filter noise,
+    // then re-annotate whole contigs edge by edge.
+    EdgeAnnotation operator() (io::SingleStream& contig_stream,
+                     io::SingleStream& splits_stream,
+                     AnnotationStream& splits_annotation_stream) {
+        INFO("Filling edge annotation");
+        INFO("Interesting bins " << interesting_bins_);
+
+        auto coloring = FillColorInfo(splits_stream, LoadAnnotation(splits_annotation_stream));
+        FilterSpuriousInfo(coloring);
+
+        EdgeAnnotation edge_annotation(gp_, interesting_bins_.empty() ? GatherAllBins(coloring) : interesting_bins_);
+
+        io::SingleRead contig;
+        while (!contig_stream.eof()) {
+            contig_stream >> contig;
+            auto path = mapper_->MapRead(contig).simple_path();
+            auto bins = DetermineBins(path, coloring);
+            for (EdgeId e : path) {
+                edge_annotation.StickAnnotation(e, bins);
+            }
+        }
+
+        INFO("Edge annotation filled. Annotated " << edge_annotation.size() << " edges.");
+        return edge_annotation;
+    }
+};
+}
diff --git a/src/projects/mts/config.yaml b/src/projects/mts/config.yaml
new file mode 100644
index 0000000..0150528
--- /dev/null
+++ b/src/projects/mts/config.yaml
@@ -0,0 +1,10 @@
+# Sample MTS pipeline configuration (paths are site-specific examples).
+IN: "/Sid/snurk/mts/sim/data"
+SPADES: "~/Projects/mts/assembler/"
+# NOTE(review): Stats.snake joins QUAST with "quast.py" as if it were a
+# directory, but this value is a full command line -- confirm which form
+# the workflow expects.
+QUAST: "python2 ~/opt/quast-3.2/metaquast.py"
+BIN: "~/Projects/mts/assembler/build/release/bin"
+SCRIPTS: "~/Projects/mts/assembler/src/projects/mts/scripts"
+SOFT: "/home/snurk/soft/"
+REF: "/Sid/snurk/mts/nielsen/ref.fasta"
+K: 55
+small_k: 21
+# Contigs shorter than this are ignored by the stats filtering.
+MIN_CONTIG_LENGTH: 2000
diff --git a/src/projects/mts/contig_abundance.cpp b/src/projects/mts/contig_abundance.cpp
new file mode 100644
index 0000000..ef00ce7
--- /dev/null
+++ b/src/projects/mts/contig_abundance.cpp
@@ -0,0 +1,176 @@
+#include "contig_abundance.hpp"
+#include "utils/indices/kmer_splitters.hpp"
+
+namespace debruijn_graph {
+
+// Process-wide sample count shared by all KmerProfile instances (their
+// size() reads it). Must be set via SetSampleCount before profiles are
+// used. NOTE(review): plain global with no synchronization visible --
+// assumed to be set once during startup; confirm.
+size_t sample_cnt_ = 0;
+
+void SetSampleCount(size_t sample_cnt) {
+    sample_cnt_ = sample_cnt;
+}
+
+size_t SampleCount() {
+    return sample_cnt_;
+}
+
+// Extracts one sample's multiplicity from every k-mer profile.
+MplVector SingleClusterAnalyzer::SampleMpls(const KmerProfiles& kmer_mpls, size_t sample) const {
+    MplVector answer;
+    answer.reserve(kmer_mpls.size());
+    for (const auto& kmer_mpl : kmer_mpls) {
+        answer.push_back(kmer_mpl[sample]);
+    }
+    return answer;
+}
+
+// Median multiplicity of one sample (upper median via nth_element).
+Mpl SingleClusterAnalyzer::SampleMedian(const KmerProfiles& kmer_mpls, size_t sample) const {
+    std::vector<Mpl> sample_mpls = SampleMpls(kmer_mpls, sample);
+
+    std::nth_element(sample_mpls.begin(), sample_mpls.begin() + sample_mpls.size()/2, sample_mpls.end());
+    return sample_mpls[sample_mpls.size()/2];
+}
+
+// Component-wise median over all samples; requires a non-empty profile set.
+MplVector SingleClusterAnalyzer::MedianVector(const KmerProfiles& kmer_mpls) const {
+    VERIFY(kmer_mpls.size() != 0);
+    MplVector answer(SampleCount(), 0);
+    for (size_t i = 0; i < SampleCount(); ++i) {
+        answer[i] = SampleMedian(kmer_mpls, i);
+    }
+    return answer;
+}
+
+// Proximity test: sum of per-coordinate |c-v| normalized by c (where
+// nonzero) must stay below coord_vise_proximity_ per nonzero coordinate.
+bool SingleClusterAnalyzer::AreClose(const KmerProfile& c, const KmerProfile& v) const {
+    //VERIFY(c.size() == v.size());
+    double sum = 0;
+    size_t non_zero_cnt = 0;
+    for (size_t i = 0; i < c.size(); ++i) {
+        double norm = 1.;
+        if (c[i] != 0) {
+            //norm = std::sqrt(double(c[i]));
+            norm = double(c[i]);
+            ++non_zero_cnt;
+        }
+        sum += std::abs(double(c[i]) - double(v[i])) / norm;
+    }
+    return math::ls(sum, coord_vise_proximity_ * double(non_zero_cnt));
+}
+
+// Keeps only profiles close (per AreClose) to the given center.
+KmerProfiles SingleClusterAnalyzer::CloseKmerMpls(const KmerProfiles& kmer_mpls, const KmerProfile& center) const {
+    KmerProfiles answer;
+    for (const auto& kmer_mpl : kmer_mpls) {
+        if (AreClose(center, kmer_mpl)) {
+            answer.push_back(kmer_mpl);
+        } else {
+            TRACE("Far kmer mpl " << PrintVector(kmer_mpl));
+        }
+    }
+    return answer;
+}
+
+// Returns the per-sample median multiplicity vector as the contig's
+// abundance estimate. The iterative central-cluster refinement below is
+// currently disabled (early return); it never yields boost::none here.
+boost::optional<AbundanceVector> SingleClusterAnalyzer::operator()(const KmerProfiles& kmer_mpls) const {
+    auto med = MedianVector(kmer_mpls);
+    return AbundanceVector(med.begin(), med.end());
+    //return boost::optional<AbundanceVector>(answer);
+    //MplVector center = MedianVector(kmer_mpls);
+    //auto locality = CloseKmerMpls(kmer_mpls, KmerProfile(center));
+
+    //for (size_t it_cnt = 0; it_cnt < MAX_IT; ++it_cnt) {
+    //    DEBUG("Iteration " << it_cnt);
+    //    DEBUG("Center is " << PrintVector(center));
+
+    //    DEBUG("Locality size is " << locality.size()
+    //              << " making " << (double(locality.size()) / double(kmer_mpls.size()))
+    //              << " of total # points");
+
+    //    double center_share = double(locality.size()) / double(kmer_mpls.size());
+    //    if (math::ls(center_share, central_clust_share_)) {
+    //        DEBUG("Detected central area contains too few k-mers: share " << center_share
+    //                  << " ; center size " << locality.size()
+    //                  << " ; total size " << kmer_mpls.size());
+    //        return boost::none;
+    //    }
+
+    //    MplVector update = MedianVector(locality);
+    //    DEBUG("Center update is " << PrintVector(update));
+
+    //    if (center == update) {
+    //        DEBUG("Old and new centers matched on iteration " << it_cnt);
+    //        break;
+    //    }
+
+    //    center = update;
+    //    locality = CloseKmerMpls(kmer_mpls, center);
+    //}
+
+    //return boost::optional<AbundanceVector>(MeanVector(locality, sample_cnt_));
+}
+
+// Splits a sequence into maximal runs of valid nucleotides (everything
+// else, e.g. Ns, acts as a separator).
+vector<std::string> ContigAbundanceCounter::SplitOnNs(const std::string& seq) const {
+    vector<std::string> answer;
+    for (size_t i = 0; i < seq.size(); i++) {
+        size_t j = i;
+        while (j < seq.size() && is_nucl(seq[j])) {
+            j++;
+        }
+        if (j > i) {
+            answer.push_back(seq.substr(i, j - i));
+            i = j;
+        }
+    }
+    return answer;
+}
+
+// Loads the k-mer index (.kmm) and the flat per-k-mer multiplicity
+// matrix (.bpr, SampleCount() values per k-mer). SetSampleCount must
+// have been called first.
+// NOTE(review): neither read is checked for success -- a truncated file
+// would go unnoticed; confirm upstream guarantees.
+void ContigAbundanceCounter::Init(const std::string& file_prefix) {
+    VERIFY(SampleCount() != 0);
+    INFO("Loading kmer index");
+    std::ifstream kmers_in(file_prefix + ".kmm", std::ios::binary);
+    kmer_mpl_.BinRead(kmers_in, file_prefix + ".kmm");
+
+    INFO("Loading kmer profiles data");
+    const size_t data_size = SampleCount() * kmer_mpl_.size();
+    mpl_data_.resize(data_size);
+    std::ifstream mpls_in(file_prefix + ".bpr", std::ios::binary);
+    mpls_in.read((char *)&mpl_data_[0], data_size * sizeof(Mpl));
+}
+
+// Estimates a contig's abundance: slides a rolling k-mer over each
+// N-free fragment, collects profiles of k-mers present in the index
+// ("earmarks"), and rejects contigs whose earmark share is too low.
+boost::optional<AbundanceVector> ContigAbundanceCounter::operator()(
+        const std::string& s,
+        const std::string& /*name*/) const {
+    KmerProfiles kmer_mpls;
+
+    for (const auto& seq : SplitOnNs(s)) {
+        if (seq.size() < k_)
+            continue;
+
+        // Prime the rolling k-mer one step back, then shift in each base.
+        auto kwh = kmer_mpl_.ConstructKWH(RtSeq(k_, seq));
+        kwh >>= 'A';
+
+        for (size_t j = k_ - 1; j < seq.size(); ++j) {
+            kwh <<= seq[j];
+            TRACE("Processing kmer " << kwh.key().str());
+            if (kmer_mpl_.valid(kwh)) {
+                TRACE("Valid");
+                KmerProfile prof(&mpl_data_[kmer_mpl_.get_value(kwh, inverter_)]);
+                kmer_mpls.push_back(prof);
+                //if (!name.empty()) {
+                //    os << PrintVector(kmer_mpl_.get_value(kwh, inverter_), sample_cnt_) << std::endl;
+                //}
+                TRACE(PrintVector(prof));
+            } else {
+                TRACE("Invalid");
+            }
+        }
+    }
+
+    double earmark_share = double(kmer_mpls.size()) / double(s.size() - k_ + 1);
+    DEBUG("Earmark k-mers: share " << earmark_share
+              << " # earmarks " << kmer_mpls.size()
+              << " ; total # " << (s.size() - k_ + 1));
+    if (math::ls(earmark_share, min_earmark_share_)) {
+        DEBUG("Too few earmarks");
+        return boost::none;
+    }
+
+    return cluster_analyzer_(kmer_mpls);
+}
+
+}
diff --git a/src/projects/mts/contig_abundance.hpp b/src/projects/mts/contig_abundance.hpp
new file mode 100644
index 0000000..fb5c9d7
--- /dev/null
+++ b/src/projects/mts/contig_abundance.hpp
@@ -0,0 +1,143 @@
+#pragma once
+
+#include "pipeline/graph_pack.hpp"
+#include "utils/indices/perfect_hash_map_builder.hpp"
+
+namespace debruijn_graph {
+
+// Mpl: per-sample k-mer multiplicity; Offset: index into the flat
+// multiplicity matrix. INVALID_MPL is the all-ones sentinel.
+typedef uint16_t Mpl;
+typedef std::size_t Offset;
+static const Mpl INVALID_MPL = Mpl(-1);
+
+typedef typename std::vector<Mpl> MplVector;
+typedef typename std::vector<double> AbundanceVector;
+
+// Global sample count used by KmerProfile::size(); set once at startup.
+void SetSampleCount(size_t sample_cnt);
+size_t SampleCount();
+
+// Non-owning view over SampleCount() consecutive Mpl values (one
+// multiplicity per sample for a single k-mer).
+class KmerProfile {
+
+public:
+    typedef Mpl value_type;
+
+    KmerProfile(const value_type* ptr = nullptr):
+        ptr_(ptr) {
+    }
+
+    // NOTE(review): stores a pointer into vec's buffer -- the vector must
+    // outlive this profile and must not reallocate.
+    KmerProfile(const MplVector& vec):
+        ptr_(&vec.front()) {
+    }
+
+    // Length comes from the global sample count, not from the buffer.
+    size_t size() const {
+        return SampleCount();
+    }
+
+    Mpl operator[](size_t i) const {
+        VERIFY(i < size());
+        return ptr_[i];
+    }
+
+    const value_type* begin() const {
+        return ptr_;
+    }
+
+    const value_type* end() const {
+        return ptr_ + size();
+    }
+
+private:
+    const value_type* ptr_;
+};
+
+typedef std::vector<KmerProfile> KmerProfiles;
+
+// Component-wise mean of a non-empty collection of coverage vectors.
+template<class CovVecs>
+AbundanceVector MeanVector(const CovVecs& cov_vecs) {
+    VERIFY(cov_vecs.size() != 0);
+    size_t sample_cnt = cov_vecs.front().size();
+    AbundanceVector answer(sample_cnt, 0.);
+
+    for (const auto& cov_vec : cov_vecs) {
+        for (size_t i = 0; i < sample_cnt; ++i) {
+            answer[i] += double(cov_vec[i]);
+        }
+    }
+
+    for (size_t i = 0; i < sample_cnt; ++i) {
+        answer[i] /= double(cov_vecs.size());
+    }
+    return answer;
+}
+
+// Space-separated rendering of any vector-like sequence (for logging).
+template<class AbVector>
+std::string PrintVector(const AbVector& mpl_vector) {
+    stringstream ss;
+    copy(mpl_vector.begin(), mpl_vector.end(),
+         ostream_iterator<typename AbVector::value_type>(ss, " "));
+    return ss.str();
+}
+
+// Turns a set of k-mer multiplicity profiles into a single abundance
+// vector. The current implementation returns the per-sample median;
+// the MAX_IT / central-cluster members support an iterative refinement
+// that is commented out in the .cpp.
+class SingleClusterAnalyzer {
+    static const uint MAX_IT = 10;
+
+    double coord_vise_proximity_;
+    double central_clust_share_;
+
+    MplVector SampleMpls(const KmerProfiles& kmer_mpls, size_t sample) const;
+    Mpl SampleMedian(const KmerProfiles& kmer_mpls, size_t sample) const;
+    MplVector MedianVector(const KmerProfiles& kmer_mpls) const;
+    bool AreClose(const KmerProfile& c, const KmerProfile& v) const;
+    KmerProfiles CloseKmerMpls(const KmerProfiles& kmer_mpls, const KmerProfile& center) const;
+
+public:
+    SingleClusterAnalyzer(double coord_vise_proximity = 0.7,
+                          double central_clust_share = 0.7) :
+        coord_vise_proximity_(coord_vise_proximity),
+        central_clust_share_(central_clust_share) {
+    }
+
+    // Returns boost::none when no reliable estimate can be made.
+    boost::optional<AbundanceVector> operator()(const KmerProfiles& kmer_mpls) const;
+
+private:
+    DECL_LOGGER("SingleClusterAnalyzer");
+};
+
+// Estimates per-sample abundance of a contig from a precomputed k-mer
+// multiplicity index (loaded via Init) and a cluster analyzer.
+class ContigAbundanceCounter {
+    typedef typename InvertableStoring::trivial_inverter<Offset> InverterT;
+
+    typedef KeyStoringMap<conj_graph_pack::seq_t,
+                          Offset,
+                          kmer_index_traits<conj_graph_pack::seq_t>,
+                          InvertableStoring> IndexT;
+
+    unsigned k_;
+    SingleClusterAnalyzer cluster_analyzer_;
+    double min_earmark_share_;   // min share of indexed k-mers required
+    IndexT kmer_mpl_;            // k-mer -> offset into mpl_data_
+    InverterT inverter_;
+    std::vector<Mpl> mpl_data_;  // flat SampleCount()-wide profile matrix
+
+    // NOTE(review): declared but no definition is visible in the
+    // accompanying .cpp -- possibly dead; confirm.
+    void FillMplMap(const std::string& kmers_mpl_file);
+
+    vector<std::string> SplitOnNs(const std::string& seq) const;
+
+public:
+    ContigAbundanceCounter(unsigned k,
+                           const SingleClusterAnalyzer& cluster_analyzer,
+                           const std::string& work_dir,
+                           double min_earmark_share = 0.7) :
+        k_(k),
+        cluster_analyzer_(cluster_analyzer),
+        min_earmark_share_(min_earmark_share),
+        kmer_mpl_(k_, work_dir) {
+    }
+
+    // Loads the index files; must be called before operator().
+    void Init(const std::string& kmer_mpl_file);
+
+    // Returns boost::none when too few contig k-mers are in the index.
+    boost::optional<AbundanceVector> operator()(const std::string& s, const std::string& /*name*/ = "") const;
+
+private:
+    DECL_LOGGER("ContigAbundanceCounter");
+};
+
+}
diff --git a/src/projects/mts/contig_abundance_counter.cpp b/src/projects/mts/contig_abundance_counter.cpp
new file mode 100644
index 0000000..f2a2ba8
--- /dev/null
+++ b/src/projects/mts/contig_abundance_counter.cpp
@@ -0,0 +1,101 @@
+#include <array>
+#include <string>
+#include <iostream>
+#include "getopt_pp/getopt_pp.h"
+#include "io/reads/file_reader.hpp"
+#include "io/reads/osequencestream.hpp"
+#include "pipeline/graphio.hpp"
+#include "logger.hpp"
+#include "formats.hpp"
+#include "contig_abundance.hpp"
+
+using namespace debruijn_graph;
+
+//Helper class to have scoped DEBUG()
+class Runner {
+public:
+    static void Run(ContigAbundanceCounter& abundance_counter, size_t min_length_bound,
+                    io::FileReadStream& contigs_stream, io::osequencestream& splits_os,
+                    std::ofstream& id_out, std::ofstream& mpl_out) {
+        static const size_t split_length = 10000;
+        io::SingleRead full_contig;
+        while (!contigs_stream.eof()) {
+            contigs_stream >> full_contig;
+            DEBUG("Analyzing contig " << GetId(full_contig));
+
+            for (size_t i = 0; i < full_contig.size(); i += split_length) {
+                if (full_contig.size() - i < min_length_bound) {
+                    DEBUG("Fragment shorter than min_length_bound " << min_length_bound);
+                    break;
+                }
+
+                io::SingleRead contig = full_contig.Substr(i, std::min(i + split_length, full_contig.size()));
+                splits_os << contig;
+
+                contig_id id = GetId(contig);
+                DEBUG("Processing fragment # " << (i / split_length) << " with id " << id);
+
+                auto abundance_vec = abundance_counter(contig.GetSequenceString(), contig.name());
+
+                if (abundance_vec) {
+                    stringstream ss;
+                    copy(abundance_vec->begin(), abundance_vec->end(),
+                         ostream_iterator<Mpl>(ss, " "));
+                    DEBUG("Successfully estimated abundance of " << id << " : " << ss.str());
+
+                    id_out << id << std::endl;
+                    mpl_out << ss.str() << std::endl;
+                } else {
+                    DEBUG("Failed to estimate abundance of " << id);
+                }
+            }
+        }
+    }
+private:
+    DECL_LOGGER("ContigAbundanceCounter");
+};
+
+int main(int argc, char** argv) {
+    using namespace GetOpt;
+
+    unsigned k;
+    size_t sample_cnt, min_length_bound;
+    std::string work_dir, contigs_path, splits_path;
+    std::string kmer_mult_fn, contigs_abundance_fn;
+
+    try {
+        GetOpt_pp ops(argc, argv);
+        ops.exceptions_all();
+        ops >> Option('k', k)
+            >> Option('w', work_dir)
+            >> Option('c', contigs_path)
+            >> Option('f', splits_path)
+            >> Option('n', sample_cnt)
+            >> Option('m', kmer_mult_fn)
+            >> Option('o', contigs_abundance_fn)
+            >> Option('l', min_length_bound, size_t(0));
+    } catch(GetOptEx &ex) {
+        std::cout << "Usage: contig_abundance_counter -k <K> -w <work_dir> -c <contigs path> "
+                "-n <sample cnt> -m <kmer multiplicities path> -f <splits_path> "
+                "-o <contigs abundance path> [-l <contig length bound> (default: 0)]"  << std::endl;
+        exit(1);
+    }
+
+    //TmpFolderFixture fixture("tmp");
+    create_console_logger();
+
+    SetSampleCount(sample_cnt);
+    ContigAbundanceCounter abundance_counter(k, SingleClusterAnalyzer(), work_dir);
+    abundance_counter.Init(kmer_mult_fn);
+
+    io::FileReadStream contigs_stream(contigs_path);
+    io::osequencestream splits_os(splits_path);
+
+    std::ofstream id_out(contigs_abundance_fn + ".id");
+    std::ofstream mpl_out(contigs_abundance_fn + ".mpl");
+
+    Runner::Run(abundance_counter, min_length_bound,
+                contigs_stream, splits_os,
+                id_out, mpl_out);
+    return 0;
+}
diff --git a/src/projects/mts/formats.hpp b/src/projects/mts/formats.hpp
new file mode 100644
index 0000000..565e3b6
--- /dev/null
+++ b/src/projects/mts/formats.hpp
@@ -0,0 +1,29 @@
+#pragma once
+
+#include "io/reads/single_read.hpp"
+
+namespace debruijn_graph {
+
+typedef std::string bin_id;
+typedef std::string contig_id;
+typedef std::vector<bin_id> Bins;
+typedef std::pair<contig_id, Bins> ContigAnnotation;
+
+inline contig_id GetId(const io::SingleRead& contig) {
+//     std::string name = contig.name();
+//     size_t pos = name.find("_ID_");
+//     VERIFY(pos != std::string::npos);
+//     size_t start = pos + 4;
+//     VERIFY(start < name.size());
+//     return name.substr(start, name.size() - start);
+    return contig.name();
+}
+
+inline contig_id GetBaseId(const contig_id& id) {
+    size_t pos = id.find('_');
+    VERIFY(pos != string::npos && id.substr(0, pos) == "NODE");
+    size_t pos2 = id.find('_', pos + 1);
+    return id.substr(pos + 1, pos2 - pos - 1);
+}
+
+}
diff --git a/src/projects/mts/kmc_api/kmc_file.cpp b/src/projects/mts/kmc_api/kmc_file.cpp
new file mode 100644
index 0000000..c4c674c
--- /dev/null
+++ b/src/projects/mts/kmc_api/kmc_file.cpp
@@ -0,0 +1,1093 @@
+/*
+  This file is a part of KMC software distributed under GNU GPL 3 licence.
+  The homepage of the KMC project is http://sun.aei.polsl.pl/kmc
+
+  Authors: Sebastian Deorowicz, Agnieszka Debudaj-Grabysz, Marek Kokot
+
+  Version: 2.2.0
+  Date   : 2015-04-15
+*/
+
+#include "stdafx.h"
+#include "mmer.h"
+#include "kmc_file.h"
+#include <iostream>
+#include <tuple>
+
+
+uint64 CKMCFile::part_size = 1 << 25;
+
+
+// ----------------------------------------------------------------------------------
+// Open files *.kmc_pre & *.kmc_suf, read them to RAM, close files. 
+// The file *.kmc_suf is opened for random access
+// IN	: file_name - the name of kmer_counter's output
+// RET	: true		- if successful
+// ----------------------------------------------------------------------------------
+bool CKMCFile::OpenForRA(const std::string &file_name)
+{
+	uint64 size;
+	size_t result;
+
+	if (file_pre || file_suf)
+		return false;
+
+	if (!OpenASingleFile(file_name + ".kmc_pre", file_pre, size, (char *)"KMCP"))
+		return false;
+
+	ReadParamsFrom_prefix_file_buf(size);
+
+	fclose(file_pre);
+	file_pre = NULL;
+		
+	if (!OpenASingleFile(file_name + ".kmc_suf", file_suf, size, (char *)"KMCS"))
+		return false;
+
+	sufix_file_buf = new uchar[size];
+	result = fread(sufix_file_buf, 1, size, file_suf);
+	if (result == 0)
+		return false;
+
+	fclose(file_suf);
+	file_suf = NULL;
+
+	is_opened = opened_for_RA;
+	prefix_index = 0;
+	sufix_number = 0;
+	return true;
+}
+
+//----------------------------------------------------------------------------------
+// Open files *.kmc_pre & *.kmc_suf, read *.kmc_pre to RAM, close *.kmc_pre
+// *.kmc_suf is buffered
+// IN	: file_name - the name of kmer_counter's output
+// RET	: true		- if successful
+//----------------------------------------------------------------------------------
+bool CKMCFile::OpenForListing(const std::string &file_name)
+{
+	uint64 size;
+	size_t result;
+
+	if (is_opened)
+		return false;
+	
+	if (file_pre || file_suf)
+		return false;
+
+	if (!OpenASingleFile(file_name + ".kmc_pre", file_pre, size, (char *)"KMCP"))
+		return false;
+
+	ReadParamsFrom_prefix_file_buf(size);
+	fclose(file_pre);
+	file_pre = NULL;
+
+	end_of_file = total_kmers == 0;
+
+	if (!OpenASingleFile(file_name + ".kmc_suf", file_suf, size, (char *)"KMCS"))
+		return false;
+
+	sufix_file_buf = new uchar[part_size];
+	result = fread(sufix_file_buf, 1, part_size, file_suf);
+	if (result == 0)
+		return false;
+
+	is_opened = opened_for_listing;
+	prefix_index = 0;
+	sufix_number = 0;
+	index_in_partial_buf = 0;
+	return true;
+}
+//----------------------------------------------------------------------------------
+CKMCFile::CKMCFile()
+{
+	file_pre = NULL;	
+	file_suf = NULL;
+
+	prefix_file_buf = NULL;
+	sufix_file_buf = NULL;
+	signature_map = NULL;
+
+	is_opened = closed;
+	end_of_file = false;
+};
+//----------------------------------------------------------------------------------	
+CKMCFile::~CKMCFile()
+{
+	if (file_pre)
+		fclose(file_pre);
+	if (file_suf)
+		fclose(file_suf);
+	if (prefix_file_buf)
+		delete[] prefix_file_buf;
+	if (sufix_file_buf)
+		delete[] sufix_file_buf;
+	if (signature_map)
+		delete[] signature_map;
+};
+//----------------------------------------------------------------------------------	
+// Open a file, recognize its size and check its marker. Auxiliary function.
+// IN	: file_name - the name of a file to open
+// RET	: true		- if successful
+//----------------------------------------------------------------------------------
+bool CKMCFile::OpenASingleFile(const std::string &file_name, FILE *&file_handler, uint64 &size, char marker[])
+{
+	char _marker[4];
+	size_t result;
+
+	if ((file_handler = my_fopen(file_name.c_str(), "rb")) == NULL)
+		return false;
+
+	my_fseek(file_handler, 0, SEEK_END);
+	size = my_ftell(file_handler);					//the size of a whole file
+
+	my_fseek(file_handler, -4, SEEK_CUR);
+	result = fread(_marker, 1, 4, file_handler);
+	if (result == 0)
+		return false;
+
+	size = size - 4;							//the size of the file without the terminal marker
+	if (strncmp(marker, _marker, 4) != 0)
+	{
+		fclose(file_handler);
+		file_handler = NULL;
+		return false;
+	}
+
+	rewind(file_handler);
+	result = fread(_marker, 1, 4, file_handler);
+	if (result == 0)
+		return false;
+
+	size = size - 4;							//the size of the file without initial and terminal markers 
+
+	if (strncmp(marker, _marker, 4) != 0)
+	{
+		fclose(file_handler);
+		file_handler = NULL;
+		return false;
+	}
+
+	return true;
+};
+//-------------------------------------------------------------------------------------
+// Recognize current parameters from the KMC database. Auxiliary function.
+// IN	: the size of the file *.kmc_pre, without initial and terminal markers 
+// RET	: true - if successful
+//----------------------------------------------------------------------------------
+bool CKMCFile::ReadParamsFrom_prefix_file_buf(uint64 &size)
+{
+	size_t prev_pos = my_ftell(file_pre);
+	my_fseek(file_pre, -12, SEEK_END);
+	size_t result;
+
+	result = fread(&kmc_version, sizeof(uint32), 1, file_pre);
+	if (kmc_version != 0 && kmc_version != 0x200) //only these versions are supported, 0 = kmc1, 0x200 = kmc2
+		return false;
+	my_fseek(file_pre, prev_pos, SEEK_SET);
+
+	if (kmc_version == 0x200)
+	{
+		my_fseek(file_pre, -8, SEEK_END);
+		
+		int64 header_offset;
+		header_offset = fgetc(file_pre);
+		
+		size = size - 4;	//file size without the size of header_offset (and without 2 markers)
+
+		my_fseek(file_pre, (0LL - (header_offset + 8)), SEEK_END);
+		result = fread(&kmer_length, 1, sizeof(uint32), file_pre);
+		result = fread(&mode, 1, sizeof(uint32), file_pre);
+		result = fread(&counter_size, 1, sizeof(uint32), file_pre);
+		result = fread(&lut_prefix_length, 1, sizeof(uint32), file_pre);
+		result = fread(&signature_len, 1, sizeof(uint32), file_pre);
+		result = fread(&min_count, 1, sizeof(uint32), file_pre);
+		original_min_count = min_count;
+		result = fread(&max_count, 1, sizeof(uint32), file_pre);
+		original_max_count = max_count;
+		result = fread(&total_kmers, 1, sizeof(uint64), file_pre);
+
+		signature_map_size = ((1 << (2 * signature_len)) + 1);
+		uint64 lut_area_size_in_bytes = size - (signature_map_size * sizeof(uint32)+header_offset + 8);
+		single_LUT_size = 1 << (2 * lut_prefix_length);
+		uint64 last_data_index = lut_area_size_in_bytes / sizeof(uint64);
+
+		rewind(file_pre);
+		my_fseek(file_pre, +4, SEEK_CUR);
+		prefix_file_buf_size = (lut_area_size_in_bytes + 8) / sizeof(uint64);		//reads without 4 bytes of a header_offset (and without markers)		
+		prefix_file_buf = new uint64[prefix_file_buf_size];
+		result = fread(prefix_file_buf, 1, (size_t)(lut_area_size_in_bytes + 8), file_pre);
+		if (result == 0)
+			return false;
+		prefix_file_buf[last_data_index] = total_kmers + 1;
+
+		signature_map = new uint32[signature_map_size];
+		result = fread(signature_map, 1, signature_map_size * sizeof(uint32), file_pre);
+		if (result == 0)
+			return false;
+
+		sufix_size = (kmer_length - lut_prefix_length) / 4;		 
+	
+		sufix_rec_size = sufix_size + counter_size;	
+
+		return true;
+	}
+	else if (kmc_version == 0)
+	{
+		prefix_file_buf_size = (size - 4) / sizeof(uint64);		//reads without 4 bytes of a header_offset (and without markers)		
+		prefix_file_buf = new uint64[prefix_file_buf_size];
+		result = fread(prefix_file_buf, 1, (size_t)(size - 4), file_pre);
+		if (result == 0)
+			return false;
+
+		my_fseek(file_pre, -8, SEEK_END);
+
+		uint64 header_offset;
+		header_offset = fgetc(file_pre);
+
+		size = size - 4;
+
+		uint64 header_index = (size - header_offset) / sizeof(uint64);
+		uint64 last_data_index = header_index;
+
+		uint64 d = prefix_file_buf[header_index];
+
+		kmer_length = (uint32)d;			//- kmer's length
+		mode = d >> 32;				//- mode: 0 or 1
+
+		header_index++;
+		counter_size = (uint32)prefix_file_buf[header_index];	//- the size of a counter in bytes; 
+		//- for mode 0 counter_size is 1, 2, 3, or 4
+		//- for mode = 1 counter_size is 4;
+		lut_prefix_length = prefix_file_buf[header_index] >> 32;		//- the number of prefix's symbols cut from kmers; 
+		//- (kmer_length - lut_prefix_length) is divisible by 4
+
+		header_index++;
+		original_min_count = (uint32)prefix_file_buf[header_index];    //- the minimal number of kmer's appearances 
+		min_count = original_min_count;
+		original_max_count = prefix_file_buf[header_index] >> 32;      //- the maximal number of kmer's appearances
+		max_count = original_max_count;
+
+		header_index++;
+		total_kmers = prefix_file_buf[header_index];					//- the total number of kmers 
+
+		prefix_file_buf[last_data_index] = total_kmers + 1;
+
+		sufix_size = (kmer_length - lut_prefix_length) / 4;
+
+		sufix_rec_size = sufix_size + counter_size;
+
+		return true;
+
+	}
+	return false;
+}
+
+//------------------------------------------------------------------------------------------
+// Check if kmer exists. 
+// IN : kmer  - kmer
+// OUT: count - kmer's counter if kmer exists
+// RET: true  - if kmer exists
+//------------------------------------------------------------------------------------------
+bool CKMCFile::CheckKmer(CKmerAPI &kmer, float &count)
+{
+	uint32 int_counter;
+	if (CheckKmer(kmer, int_counter))
+	{
+		if (mode == 0)
+			count = (float)int_counter;
+		else
+			memcpy(&count, &int_counter, counter_size);
+		return true;
+	}
+	return false;
+}
+
+//------------------------------------------------------------------------------------------
+// Check if kmer exists. 
+// IN : kmer  - kmer
+// OUT: count - kmer's counter if kmer exists
+// RET: true  - if kmer exists
+//------------------------------------------------------------------------------------------
+bool CKMCFile::CheckKmer(CKmerAPI &kmer, uint32 &count)
+{
+	if(is_opened != opened_for_RA)
+		return false;
+	if(end_of_file)
+		return false;
+	
+	//recognize a prefix:
+	uint64 pattern_prefix_value = kmer.kmer_data[0];
+
+	uint32 pattern_offset = (sizeof(pattern_prefix_value)* 8) - (lut_prefix_length * 2) - (kmer.byte_alignment * 2);
+	int64 index_start = 0, index_stop = 0;
+
+	pattern_prefix_value = pattern_prefix_value >> pattern_offset;  //complements with 0
+	if (pattern_prefix_value >= prefix_file_buf_size)
+		return false;
+
+	if (kmc_version == 0x200)
+	{
+		uint32 signature = kmer.get_signature(signature_len);
+		uint32 bin_start_pos = signature_map[signature];
+		bin_start_pos *= single_LUT_size;				
+		//look into the array with data
+		index_start = *(prefix_file_buf + bin_start_pos + pattern_prefix_value);
+		index_stop = *(prefix_file_buf + bin_start_pos + pattern_prefix_value + 1) - 1;
+	}
+	else if (kmc_version == 0)
+	{
+		//look into the array with data
+		index_start = prefix_file_buf[pattern_prefix_value];
+		index_stop = prefix_file_buf[pattern_prefix_value + 1] - 1;
+	}
+
+	return BinarySearch(index_start, index_stop, kmer, count, pattern_offset);
+}
+
+//-----------------------------------------------------------------------------------------------
+// Check if end of file
+// RET: true - all kmers are listed
+//-----------------------------------------------------------------------------------------------
+bool CKMCFile::Eof(void)
+{
+	return end_of_file;	
+}
+
+bool CKMCFile::ReadNextKmer(CKmerAPI &kmer, float &count)
+{
+	uint32 int_counter;
+	if (ReadNextKmer(kmer, int_counter))
+	{
+		if (mode == 0)
+			count = (float)int_counter;
+		else
+			memcpy(&count, &int_counter, counter_size);
+		return true;
+	}
+	return false;
+
+}
+//-----------------------------------------------------------------------------------------------
+// Read next kmer
+// OUT: kmer - next kmer
+// OUT: count - kmer's counter
+// RET: true - if not EOF
+//-----------------------------------------------------------------------------------------------
+bool CKMCFile::ReadNextKmer(CKmerAPI &kmer, uint32 &count)
+{
+	uint64 prefix_mask = (1 << 2 * lut_prefix_length) - 1; //for kmc2 db
+
+	if(is_opened != opened_for_listing)
+		return false;
+	do
+	{
+		if(end_of_file)
+			return false;
+		
+		if(sufix_number == prefix_file_buf[prefix_index + 1]) 
+		{
+			prefix_index++;
+						
+			while (prefix_file_buf[prefix_index] == prefix_file_buf[prefix_index + 1])
+				prefix_index++;
+		}
+	
+		uint32 off = (sizeof(prefix_index) * 8) - (lut_prefix_length * 2) - kmer.byte_alignment * 2;
+			
+		uint64 temp_prefix = (prefix_index & prefix_mask) << off;	// shift prefix towards MSD. "& prefix_mask" necessary for kmc2 db format
+		
+		kmer.kmer_data[0] = temp_prefix;			// store prefix in an object CKmerAPI
+
+		for(uint32 i = 1; i < kmer.no_of_rows; i++)
+			kmer.kmer_data[i] = 0;
+
+		//read sufix:
+		uint32 row_index = 0;
+ 		uint64 suf = 0;
+	
+		off = off - 8;
+				
+ 		for(uint32 a = 0; a < sufix_size; a ++)
+		{
+			if(index_in_partial_buf == part_size)
+				Reload_sufix_file_buf();
+						
+			suf = sufix_file_buf[index_in_partial_buf++];
+			suf = suf << off;
+			kmer.kmer_data[row_index] = kmer.kmer_data[row_index] | suf;
+
+			if (off == 0)				//the end of a word in kmer_data
+			{
+					off = 56;
+					row_index++;
+			}
+			else
+					off -=8;
+		}
+	
+		//read counter:
+		if(index_in_partial_buf == part_size)
+			Reload_sufix_file_buf();
+		
+		count = sufix_file_buf[index_in_partial_buf++];
+
+		for(uint32 b = 1; b < counter_size; b++)
+		{
+			if(index_in_partial_buf == part_size)
+				Reload_sufix_file_buf();
+			
+			uint32 aux = 0x000000ff & sufix_file_buf[index_in_partial_buf++];
+			aux = aux << 8 * ( b);
+			count = aux | count;
+		}
+			
+		sufix_number++;
+	
+		if(sufix_number == total_kmers)
+			end_of_file = true;
+
+		if (mode != 0)
+		{
+			float float_counter;
+			memcpy(&float_counter, &count, counter_size);
+			if ((float_counter < min_count) || (float_counter > max_count))
+				continue;
+			else
+				break;
+		}
+
+	}
+	while((count < min_count) || (count > max_count));
+
+	return true;
+}
+//-------------------------------------------------------------------------------
+// Reload a contents of an array "sufix_file_buf" for listing mode. Auxiliary function.
+//-------------------------------------------------------------------------------
+void CKMCFile::Reload_sufix_file_buf()
+{
+	fread (sufix_file_buf, 1, (size_t) part_size, file_suf);
+	index_in_partial_buf = 0;
+};
+//-------------------------------------------------------------------------------
+// Release memory and close files in case they were opened 
+// RET: true - if files have been read
+//-------------------------------------------------------------------------------
+bool CKMCFile::Close()
+{
+	if(is_opened)
+	{
+		if(file_pre)
+		{
+			fclose(file_pre);	
+			file_pre = NULL;
+		}
+		if(file_suf)
+		{
+			fclose(file_suf);
+			file_suf = NULL;
+		}
+	
+		is_opened = closed;
+		end_of_file = false;
+		delete [] prefix_file_buf;
+		prefix_file_buf = NULL;
+		delete [] sufix_file_buf;
+		sufix_file_buf = NULL;
+		delete[] signature_map;
+		signature_map = NULL;
+
+		return true;
+	}
+	else
+		return false;
+};
+//----------------------------------------------------------------------------------
+// Set initial values to enable listing kmers from the beginning. Only in listing mode
+// RET: true - if a file has been opened for listing
+//----------------------------------------------------------------------------------
+bool CKMCFile::RestartListing(void)
+{
+	if(is_opened == opened_for_listing)
+	{
+		
+		my_fseek ( file_suf , 4 , SEEK_SET );
+		fread (sufix_file_buf, 1, (size_t) part_size, file_suf);
+		prefix_index = 0;
+		sufix_number = 0;
+		index_in_partial_buf = 0;
+
+		end_of_file = total_kmers == 0;
+
+		return true;
+	}
+	return false;
+		
+};
+//----------------------------------------------------------------------------------------
+// Set the minimal value for a counter. Kmers with counters below this threshold are ignored
+// IN	: x - minimal value for a counter
+// RET	: true - if successful 
+//----------------------------------------------------------------------------------------
+bool CKMCFile::SetMinCount(uint32 x)
+{
+	if((original_min_count <= x) && (x < max_count))
+	{
+		min_count = x;
+		return true;
+	} 
+	else
+		return false;
+}
+
+//----------------------------------------------------------------------------------------
+// Return a value of min_count. Kmers with counters below this threshold are ignored 
+// RET	: a value of min_count
+//----------------------------------------------------------------------------------------
+uint32 CKMCFile::GetMinCount(void)
+{
+	return min_count;
+};
+
+//----------------------------------------------------------------------------------------
+// Set the maximal value for a counter. Kmers with counters above this threshold are ignored
+// IN	: x - maximal value for a counter
+// RET	: true - if successful 
+//----------------------------------------------------------------------------------------
+bool CKMCFile::SetMaxCount(uint32 x)
+{
+	if((original_max_count >= x) && (x > min_count))
+	{
+		max_count = x;
+		return true; 
+	}
+	else
+		return false;
+}
+
+
+//----------------------------------------------------------------------------------------
+// Return a value of max_count. Kmers with counters above this threshold are ignored 
+// RET	: a value of max_count
+//----------------------------------------------------------------------------------------
+uint32 CKMCFile::GetMaxCount(void)
+{
+	return max_count;
+}
+
+//----------------------------------------------------------------------------------------
+// Set original (read from *.kmc_pre) values for min_count and max_count
+//----------------------------------------------------------------------------------------
+void CKMCFile::ResetMinMaxCounts(void)
+{
+	min_count = original_min_count;
+	max_count = original_max_count;
+} 
+
+//----------------------------------------------------------------------------------------
+// Return the length of kmers
+// RET	: the length of kmers
+//----------------------------------------------------------------------------------------
+uint32 CKMCFile::KmerLength(void)
+{
+	return kmer_length;			
+}
+
+//----------------------------------------------------------------------------------------
+// Check if kmer exists
+// IN	: kmer - kmer
+// RET	: true if kmer exists
+//----------------------------------------------------------------------------------------
+bool CKMCFile::IsKmer(CKmerAPI &kmer)
+{
+	uint32 _count;
+	if(CheckKmer(kmer, _count))
+		return true;
+	else
+		return false;
+}
+
+//-----------------------------------------------------------------------------------------
+// Check the total number of kmers between current min_count and max_count
+// RET	: total number of kmers or 0 if a database has not been opened
+//-----------------------------------------------------------------------------------------
+uint64 CKMCFile::KmerCount(void)
+{
+	if(is_opened)
+		if((min_count == original_min_count) && (max_count == original_max_count))
+			return total_kmers;
+		else
+		{
+			uint32 count;
+			uint32 int_counter;
+			uint64 aux_kmerCount = 0;
+
+			if(is_opened == opened_for_RA)
+			{
+				uchar *ptr = sufix_file_buf;
+				
+				for(uint64 i = 0; i < total_kmers; i++)		
+				{
+					ptr += sufix_size;
+					int_counter = *ptr;
+					ptr++;
+
+					for(uint32 b = 1; b < counter_size; b ++)
+					{
+						uint32 aux = 0x000000ff & *(ptr);
+						aux = aux << 8 * ( b);
+						int_counter = aux | int_counter;
+						ptr++;
+					}
+					
+					if(mode == 0)
+						count = int_counter;
+					else
+						memcpy(&count, &int_counter, counter_size);
+	
+					if((count >= min_count) && (count <= max_count))
+						aux_kmerCount++;
+				}
+			}
+			else //opened_for_listing
+			{
+				CKmerAPI kmer(kmer_length);
+				float count;
+				RestartListing();
+				for(uint64 i = 0; i < total_kmers; i++)		
+				{
+					ReadNextKmer(kmer, count);
+					if((count >= min_count) && (count <= max_count))
+						aux_kmerCount++;
+				}
+				RestartListing();
+			}
+			return aux_kmerCount;
+		}
+	else
+		return 0 ;
+}
+//---------------------------------------------------------------------------------
+// Get current parameters from kmer_database
+// OUT	:	_kmer_length	- the length of kmers
+//			_mode			- mode
+//			_counter_size	- the size of a counter in bytes 
+//			_lut_prefix_length - the number of prefix's symbols cut from kmers 
+//			_min_count		- the minimal number of kmer's appearances 
+//			_max_count		- the maximal number of kmer's appearances
+//			_total_kmers	- the total number of kmers
+// RET	: true if kmer_database has been opened
+//---------------------------------------------------------------------------------
+bool CKMCFile::Info(uint32 &_kmer_length, uint32 &_mode, uint32 &_counter_size, uint32 &_lut_prefix_length, uint32 &_signature_len, uint32 &_min_count, uint32 &_max_count, uint64 &_total_kmers)
+{
+	if(is_opened)
+	{
+		_kmer_length = kmer_length;
+		_mode = mode;
+		_counter_size = counter_size;
+		_lut_prefix_length = lut_prefix_length;
+		if (kmc_version == 0x200)
+			_signature_len = signature_len;
+		else
+			_signature_len = 0; //for kmc1 there is no signature_len
+		_min_count = min_count;
+		_max_count = max_count;
+		_total_kmers = total_kmers;
+		return true;
+	}
+	return false;
+};
+
+
+//---------------------------------------------------------------------------------
+// Get counters from read
+// OUT	:	counters    	- vector of counters of each k-mer in read (of size read_len - kmer_len + 1), if some k-mer is invalid (i.e. contains 'N') the counter is equal to 0
+// IN   :   read			- 
+// RET	: true if success
+//---------------------------------------------------------------------------------
+bool CKMCFile::GetCountersForRead(const std::string& read, std::vector<uint32>& counters)
+{
+	if (is_opened != opened_for_RA)
+		return false;
+	if (kmc_version == 0x200)
+		return GetCountersForRead_kmc2(read, counters);
+	else if (kmc_version == 0)
+		return GetCountersForRead_kmc1(read, counters);
+	else
+		return false; //never should be here
+}
+
+//---------------------------------------------------------------------------------
+// Get counters from read
+// OUT	:	counters    	- vector of counters of each k-mer in read (of size read_len - kmer_len + 1), if some k-mer is invalid (i.e. contains 'N') the counter is equal to 0
+// IN   :   read			- 
+// RET	: true if success
+//---------------------------------------------------------------------------------
+bool CKMCFile::GetCountersForRead(const std::string& read, std::vector<float>& counters)
+{
+	if (is_opened != opened_for_RA)
+		return false;
+	std::vector<uint32> uint32_v;
+	if (GetCountersForRead(read, uint32_v))
+	{
+		counters.clear();
+		counters.resize(uint32_v.size());
+		if (mode == 0)
+		{
+			for (uint32 i = 0; i < uint32_v.size(); ++i)
+				counters[i] = static_cast<float>(uint32_v[i]);
+		}
+		else
+		{
+			for (uint32 i = 0; i < uint32_v.size(); ++i)
+				memcpy(&counters[i], &uint32_v[i], counter_size);
+		}
+
+		return true;
+	}
+	return false;
+}
+
+//---------------------------------------------------------------------------------
+// Auxiliary function.
+//---------------------------------------------------------------------------------
+uint32 CKMCFile::count_for_kmer_kmc1(CKmerAPI& kmer)
+{
+	//recognize a prefix:
+
+	uint64 pattern_prefix_value = kmer.kmer_data[0];
+
+	uint32 pattern_offset = (sizeof(pattern_prefix_value)* 8) - (lut_prefix_length * 2) - (kmer.byte_alignment * 2);
+
+	pattern_prefix_value = pattern_prefix_value >> pattern_offset;  //complements with 0
+	if (pattern_prefix_value >= prefix_file_buf_size)
+		return false;
+	//look into the array with data
+
+	int64 index_start = prefix_file_buf[pattern_prefix_value];
+	int64 index_stop = prefix_file_buf[pattern_prefix_value + 1] - 1;
+
+	uint32 counter = 0;
+	if (BinarySearch(index_start, index_stop, kmer, counter, pattern_offset))
+		return counter;
+	return 0;
+}
+
+//---------------------------------------------------------------------------------
+// Auxiliary function.
+//---------------------------------------------------------------------------------
+uint32 CKMCFile::count_for_kmer_kmc2(CKmerAPI& kmer, uint32 bin_start_pos)
+{
+	//recognize a prefix:
+	uint64 pattern_prefix_value = kmer.kmer_data[0];
+
+	uint32 pattern_offset = (sizeof(pattern_prefix_value)* 8) - (lut_prefix_length * 2) - (kmer.byte_alignment * 2);
+
+	pattern_prefix_value = pattern_prefix_value >> pattern_offset;  //complements with 0
+	if (pattern_prefix_value >= prefix_file_buf_size)
+		return false;
+	//look into the array with data
+
+	int64 index_start = *(prefix_file_buf + bin_start_pos + pattern_prefix_value);
+	int64 index_stop = *(prefix_file_buf + bin_start_pos + pattern_prefix_value + 1) - 1;
+
+	uint32 counter = 0;
+	if (BinarySearch(index_start, index_stop, kmer, counter, pattern_offset))
+		return counter;
+	return 0;
+}
+
+//---------------------------------------------------------------------------------
+// Auxiliary function.
+//---------------------------------------------------------------------------------
+bool CKMCFile::GetCountersForRead_kmc1(const std::string& read, std::vector<uint32>& counters)
+{	
+	uint32 read_len = static_cast<uint32>(read.length());
+	counters.resize(read.length() - kmer_length + 1);
+	std::string transformed_read = read;
+	for (char& c : transformed_read)
+		c = CKmerAPI::num_codes[(uchar)c];
+
+	uint32 i = 0;
+	CKmerAPI kmer(kmer_length);
+	uint32 pos = 0;
+	
+	uint32 counters_pos = 0;
+
+	while (i + kmer_length - 1 < read_len)
+	{
+		bool contains_N = false;
+		while (i < read_len && pos < kmer_length)
+		{
+			if (CKmerAPI::num_codes[(uchar)read[i]] < 0)
+			{
+				pos = 0;
+				kmer.clear();
+				++i;
+				uint32 wrong_kmers = MIN(i - counters_pos, static_cast<uint32>(counters.size()) - counters_pos);
+				fill_n(counters.begin() + counters_pos, wrong_kmers, 0);
+				counters_pos += wrong_kmers;
+				contains_N = true;
+				break;
+			}
+			else
+				kmer.insert2bits(pos++, CKmerAPI::num_codes[(uchar)read[i++]]);
+		}
+		if (contains_N)
+			continue;
+		if (pos == kmer_length)
+		{			
+			counters[counters_pos++] = count_for_kmer_kmc1(kmer);
+		}
+		else
+			break;
+
+		while (i < read_len)
+		{
+			if (CKmerAPI::num_codes[(uchar)read[i]] < 0)
+			{
+				pos = 0;
+				break;
+			}
+			kmer.SHL_insert2bits(CKmerAPI::num_codes[(uchar)read[i++]]);
+			counters[counters_pos++] = count_for_kmer_kmc1(kmer);
+		}
+	}
+	if (counters_pos < counters.size())
+	{
+		fill_n(counters.begin() + counters_pos, counters.size() - counters_pos, 0);
+		counters_pos = static_cast<uint32>(counters.size());
+	}
+	return true;
+}
+//---------------------------------------------------------------------------------
+// Auxiliary function.
+//---------------------------------------------------------------------------------
+bool CKMCFile::GetCountersForRead_kmc2(const std::string& read, std::vector<uint32>& counters) // Fill counters[i] with the DB count of the i-th k-mer of read; KMC2 layout: split read into super k-mers by minimizer signature, then query each bin.
+{	
+counters.resize(read.length() - kmer_length + 1); // NOTE(review): size_t underflow when read.length() < kmer_length -- confirm callers guard against short reads
+	std::string transformed_read = read;
+	for (char& c : transformed_read)
+		c = CKmerAPI::num_codes[(uchar)c]; // 2-bit codes; negative for non-ACGT
+	uint32 i = 0;
+	uint32 len = 0; //length of super k-mer
+	uint32 signature_start_pos;
+	CMmer current_signature(signature_len), end_mmer(signature_len);
+
+	using super_kmers_t = std::vector<std::tuple<uint32, uint32, uint32>>;//start_pos, len, bin_no, 
+	super_kmers_t super_kmers;
+
+	while (i + kmer_length - 1 < read.length())
+	{
+		bool contains_N = false;
+		//building first signature after 'N' or at the read beginning
+		for (uint32 j = 0; j < signature_len; ++j, ++i)
+		{
+			if (transformed_read[i] < 0)//'N'
+			{
+				contains_N = true;
+				break;
+			}
+		}
+		//signature must be shorter than k-mer so if signature contains 'N', k-mer will contain it too
+		if (contains_N)
+		{
+			++i;
+			continue;
+		}
+		len = signature_len;
+		signature_start_pos = i - signature_len;
+		current_signature.insert(transformed_read.c_str() + signature_start_pos); 
+		end_mmer.set(current_signature);
+
+		for (; i < transformed_read.length(); ++i) // extend the current super k-mer symbol by symbol
+		{
+			if (transformed_read[i] < 0)//'N'
+			{
+				if (len >= kmer_length)
+				{
+					super_kmers.push_back(std::make_tuple(i - len, len, signature_map[current_signature.get()])); // close the super k-mer at the 'N'
+				}
+				len = 0;
+				++i;
+				break;
+			}
+			end_mmer.insert(transformed_read[i]);
+			if (end_mmer < current_signature)//signature at the end of current k-mer is lower than current
+			{
+				if (len >= kmer_length)
+				{
+					super_kmers.push_back(std::make_tuple(i - len, len, signature_map[current_signature.get()]));
+					len = kmer_length - 1; // last k-1 symbols are shared with the next super k-mer
+				}
+				current_signature.set(end_mmer);
+				signature_start_pos = i - signature_len + 1;
+			}
+			else if (end_mmer == current_signature)
+			{
+				current_signature.set(end_mmer);
+				signature_start_pos = i - signature_len + 1;
+			}
+			else if (signature_start_pos + kmer_length - 1 < i)//need to find new signature
+			{
+				super_kmers.push_back(std::make_tuple(i - len, len, signature_map[current_signature.get()]));
+				len = kmer_length - 1;
+				//looking for new signature
+				++signature_start_pos;
+				//building first signature in current k-mer
+				end_mmer.insert(transformed_read.c_str() + signature_start_pos);
+				current_signature.set(end_mmer);
+				for (uint32 j = signature_start_pos + signature_len; j <= i; ++j) // scan the remaining m-mers of this k-mer for the minimizer
+				{
+					end_mmer.insert(transformed_read[j]);
+					if (end_mmer <= current_signature)
+					{
+						current_signature.set(end_mmer);
+						signature_start_pos = j - signature_len + 1;
+					}
+				}
+			}
+			++len;
+		}
+	}
+	if (len >= kmer_length)//last one in read
+	{
+		super_kmers.push_back(std::make_tuple(i - len, len, signature_map[current_signature.get()]));
+	}
+
+	uint32 counters_pos = 0;
+	if (super_kmers.empty()) // no usable super k-mer (whole read broken by 'N's or too short segments)
+	{
+		fill_n(counters.begin(), counters.size(), 0);
+		return true;
+	}
+
+	CKmerAPI kmer(kmer_length);
+
+	uint32 last_end = 0; // end position (exclusive) of the previous super k-mer
+
+	//'N' somewhere in first k-mer
+	if (std::get<0>(super_kmers.front()) > 0)
+	{
+		fill_n(counters.begin(), std::get<0>(super_kmers.front()), 0);
+		last_end = std::get<0>(super_kmers.front());
+		counters_pos = std::get<0>(super_kmers.front());
+	}
+	for (auto& super_kmer : super_kmers)
+	{
+		//'N's between super k-mers
+		if (last_end < std::get<0>(super_kmer))
+		{
+			uint32 gap = std::get<0>(super_kmer) -last_end;
+			fill_n(counters.begin() + counters_pos, kmer_length + gap - 1, 0); // windows overlapping the gap get count 0
+			counters_pos += kmer_length + gap - 1;
+		}
+		last_end = std::get<0>(super_kmer) + std::get<1>(super_kmer);
+
+		kmer.clear();
+		kmer.from_binary(transformed_read.c_str() + std::get<0>(super_kmer)); // first k-mer of this super k-mer
+
+		uint32 bin_start_pos = std::get<2>(super_kmer) * single_LUT_size; // LUT offset of this super k-mer's bin
+		counters[counters_pos++] = count_for_kmer_kmc2(kmer, bin_start_pos);
+
+		for (uint32 i = std::get<0>(super_kmer) +kmer_length; i < std::get<0>(super_kmer) +std::get<1>(super_kmer); ++i)
+		{
+			kmer.SHL_insert2bits(transformed_read[i]); // slide the window by one symbol
+			counters[counters_pos++] = count_for_kmer_kmc2(kmer, bin_start_pos);
+		}
+	}
+	//'N's at the end of read
+	if (counters_pos < counters.size())
+	{
+		fill_n(counters.begin() + counters_pos, counters.size() - counters_pos, 0);
+		counters_pos = static_cast<uint32>(counters.size());
+	}
+
+	return true;
+}
+
+
+//---------------------------------------------------------------------------------
+// Auxiliary function.
+//---------------------------------------------------------------------------------
+bool CKMCFile::BinarySearch(int64 index_start, int64 index_stop, const CKmerAPI& kmer, uint32& counter, uint32 pattern_offset) // Binary-search kmer's suffix among sufix_file_buf records [index_start..index_stop]; on a hit decode its counter and return whether it lies within [min_count, max_count].
+{
+	uchar *sufix_byte_ptr = nullptr;
+	uint64 sufix = 0;
+
+	//sufix_offset is always 56
+	uint32 sufix_offset = 56;			// the offset of a sufix is for shifting the sufix towards MSB, to compare the sufix with a pattern
+	// Bytes of a pattern to search are always shifted towards MSB
+
+	uint32 row_index = 0;				// the number of a current row in an array kmer_data
+
+	bool found = false;
+
+	while (index_start <= index_stop)
+	{
+		int64 mid_index = (index_start + index_stop) / 2;
+		sufix_byte_ptr = &sufix_file_buf[mid_index * sufix_rec_size];
+
+		uint64 pattern = 0;
+
+		pattern_offset = (lut_prefix_length + kmer.byte_alignment) * 2; // NOTE(review): the pattern_offset parameter is ignored -- it is recomputed here on every iteration
+
+		row_index = 0;
+		for (uint32 a = 0; a < sufix_size; a++)		//check byte by byte
+		{
+			pattern = kmer.kmer_data[row_index];
+			pattern = pattern << pattern_offset;
+			pattern = pattern & 0xff00000000000000; // keep only the MSB byte of the shifted word
+
+			sufix = sufix_byte_ptr[a];
+			sufix = sufix << sufix_offset;
+
+			if (pattern != sufix)
+				break;
+
+			pattern_offset += 8;
+
+			if (pattern_offset == 64)				//the end of a word
+			{
+				pattern_offset = 0;
+				row_index++;
+			}
+		}
+
+		if (pattern == sufix) // all sufix_size bytes matched
+		{
+			found = true;
+			break;
+		}
+		if (sufix < pattern)
+			index_start = mid_index + 1;
+		else
+			index_stop = mid_index - 1;
+	}
+
+	if (found)
+	{
+		sufix_byte_ptr += sufix_size; // the counter bytes follow the suffix in each record
+
+		counter = *sufix_byte_ptr;
+
+		for (uint32 b = 1; b < counter_size; b++) // assemble the little-endian counter
+		{
+			uint32 aux = 0x000000ff & *(sufix_byte_ptr + b);
+
+			aux = aux << 8 * (b);
+			counter = aux | counter;
+		}
+		if (mode != 0) // float (quality) counter mode
+		{
+			float float_counter;
+			memcpy(&float_counter, &counter, counter_size); // NOTE(review): copies counter_size bytes into a 4-byte float -- assumes counter_size == 4 in float mode; confirm
+			return (float_counter >= min_count) && (float_counter <= max_count);
+		}
+		return (counter >= min_count) && (counter <= max_count);
+	}
+	return false;
+}
+
+
+// ***** EOF
diff --git a/src/projects/mts/kmc_api/kmc_file.h b/src/projects/mts/kmc_api/kmc_file.h
new file mode 100644
index 0000000..73676f9
--- /dev/null
+++ b/src/projects/mts/kmc_api/kmc_file.h
@@ -0,0 +1,141 @@
+/*
+  This file is a part of KMC software distributed under GNU GPL 3 licence.
+  The homepage of the KMC project is http://sun.aei.polsl.pl/kmc
+
+  Authors: Sebastian Deorowicz, Agnieszka Debudaj-Grabysz, Marek Kokot
+
+  Version: 2.2.0
+  Date   : 2015-04-15
+*/
+
+#ifndef _KMC_FILE_H
+#define _KMC_FILE_H
+
+#include "kmer_defs.h"
+#include "kmer_api.h"
+#include <string>
+#include <vector>
+
+class CKMCFile // Reader for a KMC k-mer database (the *.kmc_pre / *.kmc_suf file pair); supports random-access queries and sequential listing.
+{
+	enum open_mode {closed, opened_for_RA, opened_for_listing};
+	open_mode is_opened;
+
+	bool end_of_file;
+
+	FILE *file_pre;
+	FILE *file_suf;
+
+	uint64* prefix_file_buf;
+	uint64 prefix_file_buf_size;
+	uint64 prefix_index;			// The current prefix's index in an array "prefix_file_buf", read from *.kmc_pre
+	uint32 single_LUT_size;			// The size of a single LUT (in no. of elements)
+
+	uint32* signature_map;
+	uint32 signature_map_size;
+	
+	uchar* sufix_file_buf;
+	uint64 sufix_number;			// The sufix's number to be listed
+	uint64 index_in_partial_buf;	// The current byte's number in an array "sufix_file_buf", for listing mode
+
+	uint32 kmer_length;				// length (in symbols) of the k-mers stored in the database
+	uint32 mode;					// 0: plain integer counters; non-zero: counters stored as floats
+	uint32 counter_size;			// size of a single counter, in bytes
+	uint32 lut_prefix_length;		// number of prefix symbols resolved through the prefix LUT
+	uint32 signature_len;			// minimizer (signature) length
+	uint32 min_count;				// current lower counter threshold for queries/listing
+	uint32 max_count;				// current upper counter threshold for queries/listing
+	uint64 total_kmers;				// total number of k-mers stored in the database
+
+	uint32 kmc_version;				// database format version (KMC1 vs KMC2 layout)
+	uint32 sufix_size;		// sufix's size in bytes 
+	uint32 sufix_rec_size;  // sufix_size + counter_size
+
+	uint32 original_min_count;		// thresholds as read from the database header
+	uint32 original_max_count;
+
+	static uint64 part_size; // the size of a block read to sufix_file_buf, in listing mode 
+	
+	bool BinarySearch(int64 index_start, int64 index_stop, const CKmerAPI& kmer, uint32& counter, uint32 pattern_offset); // binary search of kmer's suffix in sufix_file_buf; outputs its counter
+
+	// Open a file, recognize its size and check its marker. Auxiliary function.
+	bool OpenASingleFile(const std::string &file_name, FILE *&file_handler, uint64 &size, char marker[]);	
+
+	// Recognize current parameters. Auxiliary function.
+	bool ReadParamsFrom_prefix_file_buf(uint64 &size);	
+
+	// Reload a contents of an array "sufix_file_buf" for listing mode. Auxiliary function. 
+	void Reload_sufix_file_buf();
+
+	// Implementation of GetCountersForRead for kmc1 database format
+	bool GetCountersForRead_kmc1(const std::string& read, std::vector<uint32>& counters);
+
+	// Implementation of GetCountersForRead for kmc2 database format
+	bool GetCountersForRead_kmc2(const std::string& read, std::vector<uint32>& counters);
+public:
+		
+	CKMCFile();
+	~CKMCFile();
+
+	// Open files *.kmc_pre & *.kmc_suf, read them to RAM, close files. *.kmc_suf is opened for random access
+	bool OpenForRA(const std::string &file_name);
+
+	// Open files *kmc_pre & *.kmc_suf, read *.kmc_pre to RAM, *.kmc_suf is buffered
+	bool OpenForListing(const std::string& file_name);
+
+	// Return next kmer in CKmerAPI &kmer. Return its counter in float &count. Return true if not EOF
+	bool ReadNextKmer(CKmerAPI &kmer, float &count);
+
+	bool ReadNextKmer(CKmerAPI &kmer, uint32 &count); // overload of the above returning an integer counter
+	// Release memory and close files in case they were opened 
+	bool Close();
+
+	// Set the minimal value for a counter. Kmers with counters below this threshold are ignored
+	bool SetMinCount(uint32 x);
+
+	// Return a value of min_count. Kmers with counters below this threshold are ignored 
+	uint32 GetMinCount(void);
+
+	// Set the maximal value for a counter. Kmers with counters above this threshold are ignored
+	bool SetMaxCount(uint32 x);
+
+	// Return a value of max_count. Kmers with counters above this threshold are ignored 
+	uint32 GetMaxCount(void);
+
+	// Return the total number of kmers between min_count and max_count
+	uint64 KmerCount(void);
+
+	// Return the length of kmers
+	uint32 KmerLength(void);
+
+	// Set initial values to enable listing kmers from the begining. Only in listing mode
+	bool RestartListing(void);
+
+	// Return true if all kmers are listed
+	bool Eof(void);
+
+	// Return true if kmer exists. In this case return kmer's counter in count
+	bool CheckKmer(CKmerAPI &kmer, float &count);
+
+	bool CheckKmer(CKmerAPI &kmer, uint32 &count); // overload of the above returning an integer counter
+
+	// Return true if kmer exists
+	bool IsKmer(CKmerAPI &kmer);
+
+	// Set original (read from *.kmer_pre) values for min_count and max_count
+	void ResetMinMaxCounts(void);
+
+	// Get current parameters from kmer_database
+	bool Info(uint32 &_kmer_length, uint32 &_mode, uint32 &_counter_size, uint32 &_lut_prefix_length, uint32 &_signature_len, uint32 &_min_count, uint32 &_max_count, uint64 &_total_kmers);
+	
+	// Get counters for all k-mers in read
+	bool GetCountersForRead(const std::string& read, std::vector<uint32>& counters);
+	bool GetCountersForRead(const std::string& read, std::vector<float>& counters);
+	private:
+		uint32 count_for_kmer_kmc1(CKmerAPI& kmer); // look up kmer's counter in a KMC1 database
+		uint32 count_for_kmer_kmc2(CKmerAPI& kmer, uint32 bin_start_pos); // look up kmer's counter in a KMC2 database, within the bin starting at bin_start_pos
+};
+
+#endif
+
+// ***** EOF
diff --git a/src/projects/mts/kmc_api/kmer_api.cpp b/src/projects/mts/kmc_api/kmer_api.cpp
new file mode 100644
index 0000000..befd9fe
--- /dev/null
+++ b/src/projects/mts/kmc_api/kmer_api.cpp
@@ -0,0 +1,48 @@
+/*
+  This file is a part of KMC software distributed under GNU GPL 3 licence.
+  The homepage of the KMC project is http://sun.aei.polsl.pl/kmc
+
+  Authors: Sebastian Deorowicz and Agnieszka Debudaj-Grabysz
+
+  Version: 2.2.0
+  Date   : 2015-04-15
+*/
+
+
+#include "stdafx.h"
+#include "kmer_api.h"
+#include <vector>
+#include <math.h>
+
+using namespace std;
+
+const char CKmerAPI::char_codes[] = {'A','C', 'G', 'T'};	// 2-bit code -> ASCII symbol
+char CKmerAPI::num_codes[];	// ASCII symbol -> 2-bit code, -1 for non-ACGT; populated by _init below
+CKmerAPI::_si CKmerAPI::_init; // static initializer object: its constructor fills num_codes
+uchar CKmerAPI::rev_comp_bytes_LUT[] = {	// byte (4 packed symbols) -> reverse complement of that byte
+    0xff, 0xbf, 0x7f, 0x3f, 0xef, 0xaf, 0x6f, 0x2f, 0xdf, 0x9f, 0x5f, 0x1f, 0xcf, 0x8f, 0x4f, 0x0f,
+    0xfb, 0xbb, 0x7b, 0x3b, 0xeb, 0xab, 0x6b, 0x2b, 0xdb, 0x9b, 0x5b, 0x1b, 0xcb, 0x8b, 0x4b, 0x0b,
+    0xf7, 0xb7, 0x77, 0x37, 0xe7, 0xa7, 0x67, 0x27, 0xd7, 0x97, 0x57, 0x17, 0xc7, 0x87, 0x47, 0x07,
+    0xf3, 0xb3, 0x73, 0x33, 0xe3, 0xa3, 0x63, 0x23, 0xd3, 0x93, 0x53, 0x13, 0xc3, 0x83, 0x43, 0x03,
+    0xfe, 0xbe, 0x7e, 0x3e, 0xee, 0xae, 0x6e, 0x2e, 0xde, 0x9e, 0x5e, 0x1e, 0xce, 0x8e, 0x4e, 0x0e,
+    0xfa, 0xba, 0x7a, 0x3a, 0xea, 0xaa, 0x6a, 0x2a, 0xda, 0x9a, 0x5a, 0x1a, 0xca, 0x8a, 0x4a, 0x0a,
+    0xf6, 0xb6, 0x76, 0x36, 0xe6, 0xa6, 0x66, 0x26, 0xd6, 0x96, 0x56, 0x16, 0xc6, 0x86, 0x46, 0x06,
+    0xf2, 0xb2, 0x72, 0x32, 0xe2, 0xa2, 0x62, 0x22, 0xd2, 0x92, 0x52, 0x12, 0xc2, 0x82, 0x42, 0x02,
+    0xfd, 0xbd, 0x7d, 0x3d, 0xed, 0xad, 0x6d, 0x2d, 0xdd, 0x9d, 0x5d, 0x1d, 0xcd, 0x8d, 0x4d, 0x0d,
+    0xf9, 0xb9, 0x79, 0x39, 0xe9, 0xa9, 0x69, 0x29, 0xd9, 0x99, 0x59, 0x19, 0xc9, 0x89, 0x49, 0x09,
+    0xf5, 0xb5, 0x75, 0x35, 0xe5, 0xa5, 0x65, 0x25, 0xd5, 0x95, 0x55, 0x15, 0xc5, 0x85, 0x45, 0x05,
+    0xf1, 0xb1, 0x71, 0x31, 0xe1, 0xa1, 0x61, 0x21, 0xd1, 0x91, 0x51, 0x11, 0xc1, 0x81, 0x41, 0x01,
+    0xfc, 0xbc, 0x7c, 0x3c, 0xec, 0xac, 0x6c, 0x2c, 0xdc, 0x9c, 0x5c, 0x1c, 0xcc, 0x8c, 0x4c, 0x0c,
+    0xf8, 0xb8, 0x78, 0x38, 0xe8, 0xa8, 0x68, 0x28, 0xd8, 0x98, 0x58, 0x18, 0xc8, 0x88, 0x48, 0x08,
+    0xf4, 0xb4, 0x74, 0x34, 0xe4, 0xa4, 0x64, 0x24, 0xd4, 0x94, 0x54, 0x14, 0xc4, 0x84, 0x44, 0x04,
+    0xf0, 0xb0, 0x70, 0x30, 0xe0, 0xa0, 0x60, 0x20, 0xd0, 0x90, 0x50, 0x10, 0xc0, 0x80, 0x40, 0x00
+};
+uint64 CKmerAPI::alignment_mask[] = {	// indexed by byte_alignment (0..4): clears the top 2*byte_alignment bits of the first word
+    0xFFFFFFFFFFFFFFFFULL,
+    0x3FFFFFFFFFFFFFFFULL,
+    0x0FFFFFFFFFFFFFFFULL,
+    0x03FFFFFFFFFFFFFFULL,
+    0x00FFFFFFFFFFFFFFULL
+};
+
+// ***** EOF
diff --git a/src/projects/mts/kmc_api/kmer_api.h b/src/projects/mts/kmc_api/kmer_api.h
new file mode 100644
index 0000000..e652aa2
--- /dev/null
+++ b/src/projects/mts/kmc_api/kmer_api.h
@@ -0,0 +1,596 @@
+/*
+This file is a part of KMC software distributed under GNU GPL 3 licence.
+The homepage of the KMC project is http://sun.aei.polsl.pl/kmc
+
+Authors: Sebastian Deorowicz and Agnieszka Debudaj-Grabysz
+
+Version: 2.2.0
+Date   : 2015-04-15
+*/
+
+#ifndef _KMER_API_H
+#define _KMER_API_H
+
+
+#include "kmer_defs.h"
+#include <string>
+#include <iostream>
+#include "mmer.h"
+class CKMCFile;
+
+class CKmerAPI // A single k-mer packed 2 bits per symbol; query/result type used by CKMCFile.
+{
+protected:
+
+	uint64 *kmer_data;				// An array to store kmer's data. On 64 bits 32 symbols can be stored
+									// Data are shifted to let sufix's symbols to start with a border of a byte
+
+	
+	uint32 kmer_length;				// Kmer's length, in symbols
+	uchar byte_alignment;			// A number of "empty" symbols placed before prefix to let sufix's symbols to start with a border of a byte
+
+	uint32 no_of_rows;				// A number of 64-bits words allocated for kmer_data 	
+
+	friend class CKMCFile;
+	
+	//----------------------------------------------------------------------------------
+	inline void clear() // zero all packed words
+	{
+		memset(kmer_data, 0, sizeof(*kmer_data) * no_of_rows);
+	}
+
+	//----------------------------------------------------------------------------------
+	inline void insert2bits(uint32 pos, uchar val) // add the 2-bit code val at symbol position pos (slot must currently be 0: uses +=)
+	{
+		kmer_data[(pos + byte_alignment) >> 5] += (uint64)val << (62 - (((pos + byte_alignment) & 31) * 2));
+	}
+
+	inline uchar extract2bits(uint32 pos) // read the 2-bit code at symbol position pos
+	{
+		return (kmer_data[(pos + byte_alignment) >> 5] >> (62 - (((pos + byte_alignment) & 31) * 2))) & 3;
+	}
+	//----------------------------------------------------------------------------------
+	inline void SHL_insert2bits(uchar val) // shift the whole k-mer left by one symbol and append val as the last symbol
+	{
+		kmer_data[0] <<= 2;
+		if (byte_alignment)
+		{
+			uint64 mask = ~(((1ull << 2 * byte_alignment) - 1) << (64 - 2 * byte_alignment)); // keep the alignment bits zeroed
+			kmer_data[0] &= mask;
+		}
+		for (uint32 i = 1; i < no_of_rows; ++i)
+		{
+			kmer_data[i - 1] += kmer_data[i] >> 62; // carry the top symbol into the previous word
+			kmer_data[i] <<= 2;
+		}
+		kmer_data[no_of_rows - 1] += (uint64)val << (62 - (((kmer_length - 1 + byte_alignment) & 31) * 2));
+	}
+	// ----------------------------------------------------------------------------------
+	inline void from_binary(const char* kmer) // load kmer_length 2-bit codes from a byte-per-symbol buffer
+	{
+		clear();
+		for (uint32 i = 0; i < kmer_length; ++i)
+			insert2bits(i, kmer[i]);
+	}
+
+	// ----------------------------------------------------------------------------------
+	template<typename RandomAccessIterator>
+	inline void to_string_impl(RandomAccessIterator iter) // write kmer_length ASCII symbols (ACGT) to iter
+	{
+		uchar *byte_ptr;
+		uchar c;
+		uchar temp_byte_alignment = byte_alignment;
+		uint32 cur_string_size = 0;
+		for (uint32 row_counter = 0; row_counter < no_of_rows; row_counter++)
+		{
+			byte_ptr = reinterpret_cast<uchar*>(&kmer_data[row_counter]);
+
+			byte_ptr += 7;					// shift a pointer towards a MSB
+
+			for (uint32 i = 0; (i < kmer_length) && (i < 32); i += 4)		// 32 symbols of any "row" in kmer_data
+			{
+				if ((i == 0) && temp_byte_alignment)				// check if a byte_alignment placed before a prefix is to be skipped
+					temp_byte_alignment--;
+				else
+				{
+					c = 0xc0 & *byte_ptr;			//11000000
+					c = c >> 6;
+					*(iter + cur_string_size++) = char_codes[c];
+					if (cur_string_size == kmer_length) break;
+				}
+
+				if ((i == 0) && temp_byte_alignment)				// check if a  byte_alignment placed before a prefix is to be skipped
+					temp_byte_alignment--;
+				else
+				{
+					c = 0x30 & *byte_ptr;			//00110000
+					c = c >> 4;
+					*(iter + cur_string_size++) = char_codes[c];
+					if (cur_string_size == kmer_length) break;
+				}
+
+				if ((i == 0) && temp_byte_alignment)				// check if a  byte_alignment placed before a prefix is to be skipped
+					temp_byte_alignment--;
+				else
+				{
+					c = 0x0c & *byte_ptr;			//00001100
+					c = c >> 2;
+					*(iter + cur_string_size++) = char_codes[c];
+					if (cur_string_size == kmer_length) break;
+				}
+				// no need to check byte alignment as its length is at most 3 
+				c = 0x03 & *byte_ptr;			//00000011
+				*(iter + cur_string_size++) = char_codes[c];
+				if (cur_string_size == kmer_length) break;
+
+				byte_ptr--;
+			}
+		}
+	}
+	
+	// ----------------------------------------------------------------------------------
+	template<typename RandomAccessIterator>
+	inline bool from_string_impl(const RandomAccessIterator iter, uint32 len) // (re)allocate if the length changed, then pack len symbols from iter; always returns true
+	{
+		unsigned char c_char;
+		uchar c_binary;
+		uchar temp_byte_alignment;
+		if (kmer_length != len)
+		{
+			if (kmer_length && kmer_data)
+				delete[] kmer_data;
+
+			kmer_length = len;
+
+			if (kmer_length % 4)
+				byte_alignment = 4 - (kmer_length % 4);
+			else
+				byte_alignment = 0;
+
+
+			if (kmer_length != 0)
+			{
+				no_of_rows = (((kmer_length + byte_alignment) % 32) ? (kmer_length + byte_alignment) / 32 + 1 : (kmer_length + byte_alignment) / 32);
+				//no_of_rows = (int)ceil((double)(kmer_length + byte_alignment) / 32);
+				kmer_data = new uint64[no_of_rows];
+				//memset(kmer_data, 0, sizeof(*kmer_data) * no_of_rows);
+			}
+		}
+
+		memset(kmer_data, 0, sizeof(*kmer_data) * no_of_rows);
+		temp_byte_alignment = byte_alignment;
+		uint32 i = 0;
+		uint32 i_in_string = 0;
+		uchar *byte_ptr;
+
+		for (uint32 row_index = 0; row_index < no_of_rows; row_index++)
+		{
+			byte_ptr = reinterpret_cast<uchar*>(&kmer_data[row_index]);
+			byte_ptr += 7;					// shift a pointer towards a MSB
+
+			while (i < kmer_length)
+			{
+				if ((i_in_string == 0) && temp_byte_alignment)				// check if a byte_alignment placed before a prefix is to be skipped
+				{
+					temp_byte_alignment--;
+					i++;
+				}
+				else
+				{
+					c_char = *(iter + i_in_string);
+					c_binary = num_codes[c_char];
+					c_binary = c_binary << 6;		//11000000
+					*byte_ptr = *byte_ptr | c_binary;
+					i++;
+					i_in_string++;
+					if (i_in_string == kmer_length) break;
+				}
+
+				if ((i_in_string == 0) && temp_byte_alignment)				// check if a byte_alignment placed before a prefix is to be skipped
+				{
+					temp_byte_alignment--;
+					i++;
+				}
+				else
+				{
+					c_char = *(iter + i_in_string);
+					c_binary = num_codes[c_char];
+					c_binary = c_binary << 4;
+					*byte_ptr = *byte_ptr | c_binary;
+					i++;
+					i_in_string++;
+					if (i_in_string == kmer_length) break;
+				}
+
+				//!!!if((i == 0) && temp_byte_alignment)	// fix reported by Maciek Dlugosz			// check if a byte_alignment placed before a prefix is to be skipped
+				if ((i_in_string == 0) && temp_byte_alignment)				// check if a byte_alignment placed before a prefix is to be skipped
+				{
+					temp_byte_alignment--;
+					i++;
+				}
+				else
+				{
+					c_char = *(iter + i_in_string);
+					c_binary = num_codes[c_char];
+					c_binary = c_binary << 2;
+					*byte_ptr = *byte_ptr | c_binary;
+					i++;
+					i_in_string++;
+					if (i_in_string == kmer_length) break;
+				}
+
+				c_char = *(iter + i_in_string);
+				c_binary = num_codes[c_char];
+				*byte_ptr = *byte_ptr | c_binary;
+				i++;
+				i_in_string++;
+				if (i_in_string == kmer_length) break;
+
+				if (i % 32 == 0)
+					break; //check if a new "row" is to be started
+				byte_ptr--;
+			}
+		};
+		return true;
+	}
+public:
+	static const char char_codes[];
+	static char num_codes[256]; // NOTE(review): callers test num_codes[...] < 0, which relies on plain char being signed -- not portable to unsigned-char platforms (e.g. ARM)
+	static uchar rev_comp_bytes_LUT[];
+	static uint64 alignment_mask[];
+	struct _si  // static-initializer helper: constructor fills num_codes once at program start
+	{
+		_si()
+		{
+			for (int i = 0; i < 256; i++)
+                num_codes[i] = -1;
+			num_codes['A'] = num_codes['a'] = 0;
+			num_codes['C'] = num_codes['c'] = 1;
+			num_codes['G'] = num_codes['g'] = 2;
+			num_codes['T'] = num_codes['t'] = 3;
+        }
+    } static _init;
+
+
+// ----------------------------------------------------------------------------------
+// The constructor creates kmer for the number of symbols equal to length. 
+// The array kmer_data has the size of ceil((length + byte_alignment) / 32))
+// IN	: length - a number of symbols of a kmer
+// ----------------------------------------------------------------------------------
+	inline CKmerAPI(uint32 length = 0)
+	{
+		if(length)
+		{
+			if(length % 4)
+				byte_alignment = 4 - (length % 4);	
+			else
+				byte_alignment = 0;
+
+			no_of_rows = (((length + byte_alignment) % 32) ? (length + byte_alignment) / 32 + 1 : (length + byte_alignment) / 32); 
+			//no_of_rows = (int)ceil((double)(length + byte_alignment) / 32);
+			kmer_data = new uint64[no_of_rows];
+
+			memset(kmer_data, 0, sizeof(*kmer_data) * no_of_rows);
+		}
+		else
+		{
+			kmer_data = NULL;
+			no_of_rows = 0;
+			byte_alignment = 0;
+		}
+		kmer_length = length;
+	};
+//-----------------------------------------------------------------------
+// The destructor
+//-----------------------------------------------------------------------
+	inline ~CKmerAPI()
+	{
+		if (kmer_data != NULL)
+			delete [] kmer_data;
+	};
+
+//-----------------------------------------------------------------------
+// The copy constructor
+//-----------------------------------------------------------------------
+	inline CKmerAPI(const CKmerAPI &kmer)
+	{
+		kmer_length = kmer.kmer_length;
+		byte_alignment = kmer.byte_alignment;
+		no_of_rows = kmer.no_of_rows;
+		
+		kmer_data = new uint64[no_of_rows];
+			
+		for(uint32 i = 0; i < no_of_rows; i++)
+			kmer_data[i] = kmer.kmer_data[i];
+
+	};
+
+//-----------------------------------------------------------------------
+// The operator =
+//-----------------------------------------------------------------------	
+	inline CKmerAPI& operator=(const CKmerAPI &kmer)
+	{
+		if(kmer.kmer_length != kmer_length)		
+		{
+			if(kmer_length && kmer_data)
+				delete [] kmer_data;
+		
+			kmer_length = kmer.kmer_length;
+			byte_alignment = kmer.byte_alignment;
+			no_of_rows = kmer.no_of_rows;
+		
+			kmer_data = new uint64[no_of_rows];
+		}
+
+		for(uint32 i = 0; i < no_of_rows; i++)
+			kmer_data[i] = kmer.kmer_data[i];
+
+		return *this;
+	};
+
+//-----------------------------------------------------------------------
+// The operator ==
+//-----------------------------------------------------------------------
+	inline bool operator==(const CKmerAPI &kmer)
+	{
+			if(kmer.kmer_length != kmer_length)
+				return false;
+
+			for(uint32 i = 0; i < no_of_rows; i++)
+				if(kmer.kmer_data[i] != kmer_data[i])
+					return false;
+
+			return true;
+
+	};
+
+//-----------------------------------------------------------------------
+// Operator < . If arguments differ in length a result is undefined
+//-----------------------------------------------------------------------
+	inline bool operator<(const CKmerAPI &kmer)
+	{
+			if(kmer.kmer_length != kmer_length)
+				return false;					
+
+			for(uint32 i = 0; i < no_of_rows; i++)
+				if(kmer.kmer_data[i] > kmer_data[i])
+					return true;
+				else
+					if(kmer.kmer_data[i] < kmer_data[i])
+						return false;
+				
+			return false;
+	};
+
+//-----------------------------------------------------------------------
+// Return a symbol of a kmer from an indicated position (numbered from 0).
+// The symbol is returned as an ASCII character A/C/G/T
+// IN	: pos - a position of a symbol
+// RET	: symbol - a symbol placed on a position pos
+//-----------------------------------------------------------------------
+	inline char get_asci_symbol(unsigned int pos)
+	{
+		if(pos >= kmer_length)
+			return 0;
+		
+		uint32 current_row = (pos + byte_alignment) / 32;
+		uint32 current_pos = ((pos + byte_alignment) % 32) * 2;
+		uint64 mask = 0xc000000000000000 >> current_pos;
+		uint64 symbol = kmer_data[current_row] & mask;
+		symbol = symbol >> (64 - current_pos - 2);
+		return char_codes[symbol];
+	
+	};
+
+	//-----------------------------------------------------------------------
+	// Return a symbol of a kmer from an indicated position (numbered from 0)
+	// The symbol is returned as a numerical value 0/1/2/3
+	// IN	: pos - a position of a symbol
+	// RET	: symbol - a symbol placed on a position pos
+	//-----------------------------------------------------------------------
+	inline uchar get_num_symbol(unsigned int pos)
+	{
+		if (pos >= kmer_length)
+			return 0;
+
+		uint32 current_row = (pos + byte_alignment) / 32;
+		uint32 current_pos = ((pos + byte_alignment) % 32) * 2;
+		uint64 mask = 0xc000000000000000 >> current_pos;
+		uint64 symbol = kmer_data[current_row] & mask;
+		symbol = symbol >> (64 - current_pos - 2);
+		uchar* byte_ptr = reinterpret_cast<uchar*>(&symbol);
+		return *byte_ptr;
+
+	};
+
+	//-----------------------------------------------------------------------
+	// Convert kmer into string (an alphabet ACGT)
+	// RET	: string kmer
+	//-----------------------------------------------------------------------
+	inline std::string to_string()
+	{
+		std::string string_kmer;		
+		string_kmer.resize(kmer_length);
+		to_string_impl(string_kmer.begin());	
+		return string_kmer;
+	};
+	//-----------------------------------------------------------------------
+	// Convert kmer into string (an alphabet ACGT). The function assumes enough memory was allocated
+	// OUT	: str - string kmer. 
+	//-----------------------------------------------------------------------
+	inline void to_string(char *str)
+	{
+		to_string_impl(str);
+		str[kmer_length] = '\0';
+	};
+
+	//-----------------------------------------------------------------------
+	// Convert kmer into string (an alphabet ACGT)
+	// OUT 	: str - string kmer
+	//-----------------------------------------------------------------------
+	inline void to_string(std::string &str)
+	{	
+		str.resize(kmer_length);
+		to_string_impl(str.begin());
+	};
+
+	//-----------------------------------------------------------------------
+	// Convert a string of an alphabet ACGT into a kmer of a CKmerAPI
+	// IN	: kmer_string	- a string of an alphabet ACGT
+	// RET	: true			- if succesfull
+	//-----------------------------------------------------------------------
+	inline bool from_string(const char* kmer_string)
+	{
+		uint32 len = 0;
+		for (;  kmer_string[len] != '\0' ; ++len)
+		{
+			if (num_codes[(uchar)kmer_string[len]] == -1)
+				return false;
+		}
+		return from_string_impl(kmer_string, len);
+	}
+
+	//-----------------------------------------------------------------------
+	// Convert a string of an alphabet ACGT into a kmer of a CKmerAPI
+	// IN	: kmer_string	- a string of an alphabet ACGT
+	// RET	: true			- if succesfull
+	//-----------------------------------------------------------------------
+	inline bool from_string(const std::string& kmer_string)
+	{					
+		for (uint32 ii = 0; ii < kmer_string.size(); ++ii)
+		{
+			if (num_codes[(uchar)kmer_string[ii]] == -1)
+				return false;
+		}
+		return from_string_impl(kmer_string.begin(), static_cast<uint32>(kmer_string.length()));		
+	}
+
+	//-----------------------------------------------------------------------
+	// Convert k-mer to its reverse complement
+	//-----------------------------------------------------------------------
+	inline bool reverse()
+	{
+		if (kmer_data == NULL)
+		{
+			return false;
+		}
+
+		// number of bytes used to store the k-mer in the 0-th row
+		const uint32 size_in_byte = ((kmer_length + byte_alignment) / 4) / no_of_rows;
+		uchar* byte1;
+		uchar* byte2;
+
+		if (no_of_rows == 1)
+		{
+			*kmer_data <<= 2 * byte_alignment; // drop the alignment so the LUT can work on whole bytes
+			byte1 = reinterpret_cast<uchar*>(kmer_data)+8 - size_in_byte;
+			byte2 = reinterpret_cast<uchar*>(kmer_data)+7;
+
+			for (uint32 i_bytes = 0; i_bytes < size_in_byte / 2; ++i_bytes) // swap-and-complement bytes from both ends inward
+			{
+				unsigned char temp = rev_comp_bytes_LUT[*byte1];
+				*byte1 = rev_comp_bytes_LUT[*byte2];
+				*byte2 = temp;
+
+				++byte1;
+				--byte2;
+			}
+
+			if (size_in_byte % 2) // middle byte, complemented in place
+			{
+				*byte1 = rev_comp_bytes_LUT[*byte1];
+			}
+		}
+		else
+		{
+			for (uint32 i_rows = no_of_rows - 1; i_rows > 0; --i_rows)
+			{
+				kmer_data[i_rows] >>= 64 - 8 * size_in_byte - 2 * byte_alignment;
+
+				// more significant row
+				uint64 previous = kmer_data[i_rows - 1];
+				previous <<= 8 * size_in_byte + 2 * byte_alignment;
+				kmer_data[i_rows] |= previous;
+
+				byte1 = reinterpret_cast<uchar*>(kmer_data + i_rows);
+				byte2 = reinterpret_cast<uchar*>(kmer_data + i_rows) + 7;
+
+				for (int i_bytes = 0; i_bytes < 4; ++i_bytes)
+				{
+					unsigned char temp = rev_comp_bytes_LUT[*byte1];
+					*byte1 = rev_comp_bytes_LUT[*byte2];
+					*byte2 = temp;
+
+					++byte1;
+					--byte2;
+				}
+			}
+
+			// clear less significant bits
+			kmer_data[0] >>= 64 - 8 * size_in_byte - 2 * byte_alignment;
+			kmer_data[0] <<= 64 - 8 * size_in_byte;
+
+			byte1 = reinterpret_cast<uchar*>(kmer_data)+8 - size_in_byte;
+			byte2 = reinterpret_cast<uchar*>(kmer_data)+7;
+
+			for (uint32 i_bytes = 0; i_bytes < size_in_byte / 2; ++i_bytes)
+			{
+				unsigned char temp = rev_comp_bytes_LUT[*byte1];
+				*byte1 = rev_comp_bytes_LUT[*byte2];
+				*byte2 = temp;
+
+				++byte1;
+				--byte2;
+			}
+
+			if (size_in_byte % 2)
+			{
+				*byte1 = rev_comp_bytes_LUT[*byte1];
+			}
+
+			for (uint32 i_rows = 0; i_rows < no_of_rows / 2; ++i_rows) // reverse the word order
+			{
+				std::swap(kmer_data[i_rows], kmer_data[no_of_rows - i_rows - 1]);
+			}
+		}
+
+		// clear alignment
+		*kmer_data &= alignment_mask[byte_alignment];
+
+		return true;
+	}
+
+//-----------------------------------------------------------------------
+// Counts a signature of an existing kmer
+// IN	: sig_len	- the length of a signature
+// RET	: signature value
+//-----------------------------------------------------------------------
+	 uint32 get_signature(uint32 sig_len)
+	 {
+		 uchar symb;
+		 CMmer cur_mmr(sig_len);
+		 
+		 for(uint32 i = 0; i < sig_len; ++i)
+		 {
+			 symb = get_num_symbol(i);
+			 cur_mmr.insert(symb);
+		 }
+		 CMmer min_mmr(cur_mmr);
+		 for (uint32 i = sig_len; i < kmer_length; ++i) // minimum over all m-mers of the k-mer
+		 {
+			 symb = get_num_symbol(i);
+			 cur_mmr.insert(symb);
+			 
+			 if (cur_mmr < min_mmr)
+				 min_mmr = cur_mmr;
+		 }
+		 return min_mmr.get();
+	 }
+	
+	 
+};
+
+
+#endif
+
+// ***** EOF
diff --git a/src/projects/mts/kmc_api/kmer_defs.h b/src/projects/mts/kmc_api/kmer_defs.h
new file mode 100644
index 0000000..4a88d60
--- /dev/null
+++ b/src/projects/mts/kmc_api/kmer_defs.h
@@ -0,0 +1,54 @@
+/*
+  This file is a part of KMC software distributed under GNU GPL 3 licence.
+  The homepage of the KMC project is http://sun.aei.polsl.pl/kmc
+
+  Authors: Sebastian Deorowicz and Agnieszka Debudaj-Grabysz
+
+  Version: 2.2.0
+  Date   : 2015-04-15
+*/
+
+
+#ifndef _KMER_DEFS_H
+#define _KMER_DEFS_H
+
+#define KMC_VER		"2.2.0"
+#define KMC_DATE	"2015-04-15"
+
+#define MIN(x,y)	((x) < (y) ? (x) : (y))
+
+#ifndef WIN32
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <stdlib.h>
+	#include <math.h>
+	#include <string.h>
+
+	#define _TCHAR	char
+	#define _tmain	main
+
+	#define my_fopen    fopen
+	#define my_fseek    fseek
+	#define my_ftell    ftell
+
+
+	#include <stdio.h>
+	#include <algorithm>
+	#include <iostream>
+	using namespace std;
+
+#else
+	#define my_fopen    fopen
+	#define my_fseek    _fseeki64
+	#define my_ftell    _ftelli64
+#endif
+	//typedef unsigned char uchar;
+
+	typedef int int32;
+	typedef unsigned int uint32;
+	typedef long long int64;
+	typedef unsigned long long uint64;
+	typedef unsigned char uchar;
+#endif
+
+// ***** EOF
diff --git a/src/projects/mts/kmc_api/mmer.cpp b/src/projects/mts/kmc_api/mmer.cpp
new file mode 100644
index 0000000..ed3ea11
--- /dev/null
+++ b/src/projects/mts/kmc_api/mmer.cpp
@@ -0,0 +1,49 @@
+#include "stdafx.h"
+/*
+  This file is a part of KMC software distributed under GNU GPL 3 licence.
+  The homepage of the KMC project is http://sun.aei.polsl.pl/kmc
+  
+  Authors: Sebastian Deorowicz, Agnieszka Debudaj-Grabysz, Marek Kokot
+  
+  Version: 2.2.0
+  Date   : 2015-04-15
+*/
+
+#include "../kmc_api/mmer.h"
+
+
+uint32 CMmer::norm5[];
+uint32 CMmer::norm6[];
+uint32 CMmer::norm7[];
+uint32 CMmer::norm8[];
+
+CMmer::_si CMmer::_init;
+
+
+//--------------------------------------------------------------------------
+CMmer::CMmer(uint32 _len)
+{
+	switch (_len)
+	{
+	case 5:
+		norm = norm5;
+		break;
+	case 6:
+		norm = norm6;
+		break;
+	case 7:
+		norm = norm7;
+		break;
+	case 8:
+		norm = norm8;
+		break;
+	default:
+		break;
+	}
+	len = _len;
+	mask = (1 << _len * 2) - 1;
+	str = 0;
+}
+
+//--------------------------------------------------------------------------
+
diff --git a/src/projects/mts/kmc_api/mmer.h b/src/projects/mts/kmc_api/mmer.h
new file mode 100644
index 0000000..79187f8
--- /dev/null
+++ b/src/projects/mts/kmc_api/mmer.h
@@ -0,0 +1,182 @@
+/*
+  This file is a part of KMC software distributed under GNU GPL 3 licence.
+  The homepage of the KMC project is http://sun.aei.polsl.pl/kmc
+  
+  Authors: Sebastian Deorowicz, Agnieszka Debudaj-Grabysz, Marek Kokot
+  
+  Version: 2.2.0
+  Date   : 2015-04-15
+*/
+
+#ifndef _MMER_H
+#define _MMER_H
+#include "kmer_defs.h"
+
+// *************************************************************************
+// *************************************************************************
+
+
// Represents an m-mer (minimizer/signature candidate): a short 2-bit-encoded
// sliding window over a k-mer, kept in canonical (strand-normalized) form via
// precomputed lookup tables for lengths 5..8.
class CMmer
{
	uint32 str;         // raw 2-bit-packed window contents
	uint32 mask;        // keeps 'str' limited to the 2*len low bits
	uint32 current_val; // canonical value of 'str', i.e. norm[str]
	uint32* norm;       // table matching 'len' (one of norm5..norm8)
	uint32 len;         // m-mer length in symbols
	static uint32 norm5[1 << 10];
	static uint32 norm6[1 << 12];
	static uint32 norm7[1 << 14];	
	static uint32 norm8[1 << 16];

	// KMC's signature filter: rejects low-complexity m-mers (poly-T/AA-rich
	// patterns) so that signatures distribute k-mers into bins more evenly.
	static bool is_allowed(uint32 mmer, uint32 len)
	{
		if ((mmer & 0x3f) == 0x3f)            // TTT suffix
			return false;
		if ((mmer & 0x3f) == 0x3b)            // TGT suffix
			return false;
		if ((mmer & 0x3c) == 0x3c)            // TG* suffix
			return false;

		// Scan interior symbol pairs for an AA dinucleotide.
		for (uint32 j = 0; j < len - 3; ++j)
		if ((mmer & 0xf) == 0)                // AA inside
			return false;
		else
			mmer >>= 2;

		if (mmer == 0)            // AAA prefix
			return false;
		if (mmer == 0x04)        // ACA prefix
			return false;
		if ((mmer & 0xf) == 0)    // *AA prefix
			return false;
	
		return true;
	}

	friend class CSignatureMapper;
	// Static-initialization helper: fills the norm5..norm8 tables before main().
	struct _si
	{			
		// Reverse complement of a 2-bit-packed m-mer of the given length.
		static uint32 get_rev(uint32 mmer, uint32 len)
		{
			uint32 rev = 0;
			uint32 shift = len*2 - 2;
			for(uint32 i = 0 ; i < len ; ++i)
			{
				rev += (3 - (mmer & 3)) << shift;
				mmer >>= 2;
				shift -= 2;
			}
			return rev;
		}

		

		// norm[i] = the smaller of {i, revcomp(i)} among allowed forms; the
		// sentinel 'special' (one past the largest m-mer) marks disallowed ones.
		static void init_norm(uint32* norm, uint32 len)
		{
			uint32 special = 1 << len * 2;
			for(uint32 i = 0 ; i < special ; ++i)
			{				
				uint32 rev = get_rev(i, len);
				uint32 str_val = is_allowed(i, len) ? i : special;
				uint32 rev_val = is_allowed(rev, len) ? rev : special;
				norm[i] = MIN(str_val, rev_val);				
			}
		}

		_si()
		{
			init_norm(norm5, 5);
			init_norm(norm6, 6);
			init_norm(norm7, 7);
			init_norm(norm8, 8);
		}

	}static _init;
public:
	CMmer(uint32 _len);
	inline void insert(uchar symb);
	inline uint32 get() const;
	inline bool operator==(const CMmer& x);
	inline bool operator<(const CMmer& x);
	inline void clear();
	inline bool operator<=(const CMmer& x);
	inline void set(const CMmer& x);
	inline void insert(const char* seq);
	
};
+
+
+
+//--------------------------------------------------------------------------
+inline void CMmer::insert(uchar symb)
+{
+	str <<= 2;
+	str += symb;
+	str &= mask;
+
+	current_val = norm[str];
+}
+
+//--------------------------------------------------------------------------
// Returns the canonical (strand-normalized) value of the current window.
inline uint32 CMmer::get() const
{
	return current_val;
}
+
+//--------------------------------------------------------------------------
// Equality compares canonical values, not the raw window contents.
inline bool CMmer::operator==(const CMmer& x)
{
	return current_val == x.current_val;
}
+
+//--------------------------------------------------------------------------
// Orders m-mers by canonical value; used to select the minimal signature.
inline bool CMmer::operator<(const CMmer& x)
{
	return current_val < x.current_val;
}
+
+//--------------------------------------------------------------------------
// Resets the raw window. NOTE(review): current_val is left untouched here,
// so get() is only meaningful again after the next insert() call.
inline void CMmer::clear()
{
	str = 0;
}
+
+//--------------------------------------------------------------------------
// Non-strict ordering on canonical values.
inline bool CMmer::operator<=(const CMmer& x)
{
	return current_val <= x.current_val;
}
+
+//--------------------------------------------------------------------------
// Copies another m-mer's window and its cached canonical value
// (len/mask/norm are assumed to already match).
inline void CMmer::set(const CMmer& x)
{
	str = x.str;
	current_val = x.current_val;
}
+
+//--------------------------------------------------------------------------
// Loads a whole m-mer at once from 'seq' (numeric 0..3 symbols, not ASCII
// bases) and refreshes the canonical value. Lengths other than 5..8 leave
// 'str' unchanged.
inline void CMmer::insert(const char* seq)
{
	switch (len)
	{
	case 5: 
		str = (seq[0] << 8) + (seq[1] << 6) + (seq[2] << 4) + (seq[3] << 2) + (seq[4]);
		break;
	case 6:
		str = (seq[0] << 10) + (seq[1] << 8) + (seq[2] << 6) + (seq[3] << 4) + (seq[4] << 2) + (seq[5]);
		break;
	case 7:
		str = (seq[0] << 12) + (seq[1] << 10) + (seq[2] << 8) + (seq[3] << 6) + (seq[4] << 4 ) + (seq[5] << 2) + (seq[6]);
		break;
	case 8:
		str = (seq[0] << 14) + (seq[1] << 12) + (seq[2] << 10) + (seq[3] << 8) + (seq[4] << 6) + (seq[5] << 4) + (seq[6] << 2) + (seq[7]);
		break;
	default:
		break;
	}

	current_val = norm[str];
}
+
+
+#endif
\ No newline at end of file
diff --git a/src/projects/mts/kmc_api/stdafx.h b/src/projects/mts/kmc_api/stdafx.h
new file mode 100644
index 0000000..e7d6ecf
--- /dev/null
+++ b/src/projects/mts/kmc_api/stdafx.h
@@ -0,0 +1,4 @@
+#include <cstdio>
+#include <algorithm>
+#include <iostream>
+using namespace std;
diff --git a/src/projects/mts/kmer_multiplicity_counter.cpp b/src/projects/mts/kmer_multiplicity_counter.cpp
new file mode 100644
index 0000000..37d4a62
--- /dev/null
+++ b/src/projects/mts/kmer_multiplicity_counter.cpp
@@ -0,0 +1,256 @@
+#include <string>
+#include <vector>
+#include <set>
+#include <fstream>
+#include <sstream>
+#include <iostream>
+#include <memory>
+#include <algorithm>
+#include <libcxx/sort.hpp>
+#include <boost/optional/optional.hpp>
+#include "getopt_pp/getopt_pp.h"
+#include "kmc_api/kmc_file.h"
+//#include "omp.h"
+#include "io/kmers/mmapped_reader.hpp"
+#include "utils/path_helper.hpp"
+#include "utils/simple_tools.hpp"
+#include "utils/indices/perfect_hash_map_builder.hpp"
+#include "utils/indices/kmer_splitters.hpp"
+#include "logger.hpp"
+
+using std::string;
+using std::vector;
+
+const string KMER_PARSED_EXTENSION = ".bin";
+const string KMER_SORTED_EXTENSION = ".sorted";
+
// Merges per-sample KMC k-mer counting results: keeps only k-mers present in
// at least a given number of samples and builds a perfect-hash index mapping
// each kept k-mer to the offset of its per-sample multiplicity profile.
class KmerMultiplicityCounter {

    // k-mer length and prefix used for all intermediate/output files.
    // NOTE(review): sample_cnt_ is never initialized or read in this class.
    size_t k_, sample_cnt_;
    std::string file_prefix_;

    // Converts a KMC database into a flat binary file of
    // (packed k-mer, count) records; returns the new file's name.
    //TODO: get rid of intermediate .bin file
    string ParseKmc(const string& filename) {
        CKMCFile kmcFile;
        kmcFile.OpenForListing(filename);
        CKmerAPI kmer((unsigned int) k_);
        uint32 count;
        std::string parsed_filename = filename + KMER_PARSED_EXTENSION;
        std::ofstream output(parsed_filename, std::ios::binary);
        while (kmcFile.ReadNextKmer(kmer, count)) {
            RtSeq seq(k_, kmer.to_string());
            seq.BinWrite(output);
            seq_element_type tmp = count;
            output.write((char*) &(tmp), sizeof(seq_element_type));
        }
        output.close();
        return parsed_filename;
    }

    // Sorts the binary (k-mer, count) records in k-mer order via an mmap'ed
    // view, writes them to a new file and removes the unsorted input.
    string SortKmersCountFile(const string& filename) {
        MMappedRecordArrayReader<seq_element_type> ins(filename, RtSeq::GetDataSize(k_) + 1, false);
        libcxx::sort(ins.begin(), ins.end(), array_less<seq_element_type>());
        std::string sorted_filename = filename + KMER_SORTED_EXTENSION;
        std::ofstream out(sorted_filename);
        out.write((char*) ins.data(), ins.data_size());
        out.close();
        remove(filename.c_str());
        return sorted_filename;
    }

    // Reads one (k-mer, count) record from 'infile' into 'res';
    // returns false when the stream is exhausted.
    bool ReadKmerWithCount(std::ifstream& infile, std::pair<RtSeq, uint32>& res) {
        RtSeq seq(res.first.size());
        if (!seq.BinRead(infile)) {
            return false;
        }
        seq_element_type tmp;
        infile.read((char*) &tmp, sizeof(seq_element_type));
        res = {seq, (uint32) tmp};
        return true;
    }

    // N-way merge over the per-sample sorted k-mer files: every k-mer that
    // occurs in at least 'all_min' samples is written to <prefix>.kmer and
    // its per-sample counts (0 for absent samples) to <prefix>.mpl.
    void FilterCombinedKmers(const std::vector<string>& files, size_t all_min) {
        size_t n = files.size();
        vector<std::unique_ptr<ifstream>> infiles;
        infiles.reserve(n);
        for (auto fn : files) {
            INFO("Processing " << fn);
            auto parsed = ParseKmc(fn);
            auto sorted = SortKmersCountFile(parsed);
            infiles.emplace_back(new std::ifstream(sorted));
        }
        vector<std::pair<RtSeq, uint32>> top_kmer(n, {RtSeq(k_), 0});
        vector<bool> alive(n, false);

        for (size_t i = 0; i < n; i++) {
            alive[i] = ReadKmerWithCount(*infiles[i], top_kmer[i]);
        }

        std::ofstream output_kmer(file_prefix_ + ".kmer", std::ios::binary);
        std::ofstream output_cnt(file_prefix_ + ".mpl");

        RtSeq::less3 kmer_less;
        while (true) {
            // Find the lexicographically minimal head k-mer and count how
            // many streams currently carry it.
            boost::optional<RtSeq> min_kmer;
            size_t cnt_min = 0;
            for (size_t i = 0; i < n; ++i) {
                if (alive[i]) {
                    RtSeq& cur_kmer = top_kmer[i].first;
                    if (!min_kmer || kmer_less(cur_kmer, *min_kmer)) {
                        min_kmer = cur_kmer;
                        cnt_min = 0;
                    }
                    if (cur_kmer == *min_kmer) {
                        cnt_min++;
                    }
                }
            }
            if (!min_kmer) {
                break;
            }
            // Emit the k-mer only when enough samples contain it.
            if (cnt_min >= all_min) {
                std::vector<uint32> cnt_vector(n, 0);
                min_kmer.get().BinWrite(output_kmer);
                for (size_t i = 0; i < n; ++i) {
                    if (alive[i] && top_kmer[i].first == *min_kmer) {
                        cnt_vector[i] += top_kmer[i].second;
                    }
                }
                string delim = "";
                for (auto cnt : cnt_vector) {
                    output_cnt << delim << cnt;
                    delim = " ";
                }
                output_cnt << std::endl;
            }
            // Advance every stream whose head equals the processed minimum.
            for (size_t i = 0; i < n; ++i) {
                if (alive[i] && top_kmer[i].first == *min_kmer) {
                    alive[i] = ReadKmerWithCount(*infiles[i], top_kmer[i]);
                }
            }
        }
    }

    // Builds a perfect-hash map from the filtered k-mers (<prefix>.kmer) to
    // offsets into a flat per-sample multiplicity buffer (<prefix>.bpr); the
    // map itself is serialized to <prefix>.kmm.
    void BuildKmerIndex(size_t sample_cnt, const std::string& workdir, size_t nthreads) {
        INFO("Initializing kmer profile index");

        //TODO: extract into a common header
        typedef size_t Offset;
        typedef uint16_t Mpl;
        using namespace debruijn_graph;

        KeyStoringMap<RtSeq, Offset, kmer_index_traits<RtSeq>, InvertableStoring>
            kmer_mpl(k_, workdir);
        InvertableStoring::trivial_inverter<Offset> inverter;

        static const size_t read_buffer_size = 0; //FIXME some buffer size
        DeBruijnKMerKMerSplitter<StoringTypeFilter<InvertableStoring>>
            splitter(kmer_mpl.workdir(), k_, k_, true, read_buffer_size);

        //TODO: get rid of temporary .mker & .mpl files
        splitter.AddKMers(file_prefix_ + ".kmer");

        KMerDiskCounter<RtSeq> counter(kmer_mpl.workdir(), splitter);

        BuildIndex(kmer_mpl, counter, 16, nthreads);

        INFO("Kmer profile fill start");
        //We must allocate the whole buffer for all profiles at once
        //to avoid pointer invalidation after possible vector resize
        const size_t data_size = sample_cnt * kmer_mpl.size();

        std::vector<Mpl> mpl_data;
        mpl_data.reserve(data_size);
        INFO("Allocated buffer of " << data_size << " elements");
        std::ifstream kmers_in(file_prefix_ + ".kmer", std::ios::binary);
        std::ifstream kmers_mpl_in(file_prefix_ + ".mpl");
        while (true) {
            RtSeq kmer(k_);
            kmer.BinRead(kmers_in);
            if (kmers_in.fail()) {
                break;
            }

//            VERIFY(kmer_str.length() == k_);
//            conj_graph_pack::seq_t kmer(k_, kmer_str.c_str());
//            kmer = gp_.kmer_mapper.Substitute(kmer);

            Offset offset = mpl_data.size();
            for (size_t i = 0; i < sample_cnt; ++i) {
                Mpl mpl;
                kmers_mpl_in >> mpl;
                VERIFY(!kmers_mpl_in.fail());
                mpl_data.push_back(mpl);
            }
            //Double-check we haven't invalidated vector views
            VERIFY(mpl_data.size() <= data_size);

            auto kwh = kmer_mpl.ConstructKWH(kmer);
            VERIFY(kmer_mpl.valid(kwh));
            kmer_mpl.put_value(kwh, offset, inverter);
        }

        std::ofstream map_file(file_prefix_ + ".kmm", std::ios_base::binary | std::ios_base::out);
        kmer_mpl.BinWrite(map_file);

        std::ofstream mpl_file(file_prefix_ + ".bpr", std::ios_base::binary | std::ios_base::out);
        mpl_file.write((const char *)&mpl_data[0], mpl_data.size() * sizeof(Mpl));

        INFO("Kmer profile fill finish");
    }

public:
    KmerMultiplicityCounter(size_t k, std::string file_prefix):
        k_(k), file_prefix_(std::move(file_prefix)) {
    }

    // Entry point: filter k-mers shared by >= min_samples samples, then
    // index their multiplicity profiles.
    void CombineMultiplicities(const vector<string>& input_files, size_t min_samples, const string& work_dir, size_t nthreads = 1) {
        FilterCombinedKmers(input_files, min_samples);
        BuildKmerIndex(input_files.size(), work_dir, nthreads);
    }
private:
    DECL_LOGGER("KmerMultiplicityCounter");
};
+
// Prints command-line usage help for kmer_multiplicity_counter to stdout.
void PrintUsageInfo() {
    static const char* const usage_lines[] = {
        "Usage: kmer_multiplicity_counter [options] -f files_dir",
        "Options:",
        "-k - kmer length",
        "-n - sample count",
        "-o - output file prefix",
        "-t - number of threads (default: 1)",
        "-s - minimal number of samples to contain kmer",
        "files_dir must contain two files (.kmc_pre and .kmc_suf) with kmer multiplicities for each sample from 1 to n"
    };
    for (const char* const line : usage_lines) {
        std::cout << line << std::endl;
    }
}
+
// Command-line driver: parses options, derives the per-sample KMC database
// paths (<work_dir>/sample1 .. sampleN) and runs the multiplicity counter.
int main(int argc, char *argv[]) {
    using namespace GetOpt;
    create_console_logger();

    size_t k, sample_cnt, min_samples, nthreads;
    string output, work_dir;

    try {
        GetOpt_pp ops(argc, argv);
        ops.exceptions_all();
        ops >> Option('k', k)
            >> Option('n', sample_cnt)
            >> Option('s', min_samples)
            >> Option('o', output)
            >> Option('t', "threads", nthreads, size_t(1))
            >> Option('f', work_dir)
        ;
    } catch(GetOptEx &ex) {
        // Any missing/malformed option ends up here; show usage and bail out.
        PrintUsageInfo();
        exit(1);
    }

    // Samples are expected at <work_dir>/sample1 .. sample<n> (KMC databases).
    std::vector<string> input_files;
    for (size_t i = 1; i <= sample_cnt; ++i) {
        input_files.push_back(work_dir + "/sample" + ToString(i));
    }

    KmerMultiplicityCounter kmcounter(k, output);
    kmcounter.CombineMultiplicities(input_files, min_samples, work_dir, nthreads);
    return 0;
}
diff --git a/src/projects/mts/log.properties b/src/projects/mts/log.properties
new file mode 100644
index 0000000..3a7d6e2
--- /dev/null
+++ b/src/projects/mts/log.properties
@@ -0,0 +1,10 @@
+default=INFO
+
+#SingleClusterAnalyzer=TRACE
+#ContigAbundanceCounter=TRACE
+
+#EdgeAnnotationPropagator=TRACE
+#ConnectingPathPropagator=TRACE
+#ContigPropagator=TRACE
+#TipPropagator=TRACE
+#AnnotationChecker=TRACE
diff --git a/src/projects/mts/logger.hpp b/src/projects/mts/logger.hpp
new file mode 100644
index 0000000..a8d2b02
--- /dev/null
+++ b/src/projects/mts/logger.hpp
@@ -0,0 +1,11 @@
+#include "utils/logger/log_writers.hpp"
+
// Sets up the project-wide logger with console output; log levels are taken
// from ./log.properties when that file exists, otherwise defaults are used.
void create_console_logger() {
    using namespace logging;

    string log_props_file = "log.properties";

    // Empty string selects the built-in default configuration.
    logger *lg = create_logger(path::FileExists(log_props_file) ? log_props_file : "");
    lg->add_writer(std::make_shared<console_writer>());
    attach_logger(lg);
}
diff --git a/src/projects/mts/mts.py b/src/projects/mts/mts.py
new file mode 100755
index 0000000..b80f145
--- /dev/null
+++ b/src/projects/mts/mts.py
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+from __future__ import (print_function)
+
+import argparse
+import subprocess
+import sys
+import os
+import os.path
+import shutil
+
+#copied from http://stackoverflow.com/questions/431684/how-do-i-cd-in-python/13197763#13197763
class cd:
    """Context manager that temporarily switches the process working directory.

    On entry, chdir()s into the requested path (``~`` is expanded); on exit,
    always restores the directory that was current on entry, even when the
    body raised an exception.
    """
    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, exc_type, exc_value, exc_tb):
        os.chdir(self.savedPath)
+
parser = argparse.ArgumentParser(description="MTS - Metagenomic Time Series")

parser.add_argument("--threads", "-t", type=int, default=8, help="Number of threads")
parser.add_argument("dir", type=str, help="Output directory")
parser.add_argument("--config", "-c", type=str, default="", help="config.yaml to be copied to the directory (unnecessary if config.yaml is already there)")
parser.add_argument("--stats", "-s", action="store_true", help="Calculate stats (when the REFS parameter in config.yaml is provided)")
parser.add_argument("--reuse-assemblies", action="store_true", help="Use existing assemblies (put them in the corresponding folders)")
parser.add_argument("--verbose", "-v", action="store_true", help="Increase verbosity level")

args = parser.parse_args()

# Directory containing this script; the repository root is three levels up.
exec_dir=os.path.dirname(os.path.realpath(sys.argv[0]))
LOCAL_DIR = os.path.realpath(os.path.join(exec_dir, "../../../"))

# Common snakemake invocation; LOCAL_DIR is passed through to the Snakefile.
base_params = ["snakemake", "--directory", os.path.realpath(args.dir), "--cores", str(args.threads), "--config", "LOCAL_DIR" + "=" + LOCAL_DIR]

if args.verbose:
    base_params.extend(["-p", "--verbose"])

# Refuse to clobber an existing config.yaml in the output folder.
if args.config:
    if os.path.exists(os.path.join(args.dir, "config.yaml")):
        print("Config path specified, but config.yaml already exists in output folder " + args.dir)
        sys.exit(239)

if not os.path.exists(args.dir):
    os.makedirs(args.dir)

print("Output folder set to " + args.dir)

if args.config:
    print("Copying config from " + args.config)
    shutil.copy(args.config, args.dir)

# Snakefiles live next to this script, so run snakemake from here.
with cd(exec_dir):
    # NOTE(review): the mutable default is harmless here because the list is
    # never mutated, only concatenated.
    def call_snake(extra_params=[]):
        subprocess.check_call(base_params + extra_params, stdout=sys.stdout, stderr=sys.stderr)
    
    print("Step #1 - Assembly")
    # --touch marks pre-existing assembly outputs as up to date so snakemake
    # reuses them instead of re-running the assembler.
    if args.reuse_assemblies:
        call_snake(["assemble_all", "--touch"])

    call_snake()
    
    if args.stats:
        print("Step #2a - Assembly statistics")
        call_snake(["--snakefile", "Stats.snake", "stats_all"])
    
        print("Step #2b - Reassembly statistics")
        call_snake(["--snakefile", "Stats.snake", "stats_reassembly"])
+
diff --git a/src/projects/mts/prop_binning.cpp b/src/projects/mts/prop_binning.cpp
new file mode 100644
index 0000000..0df9038
--- /dev/null
+++ b/src/projects/mts/prop_binning.cpp
@@ -0,0 +1,128 @@
+//***************************************************************************
+//* Copyright (c) 2015-2016 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "getopt_pp/getopt_pp.h"
+#include "io/reads/io_helper.hpp"
+#include "io/reads/osequencestream.hpp"
+#include "pipeline/graphio.hpp"
+#include "logger.hpp"
+#include "read_binning.hpp"
+#include "propagate.hpp"
+#include "visualization/position_filler.hpp"
+
+using namespace debruijn_graph;
+
// Inserts 'suffix' immediately before the file extension of 'path',
// e.g. add_suffix("dump.fasta", "_1") -> "dump_1.fasta".
std::string add_suffix(const std::string& path, const std::string& suffix) {
    auto ext = path::extension(path);
    return path.substr(0, path.length() - ext.length()) + suffix + ext;
}
+
// Writes every graph edge as a FASTA record (NODE_<id>) to out_edges and,
// for edges that carry binning annotation, the list of their bins to
// out_annotation.
void DumpEdgesAndAnnotation(const Graph& g,
                            const EdgeAnnotation& edge_annotation,
                            const string& out_edges,
                            const string& out_annotation) {
    INFO("Dumping edges to " << out_edges << "; their annotation to " << out_annotation);
    io::osequencestream oss(out_edges);
    AnnotationOutStream annotation_out(out_annotation);
    for (auto it = g.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
        EdgeId e = *it;
        io::SingleRead edge_read("NODE_" + ToString(g.int_id(e)),
                                 g.EdgeNucls(e).str());
        oss << edge_read;
        // Only annotated edges appear in the annotation file.
        auto relevant_bins = edge_annotation.Annotation(e);
        if (!relevant_bins.empty()) {
            annotation_out << ContigAnnotation(GetId(edge_read),
                                               vector<bin_id>(relevant_bins.begin(), relevant_bins.end()));
        }
    }
}
+
// prop_binning driver: loads an assembly graph, propagates contig binning
// annotation over it, optionally dumps the propagated annotation, and then
// (unless disabled with -p) splits each sample's reads by bin.
int main(int argc, char** argv) {
    using namespace GetOpt;

    //TmpFolderFixture fixture("tmp");
    create_console_logger();

    size_t k;
    string saves_path, contigs_path, splits_path, annotation_path;
    vector<string> sample_names, left_reads, right_reads;
    string out_root, propagation_dump;
    vector<bin_id> bins_of_interest;
    bool no_binning;
    try {
        GetOpt_pp ops(argc, argv);
        ops.exceptions_all();
        ops >> Option('k', k)
            >> Option('s', saves_path)
            >> Option('c', contigs_path)
            >> Option('f', splits_path)
            >> Option('a', annotation_path)
            >> Option('n', sample_names)
            >> Option('l', left_reads)
            >> Option('r', right_reads)
            >> Option('o', out_root)
            >> Option('d', propagation_dump, "")
            >> Option('b', bins_of_interest, {})
            >> OptionPresent('p', no_binning);
    } catch(GetOptEx &ex) {
        cout << "Usage: prop_binning -k <K> -s <saves path> -c <contigs path> -f <splits path> "
                "-a <binning annotation> -n <sample names> -l <left reads> -r <right reads> -o <output root> "
                "[-d <propagation info dump>] [-p to disable binning] [-b <bins of interest>*]"  << endl;
        exit(1);
    }

    // Guard against a common CLI mistake: comma-separated bin list.
    for (const auto& bin_id : bins_of_interest) {
        VERIFY_MSG(bin_id.find_last_of(',') == std::string::npos, "Specify bins of interest via space, not comma");
    }

    conj_graph_pack gp(k, "tmp", 1);
    gp.kmer_mapper.Attach();

    INFO("Load graph and clustered paired info from " << saves_path);
    graphio::ScanWithClusteredIndices(saves_path, gp, gp.clustered_indices);

    //Propagation stage
    INFO("Using contigs from " << contigs_path);
    io::FileReadStream contigs_stream(contigs_path);
    io::FileReadStream split_stream(splits_path);

    AnnotationStream annotation_in(annotation_path);

    // Lift the per-contig annotation onto graph edges.
    AnnotationFiller filler(gp, bins_of_interest);
    EdgeAnnotation edge_annotation = filler(contigs_stream, split_stream, annotation_in);

    INFO("Propagation launched");
    AnnotationPropagator propagator(gp);
    propagator.Run(contigs_stream, edge_annotation);
    INFO("Propagation finished");

    if (!propagation_dump.empty()) {
        INFO("Dumping propagation info to " << propagation_dump);
        DumpEdgesAndAnnotation(gp.g, edge_annotation,
                               propagation_dump + ".fasta",
                               propagation_dump + ".ann");
    }

    if (no_binning) {
        INFO("Binning was disabled with -p flag");
        return 0;
    }
    //Binning stage
//    contigs_stream.reset();
//    INFO("Using propagated annotation from " << propagated_path);
//    AnnotationStream binning_stream(propagated_path);
    // One binner pass per sample: reads are assigned to bins via the
    // propagated edge annotation.
    for (size_t i = 0; i < sample_names.size(); ++i) {
        ContigBinner binner(gp, edge_annotation, out_root, sample_names[i]);
        INFO("Initializing binner for " << sample_names[i]);
        auto paired_stream = io::PairedEasyStream(left_reads[i], right_reads[i], false, 0);
        INFO("Running binner on " << left_reads[i] << " and " << right_reads[i]);
        binner.Run(*paired_stream);
        binner.close();
    }

    return 0;
}
diff --git a/src/projects/mts/propagate.cpp b/src/projects/mts/propagate.cpp
new file mode 100644
index 0000000..be650e8
--- /dev/null
+++ b/src/projects/mts/propagate.cpp
@@ -0,0 +1,331 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "utils/simple_tools.hpp"
+
+//#include "pipeline/graphio.hpp"
+#include "pipeline/graph_pack.hpp"
+//#include "io/reads_io/file_reader.hpp"
+#include "modules/simplification/tip_clipper.hpp"
+#include "propagate.hpp"
+#include "visualization.hpp"
+
+namespace debruijn_graph {
+static const size_t EDGE_LENGTH_THRESHOLD = 2000; //edges shorter than this do not seed propagation
+
+//FIXME 2kb edge length threshold might affect tip propagator in undesired way
+class EdgeAnnotationPropagator { //template-method base for per-bin edge set expansion strategies
+    const conj_graph_pack& gp_;
+    const string name_; //human-readable strategy name, used for logging only
+    size_t edge_length_threshold_; //only edges at least this long seed the propagation
+
+protected:
+    const conj_graph_pack& gp() const {
+        return gp_;
+    }
+
+    const Graph& g() const {
+        return gp_.g;
+    }
+
+    virtual set<EdgeId> PropagateEdges(const set<EdgeId>& edges) const = 0; //strategy hook: seed edges -> candidate edges (may include seeds)
+
+public:
+    EdgeAnnotationPropagator(const conj_graph_pack& gp,
+                             const string& name,
+                             size_t edge_length_threshold = EDGE_LENGTH_THRESHOLD) :
+                    gp_(gp),
+                    name_(name),
+                    edge_length_threshold_(edge_length_threshold) {}
+
+    const std::string& name() const {
+        return name_;
+    }
+
+    std::map<bin_id, set<EdgeId>> Propagate(EdgeAnnotation& edge_annotation) const { //returns, per bin, only the NEWLY acquired edges
+        std::map<bin_id, set<EdgeId>> answer;
+        DEBUG("Propagating with propagator: " << name_);
+        for (bin_id bin : edge_annotation.interesting_bins()) {
+            DEBUG("Processing bin " << bin << " with propagator: " << name_);
+            auto init_edges = edge_annotation.EdgesOfBin(bin, edge_length_threshold_);
+            DEBUG("Initial edge cnt " << init_edges.size() << " (edge length threshold " << edge_length_threshold_ << ")");
+            auto raw_propagated = PropagateEdges(init_edges);
+            set<EdgeId> propagated;
+            std::set_difference(raw_propagated.begin(), raw_propagated.end(), //drop the seeds so the answer holds additions only
+                                init_edges.begin(), init_edges.end(),
+                                std::inserter(propagated, propagated.end()));
+            answer[bin] = std::move(propagated);
+        }
+        DEBUG("Finished propagating with propagator: " << name_);
+        return answer;
+    }
+
+    virtual ~EdgeAnnotationPropagator() {}
+private:
+    DECL_LOGGER("EdgeAnnotationPropagator");
+};
+
+//Adopts, for each bin, the edges lying on short connecting paths between edges already in the bin
+class ConnectingPathPropagator : public EdgeAnnotationPropagator {
+    size_t path_length_threshold_; //length bound for the connecting path search
+    size_t path_edge_cnt_; //bound on the number of edges in a connecting path
+    const EdgeAnnotation& debug_annotation_; //original annotation, used for majority voting and diagnostics
+
+    //Majority vote: the single bin annotated on more than half of the edges, or "" if none/ambiguous
+    bin_id DetermineBin(const set<EdgeId>& edges) const {
+        map<bin_id, size_t> cnt_map;
+        for (EdgeId e : edges) {
+            for (auto b : debug_annotation_.Annotation(e)) {
+                cnt_map[b]++;
+            }
+        }
+        bin_id candidate = "";
+        for (auto cnt_el : cnt_map) {
+            if (cnt_el.second > edges.size() / 2) {
+                if (candidate.empty())
+                    candidate = cnt_el.first;
+                else
+                    return ""; //second majority bin (possible with multi-annotated edges): give up
+            }
+        }
+        return candidate;
+    }
+
+    //True if the path carries a long edge annotated with some bin other than base_bin
+    bool BadPath(const vector<EdgeId>& path, bin_id base_bin) const {
+        size_t cnt = 0;
+        for (EdgeId e : path) {
+            //fix: was a hard-coded 2000; keep consistent with the shared seed-edge length threshold
+            if (g().length(e) < EDGE_LENGTH_THRESHOLD)
+                continue;
+            auto ann = debug_annotation_.Annotation(e);
+            if (!ann.empty() &&
+                std::find(ann.begin(), ann.end(), base_bin) == ann.end()) {
+                cnt++;
+            }
+        }
+        return cnt > 0;
+    }
+
+    set<VertexId> CollectEdgeStarts(const set<EdgeId>& edges) const {
+        set<VertexId> answer;
+        for (EdgeId e : edges) {
+            answer.insert(g().EdgeStart(e));
+        }
+        return answer;
+    }
+
+    set<EdgeId> PropagateEdges(const set<EdgeId>& edges) const override {
+        bin_id bin = DetermineBin(edges);
+        if (!bin.empty()) {
+            DEBUG("Bin determined as " << bin);
+        } else {
+            DEBUG("Failed to determine bin");
+        }
+        set<EdgeId> answer;
+        set<VertexId> starts = CollectEdgeStarts(edges);
+        for (EdgeId e : edges) {
+            PathProcessor<Graph> path_searcher(g(), g().EdgeEnd(e), path_length_threshold_);
+            for (VertexId v : starts) {
+                //every edge of every found connecting path is adopted by the bin
+                auto callback = AdapterCallback<Graph>([&](const vector<EdgeId>& path) {
+                    insert_all(answer, path);
+                }, true);
+                TRACE("Launching path search between edge " << g().str(e) << " and vertex "
+                        << g().str(v) << " with length bound " << path_length_threshold_);
+                path_searcher.Process(v, 0, path_length_threshold_, callback, path_edge_cnt_);
+            }
+        }
+        return answer;
+    }
+
+public:
+    ConnectingPathPropagator(const conj_graph_pack& gp,
+                             size_t path_length_threshold,
+                             size_t path_edge_cnt,
+                             const EdgeAnnotation& ann) :
+        EdgeAnnotationPropagator(gp, "ConnectingPath"),
+        path_length_threshold_(path_length_threshold),
+        path_edge_cnt_(path_edge_cnt),
+        debug_annotation_(ann) {}
+
+private:
+    DECL_LOGGER("ConnectingPathPropagator");
+};
+
+//FIXME make threshold coverage-aware
+class PairedInfoPropagator : public EdgeAnnotationPropagator { //pulls in edges linked by sufficiently heavy clustered paired info
+    omnigraph::de::DEWeight weight_threshold_; //minimal point weight for a pairing to count
+    set<EdgeId> PropagateEdges(const set<EdgeId>& edges) const override {
+        set<EdgeId> answer;
+        for (EdgeId e1 : edges) {
+            DEBUG("Searching for paired neighbours of " << g().str(e1));
+            for (const auto& index : gp().clustered_indices)
+                for (auto i : index.Get(e1))
+                    for (auto point : i.second)
+                        if (math::ge(point.weight, weight_threshold_)) {
+                            DEBUG("Adding (" << g().str(e1) << "," << g().str(i.first) << "); " << point);
+                            answer.insert(i.first);
+                        }
+        }
+        return answer;
+    }
+public:
+    PairedInfoPropagator(const conj_graph_pack& gp, omnigraph::de::DEWeight threshold):
+        EdgeAnnotationPropagator(gp, "PairedInfo"), weight_threshold_(threshold) {}
+private:
+    DECL_LOGGER("PairedInfoPropagator");
+};
+
+class ContigPropagator : public EdgeAnnotationPropagator { //spreads a bin to all edges of any contig that shares an edge with it
+public:
+    ContigPropagator(const conj_graph_pack& gp,
+                     io::SingleStream& contigs) :
+        EdgeAnnotationPropagator(gp, "ContigPropagator"),
+        contigs_(contigs),
+        mapper_(MapperInstance(gp))
+    {}
+protected:
+    set<EdgeId> PropagateEdges(const set<EdgeId>& edges) const override {
+        contigs_.reset(); //rewind: the stream is shared and may have been consumed earlier
+        set<EdgeId> answer;
+        io::SingleRead contig;
+        while (!contigs_.eof()) {
+            contigs_ >> contig;
+            auto edges_of_contig = mapper_->MapRead(contig).simple_path();
+            for (EdgeId e : edges_of_contig) {
+                if (edges.count(e)) { //one shared edge is enough to adopt the whole contig
+                    DEBUG("Edge " << gp().g.str(e) << " belongs to the contig #" <<
+                            contig.name() << " of " << edges_of_contig.size() << " edges");
+                    insert_all(answer, edges_of_contig);
+                    break;
+                }
+            }
+        }
+        return answer;
+    }
+
+private:
+    io::SingleStream& contigs_; //non-owning; reset() on every propagation pass
+    shared_ptr<SequenceMapper<Graph>> mapper_;
+
+    DECL_LOGGER("ContigPropagator");
+};
+
+class TipPropagator : public EdgeAnnotationPropagator { //annotates tip edges hanging off vertices that join two same-bin edges
+
+public:
+    TipPropagator(const conj_graph_pack& gp) :
+        EdgeAnnotationPropagator(gp, "TipPropagator"), tipper_(gp.g) {}
+
+protected:
+    set<EdgeId> PropagateEdges(const set<EdgeId>& edges) const override {
+        set<EdgeId> answer;
+        for (EdgeId e1 : edges) {
+            auto v = g().EdgeEnd(e1);
+            auto neighbours = g().OutgoingEdges(v);
+            auto e2_it = std::find_if(neighbours.begin(), neighbours.end(), [&](EdgeId e2){return edges.count(e2);}); //successor from the same bin
+            if (e2_it == neighbours.end()) {
+                TRACE(e1.int_id() << " has no neighbours from the same bin");
+                continue;
+            }
+            TRACE("Finding tips between " << e1.int_id() << " and " << e2_it->int_id());
+            for (EdgeId posTip : g().IncidentEdges(v)) {
+                if (edges.count(posTip)) //already in the bin
+                    continue;
+                TRACE("Checking " << posTip.int_id() << "...");
+                if (tipper_.Check(posTip)) {
+                    TRACE("A tip is found!");
+                    answer.insert(posTip);
+                }
+            }
+        }
+        return answer;
+    }
+
+private:
+    TipCondition<Graph> tipper_; //topology-based tip predicate from the simplification module
+    DECL_LOGGER("TipPropagator");
+};
+
+class AnnotationChecker { //counts long edges that would receive a second, conflicting bin assignment
+    const Graph& g_;
+    const EdgeAnnotation& edge_annotation_;
+    size_t edge_length_threshold_; //edges shorter than this are ignored by Check
+public:
+    AnnotationChecker(const Graph& g,
+                      const EdgeAnnotation& edge_annotation,
+                      size_t edge_length_threshold = EDGE_LENGTH_THRESHOLD) :
+        g_(g),
+        edge_annotation_(edge_annotation),
+        edge_length_threshold_(edge_length_threshold) {
+    }
+
+    size_t Check(bin_id bin, const set<EdgeId>& propagated_edges) { //returns the number of conflicting long edges (diagnostics only)
+        DEBUG("Checking edges to be annotated with " << bin);
+        size_t answer = 0;
+        for (EdgeId e : propagated_edges) {
+            if (g_.length(e) < edge_length_threshold_)
+                continue;
+            auto ann = edge_annotation_.Annotation(e);
+            for (auto b : ann) {
+                if (b != bin) { //already carries a different bin
+                    DEBUG("Edge " << g_.str(e) << " already was annotated as " << b);
+                    ++answer;
+                    break;
+                }
+            }
+        }
+        return answer;
+    }
+
+private:
+    DECL_LOGGER("AnnotationChecker");
+};
+
+void AnnotationPropagator::Run(io::SingleStream& /*contigs*/, 
+                     EdgeAnnotation& edge_annotation
+                     /*const string& annotation_out_fn*/) { //applies each propagator once, in order, extending the annotation in place
+    std::vector<std::shared_ptr<EdgeAnnotationPropagator>> propagator_pipeline {
+        std::make_shared<ConnectingPathPropagator>(gp_, 8000, 10, edge_annotation), //path length bound 8000, at most 10 edges
+        std::make_shared<TipPropagator>(gp_), 
+        std::make_shared<PairedInfoPropagator>(gp_, 10.)};//, paired-info weight threshold 10
+//        std::make_shared<ContigPropagator>(gp_, contigs)};//,
+//        std::make_shared<ConnectingPathPropagator>(gp_, 8000, 10, edge_annotation),
+//        std::make_shared<ContigPropagator>(gp_, contigs),
+//        std::make_shared<TipPropagator>(gp_)};
+
+    AnnotationChecker checker(gp_.g, edge_annotation);
+
+    for (const auto& bin_id : edge_annotation.interesting_bins()) { //baseline conflict counts before any propagation
+        size_t problem_cnt = checker.Check(bin_id, edge_annotation.EdgesOfBin(bin_id, EDGE_LENGTH_THRESHOLD));
+        DEBUG("Bin " << bin_id << " had " << problem_cnt << " problems");
+    }
+
+    for (auto prop_ptr : propagator_pipeline) {
+        DEBUG("Propagating with: " << prop_ptr->name());
+        auto propagation_map = prop_ptr->Propagate(edge_annotation); //per bin: newly acquired edges only
+
+        DEBUG("Extending " << propagation_map.size() << " bins after propagation with: " << prop_ptr->name());
+        for (const auto& bin_prop : propagation_map) {
+            const auto& bin_id = bin_prop.first;
+            const auto& edges = bin_prop.second;
+            DEBUG("Extending bin " << bin_id << " with "
+                      << edges.size() << " edges and their conjugates");
+            size_t problem_cnt = checker.Check(bin_id, edges); //conflicts are counted but do not block the extension
+            DEBUG("Propagation of bin " << bin_id << " with " << prop_ptr->name()
+                      << " lead to " << problem_cnt << " binning problems");
+            edge_annotation.StickAnnotation(edges, bin_id);
+        }
+        DEBUG("Applied bin extensions from propagator " << prop_ptr->name());
+    }
+}
+
+}
diff --git a/src/projects/mts/propagate.hpp b/src/projects/mts/propagate.hpp
new file mode 100644
index 0000000..1c3ce0f
--- /dev/null
+++ b/src/projects/mts/propagate.hpp
@@ -0,0 +1,29 @@
+//***************************************************************************
+//* Copyright (c) 2015-2016 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "io/reads/single_read.hpp"
+#include "io/reads/io_helper.hpp"
+#include "io/reads/osequencestream.hpp"
+#include "annotation.hpp"
+
+namespace debruijn_graph {
+
+class AnnotationPropagator { //drives the propagation pipeline over an assembly graph pack
+    const conj_graph_pack& gp_;
+
+public:
+    AnnotationPropagator(const conj_graph_pack& gp) :
+                     gp_(gp) {
+    }
+
+    //Runs all propagators, extending edge_annotation in place (contigs stream currently unused)
+    void Run(io::SingleStream& contigs, EdgeAnnotation& edge_annotation);
+
+private:
+    //fix: logger was mislabelled "AnnotationChecker" (copy-paste), misattributing log output
+    DECL_LOGGER("AnnotationPropagator");
+};
+
+}
diff --git a/src/projects/mts/read_binning.cpp b/src/projects/mts/read_binning.cpp
new file mode 100644
index 0000000..ac2dea2
--- /dev/null
+++ b/src/projects/mts/read_binning.cpp
@@ -0,0 +1,90 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "utils/simple_tools.hpp"
+#include "utils/logger/log_writers.hpp"
+
+#include "pipeline/graphio.hpp"
+#include "io/reads/file_reader.hpp"
+#include "read_binning.hpp"
+
+namespace debruijn_graph {
+
+set<bin_id> ContigBinner::RelevantBins(const io::SingleRead& r) const { //bins annotated on any edge of the read's mapping path
+    return edge_annotation_.RelevantBins(mapper_->MapRead(r).simple_path());
+}
+
+void ContigBinner::Init(bin_id bin) { //creates <out_root>/<bin>/ and opens the paired FASTQ writers for it
+    string out_dir = out_root_ + "/" + ToString(bin) + "/";
+    path::make_dirs(out_dir);
+    out_streams_.insert(make_pair(bin, make_shared<io::OPairedReadStream>(out_dir + sample_name_ + "_1.fastq",
+                                                                          out_dir + sample_name_ + "_2.fastq")));
+}
+
+void ContigBinner::Run(io::PairedStream& paired_reads) { //routes each read pair to every bin either mate maps to
+    io::PairedRead paired_read;
+    while (!paired_reads.eof()) {
+        paired_reads >> paired_read;
+        set<bin_id> bins;
+        insert_all(bins, RelevantBins(paired_read.first()));
+        insert_all(bins, RelevantBins(paired_read.second()));
+        for (auto bin : bins) {
+            if (out_streams_.find(bin) == out_streams_.end()) { //output files are opened lazily on first use
+                Init(bin);
+            }
+            (*(out_streams_[bin])) << paired_read;
+        }
+    }
+}
+
+};
+
+//todo make it take dataset info
+/*
+int main(int argc, char** argv) {
+    using namespace debruijn_graph;
+
+    if (argc < 9) {
+        cout << "Usage: read_binning <K> <saves path> <contigs path> <contigs binning info> "
+                "<left reads> <right reads> <output root> <sample name> (<bins of interest>)*"  << endl;
+        exit(1);
+    }
+
+    //TmpFolderFixture fixture("tmp");
+    create_console_logger();
+    size_t k = lexical_cast<size_t>(argv[1]);
+    string saves_path = argv[2];
+    string contigs_path = argv[3];
+    string contigs_binning_path = argv[4];
+    string left_reads = argv[5];
+    string right_reads = argv[6];
+    string out_root = argv[7];
+    string sample_name = argv[8];
+
+    std::vector<bin_id> bins_of_interest;
+    for (int i = 9; i < argc; ++i) {
+        bins_of_interest.push_back(argv[i]);
+    }
+
+    conj_graph_pack gp(k, "tmp", 0);
+    gp.kmer_mapper.Attach();
+    INFO("Load graph from " << saves_path);
+    graphio::ScanGraphPack(saves_path, gp);
+
+    ContigBinner binner(gp, bins_of_interest);
+
+    auto contigs_stream_ptr = make_shared<io::FileReadStream>(contigs_path);
+    AnnotationStream binning_stream(contigs_binning_path);
+
+    binner.Init(out_root, sample_name, *contigs_stream_ptr, binning_stream);
+
+    auto paired_stream = io::PairedEasyStream(left_reads, right_reads, false, 0);
+    binner.Run(*paired_stream);
+    binner.close();
+    return 0;
+}
+*/
diff --git a/src/projects/mts/read_binning.hpp b/src/projects/mts/read_binning.hpp
new file mode 100644
index 0000000..87aeadd
--- /dev/null
+++ b/src/projects/mts/read_binning.hpp
@@ -0,0 +1,92 @@
+//***************************************************************************
+//* Copyright (c) 2015-2016 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+#pragma once
+
+#include "annotation.hpp"
+#include "io/reads/io_helper.hpp"
+
+namespace io {
+
+//Writes single reads to a file in 4-line FASTQ format
+class OSingleReadStream {
+    std::ofstream os_;
+
+public:
+    OSingleReadStream(const std::string& fn) :
+        os_(fn) {
+    }
+
+    OSingleReadStream& operator<<(const SingleRead& read) {
+        //'\n' instead of std::endl: avoid flushing the stream on every record
+        os_ << "@" << read.name() << '\n';
+        os_ << read.GetSequenceString() << '\n';
+        os_ << "+" << '\n';
+        os_ << read.GetPhredQualityString() << '\n';
+        return *this;
+    }
+
+    void close() {
+        os_.close();
+    }
+};
+
+class OPairedReadStream { //writes read pairs as two synchronized FASTQ files (left/right mates)
+    OSingleReadStream l_os_;
+    OSingleReadStream r_os_;
+
+public:
+    OPairedReadStream(const std::string& l_fn, const std::string& r_fn) :
+        l_os_(l_fn), r_os_(r_fn) {
+    }
+
+    OPairedReadStream& operator<<(const PairedRead& read) {
+        l_os_ << read.first(); //mates keep the same relative order in both files
+        r_os_ << read.second();
+        return *this;
+    }
+
+    void close() {
+        l_os_.close();
+        r_os_.close();
+    }
+};
+
+}
+
+namespace debruijn_graph {
+
+class ContigBinner { //splits read pairs into per-bin FASTQ files according to the edge annotation
+    const conj_graph_pack& gp_;
+    const EdgeAnnotation& edge_annotation_;
+    std::string out_root_; //output root; one subdirectory per bin
+    std::string sample_name_; //prefix of the produced FASTQ files
+    shared_ptr<SequenceMapper<Graph>> mapper_;
+
+    map<bin_id, std::shared_ptr<io::OPairedReadStream>> out_streams_; //lazily opened per-bin writers
+
+    set<bin_id> RelevantBins(const io::SingleRead& r) const;
+
+    void Init(bin_id bin); //opens the output stream pair for a new bin
+
+public:
+    ContigBinner(const conj_graph_pack& gp, 
+                 const EdgeAnnotation& edge_annotation,
+                 const std::string& out_root,
+                 const std::string& sample_name) :
+                     gp_(gp),
+                     edge_annotation_(edge_annotation),
+                     out_root_(out_root),
+                     sample_name_(sample_name),
+                     mapper_(MapperInstance(gp)) {
+    }
+
+    void Run(io::PairedStream& paired_reads);
+
+    void close() { //destroys all writers, flushing and closing the files
+        out_streams_.clear();
+    }
+};
+
+}
diff --git a/src/modules/empty.cpp b/src/projects/mts/scripts/__init__.py
similarity index 100%
rename from src/modules/empty.cpp
rename to src/projects/mts/scripts/__init__.py
diff --git a/src/projects/mts/scripts/calc_kmers_mpl.py b/src/projects/mts/scripts/calc_kmers_mpl.py
new file mode 100755
index 0000000..26382cf
--- /dev/null
+++ b/src/projects/mts/scripts/calc_kmers_mpl.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+
+import os
+import argparse
+
def parse_args():
    """Parse command-line options for the k-mer multiplicity filter driver."""
    parser = argparse.ArgumentParser(description="Kmers mpl filter")
    parser.add_argument("-om", "--one-min", default=3, type=int, help="min kmer mpl in one sample")
    parser.add_argument("-am", "--all-min", default=3, type=int, help="min kmer mpl in all samples")
    parser.add_argument("-kl", "--kmer-len", default=31, type=int, help="kmer length")
    parser.add_argument("samples_dir", help="directory with samples")
    parser.add_argument("output", help="output files prefix")
    return parser.parse_args()

def calc_mpl(args):
    """Build and launch the external kmer_count_filter binary over every file in samples_dir."""
    # NOTE(review): creating a missing samples_dir just yields an empty run;
    # presumably a missing directory should be an error -- confirm intent.
    if not os.path.exists(args.samples_dir):
        os.makedirs(args.samples_dir)

    files = [f for f in os.listdir(args.samples_dir)
             if os.path.isfile(os.path.join(args.samples_dir, f))]

    # NOTE(review): hardcoded developer path; parameterize before wider use.
    cmd = "/home/toxa31/work/algorithmic-biology/assembler/src/kmer_count_filter/kmer_count_filter -kl {} -one-min {} -all-min {}".format(
        args.kmer_len, args.one_min, args.all_min)

    for f in files:
        cmd = cmd + " " + args.samples_dir + "/" + f

    cmd = cmd + " " + args.output

    print(cmd)

    # NOTE(review): os.system with concatenated paths breaks on spaces/shell
    # metacharacters; consider subprocess.run([...]) with an argument list.
    os.system(cmd)

def main():
    calc_mpl(parse_args())

# Bug fix: guard the entry point so importing this module does not trigger a run.
if __name__ == "__main__":
    main()
\ No newline at end of file
diff --git a/src/projects/mts/scripts/canopy_launch.sh b/src/projects/mts/scripts/canopy_launch.sh
new file mode 100755
index 0000000..5f17acc
--- /dev/null
+++ b/src/projects/mts/scripts/canopy_launch.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+if [ "$#" -lt 3 ]; then
+    echo "Usage: script.sh <canopy.in> <canopy.out> <canopy.prof> [thread_cnt = 4]"
+    exit
+fi
+
+thread_cnt=4
+if [ "$#" -ge 4 ]; then
+    thread_cnt=$4
+fi
+
+/home/snurk/soft/mgs-canopy-algorithm/src/cc.bin -n $thread_cnt -i $1 -o $2 -c $3 #--max_canopy_dist 0.1 --max_close_dist 0.4 --max_merge_dist 0.05 --min_step_dist 0.01 --max_num_canopy_walks 3 --stop_fraction 1 --canopy_size_stats_file stat --filter_min_obs 1 --filter_max_dominant_obs 1.0
+
+#/home/snurk/soft/canopy/cc.bin -n 32 -i $1 -o bin_canopy -c prof_canopy --max_canopy_dist 0.1 --max_close_dist 0.4 --max_merge_dist 0.05 --min_step_dist 0.01 --max_num_canopy_walks 3 --stop_fraction 1 --canopy_size_stats_file stat --filter_min_obs 1 --filter_max_dominant_obs 1.0
+
+#/home/ygorshkov/Projects/canopy/cc.bin -n 32 -i canopy_mod.in -o bin_canopy -c prof_canopy --max_canopy_dist 0.1 --max_close_dist 0.4 --max_merge_dist 0.1 --min_step_dist 0.005 --max_num_canopy_walks 5 --stop_fraction 1 --canopy_size_stats_file stat
diff --git a/src/projects/mts/scripts/choose_samples.py b/src/projects/mts/scripts/choose_samples.py
new file mode 100755
index 0000000..cd58c54
--- /dev/null
+++ b/src/projects/mts/scripts/choose_samples.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+from __future__ import (print_function)
+
+import glob
+from operator import itemgetter
+from os import path
+import subprocess
+import sys
+
# Pick, per CAG, the smallest set of sufficiently abundant samples and
# concatenate their binned reads into left/right FASTQ files.
if len(sys.argv) < 3:
    print("Usage: choose_samples.py <canopy.prof> <binning dir> [CAGS+]")
    exit(1)

PROF = sys.argv[1]
DIR = sys.argv[2]
CAGS = None
# Bug fix: accept ANY number of trailing CAG ids (was '== 4', i.e. exactly one).
if len(sys.argv) >= 4:
    CAGS = set(sys.argv[3:])
DESIRED_ABUNDANCE = 50    # stop collecting once this mean abundance is reached
MIN_ABUNDANCE = 4         # individual samples below this are ignored
MIN_TOTAL_ABUNDANCE = 20  # CAGs whose chosen samples are jointly scarcer are skipped

#Assuming that samples are enumerated consecutively from 1 to N
with open(PROF) as prof_file:
    for line in prof_file:
        params = line.split()
        CAG = params[0]
        if CAGS and CAG not in CAGS:
            continue
        # Bug fix: materialize the profile; under Python 3 a bare map() iterator
        # would be exhausted by the first pass and is not indexable below.
        profile = list(map(float, params[1:]))

        print("Profile of", CAG, ":", profile)

        weighted_profile = list((i, ab)
            for i, ab in enumerate(profile) if ab >= MIN_ABUNDANCE and path.exists("{}/{}/sample{}_1.fastq".format(DIR, CAG, i + 1)))
        weighted_profile.sort(key = itemgetter(1))

        total = 0  # renamed from 'sum' to avoid shadowing the builtin
        samples = []
        #If we have overabundant samples, use the least.
        try:
            i = next(x for x, _ in weighted_profile if profile[x] >= DESIRED_ABUNDANCE)
            total = profile[i]
            samples = [i + 1]
        except StopIteration:
            #If there isn't any, collect from samples, starting from the largest
            for i, _ in reversed(weighted_profile):
                total += profile[i]
                samples.append(i + 1)
                if total >= DESIRED_ABUNDANCE:
                    break

        print("Chosen samples are", samples, "with total mean abundance", total)
        if total < MIN_TOTAL_ABUNDANCE:
            print(CAG, "is too scarce; skipping")
            continue

        for suf, name in [("1", "left"), ("2", "right")]:
            reads = ["{}/{}/sample{}_{}.fastq".format(DIR, CAG, sample, suf) for sample in samples]
            with open("{}/{}/{}.fastq".format(DIR, CAG, name), "w") as out:
                subprocess.check_call(["cat"] + reads, stdout=out)
diff --git a/src/projects/mts/scripts/combine_contigs.py b/src/projects/mts/scripts/combine_contigs.py
new file mode 100755
index 0000000..16b448f
--- /dev/null
+++ b/src/projects/mts/scripts/combine_contigs.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+from __future__ import print_function
+import sys
+import os
+import re
+from Bio import SeqIO
+
# Concatenate FASTA files, prefixing every sequence id with the sample name
# parsed from the file path; -r additionally rewrites ',' to '~' in ids.
replace = False

if sys.argv[1] == "-r":
    replace = True
    files = sys.argv[2:]
else:
    files = sys.argv[1:]

sample_re = re.compile(r"sample\d+")

output = sys.stdout

for fname in files:
    sample = sample_re.search(fname).group(0)
    for seq in SeqIO.parse(fname, "fasta"):
        new_id = seq.id
        if replace:
            new_id = new_id.replace(",", "~")
        seq.id = "{}-{}".format(sample, new_id)
        seq.description = ""
        SeqIO.write(seq, output, "fasta")
diff --git a/src/projects/mts/scripts/common.py b/src/projects/mts/scripts/common.py
new file mode 100644
index 0000000..4146665
--- /dev/null
+++ b/src/projects/mts/scripts/common.py
@@ -0,0 +1,121 @@
+from __future__ import print_function
+try:
+    from future_builtins import zip
+except:
+    pass
+
+import os
+import os.path
try:
    import yaml
    def load_dict(input):
        """Load a {key: value} mapping from a YAML stream."""
        # Fix: safe_load avoids arbitrary object construction on untrusted files,
        # and plain yaml.load() without a Loader was removed in PyYAML >= 6.
        return yaml.safe_load(input)
    def dump_dict(dict, output):
        """Serialize a mapping to a YAML stream."""
        yaml.dump(dict, output)
except ImportError:  # fix: was a bare except; only a missing PyYAML should trigger the fallback
    def load_dict(input):
        """Parse 'key: value' lines into a dict of strings (PyYAML-less fallback)."""
        def load_pairs():
            for line in input:
                # NOTE(review): maxsplit=2 silently drops text after a second ':' --
                # maxsplit=1 was probably intended; confirm before changing.
                params = line.split(":", 2)
                yield (params[0].strip(), params[1].strip())
        return dict(load_pairs())
    def dump_dict(dict, output):
        """Write a mapping as 'key: value' lines (PyYAML-less fallback)."""
        for k, v in dict.items():
            print(k, ": ", v, sep="", file=output)
+
FASTA_EXTS = {".fasta", ".fa", ".fna", ".fsa", ".fastq", ".fastq.gz", ".fq", ".fq.gz", ".fna.gz"}
def gather_paths(path, basename=False):
    """Yield FASTA/FASTQ files found directly under *path*.

    With basename=True, yield (name_without_extension, full_path) pairs instead
    of bare paths.
    """
    for entry in os.listdir(path):
        stem = os.path.basename(entry)
        for ext in FASTA_EXTS:
            if not stem.endswith(ext):
                continue
            full = os.path.join(path, entry)
            yield (stem[:-len(ext)], full) if basename else full
+
def detect_reads(dir):
    """Return the (at most) two lexicographically smallest read files in *dir* — the left/right pair."""
    found = gather_paths(dir)
    return sorted(found)[:2]
+
#Autodetect references
def gather_refs(data):
    """Yield (ref_name, path) pairs from a path, a directory, an '@file' of
    name->path entries, or a list of any of these (recursively)."""
    if type(data) is list:
        for item in data:
            for ref in gather_refs(item):
                yield ref
    elif data.startswith("@"):
        with open(data[1:]) as desc:
            for ref in load_dict(desc).items():
                yield ref
    elif os.path.isdir(data):
        for pair in gather_paths(data, True):
            yield pair
    else:
        yield (os.path.splitext(os.path.basename(data))[0], data)
+
def get_id(internal_id, sample):
    """Turn an assembler contig id like 'NODE_5_length_...' into 'sample-5'."""
    number = internal_id.split("_", 2)[1]
    return "{}-{}".format(sample, number)
+
def load_annotation(file, normalize=True):
    """Read 'contig : bin [bin ...]' lines into {contig_id: set_of_bins}.

    With normalize=True contig ids are rewritten via get_id() using the sample
    name derived from the file name; repeated contig lines are unioned.
    """
    res = dict()
    sample, _ = os.path.splitext(os.path.basename(file))
    with open(file) as input:
        for line in input:
            info = line.split(" : ")
            id = get_id(info[0], sample) if normalize else info[0]
            res.setdefault(id, set()).update(info[1].split())
    return res
+
class Row:
    """A single table row, addressable by column name via a shared name->index map."""
    def __init__(self, data, colnames):
        self.data = data          # cell values for this row
        self.colnames = colnames  # column name -> index into data

    def __getitem__(self, index):
        col = self.colnames[index]
        return self.data[col]
+
class Table:
    """Simple separated-values table with optional first-row/first-column headers."""
    def __init__(self):
        self.data = []        # list of rows, each a list of str cells
        self.colnames = None  # column name -> index (or index -> index without headers)
        self.rownames = None  # row name -> index (or index -> index without headers)
        self.rows = 0

    @staticmethod
    def read(filepath, sep="\t", headers=False):
        """Parse *filepath* into a Table; with headers=True the first row names
        the columns and the first cell of each row names the row."""
        table = Table()
        with open(filepath) as handle:
            for line in handle:
                fields = line.strip("\n").split(sep)
                if not table.colnames:
                    table.rownames = dict()
                    if headers:
                        table.colnames = dict(zip(fields[1:], range(len(fields))))
                        continue
                    table.colnames = dict((i, i) for i in range(len(fields)))
                if headers:
                    table.rownames[fields[0]] = table.rows
                    table.data.append(fields[1:])
                else:
                    table.rownames[table.rows] = table.rows
                    table.data.append(fields)
                table.rows += 1
        return table

    def __getitem__(self, index):
        return Row(self.data[self.rownames[index]], self.colnames)

    def zip_with(self, other, method):
        """Invoke method(rowname, colname, own_cell, other_cell) for every cell."""
        for rowname, i in self.rownames.items():
            for colname, j in self.colnames.items():
                other_cell = other.data[other.rownames[rowname]][other.colnames[colname]]
                method(rowname, colname, self.data[i][j], other_cell)
diff --git a/src/projects/mts/scripts/filter_nucmer.py b/src/projects/mts/scripts/filter_nucmer.py
new file mode 100755
index 0000000..eae66a1
--- /dev/null
+++ b/src/projects/mts/scripts/filter_nucmer.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+from __future__ import print_function
+
+import re
+import sys
+from os import path
+
+def print_usage():
+    print("For a sample assembly aligned to a reference, outputs only contigs which were aligned more than <threshold> percent of their length total, and that percent.")
+    print("Usage: filter_nucmer.py <nucmer coords filtered> <output file> <length> <threshold>")
+    print("Parameters:")
+    print("<length> is minimal contig length (default: INF)")
+    print("<threshold> is the minimal total alignment of a contig (0-100%)")
+
+if len(sys.argv) != 5:  # all four arguments are mandatory
+    print_usage()
+    sys.exit(1)
+
+nucmer_output_fn = sys.argv[1]
+output_fn = sys.argv[2]
+min_length = int(sys.argv[3])  # NOTE(review): usage text says 'default: INF' but the argument is required
+threshold = float(sys.argv[4])  # minimal aligned percentage of a contig (0-100)
+
+if not path.exists(nucmer_output_fn):
+    print("File {} doesn't exist".format(nucmer_output_fn))
+    sys.exit(2)
+
+with open(nucmer_output_fn, "r") as nucmer_output:
+    with open(output_fn, "w") as output:
+        align_data = re.compile("\d+ \d+ \| \d+ \d+ \| \d+ (\d+) \| [\d.]+ \| [^ ]+ NODE_(\d+)_length_(\d+)")  # groups: (1) alignment length, (2) contig number, (3) contig length
+        contig = None  # number of the contig currently being accumulated
+        contig_len = 0
+        align_len = 0  # total aligned bases of the current contig so far
+        def process_contig():  # emit the accumulated contig if its aligned fraction beats the threshold; reads the module-level accumulators above
+            per = 100.0 * align_len / contig_len
+            if per > threshold and contig_len >= min_length:
+                print("{}\t{}\t{}".format(contig, contig_len, per), file=output)
+                return align_len
+            return 0
+        for line in nucmer_output:
+            res = align_data.search(line)
+            if res is None:  # header/separator line, not an alignment row
+                continue
+            new_contig = res.group(2)
+            if contig != new_contig:  # switched to a new contig: flush the previous one
+                if contig is not None:
+                    process_contig()
+                contig = new_contig
+                contig_len = int(res.group(3))
+                align_len = 0
+            #Assuming that all alignments of the same contig are consequent
+            align_len += int(res.group(1))
+        #Print the last contig separately
+        process_contig()
diff --git a/src/projects/mts/scripts/gather_stats.py b/src/projects/mts/scripts/gather_stats.py
new file mode 100755
index 0000000..a65c1a5
--- /dev/null
+++ b/src/projects/mts/scripts/gather_stats.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+import pandas
+from pandas import DataFrame
+
+from math import isnan
+import os.path
+import sys
+
# Usage: gather_stats.py <metaquast output dir>; writes a per-bin summary TSV
# (best reference, genome fraction, purity, NGA50, misassemblies) to stdout.
quast_dir = sys.argv[1]

res_table = DataFrame(columns=["bin", "ref", "GF", "purity", "NGA50", "misassemblies"])
# Genome-fraction table: rows are references ("Assemblies"), columns are bins.
gf_table = pandas.read_table(os.path.join(quast_dir, "summary", "TSV", "Genome_fraction_(%).tsv"), dtype=str).set_index("Assemblies")
gfs = gf_table.apply(pandas.to_numeric, errors="coerce")
# For every bin, pick the reference with the highest genome fraction.
best_ref = gfs.apply(lambda col: col.idxmax())

# NOTE(review): Series.iteritems() and DataFrame.append() are removed in
# modern pandas; fine for the pandas of this era, but a porting hazard.
for bin, ref in best_ref.iteritems():
    # idxmax() yields NaN (a float) for an all-NaN column, i.e. no reference
    # matched this bin at all.
    if type(ref) is float:
        row = {"bin": bin, "GF": "-", "ref": "unknown", "purity": "-", "NGA50": "-", "misassemblies": "-"}
    else:
        all_stats = pandas.read_table(os.path.join(quast_dir, "runs_per_reference", ref, "report.tsv"), index_col=0)
        col = all_stats.get(bin)
        # Purity: aligned share of the bin's total length, in percent.
        purity = 100 - float(col["Unaligned length"]) / float(col["Total length"]) * 100
        row = {"bin": bin, "GF": col["Genome fraction (%)"], "ref": ref, "purity": "{0:.2f}".format(purity),
               "NGA50": col["NGA50"], "misassemblies": col["# misassemblies"]}
    res_table = res_table.append(row, ignore_index=True)

res_table.to_csv(sys.stdout, index=False, sep="\t")
diff --git a/src/projects/mts/scripts/gen_samples.py b/src/projects/mts/scripts/gen_samples.py
new file mode 100755
index 0000000..f975b73
--- /dev/null
+++ b/src/projects/mts/scripts/gen_samples.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+from __future__ import print_function
+
+import argparse
+import os
+import os.path
+import random
+import shutil
+import subprocess
+import sys
+from common import gather_refs, dump_dict
+from scipy.stats import expon
+
def gen_profile(args):
    """Print a (reference x sample) abundance profile table to stdout.

    Each line is "<ref name> <abundance sample 1> ... <abundance sample N>".
    Abundances are drawn from a uniform or exponential distribution with
    scale args.scale.
    """
    # Pick the abundance generator. The original left rand() undefined for
    # the (default!) "uni" distribution, crashing with a NameError below.
    if args.distribution == "uni":
        def rand():
            return random.randint(0, args.scale)
    elif args.distribution == "exp":
        def rand():
            return int(expon.rvs(scale=args.scale))

    refs = dict(gather_refs(args.references))
    if args.dump_desc:
        # Optionally record the reference name -> path mapping.
        with open(args.dump_desc, "w") as desc:
            dump_dict(refs, desc)
    for ref in refs:
        print(ref, end=" ")
        for _ in range(args.samples):
            print(rand(), end=" ")
        print()
+
def gen_samples(args):
    """Simulate paired-end reads for every (reference, sample) in the profile.

    Reads args.profile ("<ref> <abundance per sample>..." lines) and runs
    wgsim once per pair, appending to sample<i>/r{1,2}.fastq in args.out_dir.
    """
    refs = dict(gather_refs(args.references.split(",")))
    try:
        os.mkdir(args.out_dir)
    except OSError:
        pass  # directory already exists

    read_len = args.read_length
    # Replacement quality line for wgsim's constant-quality reads (see the
    # "IIIII" hack below).
    adj_qual = "2" * read_len + "\n"

    with open(args.profile) as input:
        first_line = True
        for line in input:
            params = line.split()
            ref_name = params[0]
            ref_path = refs.get(ref_name)
            if not ref_path:
                print("Warning: no reference provided for", ref_name)
                continue
            for i, abundance in enumerate(map(int, params[1:]), start=1):
                # Approximate the reference length by its file size; the read
                # count scales linearly with the requested abundance.
                ref_len = os.stat(ref_path).st_size
                reads = ref_len * abundance // read_len
                print("Generating", reads, "reads for subsample", i, "of", ref_name)
                sample_dir = os.path.join(args.out_dir, "sample" + str(i))
                if first_line:
                    # First reference processed: recreate each sample dir.
                    shutil.rmtree(sample_dir, ignore_errors=True)
                    subprocess.check_call(["mkdir", "-p", sample_dir])

                temp_1 = sample_dir + ".tmp.r1.fastq"
                temp_2 = sample_dir + ".tmp.r2.fastq"
                # -S i: fixed per-sample seed keeps runs reproducible.
                subprocess.check_call(["wgsim", "-N", str(reads), "-r", "0", "-1", str(read_len), "-2", str(read_len), "-d", "300", "-s", "10", "-e", "{:.2f}".format(args.error_rate), "-S", str(i), ref_path, temp_1, temp_2], stdout=subprocess.DEVNULL)

                print("Merging temporary files")
                for temp, out in [(temp_1, os.path.join(sample_dir, "r1.fastq")), (temp_2, os.path.join(sample_dir, "r2.fastq"))]:
                    with open(temp) as input, open(out, "a") as output:
                        for line in input:
                            if line.startswith("IIIII"): #TODO: remove this hack
                                output.write(adj_qual)
                            else:
                                output.write(line)
                    os.remove(temp)
            print()
            first_line = False
+
# Command-line front end: "prof" generates an abundance profile, "gen"
# simulates reads from an existing profile.
parser = argparse.ArgumentParser(description="Metagenomic Time Series Simulator")
parser.add_argument("--references", "-r", type=str, help="Comma-separated list of references, or a directory with them, or a desc file with reference paths prepended with @", required=True)
subparsers = parser.add_subparsers()

gen_profile_args = subparsers.add_parser("prof", help="Generate a profile for the reference set")
gen_profile_args.add_argument("--dump-desc", "-d", type=str, help="Dump description file with reference paths")
gen_profile_args.add_argument("--samples", "-n", type=int, help="Sample count", default=1)
gen_profile_args.add_argument("--scale", "-s", type=int, help="Distribution scale", default=20)
gen_profile_args.add_argument("--distribution", "-t", choices=["uni", "exp"], help="Distribution type", default="uni")
gen_profile_args.set_defaults(func=gen_profile)

gen_samples_args = subparsers.add_parser("gen", help="Generate reads using a profile")
gen_samples_args.add_argument("--out-dir", "-o", type=str, help="Output directory. Will be totally overwritten!")
gen_samples_args.add_argument("--read-length", "-l", type=int, help="Read length", default=100)
gen_samples_args.add_argument("--error-rate", "-e", type=float, help="Base error rate", default=0)
gen_samples_args.add_argument("profile", type=str, help="File with reference profiles")
gen_samples_args.set_defaults(func=gen_samples)

args = parser.parse_args()
# Subcommands are optional in Python 3's argparse: invoking the script with no
# subcommand used to crash with AttributeError (args has no "func").
if not hasattr(args, "func"):
    parser.error("a subcommand (prof or gen) is required")
args.func(args)
diff --git a/src/projects/mts/scripts/make_input.py b/src/projects/mts/scripts/make_input.py
new file mode 100755
index 0000000..ae6984c
--- /dev/null
+++ b/src/projects/mts/scripts/make_input.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+from __future__ import print_function
+try:
+    from itertools import izip as zip
+except ImportError:
+    pass
+
+import argparse
+import os
+import sys
+
# CLI: binner type, output path, profile directory and the sample names whose
# .id/.mpl profile pairs will be merged.
parser = argparse.ArgumentParser(description="Binner input formatter")
parser.add_argument("--type", "-t", type=str, help="Binner type (canopy or concoct)", default="canopy")
parser.add_argument("--output", "-o", type=str, help="Output file")
parser.add_argument("--dir", "-d", type=str, help="Directory with profiles (pairs of .id .mpl files)")
parser.add_argument("samples", type=str, nargs="+", help="Sample names")

args = parser.parse_args()
+
class CanopyFormatter:
    """Writes binner input in canopy format: "<contig> <profile>" per line."""

    def __init__(self):
        pass

    def header(self, file, samples):
        # Canopy input has no header line.
        pass

    def profile(self, file, contig, profile):
        # Write to the stream passed in; the original relied on the
        # module-level "out" global instead of its "file" parameter.
        print(contig, profile, file=file)
+
class ConcoctFormatter:
    """Writes binner input as a TSV table for CONCOCT."""

    def __init__(self):
        pass

    def header(self, file, samples):
        # One "cov_mean_<sample>" column per sample after the contig id.
        # Write to the passed-in stream, not the module-level "out" global
        # the original used.
        print("\t".join(["contig"] + ["cov_mean_" + sample for sample in samples]), file=file)

    def profile(self, file, contig, profile):
        # CONCOCT cannot handle commas in ids, so they are masked with "~".
        print(contig.replace(",", "~"), profile.replace(" ", "\t"), sep="\t", file=file)
+
+formatters = {"canopy": CanopyFormatter(), "concoct": ConcoctFormatter()}
+formatter = formatters[args.type]
+
+with open(args.output, "w") as out:
+    formatter.header(out, args.samples)
+    for sample in args.samples:
+        id_file = "{}/{}.id".format(args.dir, sample)
+        mpl_file = "{}/{}.mpl".format(args.dir, sample)
+
+        print("Processing abundances from %s" % id_file)
+
+        with open(id_file, "r") as ctg_id, open(mpl_file, "r") as ctg_mpl:
+            for cid, cmpl in zip(ctg_id, ctg_mpl):
+                formatter.profile(out, sample + "-" + cid.strip(), cmpl.strip())
diff --git a/src/projects/mts/scripts/make_points_matrix.py b/src/projects/mts/scripts/make_points_matrix.py
new file mode 100755
index 0000000..875462b
--- /dev/null
+++ b/src/projects/mts/scripts/make_points_matrix.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+
+import random
+
+ctg = open("canopy/contigs.in", "r")
+ctr = open("canopy/clusters.out", "r")
+
+out = open("canopy/points_matrix.csv", "w")
+
+ctg_to_ctr = dict()
+
+while True:
+	s = ctr.readline().strip()
+	if (s == ""):
+		break
+	a = s.split()
+	ctr_id = a[0][3:]
+
+	if (random.randint(1, 25) == 1):
+		ctg_to_ctr[a[1]] = ctr_id
+
+while True:
+	s = ctg.readline().strip()
+	if s == "":
+		break
+
+	a = s.split()
+	if (a[0] in ctg_to_ctr):
+		out.write(ctg_to_ctr[a[0]])
+		for x in a[1:]:
+			out.write("," + x)
+
+		out.write("\n")
+
+out.close()
\ No newline at end of file
diff --git a/src/projects/mts/scripts/parse_output.py b/src/projects/mts/scripts/parse_output.py
new file mode 100755
index 0000000..17c44bd
--- /dev/null
+++ b/src/projects/mts/scripts/parse_output.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+from __future__ import print_function
+
+import argparse
+import os.path
+
# CLI: binner type, output directory for per-sample .ann files, input table.
argparser = argparse.ArgumentParser(description="Binner output formatter")
argparser.add_argument("--type", "-t", type=str, help="Binner type (canopy or concoct)", default="canopy")
argparser.add_argument("--output", "-o", type=str, help="Output directory with annotations")
argparser.add_argument("input", type=str, help="File with binning info")
+
class Parser:
    """Base for binner-output parsers.

    Subclasses implement parse(line) -> ("<sample>-<contig>", bin_id); add()
    accumulates results into samples_annotation as
    {sample: {contig: [bin ids]}}.
    """

    def __init__(self):
        # sample name -> {contig id -> list of bin ids}
        self.samples_annotation = dict()

    def add(self, line):
        """Parse one line and record its bin id under (sample, contig)."""
        sample_contig, bin_id = self.parse(line)
        pieces = sample_contig.split('-', 1)
        sample, contig = pieces[0], pieces[1]
        per_sample = self.samples_annotation.setdefault(sample, dict())
        per_sample.setdefault(contig, list()).append(bin_id)
+
class CanopyParser(Parser):
    """Parses canopy output: "<bin id> <sample-contig id> ..." per line."""

    def parse(self, line):
        # Whitespace-split tokens need no further stripping.
        tokens = line.split()
        return tokens[1], tokens[0]
+
class ConcoctParser(Parser):
    """Parses CONCOCT output: "<sample-contig id>,<bin id>" per line.

    "~" in the id stands for "," (masked by make_input.py's formatter).
    """

    def parse(self, line):
        pieces = line.split(",", 1)
        return pieces[0].replace("~", ","), pieces[1].strip()
+
+parsers = {"canopy": CanopyParser(), "concoct": ConcoctParser()}
+
+args = argparser.parse_args()
+parser = parsers[args.type]
+
+with open(args.input, "r") as input_file:
+    for line in input_file:
+        parser.add(line)
+
+for sample, annotation in parser.samples_annotation.items():
+    with open(os.path.join(args.output, sample + ".ann"), "w") as sample_out:
+        annotation = parser.samples_annotation[sample]
+
+        for contig in annotation:
+            print(contig, ":", " ".join(annotation[contig]), file=sample_out)
diff --git a/src/projects/mts/scripts/pca.R b/src/projects/mts/scripts/pca.R
new file mode 100644
index 0000000..1d41f86
--- /dev/null
+++ b/src/projects/mts/scripts/pca.R
@@ -0,0 +1,77 @@
+library(stringr)
+
# Normalize contig ids: keep the "<sample>-" prefix and the numeric part of
# the NODE name (e.g. "sample9-NODE_12_length_..." -> "sample9-12"), then
# drop duplicated rows.
format_ids <- function(table) {
  table$contig <- paste0(str_extract(table$contig, "\\w+\\d+-"), str_replace(str_extract(table$contig, "NODE_\\d+"), "NODE_", ""))
  unique(table)
}
+
# Join canopy input (contig + multiplicity columns, renamed mlt1..mltN) with
# canopy output (cluster id per contig) on the normalized contig id.
load_binning <- function(canopy_in, canopy_out) {
  data <- read.table(canopy_in)
  names(data) <- c('contig', sapply(seq(1, dim(data)[2]-1, 1),
                                    function(x) {paste('mlt', x, sep='')}))
  data <- format_ids(data)
  binned <- read.table(canopy_out)
  names(binned) <- c('clust', 'contig')
  binned <- format_ids(binned)
  merge(x=data, y=binned, by='contig')
}
+
# Select binned contigs for plotting. Without int_contigs: up to 50 contigs
# from each of the first 10 clusters. With int_contigs: only the contigs
# listed in that file (expected columns: contig, length, alignment, ref).
load_clusters <- function(canopy_in, canopy_out, int_contigs) {
  data <- load_binning(canopy_in, canopy_out)
  if (missing(int_contigs)) {
    pieces <- split(data, data$clust)[1:10]
    lims <- lapply(pieces, function(x) head(x, 50))
    do.call(rbind, c(lims, list(make.row.names=FALSE)))
  } else {
    interesting <- read.table(int_contigs)
    names(interesting) <- c('contig', 'length', 'alignment', 'ref')
    droplevels(merge(x=data, y=interesting, by='contig'))
  }
}
+
# Principal component analysis over the multiplicity (mlt*) columns only.
do_prc <- function(clusters) {
  prcomp(~ ., data = clusters[, grep('mlt', colnames(clusters))])
}
+
# Scatter-plot the first two principal components coloured by cluster, with
# each cluster's label drawn at its per-cluster median. When `image` is
# given, render to a PNG file instead of the active device.
print_clusters <- function(pr, clust, image) {
  to_file <- !missing(image)
  if (to_file)
    png(filename=image, width=1024, height=768)
  lev <- levels(factor(clust))
  cols <- 1:length(lev)
  #layout(rbind(1,2), heights=c(7,1))
  plot(pr$x, col = as.numeric(clust), xlim=c(-100, 200), ylim=c(-50,50))
  a <- split(as.data.frame(pr$x), clust)
  for (l in lev) {
    x <- a[[l]]
    text(median(x$PC1), median(x$PC2), l)
  }
  legend("center", "bottom", legend=lev, col=cols, pch=1)
  # The PNG device was opened above but dev.off() was commented out, so the
  # file was never flushed/closed; close it iff we opened it.
  if (to_file)
    dev.off()
}
+
+#For debugging
# Load a hard-coded local dataset, plot it, and return the prcomp result.
local_data <- function() {
  clusters <- load_clusters("/Volumes/Chihua-Sid/mts/out/sample9.in",
                            "/Volumes/Chihua-Sid/mts/out/sample9.out",
                            "/Volumes/Chihua-Sid/mts/out/70p_3.log")

  prc_data <- do_prc(clusters)
  print_clusters(prc_data, clusters$clust)
  prc_data
}
+
# CLI: pca.R <canopy input> <canopy output> [<interesting contigs>] <image>
args <- commandArgs(trailingOnly = TRUE)
in_fn <- args[1]
out_fn <- args[2]
# NOTE(review): with fewer than 3 arguments args[3] is NA (not missing), so
# print_clusters() would receive NA as the image path -- confirm callers
# always pass 3 or 4 arguments.
if (length(args) < 4) {
  image_out <- args[3]
  clusters <- load_clusters(in_fn, out_fn)
} else {
  cont_fn <- args[3]
  image_out <- args[4]
  clusters <- load_clusters(in_fn, out_fn, cont_fn)
}

print(clusters[1:10,])
prc_data <- do_prc(clusters)
print_clusters(prc_data, clusters$clust, image_out)
diff --git a/src/projects/mts/scripts/ref_stats.sh b/src/projects/mts/scripts/ref_stats.sh
new file mode 100755
index 0000000..59dcb8d
--- /dev/null
+++ b/src/projects/mts/scripts/ref_stats.sh
@@ -0,0 +1,63 @@
#!/bin/bash
# Per-reference alignment stats for a set of assemblies via metaQUAST.
# For every reference, collect the length-filtered aligned contigs of each
# sample into <out_dir>/<ref>.ctgs plus a combined ref_summary.txt, then
# print the assembly names and genome fractions from each metaQUAST report.
#
# NOTE(review): the original shebang was "#/bin/bash" (missing '!'), so the
# script silently ran under the invoking shell instead of bash.

if [ "$#" -lt 3 ]; then
    echo "Usage: identify.sh <assemblies_folder> <refs_folder> <out_dir>"
    exit 1
fi

CTG_LENGTH_THR=5000      # ignore contigs shorter than this
process_cnt=4
thread_cnt=8
assemblies_folder=$1
refs_folder=$2
#canopy_out=$3
out_dir=$3

folder=$out_dir/metaquast

export LC_ALL=C
mkdir -p $out_dir

~/git/quast/metaquast.py --debug -R $refs_folder -o $out_dir/metaquast $assemblies_folder/*.fasta

rm -rf $out_dir/ref_summary.txt

for ref in $refs_folder/*.fasta ; do
    echo "Processing reference $ref"
    ref_name=$(basename "$ref")
    ref_name="${ref_name%.*}"

    rm -rf $out_dir/${ref_name}.ctgs

    for sample in $assemblies_folder/*.fasta ; do
        sample_name=$(basename "$sample")
        sample_name="${sample_name%.*}"
        # Contigs of this sample that metaQUAST aligned to this reference.
        aligned=$out_dir/metaquast/quast_corrected_input/${sample_name}_to_${ref_name}.fasta
        ~/git/ngs_scripts/contig_length_filter.py $CTG_LENGTH_THR $aligned $out_dir/long.fasta.tmp
        ~/git/ngs_scripts/contig_info.py $out_dir/long.fasta.tmp $out_dir/ctg.info.tmp
        # Replace the generic "ID_" prefix with the sample name.
        sed_command="s/ID_/${sample_name}-/g"
        grep -Eo "ID_.*$" $out_dir/ctg.info.tmp | sed -e $sed_command >> $out_dir/${ref_name}.ctgs
        rm $out_dir/long.fasta.tmp
        rm $out_dir/ctg.info.tmp
    done

    # Tag every contig line with the reference name and append to the summary.
    sed 's/$/ '"${ref_name}"'/g' $out_dir/${ref_name}.ctgs >> $out_dir/ref_summary.txt

    report=$out_dir/metaquast/runs_per_reference/$ref_name/report.txt

    grep "Assembly" $report
    grep "Genome fraction" $report
done

echo "Finished"
diff --git a/src/projects/mts/scripts/split_bins.py b/src/projects/mts/scripts/split_bins.py
new file mode 100755
index 0000000..dea8914
--- /dev/null
+++ b/src/projects/mts/scripts/split_bins.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python
+from __future__ import print_function
+
+import os
+from os import path
+import sys
+from Bio import SeqIO
+import common
+import subprocess
+
def print_usage():
    # Usage helper for the command-line check below.
    print("Usage: split_bins.py <contigs> <binning info> <output directory>")

# The usage helper was previously dead code: the argument count was never
# checked and a missing argument surfaced as a bare IndexError.
if len(sys.argv) != 4:
    print_usage()
    sys.exit(1)

contigs = sys.argv[1]
sample, _ = path.splitext(path.basename(contigs))
out_dir = sys.argv[3]

# contig id -> list of bin (CAG) ids.
binning = common.load_annotation(sys.argv[2], False)

# Remove any output from a previous run for this sample.
subprocess.call("rm -f {}/{}-*.fasta".format(out_dir, sample), shell=True)

for seq in SeqIO.parse(contigs, "fasta"):
    seq_id = seq.id
    # Prefix the contig id with the sample name to keep ids unique across bins.
    seq.id = sample + "-" + seq_id
    seq.description = ""
    # A contig may belong to several bins; append it to each bin's FASTA.
    for cag in binning.get(seq_id, []):
        with open(path.join(out_dir, "{}-{}.fasta".format(sample, cag)), "a") as output:
            SeqIO.write(seq, output, "fasta")
diff --git a/src/projects/mts/stats.cpp b/src/projects/mts/stats.cpp
new file mode 100644
index 0000000..603da47
--- /dev/null
+++ b/src/projects/mts/stats.cpp
@@ -0,0 +1,194 @@
+/*
+ * stats.cpp
+ *
+ *  Created on: 3 Dec 2015
+ *      Author: idmit
+ */
+
+#include "pipeline/graphio.hpp"
+#include "pipeline/graph_pack.hpp"
+#include "utils/simple_tools.hpp"
+#include "utils/path_helper.hpp"
+#include "utils/logger/log_writers.hpp"
+#include "math/xmath.h"
+#include <iostream>
+#include <vector>
+#include "io/reads/multifile_reader.hpp"
+#include "io/reads/splitting_wrapper.hpp"
+#include "io/reads/modifying_reader_wrapper.hpp"
+#include "io/reads/vector_reader.hpp"
+#include "io/reads/file_reader.hpp"
+#include "annotation.hpp"
+#include "visualization.hpp"
+#include "visualization/position_filler.hpp"
+#include "modules/simplification/tip_clipper.hpp"
+#include "getopt_pp/getopt_pp.h"
+
+using namespace debruijn_graph;
+
// Pull the next single read from the stream; the caller guarantees (and we
// VERIFY) that the stream is not exhausted.
io::SingleRead ReadSequence(io::SingleStream& reader) {
    VERIFY(!reader.eof());
    io::SingleRead read;
    reader >> read;
    return read;
}
+
+io::SingleRead ReadGenome(const string& genome_path) {
+    path::CheckFileExistenceFATAL(genome_path);
+    auto genome_stream_ptr = std::make_shared<io::FileReadStream>(genome_path);
+    return ReadSequence(*genome_stream_ptr);
+}
+
// Build an edge annotation for the graph by streaming contigs/splits through
// an AnnotationFiller together with the binning file at annotation_path.
EdgeAnnotation LoadAnnotation(const conj_graph_pack& gp,
                              const vector<bin_id>& bins_of_interest,
                              io::SingleStream& contigs_stream,
                              io::SingleStream& splits_stream,
                              const string& annotation_path) {
    AnnotationFiller filler(gp, bins_of_interest);
    AnnotationStream annotation_stream(annotation_path);
    return filler(contigs_stream, splits_stream, annotation_stream);
}
+
// Accumulator of (number of edges, total edge length), zero-initialized.
// NOTE(review): publicly inherits std::pair for convenience; a small struct
// with named fields would be clearer.
class BinnedInfo : public pair<size_t, size_t> {
public:
    BinnedInfo(): pair(0, 0) {}
};
+
+void add_edge_info(BinnedInfo& info, size_t edge_length) {
+    ++info.first;
+    info.second += edge_length;
+}
+
+ostream& operator<<(ostream& str, const BinnedInfo& info) {
+    str << info.first << "\t" << info.second;
+    return str;
+}
+
// Route all INFO-and-above log messages to the console.
void create_console_logger() {
    logging::logger *log = logging::create_logger("", logging::L_INFO);
    log->add_writer(std::make_shared<logging::console_writer>());
    logging::attach_logger(log);
}
+
+int main(int argc, char** argv) {
+    create_console_logger();
+
+    using namespace GetOpt;
+
+    size_t k;
+    string saves_path, contigs_path, splits_path, edges_path;
+    vector<string> genomes_path;
+    string annotation_in_fn, prop_annotation_in_fn;
+    string table_fn, graph_dir;
+    vector<bin_id> bins_of_interest;
+
+    try {
+        GetOpt_pp ops(argc, argv);
+        ops.exceptions_all();
+        ops >> Option('k', k)
+            >> Option('s', saves_path)
+            >> Option('r', genomes_path)
+            >> Option('c', contigs_path)
+            >> Option('f', splits_path)
+            >> Option('a', annotation_in_fn)
+            >> Option('e', edges_path)
+            >> Option('p', prop_annotation_in_fn)
+            >> Option('o', table_fn)
+            //>> Option('d', graph_dir, "")
+            >> Option('b', bins_of_interest, {})
+        ;
+    } catch(GetOptEx &ex) {
+        cout << "Usage: stats -k <K> -s <saves path> -r <genomes path>+ "
+                "-f <splits_path> -c <contigs_path> -a <init binning info> -e <edges_path> -p <propagated binning info> "
+                "-o <stats table> [-d <graph directory> (currently disabled)] [-b (<bins of interest>)+]"
+             << endl;
+        exit(1);
+    }
+    //TmpFolderFixture fixture("tmp");
+
+    conj_graph_pack gp(k, "tmp", 0);
+    gp.kmer_mapper.Attach();
+    INFO("Load graph from " << saves_path);
+    graphio::ScanGraphPack(saves_path, gp);
+    gp.edge_pos.Attach();
+
+    ofstream output(table_fn);
+
+    output << "Reference\t"
+           << "Aligned edges\tAlignment length\t"
+           << "Binned edges\tBinned length\t"
+           << "Unbinned edges\tUnbinned length\t"
+           << "Pre-binned edges\tPre-binned length\t"
+           << "Propagated edges\tPropagated length" << endl;
+
+    for (const auto genome_path : genomes_path) {
+        auto ref_name = path::basename(genome_path);
+        io::SingleRead genome = ReadGenome(genome_path);
+
+        visualization::position_filler::FillPos(gp, genome_path, "", true);
+
+        io::FileReadStream contigs_stream(contigs_path);
+        io::FileReadStream splits_stream(splits_path);
+        EdgeAnnotation edge_annotation = LoadAnnotation(
+            gp, bins_of_interest, contigs_stream, 
+            splits_stream, annotation_in_fn);
+
+        io::FileReadStream edges_stream(edges_path);
+        io::FileReadStream edges_stream2(edges_path);
+        EdgeAnnotation prop_edge_annotation = LoadAnnotation(
+            gp, bins_of_interest, 
+            edges_stream, edges_stream2, 
+            prop_annotation_in_fn);
+
+        shared_ptr<SequenceMapper<Graph>> mapper(MapperInstance(gp));
+
+        BinnedInfo pre_binned_info, prop_binned_info, binned_info,
+                   unbinned_info, total_info;
+
+        auto genome_graph_path = mapper->MapRead(genome);
+        std::set<EdgeId> unbinned_edges;
+
+        gp.EnsurePos();
+        for (size_t i = 0; i < genome_graph_path.size(); ++i) {
+            EdgeId e = genome_graph_path[i].first;
+            auto range = genome_graph_path[i].second.mapped_range;
+            add_edge_info(total_info, gp.g.length(e));
+            if (edge_annotation.Annotation(e).empty()) {
+                if (prop_edge_annotation.Annotation(e).empty()) {
+                    // Only check for prop_annotation is necessary
+                    if (unbinned_edges.count(e) == 0) {
+                        unbinned_edges.insert(e);
+                        add_edge_info(unbinned_info, range.size());
+                        /*std::cout << e.int_id() << "\t"
+                                  << gp.g.length(e) << "\t"
+                                  << range.size() << std::endl;*/
+                        if (!graph_dir.empty()) {
+                            std::string dot_export_path =
+                                graph_dir + "/" + ref_name + "/" + std::to_string(e.int_id()) + ".dot";
+                            PrintColoredAnnotatedGraphAroundEdge(
+                                gp, e, prop_edge_annotation, dot_export_path);
+                        }
+                    }
+                } else {
+                    DEBUG(e.int_id() << " was propagated\n");
+                    add_edge_info(prop_binned_info, gp.g.length(e));
+                    add_edge_info(binned_info, gp.g.length(e));
+                }
+            } else {
+                add_edge_info(pre_binned_info, gp.g.length(e));
+                if (prop_edge_annotation.Annotation(e).empty()) {
+                    WARN(e.int_id() << " was lost during propagation\n");
+                } else {
+                    add_edge_info(binned_info, gp.g.length(e));
+                }
+            }
+        }
+
+        output << ref_name         << "\t"
+               << total_info       << "\t"
+               << binned_info      << "\t"
+               << unbinned_info    << "\t"
+               << pre_binned_info  << "\t"
+               << prop_binned_info << endl;
+    }
+}
diff --git a/src/projects/mts/test.py b/src/projects/mts/test.py
new file mode 100755
index 0000000..8c0c19f
--- /dev/null
+++ b/src/projects/mts/test.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+from __future__ import print_function
+
+import argparse
+import os
+import os.path
+import re
+import shutil
+import sys
+import subprocess
+from traceback import print_exc
+import yaml
+
+from scripts.common import Table
+
+#Log class, use it, not print
class Log:
    """Accumulates a transcript of messages while echoing each to stdout."""

    text = ""  # transcript collected so far

    def log(self, s):
        """Plain message: echoed with a trailing newline."""
        self.text = self.text + s + "\n"
        sys.stdout.write(s + "\n")

    def warn(self, s):
        """Warning: prefixed; echoed without a trailing newline and flushed."""
        msg = "WARNING: " + s
        self.text = self.text + msg + "\n"
        sys.stdout.write(msg)
        sys.stdout.flush()

    def err(self, s):
        """Error: prefixed; transcript and echo both end with a newline."""
        msg = "ERROR: " + s + "\n"
        self.text = self.text + msg
        sys.stdout.write(msg)
        sys.stdout.flush()

    def print_log(self):
        """Dump the whole transcript to stdout."""
        print(self.text)

    def get_log(self):
        """Return the transcript collected so far."""
        return self.text

log = Log()
+
+# Taken from teamcity.py
+# Compile SPAdes
def compile_spades(args, dataset_info, working_dir):
    """Build and install SPAdes into working_dir; return a process exit code.

    Skipped (returns 0) when --cfg-compilation is off or the dataset
    disables spades_compile. On a failed incremental build, retries from a
    clean tree via spades_compile.sh.
    """
    if not args.cfg_compilation:
        log.log("Forced to use current SPAdes build, will not compile SPAdes");
    elif 'spades_compile' not in dataset_info.__dict__ or dataset_info.spades_compile:
        comp_params = ' '
        if 'compilation_params' in dataset_info.__dict__:
            comp_params = " ".join(dataset_info.compilation_params)

        bin_dir = 'build_spades'
        if not os.path.exists(bin_dir):
            os.makedirs(bin_dir)
        os.chdir(bin_dir)

        #Compilation
        # Exit statuses are OR-ed: non-zero means at least one step failed.
        err_code = os.system('cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=' + working_dir + ' ' + os.path.join(working_dir, 'src') + comp_params)
        err_code = err_code | os.system('make -j 16')
        err_code = err_code | os.system('make install')

        os.chdir(working_dir)

        if err_code != 0:
            # Compile from the beginning if failed
            shutil.rmtree('bin', True)
            shutil.rmtree('build_spades', True)
            return os.system('./spades_compile.sh ' + comp_params)
    return 0
+
def compile_mts(workdir):
    """Run ./prepare_cfg and build the MTS project; return the exit code."""
    #if not args.cfg_compilation:
    #    log.log("Forced to use current build, will not compile");
    #    return 0
    os.chdir(workdir)
    ecode = subprocess.call("./prepare_cfg")
    if ecode != 0:
        return ecode
    return subprocess.call(["make", "-C", "build/release/projects/mts"])
+
def parse_args():
    """Parse the MTS integration-test command line and return the namespace."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--config", "-c", help="Config template")
    cli.add_argument("dir", help="Output directory")
    cli.add_argument("--saves", "-s", type=str)
    cli.add_argument("--no-clean", action="store_true")
    cli.add_argument("--etalons", "-e", type=str, help="Directory of GF etalons")
    return cli.parse_args()
+
def prepare_config(args, workdir):
    """Instantiate the config template with this checkout's BIN/SCRIPTS paths
    and write the result to <args.dir>/config.yaml."""
    with open(os.path.join(args.config)) as template:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; the template here is trusted test config.
        params = yaml.load(template)
        params["BIN"] = os.path.join(workdir, "build/release/bin")
        params["SCRIPTS"] = os.path.join(workdir, "src/projects/mts/scripts")
        with open(os.path.join(args.dir, "config.yaml"), "w") as config:
            config.write(yaml.dump(params))
+
def run_mts(args, workdir):
    """Prepare the output dir (unless --no-clean), optionally symlink saved
    assemblies, then run ./mts.py --stats; return its exit code."""
    if not args.no_clean:
        shutil.rmtree(args.dir, True)
    if not os.path.exists(args.dir):
        os.mkdir(args.dir)
        prepare_config(args, workdir)
    mts_args = ["./mts.py", "--stats", args.dir]
    if args.saves:
        log.log("Copying saves from" + args.saves)
        for saves_dir in ["assembly", "reassembly"]:
            full_dir = os.path.join(args.saves, saves_dir)
            if os.path.isdir(full_dir):
                # Symlink instead of copying to save space/time.
                #shutil.copytree(os.path.join(args.saves, saves_dir), os.path.join(args.dir, saves_dir))
                os.symlink(full_dir, os.path.join(args.dir, saves_dir))
            else:
                log.warn("No " + saves_dir + " dir provided; skipping")
        #Don't touch symlinked assemblies because it may corrupt other runs with the same dependencies
        #mts_args.append("--reuse-assemblies")
    os.chdir(os.path.join(workdir, "src/projects/mts"))
    return subprocess.call(mts_args)
+
def check_etalons(args, workdir):
    """Compare the estimated GF tables under <args.dir>/stats/summary with the
    etalon tables in args.etalons; return 0 on success, 7 on any mismatch."""
    # Mutable holder so the compare_gf closure can set the result code.
    class mut:
        res = 0

    re_num = re.compile("-?\d+(?:\.\d+)?")
    # Extract the first number from a cell; non-numeric cells count as 0.
    def read_cell(str):
        maybe_num = re_num.search(str)
        if not maybe_num:
            return 0
        return float(maybe_num.group(0))

    #TODO: more configurable? Ideally, to set custom threshold for each cell

    #Margin values should stay close to margin, otherwise it's a false pos/neg
    pos_threshold = 95
    neg_threshold = 5
    #For the rest ("floating" clusters we're unsure of), we allow broader +/- margin
    threshold = 10

    # Compare one (etalon, estimated) cell pair; flag estimates outside the
    # [lower, upper] band derived from the etalon value.
    def compare_gf(ref, cag, val1, val2):
        log.log("Comparing {} in {}: {} vs {}".format(cag, ref, val1, val2))
        et_val = read_cell(val1)
        est_val = read_cell(val2)
        lower = pos_threshold if et_val > pos_threshold else max(0,   et_val - threshold)
        upper = neg_threshold if et_val < neg_threshold else min(100, et_val + threshold)
        if est_val < lower:
            log.err("GF of {} in {} = {}% is less than expected {:.2f}%".format(cag, ref, est_val, lower))
            mut.res = 7
        elif est_val > upper:
            log.err("GF of {} in {} = {}% is higher than expected {:.2f}%".format(cag, ref, est_val, upper))
            mut.res = 7

    # Every etalon file must have a like-named estimated table; missing
    # tables only warn, parse failures abort.
    for file in os.listdir(args.etalons):
        etalon = os.path.join(args.etalons, file)
        estimated = os.path.join(args.dir, "stats", "summary", file)
        log.log("Trying to compare " + etalon + " and " + estimated)
        if not os.path.isfile(estimated):
            log.warn("No table provided for " + file)
            continue
        try:
            log.log("Loading " + etalon)
            et_table = Table.read(etalon, headers=True)
            log.log("Loading " + estimated)
            est_table = Table.read(estimated, headers=True)
            log.log("Comparing GF for " + file)
            et_table.zip_with(est_table, compare_gf)
        except:
            log.err("Cannot load {}".format(file))
            raise
    return mut.res
+
if __name__ == "__main__":
    try:
        # Merge stderr into stdout so the transcript captures both in order.
        sys.stderr = sys.stdout
        args = parse_args()
        workdir = os.getcwd()
        ecode = 0

        #compile
        #if compile_spades(args, dataset_info, working_dir) != 0:
        #    log.err("SPAdes compilation finished abnormally with exit code " + str(ecode))
        #    sys.exit(3)

        ecode = compile_mts(workdir)
        if ecode != 0:
            log.err("MTS compilation finished abnormally with exit code " + str(ecode))
            sys.exit(3)

        ecode = run_mts(args, workdir)
        if ecode != 0:
            log.err("Error while running MTS: " + str(ecode))

        # Etalon comparison overrides the MTS exit code when requested.
        if args.etalons:
            ecode = check_etalons(args, workdir)

        sys.exit(ecode)

    except SystemExit:
        raise

    except:
        # 239 signals an unexpected crash to the CI harness.
        log.err("The following unexpected error occured during the run:")
        print_exc()
        sys.exit(239)
diff --git a/src/projects/mts/visualization.hpp b/src/projects/mts/visualization.hpp
new file mode 100644
index 0000000..8ab87b5
--- /dev/null
+++ b/src/projects/mts/visualization.hpp
@@ -0,0 +1,66 @@
+#pragma once
+
+#include "visualization/graph_colorer.hpp"
+#include "visualization/visualization_utils.hpp"
+
+namespace debruijn_graph {
+
+template <class Graph>
+class AnnotatedGraphColorer
+    : public visualization::graph_colorer::GraphColorer<Graph> {
+
+    EdgeAnnotation annotation_;
+    std::map<bin_id, std::string> color_map_;
+
+public:
+    AnnotatedGraphColorer(const EdgeAnnotation& annotation)
+        : annotation_(annotation) {
+        std::vector<std::string> preset_colors({"red", "blue", "yellow", "orange", "purple", "pink"});
+        VERIFY(annotation_.interesting_bins().size() <= preset_colors.size());
+        size_t i = 0;
+        for (const auto& b_id : annotation_.interesting_bins()) {
+            color_map_[b_id] = preset_colors[i];
+            ++i;
+        }
+    }
+
+    string GetValue(typename Graph::VertexId) const { return "black"; }
+
+    string GetValue(typename Graph::EdgeId edge) const {
+        if (annotation_.Annotation(edge).empty()) {
+            return "black";
+        }
+        vector<std::string> colors;
+        auto ann = annotation_.Annotation(edge);
+        std::ostringstream ss;
+        std::transform(ann.begin(), ann.end(), ostream_iterator<string>(ss, ":"), [&](bin_id b){
+            return get(color_map_, b);
+        });
+        return ss.str();
+    }
+
+};
+
+void PrintColoredAnnotatedGraphAroundEdge(const conj_graph_pack& gp,
+                                          const EdgeId& edge,
+                                          const EdgeAnnotation& annotation,
+                                          const string& output_filename) {
+    //std::cout << output_filename << std::endl;
+    visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    auto colorer_ptr =
+        std::make_shared<AnnotatedGraphColorer<Graph>>(annotation);
+    GraphComponent<Graph> component = omnigraph::EdgeNeighborhood(gp.g, edge, 100, 10000);
+    visualization::visualization_utils::WriteComponent<Graph>(component, output_filename, colorer_ptr, labeler);
+}
+
+void PrintAnnotatedAlongPath(const conj_graph_pack& gp,
+                                          const vector<EdgeId>& path,
+                                          const EdgeAnnotation& annotation,
+                                          const string& output_prefix) {
+    visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    auto colorer_ptr =
+        std::make_shared<AnnotatedGraphColorer<Graph>>(annotation);
+    visualization::visualization_utils::WriteComponentsAlongPath<Graph>(gp.g, path, output_prefix, colorer_ptr, labeler);
+}
+
+}
\ No newline at end of file
diff --git a/src/projects/online_vis/CMakeLists.txt b/src/projects/online_vis/CMakeLists.txt
index d020b82..f662879 100644
--- a/src/projects/online_vis/CMakeLists.txt
+++ b/src/projects/online_vis/CMakeLists.txt
@@ -24,11 +24,11 @@ include_directories(./drawing_commands)
 include_directories(${CMAKE_SOURCE_DIR}/debruijn)
 
 if (READLINE_FOUND AND CURSES_FOUND)
-  target_link_libraries(online_vis spades_modules nlopt format ${COMMON_LIBRARIES} ${READLINE_LIBRARY} ${CURSES_NCURSES_LIBRARY})
+  target_link_libraries(online_vis common_modules nlopt format ${COMMON_LIBRARIES} ${READLINE_LIBRARY} ${CURSES_NCURSES_LIBRARY})
 elseif (READLINE_FOUND)
-  target_link_libraries(online_vis spades_modules nlopt format ${COMMON_LIBRARIES} ${READLINE_LIBRARY})
+  target_link_libraries(online_vis common_modules nlopt format ${COMMON_LIBRARIES} ${READLINE_LIBRARY})
 else()
-  target_link_libraries(online_vis spades_modules nlopt format ${COMMON_LIBRARIES})
+  target_link_libraries(online_vis common_modules nlopt format ${COMMON_LIBRARIES})
 endif()
 
 if (DEFINED static_build)
diff --git a/src/projects/online_vis/debruijn_environment.hpp b/src/projects/online_vis/debruijn_environment.hpp
index 0bd7e3a..9886b25 100644
--- a/src/projects/online_vis/debruijn_environment.hpp
+++ b/src/projects/online_vis/debruijn_environment.hpp
@@ -25,7 +25,7 @@ class DebruijnEnvironment : public Environment {
         GraphElementFinder<Graph> element_finder_;
         std::shared_ptr<MapperClass> mapper_;
         FillerClass filler_;
-        omnigraph::DefaultLabeler<Graph> labeler_;
+        visualization::graph_labeler::DefaultLabeler<Graph> labeler_;
         debruijn_graph::ReadPathFinder<Graph> path_finder_;
         ColoringClass coloring_;
         //CompositeLabeler<Graph> labeler_;
@@ -87,7 +87,7 @@ class DebruijnEnvironment : public Environment {
             DEBUG("Colorer done");
             Path<EdgeId> path1 = mapper_->MapSequence(gp_.genome.GetSequence()).path();
             Path<EdgeId> path2 = mapper_->MapSequence(!gp_.genome.GetSequence()).path();
-            coloring_ = omnigraph::visualization::DefaultColorer(gp_.g, path1, path2);
+            coloring_ = visualization::graph_colorer::DefaultColorer(gp_.g, path1, path2);
             ResetPositions();
         }
 
@@ -193,7 +193,7 @@ class DebruijnEnvironment : public Environment {
             return filler_;
         }
 
-        omnigraph::GraphLabeler<Graph>& labeler() {
+        visualization::graph_labeler::GraphLabeler<Graph>& labeler() {
             return labeler_;
         }
 
diff --git a/src/projects/online_vis/drawing_commands/draw_contig_command.hpp b/src/projects/online_vis/drawing_commands/draw_contig_command.hpp
index 37b90b9..0db8d64 100644
--- a/src/projects/online_vis/drawing_commands/draw_contig_command.hpp
+++ b/src/projects/online_vis/drawing_commands/draw_contig_command.hpp
@@ -10,7 +10,7 @@
 #include "../environment.hpp"
 #include "../command.hpp"
 #include "../errors.hpp"
-#include "io/reads_io/wrapper_collection.hpp"
+#include "io/reads/wrapper_collection.hpp"
 
 namespace online_visualization {
 class DrawContigCommand : public DrawingCommand {
diff --git a/src/projects/online_vis/drawing_commands/draw_missasemblies.hpp b/src/projects/online_vis/drawing_commands/draw_missasemblies.hpp
index c3b2011..9b8ef4f 100644
--- a/src/projects/online_vis/drawing_commands/draw_missasemblies.hpp
+++ b/src/projects/online_vis/drawing_commands/draw_missasemblies.hpp
@@ -9,7 +9,7 @@
 #include "../environment.hpp"
 #include "../command.hpp"
 #include "../errors.hpp"
-#include "io/reads_io/wrapper_collection.hpp"
+#include "io/reads/wrapper_collection.hpp"
 
 namespace online_visualization {
 class DrawMisassemblies : public DrawingCommand {
@@ -179,7 +179,7 @@ public:
 
         string file = args[1];
         
-        FillPos(curr_env.graph_pack(), file, "miss", true);
+        visualization::position_filler::FillPos(curr_env.graph_pack(), file, "miss", true);
         cout << "All contigs are mapped" << endl;
 
 
diff --git a/src/projects/online_vis/drawing_commands/draw_polymorphic_regions.hpp b/src/projects/online_vis/drawing_commands/draw_polymorphic_regions.hpp
index 68ae311..d719cf8 100644
--- a/src/projects/online_vis/drawing_commands/draw_polymorphic_regions.hpp
+++ b/src/projects/online_vis/drawing_commands/draw_polymorphic_regions.hpp
@@ -10,7 +10,7 @@
 #include "../environment.hpp"
 #include "../command.hpp"
 #include "../errors.hpp"
-#include "io/reads_io/wrapper_collection.hpp"
+#include "io/reads/wrapper_collection.hpp"
 
 namespace online_visualization {
 
@@ -32,16 +32,15 @@ class DrawPolymorphicRegions : public DrawingCommand {
                 verticesToAdd.push_back(curr_env.graph().EdgeEnd(edge));
             }
         }
-        GraphComponent<Graph> polymorphicComponent(curr_env.graph(), verticesToAdd.begin(), verticesToAdd.end());
-        return polymorphicComponent;
+        return GraphComponent<Graph>::FromVertices(curr_env.graph(), verticesToAdd);
     }
 
     void DrawPicture(DebruijnEnvironment& curr_env, Sequence& genome) const {
         size_t windowSize = 500;
         for(size_t i = 0; i < genome.size() - windowSize - 1 - curr_env.k_value(); ++i)
         {
-            runtime_k::RtSeq firstKmer = genome.Subseq(i).start<runtime_k::RtSeq>(curr_env.k_value() + 1);
-            runtime_k::RtSeq secondKmer = genome.Subseq(i + windowSize).start<runtime_k::RtSeq>(curr_env.k_value() + 1);
+            RtSeq firstKmer = genome.Subseq(i).start<RtSeq>(curr_env.k_value() + 1);
+            RtSeq secondKmer = genome.Subseq(i + windowSize).start<RtSeq>(curr_env.k_value() + 1);
             firstKmer = curr_env.kmer_mapper().Substitute(firstKmer);
             secondKmer = curr_env.kmer_mapper().Substitute(secondKmer);
             pair<EdgeId, size_t> positionFirst = curr_env.index().get(firstKmer);
@@ -80,8 +79,12 @@ class DrawPolymorphicRegions : public DrawingCommand {
 
                     if(polymorphicRegion.e_size() > 5)
                     {
-                        visualization::WriteComponentSinksSources(polymorphicRegion, curr_env.folder() + "/" + ToString(curr_env.graph().int_id(*polymorphicRegion.vertices().begin())) + ".dot", visualization::DefaultColorer(curr_env.graph()),
-                                                                  curr_env.labeler());
+                        using namespace visualization::visualization_utils;
+                        WriteComponentSinksSources(polymorphicRegion,
+                                                   curr_env.folder() + "/" +
+                                                           ToString(curr_env.graph().int_id(*polymorphicRegion.vertices().begin())) + ".dot",
+                                                   visualization::graph_colorer::DefaultColorer(curr_env.graph()),
+                                                   curr_env.labeler());
 
                         INFO("Component is written to " + curr_env.folder() + ToString(curr_env.graph().int_id(*polymorphicRegion.vertices().begin())) + ".dot");
                     }
diff --git a/src/projects/online_vis/drawing_commands/draw_poorly_assembled.hpp b/src/projects/online_vis/drawing_commands/draw_poorly_assembled.hpp
index 23c69ed..2eb6ead 100644
--- a/src/projects/online_vis/drawing_commands/draw_poorly_assembled.hpp
+++ b/src/projects/online_vis/drawing_commands/draw_poorly_assembled.hpp
@@ -10,9 +10,9 @@
 #include "../environment.hpp"
 #include "../command.hpp"
 #include "../errors.hpp"
-#include "io/reads_io/wrapper_collection.hpp"
+#include "io/reads/wrapper_collection.hpp"
 #include <boost/algorithm/string.hpp>
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
 
 #include <boost/algorithm/string/predicate.hpp>
 
@@ -120,7 +120,7 @@ public:
 class UnresolvedPrinter : public RepeatProcessor {
 
     void DrawGap(DebruijnEnvironment& curr_env, const vector<EdgeId>& path, string filename, string /*label*/ = "") const {
-        omnigraph::visualization::WriteComponentsAlongPath<Graph>(curr_env.graph(), path, filename, curr_env.coloring(), curr_env.labeler());
+        visualization::visualization_utils::WriteComponentsAlongPath<Graph>(curr_env.graph(), path, filename, curr_env.coloring(), curr_env.labeler());
         LOG("The pictures is written to " << filename);
     }
 
diff --git a/src/projects/online_vis/drawing_commands/draw_position_command.hpp b/src/projects/online_vis/drawing_commands/draw_position_command.hpp
index 51e792b..19bbe6e 100644
--- a/src/projects/online_vis/drawing_commands/draw_position_command.hpp
+++ b/src/projects/online_vis/drawing_commands/draw_position_command.hpp
@@ -17,7 +17,7 @@
 namespace online_visualization {
     class DrawPositionCommand : public DrawingCommand {
         private:
-            void DrawPicture(DebruijnEnvironment& curr_env, runtime_k::RtSeq kmer, string label = "") const {
+            void DrawPicture(DebruijnEnvironment& curr_env, RtSeq kmer, string label = "") const {
                 kmer = curr_env.kmer_mapper().Substitute(kmer);
                 if (!curr_env.index().contains(kmer)) {
                     cout << "No corresponding graph location " << endl;
@@ -72,7 +72,7 @@ namespace online_visualization {
                 }
 
                 if (CheckPositionBounds(position, genome.size(), curr_env.k_value())) {
-                    DrawPicture(curr_env, genome.Subseq(position).start<runtime_k::RtSeq>(curr_env.k_value() + 1), args[1]);
+                    DrawPicture(curr_env, genome.Subseq(position).start<RtSeq>(curr_env.k_value() + 1), args[1]);
                 }
 
             }
diff --git a/src/projects/online_vis/drawing_commands/drawing_command.hpp b/src/projects/online_vis/drawing_commands/drawing_command.hpp
index c825b7e..4fcba92 100644
--- a/src/projects/online_vis/drawing_commands/drawing_command.hpp
+++ b/src/projects/online_vis/drawing_commands/drawing_command.hpp
@@ -11,7 +11,7 @@
 #include "../command.hpp"
 #include "../errors.hpp"
 #include "../argument_list.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
 #include "io/reads/single_read.hpp"
 
 namespace online_visualization {
@@ -28,7 +28,7 @@ protected:
         //linkstream  << curr_env.folder_ << "/" << curr_env.file_name_base_ << "_latest.dot";
         //EdgePosGraphLabeler<Graph> labeler(curr_env.graph(), gp_.edge_pos);
         omnigraph::GraphComponent<Graph> component = VertexNeighborhood(curr_env.graph(), vertex, curr_env.max_vertices_, curr_env.edge_length_bound_);
-        omnigraph::visualization::WriteComponent<Graph>(component, file_name, curr_env.coloring_, curr_env.labeler());
+        visualization::visualization_utils::WriteComponent<Graph>(component, file_name, curr_env.coloring_, curr_env.labeler());
         //WriteComponents <Graph> (curr_env.graph(), splitter, linkstream.str(), *DefaultColorer(curr_env.graph(), curr_env.coloring_), curr_env.labeler());
         LOG("The picture is written to " << file_name);
 
@@ -42,7 +42,7 @@ protected:
         string directory = namestream.str();
         make_dir(directory);
         namestream << label << "_";
-        omnigraph::visualization::WriteComponentsAlongPath<Graph>(curr_env.graph(), path, namestream.str(), curr_env.coloring_, curr_env.labeler());
+        visualization::visualization_utils::WriteComponentsAlongPath<Graph>(curr_env.graph(), path, namestream.str(), curr_env.coloring_, curr_env.labeler());
         LOG("The pictures is written to " << directory);
 
         curr_env.picture_counter_++;
@@ -61,7 +61,7 @@ protected:
         make_dir(namestream.str());
         namestream << label;
         make_dir(namestream.str());
-        omnigraph::visualization::WriteSizeLimitedComponents<Graph>(curr_env.graph(), namestream.str(), omnigraph::ConnectedSplitter<Graph>(curr_env.graph()), curr_env.coloring_, curr_env.labeler(), min_size, max_size, 10000000);
+        visualization::visualization_utils::WriteSizeLimitedComponents<Graph>(curr_env.graph(), namestream.str(), omnigraph::ConnectedSplitter<Graph>(curr_env.graph()), curr_env.coloring_, curr_env.labeler(), min_size, max_size, 10000000);
         LOG("The pictures is written to " << namestream.str());
         curr_env.picture_counter_++;
     }
diff --git a/src/projects/online_vis/drawing_commands/show_position_command.hpp b/src/projects/online_vis/drawing_commands/show_position_command.hpp
index eb9daa1..f957b39 100644
--- a/src/projects/online_vis/drawing_commands/show_position_command.hpp
+++ b/src/projects/online_vis/drawing_commands/show_position_command.hpp
@@ -17,7 +17,7 @@
 namespace online_visualization {
     class ShowPositionCommand : public DrawingCommand {
         private:
-            int ShowPicture(DebruijnEnvironment& curr_env, runtime_k::RtSeq kmer, string label = "") const {
+            int ShowPicture(DebruijnEnvironment& curr_env, RtSeq kmer, string label = "") const {
                 kmer = curr_env.kmer_mapper().Substitute(kmer);
                 if (!curr_env.index().contains(kmer)) {
                     FireNoCorrespondingGraphLocation(label);
@@ -70,7 +70,7 @@ namespace online_visualization {
                     genome = !genome;
                 }
                 if (CheckPositionBounds(position, genome.size(), curr_env.k_value())) {
-                    int result = ShowPicture(curr_env, genome.Subseq(position).start<runtime_k::RtSeq>(curr_env.k_value() + 1), args[1]);
+                    int result = ShowPicture(curr_env, genome.Subseq(position).start<RtSeq>(curr_env.k_value() + 1), args[1]);
                     if (result) 
                         FireGenericError("Something is wrong");
                 }
diff --git a/src/projects/online_vis/environment.hpp b/src/projects/online_vis/environment.hpp
index 8f6a05a..ff2eaaf 100644
--- a/src/projects/online_vis/environment.hpp
+++ b/src/projects/online_vis/environment.hpp
@@ -14,11 +14,11 @@
 
 namespace online_visualization {
 
-typedef debruijn_graph::NewExtendedSequenceMapper<debruijn_graph::Graph, Index> MapperClass;
-typedef debruijn_graph::PosFiller<Graph> FillerClass;
+typedef debruijn_graph::BasicSequenceMapper<debruijn_graph::Graph, Index> MapperClass;
+typedef visualization::position_filler::PosFiller<Graph> FillerClass;
 typedef debruijn_graph::KmerMapper<Graph> KmerMapperClass;
 typedef omnigraph::GraphElementFinder<Graph> ElementFinder;
-typedef shared_ptr<omnigraph::visualization::GraphColorer<Graph>> ColoringClass;
+typedef shared_ptr<visualization::graph_colorer::GraphColorer<Graph>> ColoringClass;
 
 class Environment : private boost::noncopyable {
  protected:
diff --git a/src/projects/online_vis/main.cpp b/src/projects/online_vis/main.cpp
index 2a7d08a..7684637 100644
--- a/src/projects/online_vis/main.cpp
+++ b/src/projects/online_vis/main.cpp
@@ -9,15 +9,15 @@
 #include "vis_logger.hpp"
 
 #include "standard_vis.hpp"
-#include "dev_support/segfault_handler.hpp"
-#include "dev_support/stacktrace.hpp"
+#include "utils/segfault_handler.hpp"
+#include "utils/stacktrace.hpp"
 #include "pipeline/config_struct.hpp"
-#include "io/reads_io/io_helper.hpp"
-#include "dev_support/simple_tools.hpp"
+#include "io/reads/io_helper.hpp"
+#include "utils/simple_tools.hpp"
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
-#include "dev_support/memory_limit.hpp"
+#include "utils/memory_limit.hpp"
 #include "io/dataset_support/read_converter.hpp"
 
 #include "debruijn_online_visualizer.hpp"
diff --git a/src/projects/online_vis/online_visualizer.hpp b/src/projects/online_vis/online_visualizer.hpp
index 551a9f3..2d6e337 100644
--- a/src/projects/online_vis/online_visualizer.hpp
+++ b/src/projects/online_vis/online_visualizer.hpp
@@ -13,7 +13,7 @@
 #include "command.hpp"
 #include "loaded_environments.hpp"
 #include "environment.hpp"
-#include "dev_support/autocompletion.hpp"
+#include "utils/autocompletion.hpp"
 
 //#include "all_commands.hpp"
 #include "base_commands.hpp"
diff --git a/src/projects/online_vis/position_commands/fill_position_command.hpp b/src/projects/online_vis/position_commands/fill_position_command.hpp
index 604f926..28c3ea3 100644
--- a/src/projects/online_vis/position_commands/fill_position_command.hpp
+++ b/src/projects/online_vis/position_commands/fill_position_command.hpp
@@ -50,7 +50,7 @@ namespace online_visualization {
                 string name = args[1];
                 string file = args[2];
 
-                FillPos(curr_env.graph_pack(), file, name, true);
+                visualization::position_filler::FillPos(curr_env.graph_pack(), file, name, true);
             }
     };
 }
diff --git a/src/projects/online_vis/processing_commands.hpp b/src/projects/online_vis/processing_commands.hpp
index 6d1a620..a9ca0b4 100644
--- a/src/projects/online_vis/processing_commands.hpp
+++ b/src/projects/online_vis/processing_commands.hpp
@@ -43,7 +43,7 @@ private:
             length = curr_env.edge_length_bound();
         }
 
-        pred::TypedPredicate<EdgeId> condition = LengthUpperBound<Graph>(curr_env.graph(), length);
+        func::TypedPredicate<EdgeId> condition = LengthUpperBound<Graph>(curr_env.graph(), length);
         if (args.size() > 2 && (args[2] == "Y" || args[2] == "y")) {
             cout << "Trying to activate genome quality condition" << endl;
             if (curr_env.genome().size() == 0) {
@@ -60,7 +60,7 @@ private:
         }
         debruijn::simplification::SimplifInfoContainer info(debruijn_graph::config::pipeline_type::base);
         info.set_chunk_cnt(10);
-        debruijn::simplification::TipClipperInstance(curr_env.graph(), condition, info, (omnigraph::HandlerF<Graph>)nullptr)->Run();
+        debruijn::simplification::TipClipperInstance(curr_env.graph(), condition, info, (omnigraph::EdgeRemovalHandlerF<Graph>)nullptr)->Run();
     }
 };
 }
diff --git a/src/projects/online_vis/standard_vis.hpp b/src/projects/online_vis/standard_vis.hpp
index 68fde86..a2626e1 100644
--- a/src/projects/online_vis/standard_vis.hpp
+++ b/src/projects/online_vis/standard_vis.hpp
@@ -8,7 +8,7 @@
 #pragma once
 
 #include "pipeline/graph_pack.hpp"
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 
 #include <readline/readline.h>
 #include <readline/history.h>
diff --git a/src/projects/online_vis/statistics_commands/junction_sequence_command.hpp b/src/projects/online_vis/statistics_commands/junction_sequence_command.hpp
index ee1f5fd..331bc50 100644
--- a/src/projects/online_vis/statistics_commands/junction_sequence_command.hpp
+++ b/src/projects/online_vis/statistics_commands/junction_sequence_command.hpp
@@ -10,8 +10,8 @@
 #include "../environment.hpp"
 #include "../command.hpp"
 #include "../errors.hpp"
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
-#include "assembly_graph/graph_alignment/sequence_mapper.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
 #include "assembly_graph/paths/path_utils.hpp"
 
 namespace online_visualization {
diff --git a/src/projects/online_vis/statistics_commands/print_contigs_stats.hpp b/src/projects/online_vis/statistics_commands/print_contigs_stats.hpp
index eaf3485..41ed613 100644
--- a/src/projects/online_vis/statistics_commands/print_contigs_stats.hpp
+++ b/src/projects/online_vis/statistics_commands/print_contigs_stats.hpp
@@ -10,7 +10,7 @@
 #include "../environment.hpp"
 #include "../command.hpp"
 #include "../errors.hpp"
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
 
 namespace online_visualization {
     class PrintContigsStatsCommand : public LocalCommand<DebruijnEnvironment> {
diff --git a/src/projects/online_vis/vis_logger.hpp b/src/projects/online_vis/vis_logger.hpp
index 42bd6a7..a0c0dbe 100644
--- a/src/projects/online_vis/vis_logger.hpp
+++ b/src/projects/online_vis/vis_logger.hpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 
 #undef INFO
 #define INFO(message)                       \
@@ -16,13 +16,13 @@
 
 #define LOG(message)                                                      \
 {                                                                         \
-    std::cout << message << endl;                                       \
+    std::cout << message << std::endl;                                              \
 }                                                                         \
 
 //#define trace(message)                      LOG_MSG(logging::L_TRACE, message)
 #define debug(print, message)               \
 {                                           \
     if (print) {                            \
-        std::cout << message << std::endl;  \
+        std::cout << message << std::endl;            \
     }                                       \
 }                                           
diff --git a/src/projects/scaffold_correction/CMakeLists.txt b/src/projects/scaffold_correction/CMakeLists.txt
index 45e47b8..3f0f591 100644
--- a/src/projects/scaffold_correction/CMakeLists.txt
+++ b/src/projects/scaffold_correction/CMakeLists.txt
@@ -9,7 +9,7 @@ project(moleculo CXX)
 
 add_executable(scaffold_correction
                main.cpp)
-target_link_libraries(scaffold_correction spades_modules ${COMMON_LIBRARIES})
+target_link_libraries(scaffold_correction common_modules ${COMMON_LIBRARIES})
 
 if (SPADES_STATIC_BUILD)
   set_target_properties(scaffold_correction PROPERTIES LINK_SEARCH_END_STATIC 1)
diff --git a/src/projects/scaffold_correction/main.cpp b/src/projects/scaffold_correction/main.cpp
index 9086e90..56eca8d 100644
--- a/src/projects/scaffold_correction/main.cpp
+++ b/src/projects/scaffold_correction/main.cpp
@@ -7,14 +7,13 @@
 /*
  * Assembler Main
  */
-#include "dev_support/logger/log_writers.hpp"
-
-#include "dev_support/segfault_handler.hpp"
-#include "dev_support/stacktrace.hpp"
-#include "dev_support/memory_limit.hpp"
-#include "dev_support/copy_file.hpp"
-#include "dev_support/perfcounter.hpp"
-#include "data_structures/sequence/runtime_k.hpp"
+#include "utils/logger/log_writers.hpp"
+
+#include "utils/segfault_handler.hpp"
+#include "utils/stacktrace.hpp"
+#include "utils/memory_limit.hpp"
+#include "utils/copy_file.hpp"
+#include "utils/perfcounter.hpp"
 #include "scaffold_correction.hpp"
 
 #include "pipeline/config_struct.hpp"
diff --git a/src/projects/scaffold_correction/scaffold_correction.hpp b/src/projects/scaffold_correction/scaffold_correction.hpp
index 0237e6b..7f056aa 100644
--- a/src/projects/scaffold_correction/scaffold_correction.hpp
+++ b/src/projects/scaffold_correction/scaffold_correction.hpp
@@ -5,16 +5,16 @@
 //***************************************************************************
 
 #pragma once
-#include "io/reads_io/osequencestream.hpp"
-#include "io/reads_io/file_reader.hpp"
+#include "io/reads/osequencestream.hpp"
+#include "io/reads/file_reader.hpp"
 #include "pipeline/stage.hpp"
 #include "pipeline/graph_pack.hpp"
 #include "assembly_graph/paths/path_processor.hpp"
 #include "stages/construction.hpp"
 #include "pipeline/config_struct.hpp"
-#include "algorithms/dijkstra/dijkstra_algorithm.hpp"
-#include "algorithms/dijkstra/dijkstra_helper.hpp"
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
+#include "assembly_graph/dijkstra/dijkstra_algorithm.hpp"
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
 
 namespace scaffold_correction {
     typedef debruijn_graph::ConjugateDeBruijnGraph Graph;
@@ -229,14 +229,13 @@ namespace spades {
     public:
         typedef debruijn_graph::config::debruijn_config::scaffold_correction Config;
     private:
-        size_t k_;
         std::string output_file_;
         const Config &config_;
     public:
-        ScaffoldCorrectionStage(size_t k, string output_file,
+        ScaffoldCorrectionStage(string output_file,
                 const Config &config) :
                 AssemblyStage("ScaffoldCorrection", "scaffold_correction"),
-                k_(k), output_file_(output_file), config_(config) {
+                output_file_(output_file), config_(config) {
         }
 
         vector<Sequence> CollectScaffoldParts(const io::SingleRead &scaffold) const {
@@ -324,7 +323,7 @@ namespace spades {
                               cfg::get().load_from,
                               cfg::get().output_saves});
         manager.add(new debruijn_graph::Construction())
-               .add(new ScaffoldCorrectionStage(cfg::get().K, cfg::get().output_dir + "corrected_scaffolds.fasta", *cfg::get().sc_cor));
+               .add(new ScaffoldCorrectionStage(cfg::get().output_dir + "corrected_scaffolds.fasta", *cfg::get().sc_cor));
         INFO("Output directory: " << cfg::get().output_dir);
         conj_gp.kmer_mapper.Attach();
         manager.run(conj_gp, cfg::get().entry_point.c_str());
diff --git a/src/projects/spades/CMakeLists.txt b/src/projects/spades/CMakeLists.txt
index f245266..e8f4743 100644
--- a/src/projects/spades/CMakeLists.txt
+++ b/src/projects/spades/CMakeLists.txt
@@ -14,11 +14,13 @@ add_executable(spades main.cpp
             second_phase_setup.cpp
             distance_estimation.cpp
             repeat_resolving.cpp
-            pacbio_aligning.cpp
-            chromosome_removal.cpp)
-  
+            contig_output_stage.cpp
+            hybrid_aligning.cpp
+            chromosome_removal.cpp
+            ../mts/contig_abundance.cpp)
+
 target_include_directories(spades PRIVATE ${EXT_DIR}/include/ConsensusCore)
-target_link_libraries(spades ConsensusCore spades_modules nlopt BamTools ssw ${COMMON_LIBRARIES})
+target_link_libraries(spades ConsensusCore common_modules nlopt BamTools ssw ${COMMON_LIBRARIES})
 
 if (SPADES_STATIC_BUILD)
   set_target_properties(spades PROPERTIES LINK_SEARCH_END_STATIC 1)
diff --git a/src/projects/spades/chromosome_removal.cpp b/src/projects/spades/chromosome_removal.cpp
index f2282d5..fdedc68 100644
--- a/src/projects/spades/chromosome_removal.cpp
+++ b/src/projects/spades/chromosome_removal.cpp
@@ -6,11 +6,13 @@
 
 #include "assembly_graph/graph_support/contig_output.hpp"
 #include "stages/simplification_pipeline/graph_simplification.hpp"
-#include "algorithms/simplification/ec_threshold_finder.hpp"
-#include "assembly_graph/graph_core/basic_graph_stats.hpp"
-
+#include "modules/simplification/ec_threshold_finder.hpp"
+#include "assembly_graph/core/basic_graph_stats.hpp"
 #include "chromosome_removal.hpp"
 
+#include "math/xmath.h"
+
+
 namespace debruijn_graph {
 
 
@@ -90,16 +92,8 @@ size_t ChromosomeRemoval::CalculateComponentSize(EdgeId e, Graph &g_) {
 
 double ChromosomeRemoval::RemoveLongGenomicEdges(conj_graph_pack &gp, size_t long_edge_bound, double coverage_limits, double external_chromosome_coverage){
     INFO("Removing of long chromosomal edges started");
-    vector <pair<double, size_t> > coverages;
-    size_t total_len = 0, short_len = 0, cur_len = 0;
-    for (auto iter = gp.g.ConstEdgeBegin(); ! iter.IsEnd(); ++iter){
-        if (gp.g.length(*iter) > cfg::get().pd->edge_length_for_median) {
-            coverages.push_back(make_pair(gp.g.coverage(*iter), gp.g.length(*iter)));
-            total_len += gp.g.length(*iter);
-        } else {
-            short_len += gp.g.length(*iter);
-        }
-    }
+    CoverageUniformityAnalyzer coverage_analyzer(gp.g, long_edge_bound);
+    size_t total_len = coverage_analyzer.TotalLongEdgeLength();
     if (total_len == 0) {
         if (external_chromosome_coverage < 1.0) {
             WARN("plasmid detection failed, not enough long edges");
@@ -109,29 +103,17 @@ double ChromosomeRemoval::RemoveLongGenomicEdges(conj_graph_pack &gp, size_t lon
         }
         return 0;
     }
-    std::sort(coverages.begin(), coverages.end());
-    size_t i = 0;
-    while (cur_len < total_len/2 && i <coverages.size()) {
-        cur_len += coverages[i].second;
-        i++;
-    }
 
     double median_long_edge_coverage;
     if (external_chromosome_coverage < 1.0) {
-        median_long_edge_coverage = coverages[i-1].first;
-        INFO ("genomic coverage is "<< median_long_edge_coverage << " calculated of length " << size_t (double(total_len) * 0.5));
-        size_t outsiders_length = 0;
-        for (size_t j = 0; j < coverages.size(); j++) {
-            if ( coverages[j].first >= median_long_edge_coverage * (1 + coverage_limits) || coverages[j].first <= median_long_edge_coverage * (1 - coverage_limits)) {
-                outsiders_length += coverages[j].second;
-            }
-        }
-        if (outsiders_length * 5 > total_len) {
-            WARN ("More than 20% of long edges have coverage significantly different from median (total " << size_t (double(outsiders_length) * 0.5) <<" of "<< size_t (double(total_len) * 0.5) << " bases).");
+        median_long_edge_coverage = coverage_analyzer.CountMedianCoverage();
+        double fraction = coverage_analyzer.UniformityFraction(coverage_limits, median_long_edge_coverage);
+        if (math::gr(0.8, fraction)) {
+            WARN ("More than 20% of long edges have coverage significantly different from median (total " << size_t ((1-fraction) * 0.5 * double(total_len)) <<" of "<< size_t (double(total_len) * 0.5) << " bases).");
             WARN ("In most cases it means that either read coverage is uneven or significant contamination is present - both of these two cases make plasmidSPAdes' results unreliable");
             WARN ("However, that situation may still be OK if you expect to see large plasmids in your dataset, so plasmidSPAdes will continue to work");
         } else {
-            INFO(size_t(double(outsiders_length)/ double(total_len) * 100) << "% of bases from long edges have coverage significantly different from median");
+            INFO(size_t((1 - fraction) * 100) << "% of bases from long edges have coverage significantly different from median");
         }
         for (auto iter = gp.g.ConstEdgeBegin(); ! iter.IsEnd(); ++iter) {
             if (long_component_.find(*iter) == long_component_.end()) {
@@ -167,8 +149,7 @@ void ChromosomeRemoval::PlasmidSimplify(conj_graph_pack &gp, size_t long_edge_bo
     DEBUG("Simplifying graph for plasmid project");
     size_t iteration_count = 10;
     for (size_t i = 0; i < iteration_count; i++) {
-        //pred::TypedPredicate<typename Graph::EdgeId> condition = make_shared<LengthUpperBound<Graph>>(gp.g, long_edge_bound) ;
-        omnigraph::EdgeRemovingAlgorithm<Graph> tc(gp.g, pred::And(DeadEndCondition<Graph>(gp.g), LengthUpperBound<Graph>(gp.g, long_edge_bound)),
+        omnigraph::EdgeRemovingAlgorithm<Graph> tc(gp.g, func::And(DeadEndCondition<Graph>(gp.g), LengthUpperBound<Graph>(gp.g, long_edge_bound)),
                                                    removal_handler, true);
         tc.Run();
     }
diff --git a/src/projects/spades/chromosome_removal.hpp b/src/projects/spades/chromosome_removal.hpp
index f5e2cf9..77eb078 100644
--- a/src/projects/spades/chromosome_removal.hpp
+++ b/src/projects/spades/chromosome_removal.hpp
@@ -7,7 +7,8 @@
 #pragma once
 
 #include "pipeline/stage.hpp"
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
+#include "assembly_graph/graph_support/coverage_uniformity_analyzer.hpp"
 
 namespace debruijn_graph {
 
diff --git a/src/projects/spades/contig_output_stage.cpp b/src/projects/spades/contig_output_stage.cpp
new file mode 100644
index 0000000..fd309e6
--- /dev/null
+++ b/src/projects/spades/contig_output_stage.cpp
@@ -0,0 +1,55 @@
+//***************************************************************************
+//* Copyright (c) 2015-2017 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "contig_output_stage.hpp"
+#include "assembly_graph/paths/bidirectional_path_io/bidirectional_path_output.hpp"
+
+namespace debruijn_graph {
+
+void ContigOutput::run(conj_graph_pack &gp, const char*) {
+    auto output_dir = cfg::get().output_dir + contig_name_prefix_;
+
+    OutputContigs(gp.g, output_dir + "before_rr", false);
+    OutputContigsToFASTG(gp.g, output_dir + "assembly_graph", gp.components);
+
+    if (output_paths_ && gp.contig_paths.size() != 0) {
+        DefaultContigCorrector<ConjugateDeBruijnGraph> corrector(gp.g);
+        DefaultContigConstructor<ConjugateDeBruijnGraph> constructor(gp.g, corrector);
+
+        auto name_generator = path_extend::MakeContigNameGenerator(cfg::get().mode, gp);
+        path_extend::ContigWriter writer(gp.g, constructor, gp.components, name_generator);
+
+        bool output_broken_scaffolds = cfg::get().pe_params.param_set.scaffolder_options.enabled &&
+            cfg::get().use_scaffolder &&
+            cfg::get().co.obs_mode != config::output_broken_scaffolds::none;
+
+        if (output_broken_scaffolds) {
+            int min_gap = 0;
+            if (cfg::get().co.obs_mode == config::output_broken_scaffolds::break_all) {
+                min_gap = 1;
+            } else if (cfg::get().co.obs_mode == config::output_broken_scaffolds::break_gaps) {
+                min_gap = int(gp.g.k());
+            } else {
+                WARN("Unsupported contig output mode");
+            }
+
+            path_extend::ScaffoldBreaker breaker(min_gap);
+            path_extend::PathContainer broken_scaffolds;
+            breaker.Break(gp.contig_paths, broken_scaffolds);
+            writer.OutputPaths(broken_scaffolds, output_dir + cfg::get().co.contigs_name);
+        }
+
+        writer.OutputPaths(gp.contig_paths, output_dir + cfg::get().co.scaffolds_name);
+
+        OutputContigsToGFA(gp.g, gp.contig_paths, output_dir + "assembly_graph");
+    } else {
+        OutputContigs(gp.g, output_dir + "simplified_contigs", cfg::get().use_unipaths);
+        OutputContigs(gp.g, output_dir + cfg::get().co.contigs_name, false);
+    }
+}
+
+}
diff --git a/src/projects/spades/contig_output_stage.hpp b/src/projects/spades/contig_output_stage.hpp
new file mode 100644
index 0000000..de06d3d
--- /dev/null
+++ b/src/projects/spades/contig_output_stage.hpp
@@ -0,0 +1,29 @@
+//***************************************************************************
+//* Copyright (c) 2015-2017 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "pipeline/stage.hpp"
+
+namespace debruijn_graph {
+
+
+class ContigOutput : public spades::AssemblyStage {
+private:
+    bool output_paths_;
+    string contig_name_prefix_;
+
+public:
+    ContigOutput(bool output_paths = true, bool preliminary = false, const string& contig_name_prefix = "")
+        : AssemblyStage("Contig Output", preliminary ? "preliminary_contig_output" : "contig_output"),
+          output_paths_(output_paths), contig_name_prefix_(contig_name_prefix) { }
+
+    void run(conj_graph_pack &gp, const char *);
+
+};
+
+}
\ No newline at end of file
diff --git a/src/projects/spades/distance_estimation.cpp b/src/projects/spades/distance_estimation.cpp
index ed6ebf2..1950e85 100644
--- a/src/projects/spades/distance_estimation.cpp
+++ b/src/projects/spades/distance_estimation.cpp
@@ -41,8 +41,7 @@ void estimate_with_estimator(const Graph &graph,
                                             cfg::get().amb_de.relative_length_threshold,
                                             cfg::get().amb_de.relative_seq_threshold);
         PairInfoFilter<Graph>(amb_de_checker).Filter(clustered_index);
-    }
-    else
+    } else
         PairInfoFilter<Graph>(checker).Filter(clustered_index);
 //    filter.Filter(clustered_index);
     DEBUG("Info Filtered");
@@ -126,8 +125,7 @@ void estimate_distance(conj_graph_pack& gp,
     }  else
         weight_function = UnityFunction;
 
-//    PairInfoWeightFilter<Graph> filter(gp.g, config.de.filter_threshold);
-    PairInfoWeightChecker<Graph> checker(gp.g, config.de.filter_threshold);
+    PairInfoWeightChecker<Graph> checker(gp.g, config.de.clustered_filter_threshold);
 
     INFO("Weight Filter Done");
 
@@ -228,7 +226,7 @@ void DistanceEstimation::run(conj_graph_pack &gp, const char*) {
             }
             if (!cfg::get().preserve_raw_paired_index) {
                 INFO("Clearing raw paired index");
-                gp.paired_indices[i].Clear();
+                gp.paired_indices[i].clear();
             }
         }
 }
diff --git a/src/projects/spades/gap_closer.cpp b/src/projects/spades/gap_closer.cpp
index e311945..4a17509 100644
--- a/src/projects/spades/gap_closer.cpp
+++ b/src/projects/spades/gap_closer.cpp
@@ -7,19 +7,16 @@
 
 #include "gap_closer.hpp"
 #include "assembly_graph/stats/picture_dump.hpp"
-#include "algorithms/simplification/compressor.hpp"
+#include "modules/simplification/compressor.hpp"
 #include "io/dataset_support/read_converter.hpp"
 #include <stack>
 
 namespace debruijn_graph {
 
-template<class Graph, class SequenceMapper>
 class GapCloserPairedIndexFiller {
 private:
-    typedef typename Graph::EdgeId EdgeId;
-    typedef typename Graph::VertexId VertexId;
     const Graph &graph_;
-    const SequenceMapper &mapper_;
+    const SequenceMapper<Graph> &mapper_;
 
     size_t CorrectLength(Path<EdgeId> path, size_t idx) const {
         size_t answer = graph_.length(path[idx]);
@@ -143,13 +140,13 @@ private:
         INFO("Merging paired indices");
         for (auto &index: buffer_pi) {
             paired_index.Merge(index);
-            index.Clear();
+            index.clear();
         }
     }
 
 public:
 
-    GapCloserPairedIndexFiller(const Graph &graph, const SequenceMapper &mapper)
+    GapCloserPairedIndexFiller(const Graph &graph, const SequenceMapper<Graph> &mapper)
             : graph_(graph), mapper_(mapper) { }
 
     /**
@@ -167,11 +164,7 @@ public:
 
 };
 
-template<class Graph, class SequenceMapper>
 class GapCloser {
-public:
-    typedef std::function<bool(const Sequence &)> SequenceCheckF;
-private:
     typedef typename Graph::EdgeId EdgeId;
     typedef typename Graph::VertexId VertexId;
 
@@ -183,22 +176,6 @@ private:
     const int init_gap_val_;
     const omnigraph::de::DEWeight weight_threshold_;
 
-    SequenceMapper mapper_;
-    std::unordered_set<runtime_k::RtSeq> new_kmers_;
-
-    bool CheckNoKmerClash(const Sequence &s) {
-        runtime_k::RtSeq kmer(k_ + 1, s);
-        kmer >>= 'A';
-        for (size_t i = k_; i < s.size(); ++i) {
-            kmer <<= s[i];
-            if (new_kmers_.count(kmer)) {
-                return false;
-            }
-        }
-        std::vector<EdgeId> path = mapper_.MapSequence(s).simple_path();
-        return path.empty();
-    }
-
     std::vector<size_t> DiffPos(const Sequence &s1, const Sequence &s2) const {
         VERIFY(s1.size() == s2.size());
         std::vector<size_t> answer;
@@ -259,67 +236,40 @@ private:
                           : long_seq.Subseq(long_seq.size() - short_seq.size()) == short_seq;
     }
 
-    void AddEdge(VertexId start, VertexId end, const Sequence &s) {
-        runtime_k::RtSeq kmer(k_ + 1, s);
-        kmer >>= 'A';
-        for (size_t i = k_; i < s.size(); ++i) {
-            kmer <<= s[i];
-            new_kmers_.insert(kmer);
-            new_kmers_.insert(!kmer);
-        }
-        g_.AddEdge(start, end, s);
-    }
-
-    bool CorrectLeft(EdgeId first, EdgeId second, int overlap, const vector<size_t> &diff_pos) {
+    void CorrectLeft(EdgeId first, EdgeId second, int overlap, const vector<size_t> &diff_pos) {
         DEBUG("Can correct first with sequence from second.");
         Sequence new_sequence = g_.EdgeNucls(first).Subseq(g_.length(first) - overlap + diff_pos.front(),
                                                            g_.length(first) + k_ - overlap)
                                 + g_.EdgeNucls(second).First(k_);
         DEBUG("Checking new k+1-mers.");
-        if (CheckNoKmerClash(new_sequence)) {
-            DEBUG("Check ok.");
-            DEBUG("Splitting first edge.");
-            pair<EdgeId, EdgeId> split_res = g_.SplitEdge(first, g_.length(first) - overlap + diff_pos.front());
-            first = split_res.first;
-            tips_paired_idx_.Remove(split_res.second);
-            DEBUG("Adding new edge.");
-            VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeEnd(first)), true));
-            VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeStart(second)), false));
-            AddEdge(g_.EdgeEnd(first), g_.EdgeStart(second),
-                    new_sequence);
-            return true;
-        } else {
-            DEBUG("Check fail.");
-            DEBUG("Filled k-mer already present in graph");
-            return false;
-        }
-        return false;
+        DEBUG("Check ok.");
+        DEBUG("Splitting first edge.");
+        pair<EdgeId, EdgeId> split_res = g_.SplitEdge(first, g_.length(first) - overlap + diff_pos.front());
+        first = split_res.first;
+        tips_paired_idx_.Remove(split_res.second);
+        DEBUG("Adding new edge.");
+        VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeEnd(first)), true));
+        VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeStart(second)), false));
+        g_.AddEdge(g_.EdgeEnd(first), g_.EdgeStart(second),
+                new_sequence);
     }
 
-    bool CorrectRight(EdgeId first, EdgeId second, int overlap, const vector<size_t> &diff_pos) {
+    void CorrectRight(EdgeId first, EdgeId second, int overlap, const vector<size_t> &diff_pos) {
         DEBUG("Can correct second with sequence from first.");
         Sequence new_sequence =
                 g_.EdgeNucls(first).Last(k_) + g_.EdgeNucls(second).Subseq(overlap, diff_pos.back() + 1 + k_);
         DEBUG("Checking new k+1-mers.");
-        if (CheckNoKmerClash(new_sequence)) {
-            DEBUG("Check ok.");
-            DEBUG("Splitting second edge.");
-            pair<EdgeId, EdgeId> split_res = g_.SplitEdge(second, diff_pos.back() + 1);
-            second = split_res.second;
-            tips_paired_idx_.Remove(split_res.first);
-            DEBUG("Adding new edge.");
-            VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeEnd(first)), true));
-            VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeStart(second)), false));
-
-            AddEdge(g_.EdgeEnd(first), g_.EdgeStart(second),
-                    new_sequence);
-            return true;
-        } else {
-            DEBUG("Check fail.");
-            DEBUG("Filled k-mer already present in graph");
-            return false;
-        }
-        return false;
+        DEBUG("Check ok.");
+        DEBUG("Splitting second edge.");
+        pair<EdgeId, EdgeId> split_res = g_.SplitEdge(second, diff_pos.back() + 1);
+        second = split_res.second;
+        tips_paired_idx_.Remove(split_res.first);
+        DEBUG("Adding new edge.");
+        VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeEnd(first)), true));
+        VERIFY(MatchesEnd(new_sequence, g_.VertexNucls(g_.EdgeStart(second)), false));
+
+        g_.AddEdge(g_.EdgeEnd(first), g_.EdgeStart(second),
+                new_sequence);
     }
 
     bool HandlePositiveHammingDistanceCase(EdgeId first, EdgeId second, int overlap) {
@@ -327,9 +277,11 @@ private:
         vector<size_t> diff_pos = DiffPos(g_.EdgeNucls(first).Last(overlap),
                                           g_.EdgeNucls(second).First(overlap));
         if (CanCorrectLeft(first, overlap, diff_pos)) {
-            return CorrectLeft(first, second, overlap, diff_pos);
+            CorrectLeft(first, second, overlap, diff_pos);
+            return true;
         } else if (CanCorrectRight(second, overlap, diff_pos)) {
-            return CorrectRight(first, second, overlap, diff_pos);
+            CorrectRight(first, second, overlap, diff_pos);
+            return true;
         } else {
             DEBUG("Can't correct tips due to the graph structure");
             return false;
@@ -338,6 +290,7 @@ private:
 
     bool HandleSimpleCase(EdgeId first, EdgeId second, int overlap) {
         DEBUG("Match was perfect. No correction needed");
+        DEBUG("Overlap " << overlap);
         //strange info guard
         VERIFY(overlap <= k_);
         if (overlap == k_) {
@@ -347,15 +300,10 @@ private:
         //old code
         Sequence edge_sequence = g_.EdgeNucls(first).Last(k_)
                                  + g_.EdgeNucls(second).Subseq(overlap, k_);
-        if (CheckNoKmerClash(edge_sequence)) {
-            DEBUG("Gap filled: Gap size = " << k_ - overlap << "  Result seq "
-                  << edge_sequence.str());
-            AddEdge(g_.EdgeEnd(first), g_.EdgeStart(second), edge_sequence);
-            return true;
-        } else {
-            DEBUG("Filled k-mer already present in graph");
-            return false;
-        }
+        DEBUG("Gap filled: Gap size = " << k_ - overlap << "  Result seq "
+              << edge_sequence.str());
+        g_.AddEdge(g_.EdgeEnd(first), g_.EdgeStart(second), edge_sequence);
+        return true;
     }
 
     bool ProcessPair(EdgeId first, EdgeId second) {
@@ -367,14 +315,11 @@ private:
             DEBUG("Trying to join conjugate edges " << g_.int_id(first));
             return false;
         }
-        //may be negative!
-        int gap = max(init_gap_val_,
-                      -1 * (int) (min(g_.length(first), g_.length(second)) - 1));
 
         Sequence seq1 = g_.EdgeNucls(first);
         Sequence seq2 = g_.EdgeNucls(second);
-        TRACE("Checking possible gaps from " << gap << " to " << k_ - min_intersection_);
-        for (; gap <= k_ - (int) min_intersection_; ++gap) {
+        TRACE("Checking possible gaps from 1 to " << k_ - min_intersection_);
+        for (int gap = 1; gap <= k_ - (int) min_intersection_; ++gap) {
             int overlap = k_ - gap;
             size_t hamming_distance = HammingDistance(g_.EdgeNucls(first).Last(overlap),
                                                       g_.EdgeNucls(second).First(overlap));
@@ -439,7 +384,6 @@ public:
 
     GapCloser(Graph &g, omnigraph::de::PairedInfoIndexT<Graph> &tips_paired_idx,
               size_t min_intersection, double weight_threshold,
-              const SequenceMapper &mapper,
               size_t hamming_dist_bound = 0 /*min_intersection_ / 5*/)
             : g_(g),
               k_((int) g_.k()),
@@ -447,9 +391,7 @@ public:
               min_intersection_(min_intersection),
               hamming_dist_bound_(hamming_dist_bound),
               init_gap_val_(-10),
-              weight_threshold_(weight_threshold),
-              mapper_(mapper),
-              new_kmers_() {
+              weight_threshold_(weight_threshold)  {
         VERIFY(min_intersection_ < g_.k());
         DEBUG("weight_threshold=" << weight_threshold_);
         DEBUG("min_intersect=" << min_intersection_);
@@ -462,19 +404,17 @@ private:
 
 template<class Streams>
 void CloseGaps(conj_graph_pack &gp, Streams &streams) {
-    typedef NewExtendedSequenceMapper<Graph, Index> Mapper;
     auto mapper = MapperInstance(gp);
-    GapCloserPairedIndexFiller<Graph, Mapper> gcpif(gp.g, *mapper);
+    GapCloserPairedIndexFiller gcpif(gp.g, *mapper);
     PairedIndexT tips_paired_idx(gp.g);
     gcpif.FillIndex(tips_paired_idx, streams);
-    GapCloser<Graph, Mapper> gap_closer(gp.g, tips_paired_idx,
-                                        cfg::get().gc.minimal_intersection, cfg::get().gc.weight_threshold,
-                                        *mapper);
+    GapCloser gap_closer(gp.g, tips_paired_idx,
+                         cfg::get().gc.minimal_intersection, cfg::get().gc.weight_threshold);
     gap_closer.CloseShortGaps();
 }
 
 void GapClosing::run(conj_graph_pack &gp, const char *) {
-    omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
     stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
     printer(config::info_printer_pos::before_first_gap_closer);
 
diff --git a/src/projects/spades/gap_closing.hpp b/src/projects/spades/gap_closing.hpp
new file mode 100644
index 0000000..182f055
--- /dev/null
+++ b/src/projects/spades/gap_closing.hpp
@@ -0,0 +1,74 @@
+#pragma once
+
+#include "assembly_graph/core/graph.hpp"
+#include "assembly_graph/graph_support/basic_vertex_conditions.hpp"
+#include "assembly_graph/graph_support/edge_removal.hpp"
+#include "modules/simplification/compressor.hpp"
+
+namespace debruijn_graph {
+
+namespace gap_closing {
+typedef omnigraph::GapDescription<Graph> GapDescription;
+
+class GapJoiner {
+    Graph& g_;
+    omnigraph::EdgeRemover<Graph> edge_remover_;
+    bool add_flanks_;
+
+    EdgeId ClipEnd(EdgeId e, size_t pos) {
+        VERIFY(pos > 0);
+        VERIFY(omnigraph::TerminalVertexCondition<Graph>(g_).Check(g_.EdgeEnd(e)));
+        VERIFY(e != g_.conjugate(e));
+        if (pos == g_.length(e)) {
+            return e;
+        } else {
+            auto split_res = g_.SplitEdge(e, pos);
+            edge_remover_.DeleteEdge(split_res.second);
+            return split_res.first;
+        }
+    }
+
+    EdgeId ClipStart(EdgeId e, size_t pos) {
+        return g_.conjugate(ClipEnd(g_.conjugate(e), g_.length(e) - pos));
+    }
+
+    EdgeId AddEdge(VertexId v1, VertexId v2, const Sequence& gap_seq) {
+        if (!add_flanks_) {
+            VERIFY_MSG(g_.VertexNucls(v1) == gap_seq.Subseq(0, g_.k()), 
+                       g_.VertexNucls(v1) << " not equal " << gap_seq.Subseq(0, g_.k()));
+            VERIFY_MSG(g_.VertexNucls(v2) == gap_seq.Subseq(gap_seq.size() - g_.k()),
+                       g_.VertexNucls(v2) << " not equal " << gap_seq.Subseq(gap_seq.size() - g_.k()));
+            return g_.AddEdge(v1, v2, gap_seq);
+        } else {
+            DEBUG("Adding gap seq " << gap_seq);
+            DEBUG("Between vertices " << g_.VertexNucls(v1) << " and " << g_.VertexNucls(v2));
+            return g_.AddEdge(v1, v2, g_.VertexNucls(v1) + gap_seq + g_.VertexNucls(v2));
+        }
+    }
+
+public:
+    GapJoiner(Graph& g, bool add_flanks = false) :
+            g_(g),
+            edge_remover_(g),
+            add_flanks_(add_flanks) {
+    }
+
+    EdgeId operator() (const GapDescription& gap, bool compress = true) {
+        VERIFY(gap.start != gap.end && gap.start != g_.conjugate(gap.end));
+        DEBUG("Processing gap " << gap.str(g_));
+        EdgeId start = ClipEnd(gap.start, gap.edge_gap_start_position);
+        EdgeId end = ClipStart(gap.end, gap.edge_gap_end_position);
+        EdgeId new_edge = AddEdge(g_.EdgeEnd(start), g_.EdgeStart(end), gap.gap_seq);
+
+        if (compress) {
+            return omnigraph::Compressor<Graph>(g_).CompressVertexEdgeId(g_.EdgeStart(new_edge));
+        } else {
+            return new_edge;
+        }
+    }
+private:
+    DECL_LOGGER("GapJoiner");
+};
+
+}
+}
diff --git a/src/projects/spades/hybrid_aligning.cpp b/src/projects/spades/hybrid_aligning.cpp
new file mode 100644
index 0000000..ffdd915
--- /dev/null
+++ b/src/projects/spades/hybrid_aligning.cpp
@@ -0,0 +1,462 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#include "modules/alignment/pacbio/pac_index.hpp"
+#include "hybrid_gap_closer.hpp"
+#include "modules/alignment/long_read_mapper.hpp"
+#include "modules/alignment/short_read_mapper.hpp"
+#include "io/reads/wrapper_collection.hpp"
+#include "assembly_graph/stats/picture_dump.hpp"
+#include "hybrid_aligning.hpp"
+#include "pair_info_count.hpp"
+#include "io/reads/multifile_reader.hpp"
+
+namespace debruijn_graph {
+
+namespace gap_closing {
+
+//TODO standard aligner badly needs spurious match filtering
+class GapTrackingListener : public SequenceMapperListener {
+    const Graph& g_;
+    GapStorage& gap_storage_;
+    const GapStorage empty_storage_;
+    vector<GapStorage> buffer_storages_;
+
+    const GapDescription INVALID_GAP;
+
+    boost::optional<Sequence> Subseq(const io::SingleRead& read, size_t start, size_t end) const {
+        DEBUG("Requesting subseq of read length " << read.size() << " from " << start << " to " << end);
+        VERIFY(end > start);
+        //if (end == start) {
+        //    DEBUG("Returning empty sequence");
+        //    return boost::make_optional(Sequence());
+        //}
+        auto subread = read.Substr(start, end);
+        if (subread.IsValid()) {
+            DEBUG("Gap seq valid. Length " << subread.size());
+            return boost::make_optional(subread.sequence());
+        } else {
+            DEBUG("Gap seq invalid. Length " << subread.size());
+            DEBUG("sequence: " << subread.GetSequenceString());
+            return boost::none;
+        }
+    }
+
+    boost::optional<Sequence> Subseq(const io::SingleReadSeq& read, size_t start, size_t end) const {
+        return boost::make_optional(read.sequence().Subseq(start, end));
+    }
+
+    template<class ReadT>
+    GapDescription
+    CreateDescription(const ReadT& read, size_t seq_start, size_t seq_end,
+                      EdgeId left, size_t left_offset,
+                      EdgeId right, size_t right_offset) const {
+        VERIFY(left_offset > 0 && right_offset < g_.length(right));
+
+        DEBUG("Creating gap description");
+
+        //trying to shift on the left edge
+        if (seq_start >= seq_end) {
+            //+1 is a trick to avoid empty gap sequences
+            size_t overlap = seq_start - seq_end + 1;
+            DEBUG("Overlap of size " << overlap << " detected. Fixing.");
+            size_t left_shift = std::min(overlap, left_offset - 1);
+            VERIFY(seq_start >= left_shift);
+            seq_start -= left_shift;
+            left_offset -= left_shift;
+        }
+        //trying to shift on the right edge
+        if (seq_start >= seq_end) {
+            //+1 is a trick to avoid empty gap sequences
+            size_t overlap = seq_start - seq_end + 1;
+            DEBUG("Overlap of size " << overlap << " remained. Fixing.");
+            size_t right_shift = std::min(overlap, g_.length(right) - right_offset - 1);
+            VERIFY(seq_end + right_shift <= read.size());
+            seq_end += right_shift;
+            right_offset += right_shift;
+        }
+
+        if (seq_start < seq_end) {
+            auto gap_seq = Subseq(read, seq_start, seq_end);
+            if (gap_seq) {
+                DEBUG("Gap info successfully created");
+                return GapDescription(left, right,
+                                      *gap_seq,
+                                      left_offset,
+                                      right_offset);
+            } else {
+                DEBUG("Something wrong with read subsequence");
+            }
+        } else {
+            size_t overlap = seq_start - seq_end + 1;
+            DEBUG("Failed to fix overlap of size " << overlap);
+        }
+        return INVALID_GAP;
+    }
+
+    template<class ReadT>
+    vector<GapDescription> InferGaps(const ReadT& read,
+            const MappingPath<EdgeId>& mapping) const {
+        TerminalVertexCondition<Graph> tip_condition(g_);
+        DEBUG("Inferring gaps")
+        VERIFY(!mapping.empty());
+        vector<GapDescription> answer;
+        for (size_t i = 0; i < mapping.size() - 1; ++i) {
+            EdgeId e1 = mapping.edge_at(i);
+            EdgeId e2 = mapping.edge_at(i + 1);
+
+            //sorry, loops and other special cases
+            if (e1 != e2 && e1 != g_.conjugate(e2)
+                && e1 != g_.conjugate(e1) && e2 != g_.conjugate(e2)
+                && tip_condition.Check(g_.EdgeEnd(e1))
+                && tip_condition.Check(g_.EdgeStart(e2))) {
+
+                MappingRange mr1 = mapping.mapping_at(i);
+                MappingRange mr2 = mapping.mapping_at(i + 1);
+                DEBUG("Creating description from mapping ranges " << mr1 << " and " << mr2);
+                size_t seq_start = mr1.initial_range.end_pos + g_.k();
+                size_t seq_end = mr2.initial_range.start_pos;
+
+                auto gap = CreateDescription(read, seq_start, seq_end,
+                                             e1, mr1.mapped_range.end_pos,
+                                             e2, mr2.mapped_range.start_pos);
+
+                if (gap != INVALID_GAP) {
+                    answer.push_back(gap);
+                }
+            }
+        }
+        return answer;
+    }
+
+    template<class ReadT>
+    void InnerProcessRead(size_t thread_index, const ReadT& read, const MappingPath<EdgeId>& mapping) {
+        DEBUG("Processing read");
+        if (!mapping.empty()) {
+            for (const auto& gap: InferGaps(read, mapping)) {
+                DEBUG("Adding gap info " << gap.str(g_));
+                buffer_storages_[thread_index].AddGap(gap);
+            }
+        } else {
+            DEBUG("Mapping was empty");
+        }
+        DEBUG("Read processed");
+    }
+
+public:
+
+    //ALERT passed path_storage should be empty!
+    GapTrackingListener(const Graph& g,
+                        GapStorage& gap_storage) :
+            g_(g), gap_storage_(gap_storage), empty_storage_(gap_storage) {
+        VERIFY(empty_storage_.size() == 0);
+    }
+
+    void StartProcessLibrary(size_t threads_count) override {
+        for (size_t i = 0; i < threads_count; ++i) {
+            buffer_storages_.push_back(empty_storage_);
+        }
+    }
+
+    void StopProcessLibrary() override {
+        //FIXME put this code into ancestor
+        for (size_t i = 0; i < buffer_storages_.size(); ++i) {
+            MergeBuffer(i);
+        }
+        buffer_storages_.clear();
+    }
+
+    void MergeBuffer(size_t thread_index) override {
+        DEBUG("Merge buffer " << thread_index << " with size " << buffer_storages_[thread_index].size());
+        gap_storage_.AddStorage(buffer_storages_[thread_index]);
+        buffer_storages_[thread_index].clear();
+        DEBUG("Now size " << gap_storage_.size());
+    }
+
+    void ProcessSingleRead(size_t thread_index,
+                           const io::SingleRead& read,
+                           const MappingPath<EdgeId>& mapping) override {
+        InnerProcessRead(thread_index, read, mapping);
+    }
+
+    void ProcessSingleRead(size_t thread_index,
+                           const io::SingleReadSeq& read,
+                           const MappingPath<EdgeId>& mapping) override {
+        InnerProcessRead(thread_index, read, mapping);
+    }
+
+    void ProcessPairedRead(size_t,
+                           const io::PairedReadSeq&,
+                           const MappingPath<EdgeId>&,
+                           const MappingPath<EdgeId>&) override {
+        //nothing to do
+    }
+
+    void ProcessPairedRead(size_t,
+                           const io::PairedRead&,
+                           const MappingPath<EdgeId>&,
+                           const MappingPath<EdgeId>&) override {
+        //nothing to do
+    }
+
+private:
+    DECL_LOGGER("GapTrackingListener");
+};
+
+bool IsNontrivialAlignment(const vector<vector<EdgeId>>& aligned_edges) {
+    for (size_t j = 0; j < aligned_edges.size(); j++)
+        if (aligned_edges[j].size() > 1)
+            return true;
+    return false;
+}
+
+io::SingleStreamPtr GetReadsStream(const io::SequencingLibrary<config::DataSetData>& lib) {
+    io::ReadStreamList<io::SingleRead> streams;
+    for (const auto& reads : lib.single_reads())
+        //do we need input_file function here?
+        //TODO add decent support for N-s?
+        streams.push_back(make_shared<io::FixingWrapper>(make_shared<io::FileReadStream>(reads)));
+    return io::MultifileWrap(streams);
+}
+
+// Aligns long reads (PacBio/Nanopore/untrusted contigs) against the assembly
+// graph in fixed-size batches.  Results accumulate into the caller-provided
+// path storage (read paths through the graph) and gap storage (candidate
+// sequences spanning coverage gaps); aggregate alignment statistics are kept
+// in stats_.
+class PacbioAligner {
+    const pacbio::PacBioMappingIndex<Graph>& pac_index_;
+    PathStorage<Graph>& path_storage_;
+    GapStorage& gap_storage_;
+    pacbio::StatsCounter stats_;
+    // Pristine (empty) copies used to seed the per-thread storages in
+    // ProcessReadsBatch; emptiness is enforced by the constructor VERIFYs.
+    const PathStorage<Graph> empty_path_storage_;
+    const GapStorage empty_gap_storage_;
+    const size_t read_buffer_size_;
+
+    // Aligns one batch of reads in parallel.  Each OpenMP thread writes to
+    // its own storage/stats slot (indexed by omp_get_thread_num()); the
+    // scalar counters are combined via the reduction clause and the slots
+    // are merged sequentially afterwards.
+    void ProcessReadsBatch(const std::vector<io::SingleRead>& reads, size_t thread_cnt) {
+        vector<PathStorage<Graph>> long_reads_by_thread(thread_cnt,
+                                                        empty_path_storage_);
+        vector<GapStorage> gaps_by_thread(thread_cnt,
+                                          empty_gap_storage_);
+        vector<pacbio::StatsCounter> stats_by_thread(thread_cnt);
+
+        size_t longer_500 = 0;
+        size_t aligned = 0;
+        size_t nontrivial_aligned = 0;
+
+        #pragma omp parallel for reduction(+: longer_500, aligned, nontrivial_aligned)
+        for (size_t i = 0; i < reads.size(); ++i) {
+            size_t thread_num = omp_get_thread_num();
+            Sequence seq(reads[i].sequence());
+            auto current_read_mapping = pac_index_.GetReadAlignment(seq);
+            for (const auto& gap : current_read_mapping.gaps)
+                gaps_by_thread[thread_num].AddGap(gap);
+
+            const auto& aligned_edges = current_read_mapping.main_storage;
+            for (const auto& path : aligned_edges)
+                long_reads_by_thread[thread_num].AddPath(path, 1, true);
+
+            //counting stats:
+            for (const auto& path : aligned_edges)
+                stats_by_thread[thread_num].path_len_in_edges[path.size()]++;
+
+            // Alignment-rate stats are only collected for reads longer than
+            // 500 bp; shorter reads are aligned but not counted here.
+            if (seq.size() > 500) {
+                longer_500++;
+                if (aligned_edges.size() > 0) {
+                    aligned++;
+                    // Histogram bucket: seeds per kilobase of read length.
+                    stats_by_thread[thread_num].seeds_percentage[
+                            size_t(floor(double(current_read_mapping.seed_num) * 1000.0
+                                         / (double) seq.size()))]++;
+
+                    if (IsNontrivialAlignment(aligned_edges)) {
+                        nontrivial_aligned++;
+                    }
+                }
+            }
+        }
+
+        INFO("Read batch of size: " << reads.size() << " processed; "
+                                    << longer_500 << " of them longer than 500; among long reads aligned: "
+                                    << aligned << "; paths of more than one edge received: "
+                                    << nontrivial_aligned);
+
+        // Merge per-thread results back into the shared storages (serial).
+        for (size_t i = 0; i < thread_cnt; i++) {
+            path_storage_.AddStorage(long_reads_by_thread[i]);
+            gap_storage_.AddStorage(gaps_by_thread[i]);
+            stats_.AddStorage(stats_by_thread[i]);
+        }
+    }
+
+public:
+    // path_storage/gap_storage must be empty at construction time: they are
+    // copied to make the per-thread "empty" prototypes.
+    PacbioAligner(const pacbio::PacBioMappingIndex<Graph>& pac_index,
+                  PathStorage<Graph>& path_storage,
+                  GapStorage& gap_storage,
+                  size_t read_buffer_size = 50000) :
+            pac_index_(pac_index),
+            path_storage_(path_storage),
+            gap_storage_(gap_storage),
+            empty_path_storage_(path_storage),
+            empty_gap_storage_(gap_storage),
+            read_buffer_size_(read_buffer_size) {
+        VERIFY(empty_path_storage_.size() == 0);
+        VERIFY(empty_gap_storage_.size() == 0);
+    }
+
+    // Consumes the whole stream, reading up to read_buffer_size_ reads per
+    // batch and dispatching each batch to ProcessReadsBatch.
+    void operator()(io::SingleStream& read_stream, size_t thread_cnt) {
+        size_t n = 0;
+        size_t buffer_no = 0;
+        while (!read_stream.eof()) {
+            std::vector<io::SingleRead> read_buffer;
+            read_buffer.reserve(read_buffer_size_);
+            io::SingleRead read;
+            for (size_t buf_size = 0; buf_size < read_buffer_size_ && !read_stream.eof(); ++buf_size) {
+                read_stream >> read;
+                read_buffer.push_back(std::move(read));
+            }
+            INFO("Prepared batch " << buffer_no << " of " << read_buffer.size() << " reads.");
+            DEBUG("master thread number " << omp_get_thread_num());
+            ProcessReadsBatch(read_buffer, thread_cnt);
+            ++buffer_no;
+            n += read_buffer.size();
+            INFO("Processed " << n << " reads");
+        }
+    }
+
+    // Aggregated statistics over all batches processed so far.
+    const pacbio::StatsCounter& stats() const {
+        return stats_;
+    }
+};
+
+// Aligns one sequencing library with the PacBio-specific aligner: builds the
+// k-mer mapping index from the current graph and config, streams all reads of
+// the library through a PacbioAligner, and reports aggregate statistics.
+// Results go into path_storage / gap_storage; thread_cnt bounds the OpenMP
+// workers used per batch.
+void PacbioAlignLibrary(const conj_graph_pack& gp,
+                        const io::SequencingLibrary<config::DataSetData>& lib,
+                        PathStorage<Graph>& path_storage,
+                        GapStorage& gap_storage,
+                        size_t thread_cnt) {
+    INFO("Aligning library with Pacbio aligner");
+
+    INFO("Using seed size: " << cfg::get().pb.pacbio_k);
+
+    //initializing index
+    pacbio::PacBioMappingIndex<Graph> pac_index(gp.g,
+                                                cfg::get().pb.pacbio_k,
+                                                cfg::get().K,
+                                                cfg::get().pb.ignore_middle_alignment,
+                                                cfg::get().output_dir,
+                                                cfg::get().pb);
+
+    PacbioAligner aligner(pac_index, path_storage, gap_storage);
+
+    auto stream = GetReadsStream(lib);
+    aligner(*stream, thread_cnt);
+
+    INFO("For library of " << (lib.is_long_read_lib() ? "long reads" : "contigs") << " :");
+    aligner.stats().report();
+    INFO("PacBio aligning finished");
+}
+
+// Closes the gaps collected in gap_storage.  The consensus function depends
+// on the library kind: rtype == true (long-read library) uses POA consensus
+// over all gap variants; otherwise (contigs) a trivial consensus is used,
+// capped at pb.max_contigs_gap_length.  After closure, the edge-replacement
+// map produced by the gap closer is applied to the long-read path storages
+// of every library so their paths stay valid in the modified graph.
+void CloseGaps(conj_graph_pack& gp, bool rtype,
+               const GapStorage& gap_storage, 
+               size_t min_weight) {
+    INFO("Closing gaps with long reads");
+
+    HybridGapCloser::ConsensusF consensus_f;
+    if (rtype) {
+        consensus_f = &PoaConsensus;
+    } else {
+        // Capture by value: reads the length cap from cfg at call time.
+        consensus_f = [=](const vector<string>& gap_seqs) {
+            return TrivialConsenus(gap_seqs, cfg::get().pb.max_contigs_gap_length);
+        };
+    }
+
+    HybridGapCloser gap_closer(gp.g, gap_storage,
+                               min_weight, consensus_f,
+                               cfg::get().pb.long_seq_limit);
+    auto replacement = gap_closer();
+
+    for (size_t j = 0; j < cfg::get().ds.reads.lib_count(); j++) {
+        gp.single_long_reads[j].ReplaceEdges(replacement);
+    }
+
+    INFO("Closing gaps with long reads finished");
+}
+}
+using namespace gap_closing;
+
+// Selects the alignment backend for a hybrid library: these library types go
+// through the PacBio-specific aligner, everything else through the standard
+// sequence mapper.  TSLReads support is present but disabled (commented out).
+bool ShouldAlignWithPacbioAligner(io::LibraryType lib_type) {
+    return lib_type == io::LibraryType::UntrustedContigs || 
+           lib_type == io::LibraryType::PacBioReads ||
+           lib_type == io::LibraryType::SangerReads ||
+           lib_type == io::LibraryType::NanoporeReads; //||
+//           lib_type == io::LibraryType::TSLReads;
+}
+
+// Assembly-stage entry point.  For every hybrid (long-read / contig) library:
+// aligns its reads to the graph (PacBio aligner or standard mapper depending
+// on library type), optionally dumps intermediate saves, filters the
+// collected gap candidates, and closes the gaps in the graph.
+void HybridLibrariesAligning::run(conj_graph_pack& gp, const char*) {
+    using namespace omnigraph;
+
+    bool make_additional_saves = parent_->saves_policy().make_saves_;
+    for (size_t lib_id = 0; lib_id < cfg::get().ds.reads.lib_count(); ++lib_id) {
+        if (cfg::get().ds.reads[lib_id].is_hybrid_lib()) {
+            INFO("Hybrid library detected: #" << lib_id);
+
+            const auto& lib = cfg::get().ds.reads[lib_id];
+            bool rtype = lib.is_long_read_lib();
+
+            auto& path_storage = gp.single_long_reads[lib_id];
+            GapStorage gap_storage(gp.g);
+
+            if (ShouldAlignWithPacbioAligner(lib.type())) {
+                //TODO put alternative alignment right here
+                PacbioAlignLibrary(gp, lib,
+                                   path_storage, gap_storage,
+                                   cfg::get().max_threads);
+            } else {
+                // Standard mapper path: gaps are collected through a listener
+                // subscribed to the mapping notifier alongside the long-read
+                // path extractor.
+                gp.EnsureBasicMapping();
+                GapTrackingListener mapping_listener(gp.g, gap_storage);
+                INFO("Processing reads from hybrid library " << lib_id);
+
+                //FIXME make const
+                auto& reads = cfg::get_writable().ds.reads[lib_id];
+
+                SequenceMapperNotifier notifier(gp);
+                //FIXME pretty awful, would be much better if listeners were shared ptrs
+                LongReadMapper read_mapper(gp.g, gp.single_long_reads[lib_id],
+                                           ChooseProperReadPathExtractor(gp.g, reads.type()));
+
+                notifier.Subscribe(lib_id, &mapping_listener);
+                notifier.Subscribe(lib_id, &read_mapper);
+
+                auto mapper_ptr = ChooseProperMapper(gp, reads);
+                //FIXME think of N's proper handling
+                auto single_streams = single_easy_readers(reads, false,
+                                                          /*map_paired*/false, /*handle Ns*/false);
+
+                notifier.ProcessLibrary(single_streams, lib_id, *mapper_ptr);
+                cfg::get_writable().ds.reads[lib_id].data().single_reads_mapped = true;
+
+                INFO("Finished processing long reads from lib " << lib_id);
+                gp.index.Detach();
+            }
+
+            if (make_additional_saves) {
+                INFO("Producing additional saves");
+                path_storage.DumpToFile(cfg::get().output_saves + "long_reads_before_rep.mpr",
+                                        map<EdgeId, EdgeId>(), /*min_stats_cutoff*/rtype ? 1 : 0, true);
+                gap_storage.DumpToFile(cfg::get().output_saves + "gaps.mpr");
+            }
+
+            INFO("Padding gaps");
+            // Long-read libraries and contig libraries have separate minimum
+            // support thresholds for a gap to be considered for closure.
+            size_t min_gap_quantity = rtype ? cfg::get().pb.pacbio_min_gap_quantity
+                                            : cfg::get().pb.contigs_min_gap_quantity;
+
+            INFO("Min gap weight set to " << min_gap_quantity);
+            gap_storage.PrepareGapsForClosure(min_gap_quantity, /*max flank length*/500);
+
+            gap_closing::CloseGaps(gp, rtype, gap_storage, min_gap_quantity);
+        }
+    }
+
+    visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
+    printer(config::info_printer_pos::final_gap_closed);
+}
+
+}
diff --git a/src/projects/spades/pacbio_aligning.hpp b/src/projects/spades/hybrid_aligning.hpp
similarity index 75%
rename from src/projects/spades/pacbio_aligning.hpp
rename to src/projects/spades/hybrid_aligning.hpp
index 4e7d2a9..d29d694 100644
--- a/src/projects/spades/pacbio_aligning.hpp
+++ b/src/projects/spades/hybrid_aligning.hpp
@@ -11,10 +11,10 @@
 
 namespace debruijn_graph {
 
-class PacBioAligning : public spades::AssemblyStage {
+class HybridLibrariesAligning : public spades::AssemblyStage {
 public:
-    PacBioAligning()
-            : AssemblyStage("PacBio Aligning", "pacbio_aligning") {
+    HybridLibrariesAligning()
+            : AssemblyStage("Hybrid Aligning", "hybrid_aligning") {
     }
     void run(conj_graph_pack &gp, const char*);
 };
diff --git a/src/projects/spades/hybrid_gap_closer.hpp b/src/projects/spades/hybrid_gap_closer.hpp
new file mode 100644
index 0000000..0443715
--- /dev/null
+++ b/src/projects/spades/hybrid_gap_closer.hpp
@@ -0,0 +1,743 @@
+//***************************************************************************
+//* Copyright (c) 2015 Saint Petersburg State University
+//* Copyright (c) 2011-2014 Saint Petersburg Academic University
+//* All Rights Reserved
+//* See file LICENSE for details.
+//***************************************************************************
+
+#pragma once
+
+#include "assembly_graph/core/graph.hpp"
+#include "modules/alignment/sequence_mapper.hpp"
+#include "ConsensusCore/Poa/PoaConfig.hpp"
+#include "ConsensusCore/Poa/PoaConsensus.hpp"
+#include "gap_closing.hpp"
+
+#include <algorithm>
+#include <fstream>
+
+namespace debruijn_graph {
+namespace gap_closing {
+typedef vector<GapDescription> GapInfos;
+
+typedef pair<EdgeId, EdgeId> EdgePair;
+// Conjugate of an ordered edge pair: conjugates both edges and swaps them,
+// so the pair keeps its "left -> right" orientation on the reverse strand.
+inline EdgePair Conjugate(const Graph& g, EdgePair ep) {
+    return EdgePair(g.conjugate(ep.second), g.conjugate(ep.first));
+}
+
+// A pair is canonical iff it is lexicographically <= its conjugate; exactly
+// one of {ep, Conjugate(ep)} is canonical (both, if self-conjugate).
+inline bool IsCanonical(const Graph& g, const EdgePair& ep) {
+    return ep <= Conjugate(g, ep);
+}
+
+inline bool IsCanonical(const Graph& g, EdgeId a, EdgeId b) {
+    return IsCanonical(g, EdgePair(a,b));
+}
+
+// Single-edge canonicity: an edge is canonical iff <= its conjugate.
+inline bool IsCanonical(const Graph& g, EdgeId e) {
+    return e <= g.conjugate(e);
+}
+
+// Returns the canonical representative of {ep, Conjugate(ep)}.
+inline EdgePair GetCanonical(const Graph& g, const EdgePair& ep) {
+    return IsCanonical(g, ep) ? ep : Conjugate(g, ep);
+}
+
+// Stores candidate gap closures collected from long-read alignments, indexed
+// by the (canonical) left edge of each gap.  Provides filtering of weak,
+// transitive and ambiguous candidates before the actual closure step.
+class GapStorage {
+public:
+    typedef typename GapInfos::const_iterator gap_info_it;
+    // Half-open range [first, second) of gaps sharing the same edge pair.
+    typedef std::pair<gap_info_it, gap_info_it> info_it_pair;
+private:
+    typedef std::function<bool (gap_info_it, gap_info_it)> CandidatesPred;
+    typedef std::function<bool (const EdgePair&)> EdgePairPred;
+    typedef std::function<bool (const GapDescription&)> DescriptionPred;
+    typedef std::set<EdgePair> ConnectionSet;
+
+    const Graph& g_;
+
+    // left edge -> all gap candidates starting on it (canonical orientation).
+    map<EdgeId, GapInfos> inner_index_;
+    // Flat list of the keys of inner_index_, filled by FillIndex().
+    vector<EdgeId> index_;
+
+    DECL_LOGGER("GapStorage");
+
+    void HiddenAddGap(const GapDescription& p) {
+        inner_index_[p.start].push_back(p);
+    }
+
+    // Populates index_ from the keys of inner_index_; must only run once.
+    size_t FillIndex() {
+        VERIFY(index_.empty());
+        index_.reserve(inner_index_.size());
+        set<EdgeId> tmp;
+        // NOTE(review): `tmp` above is never used — candidate for removal.
+        for (const auto& kv : inner_index_) {
+            index_.push_back(kv.first);
+        }
+        return index_.size();
+    }
+
+    // Converts a const_iterator into v to a mutable iterator at the same
+    // position (needed because EdgePairGaps hands out const_iterators).
+    typename std::vector<GapDescription>::iterator
+    const_iterator_cast(std::vector<GapDescription> &v,
+                        typename std::vector<GapDescription>::const_iterator iter) const {
+        return v.begin() + (iter - v.cbegin());
+    }
+
+    //Function should return true if corresponding part of the index should be removed
+    void FilterByCandidates(const CandidatesPred &filter_f) {
+        for (auto it = inner_index_.begin(); it != inner_index_.end(); ) {
+            vector<GapDescription>& gaps = it->second;
+            auto ep_ranges = EdgePairGaps(gaps);
+
+            // Surviving ranges are compacted in place towards copy_dest; the
+            // first branch skips the move when a range is already in place.
+            auto copy_dest = gaps.begin();
+            for (const info_it_pair& ep_gaps : ep_ranges) {
+                if (filter_f(ep_gaps.first, ep_gaps.second)) {
+                    DEBUG("Erasing candidates between " << g_.int_id(ep_gaps.first->start) << " and "
+                                                        << g_.int_id(ep_gaps.first->end));
+                } else {
+                    if (copy_dest == const_iterator_cast(gaps, ep_gaps.first)) {
+                        copy_dest = const_iterator_cast(gaps, ep_gaps.second);
+                    } else {
+                        copy_dest = std::move(ep_gaps.first, ep_gaps.second, copy_dest);
+                    }
+                }
+            }
+            // Erase the whole map entry if nothing survived (post-increment
+            // keeps the loop iterator valid across map::erase).
+            if (copy_dest == gaps.begin()) {
+                inner_index_.erase(it++);
+            } else {
+                gaps.erase(copy_dest, gaps.end());
+                ++it;
+            }
+        }
+    }
+
+    // Adapter: filter whole edge-pair ranges by a predicate on the pair only.
+    void FilterByEdgePair(const EdgePairPred &filter_f) {
+        FilterByCandidates([=](gap_info_it info_start, gap_info_it /*info_end*/) {
+            return filter_f(EdgePair(info_start->start, info_start->end));
+        });
+    }
+
+    // Removes individual gap descriptions matching filter_f; drops map
+    // entries that become empty.
+    void FilterByDescription(const DescriptionPred &filter_f) {
+        for (auto it = inner_index_.begin(); it != inner_index_.end(); ) {
+            vector<GapDescription>& gaps = it->second;
+            auto res_it = std::remove_if(gaps.begin(), gaps.end(), filter_f);
+            if (res_it == gaps.begin()) {
+                inner_index_.erase(it++);
+            } else {
+                gaps.erase(res_it, gaps.end());
+                ++it;
+            }
+        }
+    }
+
+    // One representative right edge per edge-pair range (edge_gaps sorted).
+    vector<EdgeId> SecondEdges(const GapInfos& edge_gaps) const {
+        vector<EdgeId> jump_edges;
+        for (auto it_pair : EdgePairGaps(edge_gaps)) {
+            jump_edges.push_back(it_pair.first->end);
+        }
+        return jump_edges;
+    };
+
+    // All (left, right) connections present in the index, together with
+    // their conjugates, so lookups work regardless of orientation.
+    ConnectionSet GetAllConnections() const {
+        ConnectionSet answer;
+        for (const auto& e_gaps : inner_index_) {
+            EdgeId e1 = e_gaps.first;
+            for (EdgeId e2: SecondEdges(e_gaps.second)) {
+                EdgePair ep(e1, e2);
+                answer.insert(ep);
+                answer.insert(Conjugate(g_, ep));
+            }
+        }
+        return answer;
+    };
+
+    //outputs set of transitively-redundant CANONICAL connections
+    // A connection a->c is redundant if a->b and b->c are both present;
+    // relies on std::set ordering to group connections by their left edge.
+    ConnectionSet DetectTransitive() const {
+        auto all_connections = GetAllConnections();
+        ConnectionSet answer;
+        for (auto it = all_connections.begin(), end_it = all_connections.end(); it != end_it; ) {
+            EdgeId left = it->first;
+            vector<EdgeId> right_options;
+            auto inner_it = it;
+            for (; inner_it != end_it && inner_it->first == left; ++inner_it) {
+                right_options.push_back(inner_it->second);
+            }
+
+            for (size_t i = 0; i < right_options.size(); ++i) {
+                for (size_t j = 0; j < right_options.size(); ++j) {
+                    if (i == j)
+                        continue;
+                    if (all_connections.count(EdgePair(right_options[i], right_options[j]))) {
+                        //TODO should we add sanity checks that other edges of the triangle are not there?
+                        answer.insert(GetCanonical(g_, EdgePair(left, right_options[j])));
+                        DEBUG("pair " << g_.int_id(left) << "," << g_.int_id(right_options[j])
+                                      << " is ignored because of edge between "
+                                      << g_.int_id(right_options[i]));
+                    }
+                }
+            }
+            it = inner_it;
+        }
+        return answer;
+    }
+
+    // Edges that start more than one connection (or whose conjugate ends
+    // more than one); such extensions are considered ambiguous.
+    std::set<EdgeId> AmbiguouslyExtending() const {
+        std::set<EdgeId> answer;
+        std::set<EdgeId> left_edges;
+        for (const auto& e_gaps : inner_index_) {
+            EdgeId e1 = e_gaps.first;
+            for (EdgeId e2: SecondEdges(e_gaps.second)) {
+                // insert().second == false means we saw this edge before.
+                if (!left_edges.insert(e1).second) {
+                    answer.insert(e1);
+                }
+                if (!left_edges.insert(g_.conjugate(e2)).second) {
+                    answer.insert(g_.conjugate(e2));
+                }
+            }
+        }
+        return answer;
+    }
+
+    // Filtering pipeline: flanks too long -> low support -> transitively
+    // redundant connections -> ambiguous extensions.
+    void FilterIndex(size_t min_weight, size_t max_flank) {
+        DEBUG("Filtering by maximal allowed flanking length " << max_flank);
+        FilterByDescription([=](const GapDescription &gap) {
+            return gap.edge_gap_start_position + max_flank < g_.length(gap.start)
+                   || gap.edge_gap_end_position > max_flank;
+        });
+
+        DEBUG("Filtering by weight " << min_weight);
+        FilterByCandidates([=](gap_info_it info_start, gap_info_it info_end) {
+            auto cnt = std::distance(info_start, info_end);
+            VERIFY(cnt > 0);
+            return size_t(cnt) < min_weight;
+        });
+
+
+        DEBUG("Filtering transitive gaps");
+        ConnectionSet transitive_ignore = DetectTransitive();
+
+        FilterByEdgePair([&](const EdgePair &ep) {
+            VERIFY(IsCanonical(g_, ep));
+            return transitive_ignore.count(ep);
+        });
+
+        DEBUG("Filtering ambiguous situations");
+        std::set<EdgeId> ambiguously_extending = AmbiguouslyExtending();
+        FilterByEdgePair([&](const EdgePair &ep) {
+            return ambiguously_extending.count(ep.first) ||
+                    ambiguously_extending.count(g_.conjugate(ep.second));
+        });
+    }
+
+public:
+
+    GapStorage(const Graph& g)
+            : g_(g) {
+    }
+
+    const map<EdgeId, GapInfos>& inner_index() const {
+        return inner_index_;
+    };
+
+    // i-th indexed left edge; valid only after PrepareGapsForClosure().
+    EdgeId operator[](size_t i) const {
+        return index_.at(i);
+    }
+
+    size_t size() const {
+        return index_.size();
+    }
+
+    // Stores the gap in canonical orientation (conjugating if needed).
+    void AddGap(const GapDescription& p) {
+        if (IsCanonical(g_, p.start, p.end)) {
+            HiddenAddGap(p);
+        } else {
+            HiddenAddGap(p.conjugate(g_));
+        }
+    }
+
+    // Merges another storage's raw gaps into this one (no re-sorting here;
+    // PrepareGapsForClosure sorts before filtering).
+    void AddStorage(const GapStorage& to_add) {
+        const auto& idx = to_add.inner_index_;
+        for (auto iter = idx.begin(); iter != idx.end(); ++iter)
+            inner_index_[iter->first].insert(inner_index_[iter->first].end(), iter->second.begin(), iter->second.end());
+    }
+
+    void clear() {
+        GapStorage empty(g_);
+        std::swap(inner_index_, empty.inner_index_);
+        std::swap(index_, empty.index_);
+    }
+
+    // Writes a human-readable dump: per left edge, its gap count followed by
+    // the sorted gap descriptions.
+    void DumpToFile(const string filename) const {
+        ofstream filestr(filename);
+        for (const auto& e_gaps : inner_index_) {
+            EdgeId e = e_gaps.first;
+            auto gaps = e_gaps.second;
+            DEBUG(g_.int_id(e) << " " << gaps.size());
+            filestr << g_.int_id(e) << " " << gaps.size() << endl;
+            std::sort(gaps.begin(), gaps.end());
+            for (const auto& gap : gaps) {
+                filestr << gap.str(g_);
+            }
+            filestr << endl;
+        }
+    }
+
+//    void LoadFromFile(const string s) {
+//        FILE* file = fopen((s).c_str(), "r");
+//        int res;
+//        char ss[5000];
+//        map<int, EdgeId> tmp_map;
+//        for (auto iter = g.ConstEdgeBegin(); !iter.IsEnd(); ++iter) {
+//            tmp_map[g.int_id(*iter)] = *iter;
+//        }
+//        while (!feof(file)) {
+//            int first_id, second_id, first_ind, second_ind;
+//            int size;
+//            res = fscanf(file, "%d %d\n", &first_id, &size);
+//            VERIFY(res == 2);
+//            for (int i = 0; i < size; i++) {
+//                res = fscanf(file, "%d %d\n", &first_id, &first_ind);
+//                VERIFY(res == 2);
+//                res = fscanf(file, "%d %d\n", &second_id, &second_ind);
+//                VERIFY(res == 2);
+//                res = fscanf(file, "%s\n", ss);
+//                VERIFY(res == 1);
+//                GapDescription<Graph> gap(tmp_map[first_id], tmp_map[second_id], Sequence(ss), first_ind, second_ind);
+//                this->AddGap(gap);
+//            }
+//        }
+//    }
+
+    //edge_gaps must be sorted
+    // Splits a sorted gap list into contiguous ranges sharing the same
+    // right edge (the left edge is fixed per inner_index_ entry).
+    vector<info_it_pair> EdgePairGaps(const GapInfos& edge_gaps) const {
+        vector<info_it_pair> answer;
+        auto ep_start = edge_gaps.begin();
+        for (auto it = ep_start; it != edge_gaps.end(); ++it) {
+            if (it->end != ep_start->end) {
+                answer.push_back({ep_start, it});
+                ep_start = it;
+            }
+        }
+        answer.push_back({ep_start, edge_gaps.end()});
+        return answer;
+    };
+
+    // Sorts all gap lists, runs the filtering pipeline and builds index_.
+    // Must be called before operator[]/size() are meaningful.
+    void PrepareGapsForClosure(size_t min_weight, size_t max_flank) {
+        for (auto& e_gaps : inner_index_) {
+            auto& gaps = e_gaps.second;
+            std::sort(gaps.begin(), gaps.end());
+        }
+        DEBUG("Raw extensions available for " << inner_index_.size() << " edges");
+
+        FilterIndex(min_weight, max_flank);
+        DEBUG("Filtered extensions available for " << inner_index_.size() << " edges");
+        FillIndex();
+    }
+};
+
+// Computes a partial-order-alignment consensus over the gap sequence
+// variants using ConsensusCore with global alignment.
+// NOTE(review): FindConsensus returns a raw pointer that is never freed here
+// — looks like a leak; confirm ownership semantics against ConsensusCore.
+inline string PoaConsensus(const vector<string>& gap_seqs) {
+    const ConsensusCore::PoaConsensus* pc = ConsensusCore::PoaConsensus::FindConsensus(
+            gap_seqs,
+            ConsensusCore::PoaConfig::GLOBAL_ALIGNMENT);
+    return pc->Sequence();
+}
+
+// Trivial "consensus": takes the first variant as-is if it is shorter than
+// max_length, otherwise returns the empty string (gap left unclosed).
+// NOTE(review): name has a typo ("Consenus"); kept for source compatibility.
+inline string TrivialConsenus(const vector<string>& gap_seqs, size_t max_length) {
+    VERIFY(!gap_seqs.empty());
+    return gap_seqs.front().length() < max_length ? gap_seqs.front() : "";
+}
+
+/*Keys are actual edges of the graph, values are original edges*/
+/*In general many-to-many relationship*/
+// Action handler that tracks, through merges and splits performed during gap
+// closing, which original edges each current edge was built from.
+class EdgeFateTracker : omnigraph::GraphActionHandler<Graph> {
+    // current edge -> set of original edges it contains.
+    map<EdgeId, set<EdgeId>> storage_;
+
+    // Collects the original edges represented by e: either its recorded set
+    // (if e was created during tracking) or e itself (pre-existing edge).
+    void FillRelevant(EdgeId e, set<EdgeId>& relevant) const {
+        auto it = storage_.find(e);
+        if (it != storage_.end()) {
+            //one of novel edges
+            relevant.insert(it->second.begin(), it->second.end());
+        } else {
+            //one of original edges
+            relevant.insert(e);
+        }
+    }
+
+public:
+    EdgeFateTracker(const Graph& g) :
+            omnigraph::GraphActionHandler<Graph>(g, "EdgeFateTracker") {
+    }
+
+    void HandleAdd(EdgeId e) override {
+        if (!storage_.count(e))
+            storage_[e] = {};
+    }
+
+    void HandleDelete(EdgeId e) override {
+        storage_.erase(e);
+    }
+
+    void HandleMerge(const vector<EdgeId>& old_edges, EdgeId new_edge) override {
+        set<EdgeId> relevant_records;
+        for (EdgeId e : old_edges) {
+            FillRelevant(e, relevant_records);
+        }
+        storage_[new_edge] = relevant_records;
+    }
+
+    // Glue events are not expected during gap closing.
+    void HandleGlue(EdgeId /*new_edge*/, EdgeId /*edge1*/, EdgeId /*edge2*/) override {
+        VERIFY(false);
+    }
+
+    // Both split halves inherit the full set of original edges.
+    void HandleSplit(EdgeId old_edge, EdgeId new_edge_1,
+                             EdgeId new_edge_2) override {
+        set<EdgeId> relevant_records;
+        FillRelevant(old_edge, relevant_records);
+        storage_[new_edge_1] = relevant_records;
+        storage_[new_edge_2] = relevant_records;
+    }
+
+    // Inverts the tracked relation: original edge -> current edge.  The
+    // VERIFY enforces that each original edge ended up in exactly one
+    // current edge (i.e. the relation is one-to-one in this direction).
+    map<EdgeId, EdgeId> Old2NewMapping() const {
+        map<EdgeId, EdgeId> old_2_new;
+        for (const auto& new_2_olds : storage_) {
+            for (EdgeId e : new_2_olds.second) {
+                VERIFY(!old_2_new.count(e));
+                old_2_new[e] = new_2_olds.first;
+            }
+        }
+        return old_2_new;
+    }
+
+};
+
+// Closes a batch of gaps in one pass.  When the same edge is touched from
+// both sides by different gaps, it is split artificially first so each gap
+// can be joined independently; conflicting gaps that cannot be separated are
+// dropped.
+class MultiGapJoiner {
+    // canonical edge -> (gap index, split position on that edge).
+    typedef map<EdgeId, pair<size_t, size_t>> SplitInfo;
+
+    Graph& g_;
+    GapJoiner inner_joiner_;
+
+    // All gaps must be canonical and connect two distinct, non-conjugate edges.
+    bool CheckGapsValidity(const vector<GapDescription>& gaps) const {
+        vector<GapDescription> answer;
+        // NOTE(review): `answer` above is never used — candidate for removal.
+        return std::all_of(gaps.begin(), gaps.end(), [&](const GapDescription &gap) {
+            return IsCanonical(g_, gap.start, gap.end) && gap.start != gap.end && gap.start != g_.conjugate(gap.end);
+        });
+    }
+
+    // Records gap idx's split position on e, keyed by the canonical edge.
+    // For a non-canonical edge the position is mirrored onto the conjugate
+    // (same length) and recorded in the secondary table instead.
+    void Add(size_t idx, EdgeId e, size_t pos, SplitInfo& primary, SplitInfo& secondary) const {
+        SplitInfo* storage = &primary;
+        if (!IsCanonical(g_, e)) {
+            e = g_.conjugate(e);
+            pos = g_.length(e) - pos;
+            storage = &secondary;
+        }
+        VERIFY(!storage->count(e));
+        storage->insert(make_pair(e, make_pair(idx, pos)));
+    }
+
+    // Edges registered on both sides — these need an artificial split.
+    vector<EdgeId> EdgesNeedingSplit(const SplitInfo& left_split_info, const SplitInfo& right_split_info) const {
+        vector<EdgeId> answer;
+        for (EdgeId e : key_set(left_split_info))
+            if (right_split_info.count(e))
+                answer.push_back(e);
+        return answer;
+    }
+
+    // Midpoint between the two requested splits; -1ul sentinel when there is
+    // not at least one position strictly between them.
+    size_t ArtificialSplitPos(size_t left_split, size_t right_split) const {
+        if (right_split < left_split + 2) {
+            DEBUG("Artificial split impossible");
+            return -1ul;
+        }
+        return (left_split + right_split) / 2;
+    }
+
+    // Remaps one gap endpoint (e, gap_pos) after split_orig_ep was split into
+    // split_res.  gap_start selects which half of the split the endpoint
+    // lands on.  Returns true iff the endpoint was affected.
+    bool Update(EdgeId& e, size_t& gap_pos, EdgePair split_orig_ep, EdgePair split_res, bool gap_start) const {
+        EdgeId split_orig = split_orig_ep.first;
+        // Endpoint lies on the conjugate strand: work with conjugated result.
+        if (e == split_orig_ep.second) {
+            split_orig = split_orig_ep.second;
+            split_res = Conjugate(g_, split_res);
+        }
+        if (e == split_orig) {
+            if (gap_start) {
+                e = split_res.second;
+                gap_pos = gap_pos - g_.length(split_res.first);
+            } else {
+                e = split_res.first;
+            }
+            return true;
+        }
+        return false;
+    }
+
+    // Exactly one endpoint of the gap must be remapped by a split.
+    void UpdateGap(GapDescription& gap, EdgePair split_orig, EdgePair split_res) const {
+        bool u1 = Update(gap.start, gap.edge_gap_start_position, split_orig, split_res, true);
+        bool u2 = Update(gap.end, gap.edge_gap_end_position, split_orig, split_res, false);
+        VERIFY(u1 != u2);
+    }
+
+    // NOTE(review): both CheckInsert overloads appear unused in this file
+    // (CheckGaps uses its own lambda) — confirm before removing.
+    bool CheckInsert(EdgeId e, set<EdgeId>& used_edges) const {
+        return used_edges.insert(e).second;
+    }
+
+    bool CheckInsert(const vector<EdgeId> edges, set<EdgeId>& used_edges) const {
+        for (EdgeId e : edges) {
+            if (!CheckInsert(e, used_edges)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    // Both endpoint edges of the gap together with their conjugates.
+    std::set<EdgeId> RelevantEdges(const GapDescription& gap) const {
+        std::set<EdgeId> answer;
+        answer.insert(gap.start);
+        answer.insert(g_.conjugate(gap.start));
+        answer.insert(gap.end);
+        answer.insert(g_.conjugate(gap.end));
+        return answer;
+    }
+
+    // Sanity check: every edge (and conjugate) participates in at most one gap.
+    bool CheckGaps(const vector<GapDescription>& gaps) const {
+        set<EdgeId> used_edges;
+        for (const auto& gap : gaps) {
+            const auto relevant = RelevantEdges(gap);
+            //TODO check the semantics of all_of
+            if (!std::all_of(relevant.begin(), relevant.end(), [&](const EdgeId& e) {
+                return used_edges.insert(e).second;
+            })) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    // Splits edges shared by two gaps and rewrites those gaps' coordinates;
+    // gaps whose shared edge cannot be split (positions too close) are
+    // dropped.  Returns the surviving, updated gap set.
+    vector<GapDescription> ArtificialSplitAndGapUpdate(vector<GapDescription> canonical_gaps) const {
+        SplitInfo left_split_pos;
+        SplitInfo right_split_pos;
+        for (size_t i = 0; i < canonical_gaps.size(); ++i) {
+            const auto& gap = canonical_gaps[i];
+            DEBUG("Processing gap " << gap.str(g_));
+            Add(i, gap.start, gap.edge_gap_start_position, right_split_pos, left_split_pos);
+            Add(i, gap.end, gap.edge_gap_end_position, left_split_pos, right_split_pos);
+        }
+
+        set<size_t> to_ignore;
+
+        for (EdgeId e : EdgesNeedingSplit(left_split_pos, right_split_pos)) {
+            size_t artificial_split_pos = ArtificialSplitPos(left_split_pos[e].second, right_split_pos[e].second);
+            if (artificial_split_pos == -1ul) {
+                to_ignore.insert(left_split_pos[e].first);
+                to_ignore.insert(right_split_pos[e].first);
+            } else {
+                DEBUG("Splitting edge " << g_.str(e) << " at pos " << artificial_split_pos);
+                DEBUG("Will update gap " << canonical_gaps[left_split_pos[e].first].str(g_) << " and " << canonical_gaps[right_split_pos[e].first].str(g_));
+                EdgePair ep(e, g_.conjugate(e));
+                auto split_res = g_.SplitEdge(e, artificial_split_pos);
+                UpdateGap(canonical_gaps[left_split_pos[e].first], ep, split_res);
+                UpdateGap(canonical_gaps[right_split_pos[e].first], ep, split_res);
+            }
+        }
+
+        vector<GapDescription> updated_gaps;
+        updated_gaps.reserve(canonical_gaps.size());
+        for (size_t i = 0; i < canonical_gaps.size(); ++i) {
+            if (!to_ignore.count(i)) {
+                updated_gaps.push_back(canonical_gaps[i]);
+            }
+        }
+
+        VERIFY(CheckGaps(updated_gaps));
+        return updated_gaps;
+    };
+
+public:
+    MultiGapJoiner(Graph& g) : g_(g), inner_joiner_(g, true) {
+    }
+
+    //Resulting graph should be condensed
+    void operator()(const vector<GapDescription>& gaps) {
+        size_t closed_gaps = 0;
+        VERIFY_MSG(CheckGapsValidity(gaps), "Gap check failed");
+        for (const auto& gap : ArtificialSplitAndGapUpdate(gaps)) {
+            // condense=false: condensation is deferred to the caller.
+            inner_joiner_(gap, /*condense*/false);
+            ++closed_gaps;
+        }
+        INFO("Closed " << closed_gaps << " gaps");
+    }
+private:
+    DECL_LOGGER("MultiGapJoiner");
+};
+
+class HybridGapCloser {
+public:
+    typedef std::function<string (const vector<string>&)> ConsensusF;
+private:
+    typedef RtSeq Kmer;
+    typedef typename GapStorage::gap_info_it gap_info_it;
+
+    DECL_LOGGER("HybridGapCloser");
+
+    Graph& g_;
+    const GapStorage& storage_;
+    const size_t min_weight_;
+    ConsensusF consensus_;
+    const size_t long_seq_limit_;
+    const size_t max_consensus_reads_;
+
+    const GapDescription INVALID_GAP;
+
+    string PrintLengths(const vector<string>& gap_seqs) const {
+        stringstream ss;
+        for (const auto& gap_v : gap_seqs)
+            ss << gap_v.length() << " ";
+        return ss.str();
+    }
+
+    GapDescription ConstructConsensus(EdgeId start,
+                                      EdgeId end,
+                                      size_t edge_gap_start_position,
+                                      size_t edge_gap_end_position,
+                                      const vector<string>& gap_variants) const {
+        DEBUG(gap_variants.size() << " gap closing variants, lengths: " << PrintLengths(gap_variants));
+        DEBUG("var size original " << gap_variants.size());
+        vector<string> new_gap_variants(gap_variants.begin(), gap_variants.end());
+        new_gap_variants.resize(std::min(max_consensus_reads_, gap_variants.size()));
+        auto s = consensus_(new_gap_variants);
+        DEBUG("consenus for " << g_.int_id(start)
+                              << " and " << g_.int_id(end)
+                              << " found: '" << s << "'");
+        return GapDescription(start, end,
+                              Sequence(s),
+                              edge_gap_start_position, edge_gap_end_position);
+    }
+
+    //all gaps guaranteed to correspond to a single edge pair
+    GapInfos PadGaps(gap_info_it start, gap_info_it end) const {
+        size_t start_min = std::numeric_limits<size_t>::max();
+        size_t end_max = 0;
+        size_t long_seqs = 0;
+        size_t short_seqs = 0;
+        for (auto it = start; it != end; ++it) {
+            const auto& gap = *it;
+            if (gap.gap_seq.size() > long_seq_limit_)
+                long_seqs++;
+            else
+                short_seqs++;
+
+            start_min = std::min(start_min, gap.edge_gap_start_position);
+            end_max = std::max(end_max, gap.edge_gap_end_position);
+        }
+
+        const bool exclude_long_seqs = (short_seqs >= min_weight_ && short_seqs > long_seqs);
+
+        GapInfos answer;
+        for (auto it = start; it != end; ++it) {
+            const auto& gap = *it;
+
+            if (exclude_long_seqs && gap.gap_seq.size() > long_seq_limit_)
+                continue;
+
+            string s = g_.EdgeNucls(gap.start).Subseq(start_min + g_.k(), gap.edge_gap_start_position + g_.k()).str();
+            s += gap.gap_seq.str();
+            s += g_.EdgeNucls(gap.end).Subseq(gap.edge_gap_end_position, end_max).str();
+            answer.push_back(GapDescription(gap.start, gap.end, Sequence(s), start_min, end_max));
+        }
+        return answer;
+    }
+
+    GapDescription ConstructConsensus(gap_info_it start_it, gap_info_it end_it) const {
+        DEBUG("Considering extension " << g_.str(start_it->end));
+        size_t cur_len = end_it - start_it;
+
+        //low weight connections filtered earlier
+        VERIFY(cur_len >= min_weight_);
+
+        auto padded_gaps = PadGaps(start_it, end_it);
+        //all start and end positions are equal here
+        if (padded_gaps.size() < min_weight_) {
+            DEBUG("Connection weight too low after padding");
+            return INVALID_GAP;
+        }
+
+        vector<string> gap_variants;
+        std::transform(padded_gaps.begin(), padded_gaps.end(), std::back_inserter(gap_variants), 
+                       [](const GapDescription& gap) {
+            return gap.gap_seq.str();
+        });
+
+        //for (auto it = start_it; it != end_it; ++it) {
+        //    VERIFY(it->start == start_it->start);
+        //    VERIFY(it->end == start_it->end);
+        //    VERIFY(it->edge_gap_start_position == start_it->edge_gap_start_position);
+        //    VERIFY(it->edge_gap_end_position == start_it->edge_gap_end_position);
+        //}
+        auto padded_gap = padded_gaps.front();
+
+        return ConstructConsensus(padded_gap.start, padded_gap.end,
+                                  padded_gap.edge_gap_start_position,
+                                  padded_gap.edge_gap_end_position,
+                                  gap_variants);
+    }
+
+    GapDescription ConstructConsensus(EdgeId e) const {
+        DEBUG("Constructing consensus for edge " << g_.str(e));
+        vector<GapDescription> closures;
+        for (const auto& edge_pair_gaps : storage_.EdgePairGaps(get(storage_.inner_index(), e))) {
+            auto consensus = ConstructConsensus(edge_pair_gaps.first, edge_pair_gaps.second);
+            if (consensus != INVALID_GAP) {
+                closures.push_back(consensus);
+            }
+        }
+
+        if (closures.size() == 1) {
+            DEBUG("Found unique extension " << closures.front().str(g_));
+            return closures.front();
+        }
+
+        if (closures.size() > 1) {
+            DEBUG("Non-unique extension");
+        }
+        return INVALID_GAP;
+    }
+
+    vector<GapDescription> ConstructConsensus() const {
+        vector<vector<GapDescription>> closures_by_thread(omp_get_max_threads());
+
+        # pragma omp parallel for
+        for (size_t i = 0; i < storage_.size(); i++) {
+            EdgeId e = storage_[i];
+            size_t thread_num = omp_get_thread_num();
+            GapDescription gap = ConstructConsensus(e);
+            if (gap != INVALID_GAP) {
+                closures_by_thread[thread_num].push_back(gap);
+            }
+        }
+
+        vector<GapDescription> closures;
+        for (auto& new_per_thread : closures_by_thread) {
+            std::copy(new_per_thread.begin(), new_per_thread.end(), std::back_inserter(closures));
+            new_per_thread.clear();
+        }
+        return closures;
+    }
+
+public:
+    HybridGapCloser(Graph& g, const GapStorage& storage,
+                    size_t min_weight, ConsensusF consensus,
+                    size_t long_seq_limit,
+                    size_t max_consensus_reads = 20)
+            : g_(g), storage_(storage),
+              min_weight_(min_weight),
+              consensus_(consensus),
+              long_seq_limit_(long_seq_limit),
+              max_consensus_reads_(max_consensus_reads) {
+    }
+
+    map<EdgeId, EdgeId> operator()() {
+        EdgeFateTracker fate_tracker(g_);
+        MultiGapJoiner gap_joiner(g_);
+
+        gap_joiner(ConstructConsensus());
+
+        CompressAllVertices(g_, true, /*chunk_cnt*/100);
+        return fate_tracker.Old2NewMapping();
+    };
+
+};
+
+}
+}
diff --git a/src/projects/spades/launch.hpp b/src/projects/spades/launch.hpp
index 7d3eb40..42f3bf6 100644
--- a/src/projects/spades/launch.hpp
+++ b/src/projects/spades/launch.hpp
@@ -19,18 +19,40 @@
 #include "second_phase_setup.hpp"
 #include "repeat_resolving.hpp"
 #include "distance_estimation.hpp"
-#include "pacbio_aligning.hpp"
+#include "hybrid_aligning.hpp"
 #include "chromosome_removal.hpp"
+#include "series_analysis.hpp"
 #include "pipeline/stage.hpp"
+#include "contig_output_stage.hpp"
 
 namespace spades {
 
+inline bool MetaCompatibleLibraries() {
+    const auto& libs = cfg::get().ds.reads;
+    if (libs[0].type() != io::LibraryType::PairedEnd)
+        return false;
+    if (libs.lib_count() > 2)
+        return false;
+    if (libs.lib_count() == 2 &&
+        libs[1].type() != io::LibraryType::TSLReads &&
+        libs[1].type() != io::LibraryType::PacBioReads && libs[1].type() != io::LibraryType::NanoporeReads)
+            return false;
+    return true;
+}
+
+inline bool HybridLibrariesPresent() {
+    for (size_t lib_id = 0; lib_id < cfg::get().ds.reads.lib_count(); ++lib_id) 
+        if (cfg::get().ds.reads[lib_id].is_hybrid_lib()) 
+            return true;
+    return false;
+}
+
 void assemble_genome() {
     INFO("SPAdes started");
-    if (cfg::get().mode == debruijn_graph::config::pipeline_type::meta &&
-            (cfg::get().ds.reads.lib_count() != 1 || cfg::get().ds.reads[0].type() != io::LibraryType::PairedEnd)) {
-            ERROR("Sorry, current version of metaSPAdes can work with single library only (paired-end only).");
-            exit(239);
+    if (cfg::get().mode == debruijn_graph::config::pipeline_type::meta && !MetaCompatibleLibraries()) {
+        ERROR("Sorry, current version of metaSPAdes can work either with single library (paired-end only) "
+                      "or in paired-end + TSLR mode.");
+        exit(239);
     }
 
     INFO("Starting from stage: " << cfg::get().entry_point);
@@ -53,7 +75,6 @@ void assemble_genome() {
                                             cfg::get().flanking_range,
                                             cfg::get().pos.max_mapping_gap,
                                             cfg::get().pos.max_gap_diff);
-
     if (cfg::get().need_mapping) {
         INFO("Will need read mapping, kmer mapper will be attached");
         conj_gp.kmer_mapper.Attach();
@@ -75,44 +96,38 @@ void assemble_genome() {
         SPAdes.add(new debruijn_graph::MismatchCorrection());
     if (cfg::get().rr_enable) {
         if (two_step_rr) {
+            string prelim_prefix = "preliminary_";
             if (cfg::get().use_intermediate_contigs)
                 SPAdes.add(new debruijn_graph::PairInfoCount(true))
                       .add(new debruijn_graph::DistanceEstimation(true))
                       .add(new debruijn_graph::RepeatResolution(true))
-                      .add(new debruijn_graph::SecondPhaseSetup());
+                      .add(new debruijn_graph::ContigOutput(true, true, prelim_prefix))
+                      .add(new debruijn_graph::SecondPhaseSetup(prelim_prefix));
 
             SPAdes.add(new debruijn_graph::Simplification());
         }
 
+        if (!cfg::get().series_analysis.empty())
+            SPAdes.add(new debruijn_graph::SeriesAnalysis());
+
         if (cfg::get().pd) {
             SPAdes.add(new debruijn_graph::ChromosomeRemoval());
         }
 
-        //begin pacbio
-        bool run_pacbio = false;
-        for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
-            if (cfg::get().ds.reads[i].is_pacbio_alignable()) {
-                run_pacbio = true;
-                break;
-            }
+        if (HybridLibrariesPresent()) {
+            SPAdes.add(new debruijn_graph::HybridLibrariesAligning());
         }
-        if (run_pacbio) {
-            //currently not integrated with two step rr process
-            VERIFY(!two_step_rr);
-            SPAdes.add(new debruijn_graph::PacBioAligning());
-        }
-        //not a handler, no graph modification allowed after PacBioAligning stage!
-        //end pacbio
-        
-        SPAdes.add(new debruijn_graph::PairInfoCount())
+
+        //No graph modification allowed after HybridLibrariesAligning stage!
+
+        SPAdes.add(new debruijn_graph::ContigOutput(false, false, "pre_pe_"))
+              .add(new debruijn_graph::PairInfoCount())
               .add(new debruijn_graph::DistanceEstimation())
               .add(new debruijn_graph::RepeatResolution());
-
-        
-    } else {
-        SPAdes.add(new debruijn_graph::ContigOutput());
     }
 
+    SPAdes.add(new debruijn_graph::ContigOutput());
+
     SPAdes.run(conj_gp, cfg::get().entry_point.c_str());
 
     // For informing spades.py about estimated params
diff --git a/src/projects/spades/main.cpp b/src/projects/spades/main.cpp
index a14d4fa..e162e2e 100644
--- a/src/projects/spades/main.cpp
+++ b/src/projects/spades/main.cpp
@@ -8,12 +8,12 @@
 /*
  * Assembler Main
  */
-#include "dev_support/logger/log_writers.hpp"
+#include "utils/logger/log_writers.hpp"
 
-#include "dev_support/memory_limit.hpp"
-#include "dev_support/segfault_handler.hpp"
+#include "utils/memory_limit.hpp"
+#include "utils/segfault_handler.hpp"
 #include "launch.hpp"
-#include "dev_support/copy_file.hpp"
+#include "utils/copy_file.hpp"
 #include "version.hpp"
 
 void load_config(const vector<string>& cfg_fns) {
diff --git a/src/projects/spades/mismatch_correction.cpp b/src/projects/spades/mismatch_correction.cpp
index d19ffb2..dd181ad 100644
--- a/src/projects/spades/mismatch_correction.cpp
+++ b/src/projects/spades/mismatch_correction.cpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include <algorithms/mismatch_shall_not_pass.hpp>
+#include <modules/mismatch_shall_not_pass.hpp>
 #include "mismatch_correction.hpp"
 
 #include "io/dataset_support/read_converter.hpp"
@@ -21,7 +21,7 @@ void MismatchCorrection::run(conj_graph_pack &gp, const char*) {
         if (dataset.reads[i].is_mismatch_correctable())
             libs.push_back(i);
     }
-    auto streams = single_binary_readers_for_libs(dataset, libs, true,  true);
+    auto streams = io::single_binary_readers_for_libs(dataset, libs, true,  true);
     size_t corrected = MismatchShallNotPass<conj_graph_pack, io::SingleReadSeq>(gp, 2).ParallelStopAllMismatches(streams, 1);
     INFO("Corrected " << corrected << " nucleotides");
 }
diff --git a/src/projects/spades/pacbio_aligning.cpp b/src/projects/spades/pacbio_aligning.cpp
deleted file mode 100644
index 974251f..0000000
--- a/src/projects/spades/pacbio_aligning.cpp
+++ /dev/null
@@ -1,185 +0,0 @@
-//***************************************************************************
-//* Copyright (c) 2015 Saint Petersburg State University
-//* Copyright (c) 2011-2014 Saint Petersburg Academic University
-//* All Rights Reserved
-//* See file LICENSE for details.
-//***************************************************************************
-
-#include "assembly_graph/graph_alignment/pacbio/pac_index.hpp"
-#include "assembly_graph/graph_alignment/pacbio/pacbio_gap_closer.hpp"
-#include "assembly_graph/graph_alignment/long_read_storage.hpp"
-#include "io/reads_io/wrapper_collection.hpp"
-#include "assembly_graph/stats/picture_dump.hpp"
-#include "pacbio_aligning.hpp"
-
-namespace debruijn_graph {
-
-void ProcessReadsBatch(conj_graph_pack &gp,
-                       std::vector<io::SingleRead>& reads,
-                       pacbio::PacBioMappingIndex<ConjugateDeBruijnGraph>& pac_index,
-                       PathStorage<Graph>& long_reads, pacbio::GapStorage<Graph>& gaps,
-                       size_t buf_size, int n, size_t min_gap_quantity, pacbio::StatsCounter& stats) {
-    vector<PathStorage<Graph> > long_reads_by_thread(cfg::get().max_threads,
-                                                     PathStorage<Graph>(gp.g));
-    vector<pacbio::GapStorage<Graph> > gaps_by_thread(cfg::get().max_threads,
-                                              pacbio::GapStorage<Graph>(gp.g, min_gap_quantity,cfg::get().pb.long_seq_limit));
-    vector<pacbio::StatsCounter> stats_by_thread(cfg::get().max_threads);
-
-    size_t longer_500 = 0;
-    size_t aligned = 0;
-    size_t nontrivial_aligned = 0;
-
-#   pragma omp parallel for shared(reads, long_reads_by_thread, pac_index, n, aligned, nontrivial_aligned)
-    for (size_t i = 0; i < buf_size; ++i) {
-        if (i % 1000 == 0) {
-            DEBUG("thread number " << omp_get_thread_num());
-        }
-        size_t thread_num = omp_get_thread_num();
-        Sequence seq(reads[i].sequence());
-#       pragma omp atomic
-        n++;
-        auto current_read_mapping = pac_index.GetReadAlignment(seq);
-        auto aligned_edges = current_read_mapping.main_storage;
-        auto gaps = current_read_mapping.gaps;
-        for (auto iter = gaps.begin(); iter != gaps.end(); ++iter)
-            gaps_by_thread[thread_num].AddGap(*iter, true);
-
-        for (auto iter = aligned_edges.begin(); iter != aligned_edges.end(); ++iter)
-            long_reads_by_thread[thread_num].AddPath(*iter, 1, true);
-        //counting stats:
-        for (auto iter = aligned_edges.begin(); iter != aligned_edges.end(); ++iter) {
-            stats_by_thread[thread_num].path_len_in_edges[iter->size()]++;
-        }
-#       pragma omp critical
-        {
-//            INFO(current_read_mapping.seed_num);
-            if (seq.size() > 500) {
-                longer_500++;
-                if (aligned_edges.size() > 0) {
-                    aligned++;
-                    stats_by_thread[thread_num].seeds_percentage[size_t(
-                            floor(double(current_read_mapping.seed_num) * 1000.0 / (double) seq.size()))]++;
-                    for (size_t j = 0; j < aligned_edges.size(); j++) {
-                        if (aligned_edges[j].size() > 1) {
-                            nontrivial_aligned++;
-                            break;
-                        }
-                    }
-                }
-            }
-        }
-#       pragma omp critical
-        {
-            VERBOSE_POWER(n, " reads processed");
-        }
-    }
-    INFO("Read batch of size: " << buf_size << " processed; "<< longer_500 << " of them longer than 500; among long reads aligned: " << aligned << "; paths of more than one edge received: " << nontrivial_aligned );
-
-    for (size_t i = 0; i < cfg::get().max_threads; i++) {
-        long_reads.AddStorage(long_reads_by_thread[i]);
-        gaps.AddStorage(gaps_by_thread[i]);
-        stats.AddStorage(stats_by_thread[i]);
-    }
-}
-
-void align_pacbio(conj_graph_pack &gp, int lib_id, bool make_additional_saves) {
-    io::ReadStreamList<io::SingleRead> streams;
-    for (const auto& reads : cfg::get().ds.reads[lib_id].single_reads())
-      //do we need input_file function here?
-      streams.push_back(make_shared<io::FixingWrapper>(make_shared<io::FileReadStream>(reads)));
-
-    //make_shared<io::FixingWrapper>(make_shared<io::FileReadStream>(file));
-    //    auto pacbio_read_stream = single_easy_reader(cfg::get().ds.reads[lib_id],
-//    false, false);
-
-//    io::ReadStreamList<io::SingleRead> streams(pacbio_read_stream);
- //   pacbio_read_stream.release();
-    int n = 0;
-    PathStorage<Graph>& long_reads = gp.single_long_reads[lib_id];
-    pacbio::StatsCounter stats;
-    size_t min_gap_quantity = 2;
-    size_t rtype = 0;
-    bool consensus_gap_closing = false;
-    if (cfg::get().ds.reads[lib_id].type() == io::LibraryType::PacBioReads || 
-        cfg::get().ds.reads[lib_id].type() == io::LibraryType::SangerReads || 
-        cfg::get().ds.reads[lib_id].type() == io::LibraryType::NanoporeReads) {
-        min_gap_quantity = cfg::get().pb.pacbio_min_gap_quantity;
-        rtype = 1;
-        consensus_gap_closing = true;
-    } else {
-        min_gap_quantity = cfg::get().pb.contigs_min_gap_quantity;
-        rtype = 2;
-    }
-    pacbio::GapStorage<ConjugateDeBruijnGraph> gaps(gp.g, min_gap_quantity, cfg::get().pb.long_seq_limit);
-    size_t read_buffer_size = 50000;
-    std::vector<io::SingleRead> reads(read_buffer_size);
-    io::SingleRead read;
-    size_t buffer_no = 0;
-    INFO("Usign seed size: " << cfg::get().pb.pacbio_k);
-    pacbio::PacBioMappingIndex<ConjugateDeBruijnGraph> pac_index(gp.g,
-                                                         cfg::get().pb.pacbio_k,
-                                                         cfg::get().K, cfg::get().pb.ignore_middle_alignment, cfg::get().output_dir, cfg::get().pb);
-
-//    path_extend::ContigWriter cw(gp.g);
-//    cw.WriteEdges("before_rr_with_ids.fasta");
-//    ofstream filestr("pacbio_mapped.mpr");
-//    filestr.close();
-    for (auto iter = streams.begin(); iter != streams.end(); ++iter) {
-        auto &stream = *iter;
-        while (!stream.eof()) {
-            size_t buf_size = 0;
-            for (; buf_size < read_buffer_size && !stream.eof(); ++buf_size)
-                stream >> reads[buf_size];
-            INFO("Prepared batch " << buffer_no << " of " << buf_size << " reads.");
-            DEBUG("master thread number " << omp_get_thread_num());
-            ProcessReadsBatch(gp, reads, pac_index, long_reads, gaps, buf_size, n, min_gap_quantity, stats);
-     //       INFO("Processed batch " << buffer_no);
-            ++buffer_no;
-        }
-    }
-    string ss = (rtype == 1 ? "long reads": "contigs");
-    INFO("For lib " << lib_id << " of " << ss <<" :");
-    stats.report();
-    map<EdgeId, EdgeId> replacement;
-    size_t min_stats_cutoff =(rtype == 1 ? 1  : 0);
-    if (make_additional_saves)
-        long_reads.DumpToFile(cfg::get().output_saves + "long_reads_before_rep.mpr",
-                          replacement, min_stats_cutoff, true);
-    gaps.DumpToFile(cfg::get().output_saves + "gaps.mpr");
-    gaps.PadGapStrings();
-    if (make_additional_saves)
-        gaps.DumpToFile(cfg::get().output_saves +  "gaps_padded.mpr");
-    pacbio::PacbioGapCloser<Graph> gap_closer(gp.g, consensus_gap_closing, cfg::get().pb.max_contigs_gap_length);
-    gap_closer.ConstructConsensus(cfg::get().max_threads, gaps);
-    gap_closer.CloseGapsInGraph(replacement);
-    long_reads.ReplaceEdges(replacement);
-    for(int j = 0; j < lib_id; j++) {
-        gp.single_long_reads[j].ReplaceEdges(replacement);
-    }
-
-    gap_closer.DumpToFile(cfg::get().output_saves + "gaps_pb_closed.fasta");
-    INFO("PacBio aligning finished");
-    return;
-}
-
-void PacBioAligning::run(conj_graph_pack &gp, const char*) {
-    using namespace omnigraph;
-    omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
-    int lib_id = -1;
-    bool make_additional_saves = parent_->saves_policy().make_saves_;
-    for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
-        if ( cfg::get().ds.reads[i].is_pacbio_alignable() ) {
-            lib_id = (int) i;
-            align_pacbio(gp, lib_id, make_additional_saves);
-        }
-    }
-
-    if (lib_id == -1)
-        INFO("no PacBio lib found");
-
-    stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
-    printer(config::info_printer_pos::final_gap_closed);
-}
-
-}
-
diff --git a/src/projects/spades/pair_info_count.cpp b/src/projects/spades/pair_info_count.cpp
index bc01e1d..30edba3 100644
--- a/src/projects/spades/pair_info_count.cpp
+++ b/src/projects/spades/pair_info_count.cpp
@@ -9,27 +9,129 @@
 #include "io/dataset_support/read_converter.hpp"
 
 #include "pair_info_count.hpp"
-#include "assembly_graph/graph_alignment/short_read_mapper.hpp"
-#include "assembly_graph/graph_alignment/long_read_mapper.hpp"
+#include "modules/alignment/short_read_mapper.hpp"
+#include "modules/alignment/long_read_mapper.hpp"
+#include "modules/alignment/bwa_sequence_mapper.hpp"
 #include "paired_info/pair_info_filler.hpp"
-#include "algorithms/path_extend/split_graph_pair_info.hpp"
-#include "paired_info/bwa_pair_info_filler.hpp"
+#include "modules/path_extend/split_graph_pair_info.hpp"
+
+#include "adt/bf.hpp"
+#include "adt/hll.hpp"
 
 namespace debruijn_graph {
 
+typedef io::SequencingLibrary<config::DataSetData> SequencingLib;
+using PairedInfoFilter = bf::counting_bloom_filter<std::pair<EdgeId, EdgeId>, 2>;
+using EdgePairCounter = hll::hll<std::pair<EdgeId, EdgeId>>;
+
+class DEFilter : public SequenceMapperListener {
+  public:
+    DEFilter(PairedInfoFilter &filter, const Graph &g)
+            : bf_(filter), g_(g) {}
+
+    void ProcessPairedRead(size_t,
+                           const io::PairedRead&,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
+        ProcessPairedRead(read1, read2);
+    }
+    void ProcessPairedRead(size_t,
+                           const io::PairedReadSeq&,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
+        ProcessPairedRead(read1, read2);
+    }
+  private:
+    void ProcessPairedRead(const MappingPath<EdgeId>& path1,
+                           const MappingPath<EdgeId>& path2) {
+        for (size_t i = 0; i < path1.size(); ++i) {
+            std::pair<EdgeId, MappingRange> mapping_edge_1 = path1[i];
+            for (size_t j = 0; j < path2.size(); ++j) {
+                std::pair<EdgeId, MappingRange> mapping_edge_2 = path2[j];
+                bf_.add({mapping_edge_1.first, mapping_edge_2.first});
+                bf_.add({g_.conjugate(mapping_edge_2.first), g_.conjugate(mapping_edge_1.first)});
+            }
+        }
+    }
+
+    PairedInfoFilter &bf_;
+    const Graph &g_;
+};
+
+class EdgePairCounterFiller : public SequenceMapperListener {
+    static uint64_t EdgePairHash(const std::pair<EdgeId, EdgeId> &e) {
+        uint64_t h1 = e.first.hash();
+        return CityHash64WithSeeds((const char*)&h1, sizeof(h1), e.second.hash(), 0x0BADF00D);
+    }
+
+  public:
+    EdgePairCounterFiller(size_t thread_num)
+            : counter_(EdgePairHash) {
+        buf_.reserve(thread_num);
+        for (unsigned i = 0; i < thread_num; ++i)
+          buf_.emplace_back(EdgePairHash);
+    }
+
+    void MergeBuffer(size_t i) override {
+        counter_.merge(buf_[i]);
+        buf_[i].clear();
+    }
 
-bool RefineInsertSizeForLib(conj_graph_pack &gp, size_t ilib, size_t edge_length_threshold) {
+    void ProcessPairedRead(size_t idx,
+                           const io::PairedRead&,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
+        ProcessPairedRead(buf_[idx], read1, read2);
+    }
+    void ProcessPairedRead(size_t idx,
+                           const io::PairedReadSeq&,
+                           const MappingPath<EdgeId>& read1,
+                           const MappingPath<EdgeId>& read2) override {
+        ProcessPairedRead(buf_[idx], read1, read2);
+    }
 
+    std::pair<double, bool> cardinality() const {
+        return counter_.cardinality();
+    }
+  private:
+    void ProcessPairedRead(EdgePairCounter &buf,
+                           const MappingPath<EdgeId>& path1,
+                           const MappingPath<EdgeId>& path2) {
+        for (size_t i = 0; i < path1.size(); ++i) {
+            std::pair<EdgeId, MappingRange> mapping_edge_1 = path1[i];
+            for (size_t j = 0; j < path2.size(); ++j) {
+                std::pair<EdgeId, MappingRange> mapping_edge_2 = path2[j];
+                buf.add({mapping_edge_1.first, mapping_edge_2.first});
+            }
+        }
+    }
+
+    std::vector<EdgePairCounter> buf_;
+    EdgePairCounter counter_;
+};
+
+static bool CollectLibInformation(const conj_graph_pack &gp,
+                                  size_t &edgepairs,
+                                  size_t ilib, size_t edge_length_threshold) {
     INFO("Estimating insert size (takes a while)");
-    InsertSizeCounter hist_counter(gp, edge_length_threshold, /* ignore negative */ true);
+    InsertSizeCounter hist_counter(gp, edge_length_threshold);
+    EdgePairCounterFiller pcounter(cfg::get().max_threads);
+
     SequenceMapperNotifier notifier(gp);
     notifier.Subscribe(ilib, &hist_counter);
+    notifier.Subscribe(ilib, &pcounter);
 
-    auto& reads = cfg::get_writable().ds.reads[ilib];
+    SequencingLib &reads = cfg::get_writable().ds.reads[ilib];
+    auto &data = reads.data();
     auto paired_streams = paired_binary_readers(reads, false);
 
+    notifier.ProcessLibrary(paired_streams, ilib, *ChooseProperMapper(gp, reads, cfg::get().bwa.bwa_enable));
+    //Check read length after lib processing since mate pairs are not used until this step
     VERIFY(reads.data().read_length != 0);
-    notifier.ProcessLibrary(paired_streams, ilib, *ChooseProperMapper(gp, reads));
+
+    auto pres = pcounter.cardinality();
+    edgepairs = (!pres.second ? 64ull * 1024 * 1024 : size_t(pres.first));
+    INFO("Edge pairs: " << edgepairs << (!pres.second ? " (rough upper limit)" : ""));
 
     INFO(hist_counter.mapped() << " paired reads (" <<
          ((double) hist_counter.mapped() * 100.0 / (double) hist_counter.total()) <<
@@ -39,125 +141,150 @@ bool RefineInsertSizeForLib(conj_graph_pack &gp, size_t ilib, size_t edge_length
     if (hist_counter.mapped() == 0)
         return false;
 
+
     std::map<size_t, size_t> percentiles;
-    hist_counter.FindMean(reads.data().mean_insert_size, reads.data().insert_size_deviation, percentiles);
-    hist_counter.FindMedian(reads.data().median_insert_size, reads.data().insert_size_mad,
-                            reads.data().insert_size_distribution);
-    if (reads.data().median_insert_size < gp.k_value + 2) {
+    hist_counter.FindMean(data.mean_insert_size, data.insert_size_deviation, percentiles);
+    hist_counter.FindMedian(data.median_insert_size, data.insert_size_mad,
+                            data.insert_size_distribution);
+    if (data.median_insert_size < gp.k_value + 2)
         return false;
-    }
 
-    std::tie(reads.data().insert_size_left_quantile,
-             reads.data().insert_size_right_quantile) = omnigraph::GetISInterval(0.8,
-                                                                                 reads.data().insert_size_distribution);
+    std::tie(data.insert_size_left_quantile,
+             data.insert_size_right_quantile) = omnigraph::GetISInterval(0.8,
+                                                                         data.insert_size_distribution);
 
-    return !reads.data().insert_size_distribution.empty();
+    return !data.insert_size_distribution.empty();
 }
 
-void ProcessSingleReads(conj_graph_pack &gp, size_t ilib,
-                        bool use_binary = true) {
+// FIXME: This needs to be static
+void ProcessSingleReads(conj_graph_pack &gp,
+                        size_t ilib,
+                        bool use_binary = true,
+                        bool map_paired = false) {
+    //FIXME make const
     auto& reads = cfg::get_writable().ds.reads[ilib];
+
     SequenceMapperNotifier notifier(gp);
-    GappedLongReadMapper read_mapper(gp, gp.single_long_reads[ilib]);
-    SimpleLongReadMapper simple_read_mapper(gp, gp.single_long_reads[ilib]);
+    //FIXME pretty awful, would be much better if listeners were shared ptrs
+    LongReadMapper read_mapper(gp.g, gp.single_long_reads[ilib],
+                               ChooseProperReadPathExtractor(gp.g, reads.type()));
 
-    if(reads.type() == io::LibraryType::PathExtendContigs) {
-        notifier.Subscribe(ilib, &read_mapper);
-    } else {
-        notifier.Subscribe(ilib, &simple_read_mapper);
-    }
+    notifier.Subscribe(ilib, &read_mapper);
 
-    auto mapper_ptr = ChooseProperMapper(gp, reads);
+    auto mapper_ptr = ChooseProperMapper(gp, reads, cfg::get().bwa.bwa_enable);
     if (use_binary) {
-        auto single_streams = single_binary_readers(reads, false, true);
+        auto single_streams = single_binary_readers(reads, false, map_paired);
         notifier.ProcessLibrary(single_streams, ilib, *mapper_ptr);
     } else {
         auto single_streams = single_easy_readers(reads, false,
-                                                  true, /*handle Ns*/false);
+                                                  map_paired, /*handle Ns*/false);
         notifier.ProcessLibrary(single_streams, ilib, *mapper_ptr);
     }
     cfg::get_writable().ds.reads[ilib].data().single_reads_mapped = true;
 }
 
-void ProcessPairedReads(conj_graph_pack &gp, size_t ilib, bool map_single_reads) {
-    auto& reads = cfg::get_writable().ds.reads[ilib];
-    bool calculate_threshold = (reads.type() == io::LibraryType::PairedEnd);
-    SequenceMapperNotifier notifier(gp);
-    INFO("Left insert size qauntile " << reads.data().insert_size_left_quantile <<
-         ", right insert size quantile " << reads.data().insert_size_right_quantile);
+static void ProcessPairedReads(conj_graph_pack &gp,
+                               std::unique_ptr<PairedInfoFilter> filter, unsigned filter_threshold,
+                               size_t ilib) {
+    SequencingLib &reads = cfg::get_writable().ds.reads[ilib];
+    const auto &data = reads.data();
 
-    SimpleLongReadMapper read_mapper(gp, gp.single_long_reads[ilib]);
-    if (map_single_reads) {
-        notifier.Subscribe(ilib, &read_mapper);
-    }
+    bool calculate_threshold = (reads.type() == io::LibraryType::PairedEnd &&
+        !cfg::get().pe_params.param_set.extension_options.use_default_single_threshold);
+    unsigned round_thr = 0;
+    // Do not round if filtering is disabled
+    if (filter)
+        round_thr = unsigned(std::min(cfg::get().de.max_distance_coeff * data.insert_size_deviation * cfg::get().de.rounding_coeff,
+                                      cfg::get().de.rounding_thr));
 
-    path_extend::SplitGraphPairInfo split_graph(
-            gp, (size_t) reads.data().median_insert_size,
-            (size_t) reads.data().insert_size_deviation,
-            (size_t) reads.data().insert_size_left_quantile,
-            (size_t) reads.data().insert_size_right_quantile,
-            reads.data().read_length, gp.g.k(),
-            cfg::get().pe_params.param_set.split_edge_length,
-            reads.data().insert_size_distribution);
-    if (calculate_threshold) {
+    SequenceMapperNotifier notifier(gp);
+    INFO("Left insert size quantile " << data.insert_size_left_quantile <<
+         ", right insert size quantile " << data.insert_size_right_quantile <<
+         ", filtering threshold " << filter_threshold <<
+         ", rounding threshold " << round_thr);
+
+    path_extend::SplitGraphPairInfo
+            split_graph(gp, (size_t)data.median_insert_size,
+                        (size_t) data.insert_size_deviation,
+                        (size_t) data.insert_size_left_quantile,
+                        (size_t) data.insert_size_right_quantile,
+                        data.read_length, gp.g.k(),
+                        cfg::get().pe_params.param_set.split_edge_length,
+                        data.insert_size_distribution);
+    if (calculate_threshold)
         notifier.Subscribe(ilib, &split_graph);
+
+    LatePairedIndexFiller::WeightF weight;
+    if (filter) {
+        weight = [&](const std::pair<EdgeId, EdgeId> &ep,
+                     const MappingRange&, const MappingRange&) {
+            return (filter->lookup(ep) > filter_threshold ? 1. : 0.);
+        };
+    } else {
+        weight = [&](const std::pair<EdgeId, EdgeId> &,
+                     const MappingRange&, const MappingRange&) {
+            return 1.;
+        };
     }
 
-    LatePairedIndexFiller pif(gp.g, PairedReadCountWeight, gp.paired_indices[ilib]);
+    LatePairedIndexFiller pif(gp.g,
+                              weight, round_thr,
+                              gp.paired_indices[ilib]);
     notifier.Subscribe(ilib, &pif);
 
-    auto paired_streams = paired_binary_readers(reads, false, (size_t) reads.data().mean_insert_size);
-    notifier.ProcessLibrary(paired_streams, ilib, *ChooseProperMapper(gp, reads));
+    auto paired_streams = paired_binary_readers(reads, false, (size_t) data.mean_insert_size);
+    notifier.ProcessLibrary(paired_streams, ilib, *ChooseProperMapper(gp, reads, cfg::get().bwa.bwa_enable));
     cfg::get_writable().ds.reads[ilib].data().pi_threshold = split_graph.GetThreshold();
-
-    if (map_single_reads) {
-        ProcessSingleReads(gp, ilib);
-    }
 }
 
-bool HasGoodRRLibs() {
-    for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
-        const auto &lib = cfg::get().ds.reads[i];
+static bool HasGoodRRLibs() {
+    for (const auto &lib : cfg::get().ds.reads) {
         if (lib.is_contig_lib())
             continue;
+
         if (lib.is_paired() &&
-            lib.data().mean_insert_size == 0.0) {
+            lib.data().mean_insert_size == 0.0)
             continue;
-        }
-        if (lib.is_repeat_resolvable()) {
+
+        if (lib.is_repeat_resolvable())
             return true;
-        }
     }
+
     return false;
 }
 
-bool HasOnlyMP() {
-    for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
-        if (cfg::get().ds.reads[i].type() == io::LibraryType::PathExtendContigs)
+static bool HasOnlyMP() {
+    for (const auto &lib : cfg::get().ds.reads) {
+        if (lib.type() == io::LibraryType::PathExtendContigs)
             continue;
-        if (cfg::get().ds.reads[i].type() != io::LibraryType::MatePairs &&
-            cfg::get().ds.reads[i].type() != io::LibraryType::HQMatePairs) {
+
+        if (lib.type() != io::LibraryType::MatePairs &&
+            lib.type() != io::LibraryType::HQMatePairs)
             return false;
-        }
     }
+
     return true;
 }
 
 //todo improve logic
-bool ShouldMapSingleReads(size_t ilib) {
+static bool ShouldMapSingleReads(size_t ilib) {
     using config::single_read_resolving_mode;
     switch (cfg::get().single_reads_rr) {
-        case single_read_resolving_mode::none: {
-            return false;
-        }
-        case single_read_resolving_mode::all: {
+        case single_read_resolving_mode::all:
             return true;
-        }
-        case single_read_resolving_mode::only_single_libs: {
+        case single_read_resolving_mode::only_single_libs:
             //Map when no PacBio/paired libs or only mate-pairs or single lib itself
-            return !HasGoodRRLibs() || HasOnlyMP() ||
-                   (cfg::get().ds.reads[ilib].type() == io::LibraryType::SingleReads);
-        }
+            if (!HasGoodRRLibs() || HasOnlyMP() ||
+                cfg::get().ds.reads[ilib].type() == io::LibraryType::SingleReads) {
+                if (cfg::get().mode != debruijn_graph::config::pipeline_type::meta) {
+                    return true;
+                } else {
+                    WARN("Single reads are not used in metagenomic mode");
+                }
+            }
+            break;
+        case single_read_resolving_mode::none:
+            break;
         default:
             VERIFY_MSG(false, "Invalid mode value");
     }
@@ -168,86 +295,82 @@ void PairInfoCount::run(conj_graph_pack &gp, const char *) {
     gp.InitRRIndices();
     gp.EnsureBasicMapping();
 
-    //fixme implement better universal logic
-    size_t edge_length_threshold = cfg::get().mode == config::pipeline_type::meta ? 1000 : stats::Nx(gp.g, 50);
+    //TODO implement better universal logic
+    size_t edge_length_threshold = cfg::get().mode == config::pipeline_type::meta ? 900 : stats::Nx(gp.g, 50);
     INFO("Min edge length for estimation: " << edge_length_threshold);
-    bwa_pair_info::BWAPairInfoFiller bwa_counter(gp.g,
-                                                 cfg::get().bwa.path_to_bwa,
-                                                 path::append_path(cfg::get().output_dir, "bwa_count"),
-                                                 cfg::get().max_threads, !cfg::get().bwa.debug);
-
     for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
-        const auto &lib = cfg::get().ds.reads[i];
-
-        if (cfg::get().bwa.bwa_enable && lib.is_bwa_alignable()) {
-            //Run insert size estimation and pair index filler together to save disc space (removes SAM file right after processing the lib)
-            bwa_counter.ProcessLib(i, cfg::get_writable().ds.reads[i], gp.paired_indices[i],
-                                   edge_length_threshold, cfg::get().bwa.min_contig_len);
-        } else if (lib.is_paired()) {
-            INFO("Estimating insert size for library #" << i);
-            const auto &lib_data = lib.data();
-            size_t rl = lib_data.read_length;
-            size_t k = cfg::get().K;
-            bool insert_size_refined = RefineInsertSizeForLib(gp, i, edge_length_threshold);
-
-            if (!insert_size_refined) {
-                cfg::get_writable().ds.reads[i].data().mean_insert_size = 0.0;
-                WARN("Unable to estimate insert size for paired library #" << i);
-                if (rl > 0 && rl <= k) {
-                    WARN("Maximum read length (" << rl << ") should be greater than K (" << k << ")");
-                } else if (rl <= k * 11 / 10) {
-                    WARN("Maximum read length (" << rl << ") is probably too close to K (" << k << ")");
-                } else {
-                    WARN("None of paired reads aligned properly. Please, check orientation of your read pairs.");
+        auto &lib = cfg::get_writable().ds.reads[i];
+        if (lib.is_hybrid_lib()) {
+            INFO("Library #" << i << " was mapped earlier on hybrid aligning stage, skipping");
+            continue;
+        } else if (lib.is_contig_lib()) {
+            INFO("Mapping contigs library #" << i);
+            ProcessSingleReads(gp, i, false);
+        } else {
+            if (lib.is_paired()) {
+                INFO("Estimating insert size for library #" << i);
+                const auto &lib_data = lib.data();
+                size_t rl = lib_data.read_length;
+                size_t k = cfg::get().K;
+
+                size_t edgepairs = 0;
+                if (!CollectLibInformation(gp, edgepairs, i, edge_length_threshold)) {
+                    cfg::get_writable().ds.reads[i].data().mean_insert_size = 0.0;
+                    WARN("Unable to estimate insert size for paired library #" << i);
+                    if (rl > 0 && rl <= k) {
+                        WARN("Maximum read length (" << rl << ") should be greater than K (" << k << ")");
+                    } else if (rl <= k * 11 / 10) {
+                        WARN("Maximum read length (" << rl << ") is probably too close to K (" << k << ")");
+                    } else {
+                        WARN("None of paired reads aligned properly. Please, check orientation of your read pairs.");
+                    }
+                    continue;
                 }
-                continue;
-            } else {
+
                 INFO("  Insert size = " << lib_data.mean_insert_size <<
                      ", deviation = " << lib_data.insert_size_deviation <<
                      ", left quantile = " << lib_data.insert_size_left_quantile <<
                      ", right quantile = " << lib_data.insert_size_right_quantile <<
                      ", read length = " << lib_data.read_length);
 
-                if (lib_data.mean_insert_size < 1.1 * (double) rl) {
+                if (lib_data.mean_insert_size < 1.1 * (double) rl)
                     WARN("Estimated mean insert size " << lib_data.mean_insert_size
                          << " is very small compared to read length " << rl);
-                }
-            }
-        }
-    }
 
-    for (size_t i = 0; i < cfg::get().ds.reads.lib_count(); ++i) {
-        const auto &lib = cfg::get().ds.reads[i];
-        if (lib.is_pacbio_alignable()) {
-            INFO("Library #" << i << " was mapped by PacBio mapper, skipping");
-            continue;
-        } else if (lib.is_contig_lib()) {
-            INFO("Mapping contigs library #" << i);
-            ProcessSingleReads(gp, i, false);
-        } else if (cfg::get().bwa.bwa_enable && lib.is_bwa_alignable()) {
-            INFO("Library #" << i << " was mapped by BWA, skipping");
-            continue;
-        } else {
-            INFO("Mapping library #" << i);
-            bool map_single_reads = ShouldMapSingleReads(i);
-            cfg::get_writable().use_single_reads |= map_single_reads;
-
-            if(cfg::get().mode == debruijn_graph::config::pipeline_type::meta 
-                        && cfg::get().use_single_reads) {
-                map_single_reads = false;
-                cfg::get_writable().use_single_reads = false;
-                WARN("Single reads mappings are not used in metagenomic mode");
-            }
+                std::unique_ptr<PairedInfoFilter> filter;
+                unsigned filter_threshold = cfg::get().de.raw_filter_threshold;
+
+                // Only filter paired-end libraries
+                if (filter_threshold && lib.type() == io::LibraryType::PairedEnd) {
+                    filter.reset(new PairedInfoFilter([](const std::pair<EdgeId, EdgeId> &e, uint64_t seed) {
+                                uint64_t h1 = e.first.hash();
+                                return CityHash64WithSeeds((const char*)&h1, sizeof(h1), e.second.hash(), seed);
+                            },
+                            12 * edgepairs));
+
+                    INFO("Filtering data for library #" << i);
+                    {
+                        SequenceMapperNotifier notifier(gp);
+                        DEFilter filter_counter(*filter, gp.g);
+                        notifier.Subscribe(i, &filter_counter);
+
+                        auto reads = paired_binary_readers(lib, false);
+                        VERIFY(lib.data().read_length != 0);
+                        notifier.ProcessLibrary(reads, i, *ChooseProperMapper(gp, lib, cfg::get().bwa.bwa_enable));
+                    }
+                }
 
-            if (lib.is_paired() && lib.data().mean_insert_size != 0.0) {
-                INFO("Mapping paired reads (takes a while) ");
-                ProcessPairedReads(gp, i, map_single_reads);
-            } else if (map_single_reads) {
-                INFO("Mapping single reads (takes a while) ");
-                ProcessSingleReads(gp, i);
+                INFO("Mapping library #" << i);
+                if (lib.data().mean_insert_size != 0.0) {
+                    INFO("Mapping paired reads (takes a while) ");
+                    ProcessPairedReads(gp, std::move(filter), filter_threshold, i);
+                }
             }
 
-            if (map_single_reads) {
+            if (ShouldMapSingleReads(i)) {
+                cfg::get_writable().use_single_reads = true;
+                INFO("Mapping single reads of library #" << i);
+                ProcessSingleReads(gp, i, /*use_binary*/true, /*map_paired*/true);
                 INFO("Total paths obtained from single reads: " << gp.single_long_reads[i].size());
             }
         }
diff --git a/src/projects/spades/repeat_resolving.cpp b/src/projects/spades/repeat_resolving.cpp
index e5044d8..8deb72b 100644
--- a/src/projects/spades/repeat_resolving.cpp
+++ b/src/projects/spades/repeat_resolving.cpp
@@ -5,73 +5,63 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/logger/logger.hpp"
+#include "utils/logger/logger.hpp"
 #include "assembly_graph/stats/picture_dump.hpp"
-#include "visualization/graph_labeler.hpp"
-#include "paired_info/distance_estimation.hpp"
-#include "paired_info/smoothing_distance_estimation.hpp"
-#include "algorithms/path_extend/path_extend_launch.hpp"
-#include "assembly_graph/graph_support/contig_output.hpp"
-#include "visualization/position_filler.hpp"
-#include "assembly_graph/graph_alignment/long_read_storage.hpp"
+#include "modules/path_extend/pipeline/launcher.hpp"
+
 #include "repeat_resolving.hpp"
 
 namespace debruijn_graph {
 
-void PEResolving(conj_graph_pack& gp) {
-    string scaffolds_name = cfg::get().mode == config::pipeline_type::rna ? "transcripts" : "scaffolds";
-    bool output_broke_scaffolds = cfg::get().mode != config::pipeline_type::rna;
-
-    path_extend::PathExtendParamsContainer params(cfg::get().pe_params,
+static void PEResolving(conj_graph_pack& gp) {
+    path_extend::PathExtendParamsContainer params(cfg::get().ds,
+                                                  cfg::get().pe_params,
                                                   cfg::get().output_dir,
-                                                  "final_contigs",
-                                                  scaffolds_name,
                                                   cfg::get().mode,
                                                   cfg::get().uneven_depth,
                                                   cfg::get().avoid_rc_connections,
-                                                  cfg::get().use_scaffolder,
-                                                  output_broke_scaffolds);
+                                                  cfg::get().use_scaffolder);
 
-    path_extend::ResolveRepeatsPe(cfg::get().ds, params, gp);
+    path_extend::PathExtendLauncher exspander(cfg::get().ds, params, gp);
+    exspander.Launch();
 }
 
-inline bool HasValidLibs() {
+static bool HasValidLibs() {
     for (const auto& lib : cfg::get().ds.reads) {
-        if (lib.is_repeat_resolvable()) {
-            if (!lib.is_paired() || !math::eq(lib.data().mean_insert_size, 0.0)) {
-                return true;
-            } 
+        if (!lib.is_repeat_resolvable())
+            continue;
+
+        if (!lib.is_paired() ||
+            !math::eq(lib.data().mean_insert_size, 0.0)) {
+            return true;
         }
     }
+    
     return false;
 }
 
 void RepeatResolution::run(conj_graph_pack &gp, const char*) {
-    if (cfg::get().developer_mode) {
+    if (cfg::get().developer_mode)
         stats::PrepareForDrawing(gp);
-    }
 
-    omnigraph::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+    visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
     stats::detail_info_printer printer(gp, labeler, cfg::get().output_dir);
     printer(config::info_printer_pos::before_repeat_resolution);
 
     //todo awful hack to get around PE using cfg::get everywhere...
+    //Is it possible to fix this problem now or still too soon?
     auto tmp_params_storage = cfg::get().pe_params;
     if (preliminary_) {
         INFO("Setting up preliminary path extend settings")
         cfg::get_writable().pe_params = *cfg::get().prelim_pe_params;
     }
-    OutputContigs(gp.g, cfg::get().output_dir + "before_rr", false);
-    OutputContigsToFASTG(gp.g, cfg::get().output_dir + "assembly_graph",gp.components);
 
     bool no_valid_libs = !HasValidLibs();
-
     bool use_single_reads = cfg::get().use_single_reads;
     if (cfg::get().rr_enable && no_valid_libs && !use_single_reads)
         WARN("Insert size was not estimated for any of the paired libraries, repeat resolution module will not run.");
 
     if ((no_valid_libs || cfg::get().rm == config::resolving_mode::none) && !use_single_reads) {
-        OutputContigs(gp.g, cfg::get().output_dir + "final_contigs", false);
         return;
     }
     if (cfg::get().rm == config::resolving_mode::path_extend) {
@@ -79,7 +69,6 @@ void RepeatResolution::run(conj_graph_pack &gp, const char*) {
         PEResolving(gp);
     } else {
         INFO("Unsupported repeat resolver");
-        OutputContigs(gp.g, cfg::get().output_dir + "final_contigs", false);
     }
     if (preliminary_) {
         INFO("Restoring initial path extend settings")
@@ -87,15 +76,6 @@ void RepeatResolution::run(conj_graph_pack &gp, const char*) {
     }
 }
 
-void ContigOutput::run(conj_graph_pack &gp, const char*) {
-    OutputContigs(gp.g, cfg::get().output_dir + "simplified_contigs", cfg::get().use_unipaths);
-    OutputContigs(gp.g, cfg::get().output_dir + "before_rr", false);
-    OutputContigsToFASTG(gp.g, cfg::get().output_dir + "assembly_graph", gp.components);
-    OutputContigs(gp.g, cfg::get().output_dir + "final_contigs", false);
 
-}
 
 } // debruijn_graph
-
-
-
diff --git a/src/projects/spades/repeat_resolving.hpp b/src/projects/spades/repeat_resolving.hpp
index 8178e4a..8d34eeb 100644
--- a/src/projects/spades/repeat_resolving.hpp
+++ b/src/projects/spades/repeat_resolving.hpp
@@ -26,17 +26,5 @@ public:
     void run(conj_graph_pack &gp, const char *);
 };
 
-class ContigOutput : public spades::AssemblyStage {
-public:
-    ContigOutput()
-            : AssemblyStage("Contig Output", "contig_output") { }
-
-    void load(conj_graph_pack &, const std::string &, const char *) { }
-
-    void save(const conj_graph_pack &, const std::string &, const char *) const { }
-
-    void run(conj_graph_pack &gp, const char *);
-};
-
 }
 
diff --git a/src/projects/spades/second_phase_setup.cpp b/src/projects/spades/second_phase_setup.cpp
index 9f09674..f85e6dd 100644
--- a/src/projects/spades/second_phase_setup.cpp
+++ b/src/projects/spades/second_phase_setup.cpp
@@ -19,8 +19,9 @@ namespace debruijn_graph {
 void SecondPhaseSetup::run(conj_graph_pack &gp, const char*) {
     INFO("Preparing second phase");
     gp.ClearRRIndices();
+    gp.ClearPaths();
 
-    std::string old_pe_contigs_filename = cfg::get().output_dir + "final_contigs.fasta";
+    std::string old_pe_contigs_filename = cfg::get().output_dir + contig_name_prefix_ + "final_contigs.fasta";
     std::string new_pe_contigs_filename = cfg::get().output_dir + "first_pe_contigs.fasta";
 
     VERIFY(path::check_existence(old_pe_contigs_filename));
@@ -35,7 +36,7 @@ void SecondPhaseSetup::run(conj_graph_pack &gp, const char*) {
     cfg::get_writable().ds.reads.push_back(untrusted_contigs);
 
     //FIXME get rid of this awful variable
-    cfg::get_writable().use_single_reads = false;
+    VERIFY(!cfg::get().use_single_reads);
     INFO("Ready to run second phase");
 }
 
diff --git a/src/projects/spades/second_phase_setup.hpp b/src/projects/spades/second_phase_setup.hpp
index bd40d88..87fc7c4 100644
--- a/src/projects/spades/second_phase_setup.hpp
+++ b/src/projects/spades/second_phase_setup.hpp
@@ -12,9 +12,12 @@ namespace debruijn_graph {
 
 //todo rename
 class SecondPhaseSetup : public spades::AssemblyStage {
+private:
+    string contig_name_prefix_;
+
 public:
-    SecondPhaseSetup()
-            : AssemblyStage("Second Phase Setup", "second_phase_setup") { }
+    SecondPhaseSetup(const string& contig_name_prefix = "")
+            : AssemblyStage("Second Phase Setup", "second_phase_setup"),contig_name_prefix_(contig_name_prefix)  { }
 
     void run(conj_graph_pack &gp, const char *);
 };
diff --git a/src/projects/spades/series_analysis.hpp b/src/projects/spades/series_analysis.hpp
new file mode 100644
index 0000000..7860e51
--- /dev/null
+++ b/src/projects/spades/series_analysis.hpp
@@ -0,0 +1,323 @@
+#pragma once
+
+#include "pipeline/stage.hpp"
+#include "assembly_graph/graph_support/graph_processing_algorithm.hpp"
+#include "assembly_graph/graph_support/basic_edge_conditions.hpp"
+#include "modules/simplification/tip_clipper.hpp"
+#include "projects/mts/contig_abundance.hpp"
+#include "io/reads/osequencestream.hpp"
+
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/YAMLTraits.h"
+
+namespace debruijn_graph {
+
+struct SeriesAnalysisConfig {
+    uint k;
+    uint sample_cnt;
+    uint frag_size;
+    uint min_len;
+
+    std::string kmer_mult, bin, bin_prof, edges_sqn, edges_mpl, edge_fragments_mpl;
+};
+
+}
+
+namespace llvm { namespace yaml {
+
+template<> struct MappingTraits<debruijn_graph::SeriesAnalysisConfig> {
+    static void mapping(IO& io, debruijn_graph::SeriesAnalysisConfig& cfg) {
+        io.mapRequired("k", cfg.k);
+        io.mapRequired("sample_cnt", cfg.sample_cnt);
+        io.mapRequired("kmer_mult", cfg.kmer_mult);
+        io.mapRequired("bin", cfg.bin);
+        io.mapRequired("bin_prof", cfg.bin_prof);
+        io.mapRequired("min_len", cfg.min_len);
+        io.mapRequired("edges_sqn", cfg.edges_sqn);
+        io.mapRequired("edges_mpl", cfg.edges_mpl);
+        io.mapRequired("edge_fragments_mpl", cfg.edge_fragments_mpl);
+        io.mapRequired("frag_size", cfg.frag_size);
+    }
+};
+
+} }
+
+namespace debruijn_graph {
+
+template<class graph_pack>
+shared_ptr<visualization::graph_colorer::GraphColorer<typename graph_pack::graph_t>> DefaultGPColorer(
+    const graph_pack& gp) {
+    io::SingleRead genome("ref", gp.genome.str());
+    auto mapper = MapperInstance(gp);
+    auto path1 = mapper->MapRead(genome).path();
+    auto path2 = mapper->MapRead(!genome).path();
+    return visualization::graph_colorer::DefaultColorer(gp.g, path1, path2);
+}
+
+inline double l2_norm(const AbundanceVector& v) {
+    double s = 0.;
+    for (auto val : v) {
+        s += val * val;
+    }
+    return std::sqrt(s);
+}
+
+inline double cosine_sim(const AbundanceVector& v1, const AbundanceVector& v2) {
+    double s = 0.;
+    for (size_t i = 0; i < v1.size(); ++i) {
+        s += v1[i] * v2[i];
+    }
+    return s / (l2_norm(v1) * l2_norm(v2));
+}
+
+template<class Graph>
+class EdgeAbundance: public omnigraph::GraphActionHandler<Graph> {
+    typedef map<EdgeId, AbundanceVector> Storage;
+    typedef Storage::const_iterator const_iterator;
+    Storage edge_abundance_;
+    const ContigAbundanceCounter& abundance_counter_;
+
+public:
+    EdgeAbundance(const Graph& g, const ContigAbundanceCounter& abundance_counter) :
+        omnigraph::GraphActionHandler<Graph>(g, "EdgeAbundance"),
+        abundance_counter_(abundance_counter){}
+
+    void Fill() {
+        for (auto it = this->g().ConstEdgeBegin(true); !it.IsEnd(); ++it) {
+            HandleAdd(*it);
+        }
+    }
+
+    virtual void HandleAdd(EdgeId e) override {
+        auto ab = abundance_counter_(this->g().EdgeNucls(e).str());
+        if (!ab) {
+            INFO("Couldn't estimate abundance of edge " << this->g().str(e));
+        } else {
+            edge_abundance_[e] = *ab;
+        }
+    }
+
+    const_iterator begin() const {
+        return edge_abundance_.begin();
+    }
+
+    const_iterator end() const {
+        return edge_abundance_.end();
+    }
+
+    const_iterator find(EdgeId e) const {
+        return edge_abundance_.find(e);
+    }
+
+    size_t count(EdgeId e) const {
+        return edge_abundance_.count(e);
+    }
+
+private:
+    DECL_LOGGER("EdgeAbundance");
+};
+
+template<class Graph>
+class AggressiveClearing: public omnigraph::EdgeProcessingAlgorithm<Graph> {
+    typedef typename Graph::EdgeId EdgeId;
+    const EdgeAbundance<Graph>& edge_abundance_;
+    const AbundanceVector base_profile_;
+    const double similarity_threshold_;
+    const double norm_ratio_threshold_;
+    EdgeRemover<Graph> edge_remover_;
+    func::TypedPredicate<EdgeId> topological_condition_;
+
+protected:
+    virtual bool ProcessEdge(EdgeId e) override {
+        DEBUG("Processing edge " << this->g().str(e));
+        if (!topological_condition_(e)) {
+            DEBUG("Topological condition failed");
+            return false;
+        }
+        auto it = edge_abundance_.find(e);
+        if (it == edge_abundance_.end()) {
+            DEBUG("Edge " << this->g().str(e) << " did not have valid abundance profile");
+            return false;
+        }
+        const auto& profile = it->second;
+        DEBUG("Edge profile " << PrintVector(profile));
+        double sim = cosine_sim(profile, base_profile_);
+        double norm_ratio = l2_norm(profile) / l2_norm(base_profile_);
+
+        DEBUG("Similarity between edge and base profiles " << sim);
+        DEBUG("Norm ratio " << norm_ratio);
+        if (math::ls(norm_ratio, norm_ratio_threshold_)
+                || math::ls(sim, similarity_threshold_)) {
+            DEBUG("Removing edge " << this->g().str(e));
+
+            edge_remover_.DeleteEdge(e);
+            return true;
+        }
+        return false;
+    }
+
+public:
+    AggressiveClearing(Graph &g,
+                       const EdgeAbundance<Graph>& edge_abundance,
+                       const AbundanceVector& base_profile,
+                       double similarity_threshold,
+                       double norm_ratio_threshold,
+                       const std::function<void(EdgeId)> &removal_handler = 0) :
+        EdgeProcessingAlgorithm<Graph>(g, true),
+        edge_abundance_(edge_abundance),
+        base_profile_(base_profile),
+        similarity_threshold_(similarity_threshold),
+        norm_ratio_threshold_(norm_ratio_threshold),
+        edge_remover_(g, removal_handler),
+        topological_condition_(func::Or(AlternativesPresenceCondition<Graph>(g), TipCondition<Graph>(g))) {
+            DEBUG("Base profile " << PrintVector(base_profile_));
+        }
+private:
+    DECL_LOGGER("AggressiveClearing");
+};
+
+class SeriesAnalysis : public spades::AssemblyStage {
+
+    boost::optional<AbundanceVector> InferAbundance(const std::string& bin_mult_fn,
+                                                    const std::string& b_id) const {
+        path::CheckFileExistenceFATAL(bin_mult_fn);
+
+        ifstream is(bin_mult_fn);
+        vector<AbundanceVector> abundances;
+        while (true) {
+            string name;
+            is >> name;
+            if (!is.fail()) {
+                AbundanceVector vec(SampleCount(), 0.0);
+                for (size_t i = 0; i < SampleCount(); ++i) {
+                    is >> vec[i];
+                    VERIFY(!is.fail());
+                }
+                if (name == b_id) {
+                    abundances.push_back(vec);
+                }
+            } else {
+                INFO("Read " << abundances.size() << " profiles for bin " << b_id);
+                break;
+            }
+        }
+        return boost::optional<AbundanceVector>(MeanVector(abundances));
+    }
+
+    void PrintEdgeFragmentProfiles(const conj_graph_pack &gp, const ContigAbundanceCounter &abundance_counter, 
+                                   size_t split_length, size_t min_len, std::ostream &os) const {
+        for (auto it = gp.g.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
+            EdgeId e = *it;
+            io::SingleRead full_contig(ToString(gp.g.int_id(e)), gp.g.EdgeNucls(e).str());
+            for (size_t i = 0; i < full_contig.size(); i += split_length) {
+                if (full_contig.size() - i < min_len) {
+                    DEBUG("Fragment shorter than min_length_bound " << min_len);
+                    break;
+                }
+
+                io::SingleRead contig = full_contig.Substr(i, std::min(i + split_length, full_contig.size()));
+
+                DEBUG("Processing fragment # " << (i / split_length) << " with id " << contig.name());
+
+                auto abundance_vec = abundance_counter(contig.GetSequenceString(), contig.name());
+
+                if (abundance_vec) {
+                    size_t len = contig.GetSequenceString().size();
+                    os << contig.name() << " " << len << " " << PrintVector(*abundance_vec) << std::endl;
+                    //copy(abundance_vec->begin(), abundance_vec->begin() + config.sample_cnt,
+                    //     ostream_iterator<Mpl>(ss, " "));
+                    DEBUG("Successfully estimated abundance of " << contig.name());
+                } else {
+                    DEBUG("Failed to estimate abundance of " << contig.name());
+                }
+            }
+        }
+    }
+
+public:
+    SeriesAnalysis() : AssemblyStage("Series Analysis", "series_analysis") { }
+
+    void load(conj_graph_pack &, const std::string &, const char *) { }
+
+    void save(const conj_graph_pack &, const std::string &, const char *) const { }
+
+    void run(conj_graph_pack &gp, const char *) {
+        std::string cfg = cfg::get().series_analysis;
+        INFO("Series analysis enabled with config " << cfg);
+
+        auto Buf = llvm::MemoryBuffer::getFile(cfg);
+        VERIFY_MSG(Buf, "Failed to load config file " + cfg);
+
+        llvm::yaml::Input yin(*Buf.get());
+        SeriesAnalysisConfig config;
+        yin >> config;
+
+        SetSampleCount(config.sample_cnt);
+
+        ContigAbundanceCounter abundance_counter(config.k, 
+                                                 SingleClusterAnalyzer(2., 0.4),
+                                                 cfg::get().tmp_dir);
+
+        DEBUG("Initiating abundance counter");
+        abundance_counter.Init(config.kmer_mult);
+        DEBUG("Abundance counter ready");
+
+        if (!config.edges_sqn.empty()) {
+            io::osequencestream oss(config.edges_sqn);
+            for (auto it = gp.g.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
+                EdgeId e = *it;
+                string s = gp.g.EdgeNucls(e).str();
+                oss << io::SingleRead(io::MakeContigId(gp.g.int_id(e), s.size()), s);
+            }
+        }
+
+        if (!config.edges_mpl.empty()) {
+            ofstream os(config.edges_mpl);
+            PrintEdgeFragmentProfiles(gp, abundance_counter, -1ul, config.min_len, os);
+        }
+
+        if (!config.edge_fragments_mpl.empty()) {
+            ofstream os(config.edge_fragments_mpl);
+            PrintEdgeFragmentProfiles(gp, abundance_counter, config.frag_size, config.min_len, os);
+        }
+
+        boost::optional<AbundanceVector> bin_profile = InferAbundance(config.bin_prof, config.bin);
+        if (!bin_profile) {
+            ERROR("Couldn't estimate profile of bin");
+            return;
+        }
+
+        EdgeAbundance<Graph> edge_abundance(gp.g, abundance_counter);
+        edge_abundance.Fill();
+
+        gp.EnsureBasicMapping();
+        gp.FillQuality();
+        visualization::graph_labeler::DefaultLabeler<Graph> labeler(gp.g, gp.edge_pos);
+        auto colorer = DefaultGPColorer(gp);
+        path::make_dir(cfg::get().output_dir + "pictures/");
+        QualityEdgeLocalityPrintingRH<Graph> qual_removal_handler(gp.g, gp.edge_qual, labeler, colorer,
+                                       cfg::get().output_dir + "pictures/");
+
+        INFO("Launching aggressive graph clearing");
+        //positive quality edges removed (folder colored_edges_deleted)
+        AggressiveClearing<Graph> clearing(gp.g, edge_abundance,
+                                            *bin_profile, 0.8, 0.3, [&](EdgeId e) {
+                        qual_removal_handler.HandleDelete(e);});
+        clearing.Run();
+        INFO("Graph clearing finished");
+
+        INFO("Drawing edges with failed abundance estimate")
+        path::make_dir(cfg::get().output_dir + "pictures_no_ab/");
+        QualityEdgeLocalityPrintingRH<Graph> qual_removal_handler2(gp.g, gp.edge_qual, labeler, colorer,
+                                       cfg::get().output_dir + "pictures_no_ab/");
+
+        for (auto it = gp.g.ConstEdgeBegin(true); !it.IsEnd(); ++it) {
+            EdgeId e = *it;
+            if (edge_abundance.count(e) == 0) {
+                qual_removal_handler2.HandleDelete(e);
+            }
+        }
+    }
+};
+
+}
diff --git a/src/projects/truseq_analysis/AlignmentAnalyserNew.cpp b/src/projects/truseq_analysis/AlignmentAnalyserNew.cpp
index b0c4f8f..de95af6 100644
--- a/src/projects/truseq_analysis/AlignmentAnalyserNew.cpp
+++ b/src/projects/truseq_analysis/AlignmentAnalyserNew.cpp
@@ -9,8 +9,8 @@
 // Created by anton on 5/15/15.
 //
 
-#include "dev_support/standard_base.hpp"
-#include "algorithms/dijkstra/dijkstra_helper.hpp"
+#include "utils/standard_base.hpp"
+#include "assembly_graph/dijkstra/dijkstra_helper.hpp"
 #include "AlignmentAnalyserNew.hpp"
 
 namespace alignment_analysis {
diff --git a/src/projects/truseq_analysis/AlignmentAnalyserNew.hpp b/src/projects/truseq_analysis/AlignmentAnalyserNew.hpp
index 0ad6484..d0a65d3 100644
--- a/src/projects/truseq_analysis/AlignmentAnalyserNew.hpp
+++ b/src/projects/truseq_analysis/AlignmentAnalyserNew.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "assembly_graph/graph_core/graph.hpp"
+#include "assembly_graph/core/graph.hpp"
 #include "assembly_graph/paths/mapping_path.hpp"
 #include "consistent_mapping.h"
 
diff --git a/src/projects/truseq_analysis/CMakeLists.txt b/src/projects/truseq_analysis/CMakeLists.txt
index 0b07475..3fa5aa1 100644
--- a/src/projects/truseq_analysis/CMakeLists.txt
+++ b/src/projects/truseq_analysis/CMakeLists.txt
@@ -11,5 +11,5 @@ add_executable(truseq_analysis
                main.cpp
                alignment_analyser.cpp AlignmentAnalyserNew.cpp consistent_mapping.cpp analysis_pipeline.cpp)
 
-target_link_libraries(truseq_analysis spades_modules ${COMMON_LIBRARIES})
+target_link_libraries(truseq_analysis common_modules ${COMMON_LIBRARIES})
 
diff --git a/src/projects/truseq_analysis/alignment_analyser.cpp b/src/projects/truseq_analysis/alignment_analyser.cpp
index 9f5c102..11e03ef 100644
--- a/src/projects/truseq_analysis/alignment_analyser.cpp
+++ b/src/projects/truseq_analysis/alignment_analyser.cpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "alignment_analyser.hpp"
 
 namespace alignment_analysis {
diff --git a/src/projects/truseq_analysis/alignment_analyser.hpp b/src/projects/truseq_analysis/alignment_analyser.hpp
index 2da4fde..7bca8d8 100644
--- a/src/projects/truseq_analysis/alignment_analyser.hpp
+++ b/src/projects/truseq_analysis/alignment_analyser.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "pipeline/graph_pack.hpp"
 #include "consistent_mapping.h"
 
@@ -18,7 +18,7 @@ namespace alignment_analysis {
         typedef debruijn_graph::conj_graph_pack::graph_t Graph;
         typedef Graph::EdgeId EdgeId;
         typedef Graph::VertexId VertexId;
-        typedef debruijn_graph::NewExtendedSequenceMapper<Graph, debruijn_graph::conj_graph_pack::index_t> Mapper;
+        typedef debruijn_graph::BasicSequenceMapper<Graph, debruijn_graph::conj_graph_pack::index_t> Mapper;
         stringstream log_;
         const Graph &graph_;
         const Mapper &mapper_;
diff --git a/src/projects/truseq_analysis/analysis_pipeline.cpp b/src/projects/truseq_analysis/analysis_pipeline.cpp
index 413e6cc..2b39f5f 100644
--- a/src/projects/truseq_analysis/analysis_pipeline.cpp
+++ b/src/projects/truseq_analysis/analysis_pipeline.cpp
@@ -9,10 +9,10 @@
 // Created by anton on 16.05.15.
 //
 
+#include "io/reads/file_reader.hpp"
 #include "stages/construction.hpp"
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "analysis_pipeline.hpp"
-#include "modules/io/reads_io/file_reader.hpp"
 
 spades::VariationDetectionStage::VariationDetectionStage(string output_file, const Config &config) : AssemblyStage("VariationDetection", "variation_detection"),
                                                                                                      output_file_(output_file), config_(config) {
@@ -138,4 +138,4 @@ vector <alignment_analysis::ConsistentMapping> spades::VariationDetectionStage::
         }
     }
     return result;
-}
\ No newline at end of file
+}
diff --git a/src/projects/truseq_analysis/analysis_pipeline.hpp b/src/projects/truseq_analysis/analysis_pipeline.hpp
index a2d330f..4269650 100644
--- a/src/projects/truseq_analysis/analysis_pipeline.hpp
+++ b/src/projects/truseq_analysis/analysis_pipeline.hpp
@@ -7,7 +7,7 @@
 
 #pragma once
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include <pipeline/stage.hpp>
 #include "alignment_analyser.hpp"
 #include "AlignmentAnalyserNew.hpp"
diff --git a/src/projects/truseq_analysis/consistent_mapping.cpp b/src/projects/truseq_analysis/consistent_mapping.cpp
index 2e3cc63..449f9cf 100644
--- a/src/projects/truseq_analysis/consistent_mapping.cpp
+++ b/src/projects/truseq_analysis/consistent_mapping.cpp
@@ -5,7 +5,7 @@
 //* See file LICENSE for details.
 //***************************************************************************
 
-#include "dev_support/standard_base.hpp"
+#include "utils/standard_base.hpp"
 #include "AlignmentAnalyserNew.hpp"
 #include "consistent_mapping.h"
 
diff --git a/src/projects/truseq_analysis/main.cpp b/src/projects/truseq_analysis/main.cpp
index 3cd961b..1588396 100644
--- a/src/projects/truseq_analysis/main.cpp
+++ b/src/projects/truseq_analysis/main.cpp
@@ -8,10 +8,10 @@
 /*
  * TruSeq Analysis Main
  */
-#include "dev_support/logger/log_writers.hpp"
-#include "dev_support/segfault_handler.hpp"
-#include "dev_support/memory_limit.hpp"
-#include "dev_support/copy_file.hpp"
+#include "utils/logger/log_writers.hpp"
+#include "utils/segfault_handler.hpp"
+#include "utils/memory_limit.hpp"
+#include "utils/copy_file.hpp"
 #include "pipeline/config_struct.hpp"
 #include "analysis_pipeline.hpp"
 
diff --git a/src/spades_pipeline/corrector_logic.py b/src/spades_pipeline/corrector_logic.py
index 158d6fb..7459c5f 100644
--- a/src/spades_pipeline/corrector_logic.py
+++ b/src/spades_pipeline/corrector_logic.py
@@ -12,6 +12,7 @@ import os
 import sys
 import shutil
 import support
+import process_cfg
 from site import addsitedir
 from distutils import dir_util
 
@@ -26,7 +27,7 @@ def prepare_config_corr(filename, cfg, ext_python_modules_home):
     data = pyyaml.load(open(filename, 'r'))
     data["dataset"] = cfg.dataset
     data["output_dir"] = cfg.output_dir
-    data["work_dir"] = os.path.join(cfg.output_dir, 'tmp')
+    data["work_dir"] = process_cfg.process_spaces(cfg.tmp_dir)
     #data["hard_memory_limit"] = cfg.max_memory
     data["max_nthreads"] = cfg.max_threads
     data["bwa"] = cfg.bwa
@@ -66,6 +67,9 @@ def run_corrector(configs_dir, execution_home, cfg,
     support.sys_call(command, log)
     if not os.path.isfile(result):
         support.error("Mismatch correction finished abnormally: " + result + " not found!")
+    if os.path.isdir(cfg.tmp_dir):
+        shutil.rmtree(cfg.tmp_dir)
+
 
 
 
diff --git a/src/spades_pipeline/hammer_logic.py b/src/spades_pipeline/hammer_logic.py
index 1e2b035..1d971b8 100644
--- a/src/spades_pipeline/hammer_logic.py
+++ b/src/spades_pipeline/hammer_logic.py
@@ -75,6 +75,8 @@ def prepare_config_bh(filename, cfg, log):
         subst_dict["input_qvoffset"] = cfg.qvoffset
     if "count_filter_singletons" in cfg.__dict__:
         subst_dict["count_filter_singletons"] = cfg.count_filter_singletons
+    if "read_buffer_size" in cfg.__dict__:
+        subst_dict["count_split_buffer"] = cfg.read_buffer_size
     process_cfg.substitute_params(filename, subst_dict, log)
 
 
diff --git a/src/spades_pipeline/options_storage.py b/src/spades_pipeline/options_storage.py
index 92e6580..1919e5a 100644
--- a/src/spades_pipeline/options_storage.py
+++ b/src/spades_pipeline/options_storage.py
@@ -27,12 +27,13 @@ MAX_LIBS_NUMBER = 9
 OLD_STYLE_READS_OPTIONS = ["--12", "-1", "-2", "-s"]
 SHORT_READS_TYPES = {"pe": "paired-end", "s": "single", "mp": "mate-pairs", "hqmp": "hq-mate-pairs", "nxmate": "nxmate"}
 # other libs types:
-LONG_READS_TYPES = ["pacbio", "sanger", "nanopore", "trusted-contigs", "untrusted-contigs"]
+LONG_READS_TYPES = ["pacbio", "sanger", "nanopore", "tslr", "trusted-contigs", "untrusted-contigs"]
 
 # final contigs and scaffolds names
 contigs_name = "contigs.fasta"
 scaffolds_name = "scaffolds.fasta"
 assembly_graph_name = "assembly_graph.fastg"
+assembly_graph_name_gfa = "assembly_graph.gfa"
 contigs_paths = "contigs.paths"
 scaffolds_paths = "scaffolds.paths"
 transcripts_name = "transcripts.fasta"
@@ -88,6 +89,7 @@ cov_cutoff = 'off'  # default is 'off'
 # hidden options
 mismatch_corrector = None
 reference = None
+series_analysis = None
 configs_dir = None
 iterations = None
 bh_heap_check = None
@@ -129,7 +131,7 @@ dict_of_rel2abs = dict()
 long_options = "12= threads= memory= tmp-dir= iterations= phred-offset= sc iontorrent meta large-genome rna plasmid "\
                "only-error-correction only-assembler "\
                "disable-gzip-output disable-gzip-output:false disable-rr disable-rr:false " \
-               "help version test debug debug:false reference= config-file= dataset= "\
+               "help version test debug debug:false reference= series-analysis= config-file= dataset= "\
                "bh-heap-check= spades-heap-check= read-buffer-size= help-hidden "\
                "mismatch-correction mismatch-correction:false careful careful:false "\
                "continue restart-from= diploid truseq cov-cutoff= configs-dir= stop-after=".split()
@@ -245,6 +247,7 @@ def usage(spades_version, show_hidden=False, mode=None):
         sys.stderr.write("--sanger\t<filename>\tfile with Sanger reads\n")
         sys.stderr.write("--pacbio\t<filename>\tfile with PacBio reads\n")
         sys.stderr.write("--nanopore\t<filename>\tfile with Nanopore reads\n")
+    sys.stderr.write("--tslr\t<filename>\tfile with TSLR-contigs\n")
     sys.stderr.write("--trusted-contigs\t<filename>\tfile with trusted contigs\n")
     sys.stderr.write("--untrusted-contigs\t<filename>\tfile with untrusted contigs\n")
     if mode == "dip":
@@ -310,6 +313,7 @@ def usage(spades_version, show_hidden=False, mode=None):
                              " of mismatches and short indels" + "\n")
         sys.stderr.write("--reference\t<filename>\tfile with reference for deep analysis"\
                              " (only in debug mode)" + "\n")
+        sys.stderr.write("--series-analysis\t<filename>\tconfig for metagenomics-series-augmented reassembly" + "\n")
         sys.stderr.write("--configs-dir\t<configs_dir>\tdirectory with configs" + "\n")
         sys.stderr.write("-i/--iterations\t<int>\t\tnumber of iterations for read error"\
                              " correction [default: %s]\n" % ITERATIONS)
@@ -334,8 +338,8 @@ def usage(spades_version, show_hidden=False, mode=None):
 
 
 def auto_K_allowed():
-    return not k_mers and not single_cell and not iontorrent and not meta 
-    # kmers were set by default, not SC, and not IonTorrent data, and not metagenomic
+    return not k_mers and not single_cell and not iontorrent and not rna and not meta
+    # kmers were set by default, not SC, not IonTorrent data and not rna and temporary not meta
 
 
 def set_default_values():
@@ -501,3 +505,10 @@ def enable_truseq_mode():
     correct_scaffolds = True
     run_truseq_postprocessing = True
     only_assembler = True
+
+
+def will_rerun(options):
+    for opt, arg in options:
+        if opt == '--continue' or opt.startswith('--restart-from'):  # checks both --restart-from k33 and --restart-from=k33
+            return True
+    return False
diff --git a/src/spades_pipeline/spades_logic.py b/src/spades_pipeline/spades_logic.py
index 1aafd6b..8b47c0d 100644
--- a/src/spades_pipeline/spades_logic.py
+++ b/src/spades_pipeline/spades_logic.py
@@ -63,6 +63,8 @@ def prepare_config_spades(filename, cfg, log, additional_contigs_fname, K, stage
     if "bwa_paired" in cfg.__dict__:
         subst_dict["bwa_enable"] = bool_to_str(True)
     subst_dict["path_to_bwa"] =  os.path.join(execution_home, "bwa-spades")
+    if "series_analysis" in cfg.__dict__:
+        subst_dict["series_analysis"] = cfg.series_analysis
     process_cfg.substitute_params(filename, subst_dict, log)
 
 
@@ -120,15 +122,17 @@ def reveal_original_k_mers(RL):
 def add_configs(command, configs_dir):
     #Order matters here!
     mode_config_mapping = [("single_cell", "mda_mode"), 
-                           ("meta", "meta_mode"), 
+                           ("meta", "meta_mode"),
                            ("truseq_mode", "moleculo_mode"),
                            ("rna", "rna_mode"),
+                           ("large_genome", "large_genome_mode"),
                            ("plasmid", "plasmid_mode"),
                            ("careful", "careful_mode"),
                            ("diploid_mode", "diploid_mode")]
-
     for (mode, config) in mode_config_mapping:
         if options_storage.__dict__[mode]:
+            if mode == "rna" or mode == "meta":
+                command.append(os.path.join(configs_dir, "mda_mode.info"))
             command.append(os.path.join(configs_dir, config + ".info"))
     
 
@@ -143,7 +147,8 @@ def run_iteration(configs_dir, execution_home, cfg, log, K, prev_K, last_one):
             (options_storage.restart_from == ("k%d" % K) or options_storage.restart_from.startswith("k%d:" % K))):
             log.info("\n== Skipping assembler: " + ("K%d" % K) + " (already processed)")
             return
-        if options_storage.restart_from and options_storage.restart_from.find(":") != -1:
+        if options_storage.restart_from and options_storage.restart_from.find(":") != -1 \
+                and options_storage.restart_from.startswith("k%d:" % K):
             stage = options_storage.restart_from[options_storage.restart_from.find(":") + 1:]
         support.continue_from_here(log)
 
@@ -237,7 +242,7 @@ def run_spades(configs_dir, execution_home, cfg, dataset_data, ext_python_module
     used_K = []
 
     # checking and removing conflicting K-mer directories
-    if options_storage.restart_from:
+    if options_storage.restart_from and (options_storage.restart_k_mers != options_storage.original_k_mers):
         processed_K = []
         for k in range(options_storage.MIN_K, options_storage.MAX_K, 2):
             cur_K_dir = os.path.join(cfg.output_dir, "K%d" % k)
@@ -362,6 +367,9 @@ def run_spades(configs_dir, execution_home, cfg, dataset_data, ext_python_module
                 if os.path.isfile(os.path.join(latest, "scaffolds.paths")):
                     if not os.path.isfile(cfg.result_scaffolds_paths) or not options_storage.continue_mode:
                         shutil.copyfile(os.path.join(latest, "scaffolds.paths"), cfg.result_scaffolds_paths)
+            if os.path.isfile(os.path.join(latest, "assembly_graph.gfa")):
+                if not os.path.isfile(cfg.result_graph_gfa) or not options_storage.continue_mode:
+                    shutil.copyfile(os.path.join(latest, "assembly_graph.gfa"), cfg.result_graph_gfa)
             if os.path.isfile(os.path.join(latest, "assembly_graph.fastg")):
                 if not os.path.isfile(cfg.result_graph) or not options_storage.continue_mode:
                     shutil.copyfile(os.path.join(latest, "assembly_graph.fastg"), cfg.result_graph)
diff --git a/src/spades_pipeline/support.py b/src/spades_pipeline/support.py
index 2df2199..7fc8d15 100644
--- a/src/spades_pipeline/support.py
+++ b/src/spades_pipeline/support.py
@@ -80,6 +80,7 @@ def check_binaries(binary_dir, log):
 
 def check_file_existence(input_filename, message="", log=None, dipspades=False):
     filename = abspath(expanduser(input_filename))
+    check_path_is_ascii(filename, message)
     if not os.path.isfile(filename):
         error("file not found: %s (%s)" % (filename, message), log=log, dipspades=dipspades)
     options_storage.dict_of_rel2abs[input_filename] = filename
@@ -88,17 +89,25 @@ def check_file_existence(input_filename, message="", log=None, dipspades=False):
 
 def check_dir_existence(input_dirname, message="", log=None, dipspades=False):
     dirname = abspath(expanduser(input_dirname))
+    check_path_is_ascii(dirname, message)
     if not os.path.isdir(dirname):
         error("directory not found: %s (%s)" % (dirname, message), log=log, dipspades=dipspades)
     options_storage.dict_of_rel2abs[input_dirname] = dirname
     return dirname
 
+
+def check_path_is_ascii(path, message=""):
+    if not is_ascii_string(path):
+        error("path contains non-ASCII characters: %s (%s)" % (path, message))
+
+
 def ensure_dir_existence(dirname):
     if os.path.isfile(dirname):
         os.remove(dirname)
     if not os.path.exists(dirname):
         os.makedirs(dirname)
 
+
 def recreate_dir(dirname):
     if os.path.exists(dirname):
         shutil.rmtree(dirname)
@@ -172,6 +181,18 @@ def get_available_memory():
     return None
 
 
+# based on http://stackoverflow.com/questions/196345/how-to-check-if-a-string-in-python-is-in-ascii
+def is_ascii_string(line):
+    try:
+        line.encode('ascii')
+    except UnicodeDecodeError:  # python2
+        return False
+    except UnicodeEncodeError:  # python3
+        return False
+    else:
+        return True
+
+
 def process_readline(line, is_python3=sys.version.startswith('3.')):
     if is_python3:
         return str(line, 'utf-8').rstrip()
diff --git a/test_dataset_plasmid/pl1.fq.gz b/test_dataset_plasmid/pl1.fq.gz
new file mode 100644
index 0000000..c938262
Binary files /dev/null and b/test_dataset_plasmid/pl1.fq.gz differ
diff --git a/test_dataset_plasmid/pl2.fq.gz b/test_dataset_plasmid/pl2.fq.gz
new file mode 100644
index 0000000..f24a455
Binary files /dev/null and b/test_dataset_plasmid/pl2.fq.gz differ

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/spades.git



More information about the debian-med-commit mailing list