[med-svn] [stacks] 01/05: Imported Upstream version 1.40

Andreas Tille tille at debian.org
Thu May 19 11:34:01 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository stacks.

commit 709db0da7ed0a7253b2451bcf5820847f9bc24ad
Author: Andreas Tille <tille at debian.org>
Date:   Thu May 19 12:52:36 2016 +0200

    Imported Upstream version 1.40
---
 ChangeLog                       |  16 ++
 Makefile.am                     |   3 +-
 Makefile.in                     |   3 +-
 configure                       |  20 +-
 configure.ac                    |   2 +-
 scripts/denovo_map.pl           |  14 +-
 scripts/integrate_alignments.py | 426 ++++++++++++++++++++++++++++++++++++++++
 scripts/ref_map.pl              |   8 +-
 src/aln_utils.cc                | 258 ++++++++++++++++++++++++
 src/aln_utils.h                 |   5 +
 src/cstacks.cc                  |  59 +++---
 src/locus.cc                    |  13 ++
 src/locus.h                     |   1 +
 src/rxstacks.cc                 | 104 +++++++---
 src/rxstacks.h                  |   1 +
 src/sql_utilities.h             |  57 +++---
 src/sstacks.cc                  |   5 +-
 src/write.cc                    |  16 +-
 18 files changed, 898 insertions(+), 113 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 2614b51..6642845 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,19 @@
+Stacks 1.40 - May 04, 2016
+--------------------------
+	Feature: Changed process_radtags and process_shortreads to print FASTQ/FASTA headers using
+	"/1" and "/2" to represent the read number, instead of "_1" and "_2".
+	Bugfix: fixed a regression where allele depths were not being loaded due to the use of the new
+	*.models.tsv file. This file lacks the raw reads and therefore we can't count the raw stack depth
+	when running sstacks.
+	Bugfix: cstacks was calling errant SNPs in loci with a sample containing one gapped locus and
+	one ungapped locus matching the same catalog locus.
+
+Stacks 1.39 - April 23, 2016
+----------------------------
+	Bugfix: rxstacks was not adjusting reads/SNPs to account for alignment gaps. There was also a
+	bug in reading the input files.
+	Bugfix: denovo_map.pl and ref_map.pl were not processing parents/progeny properly.
+
 Stacks 1.38 - April 18, 2016
 ----------------------------
 	Feature: denovo_map.pl and ref_map.pl now print depth of coverage for each sample. The ustacks
diff --git a/Makefile.am b/Makefile.am
index 00c7114..96e3b88 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -141,7 +141,8 @@ clone_filter_LDADD    = $(BAM_LIBS)
 
 dist_bin_SCRIPTS = scripts/denovo_map.pl scripts/ref_map.pl scripts/export_sql.pl \
 	scripts/sort_read_pairs.pl scripts/exec_velvet.pl scripts/load_sequences.pl \
-	scripts/index_radtags.pl scripts/load_radtags.pl scripts/stacks_export_notify.pl
+	scripts/index_radtags.pl scripts/load_radtags.pl scripts/stacks_export_notify.pl \
+	scripts/integrate_alignments.py
 
 dist_noinst_SCRIPTS = autogen.sh scripts/extract_interpop_chars.pl scripts/convert_stacks.pl
 
diff --git a/Makefile.in b/Makefile.in
index a3958c0..c7e396b 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -806,7 +806,8 @@ clone_filter_LDFLAGS = $(OPENMP_CFLAGS)
 clone_filter_LDADD = $(BAM_LIBS)
 dist_bin_SCRIPTS = scripts/denovo_map.pl scripts/ref_map.pl scripts/export_sql.pl \
 	scripts/sort_read_pairs.pl scripts/exec_velvet.pl scripts/load_sequences.pl \
-	scripts/index_radtags.pl scripts/load_radtags.pl scripts/stacks_export_notify.pl
+	scripts/index_radtags.pl scripts/load_radtags.pl scripts/stacks_export_notify.pl \
+	scripts/integrate_alignments.py
 
 dist_noinst_SCRIPTS = autogen.sh scripts/extract_interpop_chars.pl scripts/convert_stacks.pl
 nobase_pkgdata_DATA = sql/mysql.cnf.dist sql/catalog_index.sql sql/stacks.sql sql/tag_index.sql sql/chr_index.sql \
diff --git a/configure b/configure
index bed374a..02e451f 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Stacks 1.38.
+# Generated by GNU Autoconf 2.69 for Stacks 1.40.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='Stacks'
 PACKAGE_TARNAME='stacks'
-PACKAGE_VERSION='1.38'
-PACKAGE_STRING='Stacks 1.38'
+PACKAGE_VERSION='1.40'
+PACKAGE_STRING='Stacks 1.40'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''
 
@@ -1283,7 +1283,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Stacks 1.38 to adapt to many kinds of systems.
+\`configure' configures Stacks 1.40 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1349,7 +1349,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Stacks 1.38:";;
+     short | recursive ) echo "Configuration of Stacks 1.40:";;
    esac
   cat <<\_ACEOF
 
@@ -1456,7 +1456,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Stacks configure 1.38
+Stacks configure 1.40
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1913,7 +1913,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by Stacks $as_me 1.38, which was
+It was created by Stacks $as_me 1.40, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -2776,7 +2776,7 @@ fi
 
 # Define the identity of the package.
  PACKAGE='stacks'
- VERSION='1.38'
+ VERSION='1.40'
 
 
 cat >>confdefs.h <<_ACEOF
@@ -6225,7 +6225,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Stacks $as_me 1.38, which was
+This file was extended by Stacks $as_me 1.40, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -6291,7 +6291,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Stacks config.status 1.38
+Stacks config.status 1.40
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
diff --git a/configure.ac b/configure.ac
index 71b3f52..dac7cdd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 AC_PREREQ(2.59)
-AC_INIT([Stacks], [1.38])
+AC_INIT([Stacks], [1.40])
 AC_CONFIG_AUX_DIR([config])
 AM_INIT_AUTOMAKE([-Wall -Werror foreign parallel-tests subdir-objects])
 AC_CONFIG_SRCDIR([src/ustacks.cc])
diff --git a/scripts/denovo_map.pl b/scripts/denovo_map.pl
index c5ddaaf..cd5f0c1 100755
--- a/scripts/denovo_map.pl
+++ b/scripts/denovo_map.pl
@@ -299,7 +299,7 @@ sub initialize_samples {
 
     my ($local_gzip, $file, $prefix, $suffix, $path, $found, $i);
 
-    if (length(scalar(@{$sample_list})) > 0 && scalar(@{$samples}) == 0) {
+    if (scalar(@{$sample_list}) > 0 && scalar(@{$samples}) == 0) {
         my @suffixes = ("fq",    "fastq", "fq.gz",   "fastq.gz", "fa",    "fasta", "fa.gz",   "fasta.gz");
         my @fmts     = ("fastq", "fastq", "gzfastq", "gzfastq",  "fasta", "fasta", "gzfasta", "gzfasta");
 
@@ -331,10 +331,10 @@ sub initialize_samples {
                 die("Unable to find sample '$sample' in directory '$sample_path' as specified in the population map, '$popmap_path'.\n");
             }
         }
-        
+
     } else {
         #
-        # Process any samples were specified on the command line.
+        # Process any samples that were specified on the command line.
         #
         foreach $sample (@{$parents}, @{$progeny}, @{$samples}) {
             $local_gzip = false;
@@ -346,7 +346,7 @@ sub initialize_samples {
                 $local_gzip = true;
                 ($prefix, $suffix) = ($prefix =~ /^(.+)\.(.+)$/);
             }
-            
+
             $sample->{'suffix'}  = $suffix;
             $sample->{'suffix'} .= ".gz" if ($local_gzip == true);
 
@@ -385,13 +385,13 @@ sub initialize_samples {
             }
         }
 
-        foreach $sample (@parents) {
+        foreach $sample (@{$parents}) {
             $sample->{'type'} = "parent";
         }
-        foreach $sample (@progeny) {
+        foreach $sample (@{$progeny}) {
             $sample->{'type'} = "progeny";
         }
-        foreach $sample (@samples) {
+        foreach $sample (@{$samples}) {
             $sample->{'type'} = "sample";
         }
     }
diff --git a/scripts/integrate_alignments.py b/scripts/integrate_alignments.py
new file mode 100755
index 0000000..f8a88ee
--- /dev/null
+++ b/scripts/integrate_alignments.py
@@ -0,0 +1,426 @@
+#!/usr/bin/env python
+
+import optparse
+import sys
+import os
+import gzip
+import re
+
+#
+# Global configuration variables.
+#
+path     = ""
+aln_path = ""
+out_path = ""
+batch_id = -1
+
+def parse_command_line():
+    global out_path
+    global aln_path
+    global path
+    global batch_id
+
+    p = optparse.OptionParser()
+
+    #
+    # Add options.
+    #
+    p.add_option("-o", action="store", dest="out_path",
+                 help="write modified Stacks files to this output path.")
+    p.add_option("-a", action="store", dest="aln_path",
+                 help="SAM file containing catalog locus alignments.")
+    p.add_option("-p", action="store", dest="path",
+                 help="path to Stacks directory.")
+    p.add_option("-b", action="store", dest="batch_id",
+                 help="Stacks batch ID.")
+
+    #
+    # Parse the command line
+    #
+    (opts, args) = p.parse_args()
+
+    if opts.out_path != None:
+        out_path = opts.out_path
+    if opts.aln_path != None:
+        aln_path = opts.aln_path
+    if opts.path != None:
+        path = opts.path
+    if opts.batch_id != None:
+        batch_id = int(opts.batch_id)
+
+    if len(out_path) == 0 or os.path.exists(out_path) == False:
+        print >> sys.stderr, "You must specify a valid path to write files to."
+        p.print_help()
+        sys.exit()
+
+    if out_path.endswith("/") == False:
+        out_path += "/"
+
+    if len(aln_path) == 0 or os.path.exists(aln_path) == False:
+        print >> sys.stderr, "You must specify a valid path to a SAM file containing catalog locus alignments."
+        p.print_help()
+        sys.exit()
+
+    if len(path) == 0 or os.path.exists(path) == False:
+        print >> sys.stderr, "You must specify a valid path to Stacks input files."
+        p.print_help()
+        sys.exit()
+
+    if batch_id < 0:
+        print >> sys.stderr, "You must specify the batch ID that was supplied to Stacks."
+        p.print_help()
+        sys.exit()
+
+    if path.endswith("/") == False:
+        path += "/"
+        
+        
+def find_stacks_files(path, files):
+    try:
+        entries = os.listdir(path)
+
+        for entry in entries:
+            pos = entry.find(".matches.tsv.gz")
+            if (pos == -1):
+                pos = entry.find(".matches.tsv")
+            if (pos != -1):
+                files.append(entry[0:pos])
+        print >> sys.stderr, "Found", len(files), "Stacks samples."
+
+    except:
+        print >> sys.stderr, "Unable to read files from Stacks directory, '", path, "'"
+
+
+def parse_catalog_alignments(aln_path, alns):
+    fh = open(aln_path, "r")
+
+    for line in fh:
+	line = line.strip("\n")
+
+	if len(line) == 0 or line[0] == "#" or line[0] == "@":
+            continue
+
+	parts = line.split("\t")
+        locus = int(parts[0])
+        chr   = parts[2]
+        bp    = int(parts[3])
+        flag  = int(parts[1])
+
+        #
+        # Check which strand the read is aligned to.
+        #
+        if flag & 16 > 0:
+            alns[locus] = (chr, bp, "-");
+        else:
+            alns[locus] = (chr, bp, "+");
+
+    fh.close()
+    print >> sys.stderr, "Loaded", len(alns), "catalog locus alignments from '", aln_path, "'."
+
+
+def convert_sample(path, file, out_path, alns):
+    matches = {}
+    #
+    # Open the matches file and load the matches to the catalog.
+    #
+    p = path + file + ".matches.tsv.gz"
+    if os.path.exists(p):
+        gzipped = True;
+        fh = gzip.open(p, 'rb')
+    else:
+        gzipped = False;
+        fh = open(path + file + ".matches.tsv", "r")
+
+    for line in fh:
+	line = line.strip("\n")
+
+	if len(line) == 0 or line[0] == "#":
+            continue
+
+	parts = line.split("\t")
+
+        cat_locus    = int(parts[2])
+        sample_locus = int(parts[4])
+        matches[sample_locus] = cat_locus
+
+    fh.close()
+
+    #
+    # Open the tags file and rewrite it with the alignment coordinates.
+    #
+    if gzipped:
+        fh = gzip.open(path + file + ".tags.tsv.gz", "rb")
+    else:
+        fh = open(path + file + ".tags.tsv", "r")
+
+    out_fh = open(out_path + file + ".tags.tsv", "w")
+
+    alns_written = {}
+
+    for line in fh:
+        if line[0] == "#":
+            out_fh.write(line)
+            continue
+
+	if len(line) == 0:
+            continue
+
+	parts        = line.split("\t")
+        sample_locus = int(parts[2])
+        read_type    = parts[6]
+
+        if read_type == "consensus":
+            if sample_locus not in matches:
+                continue;
+            cat_locus = matches[sample_locus]
+            if cat_locus not in alns:
+                continue;
+
+            (chr, bp, strand) = alns[cat_locus]
+
+            if sample_locus in alns_written:
+                alns_written[sample_locus] += 1
+            else:
+                alns_written[sample_locus]  = 1;
+                
+            buf = "\t".join(parts[0:3]) + "\t" + chr + "\t" + str(bp) + "\t" + strand + "\t" + "\t".join(parts[6:])
+            out_fh.write(buf)
+
+        elif sample_locus in alns_written:
+            out_fh.write(line)
+
+    fh.close()
+    out_fh.close()
+
+    #
+    # Open the SNPs, Alleles, and Matches files and rewrite those that had alignment coordinates.
+    #
+    if gzipped:
+        fh = gzip.open(path + file + ".snps.tsv.gz", "rb")
+    else:
+        fh = open(path + file + ".snps.tsv", "r")
+
+    out_fh = open(out_path + file + ".snps.tsv", "w")
+
+    for line in fh:
+        if line[0] == "#":
+            out_fh.write(line)
+            continue
+
+	if len(line) == 0:
+            continue
+
+	parts        = line.split("\t")
+        sample_locus = int(parts[2])
+
+        if sample_locus in alns_written:
+            out_fh.write(line)
+
+    fh.close()
+    out_fh.close()
+
+    if gzipped:
+        fh = gzip.open(path + file + ".alleles.tsv.gz", "rb")
+    else:
+        fh = open(path + file + ".alleles.tsv", "r")
+
+    out_fh = open(out_path + file + ".alleles.tsv", "w")
+
+    for line in fh:
+        if line[0] == "#":
+            out_fh.write(line)
+            continue
+
+	if len(line) == 0:
+            continue
+
+	parts        = line.split("\t")
+        sample_locus = int(parts[2])
+
+        if sample_locus in alns_written:
+            out_fh.write(line)
+
+    fh.close()
+    out_fh.close()
+
+    if gzipped:
+        fh = gzip.open(path + file + ".matches.tsv.gz", "rb")
+    else:
+        fh = open(path + file + ".matches.tsv", "r")
+
+    out_fh = open(out_path + file + ".matches.tsv", "w")
+
+    for line in fh:
+        if line[0] == "#":
+            out_fh.write(line)
+            continue
+
+	if len(line) == 0:
+            continue
+
+	parts        = line.split("\t")
+        sample_locus = int(parts[2])
+
+        if sample_locus in alns_written:
+            out_fh.write(line)
+
+    fh.close()
+    out_fh.close()
+
+    #
+    # If it exists, open the model file and rewrite it with the alignment coordinates.
+    #
+    if gzipped:
+        if os.path.exists(path + file + ".models.tsv.gz") == False:
+            return len(alns_written)
+    elif os.path.exists(path + file + ".models.tsv") == False:
+        return len(alns_written)
+
+    if gzipped:
+        fh = gzip.open(path + file + ".models.tsv.gz", "rb")
+    else:
+        fh = open(path + file + ".models.tsv", "r")
+
+    out_fh = open(out_path + file + ".models.tsv", "w")
+
+    for line in fh:
+        if line[0] == "#":
+            out_fh.write(line)
+            continue
+
+	if len(line) == 0:
+            continue
+
+	parts        = line.split("\t")
+        sample_locus = int(parts[2])
+        read_type    = parts[6]
+
+        if sample_locus in alns_written:
+            if read_type == "consensus":
+                (chr, bp, strand) = alns[matches[sample_locus]]
+                buf = "\t".join(parts[0:3]) + "\t" + chr + "\t" + str(bp) + "\t" + strand + "\t" + "\t".join(parts[6:])
+                out_fh.write(buf)
+            else:
+                out_fh.write(line)
+
+    fh.close()
+    out_fh.close()
+
+    return len(alns_written)
+
+
+def convert_catalog(path, batch_id, out_path, alns):
+    #
+    # Open the tags file and rewrite it with the alignment coordinates.
+    #
+    p = path + "batch_" + str(batch_id) + ".catalog.tags.tsv.gz"
+    if os.path.exists(p):
+        gzipped = True;
+        fh = gzip.open(path + "batch_" + str(batch_id) + ".catalog.tags.tsv.gz", "rb")
+    else:
+        gzipped = False;
+        fh = open(path + "batch_" + str(batch_id) + ".catalog.tags.tsv", "r")
+
+    out_fh = open(out_path + "batch_" + str(batch_id) + ".catalog.tags.tsv", "w")
+
+    alns_written = {}
+
+    for line in fh:
+        if line[0] == "#":
+            out_fh.write(line)
+            continue
+
+	if len(line) == 0:
+            continue
+
+	parts     = line.split("\t")
+        cat_locus = int(parts[2])
+
+        if cat_locus not in alns:
+            continue;
+
+        (chr, bp, strand) = alns[cat_locus]
+
+        if cat_locus in alns_written:
+            alns_written[cat_locus] += 1
+        else:
+            alns_written[cat_locus]  = 1;
+               
+        buf = "\t".join(parts[0:3]) + "\t" + chr + "\t" + str(bp) + "\t" + strand + "\t" + "\t".join(parts[6:])
+        out_fh.write(buf)
+
+    fh.close()
+    out_fh.close()
+
+    if gzipped:
+        fh = gzip.open(path + "batch_" + str(batch_id) + ".catalog.snps.tsv.gz", "rb")
+    else:
+        fh = open(path + "batch_" + str(batch_id) + ".catalog.snps.tsv", "r")
+
+    out_fh = open(out_path + "batch_" + str(batch_id) + ".catalog.snps.tsv", "w")
+
+    for line in fh:
+        if line[0] == "#":
+            out_fh.write(line)
+            continue
+
+	if len(line) == 0:
+            continue
+
+	parts     = line.split("\t")
+        cat_locus = int(parts[2])
+
+        if cat_locus in alns_written:
+            out_fh.write(line)
+
+    fh.close()
+    out_fh.close()
+
+    if gzipped:
+        fh = gzip.open(path + "batch_" + str(batch_id) + ".catalog.alleles.tsv.gz", "rb")
+    else:
+        fh = open(path + "batch_" + str(batch_id) + ".catalog.alleles.tsv", "r")
+
+    out_fh = open(out_path + "batch_" + str(batch_id) + ".catalog.alleles.tsv", "w")
+
+    for line in fh:
+        if line[0] == "#":
+            out_fh.write(line)
+            continue
+
+	if len(line) == 0:
+            continue
+
+	parts     = line.split("\t")
+        cat_locus = int(parts[2])
+
+        if cat_locus in alns_written:
+            out_fh.write(line)
+
+    fh.close()
+    out_fh.close()
+
+    return len(alns_written)
+
+
+parse_command_line()
+
+files = []
+alns  = {}
+
+find_stacks_files(path, files)
+parse_catalog_alignments(aln_path, alns)
+
+i  = 1
+for file in files:
+    print >> sys.stderr, "Processing file", str(i), "of", len(files), "['" +  file + "']"
+    cnt = convert_sample(path, file, out_path, alns)
+    print >> sys.stderr, "  Added alignments for", cnt, "loci."
+    i += 1
+
+#
+# Now process the catalog.
+#
+print >> sys.stderr, "Processing the catalog"
+cnt = convert_catalog(path, batch_id, out_path, alns)
+print >> sys.stderr, "  Added alignments for", cnt, "catalog loci."
diff --git a/scripts/ref_map.pl b/scripts/ref_map.pl
index d6ed872..18ec64b 100755
--- a/scripts/ref_map.pl
+++ b/scripts/ref_map.pl
@@ -292,7 +292,7 @@ sub initialize_samples {
 
     my ($local_gzip, $file, $prefix, $suffix, $path, $found, $i);
 
-    if (length(scalar(@{$sample_list})) > 0 && scalar(@{$samples}) == 0) {
+    if (scalar(@{$sample_list}) > 0 && scalar(@{$samples}) == 0) {
         my @suffixes = ("sam", "bam", "map", "bowtie");
         my @fmts     = ("sam", "bam", "map", "bowtie");
 
@@ -365,13 +365,13 @@ sub initialize_samples {
             }
         }
 
-        foreach $sample (@parents) {
+        foreach $sample (@{$parents}) {
             $sample->{'type'} = "parent";
         }
-        foreach $sample (@progeny) {
+        foreach $sample (@{$progeny}) {
             $sample->{'type'} = "progeny";
         }
-        foreach $sample (@samples) {
+        foreach $sample (@{$samples}) {
             $sample->{'type'} = "sample";
         }
     }
diff --git a/src/aln_utils.cc b/src/aln_utils.cc
index a253981..baf00a4 100644
--- a/src/aln_utils.cc
+++ b/src/aln_utils.cc
@@ -123,6 +123,113 @@ apply_cigar_to_seq(const char *seq, vector<pair<char, uint> > &cigar)
     return edited_seq;
 }
 
+string
+apply_cigar_to_model_seq(const char *seq, vector<pair<char, uint> > &cigar)
+{
+    uint   size = cigar.size();
+    char   op;
+    uint   dist, bp, edited_bp, stop;
+    string edited_seq;
+
+    //
+    // Calculate the overall sequence length.
+    //
+    uint seqlen = 0;
+    for (uint i = 0; i < size; i++)
+        seqlen += cigar[i].second;
+
+    bp  = 0;
+
+    edited_seq.reserve(seqlen);
+
+    for (uint i = 0; i < size; i++)  {
+        op   = cigar[i].first;
+        dist = cigar[i].second;
+
+        switch(op) {
+        case 'S':
+            stop = bp + dist;
+            while (bp < stop) {
+                edited_seq.push_back('U');
+                bp++;
+            }
+            break;
+        case 'D':
+	    edited_bp = 0;
+            while (edited_bp < dist) {
+                edited_seq.push_back('U');
+		edited_bp++;
+            }
+            break;
+        case 'I':
+        case 'M':
+            stop = bp + dist;
+            while (bp < stop) {
+                edited_seq.push_back(seq[bp]);
+                bp++;
+            }
+            break;
+        default:
+            break;
+        }
+    }
+
+    return edited_seq;
+}
+
+int
+apply_cigar_to_seq(char *seq, uint seq_len, const char *old_seq, vector<pair<char, uint> > &cigar)
+{
+    uint   size = cigar.size();
+    char   op;
+    uint   dist, bp, seq_bp, oldseq_len, stop;
+
+    oldseq_len = strlen(old_seq);
+    bp         = 0;
+    seq_bp     = 0;
+
+    for (uint i = 0; i < size; i++)  {
+        op   = cigar[i].first;
+        dist = cigar[i].second;
+
+        switch(op) {
+        case 'S':
+            stop = seq_bp + dist;
+            stop = stop > seq_len ? seq_len : stop;
+            while (seq_bp < stop) {
+                seq[seq_bp] = 'N';
+                seq_bp++;
+                bp++;
+            }
+            break;
+        case 'D':
+            stop = seq_bp + dist;
+            stop = stop > seq_len ? seq_len : stop;
+            while (seq_bp < stop) {
+                seq[seq_bp] = 'N';
+		seq_bp++;
+            }
+            break;
+        case 'I':
+        case 'M':
+            stop = bp + dist;
+            stop = stop > seq_len ? seq_len : stop;
+            while (bp < stop) {
+                seq[seq_bp] = old_seq[bp];
+                seq_bp++;
+                bp++;
+            }
+            break;
+        default:
+            break;
+        }
+    }
+
+    seq[seq_len] = '\0';
+
+    return 0;
+}
+
 int
 apply_cigar_to_model_seq(char *seq, uint seq_len, const char *model, vector<pair<char, uint> > &cigar)
 {
@@ -176,6 +283,53 @@ apply_cigar_to_model_seq(char *seq, uint seq_len, const char *model, vector<pair
     return 0;
 }
 
+string
+remove_cigar_from_seq(const char *seq, vector<pair<char, uint> > &cigar)
+{
+    uint   size = cigar.size();
+    char   op;
+    uint   dist, bp, edited_bp, stop;
+    string edited_seq;
+
+    //
+    // Calculate the overall sequence length.
+    //
+    uint seqlen = 0;
+    for (uint i = 0; i < size; i++)
+        seqlen += cigar[i].first != 'D' ? cigar[i].second : 0; 
+
+    bp  = 0;
+
+    edited_seq.reserve(seqlen);
+
+    for (uint i = 0; i < size; i++)  {
+        op   = cigar[i].first;
+        dist = cigar[i].second;
+
+        switch(op) {
+        case 'D':
+	    edited_bp = 0;
+            while (edited_bp < dist) {
+                edited_bp++;
+		bp++;
+            }
+            break;
+        case 'I':
+        case 'M':
+            stop = bp + dist;
+            while (bp < stop) {
+                edited_seq.push_back(seq[bp]);
+                bp++;
+            }
+            break;
+        default:
+            break;
+        }
+    }
+
+    return edited_seq;
+}
+
 int
 adjust_snps_for_gaps(vector<pair<char, uint> > &cigar, Locus *loc)
 {
@@ -214,3 +368,107 @@ adjust_snps_for_gaps(vector<pair<char, uint> > &cigar, Locus *loc)
     
     return 0;
 }
+
+int
+adjust_and_add_snps_for_gaps(vector<pair<char, uint> > &cigar, Locus *loc)
+{
+    uint   size = cigar.size();
+    char   op;
+    uint   dist, bp, new_bp, stop, snp_cnt;
+    SNP   *s;
+
+    bp      = 0;
+    new_bp  = 0;
+    snp_cnt = loc->snps.size();
+    
+    vector<SNP *> snps;
+
+    for (uint i = 0; i < size; i++)  {
+        op   = cigar[i].first;
+        dist = cigar[i].second;
+
+        switch(op) {
+        case 'D':
+            stop = new_bp + dist;
+            while (new_bp < stop) {
+                s = new SNP;
+                s->col    = new_bp;
+                s->type   = snp_type_unk;
+                s->rank_1 = 'N';
+                snps.push_back(s);
+                new_bp++;
+            }
+            break;
+        case 'I':
+        case 'M':
+        case 'S':
+            stop = bp + dist > snp_cnt ? snp_cnt : bp + dist;
+            while (bp < stop) {
+                loc->snps[bp]->col = new_bp;
+                snps.push_back(loc->snps[bp]);
+                bp++;
+                new_bp++;
+            }
+            break;
+        default:
+            break;
+        }
+    }    
+
+    loc->snps.clear();
+
+    for (uint i = 0; i < snps.size(); i++)
+        loc->snps.push_back(snps[i]);
+
+    return 0;
+}
+
+int
+remove_snps_from_gaps(vector<pair<char, uint> > &cigar, Locus *loc)
+{
+    uint   size = cigar.size();
+    char   op;
+    uint   dist, bp, new_bp, stop, snp_cnt;
+    SNP   *s;
+
+    bp      = 0;
+    new_bp  = 0;
+    snp_cnt = loc->snps.size();
+    
+    vector<SNP *> snps;
+
+    for (uint i = 0; i < size; i++)  {
+        op   = cigar[i].first;
+        dist = cigar[i].second;
+
+        switch(op) {
+        case 'D':
+            stop = bp + dist;
+            while (bp < stop) {
+                delete loc->snps[bp];
+                bp++;
+            }
+            break;
+        case 'I':
+        case 'M':
+        case 'S':
+            stop = bp + dist > snp_cnt ? snp_cnt : bp + dist;
+            while (bp < stop) {
+                loc->snps[bp]->col = new_bp;
+                snps.push_back(loc->snps[bp]);
+                bp++;
+                new_bp++;
+            }
+            break;
+        default:
+            break;
+        }
+    }    
+
+    loc->snps.clear();
+
+    for (uint i = 0; i < snps.size(); i++)
+        loc->snps.push_back(snps[i]);
+
+    return 0;
+}
diff --git a/src/aln_utils.h b/src/aln_utils.h
index 795fd24..c19b76e 100644
--- a/src/aln_utils.h
+++ b/src/aln_utils.h
@@ -36,7 +36,12 @@ using std::vector;
 string invert_cigar(string);
 int    parse_cigar(const char *, vector<pair<char, uint> > &);
 string apply_cigar_to_seq(const char *, vector<pair<char, uint> > &);
+string remove_cigar_from_seq(const char *, vector<pair<char, uint> > &);
+string apply_cigar_to_model_seq(const char *, vector<pair<char, uint> > &);
+int    apply_cigar_to_seq(char *, uint, const char *, vector<pair<char, uint> > &);
 int    apply_cigar_to_model_seq(char *, uint, const char *, vector<pair<char, uint> > &);
 int    adjust_snps_for_gaps(vector<pair<char, uint> > &, Locus *);
+int    adjust_and_add_snps_for_gaps(vector<pair<char, uint> > &, Locus *);
+int    remove_snps_from_gaps(vector<pair<char, uint> > &, Locus *);
 
 #endif  // __ALN_UTILS_H__
diff --git a/src/cstacks.cc b/src/cstacks.cc
index 464cc99..7bf608c 100644
--- a/src/cstacks.cc
+++ b/src/cstacks.cc
@@ -186,7 +186,9 @@ int update_catalog_index(map<int, CLocus *> &catalog, map<string, int> &cat_inde
     return 0;
 }
 
-int characterize_mismatch_snps(CLocus *catalog_tag, QLocus *query_tag) {
+int
+characterize_mismatch_snps(CLocus *catalog_tag, QLocus *query_tag)
+{
     set<int> snp_cols;
     uint i;
     for (i = 0; i < catalog_tag->snps.size(); i++)
@@ -328,6 +330,34 @@ merge_matches(map<int, CLocus *> &catalog, map<int, QLocus *> &sample, pair<int,
                 break;
             }
 
+        //
+        // If we have already matched a query locus to this catalog locus for the current
+        // sample, we must re-align the sequences in case changes have been made to the
+        // sequence by the previous matching sequence.
+        //
+        if (ctag->match_cnt > 0) {
+            string query_allele, query_seq, cat_allele, cat_seq;
+            // cerr << "    Warning: Catalog locus " << ctag->id
+            //      << ", Sample " << qtag->sample_id << ", locus " << qtag->id
+            //      << "; sequence length has changed since original alignment: "
+            //      << qseq_len << " <-> " << ctag->len
+            //      << "; re-aligning.\n";
+
+            //
+            // Find the proper query allele to align against the catalog. We can align
+            // against the catalog consensus because the allele strings may have changed.
+            //
+            query_allele = qtag->matches[match_index]->query_type;
+            for (uint k = 0; k < qtag->strings.size(); k++)
+                if (qtag->strings[k].first == query_allele) {
+                    query_seq = qtag->strings[k].second;
+                    break;
+                }
+            aln->init(ctag->len, qtag->len);
+            aln->align(ctag->con, query_seq);
+            cigar_str = invert_cigar(aln->result().cigar);
+        }
+
         bool gapped_aln = false;
         if (cigar_str.length() > 0)
             gapped_aln = true;
@@ -337,32 +367,7 @@ merge_matches(map<int, CLocus *> &catalog, map<int, QLocus *> &sample, pair<int,
         // Adjust the postition of any SNPs that were shifted down sequence due to a gap.
         //
         if (gapped_aln) {
-            qseq_len = parse_cigar(cigar_str.c_str(), cigar);
-
-            if (ctag->match_cnt > 0) {
-                string query_allele, query_seq, cat_allele, cat_seq;
-                // cerr << "    Warning: Catalog locus " << ctag->id
-                //      << ", Sample " << qtag->sample_id << ", locus " << qtag->id
-                //      << "; sequence length has changed since original alignment: "
-                //      << qseq_len << " <-> " << ctag->len
-                //      << "; re-aligning.\n";
-
-                //
-                // Find the proper query allele to align against the catalog. We can align
-                // against the catalog consensus because the allele strings may have changed.
-                //
-                query_allele = qtag->matches[match_index]->query_type;
-                for (uint k = 0; k < qtag->strings.size(); k++)
-                    if (qtag->strings[k].first == query_allele) {
-                        query_seq = qtag->strings[k].second;
-                        break;
-                    }
-                aln->init(ctag->len, qtag->len);
-                aln->align(ctag->con, query_seq);
-                cigar_str = invert_cigar(aln->result().cigar);
-                parse_cigar(cigar_str.c_str(), cigar);
-            }
-
+            qseq_len  = parse_cigar(cigar_str.c_str(), cigar);
             qseq      = apply_cigar_to_seq(qtag->con, cigar);
             adjust_snps_for_gaps(cigar, qtag);
 
diff --git a/src/locus.cc b/src/locus.cc
index be6e4a4..67ffea9 100644
--- a/src/locus.cc
+++ b/src/locus.cc
@@ -55,6 +55,19 @@ Locus::add_consensus(const char *seq)
 }
 
 int 
+Locus::add_model(const char *seq) 
+{
+    if (this->model != NULL)
+        delete [] this->model;
+
+    this->model = new char[this->len + 1];
+    strncpy(this->model, seq, this->len);
+    this->model[this->len] = '\0';
+    
+    return 0;
+}
+
+int 
 Locus::populate_alleles() 
 {
     vector<SNP *>::iterator  i;
diff --git a/src/locus.h b/src/locus.h
index 2d2e78f..864c7de 100644
--- a/src/locus.h
+++ b/src/locus.h
@@ -95,6 +95,7 @@ class Locus {
     uint sort_bp(uint k = 0);
     int snp_index(uint);
     int add_consensus(const char *);
+    int add_model(const char *);
     virtual int populate_alleles();
 };
 
diff --git a/src/rxstacks.cc b/src/rxstacks.cc
index 9f87091..3ca8781 100644
--- a/src/rxstacks.cc
+++ b/src/rxstacks.cc
@@ -228,6 +228,11 @@ int main (int argc, char* argv[]) {
 	    Datum   *d;
 	    Locus   *loc;
 	    CSLocus *cloc;
+            uint     seq_len;
+            string   seq, model;
+            char    *adj_seq;
+            vector<pair<char, uint> > cigar;
+            vector<char *>            reads;
 
             #pragma omp for schedule(dynamic, 1) reduction(+:nuc_cnt) reduction(+:unk_hom_cnt) reduction(+:unk_het_cnt) \
 		reduction(+:hom_unk_cnt) reduction(+:het_unk_cnt) reduction(+:hom_het_cnt) reduction(+:het_hom_cnt) \
@@ -267,6 +272,27 @@ int main (int argc, char* argv[]) {
 		    continue;
 		}
 
+                //
+                // If this locus was matched to the catalog using a gapped alignment, adjust the sequence
+                // and SNP positions.
+                //
+                if (d->cigar != NULL) {
+                    seq_len = parse_cigar(d->cigar, cigar);
+                    seq     = apply_cigar_to_seq(loc->con, cigar);
+                    model   = apply_cigar_to_model_seq(loc->model, cigar);
+                    loc->add_consensus(seq.c_str());
+                    loc->add_model(model.c_str());
+                    adjust_and_add_snps_for_gaps(cigar, loc);
+
+                    for (uint k = 0; k < loc->reads.size(); k++) {
+                        reads.push_back(loc->reads[k]);
+
+                        adj_seq = new char[seq_len + 1];
+                        apply_cigar_to_seq(adj_seq, seq_len, loc->reads[k], cigar);
+                        loc->reads[k] = adj_seq;
+                    }
+                }
+
 		prune_nucleotides(cloc, loc, d, log_snp_fh,
 				  nuc_cnt,
 				  unk_hom_cnt, unk_het_cnt, 
@@ -284,6 +310,25 @@ int main (int argc, char* argv[]) {
 		    if (loc->blacklisted)
 			blacklist_cnt++;
 		}
+
+                //
+                // If this locus was matched to the catalog using a gapped alignment, de-adjust the sequence
+                // and SNP positions for writing back to disk.
+                //
+                if (d->cigar != NULL) {
+                    seq   = remove_cigar_from_seq(loc->con, cigar);
+                    model = remove_cigar_from_seq(loc->model, cigar);
+                    loc->add_consensus(seq.c_str());
+                    loc->add_model(model.c_str());
+                    remove_snps_from_gaps(cigar, loc);
+
+                    for (uint k = 0; k < loc->reads.size(); k++) {
+                        adj_seq = loc->reads[k];
+                        loc->reads[k] = reads[k];
+                        delete [] adj_seq;
+                    }
+                    reads.clear();
+                }
 	    }
         }
 
@@ -845,8 +890,19 @@ prune_nucleotides(CSLocus *cloc, Locus *loc, Datum *d, ofstream &log_fh, unsigne
     nuc_cnt += loc->len;
 
     for (uint i = 0; i < loc->snps.size() && i < cloc->snps.size(); i++) {
+
+        //
+        // Safety checks.
+        //
+        if (loc->snps[i]->col != cloc->snps[i]->col)
+            cerr << "Warning: comparing mismatched SNPs in catalog locus " << cloc->id
+                 << " and sample " << loc->sample_id << ", locus " << loc->id << "; col: " << loc->snps[i]->col << "\n";
+        if (loc->snps[i]->type == snp_type_het && cloc->snps[i]->type == snp_type_hom)
+            cerr << "Warning: sample locus is variable while catalog locus is fixed; catalog locus " << cloc->id
+                 << " and sample " << loc->sample_id << ", locus " << loc->id << "; col: " << loc->snps[i]->col << "\n";
+        
 	//
-	// Either their is an unknown call in locus, or, there is a snp in the catalog and any state in the locus.
+	// Either there is an unknown call in locus, or, there is a snp in the catalog and any state in the locus.
 	//
 	if ((loc->snps[i]->type == snp_type_unk) || 
 	    (cloc->snps[i]->type == snp_type_het && loc->snps[i]->type == snp_type_hom)) {
@@ -854,11 +910,13 @@ prune_nucleotides(CSLocus *cloc, Locus *loc, Datum *d, ofstream &log_fh, unsigne
 	    // cerr << "  Looking at SNP call in tag " << loc->id << " at position " << i << "; col: " << loc->snps[i]->col << "\n"
 	    // 	 << "    Catalog column: " << cloc->snps[i]->col << " (" << i << "); Sample column: " << loc->snps[i]->col << " (" << i << ")\n"
 	    // 	 << "    Sample has model call type: " << (loc->snps[i]->type == snp_type_unk ? "Unknown" : "Homozygous") << "; nucleotides: '" 
-	    // 	 << loc->snps[i]->rank_1 << "' and '" << loc->snps[i]->rank_2 << "'\n"
+	    // 	 << loc->snps[i]->rank_1 << "' and '" << loc->snps[i]->rank_2 << "'; model call: " << loc->model[loc->snps[i]->col] << "\n"
 	    // 	 << "    Catalog has model call type: " << (cloc->snps[i]->type == snp_type_het ? "Heterozygous" : "Homozygous") << "; nucleotides: '" 
 	    // 	 << cloc->snps[i]->rank_1 << "' and '" << cloc->snps[i]->rank_2 << "'\n";
 
-	    if (loc->snps[i]->rank_1 == 'N' || cloc->snps[i]->rank_1 == 'N') continue;
+	    if (loc->snps[i]->rank_1 == 'N' || cloc->snps[i]->rank_1 == 'N' ||
+                loc->snps[i]->rank_1 == '-' || cloc->snps[i]->rank_1 == '-')
+                continue;
 
 	    cnucs.insert(cloc->snps[i]->rank_1);
 	    if (cloc->snps[i]->rank_2 != 0) cnucs.insert(cloc->snps[i]->rank_2);
@@ -895,22 +953,21 @@ prune_nucleotides(CSLocus *cloc, Locus *loc, Datum *d, ofstream &log_fh, unsigne
 	    //
 	    invoke_model(loc, i, nucs);
 
-	    if (verbose) {
-		#pragma omp critical
-		log_model_calls(loc, log_fh,
-				unk_hom_cnt, unk_het_cnt, 
-				hom_unk_cnt, het_unk_cnt, 
-				hom_het_cnt, het_hom_cnt);
-	    } else {
-		log_model_calls(loc, log_fh,
-				unk_hom_cnt, unk_het_cnt, 
-				hom_unk_cnt, het_unk_cnt, 
-				hom_het_cnt, het_hom_cnt);
-	    }
+            cnucs.clear();
 	}
+    }
 
-	nucs.clear();
-	cnucs.clear();
+    if (verbose) {
+        #pragma omp critical
+        log_model_calls(loc, log_fh,
+                        unk_hom_cnt, unk_het_cnt, 
+                        hom_unk_cnt, het_unk_cnt, 
+                        hom_het_cnt, het_hom_cnt);
+    } else {
+        log_model_calls(loc, log_fh,
+                        unk_hom_cnt, unk_het_cnt, 
+                        hom_unk_cnt, het_unk_cnt, 
+                        hom_het_cnt, het_hom_cnt);
     }
 
     //
@@ -1295,13 +1352,14 @@ write_results(string file, map<int, Locus *> &m)
 		 << tag_1->sample_id    << "\t" 
 		 << tag_1->id           << "\t\t\t\t";
 
-	    if (tag_1->comp_type[j] == primary)
-		sstr << "primary" << "\t";
-	    else
-		sstr << "secondary" << "\t";
+	    if (tag_1->comp_type[j] == primary) {
+		sstr << "primary" << "\t"
+                     << tag_1->comp_cnt[j]  << "\t";
+            } else {
+		sstr << "secondary" << "\t\t";
+            }
 
-	    sstr << tag_1->comp_cnt[j]  << "\t" 
-		 << tag_1->comp[j]      << "\t" 
+	    sstr << tag_1->comp[j]      << "\t" 
 		 << tag_1->reads[j]     << "\t\t\t\t\n";
 	}
 
diff --git a/src/rxstacks.h b/src/rxstacks.h
index 084ed5e..5361251 100644
--- a/src/rxstacks.h
+++ b/src/rxstacks.h
@@ -62,6 +62,7 @@ using std::set;
 #include "PopSum.h"
 #include "utils.h"
 #include "sql_utilities.h"
+#include "aln_utils.h"
 #include "models.h"
 #include "mst.h"
 
diff --git a/src/sql_utilities.h b/src/sql_utilities.h
index 1eafe7d..8fc23d1 100644
--- a/src/sql_utilities.h
+++ b/src/sql_utilities.h
@@ -54,37 +54,36 @@ load_loci(string sample,  map<int, LocusT *> &loci, bool store_reads, bool load_
     char *line      = (char *) malloc(sizeof(char) * max_len);
     int   size      = max_len;
     bool  gzip      = false;
-    bool  open_fail = false;
+    bool  open_fail = true;
     int   fh_status = 1;
 
-    // 
-    // First, try to parse the models file to pull in the consensus sequence and model string
-    // for each locus. If the models file is not available or we are requested to store the
-    // reads from each stack, fall back to the tags file.
-    //
-    if (!store_reads) {
-        f = sample + ".models.tsv";
-        fh.open(f.c_str(), ifstream::in);
-        if (fh.fail())
-            open_fail = true;
-    }
-
-    if (!store_reads && open_fail) {
-        //
-        // Test for a gzipped MODELs file.
-        //
-        f = sample + ".models.tsv.gz";
-        gz_fh = gzopen(f.c_str(), "rb");
-        if (!gz_fh) {
-            open_fail = true;
-        } else {
-            open_fail = false;
-            #if ZLIB_VERNUM >= 0x1240
-            gzbuffer(gz_fh, libz_buffer_size);
-            #endif
-            gzip = true;
-        }
-    }
+    // // 
+    // // First, try to parse the models file to pull in the consensus sequence and model string
+    // // for each locus. If the models file is not available or we are requested to store the
+    // // reads from each stack, fall back to the tags file.
+    // //
+    // if (!store_reads) {
+    //     f = sample + ".models.tsv";
+    //     fh.open(f.c_str(), ifstream::in);
+    //     open_fail = fh.fail() ? true : false;
+    // }
+
+    // if (!store_reads && open_fail) {
+    //     //
+    //     // Test for a gzipped MODELs file.
+    //     //
+    //     f = sample + ".models.tsv.gz";
+    //     gz_fh = gzopen(f.c_str(), "rb");
+    //     if (!gz_fh) {
+    //         open_fail = true;
+    //     } else {
+    //         open_fail = false;
+    //         #if ZLIB_VERNUM >= 0x1240
+    //         gzbuffer(gz_fh, libz_buffer_size);
+    //         #endif
+    //         gzip = true;
+    //     }
+    // }
 
     if (open_fail) {
         //
diff --git a/src/sstacks.cc b/src/sstacks.cc
index d795af6..3b10560 100644
--- a/src/sstacks.cc
+++ b/src/sstacks.cc
@@ -799,7 +799,8 @@ search_for_gaps(map<int, Locus *> &catalog, map<int, QLocus *> &sample,
 
 	initialize_kmers(gapped_kmer_len, num_kmers, kmers);
 
-        #pragma omp for schedule(dynamic) reduction(+:matches) reduction(+:nomatches) reduction(+:mmatches) reduction(+:gapped_aln) reduction(+:ver_hap) reduction(+:tot_hap) reduction(+:bad_aln) reduction(+:no_haps)
+        #pragma omp for schedule(dynamic) reduction(+:matches) reduction(+:nomatches) reduction(+:mmatches) reduction(+:gapped_aln) reduction(+:ver_hap) \
+                                          reduction(+:tot_hap) reduction(+:bad_aln) reduction(+:no_haps)
         for (uint i = 0; i < keys.size(); i++) {
             query = sample[keys[i]];
 
@@ -1051,7 +1052,7 @@ verify_gapped_match(map<int, Locus *> &catalog, QLocus *query,
     }
 
     //
-    // 4. Assign the allele hits after verifying there is a match between catalog and query allele..
+    // 4. Assign the allele hits after verifying there is a match between catalog and query allele.
     //
     for (query_it = query_hits.begin(); query_it != query_hits.end(); query_it++) {
         query_allele = query_it->first;
diff --git a/src/write.cc b/src/write.cc
index 6b0b0b1..2b9df77 100644
--- a/src/write.cc
+++ b/src/write.cc
@@ -42,12 +42,12 @@ write_fasta(ofstream *fh, Read *href, bool overhang) {
     	    "_" << tile << 
     	    "_" << href->x <<
     	    "_" << href->y <<
-    	    "_" << href->read << "\n" <<
+    	    "/" << href->read << "\n" <<
     	    href->seq + offset << "\n";
     else 
     	*fh <<
     	    ">" << href->machine <<
-    	    "_" << href->read << "\n" <<
+    	    "/" << href->read << "\n" <<
     	    href->seq + offset << "\n";
 
     if (fh->fail()) return -1;
@@ -70,12 +70,12 @@ write_fasta(gzFile *fh, Read *href, bool overhang) {
     	    "_" << tile << 
     	    "_" << href->x <<
     	    "_" << href->y <<
-    	    "_" << href->read << "\n" <<
+    	    "/" << href->read << "\n" <<
     	    href->seq + offset << "\n";
     else 
     	sstr <<
     	    ">" << href->machine <<
-    	    "_" << href->read << "\n" <<
+    	    "/" << href->read << "\n" <<
     	    href->seq + offset << "\n";
 
     int res = gzputs(*fh, sstr.str().c_str());
@@ -126,14 +126,14 @@ write_fastq(ofstream *fh, Read *href, bool overhang) {
     	    "_" << tile << 
     	    "_" << href->x << 
     	    "_" << href->y << 
-    	    "_" << href->read << "\n" <<
+    	    "/" << href->read << "\n" <<
     	    href->seq + offset << "\n" <<
     	    "+\n" <<
     	    href->phred + offset << "\n";
     else
     	*fh <<
     	    "@" << href->machine << 
-    	    "_" << href->read << "\n" <<
+    	    "/" << href->read << "\n" <<
     	    href->seq + offset << "\n" <<
     	    "+\n" <<
     	    href->phred + offset << "\n";
@@ -161,14 +161,14 @@ write_fastq(gzFile *fh, Read *href, bool overhang) {
     	    "_" << tile << 
     	    "_" << href->x << 
     	    "_" << href->y << 
-    	    "_" << href->read << "\n" <<
+    	    "/" << href->read << "\n" <<
     	    href->seq + offset << "\n" <<
     	    "+\n" <<
     	    href->phred + offset << "\n";
     else
     	sstr <<
     	    "@" << href->machine << 
-    	    "_" << href->read << "\n" <<
+    	    "/" << href->read << "\n" <<
     	    href->seq + offset << "\n" <<
     	    "+\n" <<
     	    href->phred + offset << "\n";

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/stacks.git



More information about the debian-med-commit mailing list