[med-svn] [Git][med-team/fieldbioinformatics][master] 2 commits: Reviewing and modifying 2to3 patch.

Malihe Asemani gitlab at salsa.debian.org
Fri Apr 10 12:32:44 BST 2020



Malihe Asemani pushed to branch master at Debian Med / fieldbioinformatics


Commits:
4781dac7 by Malihe Asemani at 2020-04-10T04:56:42-05:00
Reviewing and modifying 2to3 patch.
Correcting trailing whitespace.

- - - - -
15b89ac3 by Malihe Asemani at 2020-04-10T06:24:58-05:00
Correcting Indentation

- - - - -


3 changed files:

- debian/patches/2to3.patch
- + debian/patches/indentation_correction.patch
- debian/patches/series


Changes:

=====================================
debian/patches/2to3.patch
=====================================
@@ -4,28 +4,8 @@ Description: Result of
    find . -name "*.py" -exec 2to3 -wn \{\} \;
  to port from Python2 to Python3
 
---- a/ansible/generate_user_passwords.py
-+++ b/ansible/generate_user_passwords.py
-@@ -36,7 +36,7 @@ def get_users_from_stdin():
-     return users
- 
- def usage():
--    print("Usage: {} [-l <length>] USER1[:PASSWORD1] USER2[:PASSWORD2] USER3[:PASSWORD3] ...".format( sys.argv[0] ) )
-+    print(("Usage: {} [-l <length>] USER1[:PASSWORD1] USER2[:PASSWORD2] USER3[:PASSWORD3] ...".format( sys.argv[0] ) ))
-     print ("   User names can also be passed via stdin (each username:password on a separate line)")
- 
- 
-@@ -69,7 +69,7 @@ def main(argv):
-             sys.exit(0)
-         if (opt == '-l' or opt == '--length'):
-            password_length = int(arg)
--    print (generate_yaml( usernames, password_length ))
-+    print((generate_yaml( usernames, password_length )))
- 
- if __name__ == '__main__' :
-     main(sys.argv[1:])
---- a/artic/align_trim_fasta.py
-+++ b/artic/align_trim_fasta.py
+--- fieldbioinformatics.orig/artic/align_trim_fasta.py
++++ fieldbioinformatics/artic/align_trim_fasta.py
 @@ -6,8 +6,8 @@
  import argparse
  import pysam
@@ -37,7 +17,7 @@ Description: Result of
  
  def find_query_pos(alignment, reference_pos):
      nearest = -1
-@@ -37,12 +37,12 @@ def go(args):
+@@ -37,12 +37,12 @@
          query_align_start = find_query_pos(s, primer_start)
          query_align_end = find_query_pos(s, primer_end)
  
@@ -52,17 +32,17 @@ Description: Result of
          #query_align_end + 30])
  
  parser = argparse.ArgumentParser(description='Trim alignments from an amplicon scheme.')
---- a/artic/checkdir.py
-+++ b/artic/checkdir.py
-@@ -5,4 +5,4 @@ import sys
+--- fieldbioinformatics.orig/artic/checkdir.py
++++ fieldbioinformatics/artic/checkdir.py
+@@ -5,4 +5,4 @@
  passexists = os.path.exists('data/%s/pass' % (sys.argv[1]))
  failexists = os.path.exists('data/%s/fail' % (sys.argv[1]))
  
 -print "%s\t%s\t%s" % (sys.argv[1], passexists, failexists)
 +print("%s\t%s\t%s" % (sys.argv[1], passexists, failexists))
---- a/artic/collatestats.py
-+++ b/artic/collatestats.py
-@@ -6,12 +6,12 @@ for fn in sys.argv[1:]:
+--- fieldbioinformatics.orig/artic/collatestats.py
++++ fieldbioinformatics/artic/collatestats.py
+@@ -6,12 +6,12 @@
  	fh = open(fn)
  	headers = fh.readline()
  	if not headerprinted:
@@ -77,9 +57,9 @@ Description: Result of
  
  	
  
---- a/artic/collect_quals.py
-+++ b/artic/collect_quals.py
-@@ -17,13 +17,13 @@ def get_runs(dataset):
+--- fieldbioinformatics.orig/artic/collect_quals.py
++++ fieldbioinformatics/artic/collect_quals.py
+@@ -17,13 +17,13 @@
  	return cur.fetchall()
  
  runs = get_runs(sys.argv[2])
@@ -91,13 +71,13 @@ Description: Result of
  	fh.readline()
  	for ln in fh:
 -		print "%s\tma\t%s" % (row['batch'], ln), 
-+		print("%s\tma\t%s" % (row['batch'], ln), end=' ') 
++		print("%s\tma\t%s" % (row['batch'], ln), end=' ')
  	fh.close()
  
  #	fh = open("EM_079517_%s_hq_bwa.idystats.txt" % (row['batch']))
---- a/artic/collect_times.py
-+++ b/artic/collect_times.py
-@@ -25,12 +25,12 @@ t2 = collect_times('data/%s/fail' % (run
+--- fieldbioinformatics.orig/artic/collect_times.py
++++ fieldbioinformatics/artic/collect_times.py
+@@ -25,12 +25,12 @@
  with open("times/%s.times.txt" % (run,), "w") as fh:
  	start_time = float(min(t1[0], t2[0]))
  	end_time = float(max(t1[1], t2[1]))
@@ -112,8 +92,8 @@ Description: Result of
 +	), file=fh)
  
  
---- a/artic/combineruns.py
-+++ b/artic/combineruns.py
+--- fieldbioinformatics.orig/artic/combineruns.py
++++ fieldbioinformatics/artic/combineruns.py
 @@ -2,16 +2,16 @@
  import csv
  import sys
@@ -138,9 +118,9 @@ Description: Result of
 +	for sample, barcodes in samples.items():
 +		print("variants.sh %s %s %s %s" % (ref, sample, primer_scheme, " ".join(barcodes)))
 +	print("cd ..")
---- a/artic/convertscheme.py
-+++ b/artic/convertscheme.py
-@@ -10,6 +10,6 @@ for ln in open(sys.argv[1]):
+--- fieldbioinformatics.orig/artic/convertscheme.py
++++ fieldbioinformatics/artic/convertscheme.py
+@@ -10,6 +10,6 @@
  
  	a,pair,b = cols[3].split('_')
  
@@ -148,9 +128,9 @@ Description: Result of
 +	print("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (cols[0], cols[1], cols[2], cols[3], 0, direction, pair))
  
  
---- a/artic/copyunprocessedfiles.py
-+++ b/artic/copyunprocessedfiles.py
-@@ -28,7 +28,7 @@ for root, dirs, files in os.walk(input_d
+--- fieldbioinformatics.orig/artic/copyunprocessedfiles.py
++++ fieldbioinformatics/artic/copyunprocessedfiles.py
+@@ -28,7 +28,7 @@
  					os.makedirs(checkdir)
  				movefrom = input_dir + '/' + albacore_root + '/' + name
  				moveto = process_dir + '/' + albacore_root + '/' + name
@@ -159,18 +139,18 @@ Description: Result of
  				shutil.copy(movefrom, moveto)
  
  
---- a/artic/countreads.py
-+++ b/artic/countreads.py
-@@ -13,5 +13,5 @@ cmd = "UPDATE runs SET num_reads_align =
+--- fieldbioinformatics.orig/artic/countreads.py
++++ fieldbioinformatics/artic/countreads.py
+@@ -13,5 +13,5 @@
       get_aligned("EM_079517_%s_hq_marginalign.sorted.bam" % (sys.argv[3],)),
  	 sys.argv[2]
  )
 -print cmd
 +print(cmd)
  
---- a/artic/cov.py
-+++ b/artic/cov.py
-@@ -5,7 +5,7 @@ import sys
+--- fieldbioinformatics.orig/artic/cov.py
++++ fieldbioinformatics/artic/cov.py
+@@ -5,7 +5,7 @@
  from tabulate import tabulate
  from pandas import DataFrame
  import collections
@@ -179,7 +159,7 @@ Description: Result of
  from operator import attrgetter
  from Bio import SeqIO
  
-@@ -27,11 +27,11 @@ class OrderedDefaultdict(collections.Ord
+@@ -27,11 +27,11 @@
  
      def __reduce__(self):  # optional, for pickle support
          args = (self.default_factory,) if self.default_factory else tuple()
@@ -193,7 +173,7 @@ Description: Result of
  
  def shell(cmd):
      p = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE)
-@@ -67,18 +67,18 @@ OrderedDefaultdict(list)
+@@ -67,18 +67,18 @@
  
  runs = get_runs()
  directory = '.'
@@ -217,9 +197,9 @@ Description: Result of
 -    print "\t".join([str(s) for s in row.values()])
 +    print("\t".join([str(s) for s in list(row.values())]))
  
---- a/artic/coverages.py
-+++ b/artic/coverages.py
-@@ -13,7 +13,7 @@ def collect_depths(bamfile):
+--- fieldbioinformatics.orig/artic/coverages.py
++++ fieldbioinformatics/artic/coverages.py
+@@ -13,7 +13,7 @@
  	if not os.path.exists(bamfile):
  		raise SystemExit("bamfile %s doesn't exist" % (bamfile,))
  
@@ -228,15 +208,15 @@ Description: Result of
  
  	p = subprocess.Popen(['samtools', 'depth', bamfile],
                               stdout=subprocess.PIPE)
-@@ -28,4 +28,4 @@ def collect_depths(bamfile):
+@@ -28,4 +28,4 @@
  bamfn = "EM_079517_%s_marginalign.sorted.bam" % (sys.argv[4])
  depths = collect_depths(bamfn)
  covered = len([a for a in depths if a >= 25])
 -print "UPDATE runs SET mean_cov = %s, median_cov = %s, covered = %s WHERE batch = '%s';" % (numpy.mean(depths), numpy.median(depths), covered, sys.argv[2])
 +print("UPDATE runs SET mean_cov = %s, median_cov = %s, covered = %s WHERE batch = '%s';" % (numpy.mean(depths), numpy.median(depths), covered, sys.argv[2]))
---- a/artic/fasta.py
-+++ b/artic/fasta.py
-@@ -7,19 +7,19 @@ def extract_fast5(path, basecaller, flow
+--- fieldbioinformatics.orig/artic/fasta.py
++++ fieldbioinformatics/artic/fasta.py
+@@ -7,19 +7,19 @@
  	for fast5 in Fast5FileSet(path, 0, basecaller):
  		read_flowcell_id= fast5.get_flowcell_id()
  		if flowcell_id != read_flowcell_id:
@@ -259,9 +239,9 @@ Description: Result of
  		fast5.close()
  
  extract_fast5(sys.argv[1], 'ONT Albacore Sequencing Software=1.0.4', sys.argv[2])
---- a/artic/generate_csv_ig.py
-+++ b/artic/generate_csv_ig.py
-@@ -7,7 +7,7 @@ from Bio import SeqIO
+--- fieldbioinformatics.orig/artic/generate_csv_ig.py
++++ fieldbioinformatics/artic/generate_csv_ig.py
+@@ -7,7 +7,7 @@
  import re
  
  def clean(s):
@@ -270,7 +250,7 @@ Description: Result of
  	return s
  
  public = 1
-@@ -39,10 +39,10 @@ for line in lines:
+@@ -39,10 +39,10 @@
  			line.append(line[2])
  		newlist.append(['EBOV', line[0], 'Goodfellow', 'SLE', line[2] + '--', line[3], line[4]])
  	else:
@@ -284,7 +264,7 @@ Description: Result of
  				line.append('_'.join([locstring[0], locstring[1]]))
  			line.append(locstring[0])
  		else:
-@@ -53,7 +53,7 @@ for line in newlist:
+@@ -53,7 +53,7 @@
  	counts[line[6]] += 1
  
  colourDict= {}
@@ -293,9 +273,9 @@ Description: Result of
  	colourDict[key] = colours[n]
  
  header = 'tree_id,id,__latitude,__longitude,prefec,prefec__shape,prefec__colour,date,__day,__month,__year,all'
---- a/artic/generate_report.py
-+++ b/artic/generate_report.py
-@@ -71,31 +71,31 @@ Alignments of differences within cluster
+--- fieldbioinformatics.orig/artic/generate_report.py
++++ fieldbioinformatics/artic/generate_report.py
+@@ -71,31 +71,31 @@
  
  """ % (dist, prefix)
  
@@ -336,9 +316,9 @@ Description: Result of
  
  
  for c in clusters:
---- a/artic/generate_tree_figure.py
-+++ b/artic/generate_tree_figure.py
-@@ -50,7 +50,7 @@ def get_meta_new(metadata, big_tree):
+--- fieldbioinformatics.orig/artic/generate_tree_figure.py
++++ fieldbioinformatics/artic/generate_tree_figure.py
+@@ -50,7 +50,7 @@
  def get_colours(clusters, tree, colours):	
  	#get a list of prefectures for both clusters
  	both_leaves = []
@@ -347,7 +327,7 @@ Description: Result of
  		b = ["'" + clusters[c][0] + "'", "'" + clusters[c][1] + "'"]
  		for a in tree.get_common_ancestor(b).get_leaves():
  			both_leaves.append(a.name[1:-1])
-@@ -62,16 +62,16 @@ def get_colours(clusters, tree, colours)
+@@ -62,16 +62,16 @@
  		#print each, metadata[each]['instrument'], metadata[each]['prefec']
  		if metadata[each]['instrument'] == 'MinION':
  			counts[metadata[each]['prefec']] += 1	
@@ -368,7 +348,7 @@ Description: Result of
  	if mode == 'small':
  		#delete unwanted leaves
  		keep_leaves = []
-@@ -81,7 +81,7 @@ def render_tree(tree, mode, cluster, col
+@@ -81,7 +81,7 @@
  		delete_leaves = [leaf for leaf in tree.get_leaf_names() if leaf not in keep_leaves]
  		#if cluster == 'Boke':
  		#	delete_leaves.extend(duplicates)
@@ -377,7 +357,7 @@ Description: Result of
  		for leaf in delete_leaves:
  			if tree.search_nodes(name=leaf)[0]:
  				n = tree.search_nodes(name=leaf)[0]
-@@ -141,7 +141,7 @@ if mode == 'small':
+@@ -141,7 +141,7 @@
  	ts.scale = 750000
  
  #add legend
@@ -386,7 +366,7 @@ Description: Result of
  	ts.legend.add_face(CircleFace(radius=size[mode]/2, color=colourDict[each]), column=0)
  	ts.legend.add_face(TextFace(each, ftype="Helvetica", fsize=size[mode]), column=1)
  ts.legend.add_face(CircleFace(radius=size[mode]/2, color='#F1F1F1'), column=0)
-@@ -167,8 +167,8 @@ if mode == 'big':
+@@ -167,8 +167,8 @@
  	cluster = 'big'
  	render_tree(big_tree, mode, cluster, colourDict, width=2000, position='float')
  elif mode == 'small':
@@ -397,9 +377,9 @@ Description: Result of
  else:
 -	print 'Mode not recognised: %s' %mode
 +	print('Mode not recognised: %s' %mode)
---- a/artic/get-alignment.py
-+++ b/artic/get-alignment.py
-@@ -11,7 +11,7 @@ def main(args):
+--- fieldbioinformatics.orig/artic/get-alignment.py
++++ fieldbioinformatics/artic/get-alignment.py
+@@ -11,7 +11,7 @@
  	ids = set([record.id for record in records])
  	lens = set([len(record.seq) for record in records])
  	if len(lens) != 1:
@@ -408,7 +388,7 @@ Description: Result of
  		sys.exit()
  	ignore = set(['-'])
  	discrim = defaultdict(str)
-@@ -22,12 +22,12 @@ def main(args):
+@@ -22,12 +22,12 @@
  			continue
  
  		if args.minfreq:
@@ -423,7 +403,7 @@ Description: Result of
  
  
  		discrim_pos.append(posn)
-@@ -39,7 +39,7 @@ def main(args):
+@@ -39,7 +39,7 @@
  
  #	print >>sys.stderr, discrim_pos
  	for each in discrim:
@@ -432,9 +412,9 @@ Description: Result of
  
  if __name__ == '__main__':
  	import argparse
---- a/artic/intersection_vcf.py
-+++ b/artic/intersection_vcf.py
-@@ -36,9 +36,9 @@ for vcffile in sys.argv[2:]:
+--- fieldbioinformatics.orig/artic/intersection_vcf.py
++++ fieldbioinformatics/artic/intersection_vcf.py
+@@ -36,9 +36,9 @@
  	fn = float(len(truthset - vcfset))
  	tpr = tp / (tp + fn)
  
@@ -448,9 +428,9 @@ Description: Result of
 +	print("Missing: %s" % (truthset - vcfset), file=sys.stderr)
 +	print("Extra:   %s" % (vcfset - truthset), file=sys.stderr)
  
---- a/artic/intersection_vcf_interrogate.py
-+++ b/artic/intersection_vcf_interrogate.py
-@@ -29,14 +29,14 @@ def read_vcf(fn):
+--- fieldbioinformatics.orig/artic/intersection_vcf_interrogate.py
++++ fieldbioinformatics/artic/intersection_vcf_interrogate.py
+@@ -29,14 +29,14 @@
  
  def filter_set(vcfinfo, threshold):
  	vcfset = set()
@@ -467,7 +447,7 @@ Description: Result of
  
  for ln in open(sys.argv[1]):
  	sample, tag, truthset_fn, vcffile_fn = ln.rstrip().split("\t")
-@@ -45,7 +45,7 @@ for ln in open(sys.argv[1]):
+@@ -45,7 +45,7 @@
  	try:
  		vcfinfo = read_vcf(vcffile_fn)
  	except IOError:
@@ -476,7 +456,7 @@ Description: Result of
  		continue
  
          vcf_reader = vcf.Reader(open(vcffile_fn, 'r'))
-@@ -57,11 +57,11 @@ for ln in open(sys.argv[1]):
+@@ -57,11 +57,11 @@
  		else:
  			state = 'Unknown'
  
@@ -490,7 +470,7 @@ Description: Result of
  
  	
  	continue
-@@ -73,7 +73,7 @@ for ln in open(sys.argv[1]):
+@@ -73,7 +73,7 @@
  		fn = float(len(truthset - vcfset))
  		tpr = tp / (tp + fn)
  
@@ -499,9 +479,9 @@ Description: Result of
  
  #for sample in vcfset & truthset:
  #		print vcfinfo[sample]
---- a/artic/intersection_vcf_stats.py
-+++ b/artic/intersection_vcf_stats.py
-@@ -30,12 +30,12 @@ def read_vcf(fn):
+--- fieldbioinformatics.orig/artic/intersection_vcf_stats.py
++++ fieldbioinformatics/artic/intersection_vcf_stats.py
+@@ -30,12 +30,12 @@
  
  def filter_set(vcfinfo, threshold):
  	vcfset = set()
@@ -516,7 +496,7 @@ Description: Result of
  
  
  for ln in open(sys.argv[1]):
-@@ -45,7 +45,7 @@ for ln in open(sys.argv[1]):
+@@ -45,7 +45,7 @@
  	try:
  		vcfinfo = read_vcf(vcffile_fn)
  	except IOError:
@@ -525,7 +505,7 @@ Description: Result of
  		continue
  
  	for threshold in [0.0]:
-@@ -57,7 +57,7 @@ for ln in open(sys.argv[1]):
+@@ -57,7 +57,7 @@
  		fp = float(len(vcfset - truthset))
  		tpr = tp / (tp + fn)
  
@@ -534,17 +514,17 @@ Description: Result of
  #		print "FN: %s" % (truthset - vcfset)
  #		print "FP: %s" % (vcfset - truthset)
  
---- a/artic/lengths.py
-+++ b/artic/lengths.py
+--- fieldbioinformatics.orig/artic/lengths.py
++++ fieldbioinformatics/artic/lengths.py
 @@ -3,4 +3,4 @@
  import sys
  from Bio import SeqIO
  
 -for rec in SeqIO.parse(sys.stdin, "fasta"): print rec.id, len(rec)
 +for rec in SeqIO.parse(sys.stdin, "fasta"): print(rec.id, len(rec))
---- a/artic/make_stats_file.py
-+++ b/artic/make_stats_file.py
-@@ -27,25 +27,25 @@ for row in runs:
+--- fieldbioinformatics.orig/artic/make_stats_file.py
++++ fieldbioinformatics/artic/make_stats_file.py
+@@ -27,25 +27,25 @@
  #		)
  #        for refnum in [1,2,3,4,5]:
          for refnum in [2]:
@@ -576,9 +556,9 @@ Description: Result of
 +		))
  
  
---- a/artic/makecommands.py
-+++ b/artic/makecommands.py
-@@ -26,14 +26,14 @@ for row in runs:
+--- fieldbioinformatics.orig/artic/makecommands.py
++++ fieldbioinformatics/artic/makecommands.py
+@@ -26,14 +26,14 @@
  	for ref in refs:	
  		batch2 = row['batch2'] if row['batch2'] else 'na'
  		if len(sys.argv) > 3 and sys.argv[3] == 'consensus':
@@ -586,7 +566,7 @@ Description: Result of
 +			print("consensus.sh ", end=' ')
  		elif len(sys.argv) > 3:
 -			print sys.argv[3] + " ", 
-+			print(sys.argv[3] + " ", end=' ') 
++			print(sys.argv[3] + " ", end=' ')
  		else:
 -			print "align.sh ",
 +			print("align.sh ", end=' ')
@@ -600,9 +580,9 @@ Description: Result of
 -		   	   (ref, row['batch'], row['batch'], row['batch'], batch2)
 +			print("%s %s %s %s_hq %s hq" % \
 +		   	   (ref, row['batch'], row['batch'], row['batch'], batch2))
---- a/artic/movematchfiles.py
-+++ b/artic/movematchfiles.py
-@@ -23,7 +23,7 @@ for root, dirs, files in os.walk(input_d
+--- fieldbioinformatics.orig/artic/movematchfiles.py
++++ fieldbioinformatics/artic/movematchfiles.py
+@@ -23,7 +23,7 @@
  					os.makedirs(checkdir)
  				movefrom = input_dir + '/' + albacore_root + '/' + name
  				moveto = output_dir + '/' + albacore_root + '/' + name
@@ -611,9 +591,9 @@ Description: Result of
  				shutil.move(movefrom, moveto)
  
  
---- a/artic/moveprocessedfiles.py
-+++ b/artic/moveprocessedfiles.py
-@@ -23,7 +23,7 @@ for root, dirs, files in os.walk(input_d
+--- fieldbioinformatics.orig/artic/moveprocessedfiles.py
++++ fieldbioinformatics/artic/moveprocessedfiles.py
+@@ -23,7 +23,7 @@
  					os.makedirs(checkdir)
  				movefrom = input_dir + '/' + albacore_root + '/' + name
  				moveto = process_dir + '/' + albacore_root + '/' + name
@@ -622,17 +602,17 @@ Description: Result of
  				shutil.move(movefrom, moveto)
  
  
---- a/artic/mungeheaders.py
-+++ b/artic/mungeheaders.py
-@@ -7,4 +7,4 @@ for ln in open(sys.argv[1]):
+--- fieldbioinformatics.orig/artic/mungeheaders.py
++++ fieldbioinformatics/artic/mungeheaders.py
+@@ -7,4 +7,4 @@
  	if '00000000-0000-0000-0000-000000000000' in ln:
  		ln = ln.replace('00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-%012d' % (id))
  		id += 1
 -	print ln,
 +	print(ln, end=' ')
---- a/artic/nanopolish_header.py
-+++ b/artic/nanopolish_header.py
-@@ -5,7 +5,7 @@ import sys
+--- fieldbioinformatics.orig/artic/nanopolish_header.py
++++ fieldbioinformatics/artic/nanopolish_header.py
+@@ -5,7 +5,7 @@
  
  recs = list(SeqIO.parse(open(sys.argv[1], "r"), "fasta"))
  if len (recs) != 1:
@@ -642,8 +622,8 @@ Description: Result of
  
 -print "%s:%d-%d" % (recs[0].id, 1, len(recs[0])+1)
 +print("%s:%d-%d" % (recs[0].id, 1, len(recs[0])+1))
---- a/artic/pdf_tree.py
-+++ b/artic/pdf_tree.py
+--- fieldbioinformatics.orig/artic/pdf_tree.py
++++ fieldbioinformatics/artic/pdf_tree.py
 @@ -1,8 +1,9 @@
  #!/usr/bin/env python
  
@@ -655,7 +635,7 @@ Description: Result of
  sys.setdefaultencoding( 'ISO8859-1' )
  
  from ete3 import Tree, NodeStyle, TreeStyle, CircleFace, TextFace, PhyloTree, faces
-@@ -49,7 +50,7 @@ def read_positions(fn):
+@@ -49,7 +50,7 @@
  	with open(fn) as csvfile:
  		for ln in csvfile:
  			cols = ln.split("\t")
@@ -664,7 +644,7 @@ Description: Result of
  			positions.append(int(cols[0]))
  	return positions
  
-@@ -172,7 +173,7 @@ def main(args):
+@@ -172,7 +173,7 @@
  	#legend
  	if args.legend:
  		legend = {}
@@ -673,9 +653,9 @@ Description: Result of
  			legend[s['prefec']] = s['prefec__colour']
  		for p in sorted(legend.keys()):
  			ts.legend.add_face(CircleFace(4, legend[p]), column=0)
---- a/artic/quality.py
-+++ b/artic/quality.py
-@@ -3,6 +3,6 @@ import sys
+--- fieldbioinformatics.orig/artic/quality.py
++++ fieldbioinformatics/artic/quality.py
+@@ -3,6 +3,6 @@
  import numpy
  
  for record in SeqIO.parse(sys.argv[1], "fastq"):
@@ -683,24 +663,9 @@ Description: Result of
 +	print(numpy.mean(record.letter_annotations["phred_quality"]))
  
  
---- a/artic/rampart.py
-+++ b/artic/rampart.py
-@@ -25,10 +25,10 @@ def run(parser, args):
- 		read_file = "%s.fasta" % (args.sample)
- 
- 	if not os.path.exists(ref):
--		print(colored.red('Scheme reference file not found: ') + ref)
-+		print((colored.red('Scheme reference file not found: ') + ref))
- 		raise SystemExit
- 	if not os.path.exists(bed):
--		print(colored.red('Scheme BED file not found: ') + bed)
-+		print((colored.red('Scheme BED file not found: ') + bed))
- 		raise SystemExit
- 
- 	cmds.append("bwa index %s" % (ref,))
---- a/artic/root.py
-+++ b/artic/root.py
-@@ -9,6 +9,6 @@ root = sys.argv[2]
+--- fieldbioinformatics.orig/artic/root.py
++++ fieldbioinformatics/artic/root.py
+@@ -9,6 +9,6 @@
  t = Tree(tree)
  t.set_outgroup(t & root)
  
@@ -708,9 +673,9 @@ Description: Result of
 +print(t.write())
  
  
---- a/artic/root_and_deheader.py
-+++ b/artic/root_and_deheader.py
-@@ -16,6 +16,6 @@ for leaf in t.iter_leaves():
+--- fieldbioinformatics.orig/artic/root_and_deheader.py
++++ fieldbioinformatics/artic/root_and_deheader.py
+@@ -16,6 +16,6 @@
  	elif cols[1] == 'SLE':
  		leaf.name = cols[0]
  
@@ -718,9 +683,9 @@ Description: Result of
 +print(t.write())
  
  
---- a/artic/runstats.py
-+++ b/artic/runstats.py
-@@ -5,7 +5,7 @@ import sys
+--- fieldbioinformatics.orig/artic/runstats.py
++++ fieldbioinformatics/artic/runstats.py
+@@ -5,7 +5,7 @@
  from tabulate import tabulate
  from pandas import DataFrame
  import collections
@@ -729,7 +694,7 @@ Description: Result of
  from operator import attrgetter
  from copy import copy
  
-@@ -27,11 +27,11 @@ class OrderedDefaultdict(collections.Ord
+@@ -27,11 +27,11 @@
  
      def __reduce__(self):  # optional, for pickle support
          args = (self.default_factory,) if self.default_factory else tuple()
@@ -743,7 +708,7 @@ Description: Result of
  
  def shell(cmd):
  	p = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE)
-@@ -66,17 +66,17 @@ OrderedDefaultdict(list)
+@@ -66,17 +66,17 @@
  #	for barcode in ['NB%02d' % (i,) for i in xrange(1,13)]:
  
  runs = get_runs()
@@ -765,9 +730,9 @@ Description: Result of
 +	print("\t".join([str(s) for s in list(row.values())]))
  
  #print tabulate(table, tablefmt='pipe', headers='keys')
---- a/artic/split-clusters.py
-+++ b/artic/split-clusters.py
-@@ -16,7 +16,7 @@ def main():
+--- fieldbioinformatics.orig/artic/split-clusters.py
++++ fieldbioinformatics/artic/split-clusters.py
+@@ -16,7 +16,7 @@
  	groups = set([c['group'] for c in clusters])
  
  	for group in groups:
@@ -776,9 +741,9 @@ Description: Result of
  		with open('%s-cluster%s' %(sys.argv[1], group), 'w') as fout:
  			SeqIO.write([records[i['node']] for i in clusters if i['group'] == group], fout, 'fasta')
  
---- a/artic/stats.py
-+++ b/artic/stats.py
-@@ -4,7 +4,7 @@ import sys
+--- fieldbioinformatics.orig/artic/stats.py
++++ fieldbioinformatics/artic/stats.py
+@@ -4,7 +4,7 @@
  import shutil
  from collections import defaultdict
  import re
@@ -787,7 +752,7 @@ Description: Result of
  
  lookup = dict([(i['Flowcell'], i) for i in runs.load_runs(sys.argv[2])])
  
-@@ -18,8 +18,8 @@ for root, dirs, files in os.walk(sys.arg
+@@ -18,8 +18,8 @@
  				flowcells[m.group(1)] += 1
  			unique.add(name)
  
@@ -799,9 +764,9 @@ Description: Result of
  	else:
 -		print >>sys.stderr, "No such flowcell %s" % (k,)
 +		print("No such flowcell %s" % (k,), file=sys.stderr)
---- a/artic/tagfastas.py
-+++ b/artic/tagfastas.py
-@@ -7,7 +7,7 @@ import os.path
+--- fieldbioinformatics.orig/artic/tagfastas.py
++++ fieldbioinformatics/artic/tagfastas.py
+@@ -7,7 +7,7 @@
  from Bio import SeqIO
  import json
  import subprocess
@@ -810,7 +775,7 @@ Description: Result of
  
  """ 
  go through the runsamples
-@@ -38,7 +38,7 @@ for sample in runsamples['data']:
+@@ -38,7 +38,7 @@
      run_name = cols[0]
      fn = '%s/%s.vcf' % (run_name, sample['sample_id'])
      if not os.path.exists(fn):
@@ -819,7 +784,7 @@ Description: Result of
          continue
  
      if fn in processed:
-@@ -46,17 +46,17 @@ for sample in runsamples['data']:
+@@ -46,17 +46,17 @@
      processed[fn] = True
  
      cmd = "margin_cons.py refs/Zika_FP.fasta %s/%s.vcf %s/%s.primertrimmed.sorted.bam" % (run_name, sample['sample_id'], run_name, sample['sample_id'])
@@ -840,9 +805,9 @@ Description: Result of
      
      """        
  {u'pregnancy_week': u'', u'municipality': u'murici', u'patient_sex': u'male', u'host_species': u'human', u'lab_internal_sample_id': u'', u'sample_id': u'ZBRD103', u'minion_barcodes': u'', u'ct': u'29.09', u'lab_id_lacen': u'150101004197', u'collection_date': u'2015-08-20', u'amplicon_concentration_pool_1': u'', u'pregnancy_trimester': u'', u'sample_number': u'103', u'symptoms': u'', u'creation_persistent_id': u'9EDCA6E1F234B3A6E160D5E819D8918D', u'state': u'alagoas', u'extraction_date': u'2016-06-13', u'creation_host_timestamp': u'09/08/2016 21:06:44', u'rt_positive': u'1', u'patient_age': u'25', u'modification_account_name': u'Admin', u'modification_persistent_id': u'9EDCA6E1F234B3A6E160D5E819D8918D', u'lab': u'lacen_maceio', u'onset_date': u'2015-08-18', u'microcephaly': u'', u'sample_type': u'', u'creation_account_name': u'Admin', u'modification_host_timestamp': u'', u'country': u'brazil', u'notes': u'', u'pregnant': u''}
---- a/artic/vcffilter.py
-+++ b/artic/vcffilter.py
-@@ -27,10 +27,10 @@ def filter(record):
+--- fieldbioinformatics.orig/artic/vcffilter.py
++++ fieldbioinformatics/artic/vcffilter.py
+@@ -27,10 +27,10 @@
  
  number_vcf = 0
  for record in vcf_reader:
@@ -855,10 +820,10 @@ Description: Result of
 +		print("Filtering %s" % (record), file=sys.stderr)
  	
 -print >>sys.stderr, "Output %s records" % (number_vcf)		
-+print("Output %s records" % (number_vcf), file=sys.stderr)		
---- a/artic/vcffilterqual.py
-+++ b/artic/vcffilterqual.py
-@@ -27,10 +27,10 @@ def filter(record):
++print("Output %s records" % (number_vcf), file=sys.stderr)
+--- fieldbioinformatics.orig/artic/vcffilterqual.py
++++ fieldbioinformatics/artic/vcffilterqual.py
+@@ -27,10 +27,10 @@
  
  number_vcf = 0
  for record in vcf_reader:
@@ -871,9 +836,9 @@ Description: Result of
 +		print("Filtering %s" % (record), file=sys.stderr)
  	
 -print >>sys.stderr, "Output %s records" % (number_vcf)		
-+print("Output %s records" % (number_vcf), file=sys.stderr)		
---- a/artic/zipfast5frombam.py
-+++ b/artic/zipfast5frombam.py
++print("Output %s records" % (number_vcf), file=sys.stderr)
+--- fieldbioinformatics.orig/artic/zipfast5frombam.py
++++ fieldbioinformatics/artic/zipfast5frombam.py
 @@ -3,7 +3,7 @@
  # Written by Nick Loman
  # zipfast5frombam.py bamfile fastafile zipfile
@@ -883,9 +848,9 @@ Description: Result of
  
  import pysam
  import sys
---- a/barcodes/demultiplex.py
-+++ b/barcodes/demultiplex.py
-@@ -28,9 +28,9 @@ def align_seq(seq,args):
+--- fieldbioinformatics.orig/barcodes/demultiplex.py
++++ fieldbioinformatics/barcodes/demultiplex.py
+@@ -28,9 +28,9 @@
          resultdict[match]=dict()
          resultdict[match]["score"]=score
  
@@ -897,7 +862,7 @@ Description: Result of
      #for result in results:
      #    print result
      result = results[0]
-@@ -60,9 +60,9 @@ def nucl_align(sQSeq,sRSeq,query,target)
+@@ -60,9 +60,9 @@
          dEle2Int[ele.lower()] = i
          dInt2Ele[i] = ele
      nEleNum = len(lEle)
@@ -910,7 +875,7 @@ Description: Result of
              if lEle[i] == lEle[j]:
                  lScore[i*nEleNum+j] = 3
              else:
-@@ -175,7 +175,7 @@ def buildPath(q, r, nQryBeg, nRefBeg, lC
+@@ -175,7 +175,7 @@
  
          if c == 'M':
              sQ += q[nQOff : nQOff+n]
@@ -919,7 +884,7 @@ Description: Result of
              sR += r[nROff : nROff+n]
              nQOff += n
              nROff += n
-@@ -263,7 +263,7 @@ def main():
+@@ -263,7 +263,7 @@
          #print sequence
  
          id_,score=align_seq(sequence,args)
@@ -928,7 +893,7 @@ Description: Result of
          if id_ not in resultdict:
              resultdict[id_]=dict()
              resultdict[id_]["counter"]=0
-@@ -274,17 +274,17 @@ def main():
+@@ -274,17 +274,17 @@
          resultdict[id_]["sequences"].append(fasta)
  
      ##print resultdict
@@ -952,9 +917,9 @@ Description: Result of
  
  
  
---- a/docs/conf.py
-+++ b/docs/conf.py
-@@ -52,18 +52,18 @@ source_suffix = ['.rst', '.md']
+--- fieldbioinformatics.orig/docs/conf.py
++++ fieldbioinformatics/docs/conf.py
+@@ -52,18 +52,18 @@
  master_doc = 'index'
  
  # General information about the project.
@@ -978,7 +943,7 @@ Description: Result of
  
  # The language for content autogenerated by Sphinx. Refer to documentation
  # for a list of supported languages.
-@@ -133,8 +133,8 @@ latex_elements = {
+@@ -133,8 +133,8 @@
  # (source start file, target name, title,
  #  author, documentclass [howto, manual, or own class]).
  latex_documents = [
@@ -989,7 +954,7 @@ Description: Result of
  ]
  
  
-@@ -143,7 +143,7 @@ latex_documents = [
+@@ -143,7 +143,7 @@
  # One entry per manual page. List of tuples
  # (source start file, name, description, authors, manual section).
  man_pages = [
@@ -998,7 +963,7 @@ Description: Result of
       [author], 1)
  ]
  
-@@ -154,7 +154,7 @@ man_pages = [
+@@ -154,7 +154,7 @@
  # (source start file, target name, title, author,
  #  dir menu entry, description, category)
  texinfo_documents = [


=====================================
debian/patches/indentation_correction.patch
=====================================
@@ -0,0 +1,166 @@
+Description: Correction of wrong indentation.
+Inconsistent use of tabs and spaces in indentation is fixed.
+Author: Malihe Asemani <ml.asemani at gmail.com>
+Last-Update: Fri, 10 Apr 2020
+--- fieldbioinformatics.orig/artic/collect_times.py
++++ fieldbioinformatics/artic/collect_times.py
+@@ -8,11 +8,10 @@
+ run=sys.argv[1]
+ 
+ def collect_times(directory):
+-        p = subprocess.Popen(['poretools', 'times', directory],
+-                             stdout=subprocess.PIPE)
+-	stamps = [row['unix_timestamp'] for row in csv.DictReader(p.stdout, dialect='excel-tab')]
++        p = subprocess.Popen(['poretools', 'times', directory], stdout=subprocess.PIPE)
++        stamps = [row['unix_timestamp'] for row in csv.DictReader(p.stdout, dialect='excel-tab')]
+ 
+-	return min(stamps), max(stamps), len(stamps)
++        return min(stamps), max(stamps), len(stamps)
+ 
+ 
+ # mean depth
+--- fieldbioinformatics.orig/artic/copyunprocessedfiles.py
++++ fieldbioinformatics/artic/copyunprocessedfiles.py
+@@ -19,18 +19,17 @@
+ 		basecalled_files.add(name)
+ 
+ for root, dirs, files in os.walk(input_dir, topdown=False):
+-	    for name in files:
+-			if name not in basecalled_files:
+-				albacore_root = root[len(input_dir):]
+-				# move it
+-				checkdir = process_dir + '/' + albacore_root
+-				if not os.path.exists(checkdir):
+-					os.makedirs(checkdir)
+-				movefrom = input_dir + '/' + albacore_root + '/' + name
+-				moveto = process_dir + '/' + albacore_root + '/' + name
+-				print("Copy %s to %s" % (movefrom, moveto))
+-				shutil.copy(movefrom, moveto)
+-
++	for name in files:
++		if name not in basecalled_files:
++			albacore_root = root[len(input_dir):]
++			# move it
++			checkdir = process_dir + '/' + albacore_root
++			if not os.path.exists(checkdir):
++				os.makedirs(checkdir)
++			movefrom = input_dir + '/' + albacore_root + '/' + name
++			moveto = process_dir + '/' + albacore_root + '/' + name
++			print("Copy %s to %s" % (movefrom, moveto))
++			shutil.copy(movefrom, moveto)
+ 
+ 
+ 
+--- fieldbioinformatics.orig/artic/make_stats_file.py
++++ fieldbioinformatics/artic/make_stats_file.py
+@@ -13,8 +13,8 @@
+ def get_runs(dataset):
+ 	if dataset == 'all':
+         	cur.execute("select * from runs where include = 'T'")
+-	else:
+-		cur.execute("select * from runs where runs.dataset = ? and include = 'T'", (dataset,))
++        else:
++                cur.execute("select * from runs where runs.dataset = ? and include = 'T'", (dataset,))
+         return cur.fetchall()
+ 
+ runs = get_runs(sys.argv[2])
+@@ -37,7 +37,7 @@
+ #			'EM_079517_mut30_2.mutations.txt',
+ #			'%s_hq_EM_079517_mut30_2_np_primer.filtered.vcf' % (row['Batch'])
+ #		)
+-    	print("%s\tnp-new-filter075-30\t%s\t%s" % (
++        print("%s\tnp-new-filter075-30\t%s\t%s" % (
+ 			row['Batch'],
+ 			'../refs/EM_079517_mut30_2.mutations.txt',
+ 			'%s_hq_EM_079517_mut30_2_np_primer.filtered075_30.vcf' % (row['Batch'])
+--- fieldbioinformatics.orig/artic/movematchfiles.py
++++ fieldbioinformatics/artic/movematchfiles.py
+@@ -14,17 +14,17 @@
+ 		basecalled_files.add(name)
+ 
+ for root, dirs, files in os.walk(input_dir, topdown=False):
+-	    for name in files:
+-			if name not in basecalled_files and matchpattern in name:
+-				albacore_root = root[len(input_dir):]
+-				# move it
+-				checkdir = output_dir + '/' + albacore_root
+-				if not os.path.exists(checkdir):
+-					os.makedirs(checkdir)
+-				movefrom = input_dir + '/' + albacore_root + '/' + name
+-				moveto = output_dir + '/' + albacore_root + '/' + name
+-				print("Move %s to %s" % (movefrom, moveto))
+-				shutil.move(movefrom, moveto)
++	for name in files:
++		if name not in basecalled_files and matchpattern in name:
++			albacore_root = root[len(input_dir):]
++			# move it
++			checkdir = output_dir + '/' + albacore_root
++			if not os.path.exists(checkdir):
++				os.makedirs(checkdir)
++			movefrom = input_dir + '/' + albacore_root + '/' + name
++			moveto = output_dir + '/' + albacore_root + '/' + name
++			print("Move %s to %s" % (movefrom, moveto))
++			shutil.move(movefrom, moveto)
+ 
+ 
+ 
+--- fieldbioinformatics.orig/artic/moveprocessedfiles.py
++++ fieldbioinformatics/artic/moveprocessedfiles.py
+@@ -14,17 +14,17 @@
+ 		basecalled_files.add(name)
+ 
+ for root, dirs, files in os.walk(input_dir, topdown=False):
+-	    for name in files:
+-			if name in basecalled_files:
+-				albacore_root = root[len(input_dir):]
+-				# move it
+-				checkdir = process_dir + '/' + albacore_root
+-				if not os.path.exists(checkdir):
+-					os.makedirs(checkdir)
+-				movefrom = input_dir + '/' + albacore_root + '/' + name
+-				moveto = process_dir + '/' + albacore_root + '/' + name
+-				print("Move %s to %s" % (movefrom, moveto))
+-				shutil.move(movefrom, moveto)
++	for name in files:
++		if name in basecalled_files:
++			albacore_root = root[len(input_dir):]
++			# move it
++			checkdir = process_dir + '/' + albacore_root
++			if not os.path.exists(checkdir):
++				os.makedirs(checkdir)
++			movefrom = input_dir + '/' + albacore_root + '/' + name
++			moveto = process_dir + '/' + albacore_root + '/' + name
++			print("Move %s to %s" % (movefrom, moveto))
++			shutil.move(movefrom, moveto)
+ 
+ 
+ 
+--- fieldbioinformatics.orig/artic/split-clusters.py
++++ fieldbioinformatics/artic/split-clusters.py
+@@ -10,12 +10,12 @@
+ def main():
+         records = SeqIO.to_dict(SeqIO.parse(open(sys.argv[1]), 'fasta'))
+ 
+-	reader = csv.DictReader(sys.stdin, dialect="excel-tab")
+-	clusters = list(reader)
++        reader = csv.DictReader(sys.stdin, dialect="excel-tab")
++        clusters = list(reader)
+ 
+-	groups = set([c['group'] for c in clusters])
++        groups = set([c['group'] for c in clusters])
+ 
+-	for group in groups:
++        for group in groups:
+ 		print("cluster%s\t%s-cluster%s" % (group, sys.argv[1], group))
+ 		with open('%s-cluster%s' %(sys.argv[1], group), 'w') as fout:
+ 			SeqIO.write([records[i['node']] for i in clusters if i['group'] == group], fout, 'fasta')
+--- fieldbioinformatics.orig/artic/intersection_vcf_interrogate.py
++++ fieldbioinformatics/artic/intersection_vcf_interrogate.py
+@@ -47,7 +47,6 @@
+ 	except IOError:
+ 		print("Cannot open %s" % (vcffile_fn), file=sys.stderr)
+ 		continue
+-
+         vcf_reader = vcf.Reader(open(vcffile_fn, 'r'))
+         for record in vcf_reader:
+                 if record.POS > MAX_COORD: continue


=====================================
debian/patches/series
=====================================
@@ -1 +1,2 @@
 2to3.patch
+indentation_correction.patch



View it on GitLab: https://salsa.debian.org/med-team/fieldbioinformatics/-/compare/7a353aab0c3f51316243173ac412d82a14c76489...15b89ac3579069c83b9ec4c1acbd3119be2e0cb8

-- 
View it on GitLab: https://salsa.debian.org/med-team/fieldbioinformatics/-/compare/7a353aab0c3f51316243173ac412d82a14c76489...15b89ac3579069c83b9ec4c1acbd3119be2e0cb8
You're receiving this email because of your account on salsa.debian.org.


-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://alioth-lists.debian.net/pipermail/debian-med-commit/attachments/20200410/4776ac78/attachment-0001.html>


More information about the debian-med-commit mailing list