[med-svn] [Git][med-team/centrifuge][master] 6 commits: Use 2to3 to port to Python3

Andreas Tille gitlab at salsa.debian.org
Tue Sep 10 07:38:00 BST 2019



Andreas Tille pushed to branch master at Debian Med / centrifuge


Commits:
7416efb0 by Andreas Tille at 2019-09-10T06:07:17Z
Use 2to3 to port to Python3

- - - - -
a8203d6d by Andreas Tille at 2019-09-10T06:07:54Z
Fix Depends

- - - - -
b96b8f0f by Andreas Tille at 2019-09-10T06:08:16Z
debhelper-compat 12

- - - - -
3f21054b by Andreas Tille at 2019-09-10T06:08:19Z
Standards-Version: 4.4.0

- - - - -
199e8685 by Andreas Tille at 2019-09-10T06:28:45Z
use dh-python

- - - - -
c8c350ba by Andreas Tille at 2019-09-10T06:31:09Z
Upload to unstable

- - - - -


6 changed files:

- debian/changelog
- − debian/compat
- debian/control
- + debian/patches/2to3.patch
- debian/patches/series
- debian/rules


Changes:

=====================================
debian/changelog
=====================================
@@ -1,3 +1,13 @@
+centrifuge (1.0.3-3) unstable; urgency=medium
+
+  * Use 2to3 to port to Python3
+    Closes: #936281
+  * debhelper-compat 12
+  * Standards-Version: 4.4.0
+  * use dh-python
+
+ -- Andreas Tille <tille at debian.org>  Tue, 10 Sep 2019 08:20:22 +0200
+
 centrifuge (1.0.3-2) unstable; urgency=medium
 
   [ Steffen Moeller ]
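
A minimal sketch of how the bulk of debian/patches/2to3.patch (shown further
below) can be regenerated, assuming 2to3 is simply run over the scripts it
touches; the output file name is illustrative, and the shebang and Makefile
hunks are manual edits that 2to3 does not produce:

    # 2to3 prints its proposed Python 3 changes as a unified diff on stdout;
    # -w would rewrite the files in place instead.
    2to3 centrifuge-build centrifuge-inspect \
         evaluation/centrifuge_evaluate.py \
         evaluation/centrifuge_simulate_reads.py \
         evaluation/test/centrifuge_evaluate_mason.py > python3-port.diff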


=====================================
debian/compat deleted
=====================================
@@ -1 +0,0 @@
-11


=====================================
debian/control
=====================================
@@ -3,10 +3,12 @@ Maintainer: Debian Med Packaging Team <debian-med-packaging at lists.alioth.debian.
 Uploaders: Andreas Tille <tille at debian.org>
 Section: science
 Priority: optional
-Build-Depends: debhelper (>= 11~),
+Build-Depends: debhelper-compat (= 12),
+               dh-python,
+               dh-sequence-python3,
                hisat2,
                jellyfish
-Standards-Version: 4.3.0
+Standards-Version: 4.4.0
 Vcs-Browser: https://salsa.debian.org/med-team/centrifuge
 Vcs-Git: https://salsa.debian.org/med-team/centrifuge.git
 Homepage: https://ccb.jhu.edu/software/centrifuge/
@@ -15,7 +17,8 @@ Package: centrifuge
 Architecture: any
 Depends: ${shlibs:Depends},
          ${misc:Depends},
-         python,
+         ${python3:Depends},
+         python3,
          hisat2,
          jellyfish
 Description: rapid and memory-efficient system for classification of DNA sequences


=====================================
debian/patches/2to3.patch
=====================================
@@ -0,0 +1,442 @@
+Description: Use 2to3 to port to Python3
+Bug-Debian: https://bugs.debian.org/936281
+Author: Andreas Tille <tille at debian.org>
+Last-Update: Tue, 10 Sep 2019 08:02:24 +0200
+
+--- a/Makefile
++++ b/Makefile
+@@ -364,11 +364,11 @@ centrifuge.bat:
+ 
+ centrifuge-build.bat:
+ 	echo "@echo off" > centrifuge-build.bat
+-	echo "python %~dp0/centrifuge-build %*" >> centrifuge-build.bat
++	echo "python3 %~dp0/centrifuge-build %*" >> centrifuge-build.bat
+ 
+ centrifuge-inspect.bat:
+ 	echo "@echo off" > centrifuge-inspect.bat
+-	echo "python %~dp0/centrifuge-inspect %*" >> centrifuge-inspect.bat
++	echo "python3 %~dp0/centrifuge-inspect %*" >> centrifuge-inspect.bat
+ 
+ 
+ .PHONY: centrifuge-src
+--- a/centrifuge-build
++++ b/centrifuge-build
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/python3
+ 
+ """
+  Copyright 2014, Daehwan Kim <infphilo at gmail.com>
+--- a/centrifuge-inspect
++++ b/centrifuge-inspect
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/python3
+ 
+ """
+  Copyright 2014, Daehwan Kim <infphilo at gmail.com>
+--- a/evaluation/centrifuge_evaluate.py
++++ b/evaluation/centrifuge_evaluate.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/python3
+ 
+ import sys, os, subprocess, inspect
+ import platform, multiprocessing
+@@ -25,7 +25,7 @@ def read_taxonomy_tree(tax_file):
+ """
+ def compare_scm(centrifuge_out, true_out, taxonomy_tree, rank):
+     ancestors = set()
+-    for tax_id in taxonomy_tree.keys():
++    for tax_id in list(taxonomy_tree.keys()):
+         if tax_id in ancestors:
+             continue
+         while True:
+@@ -106,7 +106,7 @@ def compare_scm(centrifuge_out, true_out
+             unclassified += 1
+ 
+     raw_unique_classified = 0
+-    for value in db_dic.values():
++    for value in list(db_dic.values()):
+         if len(value) == 1:
+             raw_unique_classified += 1
+     return classified, unique_classified, unclassified, len(db_dic), raw_unique_classified
+@@ -152,7 +152,7 @@ def compare_abundance(centrifuge_out, tr
+         if tax_id in db_dic:
+             SSR += (abundance - db_dic[tax_id]) ** 2;
+             if debug:
+-                print >> sys.stderr, "\t\t\t\t{:<10}: {:.6} vs. {:.6} (truth vs. centrifuge)".format(tax_id, abundance, db_dic[tax_id])
++                print("\t\t\t\t{:<10}: {:.6} vs. {:.6} (truth vs. centrifuge)".format(tax_id, abundance, db_dic[tax_id]), file=sys.stderr)
+         else:
+             SSR += (abundance) ** 2
+ 
+@@ -179,7 +179,7 @@ def sql_execute(sql_db, sql_query):
+ """
+ def create_sql_db(sql_db):
+     if os.path.exists(sql_db):
+-        print >> sys.stderr, sql_db, "already exists!"
++        print(sql_db, "already exists!", file=sys.stderr)
+         return
+     
+     columns = [
+@@ -316,7 +316,7 @@ def evaluate(index_base,
+         os.mkdir(index_path)
+     index_fnames = ["%s/%s.%d.cf" % (index_path, index_base, i+1) for i in range(3)]
+     if not check_files(index_fnames):
+-        print >> sys.stderr, "Downloading indexes: %s" % ("index")
++        print("Downloading indexes: %s" % ("index"), file=sys.stderr)
+         os.system("cd %s; wget ftp://ftp.ccb.jhu.edu/pub/infphilo/centrifuge/data/%s.tar.gz; tar xvzf %s.tar.gz; rm %s.tar.gz; ln -s %s/%s* .; cd -" % \
+                       (index_path, index_base, index_base, index_base, index_base, index_base))
+         assert check_files(index_fnames)        
+@@ -356,7 +356,7 @@ def evaluate(index_base,
+     scm_fname = "%s/%s.scm" % (read_path, read_base)
+     read_fnames = [read1_fname, read2_fname, truth_fname, scm_fname]
+     if not check_files(read_fnames):
+-        print >> sys.stderr, "Simulating reads %s_1.fq %s_2.fq ..." % (read_base, read_base)
++        print("Simulating reads %s_1.fq %s_2.fq ..." % (read_base, read_base), file=sys.stderr)
+         centrifuge_simulate = os.path.join(path_base, "centrifuge_simulate_reads.py")
+         simulate_cmd = [centrifuge_simulate,
+                         "--num-fragment", str(num_fragment)]
+@@ -377,11 +377,11 @@ def evaluate(index_base,
+     else:
+         base_fname = read_base + "_single"
+ 
+-    print >> sys.stderr, "Database: %s" % (index_base)
++    print("Database: %s" % (index_base), file=sys.stderr)
+     if paired:
+-        print >> sys.stderr, "\t%d million pairs" % (num_fragment / 1000000)
++        print("\t%d million pairs" % (num_fragment / 1000000), file=sys.stderr)
+     else:
+-        print >> sys.stderr, "\t%d million reads" % (num_fragment / 1000000)
++        print("\t%d million reads" % (num_fragment / 1000000), file=sys.stderr)
+ 
+     program_bin_base = "%s/.." % path_base
+     def get_program_version(program, version):
+@@ -428,7 +428,7 @@ def evaluate(index_base,
+         if version:
+             program_name += ("_%s" % version)
+ 
+-        print >> sys.stderr, "\t%s\t%s" % (program_name, str(datetime.now()))
++        print("\t%s\t%s" % (program_name, str(datetime.now())), file=sys.stderr)
+         if paired:
+             program_dir = program_name + "_paired"
+         else:
+@@ -449,7 +449,7 @@ def evaluate(index_base,
+         program_cmd = get_program_cmd(program, version, read1_fname, read2_fname, out_fname)
+         start_time = datetime.now()
+         if verbose:
+-            print >> sys.stderr, "\t", start_time, " ".join(program_cmd)
++            print("\t", start_time, " ".join(program_cmd), file=sys.stderr)
+         if program in ["centrifuge"]:
+             proc = subprocess.Popen(program_cmd, stdout=open(out_fname, "w"), stderr=subprocess.PIPE)
+         else:
+@@ -462,7 +462,7 @@ def evaluate(index_base,
+         if duration < 0.1:
+             duration = 0.1
+         if verbose:
+-            print >> sys.stderr, "\t", finish_time, "finished:", duration            
++            print("\t", finish_time, "finished:", duration, file=sys.stderr)            
+ 
+         results = {"strain"  : [0, 0, 0],
+                    "species" : [0, 0, 0],
+@@ -484,21 +484,21 @@ def evaluate(index_base,
+             # if rank == "strain":
+             #    assert num_cases == num_fragment
+ 
+-            print >> sys.stderr, "\t\t%s" % rank
+-            print >> sys.stderr, "\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(classified, num_cases, float(classified) / num_cases)
+-            print >> sys.stderr, "\t\t\tprecision  : {:,} / {:,} ({:.2%})".format(classified, raw_classified, float(classified) / raw_classified)
+-            print >> sys.stderr, "\n\t\t\tfor uniquely classified ",
++            print("\t\t%s" % rank, file=sys.stderr)
++            print("\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(classified, num_cases, float(classified) / num_cases), file=sys.stderr)
++            print("\t\t\tprecision  : {:,} / {:,} ({:.2%})".format(classified, raw_classified, float(classified) / raw_classified), file=sys.stderr)
++            print("\n\t\t\tfor uniquely classified ", end=' ', file=sys.stderr)
+             if paired:
+-                print >> sys.stderr, "pairs"
++                print("pairs", file=sys.stderr)
+             else:
+-                print >> sys.stderr, "reads"
+-            print >> sys.stderr, "\t\t\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(unique_classified, num_cases, float(unique_classified) / num_cases)
+-            print >> sys.stderr, "\t\t\t\t\tprecision  : {:,} / {:,} ({:.2%})".format(unique_classified, raw_unique_classified, float(unique_classified) / raw_unique_classified)
++                print("reads", file=sys.stderr)
++            print("\t\t\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(unique_classified, num_cases, float(unique_classified) / num_cases), file=sys.stderr)
++            print("\t\t\t\t\tprecision  : {:,} / {:,} ({:.2%})".format(unique_classified, raw_unique_classified, float(unique_classified) / raw_unique_classified), file=sys.stderr)
+ 
+             # Calculate sum of squared residuals in abundance
+             if rank == "strain":
+                 abundance_SSR = compare_abundance("centrifuge_report.tsv", truth_fname, taxonomy_tree, debug)
+-                print >> sys.stderr, "\t\t\tsum of squared residuals in abundance: {}".format(abundance_SSR)
++                print("\t\t\tsum of squared residuals in abundance: {}".format(abundance_SSR), file=sys.stderr)
+ 
+         if runtime_only:
+             os.chdir("..")
+--- a/evaluation/centrifuge_simulate_reads.py
++++ b/evaluation/centrifuge_simulate_reads.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/python3
+ 
+ #
+ # Copyright 2015, Daehwan Kim <infphilo at gmail.com>
+@@ -156,7 +156,7 @@ def read_transcript(genomes_seq, gtf_fil
+             transcripts[transcript_id][2].append([left, right])
+ 
+     # Sort exons and merge where separating introns are <=5 bps
+-    for tran, [chr, strand, exons] in transcripts.items():
++    for tran, [chr, strand, exons] in list(transcripts.items()):
+             exons.sort()
+             tmp_exons = [exons[0]]
+             for i in range(1, len(exons)):
+@@ -167,7 +167,7 @@ def read_transcript(genomes_seq, gtf_fil
+             transcripts[tran] = [chr, strand, tmp_exons]
+ 
+     tmp_transcripts = {}
+-    for tran, [chr, strand, exons] in transcripts.items():
++    for tran, [chr, strand, exons] in list(transcripts.items()):
+         exon_lens = [e[1] - e[0] + 1 for e in exons]
+         transcript_len = sum(exon_lens)
+         if transcript_len >= frag_len:
+@@ -444,8 +444,8 @@ def getSamAlignment(dna, exons, genome_s
+         MD += ("{}".format(MD_match_len))
+ 
+     if len(read_seq) != read_len:
+-        print >> sys.stderr, "read length differs:", len(read_seq), "vs.", read_len
+-        print >> sys.stderr, pos, "".join(cigars), cigar_descs, MD, XM, NM, Zs
++        print("read length differs:", len(read_seq), "vs.", read_len, file=sys.stderr)
++        print(pos, "".join(cigars), cigar_descs, MD, XM, NM, Zs, file=sys.stderr)
+         assert False
+ 
+     return pos, cigars, cigar_descs, MD, XM, NM, Zs, read_seq
+@@ -575,8 +575,8 @@ def samRepOk(genome_seq, read_seq, chr,
+         tMD += ("{}".format(match_len))
+ 
+     if tMD != MD or tXM != XM or tNM != NM or XM > max_mismatch or XM != NM:
+-        print >> sys.stderr, chr, pos, cigar, MD, XM, NM, Zs
+-        print >> sys.stderr, tMD, tXM, tNM
++        print(chr, pos, cigar, MD, XM, NM, Zs, file=sys.stderr)
++        print(tMD, tXM, tNM, file=sys.stderr)
+         assert False
+         
+         
+@@ -631,7 +631,7 @@ def simulate_reads(index_fname, base_fna
+     # Read genome sequences into memory
+     genomes_fname = index_fname + ".fa"
+     if not os.path.exists(genomes_fname):
+-        print >> sys.stderr, "Extracting genomes from Centrifuge index to %s, which may take a few hours ..."  % (genomes_fname)
++        print("Extracting genomes from Centrifuge index to %s, which may take a few hours ..."  % (genomes_fname), file=sys.stderr)
+         extract_cmd = [centrifuge_inspect,
+                        index_fname]
+         extract_proc = subprocess.Popen(extract_cmd, stdout=open(genomes_fname, 'w'))
+@@ -660,15 +660,15 @@ def simulate_reads(index_fname, base_fna
+     assert num_frag == sum(expr_profile)
+ 
+     if dna:
+-        genome_ids = genome_seqs.keys()
++        genome_ids = list(genome_seqs.keys())
+     else:
+-        transcript_ids = transcripts.keys()
++        transcript_ids = list(transcripts.keys())
+         random.shuffle(transcript_ids)
+         assert len(transcript_ids) >= len(expr_profile)
+ 
+     # Truth table
+     truth_file = open(base_fname + ".truth", "w")
+-    print >> truth_file, "taxID\tgenomeLen\tnumReads\tabundance\tname"
++    print("taxID\tgenomeLen\tnumReads\tabundance\tname", file=truth_file)
+     truth_list = []
+     normalized_sum = 0.0
+     debug_num_frag = 0
+@@ -695,19 +695,19 @@ def simulate_reads(index_fname, base_fna
+         if can_tax_id in names:
+             name = names[can_tax_id]
+         abundance = raw_abundance / genome_len / normalized_sum
+-        print >> truth_file, "{}\t{}\t{}\t{:.6}\t{}".format(tax_id, genome_len, t_num_frags, abundance, name)
++        print("{}\t{}\t{}\t{:.6}\t{}".format(tax_id, genome_len, t_num_frags, abundance, name), file=truth_file)
+     truth_file.close()
+ 
+     # Sequence Classification Map (SCM) - something I made up ;-)
+     scm_file = open(base_fname + ".scm", "w")
+ 
+     # Write SCM header
+-    print >> scm_file, "@HD\tVN:1.0\tSO:unsorted"
+-    for tax_id in genome_seqs.keys():
++    print("@HD\tVN:1.0\tSO:unsorted", file=scm_file)
++    for tax_id in list(genome_seqs.keys()):
+         name = ""
+         if tax_id in names:
+             name = names[tax_id]
+-        print >> scm_file, "@SQ\tTID:%s\tSN:%s\tLN:%d" % (tax_id, name, len(genome_seqs[tax_id]))
++        print("@SQ\tTID:%s\tSN:%s\tLN:%d" % (tax_id, name, len(genome_seqs[tax_id])), file=scm_file)
+ 
+     read_file = open(base_fname + "_1.fa", "w")
+     if paired_end:
+@@ -718,11 +718,11 @@ def simulate_reads(index_fname, base_fna
+         t_num_frags = expr_profile[t]
+         if dna:
+             tax_id = genome_ids[t]
+-            print >> sys.stderr, "TaxID: %s, num fragments: %d" % (tax_id, t_num_frags)
++            print("TaxID: %s, num fragments: %d" % (tax_id, t_num_frags), file=sys.stderr)
+         else:
+             transcript_id = transcript_ids[t]
+             chr, strand, transcript_len, exons = transcripts[transcript_id]
+-            print >> sys.stderr, transcript_id, t_num_frags
++            print(transcript_id, t_num_frags, file=sys.stderr)
+ 
+         genome_seq = genome_seqs[tax_id]
+         genome_len = len(genome_seq)
+@@ -763,14 +763,14 @@ def simulate_reads(index_fname, base_fna
+                 XS = "\tXS:A:{}".format(strand)
+                 TI = "\tTI:Z:{}".format(transcript_id)                
+ 
+-            print >> read_file, ">{}".format(cur_read_id)
+-            print >> read_file, read_seq
++            print(">{}".format(cur_read_id), file=read_file)
++            print(read_seq, file=read_file)
+             output = "{}\t{}\t{}\t{}\tNM:i:{}\tMD:Z:{}".format(cur_read_id, tax_id, pos + 1, cigar_str, NM, MD)
+             if paired_end:
+-                print >> read2_file, ">{}".format(cur_read_id)
+-                print >> read2_file, reverse_complement(read2_seq)
++                print(">{}".format(cur_read_id), file=read2_file)
++                print(reverse_complement(read2_seq), file=read2_file)
+                 output += "\t{}\t{}\tNM2:i:{}\tMD2:Z:{}".format(pos2 + 1, cigar2_str, NM2, MD2)
+-            print >> scm_file, output
++            print(output, file=scm_file)
+                 
+             cur_read_id += 1
+             
+@@ -865,7 +865,7 @@ if __name__ == '__main__':
+         parser.print_help()
+         exit(1)
+     if not args.dna:
+-        print >> sys.stderr, "Error: --rna is not implemented."
++        print("Error: --rna is not implemented.", file=sys.stderr)
+         exit(1)
+     # if args.dna:
+     #    args.expr_profile = "constant"
+--- a/evaluation/test/centrifuge_evaluate_mason.py
++++ b/evaluation/test/centrifuge_evaluate_mason.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/python3
+ 
+ import sys, os, subprocess, inspect
+ import platform, multiprocessing
+@@ -27,7 +27,7 @@ def compare_scm(centrifuge_out, true_out
+     higher_ranked = {}
+         
+     ancestors = set()
+-    for tax_id in taxonomy_tree.keys():
++    for tax_id in list(taxonomy_tree.keys()):
+         if tax_id in ancestors:
+             continue
+         while True:
+@@ -82,7 +82,7 @@ def compare_scm(centrifuge_out, true_out
+ 
+         fields = line.strip().split('\t')
+         if len(fields) != 3:
+-            print >> sys.stderr, "Warning: %s missing" % (line.strip())
++            print("Warning: %s missing" % (line.strip()), file=sys.stderr)
+             continue
+         read_name, tax_id = fields[1:3] 
+         # Traverse up taxonomy tree to match the given rank parameter
+@@ -117,7 +117,7 @@ def compare_scm(centrifuge_out, true_out
+             # print read_name
+ 
+     raw_unique_classified = 0
+-    for read_name, maps in db_dic.items():
++    for read_name, maps in list(db_dic.items()):
+         if len(maps) == 1 and read_name not in higher_ranked:
+             raw_unique_classified += 1
+     return classified, unique_classified, unclassified, len(db_dic), raw_unique_classified
+@@ -184,7 +184,7 @@ def evaluate(index_base,
+                       read_fname]
+ 
+     if verbose:
+-        print >> sys.stderr, ' '.join(centrifuge_cmd)
++        print(' '.join(centrifuge_cmd), file=sys.stderr)
+ 
+     out_fname = "centrifuge.output"
+     proc = subprocess.Popen(centrifuge_cmd, stdout=open(out_fname, "w"), stderr=subprocess.PIPE)
+@@ -208,12 +208,12 @@ def evaluate(index_base,
+         # if rank == "strain":
+         #    assert num_cases == num_fragment
+ 
+-        print >> sys.stderr, "\t\t%s" % rank
+-        print >> sys.stderr, "\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(classified, num_cases, float(classified) / num_cases)
+-        print >> sys.stderr, "\t\t\tprecision  : {:,} / {:,} ({:.2%})".format(classified, raw_classified, float(classified) / raw_classified)
+-        print >> sys.stderr, "\n\t\t\tfor uniquely classified "
+-        print >> sys.stderr, "\t\t\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(unique_classified, num_cases, float(unique_classified) / num_cases)
+-        print >> sys.stderr, "\t\t\t\t\tprecision  : {:,} / {:,} ({:.2%})".format(unique_classified, raw_unique_classified, float(unique_classified) / raw_unique_classified)
++        print("\t\t%s" % rank, file=sys.stderr)
++        print("\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(classified, num_cases, float(classified) / num_cases), file=sys.stderr)
++        print("\t\t\tprecision  : {:,} / {:,} ({:.2%})".format(classified, raw_classified, float(classified) / raw_classified), file=sys.stderr)
++        print("\n\t\t\tfor uniquely classified ", file=sys.stderr)
++        print("\t\t\t\t\tsensitivity: {:,} / {:,} ({:.2%})".format(unique_classified, num_cases, float(unique_classified) / num_cases), file=sys.stderr)
++        print("\t\t\t\t\tprecision  : {:,} / {:,} ({:.2%})".format(unique_classified, raw_unique_classified, float(unique_classified) / raw_unique_classified), file=sys.stderr)
+ 
+         # Calculate sum of squared residuals in abundance
+         """
+@@ -252,12 +252,12 @@ def evaluate(index_base,
+         if rank_taxID not in true_abundance:
+             true_abundance[rank_taxID] = 0.0
+         true_abundance[rank_taxID] += (reads / float(genomeSize))
+-    for taxID, reads in true_abundance.items():
++    for taxID, reads in list(true_abundance.items()):
+         true_abundance[taxID] /= total_sum
+ 
+-    print >> sys.stderr, "number of genomes:", num_genomes
+-    print >> sys.stderr, "number of species:", num_species
+-    print >> sys.stderr, "number of uniq species:", len(true_abundance)
++    print("number of genomes:", num_genomes, file=sys.stderr)
++    print("number of species:", num_species, file=sys.stderr)
++    print("number of uniq species:", len(true_abundance), file=sys.stderr)
+ 
+     read_fname = "centrifuge_data/bacteria_sim10M/bacteria_sim10M.fa"
+     summary_fname = "centrifuge.summary"
+@@ -271,14 +271,14 @@ def evaluate(index_base,
+                       read_fname]
+ 
+     if verbose:
+-        print >> sys.stderr, ' '.join(centrifuge_cmd)
++        print(' '.join(centrifuge_cmd), file=sys.stderr)
+ 
+     out_fname = "centrifuge.output"
+     proc = subprocess.Popen(centrifuge_cmd, stdout=open(out_fname, "w"), stderr=subprocess.PIPE)
+     proc.communicate()
+ 
+     calc_abundance = {}
+-    for taxID in true_abundance.keys():
++    for taxID in list(true_abundance.keys()):
+         calc_abundance[taxID] = 0.0
+     first = True
+     for line in open(summary_fname):
+@@ -296,12 +296,12 @@ def evaluate(index_base,
+         """
+ 
+     abundance_file = open("abundance.cmp", 'w')
+-    print >> abundance_file, "taxID\ttrue\tcalc\trank"
++    print("taxID\ttrue\tcalc\trank", file=abundance_file)
+     for rank in ranks:
+         if rank == "strain":
+             continue
+         true_abundance_rank, calc_abundance_rank = {}, {}
+-        for taxID in true_abundance.keys():
++        for taxID in list(true_abundance.keys()):
+             assert taxID in calc_abundance
+             rank_taxID = taxID
+             while True:
+@@ -322,11 +322,11 @@ def evaluate(index_base,
+             calc_abundance_rank[rank_taxID] += calc_abundance[taxID]
+ 
+         ssr = 0.0 # Sum of Squared Residuals
+-        for taxID in true_abundance_rank.keys():
++        for taxID in list(true_abundance_rank.keys()):
+             assert taxID in calc_abundance_rank
+             ssr += (true_abundance_rank[taxID] - calc_abundance_rank[taxID]) ** 2
+-            print >> abundance_file, "%s\t%.6f\t%.6f\t%s" % (taxID, true_abundance_rank[taxID], calc_abundance_rank[taxID], rank)
+-        print >> sys.stderr, "%s) Sum of squared residuals: %.6f" % (rank, ssr)
++            print("%s\t%.6f\t%.6f\t%s" % (taxID, true_abundance_rank[taxID], calc_abundance_rank[taxID], rank), file=abundance_file)
++        print("%s) Sum of squared residuals: %.6f" % (rank, ssr), file=sys.stderr)
+     abundance_file.close()
+ 
+ 


=====================================
debian/patches/series
=====================================
@@ -3,3 +3,4 @@ fix_auto_ptr_usage_in_gcc-7.patch
 0003-Fix-make-install-DESTDIR.patch
 hardening.patch
 no_msse2.patch
+2to3.patch


=====================================
debian/rules
=====================================
@@ -9,7 +9,7 @@ include /usr/share/dpkg/default.mk
 export DEB_BUILD_MAINT_OPTIONS = hardening=+all
 export POPCNT_CAPABILITY=0
 %:
-	dh $@
+	dh $@ --with python3
 
 override_dh_auto_install:
 	dh_auto_install -- prefix=/usr/lib/centrifuge
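
A side note on the rules change above (an assumption about debhelper
behaviour, not something this upload relies on): with debhelper-compat (= 12)
and dh-sequence-python3 in Build-Depends, dh activates the python3 addon on
its own, so an equivalent stanza could drop the explicit flag:

    # Sketch only (assumption): dh-sequence-python3 in Build-Depends already
    # enables the python3 sequence, so plain "dh $@" behaves the same as
    # "dh $@ --with python3".
    %:
    	dh $@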



View it on GitLab: https://salsa.debian.org/med-team/centrifuge/compare/3d1220dfe7fe555221b2d4d13c43eedd0ca488c2...c8c350baaea33549d4b570992524275b688a4285
