[med-svn] [microbegps] 08/13: use 2to3 to port to Python3 - seems to be successful

Andreas Tille <tille@debian.org>
Mon Sep 21 18:37:13 UTC 2015


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository microbegps.

commit 911129f1ebd26dd5ab806dd4d27f4ee1877b436c
Author: Andreas Tille <tille@debian.org>
Date:   Thu May 7 08:08:36 2015 +0000

    use 2to3 to port to Python3 - seems to be successful
---
 debian/control            |  12 +-
 debian/patches/2to3.patch | 558 ++++++++++++++++++++++++++++++++++++++++++++++
 debian/patches/series     |   1 +
 debian/rules              |   2 +-
 4 files changed, 566 insertions(+), 7 deletions(-)

diff --git a/debian/control b/debian/control
index 20c7f14..d389447 100644
--- a/debian/control
+++ b/debian/control
@@ -5,9 +5,9 @@ Maintainer: Debian Med Packaging Team <debian-med-packaging@lists.alioth.debian.
 Uploaders: Andreas Tille <tille@debian.org>
 Build-Depends: debhelper (>= 9),
                dh-python,
-               python,
-               python-setuptools,
-               python-qt4
+               python3,
+               python3-setuptools,
+               python3-pyqt4
 Standards-Version: 3.9.6
 Vcs-Browser: http://anonscm.debian.org/viewvc/debian-med/trunk/packages/microbegps/trunk/
 Vcs-Svn: svn://anonscm.debian.org/debian-med/trunk/packages/microbegps/trunk/
@@ -16,9 +16,9 @@ Homepage: http://sourceforge.net/projects/microbegps/
 Package: microbegps
 Architecture: any
 Depends: ${misc:Depends},
-         ${python:Depends},
-         python-matplotlib-venn,
-         python-qt4
+         ${python3:Depends},
+         python3-matplotlib-venn,
+         python3-pyqt4
 Description: Explorative Taxonomic Profiling Tool for Metagenomic Data
  MicrobeGPS is a bioinformatics tool for the analysis of metagenomic
  sequencing data. The goal is to profile the composition of metagenomic
diff --git a/debian/patches/2to3.patch b/debian/patches/2to3.patch
new file mode 100644
index 0000000..2b3e8b1
--- /dev/null
+++ b/debian/patches/2to3.patch
@@ -0,0 +1,558 @@
+Author: Andreas Tille <tille@debian.org>
+Last-Update: Wed, 06 May 2015 15:53:45 +0200
+Description: Patch created by 2to3 to run with Python3
+
+--- a/microbegps/gps.py
++++ b/microbegps/gps.py
+@@ -5,11 +5,11 @@ Analysis tools used by MicrobeGPS
+ 
+ import glob
+ import numpy as np
+-import valcov
++from . import valcov
+ import re
+ import os
+ import gzip
+-import taxonomy
++from . import taxonomy
+ 
+ class MappedRead:
+ 	def __init__(self):
+@@ -203,7 +203,7 @@ def filter_raw(target_table,read_table,m
+ 			if lrt > max_matches:
+ 				del_reads.append(mp)
+ 			if lrt == 1:
+-				target_table[iter(read_table[mp].matches).next()].unique += 1
++				target_table[next(iter(read_table[mp].matches))].unique += 1
+ 				unique_reads += 1
+ 	for d in del_reads:
+ 		for gn in read_table[d].matches:
+@@ -291,15 +291,15 @@ def get_reference_table(target_table, re
+ 
+ 	# calculate simple metrics over references
+ 	for r in ref_table:
+-		lengths = [x.length for x in ref_table[r].targets.itervalues()]
+-		homog = [x.cov_homog for x in ref_table[r].targets.itervalues()]
+-		qual = [x.map_qual for x in ref_table[r].targets.itervalues()]
+-		unique = sum([x.unique for x in ref_table[r].targets.itervalues()])
++		lengths = [x.length for x in ref_table[r].targets.values()]
++		homog = [x.cov_homog for x in ref_table[r].targets.values()]
++		qual = [x.map_qual for x in ref_table[r].targets.values()]
++		unique = sum([x.unique for x in ref_table[r].targets.values()])
+ 		t_len = float(sum(lengths))
+ 		
+ 		readnames = set()
+-		for t in ref_table[r].targets.itervalues():
+-			readnames.update(t.reads.iterkeys())
++		for t in ref_table[r].targets.values():
++			readnames.update(iter(t.reads.keys()))
+ 		reads = len(readnames)
+ 		
+ 		t_homog = 0.
+@@ -353,15 +353,15 @@ def get_reference_table_NCBI(target_tabl
+ 
+ 	# calculate simple metrics over references
+ 	for r in ref_table:
+-		lengths = [x.length for x in ref_table[r].targets.itervalues()]
+-		homog = [x.cov_homog for x in ref_table[r].targets.itervalues()]
+-		qual = [x.map_qual for x in ref_table[r].targets.itervalues()]
+-		unique = sum([x.unique for x in ref_table[r].targets.itervalues()])
++		lengths = [x.length for x in ref_table[r].targets.values()]
++		homog = [x.cov_homog for x in ref_table[r].targets.values()]
++		qual = [x.map_qual for x in ref_table[r].targets.values()]
++		unique = sum([x.unique for x in ref_table[r].targets.values()])
+ 		t_len = float(sum(lengths))
+ 		
+ 		readnames = set()
+-		for t in ref_table[r].targets.itervalues():
+-			readnames.update(t.reads.iterkeys())
++		for t in ref_table[r].targets.values():
++			readnames.update(iter(t.reads.keys()))
+ 		reads = len(readnames)
+ 		
+ 		t_homog = 0.
+@@ -402,9 +402,9 @@ def filter_ref_table(ref_table,read_tabl
+ 	pr('--- Updating Unique Reads information.')
+ 	for rf in ref_table:
+ 		ref_table[rf].unique = 0
+-	for rd in read_table.itervalues():
++	for rd in read_table.values():
+ 		if len(rd.matches) == 1:
+-			ref_table[iter(rd.matches).next()].unique += 1
++			ref_table[next(iter(rd.matches))].unique += 1
+ 				
+ 
+ 
+@@ -491,7 +491,7 @@ def create_groups(usrmat, thr=0.1, statu
+ 		shared = {gr:np.sum(np.logical_and(groups[gr].usrreads,usrmat[:,i])) for gr in groups}
+ 		sig = {gr:shared[gr] for gr in shared if shared[gr]>mapped*thr}
+ 		N = len(sig)
+-		print "Current reference:",i," Number of groups:",N
++		print("Current reference:",i," Number of groups:",N)
+ 		if N == 0:
+ 			# no significant similarity to existing group --> create new group
+ 			gr = Group()
+@@ -501,16 +501,16 @@ def create_groups(usrmat, thr=0.1, statu
+ 			groups[grID] = gr
+ 		elif N == 1:
+ 			# significant similarity with exactly 1 group --> assign to that group
+-			grID = sig.keys()[0]
++			grID = list(sig.keys())[0]
+ 			groups[grID].members.append(i)
+ 			groups[grID].usrreads = np.logical_or(groups[grID].usrreads,usrmat[:,i])
+ 		else:
+ 			# significant similarity with > 1 groups: assign to first group
+-			grID = sig.keys()[0]
++			grID = list(sig.keys())[0]
+ 			groups[grID].members.append(i)
+ 			groups[grID].usrreads = np.logical_or(groups[grID].usrreads,usrmat[:,i])
+ 			# ... and add other groups
+-			for otherID in sig.keys()[1:]:
++			for otherID in list(sig.keys())[1:]:
+ 				for m in groups[otherID].members:
+ 					groups[grID].members.append(m)
+ 				groups[grID].usrreads = np.logical_or(groups[grID].usrreads,groups[otherID].usrreads)
+@@ -530,7 +530,7 @@ def create_groups_2(usrmat, allmat, thr=
+ 	
+ 	for i in range(rfs):
+ 		if status: status('--- Grouping reference %i of %i'%(i+1,rfs))
+-		print "Current reference:",i," Number of groups:",len(groups)
++		print("Current reference:",i," Number of groups:",len(groups))
+ 		sig = dict()
+ 
+ 		usr_reads = np.sum(usrmat[:,i])
+@@ -558,13 +558,13 @@ def create_groups_2(usrmat, allmat, thr=
+ 			groups[grID] = gr
+ 		elif N == 1:
+ 			# significant similarity with exactly 1 group --> assign to that group
+-			grID = sig.keys()[0]
++			grID = list(sig.keys())[0]
+ 			groups[grID].members.append(i)
+ 			groups[grID].usrreads = np.logical_or(groups[grID].usrreads,usrmat[:,i])
+ 			groups[grID].allreads = np.logical_or(groups[grID].allreads,allmat[:,i])
+ 		else:
+ 			# significant similarity with > 1 groups: assign to best group
+-			grID = sig.keys()[np.argmax(sig.values())]
++			grID = list(sig.keys())[np.argmax(list(sig.values()))]
+ 			groups[grID].members.append(i)
+ 			groups[grID].usrreads = np.logical_or(groups[grID].usrreads,usrmat[:,i])
+ 			groups[grID].allreads = np.logical_or(groups[grID].allreads,allmat[:,i])
+@@ -588,7 +588,7 @@ def create_groups_dc(usrmat, allmat, n2i
+ 		item = level1.setdefault(name[0:3], list())
+ 		item.append(n2i[name])
+ 	level2 = dict()
+-	for b,bucket in enumerate(level1.itervalues()):
++	for b,bucket in enumerate(level1.values()):
+ 		l1_groups = dict()
+ 		for i in bucket:
+ 			sig = dict()
+@@ -618,7 +618,7 @@ def create_groups_dc(usrmat, allmat, n2i
+ 				l1_groups[l1ID] = gr
+ 			else:
+ 				# significant similarity with > 1 groups: assign to best group
+-				l1ID = sig.keys()[np.argmax(sig.values())]
++				l1ID = list(sig.keys())[np.argmax(list(sig.values()))]
+ 				l1_groups[l1ID].members.append(i)
+ 				l1_groups[l1ID].usrreads = np.logical_or(l1_groups[l1ID].usrreads,usrmat[:,i])
+ 				l1_groups[l1ID].allreads = np.logical_or(l1_groups[l1ID].allreads,allmat[:,i])		
+@@ -650,7 +650,7 @@ def create_groups_dc(usrmat, allmat, n2i
+ 			groups[grID] = level2[l1ID]
+ 		else:
+ 			# significant similarity with > 1 groups: assign to best group
+-			grID = sig.keys()[np.argmax(sig.values())]
++			grID = list(sig.keys())[np.argmax(list(sig.values()))]
+ 			groups[grID].members.extend(level2[l1ID].members)
+ 			groups[grID].usrreads = np.logical_or(groups[grID].usrreads,level2[l1ID].usrreads)
+ 			groups[grID].allreads = np.logical_or(groups[grID].allreads,level2[l1ID].allreads)
+@@ -671,19 +671,19 @@ def enrich_groups(groups,references,read
+ 		for m in groups[g]:
+ 			egroups[g].members[i2n[m]] = references[i2n[m]]
+ 			for t in references[i2n[m]].targets:
+-				readnames.update(references[i2n[m]].targets[t].reads.iterkeys())
++				readnames.update(iter(references[i2n[m]].targets[t].reads.keys()))
+ 			t_len += float(references[i2n[m]].length)
+ 		
+ 		egroups[g].reads = len(readnames)
+-		egroups[g].max_val = max([m.validity for m in egroups[g].members.itervalues()])
+-		egroups[g].cov = sum([m.coverage*m.length/t_len for m in egroups[g].members.itervalues()])
+-		egroups[g].cov_homog = sum([m.cov_homog*m.length/t_len for m in egroups[g].members.itervalues()])
+-		egroups[g].map_qual = sum([m.map_qual*m.length/t_len for m in egroups[g].members.itervalues()])
++		egroups[g].max_val = max([m.validity for m in egroups[g].members.values()])
++		egroups[g].cov = sum([m.coverage*m.length/t_len for m in egroups[g].members.values()])
++		egroups[g].cov_homog = sum([m.cov_homog*m.length/t_len for m in egroups[g].members.values()])
++		egroups[g].map_qual = sum([m.map_qual*m.length/t_len for m in egroups[g].members.values()])
+ 		
+ 		# count the group-unique reads, i.e. reads mapping only to that group
+ 		group_unique = set()
+-		for mbm in egroups[g].members.itervalues():
+-			for trg in mbm.targets.itervalues():
++		for mbm in egroups[g].members.values():
++			for trg in mbm.targets.values():
+ 				for rd in trg.reads:
+ 					if rd in group_unique:
+ 						continue
+--- a/microbegps/gui.py
++++ b/microbegps/gui.py
+@@ -7,7 +7,7 @@ import sys, math
+ import pkgutil
+ import pkg_resources
+ import threading
+-import cPickle
++import pickle
+ 
+ from PyQt4 import QtGui, QtCore
+ 
+@@ -19,11 +19,11 @@ import matplotlib_venn as venn
+ try:
+ 	import microbegps.gps as gps
+ except ImportError:
+-	import gps
+-import taxonomy
+-import modules
++	from . import gps
++from . import taxonomy
++from . import modules
+ 
+-from __version__ import __version__,__url__
++from .__version__ import __version__,__url__
+ 
+ 
+ #from gui.help_texts import HelpTexts as HelpTexts
+@@ -531,7 +531,7 @@ class Pipeline(QtGui.QWidget):
+ 
+ 				from multiprocessing.pool import ThreadPool
+ 				pool = ThreadPool(5)
+-				pool.map(gps.calculate_valcov_one,p.ref_table.itervalues())
++				pool.map(gps.calculate_valcov_one,iter(p.ref_table.values()))
+ 				pool.close()
+ 				
+ 				
+@@ -714,7 +714,7 @@ class Pipeline(QtGui.QWidget):
+ 			try:
+ 				from multiprocessing.pool import ThreadPool
+ 				pool = ThreadPool(5)
+-				pool.map(gps.calculate_valcov_one,ref_table.itervalues())
++				pool.map(gps.calculate_valcov_one,iter(ref_table.values()))
+ 				pool.close()
+ 				#GpsTools.calculate_valcov(p.ref_table,printer)
+ 				def custom_filt2(ref):
+@@ -892,7 +892,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 				self.pr('<b><font color="DarkRed">Failed loading module %s</font></b><br>Message: <i>%s</i>'%(mod_name,e.message))			
+ 		self.modules = modules_d
+ 		if len(self.modules) > 0:
+-			self.pr('Loaded modules: <i>%s</i>'%(', '.join(self.modules.iterkeys())))
++			self.pr('Loaded modules: <i>%s</i>'%(', '.join(iter(self.modules.keys()))))
+ 
+ 	def initUI(self):
+ 		self.pipelineGui = Pipeline(self)
+@@ -1123,7 +1123,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 			# Create entry for the group
+ 			n_mem = len(grp.members)
+ 			# find a suitable name. Try to use LCA of all supporting references.
+-			taxids = [ref.name for ref in grp.members.itervalues()]
++			taxids = [ref.name for ref in grp.members.values()]
+ 			cand_name = taxonomy.find_lowest_common_ancestor_name(taxids, self.taxonomy_nodes, self.taxonomy_names)
+ 			if not cand_name:
+ 				#  find member with most unique reads -> use as representative
+@@ -1229,7 +1229,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 				self.pr("Collecting all reads for "+ref_name,False)
+ 				# collect all read names
+ 				reads = dict()
+-				for trg in self.references[ref_name].targets.itervalues():
++				for trg in self.references[ref_name].targets.values():
+ 					for rd in trg.reads:
+ 						reads[rd] = trg.reads[rd][2]
+ 				for rd in reads:
+@@ -1244,7 +1244,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 				self.pr("Collecting all unique reads for "+ref_name,False)
+ 				# collect all read names
+ 				reads = dict()
+-				for trg in self.references[ref_name].targets.itervalues():
++				for trg in self.references[ref_name].targets.values():
+ 					for rd in trg.reads:
+ 						if len(self.reads[str(rd)].matches) == 1:
+ 							reads[rd] = trg.reads[rd][2]
+@@ -1259,7 +1259,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 				self.pr("Collecting all matches of read "+rd_name,False)
+ 				for mtc in self.reads[rd_name].matches:
+ 					map_qual = 1
+-					for trg in self.references[mtc].targets.itervalues():
++					for trg in self.references[mtc].targets.values():
+ 						if rd_name in trg.reads:
+ 							map_qual = trg.reads[rd_name][2]
+ 							break
+@@ -1284,7 +1284,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 				self.pr("Collecting all genomes sharing reads with "+ref_name,False)
+ 				group_id = self.ref2group[ref_name]
+ 				related = dict()
+-				for trg in self.references[ref_name].targets.itervalues():
++				for trg in self.references[ref_name].targets.values():
+ 					for rd in trg.reads:
+ 						for mtc in self.reads[rd].matches:
+ 							if self.ref2group[mtc] != group_id:
+@@ -1333,7 +1333,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 					quals = []
+ 					uquals = []
+ 					name = str(item.text(0))
+-					for trg in self.references[name].targets.itervalues():
++					for trg in self.references[name].targets.values():
+ 						for rd in trg.reads:
+ 							if len(self.reads[rd].matches) == 1:
+ 								uquals.append(trg.reads[rd][2])
+@@ -1356,7 +1356,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 					quals = []
+ 					uquals = []
+ 					name = str(item.parent().text(0))
+-					for trg in self.references[name].targets.itervalues():
++					for trg in self.references[name].targets.values():
+ 						for rd in trg.reads:
+ 							if len(self.reads[rd].matches) == 1:
+ 								uquals.append(trg.reads[rd][2])
+@@ -1461,7 +1461,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 					name = str(item.text(0))
+ 					taxid = self.references[name].name
+ 					reads[taxid] = set()
+-					for trg in self.references[name].targets.itervalues():
++					for trg in self.references[name].targets.values():
+ 						reads[taxid].update(trg.reads) # add all read names to the set
+ 				elif item.i_type == "candidate":
+ 					for ref in range(item.childCount()):
+@@ -1470,10 +1470,10 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 						taxid = self.references[name].name
+ 						if not taxid in reads:
+ 							reads[taxid] = set()
+-							for trg in self.references[name].targets.itervalues():
++							for trg in self.references[name].targets.values():
+ 								reads[taxid].update(trg.reads) # add all read names to the set
+ 			# build the lineage of every taxid
+-			taxids = reads.keys()
++			taxids = list(reads.keys())
+ 			for taxid in taxids:
+ 				lineage = [taxid]
+ 				while True:
+@@ -1490,7 +1490,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 
+ 		if self.enablePlottingBox.isChecked():
+ 			# now color the tree
+-			for taxid,item in self.taxid_to_item.iteritems():
++			for taxid,item in self.taxid_to_item.items():
+ 				if not taxid == 1:
+ 					item.setBackground(0,QtGui.QBrush(QtGui.QColor(255,255,255)))
+ 					item.setText(1,'')
+@@ -1515,11 +1515,11 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 					nm1 = str(sel_items[0].text(0))
+ 					nm2 = str(sel_items[1].text(0))
+ 					reads1 = set()
+-					for t in self.references[nm1].targets.itervalues():
+-						reads1.update(t.reads.keys())
++					for t in self.references[nm1].targets.values():
++						reads1.update(list(t.reads.keys()))
+ 					reads2 = set()
+-					for t in self.references[nm2].targets.itervalues():
+-						reads2.update(t.reads.keys())
++					for t in self.references[nm2].targets.values():
++						reads2.update(list(t.reads.keys()))
+ 					nreads1 = len(reads1)
+ 					nreads2 = len(reads2)
+ 					shared = len(reads1.intersection(reads2))
+@@ -1536,14 +1536,14 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 					nm2 = str(sel_items[1].text(0))
+ 					nm3 = str(sel_items[2].text(0))
+ 					reads1 = set()
+-					for t in self.references[nm1].targets.itervalues():
+-						reads1.update(t.reads.keys())
++					for t in self.references[nm1].targets.values():
++						reads1.update(list(t.reads.keys()))
+ 					reads2 = set()
+-					for t in self.references[nm2].targets.itervalues():
+-						reads2.update(t.reads.keys())
++					for t in self.references[nm2].targets.values():
++						reads2.update(list(t.reads.keys()))
+ 					reads3 = set()
+-					for t in self.references[nm3].targets.itervalues():
+-						reads3.update(t.reads.keys())
++					for t in self.references[nm3].targets.values():
++						reads3.update(list(t.reads.keys()))
+ 					reg100 = len(reads1.difference(reads2).difference(reads3))
+ 					reg010 = len(reads2.difference(reads1).difference(reads3))
+ 					reg110 = len(reads1.intersection(reads2).difference(reads3))
+@@ -1629,7 +1629,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 							'reads':self.reads,
+ 							'settings_pipeline':self.settings_pipeline,
+ 							'settings_analyzer':self.settings_analyzer}
+-					cPickle.dump(s_data,open(fname,'wb'),protocol=-1)
++					pickle.dump(s_data,open(fname,'wb'),protocol=-1)
+ 				except Exception as e:
+ 					self.pr('Error while saving file: '+e.message)
+ 					raise
+@@ -1648,7 +1648,7 @@ class GPSAnalyzer(QtGui.QMainWindow):
+ 			self.pr("Attention: this may take a while!")
+ 			def loadthisfile():
+ 				try:
+-					settings = cPickle.load(open(fname,'rb'))
++					settings = pickle.load(open(fname,'rb'))
+ 					assert type(settings) == dict
+ 					try:
+ 						sgroups = settings['candidates']
+--- a/microbegps/__init__.py
++++ b/microbegps/__init__.py
+@@ -1,3 +1,3 @@
+-from __version__ import __version__,__url__
++from .__version__ import __version__,__url__
+ #__version__='1.0.0'
+ #__url__='https://sourceforge.net/p/microbegps'
+\ No newline at end of file
+--- a/microbegps/modules/composition_analysis.py
++++ b/microbegps/modules/composition_analysis.py
+@@ -79,15 +79,15 @@ class GPSModule:
+ 		
+ 		# collect all read names for each reference
+ 		self.reads = dict()
+-		for ref in self.GPS.references.itervalues():
++		for ref in self.GPS.references.values():
+ 			taxid = ref.name
+ 			self.reads[taxid] = set()
+-			for trg in ref.targets.itervalues():
++			for trg in ref.targets.values():
+ 				self.reads[taxid].update(trg.reads) # add all read names to the set
+ 				
+ 		self.rank_tids = dict() # set of TaxIDs for a specific rank
+ 		# calculate the read sets at the higher taxonomic ranks
+-		for taxid in self.reads.keys():
++		for taxid in list(self.reads.keys()):
+ 			# trace the lineage of each taxid and propagate the reads to the higher ranks
+ 			lineage = [taxid]
+ 			while True:
+@@ -104,7 +104,7 @@ class GPSModule:
+ 		
+ 		# we are done with the analysis. Fill the Combo Box widgets accordingly
+ 		self.taxRankBox.clear()
+-		self.taxRankBox.addItems(self.rank_tids.keys())
++		self.taxRankBox.addItems(list(self.rank_tids.keys()))
+ 		self.taxRankBox.setEnabled(True)
+ 		self.taxGroupBox.setEnabled(True)
+ 		self.getFromTree.setEnabled(True)
+@@ -128,7 +128,7 @@ class GPSModule:
+ 		if currentItem < 0:
+ 			return
+ 		taxon = str(self.taxGroupBox.currentText())
+-		children = [tid for tid in self.reads.keys() if self.GPS.taxonomy_names.get(self.GPS.taxonomy_nodes.get(tid))==taxon]
++		children = [tid for tid in list(self.reads.keys()) if self.GPS.taxonomy_names.get(self.GPS.taxonomy_nodes.get(tid))==taxon]
+ 		self.taxSelectedText.setText('Found %i taxa below %s'%(len(children),taxon))
+ 		self.selectedTaxa = children
+ 		self.selectedParent = taxon
+@@ -138,7 +138,7 @@ class GPSModule:
+ 			return
+ 		self.GPS.graphicsTab.setCurrentIndex(1)
+ 		taxon = self.GPS.phyloTreeWidget.currentItem().text(0)
+-		children = [tid for tid in self.reads.keys() if self.GPS.taxonomy_names.get(self.GPS.taxonomy_nodes.get(tid))==taxon]
++		children = [tid for tid in list(self.reads.keys()) if self.GPS.taxonomy_names.get(self.GPS.taxonomy_nodes.get(tid))==taxon]
+ 		self.taxSelectedText.setText('Found %i taxa below %s'%(len(children),taxon))
+ 		self.selectedTaxa = children
+ 		self.selectedParent = taxon
+@@ -148,7 +148,7 @@ class GPSModule:
+ 			return
+ 		tax_names = [self.GPS.taxonomy_names.get(tid,tid) for tid in self.selectedTaxa]
+ 		tax_abundances = [len(self.reads[tid]) for tid in self.selectedTaxa]
+-		sort_order = sorted(range(len(tax_abundances)), key=tax_abundances.__getitem__)[::-1]
++		sort_order = sorted(list(range(len(tax_abundances))), key=tax_abundances.__getitem__)[::-1]
+ 		tax_names = [tax_names[i] for i in sort_order]
+ 		for i in range(7,len(tax_names)):
+ 			tax_names[i] = ''
+--- a/microbegps/modules/export_table.py
++++ b/microbegps/modules/export_table.py
+@@ -98,7 +98,7 @@ class GPSModule:
+ 			ofile.write(str(i+1))
+ 			
+ 			# Candidate name: Try to use LCA of all supporting references.
+-			taxids = [ref.name for ref in grp.members.itervalues()]
++			taxids = [ref.name for ref in grp.members.values()]
+ 			cand_name = taxonomy.find_lowest_common_ancestor_name(taxids, self.GPS.taxonomy_nodes, self.GPS.taxonomy_names)
+ 			if not cand_name:
+ 				#  find member with most unique reads -> use as representative
+--- a/microbegps/taxonomy.py
++++ b/microbegps/taxonomy.py
+@@ -14,7 +14,7 @@ def parse_nodes_dmp(node_file):
+ 	for line in node_file:
+ 		fields = line.rstrip('\t|\n').split('\t|\t')
+ 		if len(fields) < 4:
+-			print 'skipped line',line
++			print('skipped line',line)
+ 			continue
+ 		taxid = int(fields[0])
+ 		parent_id = int(fields[1])
+@@ -30,7 +30,7 @@ def parse_names_dmp(names_file):
+ 	for line in names_file:
+ 		fields = line.rstrip('\t|\n').split('\t|\t')
+ 		if len(fields) != 4:
+-			print 'skipped line',line
++			print('skipped line',line)
+ 			continue
+ 		taxid = int(fields[0])
+ 		name = fields[1]
+@@ -123,7 +123,7 @@ def candidates_to_LCA_tree(groups, nodes
+ 	num_reads = []
+ 	for grp in groups:
+ 		# collect all taxids
+-		taxids = [m.name for m in grp.members.itervalues()]
++		taxids = [m.name for m in grp.members.values()]
+ 		lca = find_lowest_common_ancestor(taxids, nodes)
+ 		if lca:
+ 			lca_list.append(lca)
+--- a/microbegps/valcov.py
++++ b/microbegps/valcov.py
+@@ -123,9 +123,9 @@ class NBinom(Distribution):
+ 								  maxiter=10000)
+ 				self._p2 = (self._p1)/(self._p1+mean)
+ 			except:
+-				print "Warning: MLE for negative binomial failed. Using MOM."
++				print("Warning: MLE for negative binomial failed. Using MOM.")
+ 				if var < mean:
+-					print "Warning: var < mean"
++					print("Warning: var < mean")
+ 					var = 1.01*mean
+ 				self._p1 = mean**2 / (var - mean)
+ 				self._p2 = mean / var
+@@ -185,7 +185,7 @@ class NbTail(TailDistribution):
+ 		if isinstance(nbinom, NBinom):
+ 			self._parent = nbinom
+ 		else:
+-			raise(Exception("NbTail must be connected to a NBinom object"))
++			raise Exception
+ 	
+ 	def pmf(self, x):
+ 		if np.isscalar(x) and x == 0:
+@@ -222,7 +222,7 @@ class PoissonTail(TailDistribution):
+ 		if isinstance(poisson, Poisson):
+ 			self._parent = poisson
+ 		else:
+-			raise(Exception("PoissonTail must be connected to a Poisson object"))
++			raise Exception
+ 	
+ 	def pmf(self, x):
+ 		if self._p1 < 2.:
+@@ -322,9 +322,9 @@ def init_gamma(mixture_model, dataset):
+ class DS_cov:
+ 	def __init__(self,ref):
+ 		""" fill dataset with data provided in Reference ref """
+-		t_lns = [t.length for t in ref.targets.itervalues()]
++		t_lns = [t.length for t in ref.targets.values()]
+ 		# r_info contains read position and length for each read
+-		r_info = [[[r[0],r[1]] for r in t.reads.itervalues()] for t in ref.targets.itervalues()]
++		r_info = [[[r[0],r[1]] for r in t.reads.values()] for t in ref.targets.values()]
+ 
+ 		# pile up coverage for each target and summarize in one array
+ 		cov = [np.zeros((l,),dtype=np.int16) for l in t_lns]
+@@ -346,9 +346,9 @@ class DS_cov:
+ class DS_dst:
+ 	def __init__(self,ref):
+ 		""" fill dataset with data provided in Reference ref """
+-		t_lns = [t.length for t in ref.targets.itervalues()]
+-		r_pos = [[r[0] for r in t.reads.itervalues()] for t in ref.targets.itervalues()]
+-		r_len = [[r[1] for r in t.reads.itervalues()] for t in ref.targets.itervalues()]
++		t_lns = [t.length for t in ref.targets.values()]
++		r_pos = [[r[0] for r in t.reads.values()] for t in ref.targets.values()]
++		r_len = [[r[1] for r in t.reads.values()] for t in ref.targets.values()]
+ 		r_len = np.concatenate([np.array(ls) for ls in r_len])
+ 		positions = [np.sort(np.array(ps)) for ps in r_pos]
+ 		distances = np.concatenate([p[1:] - p[:-1] for p in positions])
diff --git a/debian/patches/series b/debian/patches/series
new file mode 100644
index 0000000..998d10f
--- /dev/null
+++ b/debian/patches/series
@@ -0,0 +1 @@
+2to3.patch
diff --git a/debian/rules b/debian/rules
index f76d975..12c5036 100755
--- a/debian/rules
+++ b/debian/rules
@@ -4,5 +4,5 @@ export DH_VERBOSE := 1
 export PYBUILD_NAME=microbegps
 
 %:
-	dh $@ --with python2 --buildsystem=pybuild
+	dh $@ --with python3 --buildsystem=pybuild
 

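For readers skimming the patch: the hunks above are stock 2to3 rewrites
(print statement -> print(), cPickle -> pickle, implicit relative imports
-> "from . import", iter(d).next() -> next(iter(d))). The dict rewrites
deserve a note: Python 3 dropped iterkeys()/itervalues()/iteritems()
because the plain view methods are already lazy, so 2to3 substitutes
keys()/values()/items() and wraps them in list() wherever a snapshot or
indexing is needed (e.g. list(sig.keys())[0] in gps.py). A minimal
Python 3 sketch, using hypothetical data not taken from the package:

    targets = {'chr1': 3, 'chr2': 5}

    # Iterating a view directly is fine; no iter* variant exists or
    # is needed in Python 3.
    total = sum(targets.values())

    # Views are not indexable, hence 2to3's list() wrapper.
    first = list(targets.keys())[0]

    # dicts preserve insertion order on Python 3.7+.
    assert total == 8 and first == 'chr1'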
-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/microbegps.git


