[med-svn] [python-pbcore] 01/04: Support Python3 using 2to3 rather than patching the code base

Afif Elghraoui afif-guest at moszumanska.debian.org
Wed Aug 5 08:59:53 UTC 2015


This is an automated email from the git hooks/post-receive script.

afif-guest pushed a commit to branch master
in repository python-pbcore.

commit b8e186fc6fa656261769e72f4632e1b5e8ec74af
Author: Afif Elghraoui <afif at ghraoui.name>
Date:   Wed Aug 5 01:37:10 2015 -0700

    Support Python3 using 2to3 rather than patching the code base
    
    This will be easier to manage, hopefully, in the absence of upstream
    support.
---
 debian/patches/modernize-python.patch | 1780 ---------------------------------
 debian/patches/series                 |    2 +-
 debian/patches/support-python3.patch  |   38 +
 3 files changed, 39 insertions(+), 1781 deletions(-)
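
[Editor's note, illustration only: the body of the new debian/patches/support-python3.patch is not included in this part of the email; only the removal of modernize-python.patch is shown below. With the setuptools of that era, build-time 2to3 conversion was conventionally switched on by patching setup.py along these lines (a minimal sketch of that mechanism, not necessarily the exact content of the Debian patch):

    # Sketch: enable automatic lib2to3 conversion during the build.
    # The "use_2to3" flag existed in setuptools at the time (it has since
    # been removed in setuptools 58+); whether the Debian patch uses this
    # flag or drives 2to3 from debian/rules instead cannot be determined
    # from the diffstat above.
    from setuptools import setup, find_packages

    setup(
        name="pbcore",
        packages=find_packages(),
        use_2to3=True,   # run 2to3 over the sources when building for Python 3
    )
]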

diff --git a/debian/patches/modernize-python.patch b/debian/patches/modernize-python.patch
deleted file mode 100644
index f1400a5..0000000
--- a/debian/patches/modernize-python.patch
+++ /dev/null
@@ -1,1780 +0,0 @@
-Description: Modernize Python code
- These patches were mostly generated by running
- python-modernize on the source tree. setup.py was then
- slightly modified by hand.
-Author: Afif Elghraoui <afif at ghraoui.name>
-Forwarded: no
-Last-Update: 2015-06-28
----
-This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
---- python-pbcore.orig/doc/conf.py
-+++ python-pbcore/doc/conf.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- # -*- coding: utf-8 -*-
- #
- # pbcore documentation build configuration file, created by
-@@ -13,7 +14,7 @@
- 
- import sys, os
- globals = {}
--execfile("../pbcore/__init__.py", globals)
-+exec(compile(open("../pbcore/__init__.py").read(), "../pbcore/__init__.py", 'exec'), globals)
- __VERSION__ = globals["__VERSION__"]
- 
- 
---- python-pbcore.orig/pbcore/chemistry/chemistry.py
-+++ python-pbcore/pbcore/chemistry/chemistry.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -54,7 +55,7 @@
-             mappings[(bindingKit, sequencingKit, softwareVersion)] = sequencingChemistry
-         return mappings
-     except:
--        raise ChemistryLookupError, "Error loading chemistry mapping xml"
-+        raise ChemistryLookupError("Error loading chemistry mapping xml")
- 
- def _loadBarcodeMappings():
-     mappingFname = resource_filename(Requirement.parse('pbcore'),'pbcore/chemistry/resources/mapping.xml')
-@@ -80,8 +81,7 @@
-         instrumentControlVersion = ".".join(verComponents)
-         return (bindingKit, sequencingKit, instrumentControlVersion)
-     except Exception as e:
--        raise ChemistryLookupError, \
--            ("Could not find, or extract chemistry information from, %s" % (metadataXmlPath,))
-+        raise ChemistryLookupError("Could not find, or extract chemistry information from, %s" % (metadataXmlPath,))
- 
- def decodeTriple(bindingKit, sequencingKit, softwareVersion):
-     """
---- python-pbcore.orig/pbcore/data/__init__.py
-+++ python-pbcore/pbcore/data/__init__.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -29,6 +30,8 @@
- #################################################################################
- 
- from pkg_resources import Requirement, resource_filename
-+from six.moves import map
-+from six.moves import range
- 
- DATA_FILES = {'aligned_reads_1.cmp.h5':
-                   ['m110818_075520_42141_c100129202555500000315043109121112_s1_p0.bas.h5',
-@@ -79,8 +82,8 @@
-     cmp.h5 file.
-     '''
-     return [{'cmph5' : _getAbsPath(cmph5),
--             'bash5s': map(_getAbsPath, bash5s)}
--            for cmph5, bash5s in DATA_FILES.items()]
-+             'bash5s': list(map(_getAbsPath, bash5s))}
-+            for cmph5, bash5s in list(DATA_FILES.items())]
- 
- def getCmpH5AndBas():
-     '''
-@@ -137,11 +140,11 @@
-     """
-     Returns a list of FOFN files
-     """
--    return map(_getAbsPath,
-+    return list(map(_getAbsPath,
-                ["1.4_bas_files.fofn",
-                 "2.0_bax_files.fofn",
-                 "2.1_bax_files.fofn",
--                "2.1_ccs_files.fofn"])
-+                "2.1_ccs_files.fofn"]))
- 
- def getBcFofn():
-     return _getAbsPath("bc_files.fofn")
---- python-pbcore.orig/pbcore/io/BarcodeH5Reader.py
-+++ python-pbcore/pbcore/io/BarcodeH5Reader.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################$$
- # Copyright (c) 2011,2012, Pacific Biosciences of California, Inc.
- #
-@@ -31,6 +32,9 @@
- import numpy as n
- 
- from pbcore.io.FofnIO import readFofn
-+import six
-+from six.moves import range
-+from functools import reduce
- 
- BARCODE_DELIMITER = "--"
- BC_DS_PATH        = "BarcodeCalls/best"
-@@ -99,7 +103,7 @@
-     """Write a barcode file from a list of labeled ZMWs. In addition
-     to labeledZmws, this function takes a
-     pbbarcode.BarcodeLabeler."""
--    bestScores = map(lambda z: z.toBestRecord(), labeledZmws)
-+    bestScores = [z.toBestRecord() for z in labeledZmws]
-     outDta = n.vstack(bestScores)
-     outH5 = h5.File(outFile, 'a')
- 
-@@ -127,9 +131,9 @@
-         def makeRecord(lZmw):
-             zmws = makeArray(nBarcodes * lZmw.nScored, lZmw.holeNumber)
-             adapters = n.concatenate([makeArray(nBarcodes, i) for i in \
--                                          xrange(1, lZmw.nScored + 1)])
--            idxs = n.concatenate([range(0, nBarcodes) for i in \
--                                      xrange(0, lZmw.nScored)])
-+                                          range(1, lZmw.nScored + 1)])
-+            idxs = n.concatenate([list(range(0, nBarcodes)) for i in \
-+                                      range(0, lZmw.nScored)])
-             scores = n.concatenate(lZmw.allScores)
-             return n.transpose(n.vstack((zmws, adapters, idxs, scores)))
- 
-@@ -163,12 +167,12 @@
-         self._movieName = self.bestDS.attrs['movieName']
-         # zmw => LabeledZmw
-         labeledZmws = [LabeledZmw.fromBestRecord(self.bestDS[i,:]) for i in
--                       xrange(0, self.bestDS.shape[0])]
-+                       range(0, self.bestDS.shape[0])]
-         self.labeledZmws = dict([(lZmw.holeNumber, lZmw) for lZmw in labeledZmws])
- 
-         # barcode => LabeledZmws
-         self.bcLabelToLabeledZmws = {l:[] for l in self.barcodeLabels}
--        for lZmw in self.labeledZmws.values():
-+        for lZmw in list(self.labeledZmws.values()):
-             d = self.bcLabelToLabeledZmws[self.barcodeLabels[lZmw.bestIdx]]
-             d.append(lZmw)
- 
-@@ -211,7 +215,7 @@
-             return (n.min(x), n.max(x))
-         # these aren't the ranges of ZMWs, but the ranges for the
-         # scored ZMWs.
--        self._bins = map(lambda z : rng(z.holeNumbers), self._parts)
-+        self._bins = [rng(z.holeNumbers) for z in self._parts]
- 
-     def choosePart(self, holeNumber):
-         for i,b in enumerate(self._bins):
-@@ -239,8 +243,7 @@
- 
-     def labeledZmwsFromBarcodeLabel(self, bcLabel):
-         lzmws = reduce(lambda x,y: x + y,
--                      map(lambda z: z.labeledZmwsFromBarcodeLabel(bcLabel),
--                          self._parts))
-+                      [z.labeledZmwsFromBarcodeLabel(bcLabel) for z in self._parts])
-         return sorted(lzmws, key=lambda z: z.holeNumber)
- 
-     def __iter__(self):
-@@ -256,7 +259,7 @@
-             return self.labeledZmwsFromBarcodeLabel(item)
-         elif isinstance(item, slice):
-             return [ self.labeledZmwFromHoleNumber(self, item)
--                    for r in xrange(*item.indices(len(self)))]
-+                    for r in range(*item.indices(len(self)))]
-         elif isinstance(item, list) or isinstance(item, n.ndarray):
-             if len(item) == 0:
-                 return []
-@@ -266,7 +269,7 @@
-                     return [ self.labeledZmwFromHoleNumber(r) for r in item ]
-                 elif entryType == bool or issubclass(entryType, n.bool_):
-                     return [ self.labeledZmwFromHoleNumber(r) for r in n.flatnonzero(item) ]
--        raise TypeError, "Invalid type for BasH5Reader slicing"
-+        raise TypeError("Invalid type for BasH5Reader slicing")
- 
- 
- class BarcodeH5Fofn(object):
-@@ -290,7 +293,7 @@
-                 self._byMovie[bc.movieName].append(bc)
- 
-         self.mpReaders = { movieName: parts[0] if len(parts) == 1 else MPBarcodeH5Reader(parts)
--                           for movieName, parts in self._byMovie.iteritems() }
-+                           for movieName, parts in six.iteritems(self._byMovie) }
- 
-     @property
-     def holeNumbers(self):
-@@ -298,7 +301,7 @@
-                           for hn in reader.holeNumbers])
-     @property
-     def movieNames(self):
--        return self.mpReaders.keys()
-+        return list(self.mpReaders.keys())
-     @property
-     def barcodeLabels(self):
-         return self._bcH5s[0].barcodeLabels
-@@ -309,8 +312,7 @@
- 
-     def labeledZmwsFromBarcodeLabel(self, item):
-         lzmws = reduce(lambda x,y: x + y,
--                      map(lambda z: z.labeledZmwsFromBarcodeLabel(item),
--                          self._bcH5s))
-+                      [z.labeledZmwsFromBarcodeLabel(item) for z in self._bcH5s])
-         return sorted(lzmws, key=lambda z: z.holeNumber )
- 
-     def labeledZmwFromName(self, item):
---- python-pbcore.orig/pbcore/io/BasH5IO.py
-+++ python-pbcore/pbcore/io/BasH5IO.py
-@@ -1,3 +1,8 @@
-+from __future__ import absolute_import
-+import six
-+from six.moves import map
-+from six.moves import range
-+from six.moves import zip
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -58,7 +63,7 @@
-     return e - b
- 
- def removeNones(lst):
--    return filter(lambda x: x!=None, lst)
-+    return [x for x in lst if x!=None]
- 
- # ZMW hole Types
- SEQUENCING_ZMW = 0
-@@ -215,7 +220,7 @@
-         insert, used to forming the CCS consensus.
-         """
-         if not self.baxH5.hasConsensusBasecalls:
--            raise ValueError, "No CCS reads in this file"
-+            raise ValueError("No CCS reads in this file")
-         return self.baxH5._ccsNumPasses[self.index]
- 
-     #
-@@ -228,7 +233,7 @@
-         specified extent of the polymerase read.
-         """
-         if not self.baxH5.hasRawBasecalls:
--            raise ValueError, "No raw reads in this file"
-+            raise ValueError("No raw reads in this file")
-         hqStart, hqEnd = self.hqRegion
-         readStart = hqStart if readStart is None else readStart
-         readEnd   = hqEnd if readEnd is None else readEnd
-@@ -248,7 +253,7 @@
-             HQ region.
-         """
-         if not self.baxH5.hasRawBasecalls:
--            raise ValueError, "No raw reads in this file"
-+            raise ValueError("No raw reads in this file")
-         offsets = self.baxH5._offsetsByHole[self.holeNumber]
-         numEvent = offsets[1] - offsets[0]
-         polymeraseBegin = 0
-@@ -269,7 +274,7 @@
-             HQ region.
-         """
-         if not self.baxH5.hasRawBasecalls:
--            raise ValueError, "No raw reads in this file"
-+            raise ValueError("No raw reads in this file")
-         return [ self.read(readStart, readEnd)
-                  for (readStart, readEnd) in self.unclippedInsertRegions ]
- 
-@@ -281,7 +286,7 @@
-         production code.
-         """
-         if not self.baxH5.hasRawBasecalls:
--            raise ValueError, "No raw reads in this file"
-+            raise ValueError("No raw reads in this file")
-         return [ self.read(readStart, readEnd)
-                  for (readStart, readEnd) in self.insertRegions ]
- 
-@@ -294,7 +299,7 @@
-         by production code.
-         """
-         if not self.baxH5.hasRawBasecalls:
--            raise ValueError, "No raw reads in this file"
-+            raise ValueError("No raw reads in this file")
-         return [ self.read(readStart, readEnd)
-                  for (readStart, readEnd) in self.adapterRegions ]
- 
-@@ -311,14 +316,14 @@
-             HQ region.
-         """
-         if not self.baxH5.hasRawBasecalls:
--            raise ValueError, "No raw reads in this file"
-+            raise ValueError("No raw reads in this file")
-         return [ self.read(readStart, readEnd)
-                  for (readStart, readEnd) in self.unclippedAdapterRegions ]
- 
-     @property
-     def ccsRead(self):
-         if not self.baxH5.hasConsensusBasecalls:
--            raise ValueError, "No CCS reads in this file"
-+            raise ValueError("No CCS reads in this file")
-         baseOffset  = self.baxH5._ccsOffsetsByHole[self.holeNumber]
-         if (baseOffset[1] - baseOffset[0]) <= 0:
-             return None
-@@ -356,7 +361,7 @@
-                 self.offsetBegin <=
-                 self.offsetEnd   <=
-                 zmwOffsetEnd):
--            raise IndexError, "Invalid slice of Zmw!"
-+            raise IndexError("Invalid slice of Zmw!")
- 
-     def _getBasecallsGroup(self):
-         return self.baxH5._basecallsGroup
-@@ -424,18 +429,18 @@
-     holeNumber = h5Group["ZMW/HoleNumber"].value
-     endOffset = np.cumsum(numEvent)
-     beginOffset = np.hstack(([0], endOffset[0:-1]))
--    offsets = zip(beginOffset, endOffset)
--    return dict(zip(holeNumber, offsets))
-+    offsets = list(zip(beginOffset, endOffset))
-+    return dict(list(zip(holeNumber, offsets)))
- 
- def _makeRegionTableIndex(regionTableHoleNumbers):
-     #  returns a dict: holeNumber -> (startRow, endRow)
-     diffs = np.ediff1d(regionTableHoleNumbers,
-                        to_begin=[1], to_end=[1])
-     changepoints = np.flatnonzero(diffs)
--    startsAndEnds = zip(changepoints[:-1],
--                        changepoints[1:])
--    return dict(zip(np.unique(regionTableHoleNumbers),
--                    startsAndEnds))
-+    startsAndEnds = list(zip(changepoints[:-1],
-+                        changepoints[1:]))
-+    return dict(list(zip(np.unique(regionTableHoleNumbers),
-+                    startsAndEnds)))
- 
- class BaxH5Reader(object):
-     """
-@@ -447,7 +452,7 @@
-             self.filename = op.abspath(op.expanduser(filename))
-             self.file = h5py.File(self.filename, "r")
-         except IOError:
--            raise IOError, ("Invalid or nonexistent bax/bas file %s" % filename)
-+            raise IOError("Invalid or nonexistent bax/bas file %s" % filename)
- 
-         #
-         # Raw base calls?
-@@ -493,7 +498,7 @@
-         it to the ZMW data.
-         """
-         holeNumbers = self._mainBasecallsGroup["ZMW/HoleNumber"].value
--        self._holeNumberToIndex = dict(zip(holeNumbers, range(len(holeNumbers))))
-+        self._holeNumberToIndex = dict(list(zip(holeNumbers, list(range(len(holeNumbers))))))
- 
-         #
-         # Region table
-@@ -544,7 +549,7 @@
-         try:
-             fh = h5py.File(op.abspath(op.expanduser(regionH5Filename)), "r")
-         except IOError:
--            raise IOError, ("Invalid or nonexistent file %s" % regionH5Filename)
-+            raise IOError("Invalid or nonexistent file %s" % regionH5Filename)
- 
-         self._loadRegions(fh)
-         fh.close()
-@@ -556,7 +561,7 @@
-         if not np.in1d(rgnHoleNumbers, baxHoleNumbers).all():
-             msg = "Region file (%s) does not contain the same hole numbers as " \
-                   "bas/bax file (%s)"
--            raise IOError, (msg % (regionH5Filename, self.filename))
-+            raise IOError(msg % (regionH5Filename, self.filename))
- 
-     @property
-     def sequencingZmws(self):
-@@ -619,7 +624,7 @@
-         else:
-             movieNameString = movieNameAttr
- 
--        if not isinstance(movieNameString, basestring):
-+        if not isinstance(movieNameString, six.string_types):
-             raise TypeError("Unsupported movieName {m} of type {t}."
-                              .format(m=movieNameString,
-                                      t=type(movieNameString)))
-@@ -661,7 +666,7 @@
-         if triple:
-             return triple
-         else:
--            raise ChemistryLookupError, "Could not find chemistry barcodes in file or companion metadata.xml"
-+            raise ChemistryLookupError("Could not find chemistry barcodes in file or companion metadata.xml")
- 
-     @property
-     def sequencingChemistry(self):
-@@ -682,7 +687,7 @@
-                 if tripleFromXML is not None:
-                     self._sequencingChemistry = decodeTriple(*tripleFromXML)
-                 else:
--                    raise ChemistryLookupError, "Chemistry information could not be found for this file"
-+                    raise ChemistryLookupError("Chemistry information could not be found for this file")
-         return self._sequencingChemistry
- 
-     def __len__(self):
-@@ -700,7 +705,7 @@
-         self.close()
- 
-     def listZmwMetrics(self):
--        return self._basecallsGroup["ZMWMetrics"].keys()
-+        return list(self._basecallsGroup["ZMWMetrics"].keys())
- 
-     def zmwMetric(self, name, index):
-         # we are going to cache these lazily because it is very likely
-@@ -795,7 +800,7 @@
-                 self.filename = op.abspath(op.expanduser(filename))
-                 self.file = h5py.File(self.filename, "r")
-             except IOError:
--                raise IOError, ("Invalid or nonexistent bas/bax file %s" % filename)
-+                raise IOError("Invalid or nonexistent bas/bax file %s" % filename)
- 
- 
-             # Is this a multi-part or single-part?
-@@ -814,7 +819,7 @@
-             self.file        = None
-             self._parts      = [ BaxH5Reader(fn) for fn in partFilenames ]
-             holeLookupDict   = { hn : (i + 1)
--                                 for i in xrange(len(self._parts))
-+                                 for i in range(len(self._parts))
-                                  for hn in self._parts[i]._holeNumberToIndex }
-             self._holeLookup = lambda hn: holeLookupDict[hn]
-         self._sequencingZmws = np.concatenate([ part.sequencingZmws
-@@ -883,7 +888,7 @@
-             return self._getitemScalar(holeNumbers)
-         elif isinstance(holeNumbers, slice):
-             return [ self._getitemScalar(r)
--                     for r in xrange(*holeNumbers.indices(len(self)))]
-+                     for r in range(*holeNumbers.indices(len(self)))]
-         elif isinstance(holeNumbers, list) or isinstance(holeNumbers, np.ndarray):
-             if len(holeNumbers) == 0:
-                 return []
-@@ -893,7 +898,7 @@
-                     return [ self._getitemScalar(r) for r in holeNumbers ]
-                 elif entryType == bool or issubclass(entryType, np.bool_):
-                     return [ self._getitemScalar(r) for r in np.flatnonzero(holeNumbers) ]
--        raise TypeError, "Invalid type for BasH5Reader slicing"
-+        raise TypeError("Invalid type for BasH5Reader slicing")
- 
-     @property
-     def movieName(self):
-@@ -964,7 +969,7 @@
-             else:
-                 basFilenames.append(arg)
- 
--        movieNames = map(sniffMovieName, basFilenames)
-+        movieNames = list(map(sniffMovieName, basFilenames))
-         movieNamesAndFiles = sorted(zip(movieNames, basFilenames))
- 
-         self.readers = OrderedDict(
-@@ -973,7 +978,7 @@
- 
-     @property
-     def movieNames(self):
--        return self.readers.keys()
-+        return list(self.readers.keys())
- 
-     def __getitem__(self, key):
-         """
-@@ -998,7 +1003,7 @@
-             if indices[2] == "ccs":
-                 result = result.ccsRead
-             else:
--                start, end = map(int, indices[2].split("_"))
-+                start, end = list(map(int, indices[2].split("_")))
-                 result = result.read(start, end)
-         return result
- 
-@@ -1007,20 +1012,20 @@
-     #
- 
-     def __iter__(self):
--        for reader in self.readers.values():
-+        for reader in list(self.readers.values()):
-             for zmw in reader: yield zmw
- 
-     def reads(self):
--        for reader in self.readers.values():
-+        for reader in list(self.readers.values()):
-             for read in reader.reads():
-                 yield read
- 
-     def subreads(self):
--        for reader in self.readers.values():
-+        for reader in list(self.readers.values()):
-             for read in reader.subreads():
-                 yield read
- 
-     def ccsReads(self):
--        for reader in self.readers.values():
-+        for reader in list(self.readers.values()):
-             for read in reader.ccsReads():
-                 yield read
---- python-pbcore.orig/pbcore/io/FastaIO.py
-+++ python-pbcore/pbcore/io/FastaIO.py
-@@ -33,6 +33,9 @@
- """
- Streaming I/O support for FASTA files.
- """
-+from __future__ import absolute_import
-+from six.moves import map
-+from six.moves import range
- 
- __all__ = [ "FastaRecord",
-             "FastaReader",
-@@ -273,7 +276,7 @@
- ##
- def wrap(s, columns):
-     return "\n".join(s[start:start+columns]
--                     for start in xrange(0, len(s), columns))
-+                     for start in range(0, len(s), columns))
- 
- 
- 
-@@ -298,7 +301,7 @@
-     # only "id" makes it into the fai.
-     offsetEnd = 0
-     for line in open(faidxFilename):
--        length, offset, lineWidth, blen = map(int, line.split()[-4:])
-+        length, offset, lineWidth, blen = list(map(int, line.split()[-4:]))
-         newlineWidth = blen - lineWidth                                # 2 for DOS, 1 for UNIX
-         header_    = fastaView[offsetEnd:offset]
-         assert (header_[0] == ">" and header_[-1] == "\n")
-@@ -333,7 +336,7 @@
-         if isinstance(spec, slice):
-             start, stop, stride = spec.indices(len(self))
-             if stride != 1:
--                raise ValueError, "Unsupported stride"
-+                raise ValueError("Unsupported stride")
-         elif spec < 0:
-             start = self.faiRecord.length + spec
-             stop = start + 1
-@@ -343,7 +346,7 @@
-             stop = start + 1
-             stride = 1
-         if not (0 <= start <= stop <= self.faiRecord.length):
--            raise IndexError, "Out of bounds"
-+            raise IndexError("Out of bounds")
-         startOffset = fileOffset(self.faiRecord, start)
-         endOffset   = fileOffset(self.faiRecord, stop)
-         snip = self.view[startOffset:endOffset].translate(None, "\r\n")
-@@ -441,16 +444,16 @@
-             key = len(self) + key
- 
-         if isinstance(key, slice):
--            indices = xrange(*key.indices(len(self)))
-+            indices = range(*key.indices(len(self)))
-             return [ IndexedFastaRecord(self.view, self.contigLookup[i])
-                      for i in indices ]
-         elif key in self.contigLookup:
-             return IndexedFastaRecord(self.view, self.contigLookup[key])
-         else:
--            raise IndexError, "Contig not in FastaTable"
-+            raise IndexError("Contig not in FastaTable")
- 
-     def __iter__(self):
--        return (self[i] for i in xrange(len(self)))
-+        return (self[i] for i in range(len(self)))
- 
-     def __len__(self):
-         return len(self.fai)
---- python-pbcore.orig/pbcore/io/FastqIO.py
-+++ python-pbcore/pbcore/io/FastqIO.py
-@@ -33,6 +33,8 @@
- """
- I/O support for FASTQ files
- """
-+from __future__ import absolute_import
-+from six.moves import range
- 
- __all__ = [ "FastqRecord",
-             "FastqReader",
-@@ -205,7 +207,7 @@
-         One-shot iteration support
-         """
-         while True:
--            lines = [next(self.file) for i in xrange(4)]
-+            lines = [next(self.file) for i in range(4)]
-             yield FastqRecord(lines[0][1:-1],
-                               lines[1][:-1],
-                               qualityString=lines[3][:-1])
---- python-pbcore.orig/pbcore/io/FofnIO.py
-+++ python-pbcore/pbcore/io/FofnIO.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -33,6 +34,7 @@
- from pbcore.io.base import getFileHandle
- from os.path import dirname, isabs, join, abspath, expanduser
- import xml.etree.ElementTree as ET
-+import six
- 
- 
- __all__ = [ "readFofn",
-@@ -48,7 +50,7 @@
-     FOFN that are relative (i.e., do not contain a leading '/') will
-     be reckoned from the directory containing the FOFN.
-     """
--    if isinstance(f, basestring):
-+    if isinstance(f, six.string_types):
-         fofnRoot = dirname(abspath(expanduser(f)))
-     else:
-         fofnRoot = None
-@@ -62,7 +64,7 @@
-         elif fofnRoot is not None:
-             yield join(fofnRoot, path)
-         else:
--            raise IOError, "Cannot handle relative paths in StringIO FOFN"
-+            raise IOError("Cannot handle relative paths in StringIO FOFN")
- 
- def readInputXML(fname):
-     tree = ET.parse(fname)
---- python-pbcore.orig/pbcore/io/GffIO.py
-+++ python-pbcore/pbcore/io/GffIO.py
-@@ -36,6 +36,9 @@
- The specification for the GFF format is available at
-     http://www.sequenceontology.org/gff3.shtml
- """
-+from __future__ import absolute_import
-+import six
-+from six.moves import map
- 
- __all__ = [ "Gff3Record",
-             "GffReader",
-@@ -105,7 +108,7 @@
-         columns = s.rstrip().rstrip(";").split("\t")
-         try:
-             assert len(columns) == len(cls._GFF_COLUMNS)
--            attributes = map(tupleFromGffAttribute, columns[-1].split(";"))
-+            attributes = list(map(tupleFromGffAttribute, columns[-1].split(";")))
-             (_seqid, _source, _type, _start,
-              _end, _score, _strand, _phase)  = columns[:-1]
-             return Gff3Record(_seqid, int(_start), int(_end), _type,
-@@ -124,7 +127,7 @@
-     def __str__(self):
-         formattedAttributes = ";".join(
-             ("%s=%s" % (k, self._formatField(v))
--             for (k, v) in self.attributes.iteritems()))
-+             for (k, v) in six.iteritems(self.attributes)))
-         formattedFixedColumns = "\t".join(
-             self._formatField(getattr(self, k))
-             for k in self._GFF_COLUMNS[:-1])
---- python-pbcore.orig/pbcore/io/_utils.py
-+++ python-pbcore/pbcore/io/_utils.py
-@@ -29,8 +29,10 @@
- #################################################################################
- 
- from __future__ import absolute_import
-+from __future__ import print_function
- import h5py, numpy as np
- from cStringIO import StringIO
-+import six
- 
- 
- def arrayFromDataset(ds, offsetBegin, offsetEnd):
-@@ -75,7 +77,7 @@
- 
- def is_string_like(obj):
-     'Return True if *obj* looks like a string'
--    if isinstance(obj, (str, unicode)): return True
-+    if isinstance(obj, (str, six.text_type)): return True
-     # numpy strings are subclass of str, ma strings are not
-     if ma.isMaskedArray(obj):
-         if obj.ndim == 0 and obj.dtype.kind in 'SU':
-@@ -189,8 +191,8 @@
-             newrec[name] = 0
- 
-     if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
--        newrec_fields = newrec.dtype.fields.keys()
--        for k, v in defaults.items():
-+        newrec_fields = list(newrec.dtype.fields.keys())
-+        for k, v in list(defaults.items()):
-             if k in newrec_fields:
-                 newrec[k] = v
- 
-@@ -234,7 +236,7 @@
-     """
-     Pretty-print a recarray
-     """
--    print "foo"
-+    print("foo")
- 
- 
- class CommonEqualityMixin(object):
---- python-pbcore.orig/pbcore/io/align/BamAlignment.py
-+++ python-pbcore/pbcore/io/align/BamAlignment.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -36,6 +37,7 @@
- from pbcore.sequence import reverseComplement
- from ._BamSupport import *
- from ._AlignmentMixin import AlignmentRecordMixin
-+from six.moves import range
- 
- __all__ = [ "BamAlignment" ]
- 
-@@ -67,7 +69,7 @@
-     @wraps(method)
-     def f(bamAln, *args, **kwargs):
-         if not bamAln.bam.isReferenceLoaded:
--            raise UnavailableFeature, "this feature requires loaded reference sequence"
-+            raise UnavailableFeature("this feature requires loaded reference sequence")
-         else:
-             return method(bamAln, *args, **kwargs)
-     return f
-@@ -76,7 +78,7 @@
-     @wraps(method)
-     def f(bamAln, *args, **kwargs):
-         if bamAln.rowNumber is None:
--            raise UnavailableFeature, "this feature requires a PacBio BAM index"
-+            raise UnavailableFeature("this feature requires a PacBio BAM index")
-         else:
-             return method(bamAln, *args, **kwargs)
-     return f
-@@ -85,7 +87,7 @@
-     @wraps(method)
-     def f(bamAln, *args, **kwargs):
-         if bamAln.isUnmapped:
--            raise UnavailableFeature, "this feature requires a *mapped* BAM record"
-+            raise UnavailableFeature("this feature requires a *mapped* BAM record")
-         else:
-             return method(bamAln, *args, **kwargs)
-     return f
-@@ -180,7 +182,7 @@
-         if (refStart >= refEnd or
-             refStart >= self.tEnd or
-             refEnd   <= self.tStart):
--            raise IndexError, "Clipping query does not overlap alignment"
-+            raise IndexError("Clipping query does not overlap alignment")
- 
-         # The clipping region must intersect the alignment, though it
-         # does not have to be contained wholly within it.
-@@ -305,7 +307,7 @@
-     @requiresReference
-     def reference(self, aligned=True, orientation="native"):
-         if not (orientation == "native" or orientation == "genomic"):
--            raise ValueError, "Bad `orientation` value"
-+            raise ValueError("Bad `orientation` value")
-         tSeq = self.bam.referenceFasta[self.referenceName].sequence[self.tStart:self.tEnd]
-         shouldRC = orientation == "native" and self.isReverseStrand
-         tSeqOriented = reverseComplement(tSeq) if shouldRC else tSeq
-@@ -401,10 +403,9 @@
-         oriented genomically in the file.
-         """
-         if not (orientation == "native" or orientation == "genomic"):
--            raise ValueError, "Bad `orientation` value"
-+            raise ValueError("Bad `orientation` value")
-         if self.isUnmapped and (orientation != "native" or aligned == True):
--            raise UnavailableFeature, \
--                "Cannot get genome oriented/aligned features from unmapped BAM record"
-+            raise UnavailableFeature("Cannot get genome oriented/aligned features from unmapped BAM record")
-         # 1. Extract in native orientation
-         tag, kind_, dtype_ = PULSE_FEATURE_TAGS[featureName]
-         data_ = self.peer.opt(tag)
-@@ -477,10 +478,9 @@
- 
-     def read(self, aligned=True, orientation="native"):
-         if not (orientation == "native" or orientation == "genomic"):
--            raise ValueError, "Bad `orientation` value"
-+            raise ValueError("Bad `orientation` value")
-         if self.isUnmapped and (orientation != "native" or aligned == True):
--            raise UnavailableFeature, \
--                "Cannot get genome oriented/aligned features from unmapped BAM record"
-+            raise UnavailableFeature("Cannot get genome oriented/aligned features from unmapped BAM record")
-         data = np.fromstring(self.peer.seq, dtype=np.int8)
-         s = self.aStart - self.qStart
-         e = self.aEnd   - self.qStart
-@@ -520,7 +520,7 @@
-             transcript = self.transcript(style="exonerate+")
-             refPos = self.referencePositions()
-             refPosString = "".join([str(pos % 10) for pos in refPos])
--            for i in xrange(0, len(alignedRef), COLUMNS):
-+            for i in range(0, len(alignedRef), COLUMNS):
-                 val += "\n"
-                 val += "  " + refPosString[i:i+COLUMNS] + "\n"
-                 val += "  " + alignedRef  [i:i+COLUMNS] + "\n"
-@@ -540,7 +540,7 @@
-         if key in self.bam.pbi.columnNames:
-             return self.bam.pbi[self.rowNumber][key]
-         else:
--            raise AttributeError, "no such column in pbi index"
-+            raise AttributeError("no such column in pbi index")
- 
-     def __dir__(self):
-         if self.bam.pbi is not None:
---- python-pbcore.orig/pbcore/io/align/BamIO.py
-+++ python-pbcore/pbcore/io/align/BamIO.py
-@@ -1,3 +1,7 @@
-+from __future__ import absolute_import
-+from six.moves import map
-+from six.moves import range
-+from six.moves import zip
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -52,7 +56,7 @@
-     @wraps(method)
-     def f(bamReader, *args, **kwargs):
-         if not bamReader.peer._hasIndex():
--            raise UnavailableFeature, "this feature requires an standard BAM index file (bam.bai)"
-+            raise UnavailableFeature("this feature requires an standard BAM index file (bam.bai)")
-         else:
-             return method(bamReader, *args, **kwargs)
-     return f
-@@ -71,11 +75,11 @@
-         refNames   = [r["SN"] for r in refRecords]
-         refLengths = [r["LN"] for r in refRecords]
-         refMD5s    = [r["M5"] for r in refRecords]
--        refIds = map(self.peer.gettid, refNames)
-+        refIds = list(map(self.peer.gettid, refNames))
-         nRefs = len(refRecords)
- 
-         if nRefs > 0:
--            self._referenceInfoTable = np.rec.fromrecords(zip(
-+            self._referenceInfoTable = np.rec.fromrecords(list(zip(
-                 refIds,
-                 refIds,
-                 refNames,
-@@ -83,14 +87,14 @@
-                 refLengths,
-                 refMD5s,
-                 np.zeros(nRefs, dtype=np.uint32),
--                np.zeros(nRefs, dtype=np.uint32)),
-+                np.zeros(nRefs, dtype=np.uint32))),
-                 dtype=[('ID', '<i8'), ('RefInfoID', '<i8'),
-                        ('Name', 'O'), ('FullName', 'O'),
-                        ('Length', '<i8'), ('MD5', 'O'),
-                        ('StartRow', '<u4'), ('EndRow', '<u4')])
-             self._referenceDict = {}
--            self._referenceDict.update(zip(refIds, self._referenceInfoTable))
--            self._referenceDict.update(zip(refNames, self._referenceInfoTable))
-+            self._referenceDict.update(list(zip(refIds, self._referenceInfoTable)))
-+            self._referenceDict.update(list(zip(refNames, self._referenceInfoTable)))
-         else:
-             self._referenceInfoTable = None
-             self._referenceDict = None
-@@ -98,7 +102,7 @@
-     def _loadReadGroupInfo(self):
-         rgs = self.peer.header["RG"]
-         readGroupTable_ = []
--        pulseFeaturesInAll_ = frozenset(PULSE_FEATURE_TAGS.keys())
-+        pulseFeaturesInAll_ = frozenset(list(PULSE_FEATURE_TAGS.keys()))
-         for rg in rgs:
-             rgID = rgAsInt(rg["ID"])
-             rgName = rg["PU"]
-@@ -110,7 +114,7 @@
-             rgChem = decodeTriple(*triple)
-             rgReadType = ds["READTYPE"]
-             readGroupTable_.append((rgID, rgName, rgReadType, rgChem))
--            pulseFeaturesInAll_ = pulseFeaturesInAll_.intersection(ds.keys())
-+            pulseFeaturesInAll_ = pulseFeaturesInAll_.intersection(list(ds.keys()))
- 
-         self._readGroupTable = np.rec.fromrecords(
-             readGroupTable_,
-@@ -147,7 +151,7 @@
-         fastaIdsAndLens = set((c.id, len(c)) for c in ft)
-         bamIdsAndLens   = set((c.Name, c.Length) for c in self.referenceInfoTable)
-         if not bamIdsAndLens.issubset(fastaIdsAndLens):
--            raise ReferenceMismatch, "FASTA file must contain superset of reference contigs in BAM"
-+            raise ReferenceMismatch("FASTA file must contain superset of reference contigs in BAM")
-         self.referenceFasta = ft
- 
-     def _checkFileCompatibility(self):
-@@ -172,7 +176,7 @@
-         self.referenceFasta = None
-         if referenceFastaFname is not None:
-             if self.isUnmapped:
--                raise ValueError, "Unmapped BAM file--reference FASTA should not be given as argument to BamReader"
-+                raise ValueError("Unmapped BAM file--reference FASTA should not be given as argument to BamReader")
-             self._loadReferenceFasta(referenceFastaFname)
- 
-     @property
-@@ -343,7 +347,7 @@
-         if exists(pbiFname):
-             self.pbi = PacBioBamIndex(pbiFname)
-         else:
--            raise IOError, "IndexedBamReader requires bam.pbi index file"
-+            raise IOError("IndexedBamReader requires bam.pbi index file")
- 
-     def atRowNumber(self, rn):
-         offset = self.pbi.virtualFileOffset[rn]
-@@ -360,7 +364,7 @@
-             return self[ix]
- 
-     def __iter__(self):
--        for rn in xrange(len(self.pbi)):
-+        for rn in range(len(self.pbi)):
-             yield self.atRowNumber(rn)
- 
-     def __len__(self):
-@@ -372,7 +376,7 @@
-             return self.atRowNumber(rowNumbers)
-         elif isinstance(rowNumbers, slice):
-             return [ self.atRowNumber(r)
--                     for r in xrange(*rowNumbers.indices(len(self)))]
-+                     for r in range(*rowNumbers.indices(len(self)))]
-         elif isinstance(rowNumbers, list) or isinstance(rowNumbers, np.ndarray):
-             if len(rowNumbers) == 0:
-                 return []
-@@ -382,13 +386,13 @@
-                     return [ self.atRowNumber(r) for r in rowNumbers ]
-                 elif entryType == bool or issubclass(entryType, np.bool_):
-                     return [ self.atRowNumber(r) for r in np.flatnonzero(rowNumbers) ]
--        raise TypeError, "Invalid type for IndexedBamReader slicing"
-+        raise TypeError("Invalid type for IndexedBamReader slicing")
- 
-     def __getattr__(self, key):
-         if key in self.pbi.columnNames:
-             return self.pbi[key]
-         else:
--            raise AttributeError, "no such column in pbi index"
-+            raise AttributeError("no such column in pbi index")
- 
-     def __dir__(self):
-         return self.pbi.columnNames
---- python-pbcore.orig/pbcore/io/align/BlasrIO.py
-+++ python-pbcore/pbcore/io/align/BlasrIO.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
---- python-pbcore.orig/pbcore/io/align/CmpH5IO.py
-+++ python-pbcore/pbcore/io/align/CmpH5IO.py
-@@ -1,3 +1,8 @@
-+from __future__ import absolute_import
-+import six
-+from six.moves import map
-+from six.moves import range
-+from six.moves import zip
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -69,9 +74,9 @@
- _basemapArray  = np.ndarray(shape=(max(_basemap.keys()) + 1,), dtype=np.byte)
- _cBasemapArray = np.ndarray(shape=(max(_basemap.keys()) + 1,), dtype=np.byte)
- 
--for (e, v) in _basemap.iteritems():
-+for (e, v) in six.iteritems(_basemap):
-     _basemapArray[e] = v
--for (e, v) in _cBasemap.iteritems():
-+for (e, v) in six.iteritems(_cBasemap):
-     _cBasemapArray[e] = v
- 
- _baseEncodingToInt = np.array([-1]*16)
-@@ -163,7 +168,7 @@
-     elif dtype == np.int8:
-         return a[a != ord("-")]
-     else:
--        raise Exception, "Invalid pulse array type"
-+        raise Exception("Invalid pulse array type")
- 
- 
- 
-@@ -281,7 +286,7 @@
-         if (refStart >= refEnd or
-             refStart >= self.tEnd or
-             refEnd   <= self.tStart):
--            raise IndexError, "Clipping query does not overlap alignment"
-+            raise IndexError("Clipping query does not overlap alignment")
-         else:
-             return ClippedCmpH5Alignment(self, refStart, refEnd)
- 
-@@ -588,7 +593,7 @@
-         transcript = self.transcript(style="exonerate+")
-         refPos = self.referencePositions()
-         refPosString = "".join([str(pos % 10) for pos in refPos])
--        for i in xrange(0, len(alignedRef), COLUMNS):
-+        for i in range(0, len(alignedRef), COLUMNS):
-             val += "\n"
-             val += "  " + refPosString[i:i+COLUMNS] + "\n"
-             val += "  " + alignedRef  [i:i+COLUMNS] + "\n"
-@@ -719,7 +724,7 @@
-                 self.filename = abspath(expanduser(filenameOrH5File))
-                 self.file = h5py.File(self.filename, "r")
-             except IOError:
--                raise IOError, ("Invalid or nonexistent cmp.h5 file %s" % filenameOrH5File)
-+                raise IOError("Invalid or nonexistent cmp.h5 file %s" % filenameOrH5File)
- 
-         self._loadAlignmentInfo(sharedAlignmentIndex)
-         self._loadMovieInfo()
-@@ -753,7 +758,7 @@
-         for (alnGroupId, alnGroupPath) in zip(self.file["/AlnGroup/ID"],
-                                               self.file["/AlnGroup/Path"]):
-             alnGroup = self.file[alnGroupPath]
--            self._alignmentGroupById[alnGroupId] = dict(alnGroup.items())
-+            self._alignmentGroupById[alnGroupId] = dict(list(alnGroup.items()))
- 
- 
-     def _loadMovieInfo(self):
-@@ -767,10 +772,10 @@
-             timeScale = [1.0] * numMovies
- 
-         self._movieInfoTable = np.rec.fromrecords(
--            zip(self.file["/MovieInfo/ID"],
-+            list(zip(self.file["/MovieInfo/ID"],
-                 self.file["/MovieInfo/Name"],
-                 frameRate,
--                timeScale),
-+                timeScale)),
-             dtype=[("ID"                  , int),
-                    ("Name"                , object),
-                    ("FrameRate"           , float),
-@@ -787,10 +792,10 @@
-         # missing chemistry info.
-         assert (self._readGroupTable is None) and (self._readGroupDict is None)
-         self._readGroupTable = np.rec.fromrecords(
--            zip(self._movieInfoTable.ID,
-+            list(zip(self._movieInfoTable.ID,
-                 self._movieInfoTable.Name,
-                 [self.readType] * len(self._movieInfoTable.ID),
--                self.sequencingChemistry),
-+                self.sequencingChemistry)),
-             dtype=[("ID"                 , np.int32),
-                    ("MovieName"          , "O"),
-                    ("ReadType"           , "O"),
-@@ -800,18 +805,18 @@
- 
-     def _loadReferenceInfo(self):
-         _referenceGroupTbl = np.rec.fromrecords(
--            zip(self.file["/RefGroup/ID"],
-+            list(zip(self.file["/RefGroup/ID"],
-                 self.file["/RefGroup/RefInfoID"],
--                [path[1:] for path in self.file["/RefGroup/Path"]]),
-+                [path[1:] for path in self.file["/RefGroup/Path"]])),
-             dtype=[("ID"       , int),
-                    ("RefInfoID", int),
-                    ("Name"     , object)])
- 
-         _referenceInfoTbl = np.rec.fromrecords(
--            zip(self.file["/RefInfo/ID"],
-+            list(zip(self.file["/RefInfo/ID"],
-                 self.file["/RefInfo/FullName"],
-                 self.file["/RefInfo/Length"],
--                self.file["/RefInfo/MD5"]) ,
-+                self.file["/RefInfo/MD5"])) ,
-             dtype=[("RefInfoID", int),
-                    ("FullName" , object),
-                    ("Length"   , int),
-@@ -840,7 +845,7 @@
-                     record.Name     in self._referenceDict or
-                     record.FullName in self._referenceDict or
-                     record.MD5      in self._referenceDict):
--                    raise ValueError, "Duplicate reference contig sequence or identifier"
-+                    raise ValueError("Duplicate reference contig sequence or identifier")
-                 else:
-                     self._referenceDict[shortName]       = record
-                     self._referenceDict[record.ID]       = record
-@@ -859,10 +864,10 @@
- 
-         if "Barcode" in self.file["/AlnInfo"]:
-             # Build forward and backwards id<->label lookup tables
--            self._barcodeName = OrderedDict(zip(self.file["/BarcodeInfo/ID"],
--                                                self.file["/BarcodeInfo/Name"]))
--            self._barcode     = OrderedDict(zip(self.file["/BarcodeInfo/Name"],
--                                                self.file["/BarcodeInfo/ID"]))
-+            self._barcodeName = OrderedDict(list(zip(self.file["/BarcodeInfo/ID"],
-+                                                self.file["/BarcodeInfo/Name"])))
-+            self._barcode     = OrderedDict(list(zip(self.file["/BarcodeInfo/Name"],
-+                                                self.file["/BarcodeInfo/ID"])))
-             # Barcode ID per row
-             self._barcodes = self.file["/AlnInfo/Barcode"].value[:,1]
- 
-@@ -890,7 +895,7 @@
-                 # Old way
-                 self._sequencingChemistry = mi["SequencingChemistry"].value
-             else:
--                raise ChemistryLookupError, "Chemistry information could not be found in cmp.h5!"
-+                raise ChemistryLookupError("Chemistry information could not be found in cmp.h5!")
-         return self._sequencingChemistry
- 
- 
-@@ -1032,8 +1037,8 @@
-             >>> c.versionAtLeast("1.3.0")
-             False
-         """
--        myVersionTuple = map(int, self.version.split(".")[:3])
--        minimalVersionTuple = map(int, minimalVersion.split(".")[:3])
-+        myVersionTuple = list(map(int, self.version.split(".")[:3]))
-+        minimalVersionTuple = list(map(int, minimalVersion.split(".")[:3]))
-         return myVersionTuple >= minimalVersionTuple
- 
-     def softwareVersion(self, programName):
-@@ -1041,8 +1046,8 @@
-         Return the version of program `programName` that processed
-         this file.
-         """
--        filelog = dict(zip(self.file["/FileLog/Program"],
--                           self.file["/FileLog/Version"]))
-+        filelog = dict(list(zip(self.file["/FileLog/Program"],
-+                           self.file["/FileLog/Version"])))
-         return filelog.get(programName, None)
- 
-     @property
-@@ -1062,7 +1067,7 @@
- 
-     @property
-     def movieNames(self):
--        return set([mi.Name for mi in self._movieDict.values()])
-+        return set([mi.Name for mi in list(self._movieDict.values())])
- 
-     @property
-     def ReadGroupID(self):
-@@ -1152,7 +1157,7 @@
-         """
- 
-         if not self.isSorted:
--            raise Exception, "CmpH5 is not sorted"
-+            raise Exception("CmpH5 is not sorted")
-         rowNumbers = self._readLocatorByKey[refKey](refStart, refEnd, justIndices=True)
-         if justIndices:
-             return rowNumbers
-@@ -1173,8 +1178,8 @@
-             False
- 
-         """
--        return all(featureName in alnGroup.keys()
--                   for alnGroup in self._alignmentGroupById.values())
-+        return all(featureName in list(alnGroup.keys())
-+                   for alnGroup in list(self._alignmentGroupById.values()))
- 
-     def pulseFeaturesAvailable(self):
-         """
-@@ -1186,9 +1191,9 @@
-             [u'QualityValue', u'IPD', u'PulseWidth', u'InsertionQV', u'DeletionQV']
- 
-         """
--        pulseFeaturesByMovie = [ alnGroup.keys()
--                                 for alnGroup in self._alignmentGroupById.values() ]
--        pulseFeaturesAvailableAsSet = set.intersection(*map(set, pulseFeaturesByMovie))
-+        pulseFeaturesByMovie = [ list(alnGroup.keys())
-+                                 for alnGroup in list(self._alignmentGroupById.values()) ]
-+        pulseFeaturesAvailableAsSet = set.intersection(*list(map(set, pulseFeaturesByMovie)))
-         pulseFeaturesAvailableAsSet.discard("AlnArray")
-         return list(pulseFeaturesAvailableAsSet)
- 
-@@ -1239,7 +1244,7 @@
-             return CmpH5Alignment(self, rowNumbers)
-         elif isinstance(rowNumbers, slice):
-             return [CmpH5Alignment(self, r)
--                    for r in xrange(*rowNumbers.indices(len(self)))]
-+                    for r in range(*rowNumbers.indices(len(self)))]
-         elif isinstance(rowNumbers, list) or isinstance(rowNumbers, np.ndarray):
-             if len(rowNumbers) == 0:
-                 return []
-@@ -1249,10 +1254,10 @@
-                     return [CmpH5Alignment(self, r) for r in rowNumbers]
-                 elif entryType == bool or issubclass(entryType, np.bool_):
-                     return [CmpH5Alignment(self, r) for r in np.flatnonzero(rowNumbers)]
--        raise TypeError, "Invalid type for CmpH5Reader slicing"
-+        raise TypeError("Invalid type for CmpH5Reader slicing")
- 
-     def __iter__(self):
--        return (self[i] for i in xrange(len(self)))
-+        return (self[i] for i in range(len(self)))
- 
-     def __len__(self):
-         return len(self.alignmentIndex)
---- python-pbcore.orig/pbcore/io/align/PacBioBamIndex.py
-+++ python-pbcore/pbcore/io/align/PacBioBamIndex.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -35,6 +36,9 @@
- from os.path import abspath, expanduser
- from functools import wraps
- from collections import namedtuple
-+import six
-+from six.moves import range
-+from six.moves import zip
- 
- class PacBioBamIndex(object):
-     """
-@@ -54,8 +58,8 @@
-     """
-     def _loadColumns(self, f):
-         g = f["PacBioBamIndex/Columns"]
--        columnNamesAndColumns = sorted([ (k, v[:]) for (k, v) in g.iteritems() ])
--        columnNames, columns = zip(*columnNamesAndColumns)
-+        columnNamesAndColumns = sorted([ (k, v[:]) for (k, v) in six.iteritems(g) ])
-+        columnNames, columns = list(zip(*columnNamesAndColumns))
-         return np.rec.fromarrays(columns, names=columnNames)
- 
-     def _loadVersion(self, f):
-@@ -72,7 +76,7 @@
-                 self._columns = self._loadColumns(f)
-                 self._offsets = self._loadOffsets(f)
-             except Exception as e:
--                raise IOError, "Malformed bam.pbi file: " + str(e)
-+                raise IOError("Malformed bam.pbi file: " + str(e))
- 
- 
-     @property
-@@ -87,7 +91,7 @@
-         if columnName in self.columnNames:
-             return self._columns[columnName]
-         else:
--            raise AttributeError, "pbi has no column named '%s'" % columnName
-+            raise AttributeError("pbi has no column named '%s'" % columnName)
- 
-     def __getitem__(self, rowNumber):
-         return self._columns[rowNumber]
-@@ -100,7 +104,7 @@
-         return len(self._columns)
- 
-     def __iter__(self):
--        for i in xrange(len(self)):
-+        for i in range(len(self)):
-             yield self[i]
- 
-     def rangeQuery(self, winId, winStart, winEnd):
---- python-pbcore.orig/pbcore/io/align/_AlignmentMixin.py
-+++ python-pbcore/pbcore/io/align/_AlignmentMixin.py
-@@ -1,3 +1,5 @@
-+from __future__ import absolute_import
-+from six.moves import map
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -86,8 +88,8 @@
-             elif readName.endswith("ccs"):
-                 return False
-             else:
--                q = map(int, rQuery.split("_"))
--                r = map(int, readName.split("/")[-1].split("_"))
-+                q = list(map(int, rQuery.split("_")))
-+                r = list(map(int, readName.split("/")[-1].split("_")))
-                 return rangeOverlap(q, r)
- 
-         fields = query.split("/")
---- python-pbcore.orig/pbcore/io/align/_BamSupport.py
-+++ python-pbcore/pbcore/io/align/_BamSupport.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -31,6 +32,8 @@
- # Author: David Alexander
- 
- import numpy as np
-+from six.moves import range
-+from six.moves import zip
- 
- class UnavailableFeature(Exception): pass
- class Unimplemented(Exception):      pass
-@@ -103,9 +106,9 @@
-     for i, (fl, fu) in enumerate(zip(framepoints, framepoints[1:])):
-         if (fu > fl + 1):
-             m = (fl + fu)/2
--            for f in xrange(fl, m):
-+            for f in range(fl, m):
-                 frameToCode[f] = i
--            for f in xrange(m, fu):
-+            for f in range(m, fu):
-                 frameToCode[f] = i + 1
-         else:
-             frameToCode[fl] = i
---- python-pbcore.orig/pbcore/io/align/__init__.py
-+++ python-pbcore/pbcore/io/align/__init__.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -28,7 +29,7 @@
- # POSSIBILITY OF SUCH DAMAGE.
- #################################################################################
- 
--from CmpH5IO      import *
--from BamIO        import *
--from BamAlignment import *
--from BlasrIO      import *
-+from .CmpH5IO      import *
-+from .BamIO        import *
-+from .BamAlignment import *
-+from .BlasrIO      import *
---- python-pbcore.orig/pbcore/io/base.py
-+++ python-pbcore/pbcore/io/base.py
-@@ -34,6 +34,7 @@
- from __future__ import absolute_import
- import gzip
- from os.path import abspath, expanduser
-+import six
- 
- __all__ = [ "ReaderBase", "WriterBase" ]
- 
-@@ -53,7 +54,7 @@
-     """
-     assert mode in ("r", "w")
- 
--    if isinstance(filenameOrFile, basestring):
-+    if isinstance(filenameOrFile, six.string_types):
-         filename = abspath(expanduser(filenameOrFile))
-         if filename.endswith(".gz"):
-             return gzip.open(filename, mode)
---- python-pbcore.orig/pbcore/io/opener.py
-+++ python-pbcore/pbcore/io/opener.py
-@@ -1,3 +1,5 @@
-+from __future__ import absolute_import
-+from __future__ import print_function
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -54,7 +56,7 @@
-     elif fname.endswith("bam"):
-         return IndexedBamReader(fname, referenceFasta)
-     else:
--        raise ValueError, "Invalid alignment file suffix"
-+        raise ValueError("Invalid alignment file suffix")
- 
- def openAlignmentFile(fname, referenceFasta=None):
-     """
-@@ -97,7 +99,7 @@
-     elif ext == "fofn":          return BasH5Collection
-     elif ext == "bam":           return openAlignmentFile
-     else:
--        raise ValueError, ("No known opener class for extension %s" % ext)
-+        raise ValueError("No known opener class for extension %s" % ext)
- 
- def _extension(fname):
-     parts = fname.split(".")
-@@ -119,7 +121,7 @@
-     import sys, code
- 
-     if len(sys.argv) < 2:
--        print "Requires at least one argument!"
-+        print("Requires at least one argument!")
-         return 1
- 
-     fname = sys.argv[1]
---- python-pbcore.orig/pbcore/io/rangeQueries.py
-+++ python-pbcore/pbcore/io/rangeQueries.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -31,6 +32,7 @@
- import h5py as h
- import numpy as n
- import bisect
-+from six.moves import zip
- 
- def rightmostBinSearch(vec, val):
-     """
-@@ -127,7 +129,7 @@
-     Return a function which can be called iteratively to find reads
-     quickly.
-     """
--    if not cmpH5.isSorted: raise Exception, "CmpH5 is not sorted"
-+    if not cmpH5.isSorted: raise Exception("CmpH5 is not sorted")
-     offsets = cmpH5.file["/RefGroup/OffsetTable"].value
-     offStart, offEnd = offsets[offsets[:,0] == refSeq, 1:3].ravel()
- 
-@@ -163,7 +165,7 @@
-     cmpH5 is an hdf5 object representing a pointer to a sorted cmp.h5
-     file.
-     """
--    if not cmpH5.isSorted: raise Exception, "CmpH5 is not sorted"
-+    if not cmpH5.isSorted: raise Exception("CmpH5 is not sorted")
-     return makeReadLocator(cmpH5, coords[0])(coords[1], coords[2], justIndices)
- 
- def getCoverageInRange(cmpH5, coords, rowNumbers=None):
-@@ -172,7 +174,7 @@
-     element represents the number of reads overlapping that position
-     in the cmp.h5 file.
-     """
--    if not cmpH5.isSorted: raise Exception, "CmpH5 is not sorted"
-+    if not cmpH5.isSorted: raise Exception("CmpH5 is not sorted")
-     if rowNumbers==None:
-         rowNumbers  = getReadsInRange(cmpH5, coords, justIndices=True)
-     if (len(rowNumbers))==0:
---- python-pbcore.orig/pbcore/sequence.py
-+++ python-pbcore/pbcore/sequence.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
---- python-pbcore.orig/pbcore/util/Process.py
-+++ python-pbcore/pbcore/util/Process.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
---- python-pbcore.orig/pbcore/util/ToolRunner.py
-+++ python-pbcore/pbcore/util/ToolRunner.py
-@@ -1,3 +1,5 @@
-+from __future__ import absolute_import
-+from __future__ import print_function
- #################################################################################
- # Copyright (c) 2011-2015, Pacific Biosciences of California, Inc.
- #
-@@ -95,7 +97,7 @@
-             try:
-                 import ipdb
-             except ImportError:
--                print "--debug requires module 'ipdb'"
-+                print("--debug requires module 'ipdb'")
-                 return -1
-             with ipdb.launch_ipdb_on_exception():
-                 self.run()
---- python-pbcore.orig/pbcore/util/decorators.py
-+++ python-pbcore/pbcore/util/decorators.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- import warnings
- 
- def deprecated(func):
---- python-pbcore.orig/setup.py
-+++ python-pbcore/setup.py
-@@ -1,12 +1,9 @@
-+from __future__ import absolute_import
- from setuptools import setup, Extension, find_packages
- import sys
- 
--if ("install" in sys.argv) and sys.version_info < (2, 7, 0):
--    print "pbcore requires Python 2.7"
--    sys.exit(-1)
--
- globals = {}
--execfile("pbcore/__init__.py", globals)
-+exec(compile(open("pbcore/__init__.py").read(), "pbcore/__init__.py", 'exec'), globals)
- __VERSION__ = globals["__VERSION__"]
- 
- setup(
---- python-pbcore.orig/tests/test_pbcore_data.py
-+++ python-pbcore/tests/test_pbcore_data.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- from nose.tools import assert_equal
- from pbcore import data
- 
---- python-pbcore.orig/tests/test_pbcore_io_AlnFileReaders.py
-+++ python-pbcore/tests/test_pbcore_io_AlnFileReaders.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- from numpy.testing import (assert_array_almost_equal as ASIM,
-                            assert_array_equal        as AEQ)
- from nose.tools import (nottest,
-@@ -14,6 +15,8 @@
- from pbcore.io import CmpH5Reader, BamReader, IndexedBamReader
- from pbcore.sequence import reverseComplement as RC
- from pbcore.chemistry import ChemistryLookupError
-+from six.moves import range
-+from six.moves import zip
- 
- 
- class _BasicAlnFileReaderTests(object):
-@@ -151,7 +154,7 @@
-         EQ(985, ac1.referenceEnd)
-         EQ([(983, 'A', '-'),
-             (984, 'C', 'C')],
--           zip(ac1.referencePositions(), ac1.reference(), ac1.read()))
-+           list(zip(ac1.referencePositions(), ac1.reference(), ac1.read())))
- 
-         ac2 = a.clippedTo(982, 986)
-         EQ(982, ac2.referenceStart)
-@@ -161,13 +164,13 @@
-             (984, 'C', 'C'),
-             (985, '-', 'G'),
-             (985, 'T', 'T')],
--           zip(ac2.referencePositions(), ac2.reference(), ac2.read()))
-+           list(zip(ac2.referencePositions(), ac2.reference(), ac2.read())))
- 
-         ac3 = a.clippedTo(984, 985)
-         EQ(984, ac3.referenceStart)
-         EQ(985, ac3.referenceEnd)
-         EQ([(984, 'C', 'C')],
--           zip(ac3.referencePositions(), ac3.reference(), ac3.read()))
-+           list(zip(ac3.referencePositions(), ac3.reference(), ac3.read())))
- 
-         # Get a more interesting (more gappy) rev strand aln
-         b = self.alns[3]
-@@ -192,7 +195,7 @@
-             (2210, 'C', 'C'),
-             (2209, 'T', 'T'),
-             (2208, 'G', '-')],
--           zip(bc1.referencePositions(), bc1.reference(), bc1.read()))
-+           list(zip(bc1.referencePositions(), bc1.reference(), bc1.read())))
- 
-         bc2 = b.clippedTo(2207, 2215)
-         EQ([(2214, 'C', 'C'),
-@@ -203,7 +206,7 @@
-             (2209, 'T', 'T'),
-             (2208, 'G', '-'),
-             (2207, 'G', 'G')],
--           zip(bc2.referencePositions(), bc2.reference(), bc2.read()))
-+           list(zip(bc2.referencePositions(), bc2.reference(), bc2.read())))
- 
-         bc3 = b.clippedTo(2209, 2214)
-         EQ([(2213, 'A', 'A'),
-@@ -211,7 +214,7 @@
-             (2211, 'G', 'G'),
-             (2210, 'C', 'C'),
-             (2209, 'T', 'T')],
--           zip(bc3.referencePositions(), bc3.reference(), bc3.read()))
-+           list(zip(bc3.referencePositions(), bc3.reference(), bc3.read())))
- 
- 
-         # Test clipping in a large deletion
-@@ -259,15 +262,15 @@
-     def testClippingsVsBaxData(self):
-         self.f.attach(self.BAX_FILE)
-         for aln in [self.alns[52], self.alns[8]]:
--            for cS in xrange(aln.tStart, aln.tEnd + 1):
--                for cE in xrange(cS + 1, min(aln.tEnd, cS + 10)):
-+            for cS in range(aln.tStart, aln.tEnd + 1):
-+                for cE in range(cS + 1, min(aln.tEnd, cS + 10)):
-                     ca = aln.clippedTo(cS, cE)
-                     EQ(ca.zmwRead.basecalls(),
-                        ca.read(aligned=False, orientation="native"))
- 
-     def testReadsInRange(self):
-         wLen = 1000
--        for wStart in xrange(0, 50000, wLen):
-+        for wStart in range(0, 50000, wLen):
-             wEnd = wStart + wLen
-             expectedNames = set([ a.readName for a in self.alns
-                                   if (a.referenceName == "lambda_NEB3011" and
---- python-pbcore.orig/tests/test_pbcore_io_BarcodeH5Reader.py
-+++ python-pbcore/tests/test_pbcore_io_BarcodeH5Reader.py
-@@ -1,3 +1,5 @@
-+from __future__ import absolute_import
-+from __future__ import print_function
- import nose.tools
- import numpy
- import numpy.testing
-@@ -6,6 +8,7 @@
- 
- from pbcore.data import MOVIE_NAME_BC
- from pbcore.io.BarcodeH5Reader import BarcodeH5Reader, BarcodeH5Fofn, MPBarcodeH5Reader, LabeledZmw
-+from six.moves import map
- 
- class TestBarcodeH5Reader(object):
-     """Tests of BarcodeH5Reader against a generic BarcodeH5 file
-@@ -13,8 +16,8 @@
- 
-     def __init__(self):
-         bcFiles = pbcore.data.getBcH5s()
--        print bcFiles
--        self.bc1, self.bc2, self.bc3 = map(BarcodeH5Reader, bcFiles)
-+        print(bcFiles)
-+        self.bc1, self.bc2, self.bc3 = list(map(BarcodeH5Reader, bcFiles))
- 
-     def test_BarcodeH5Reader_basicTest(self):
-         """Test that BcH5Reader correctly sets movie name, barcode labels, and hole numbers
-@@ -48,15 +51,15 @@
-         """Test that BcH5Reader correctly iterates over it's labeled ZMWs
-         """
- 
--        labeledZmws1 = [ lZmw for lZmw in self.bc1.labeledZmws.values() ]
-+        labeledZmws1 = [ lZmw for lZmw in list(self.bc1.labeledZmws.values()) ]
-         sortedZmws1 = sorted(labeledZmws1, key=lambda z: z.holeNumber)
-         nose.tools.assert_equal(sortedZmws1, list(self.bc1))
- 
--        labeledZmws2 = [ lZmw for lZmw in self.bc2.labeledZmws.values() ]
-+        labeledZmws2 = [ lZmw for lZmw in list(self.bc2.labeledZmws.values()) ]
-         sortedZmws2 = sorted(labeledZmws2, key=lambda z: z.holeNumber)
-         nose.tools.assert_equal(sortedZmws2, list(self.bc2))
- 
--        labeledZmws3 = [ lZmw for lZmw in self.bc3.labeledZmws.values() ]
-+        labeledZmws3 = [ lZmw for lZmw in list(self.bc3.labeledZmws.values()) ]
-         sortedZmws3 = sorted(labeledZmws3, key=lambda z: z.holeNumber)
-         nose.tools.assert_equal(sortedZmws3, list(self.bc3))
- 
-@@ -66,9 +69,9 @@
- 
-     def __init__(self):
-         bcFofn = pbcore.data.getBcFofn()
--        print bcFofn
-+        print(bcFofn)
-         self.bcFofn = BarcodeH5Fofn(bcFofn)
--        print self.bcFofn
-+        print(self.bcFofn)
- 
-     def test_BasH5Fofn_basicTest(self):
-         """Test that BcH5Fofn correctly sets movie name, barcode labels, and hole numbers
---- python-pbcore.orig/tests/test_pbcore_io_BasH5Collection.py
-+++ python-pbcore/tests/test_pbcore_io_BasH5Collection.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- from nose.tools import assert_equal, assert_true, assert_false
- from numpy.testing import assert_array_equal
- from StringIO import StringIO
---- python-pbcore.orig/tests/test_pbcore_io_BasH5Reader.py
-+++ python-pbcore/tests/test_pbcore_io_BasH5Reader.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- import inspect
- import os
- 
-@@ -10,6 +11,8 @@
- 
- from pbcore.io.BasH5IO import BasH5Reader, Zmw, ZmwRead, CCSZmwRead
- from pbcore.chemistry import ChemistryLookupError
-+from six.moves import map
-+from six.moves import zip
- 
- class TestBasH5Reader_14:
-     """Tests of BasH5Reader against a 1.4 bas.h5 file, no multipart with
-@@ -19,7 +22,7 @@
-     def __init__(self):
-         self.cmpH5 = pbcore.io.CmpH5Reader(pbcore.data.getCmpH5())
-         basFiles = pbcore.data.getBasH5s()
--        self.bas1, self.bas2 = map(pbcore.io.BasH5Reader, basFiles)
-+        self.bas1, self.bas2 = list(map(pbcore.io.BasH5Reader, basFiles))
- 
-     def test_BasH5Reader_basicTest(self):
-         """Test that BasH5Reader correctly sets moviename, identifies the
-@@ -344,8 +347,8 @@
-         productivities = {}
-         for filename in self.baxh5_filenames:
-             f = h5py.File(filename, 'r')
--            hn_to_prod = dict(zip(f["PulseData/BaseCalls/ZMW/HoleNumber"],
--                                  f["PulseData/BaseCalls/ZMWMetrics/Productivity"]))
-+            hn_to_prod = dict(list(zip(f["PulseData/BaseCalls/ZMW/HoleNumber"],
-+                                  f["PulseData/BaseCalls/ZMWMetrics/Productivity"])))
-             productivities.update(hn_to_prod)
-             f.close()
- 
---- python-pbcore.orig/tests/test_pbcore_io_BlasrIO.py
-+++ python-pbcore/tests/test_pbcore_io_BlasrIO.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- 
- from pbcore.io import M4Reader, M5Reader
- import pbcore.data as D
---- python-pbcore.orig/tests/test_pbcore_io_FastaIO.py
-+++ python-pbcore/tests/test_pbcore_io_FastaIO.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- from nose.tools import assert_equal, assert_true, assert_false
- from pbcore import data
- from pbcore.io import FastaReader, FastaWriter, FastaRecord
---- python-pbcore.orig/tests/test_pbcore_io_FastaTable.py
-+++ python-pbcore/tests/test_pbcore_io_FastaTable.py
-@@ -1,6 +1,8 @@
-+from __future__ import absolute_import
- from nose.tools import assert_equal, assert_true, assert_false
- from pbcore import data
- from pbcore.io import FastaReader, FastaWriter, IndexedFastaReader
-+from six.moves import zip
- 
- 
- class TestIndexedFastaReader:
---- python-pbcore.orig/tests/test_pbcore_io_FastqIO.py
-+++ python-pbcore/tests/test_pbcore_io_FastqIO.py
-@@ -1,9 +1,11 @@
-+from __future__ import absolute_import
- from nose.tools import assert_equal, assert_true, assert_false
- from numpy.testing import assert_array_equal
- from pbcore import data
- from StringIO import StringIO
- 
- from pbcore.io.FastqIO import *
-+from six.moves import range
- 
- 
- # Test QV <-> string conversion routines
-@@ -12,7 +14,7 @@
-         self.ascii = \
-             "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`" + \
-             "abcdefghijklmnopqrstuvwxyz{|}~"
--        self.qvs = range(0, 94)
-+        self.qvs = list(range(0, 94))
- 
-     def testAsciiFromQvs(self):
-         assert_equal(self.ascii, asciiFromQvs(self.qvs))
-@@ -107,7 +109,7 @@
-     def test_eq(self):
-         header = 'r1'
-         seq = 'ACGT'
--        qvs = list(xrange(10, 10 + len(seq)))
-+        qvs = list(range(10, 10 + len(seq)))
-         r1 = FastqRecord(header, seq, qvs)
-         r2 = FastqRecord(header, seq, qvs)
-         assert_true(r1 == r2)
-@@ -116,7 +118,7 @@
-     def test_not_equal(self):
-         header = 'r1'
-         seq = 'ACGT'
--        qvs = list(xrange(10, 10 + len(seq)))
-+        qvs = list(range(10, 10 + len(seq)))
-         r1 = FastqRecord(header, seq, qvs)
-         r2 = FastqRecord('r2', seq, qvs)
-         assert_true(r1 != r2)
-@@ -138,12 +140,12 @@
-     def test_readFastq1(self):
-         r1 = FastqReader(self.fastq1)
-         l = list(r1)
--        assert_equal([FastqRecord("seq1", "GATTACA", range(22, 29))], l)
-+        assert_equal([FastqRecord("seq1", "GATTACA", list(range(22, 29)))], l)
- 
-     def test_readFastq2(self):
-         r2 = FastqReader(self.fastq2)
-         l = list(r2)
--        assert_equal([FastqRecord("seq1", "GATTACA", range(22, 29)),
-+        assert_equal([FastqRecord("seq1", "GATTACA", list(range(22, 29))),
-                       FastqRecord("seq2", "CATTAGA", [31]*7) ],
-                      l)
- 
---- python-pbcore.orig/tests/test_pbcore_io_FofnIO.py
-+++ python-pbcore/tests/test_pbcore_io_FofnIO.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- from nose.tools import assert_equal, assert_true, assert_false
- from numpy.testing import assert_array_equal
- from StringIO import StringIO
---- python-pbcore.orig/tests/test_pbcore_io_GffIO.py
-+++ python-pbcore/tests/test_pbcore_io_GffIO.py
-@@ -1,7 +1,9 @@
-+from __future__ import absolute_import
- from nose.tools import assert_equal, assert_raises
- from StringIO import StringIO
- from pbcore.io import GffWriter, Gff3Record, GffReader
- from pbcore import data
-+from six.moves import zip
- 
- class TestGff3Record:
- 
---- python-pbcore.orig/tests/test_pbcore_io_rangeQueries.py
-+++ python-pbcore/tests/test_pbcore_io_rangeQueries.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- from nose.tools import assert_equal
- from numpy.testing import assert_array_equal
- 
-@@ -7,6 +8,7 @@
- 
- import bisect
- from numpy import *
-+from six.moves import range
- 
- def brute_force_lm_search(vec, val):
-     if (val not in vec):
-@@ -56,7 +58,7 @@
-         # This is a brute force check that reads in range returns the
-         # right answer for 50-base windows of lambda
-         for BLOCKSIZE in [50, 77]:
--            for winStart in xrange(0, 45000, BLOCKSIZE):
-+            for winStart in range(0, 45000, BLOCKSIZE):
-                 winEnd = winStart + BLOCKSIZE
-                 assert_array_equal(brute_force_reads_in_range(winStart, winEnd, self.cmpH5.tStart, self.cmpH5.tEnd),
-                                    self.cmpH5.readsInRange(1, winStart, winEnd, justIndices=True))
-@@ -65,7 +67,7 @@
- 
-     def test_coverage_in_range2(self):
-         # Brute force over lambda
--        for winStart in xrange(0, 45000, 50):
-+        for winStart in range(0, 45000, 50):
-             winEnd = winStart + 1
-             assert_array_equal([len(brute_force_reads_in_range(winStart, winEnd, self.cmpH5.tStart, self.cmpH5.tEnd))],
-                                RQ.getCoverageInRange(self.cmpH5, (1, winStart, winEnd)))
---- python-pbcore.orig/tests/test_pbcore_io_unaligned_bam.py
-+++ python-pbcore/tests/test_pbcore_io_unaligned_bam.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- from numpy.testing import (assert_array_almost_equal as ASIM,
-                            assert_array_equal        as AEQ)
- from nose.tools import (nottest,
---- python-pbcore.orig/tests/test_pbcore_util_sequences.py
-+++ python-pbcore/tests/test_pbcore_util_sequences.py
-@@ -1,3 +1,4 @@
-+from __future__ import absolute_import
- import nose
- from nose.tools import assert_equal, assert_true, assert_false
- from pbcore import sequence
diff --git a/debian/patches/series b/debian/patches/series
index 3b9561b..20bb1a0 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,3 +1,3 @@
-modernize-python.patch
+support-python3.patch
 remove-convenience-script.patch
 doc-theme.patch
diff --git a/debian/patches/support-python3.patch b/debian/patches/support-python3.patch
new file mode 100644
index 0000000..32fdc85
--- /dev/null
+++ b/debian/patches/support-python3.patch
@@ -0,0 +1,38 @@
+Description: Run 2to3 at build time to allow making a Python 3 package
+ Following directions at <https://pythonhosted.org/setuptools/python3.html>
+ we run 2to3 at build time to more easily support both Python 2 and Python 3.
+ In doing this, we encountered an issue when converting the doctests, similar
+ to the one described at <http://bugs.python.org/issue12611>. To work around
+ this, we disable doctest conversion as described in the first link above.
+Author: Afif Elghraoui <afif at ghraoui.name>
+Forwarded: no
+Last-Update: 2015-08-04
+--- python-pbcore.orig/setup.py
++++ python-pbcore/setup.py
+@@ -1,15 +1,13 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ 
+-from setuptools import setup, Extension, find_packages
++from setuptools import setup, Extension, find_packages, setuptools
+ import sys
+ 
+-if ("install" in sys.argv) and sys.version_info < (2, 7, 0):
+-    print "pbcore requires Python 2.7"
+-    sys.exit(-1)
++setuptools.run_2to3_on_doctests = False
+ 
+ globals = {}
+-execfile("pbcore/__init__.py", globals)
++exec(open("pbcore/__init__.py").read(), globals)
+ __VERSION__ = globals["__VERSION__"]
+ 
+ setup(
+@@ -29,6 +27,7 @@
+                                'data/datasets/*.*',
+                                'data/datasets/yieldtest/*.*']
+                                },
++    use_2to3 = True,
+     zip_safe = False,
+     entry_points = { "console_scripts" : [ ".open = pbcore.io.opener:entryPoint" ] },
+     scripts=['bin/dataset.py'],
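
For context, the net effect of the hunks above on setup.py is roughly the
following sketch. It assumes a setuptools release that still supports the
use_2to3 machinery; the package name, version, and package list below are
placeholders, not pbcore's actual metadata:

    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    # Minimal sketch of a setup.py that converts sources with 2to3 at build
    # time (placeholder metadata, not pbcore's real setup.py).
    import setuptools
    from setuptools import setup, find_packages

    # Skip 2to3 conversion of doctests to avoid the problem described at
    # http://bugs.python.org/issue12611.
    setuptools.run_2to3_on_doctests = False

    setup(
        name="examplepkg",        # placeholder
        version="0.0.1",          # placeholder
        packages=find_packages(),
        use_2to3=True,            # run 2to3 over the sources during build
        zip_safe=False,
    )

With this in place, building under Python 3 (for example, "python3 setup.py
build") runs 2to3 over the copies placed in the build directory, so the
Python 2 source tree itself is left untouched.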

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-pbcore.git