[med-svn] [python-pbcore] 08/12: Add patches to globally rename dataset.py to pbdataset
Afif Elghraoui
afif-guest at moszumanska.debian.org
Sun Sep 20 08:25:27 UTC 2015
This is an automated email from the git hooks/post-receive script.
afif-guest pushed a commit to branch master
in repository python-pbcore.
commit d154b16680dd46c93a03a921999b93ba7bb912e4
Author: Afif Elghraoui <afif at ghraoui.name>
Date: Sun Sep 20 00:14:28 2015 -0700
Add patches to globally rename dataset.py to pbdataset
---
debian/patches/rename-datasetpy.patch | 179 ++++++++++++++++++++++++++++++++++
debian/patches/series | 1 +
2 files changed, 180 insertions(+)
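For readers following the packaging workflow: a rename patch like the one
below is typically generated with quilt. A minimal sketch, assuming the
standard debian/patches layout; the sed pass is illustrative only and needs
a manual review afterwards, since a blind substitution can touch strings
that do not refer to the script:

    export QUILT_PATCHES=debian/patches

    quilt push -a                      # apply the existing series first
    quilt new rename-datasetpy.patch   # start a new patch on top
    quilt add tests/test_pbdataset.py tests/test_pbdataset_subtypes.py \
              doc/pbcore.io.dataset.rst bin/dataset.py

    # illustrative global rename; check the result by hand
    sed -i 's/dataset\.py/pbdataset/g' tests/test_pbdataset.py \
        tests/test_pbdataset_subtypes.py doc/pbcore.io.dataset.rst \
        bin/dataset.py

    quilt refresh                      # write the diff into debian/patches/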
diff --git a/debian/patches/rename-datasetpy.patch b/debian/patches/rename-datasetpy.patch
new file mode 100644
index 0000000..04e1ebe
--- /dev/null
+++ b/debian/patches/rename-datasetpy.patch
@@ -0,0 +1,179 @@
+Description: Rename dataset.py to pbdataset
+ dataset.py is not a name suitable for system-wide installation. It
+ is referenced by several parts of the source distribution here (especially
+ the tests), so this patch corrects those references. Upstream is not interested
+ in renaming, since system-wide installation was not their intention for
+ this package [1].
+Author: Afif Elghraoui
+Forwarded: https://github.com/PacificBiosciences/pbcore/issues/21#issuecomment-141742545
+Last-Update: 2015-09-20
+--- python-pbcore.orig/tests/test_pbdataset.py
++++ python-pbcore/tests/test_pbdataset.py
+@@ -22,7 +22,7 @@
+
+ log = logging.getLogger(__name__)
+
+-datasetpy_not_available = subprocess.call(["which", "dataset.py"])
++datasetpy_not_available = subprocess.call(["which", "pbdataset"])
+
+ class TestDataSet(unittest.TestCase):
+ """Unit and integrationt tests for the DataSet class and \
+@@ -86,7 +86,7 @@
+ "pbdataset script is not available")
+ def test_split_cli(self):
+ outdir = tempfile.mkdtemp(suffix="dataset-unittest")
+- cmd = "dataset.py split --outdir {o} --contigs --chunks 2 {d}".format(
++ cmd = "pbdataset split --outdir {o} --contigs --chunks 2 {d}".format(
+ o=outdir,
+ d=data.getXml(8))
+ log.debug(cmd)
+@@ -102,7 +102,7 @@
+ def test_create_cli(self):
+ log.debug("Absolute")
+ outdir = tempfile.mkdtemp(suffix="dataset-unittest")
+- cmd = "dataset.py create --type AlignmentSet {o} {i1} {i2}".format(
++ cmd = "pbdataset create --type AlignmentSet {o} {i1} {i2}".format(
+ o=os.path.join(outdir, 'pbalchemysim.alignmentset.xml'),
+ i1=data.getXml(8), i2=data.getXml(11))
+ log.debug(cmd)
+@@ -113,7 +113,7 @@
+
+ log.debug("Relative")
+ outdir = tempfile.mkdtemp(suffix="dataset-unittest")
+- cmd = ("dataset.py create --relative --type AlignmentSet "
++ cmd = ("pbdataset create --relative --type AlignmentSet "
+ "{o} {i1} {i2}".format(
+ o=os.path.join(outdir, 'pbalchemysim.alignmentset.xml'),
+ i1=data.getXml(8),
+--- python-pbcore.orig/tests/test_pbdataset_subtypes.py
++++ python-pbcore/tests/test_pbdataset_subtypes.py
+@@ -270,7 +270,7 @@
+ outdir = tempfile.mkdtemp(suffix="dataset-unittest")
+ datafile = os.path.join(outdir, "merged.bam")
+ xmlfile = os.path.join(outdir, "merged.xml")
+- cmd = "dataset.py consolidate {i} {d} {x}".format(i=data.getXml(12),
++ cmd = "pbdataset consolidate {i} {d} {x}".format(i=data.getXml(12),
+ d=datafile,
+ x=xmlfile)
+ log.debug(cmd)
+@@ -304,7 +304,7 @@
+ outdir = tempfile.mkdtemp(suffix="dataset-unittest")
+ datafile = os.path.join(outdir, "merged.bam")
+ xmlfile = os.path.join(outdir, "merged.xml")
+- cmd = "dataset.py consolidate --numFiles 2 {i} {d} {x}".format(
++ cmd = "pbdataset consolidate --numFiles 2 {i} {d} {x}".format(
+ i=testFile, d=datafile, x=xmlfile)
+ log.debug(cmd)
+ o, r, m = backticks(cmd)
+--- python-pbcore.orig/doc/pbcore.io.dataset.rst
++++ python-pbcore/doc/pbcore.io.dataset.rst
+@@ -9,20 +9,20 @@
+ The API and console entry points are designed with the set operations one might
+ perform on the various types of data held by a DataSet XML in mind: merge,
+ split, write etc. While various types of DataSets can be found in XML files,
+-the API (and in a way the console entry point, dataset.py) has DataSet as its
++the API (and in a way the console entry point, pbdataset) has DataSet as its
+ base type, with various subtypes extending or replacing functionality as
+ needed.
+
+
+ Console Entry Point Usage
+ =============================
+-The following entry points are available through the main script: dataset.py::
++The following entry points are available through the main script: pbdataset::
+
+- usage: dataset.py [-h] [-v] [--debug]
++ usage: pbdataset [-h] [-v] [--debug]
+ {create,filter,merge,split,validate,loadstats,consolidate}
+ ...
+
+- Run dataset.py by specifying a command.
++ Run pbdataset by specifying a command.
+
+ optional arguments:
+ -h, --help show this help message and exit
+@@ -35,7 +35,7 @@
+
+ Create::
+
+- usage: dataset.py create [-h] [--type DSTYPE] [--novalidate] [--relative]
++ usage: pbdataset create [-h] [--type DSTYPE] [--novalidate] [--relative]
+ outfile infile [infile ...]
+
+ Create an XML file from a fofn or bam
+@@ -53,7 +53,7 @@
+
+ Filter::
+
+- usage: dataset.py filter [-h] infile outfile filters [filters ...]
++ usage: pbdataset filter [-h] infile outfile filters [filters ...]
+
+ Add filters to an XML file
+
+@@ -67,7 +67,7 @@
+
+ Union::
+
+- usage: dataset.py union [-h] outfile infiles [infiles ...]
++ usage: pbdataset union [-h] outfile infiles [infiles ...]
+
+ Combine XML (and BAM) files
+
+@@ -80,7 +80,7 @@
+
+ Validate::
+
+- usage: dataset.py validate [-h] infile
++ usage: pbdataset validate [-h] infile
+
+ Validate ResourceId files (XML validation only available in testing)
+
+@@ -92,7 +92,7 @@
+
+ Load PipeStats::
+
+- usage: dataset.py loadstats [-h] [--outfile OUTFILE] infile statsfile
++ usage: pbdataset loadstats [-h] [--outfile OUTFILE] infile statsfile
+
+ Load an sts.xml file into a DataSet XML file
+
+@@ -106,7 +106,7 @@
+
+ Split::
+
+- usage: dataset.py split [-h] [--contigs] [--chunks CHUNKS] [--subdatasets]
++ usage: pbdataset split [-h] [--contigs] [--chunks CHUNKS] [--subdatasets]
+ [--outdir OUTDIR]
+ infile ...
+
+@@ -145,8 +145,8 @@
+
+ 2. Merge the files into a FOFN-like dataset (bams aren't touched)::
+
+- # dataset.py merge <out_fn> <in_fn> [<in_fn> <in_fn> ...]
+- dataset.py merge merged.alignmentset.xml movie1.alignmentset.xml movie2.alignmentset.xml
++ # pbdataset merge <out_fn> <in_fn> [<in_fn> <in_fn> ...]
++ pbdataset merge merged.alignmentset.xml movie1.alignmentset.xml movie2.alignmentset.xml
+
+ 3. Split the dataset into chunks by contig (rname) (bams aren't touched). Note
+ that supplying output files splits the dataset into that many output files
+@@ -155,7 +155,7 @@
+ automatically. Specifying a number of chunks instead will produce that many
+ files, with contig or even sub contig (reference window) splitting.::
+
+- dataset.py split --contigs --chunks 8 merged.alignmentset.xml
++ pbdataset split --contigs --chunks 8 merged.alignmentset.xml
+
+ 4. Quiver then consumes these chunks::
+
+--- python-pbcore.orig/bin/dataset.py
++++ python-pbcore/bin/dataset.py
+@@ -38,7 +38,7 @@
+ return parser
+
+ def get_parser():
+- description = 'Run dataset.py by specifying a command.'
++ description = 'Run pbdataset by specifying a command.'
+ parser = argparse.ArgumentParser(version=__VERSION__,
+ description=description)
+ parser.add_argument("--debug", default=False, action='store_true',
diff --git a/debian/patches/series b/debian/patches/series
index 4cfcc9a..99e9723 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,3 +1,4 @@
doc-theme.patch
enable-build-time-testing.patch
skip-integration-tests.patch
+rename-datasetpy.patch
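With the series entry in place, a sanity check that the full stack still
applies cleanly (again assuming QUILT_PATCHES=debian/patches):

    quilt series    # rename-datasetpy.patch should be listed last
    quilt push -a   # should finish with rename-datasetpy.patch on top
    quilt pop -a    # unapply before committing further changes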
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-pbcore.git