[med-svn] r16783 - in trunk/packages/python-biopython/trunk/debian: . Tests_fixme
Andreas Tille
tille at moszumanska.debian.org
Fri Apr 25 07:27:49 UTC 2014
Author: tille
Date: 2014-04-25 07:27:49 +0000 (Fri, 25 Apr 2014)
New Revision: 16783
Added:
trunk/packages/python-biopython/trunk/debian/Tests_fixme/
trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failed_tests.py
trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failing_tests
Modified:
trunk/packages/python-biopython/trunk/debian/README.test
trunk/packages/python-biopython/trunk/debian/python-biopython-doc.docs
trunk/packages/python-biopython/trunk/debian/rules
Log:
Since we are running the test suite directly rather than via setup.py, failing tests would stop the build. These tests are now separated out and left for further inspection via a script that runs all of them.
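For illustration, the runner exits non-zero when any test fails, so a direct
call such as the following (a sketch, not the exact invocation used in
debian/rules) aborts the build:

    cd Tests && python run_tests.py --offline
    # any failing test makes run_tests.py exit with status 1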
Modified: trunk/packages/python-biopython/trunk/debian/README.test
===================================================================
--- trunk/packages/python-biopython/trunk/debian/README.test 2014-04-25 06:40:31 UTC (rev 16782)
+++ trunk/packages/python-biopython/trunk/debian/README.test 2014-04-25 07:27:49 UTC (rev 16783)
@@ -7,4 +7,14 @@
for different reasons from the Debian packaging you might try your luck
in Tests_avoid.
+
+It has turned out that some tests fail during the build process for
+no obvious reason. To help track this down, the script
+   Tests_fixme/run_failing_tests
+is provided to simplify further inspection of the problem.
+It copies the tests that failed during the build, together with the
+data they need, into a temporary directory and runs them with all
+available Python interpreters.
+
+
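+A hypothetical invocation (assuming the python-biopython-doc package is
+installed, which ships this script together with the test data):
+
+    cd /usr/share/doc/python-biopython-doc/Tests_fixme
+    sh run_failing_tests
+    # the script prints the temporary directory holding the *.log files
+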
-- Andreas Tille <tille at debian.org> Thu, 06 Mar 2014 11:51:27 +0100
Added: trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failed_tests.py
===================================================================
--- trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failed_tests.py (rev 0)
+++ trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failed_tests.py 2014-04-25 07:27:49 UTC (rev 16783)
@@ -0,0 +1,435 @@
+#!/usr/bin/env python
+# This code is part of the Biopython distribution and governed by its
+# license. Please see the LICENSE file that should have been included
+# as part of this package.
+"""Run a set of PyUnit-based regression tests.
+
+This will find all modules whose name is "test_*.py" in the test
+directory, and run them. Various command line options provide
+additional facilities.
+
+Command line options:
+
+--help -- show usage info
+--offline -- skip tests which require internet access
+-g;--generate -- write the output file for a test instead of comparing it.
+ The name of the test to write the output for must be
+ specified.
+-v;--verbose -- run tests with higher verbosity (does not affect our
+ print-and-compare style unit tests).
+<test_name> -- supply the name of one (or more) tests to be run.
+ The .py file extension is optional.
+doctest -- run the docstring tests.
+By default, all tests are run.
+"""
+
+from __future__ import print_function
+
+# The default verbosity (not verbose)
+VERBOSITY = 0
+
+# standard modules
+import sys
+import os
+import re
+import getopt
+import time
+import traceback
+import unittest
+import doctest
+import distutils.util
+import gc
+from io import BytesIO
+
+# Note, we want to be able to call this script BEFORE
+# Biopython is installed, so we can't use this:
+# from Bio._py3k import StringIO
+try:
+ from StringIO import StringIO # Python 2 (byte strings)
+except ImportError:
+ from io import StringIO # Python 3 (unicode strings)
+
+
+def is_pypy():
+ import platform
+ try:
+        if platform.python_implementation() == 'PyPy':
+ return True
+ except AttributeError:
+ #New in Python 2.6, not in Jython yet either
+ pass
+ return False
+
+
+def is_numpy():
+ if is_pypy():
+ return False
+ try:
+ import numpy
+ del numpy
+ return True
+ except ImportError:
+ return False
+
+# This is the list of modules containing docstring tests.
+# If you develop docstring tests for other modules, please add
+# those modules here. Please sort names alphabetically.
+DOCTEST_MODULES = [ ]
+#Silently ignore any doctests for modules requiring numpy!
+if is_numpy():
+ DOCTEST_MODULES.extend([ ])
+
+
+try:
+ import sqlite3
+ del sqlite3
+except ImportError:
+ #Missing on Jython or Python 2.4
+    #Guard the removal: this copy ships an emptied DOCTEST_MODULES list
+    for _mod in ("Bio.SeqIO", "Bio.SearchIO"):
+        if _mod in DOCTEST_MODULES: DOCTEST_MODULES.remove(_mod)
+
+#Skip Bio.bgzf doctest for broken gzip, see http://bugs.python.org/issue17666
+def _have_bug17666():
+ """Debug function to check if Python's gzip is broken (PRIVATE).
+
+ Checks for http://bugs.python.org/issue17666 expected in Python 2.7.4,
+ 3.2.4 and 3.3.1 only.
+ """
+ if os.name == 'java':
+ #Jython not affected
+ return False
+ import gzip
+ #Would like to use byte literal here:
+ bgzf_eof = "\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00BC" + \
+ "\x02\x00\x1b\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ if sys.version_info[0] >= 3:
+ import codecs
+ bgzf_eof = codecs.latin_1_encode(bgzf_eof)[0]
+ h = gzip.GzipFile(fileobj=BytesIO(bgzf_eof))
+ try:
+ data = h.read()
+ h.close()
+ assert not data, "Should be zero length, not %i" % len(data)
+ return False
+ except TypeError as err:
+ #TypeError: integer argument expected, got 'tuple'
+ h.close()
+ return True
+if _have_bug17666():
+    if "Bio.bgzf" in DOCTEST_MODULES: DOCTEST_MODULES.remove("Bio.bgzf")
+
+#HACK: Since Python 2.5 under Windows has slightly different str(float) output,
+#we're removing doctests that may fail because of this
+if sys.platform == "win32" and sys.version_info < (2, 6):
+    for _mod in ("Bio.SearchIO._model.hit", "Bio.SearchIO._model.hsp"):
+        if _mod in DOCTEST_MODULES: DOCTEST_MODULES.remove(_mod)
+
+system_lang = os.environ.get('LANG', 'C') # Cache this
+
+
+def main(argv):
+ """Run tests, return number of failures (integer)."""
+ # insert our paths in sys.path:
+ # ../build/lib.*
+ # ..
+ # Q. Why this order?
+ # A. To find the C modules (which are in ../build/lib.*/Bio)
+ # Q. Then, why ".."?
+ # A. Because Martel may not be in ../build/lib.*
+ test_path = sys.path[0] or "."
+ source_path = os.path.abspath("%s/.." % test_path)
+ sys.path.insert(1, source_path)
+ build_path = os.path.abspath("%s/../build/lib.%s-%s" % (
+ test_path, distutils.util.get_platform(), sys.version[:3]))
+ if os.access(build_path, os.F_OK):
+ sys.path.insert(1, build_path)
+
+ # Using "export LANG=C" (which should work on Linux and similar) can
+ # avoid problems detecting optional command line tools on
+ # non-English OS (we may want 'command not found' in English).
+ # HOWEVER, we do not want to change the default encoding which is
+ # rather important on Python 3 with unicode.
+ #lang = os.environ['LANG']
+
+ # get the command line options
+ try:
+ opts, args = getopt.getopt(argv, 'gv', ["generate", "verbose",
+ "doctest", "help", "offline"])
+ except getopt.error as msg:
+ print(msg)
+ print(__doc__)
+ return 2
+
+ verbosity = VERBOSITY
+
+ # deal with the options
+ for o, a in opts:
+ if o == "--help":
+ print(__doc__)
+ return 0
+ if o == "--offline":
+ print("Skipping any tests requiring internet access")
+ #This is a bit of a hack...
+ import requires_internet
+ requires_internet.check.available = False
+ #The check() function should now report internet not available
+ if o == "-g" or o == "--generate":
+ if len(args) > 1:
+ print("Only one argument (the test name) needed for generate")
+ print(__doc__)
+ return 2
+ elif len(args) == 0:
+ print("No test name specified to generate output for.")
+ print(__doc__)
+ return 2
+ # strip off .py if it was included
+ if args[0][-3:] == ".py":
+ args[0] = args[0][:-3]
+
+ test = ComparisonTestCase(args[0])
+ test.generate_output()
+ return 0
+
+ if o == "-v" or o == "--verbose":
+ verbosity = 2
+
+ # deal with the arguments, which should be names of tests to run
+ for arg_num in range(len(args)):
+ # strip off the .py if it was included
+ if args[arg_num][-3:] == ".py":
+ args[arg_num] = args[arg_num][:-3]
+
+ print("Python version: %s" % sys.version)
+ print("Operating system: %s %s" % (os.name, sys.platform))
+
+ # run the tests
+ runner = TestRunner(args, verbosity)
+ return runner.run()
+
+
+class ComparisonTestCase(unittest.TestCase):
+    """Run a print-and-compare test, checking its output against the expected output.
+ """
+
+ def __init__(self, name, output=None):
+ """Initialize with the test to run.
+
+ Arguments:
+ o name - The name of the test. The expected output should be
+ stored in the file output/name.
+ o output - The output that was generated when this test was run.
+ """
+ unittest.TestCase.__init__(self)
+ self.name = name
+ self.output = output
+
+ def shortDescription(self):
+ return self.name
+
+ def runTest(self):
+ # check the expected output to be consistent with what
+ # we generated
+ outputdir = os.path.join(TestRunner.testdir, "output")
+ outputfile = os.path.join(outputdir, self.name)
+ try:
+ if sys.version_info[0] >= 3:
+ #Python 3 problem: Can't use utf8 on output/test_geo
+ #due to micro (\xb5) and degrees (\xb0) symbols
+                expected = open(outputfile, encoding="latin-1")
+ else:
+ expected = open(outputfile, 'rU')
+ except IOError:
+ self.fail("Warning: Can't open %s for test %s" % (outputfile, self.name))
+
+ self.output.seek(0)
+ # first check that we are dealing with the right output
+ # the first line of the output file is the test name
+ expected_test = expected.readline().strip()
+
+ if expected_test != self.name:
+ expected.close()
+ raise ValueError("\nOutput: %s\nExpected: %s"
+ % (self.name, expected_test))
+
+ # now loop through the output and compare it to the expected file
+ while True:
+ expected_line = expected.readline()
+ output_line = self.output.readline()
+
+ # stop looping if either of the info handles reach the end
+            if not expected_line or not output_line:
+ # make sure both have no information left
+ assert expected_line == '', "Unread: %s" % expected_line
+ assert output_line == '', "Extra output: %s" % output_line
+ break
+
+ # normalize the newlines in the two lines
+ expected_line = expected_line.strip("\r\n")
+ output_line = output_line.strip("\r\n")
+
+ # if the line is a doctest or PyUnit time output like:
+ # Ran 2 tests in 0.285s
+ # ignore it, so we don't have problems with different running times
+ if re.compile("^Ran [0-9]+ tests? in ").match(expected_line):
+ pass
+ # otherwise make sure the two lines are the same
+ elif expected_line != output_line:
+ expected.close()
+ raise ValueError("\nOutput : %s\nExpected: %s"
+ % (repr(output_line), repr(expected_line)))
+ expected.close()
+
+ def generate_output(self):
+ """Generate the golden output for the specified test.
+ """
+ outputdir = os.path.join(TestRunner.testdir, "output")
+ outputfile = os.path.join(outputdir, self.name)
+
+ output_handle = open(outputfile, 'w')
+
+ # write the test name as the first line of the output
+ output_handle.write(self.name + "\n")
+
+ # remember standard out so we can reset it after we are done
+ save_stdout = sys.stdout
+ try:
+ # write the output from the test into a string
+ sys.stdout = output_handle
+ __import__(self.name)
+ finally:
+ output_handle.close()
+ # return standard out to its normal setting
+ sys.stdout = save_stdout
+
+
+class TestRunner(unittest.TextTestRunner):
+
+ if __name__ == '__main__':
+ file = sys.argv[0]
+ else:
+ file = __file__
+ testdir = os.path.dirname(file) or os.curdir
+
+    def __init__(self, tests=None, verbosity=0):
+ # if no tests were specified to run, we run them all
+ # including the doctests
+        self.tests = list(tests) if tests else []  # copy; avoids the shared mutable default
+ if not self.tests:
+ # Make a list of all applicable test modules.
+ names = os.listdir(TestRunner.testdir)
+ for name in names:
+ if name[:5] == "test_" and name[-3:] == ".py":
+ self.tests.append(name[:-3])
+ self.tests.sort()
+ self.tests.append("doctest")
+ if "doctest" in self.tests:
+ self.tests.remove("doctest")
+ self.tests.extend(DOCTEST_MODULES)
+ stream = StringIO()
+ unittest.TextTestRunner.__init__(self, stream,
+ verbosity=verbosity)
+
+ def runTest(self, name):
+ from Bio import MissingExternalDependencyError
+ result = self._makeResult()
+ output = StringIO()
+ # Restore the language and thus default encoding (in case a prior
+ # test changed this, e.g. to help with detecting command line tools)
+ global system_lang
+        os.environ['LANG'] = system_lang
+ # Note the current directory:
+ cur_dir = os.path.abspath(".")
+ try:
+ stdout = sys.stdout
+ sys.stdout = output
+ if name.startswith("test_"):
+ sys.stderr.write("%s ... " % name)
+ #It's either a unittest or a print-and-compare test
+ suite = unittest.TestLoader().loadTestsFromName(name)
+                if suite.countTestCases() == 0:
+ # This is a print-and-compare test instead of a
+ # unittest-type test.
+ test = ComparisonTestCase(name, output)
+ suite = unittest.TestSuite([test])
+ else:
+ #It's a doc test
+ sys.stderr.write("%s docstring test ... " % name)
+ #Can't use fromlist=name.split(".") until python 2.5+
+ module = __import__(name, None, None, name.split("."))
+ suite = doctest.DocTestSuite(module, optionflags=doctest.ELLIPSIS)
+ del module
+ suite.run(result)
+ if cur_dir != os.path.abspath("."):
+ sys.stderr.write("FAIL\n")
+ result.stream.write(result.separator1+"\n")
+ result.stream.write("ERROR: %s\n" % name)
+ result.stream.write(result.separator2+"\n")
+ result.stream.write("Current directory changed\n")
+ result.stream.write("Was: %s\n" % cur_dir)
+ result.stream.write("Now: %s\n" % os.path.abspath("."))
+ os.chdir(cur_dir)
+ if not result.wasSuccessful():
+ result.printErrors()
+ return False
+ elif result.wasSuccessful():
+ sys.stderr.write("ok\n")
+ return True
+ else:
+ sys.stderr.write("FAIL\n")
+ result.printErrors()
+ return False
+ except MissingExternalDependencyError as msg:
+ sys.stderr.write("skipping. %s\n" % msg)
+ return True
+ except Exception as msg:
+ # This happened during the import
+ sys.stderr.write("ERROR\n")
+ result.stream.write(result.separator1+"\n")
+ result.stream.write("ERROR: %s\n" % name)
+ result.stream.write(result.separator2+"\n")
+ result.stream.write(traceback.format_exc())
+ return False
+ except KeyboardInterrupt as err:
+ # Want to allow this, and abort the test
+ # (see below for special case)
+ raise err
+ except:
+ # This happens in Jython with java.lang.ClassFormatError:
+ # Invalid method Code length ...
+ sys.stderr.write("ERROR\n")
+ result.stream.write(result.separator1+"\n")
+ result.stream.write("ERROR: %s\n" % name)
+ result.stream.write(result.separator2+"\n")
+ result.stream.write(traceback.format_exc())
+ return False
+ finally:
+ sys.stdout = stdout
+ #Running under PyPy we were leaking file handles...
+ gc.collect()
+
+ def run(self):
+ """Run tests, return number of failures (integer)."""
+ failures = 0
+ startTime = time.time()
+ for test in self.tests:
+ ok = self.runTest(test)
+ if not ok:
+ failures += 1
+ total = len(self.tests)
+ stopTime = time.time()
+ timeTaken = stopTime - startTime
+ sys.stderr.write(self.stream.getvalue())
+ sys.stderr.write('-' * 70 + "\n")
+ sys.stderr.write("Ran %d test%s in %.3f seconds\n" %
+                     (total, "s" if total != 1 else "", timeTaken))
+ sys.stderr.write("\n")
+ if failures:
+ sys.stderr.write("FAILED (failures = %d)\n" % failures)
+ return failures
+
+
+if __name__ == "__main__":
+ errors = main(sys.argv[1:])
+ if errors:
+ #Doing a sys.exit(...) isn't nice if run from IDLE...
+ sys.exit(1)
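As a usage note for the script above (a sketch with a hypothetical test
name, run from a directory laid out like Tests/):

    python run_failed_tests.py -g test_example   # writes the golden file output/test_example
    head -1 output/test_example                  # its first line is the test name itself
    python run_failed_tests.py test_example      # re-runs the test and compares line by line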
Added: trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failing_tests
===================================================================
--- trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failing_tests (rev 0)
+++ trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failing_tests 2014-04-25 07:27:49 UTC (rev 16783)
@@ -0,0 +1,46 @@
+#!/bin/sh
+# This script copies those tests that were failing in the Build process
+# and the data needed by the tests into a temporary directory and runs
+# them with all available Python interpreters
+
+if [ "$ADTTMP" = "" ] ; then
+    ADTTMP=$(mktemp -d /tmp/python-biopython-failed-tests.XXXXXX)
+fi
+
+DOC=/usr/share/doc/python-biopython-doc
+TESTS=$DOC/Tests
+
+mkdir $ADTTMP/Tests
+cp -a run_failed_tests.py $ADTTMP/Tests
+
+cd $ADTTMP
+mkdir Doc
+cp -a $DOC/Doc/Tutorial.tex* Doc
+cd Tests
+mkdir Clustalw
+mkdir Fasta
+cp -a $TESTS/Fasta/f00[12]* Fasta
+mkdir GenBank
+cp -a $TESTS/GenBank/NC_005816.gb* GenBank
+mkdir Graphics
+cp -a $TESTS/Graphics/README* Graphics
+mkdir Medline
+cp -a $TESTS/Medline/pubmed_result1.txt* Medline
+mkdir Phylip
+cp -a $TESTS/Phylip/hennigian.phy* Phylip
+mkdir Quality
+cp -a $TESTS/Quality/example.fasta* Quality
+
+FAILINGTESTS="GenomeDiagram Fasttree_tool Mafft_tool ColorSpiral trie"
+for ft in $FAILINGTESTS ; do
+ cp -a $TESTS/test_${ft}.py* .
+done
+find .. -name "*.gz" -exec gunzip \{\} \;
+
+for ft in $FAILINGTESTS ; do
+ for pi in $(pyversions -i) $(py3versions -i); do
+        $pi run_failed_tests.py -v test_$ft 2>&1 | tee ../${ft}_${pi}.log
+ done
+done
+rm -fr *.pyc __pycache__
+echo "Log files of tests can be found in $ADTTMP"
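For reference, one iteration of the inner loop above expands to something
like the following on a machine with python2.7 and python3.3 installed
(versions are examples; pyversions -i and py3versions -i report whatever
interpreters are actually present):

    python2.7 run_failed_tests.py -v test_GenomeDiagram 2>&1 | tee ../GenomeDiagram_python2.7.log
    python3.3 run_failed_tests.py -v test_GenomeDiagram 2>&1 | tee ../GenomeDiagram_python3.3.log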
Property changes on: trunk/packages/python-biopython/trunk/debian/Tests_fixme/run_failing_tests
___________________________________________________________________
Added: svn:executable
+ *
Modified: trunk/packages/python-biopython/trunk/debian/python-biopython-doc.docs
===================================================================
--- trunk/packages/python-biopython/trunk/debian/python-biopython-doc.docs 2014-04-25 06:40:31 UTC (rev 16782)
+++ trunk/packages/python-biopython/trunk/debian/python-biopython-doc.docs 2014-04-25 07:27:49 UTC (rev 16783)
@@ -1,3 +1,4 @@
Doc/
debian/README.test
debian/tests/run-unit-test
+debian/Tests_fixme
Modified: trunk/packages/python-biopython/trunk/debian/rules
===================================================================
--- trunk/packages/python-biopython/trunk/debian/rules 2014-04-25 06:40:31 UTC (rev 16782)
+++ trunk/packages/python-biopython/trunk/debian/rules 2014-04-25 07:27:49 UTC (rev 16783)
@@ -8,12 +8,36 @@
#export PYBUILD_DESTDIR_python3=debian/python3-biopython/
BUILDARCH := $(shell dpkg-architecture -qDEB_BUILD_ARCH)
+
+
+# If `$(CURDIR)/setup.py test` were called, these tests would need to be excluded since they require online access
+#EXCLUDEONLINE=Entrez_online HotRand NCBI_qblast SCOP_online SeqIO_online TogoWS
+# Since `run_tests.py --offline` is used, tests requiring online connections are excluded by the test suite itself
+EXCLUDEONLINE=
+
+# avoid PAML_tools and EmbossPhylipNew since paml and embassy-phylip, respectively, are non-free
+# This would not trigger a failure but would bloat the build log with irrelevant noise
+EXCLUDENONFREE=PAML_tools EmbossPhylipNew
+
+# avoid testing tools that are not packaged for Debian yet (or may never be packaged because they are outdated)
+# This would not trigger a failure but would bloat the build log with irrelevant noise
+EXCLUDENOTPACKAGED=MSAProbs_tool NACCESS_tool PopGen_DFDist PopGen_FDist PopGen_GenePop PopGen_GenePop_EasyController PopGen_SimCoal XXmotif_tool
+
+# avoid amd64-only tools on other architectures
ifeq ($(BUILDARCH),amd64)
EXCLUDEBWA=
else
EXCLUDEBWA=BWA_tool
endif
+# avoid tests requiring database connections if no server is running while we are doing the build
+# This would not trigger a failure but would bloat the build log with irrelevant noise
+EXCLUDEDATABASE=BioSQL_MySQLdb BioSQL_psycopg2
+
+# FIXME: These tests are failing for unknown reasons and this needs to be investigated,
+# especially since test_ColorSpiral and test_trie run fine when called manually
+EXCLUDEFAILINGTESTS=GenomeDiagram Fasttree_tool Mafft_tool ColorSpiral trie
+
%:
dh $@ --with python2,python3 --buildsystem=pybuild
@@ -23,24 +47,8 @@
dh_numpy3 -ppython3-biopython
override_dh_auto_test:
- # 0. if we would call `$(CURDIR)/setup.py test` these tests need to be excluded since they require to be online
- # Entrez_online HotRand NCBI_qblast SCOP_online SeqIO_online TogoWS
- # 1. avoid PAML_tools and EmbossPhylipNew since paml resp. embassy-phylip are non-free
- # 2. avoid testing tools that are not packaged for Debian yet
- # 3. avoid amd64 only tools on other architectures
- # 4. avoid tests with data base connections if no server is running while we are doing the build
- # 5. FIXME: These tests are failing for unknown reasons and this needs to be investigated
- # specifically test_trie runs when called separately
- # While 2. and 3. do not create errors there is no point in bloating
- # the build log and this kind of documentation makes things more transparent
mkdir -p Tests_avoid
- for avoid in \
- PAML_tools EmbossPhylipNew \
- MSAProbs_tool NACCESS_tool PopGen_DFDist PopGen_FDist PopGen_GenePop PopGen_GenePop_EasyController PopGen_SimCoal XXmotif_tool \
- $(EXCLUDEBWA) \
- BioSQL_MySQLdb BioSQL_psycopg2 \
- GenomeDiagram Fasttree_tool Mafft_tool \
- ColorSpiral trie \
+ for avoid in $(EXCLUDEONLINE) $(EXCLUDENONFREE) $(EXCLUDENOTPACKAGED) $(EXCLUDEBWA) $(EXCLUDEDATABASE) $(EXCLUDEFAILINGTESTS) \
; do \
mv Tests/test_$${avoid}.py Tests_avoid ; \
done
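For illustration, on a non-amd64 architecture (where EXCLUDEBWA=BWA_tool and
EXCLUDEONLINE is empty) the loop above behaves like this sketch:

    for avoid in PAML_tools EmbossPhylipNew \
                 MSAProbs_tool NACCESS_tool PopGen_DFDist PopGen_FDist \
                 PopGen_GenePop PopGen_GenePop_EasyController PopGen_SimCoal \
                 XXmotif_tool BWA_tool BioSQL_MySQLdb BioSQL_psycopg2 \
                 GenomeDiagram Fasttree_tool Mafft_tool ColorSpiral trie ; do
        mv Tests/test_${avoid}.py Tests_avoid
    done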