[med-svn] [cwltest] 01/02: New upstream version 1.0.20161124105442

Andreas Tille tille at debian.org
Sun Dec 11 16:20:41 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository cwltest.

commit 7375ba62c9c524c95133f4a97c6549a616fcfdc1
Author: Andreas Tille <tille at debian.org>
Date:   Sun Dec 11 17:19:31 2016 +0100

    New upstream version 1.0.20161124105442
---
 PKG-INFO                              |  39 +++++
 README.rst                            |  39 +++
 cwltest.egg-info/PKG-INFO             |  39 +++++
 cwltest.egg-info/SOURCES.txt          |  11 ++
 cwltest.egg-info/dependency_links.txt |   1 +
 cwltest.egg-info/entry_points.txt     |   3 +
 cwltest.egg-info/requires.txt         |   6 +
 cwltest.egg-info/top_level.txt        |   1 +
 cwltest.egg-info/zip-safe             |   1 +
 cwltest/__init__.py                   | 321 ++++++++++++++++++++++++++++++++++
 setup.cfg                             |  10 ++
 setup.py                              |  45 +++++
 12 files changed, 516 insertions(+)

diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..6f4259b
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,39 @@
+Metadata-Version: 1.1
+Name: cwltest
+Version: 1.0.20161124105442
+Summary: Common workflow language testing framework
+Home-page: https://github.com/common-workflow-language/cwltest
+Author: Common workflow language working group
+Author-email: common-workflow-language at googlegroups.com
+License: Apache 2.0
+Download-URL: https://github.com/common-workflow-language/cwltest
+Description: ==========================================
+        Common workflow language testing framework
+        ==========================================
+        
+        This is a testing tool for checking the output of Tools and Workflows described
+        with the Common Workflow Language.  Among other uses, it is used to run the CWL
+        conformance tests.
+        
+        This is written and tested for Python 2.7.
+        
+        Install
+        -------
+        
+        Installing the official package from PyPI::
+        
+          pip install cwltest
+        
+        Or from source::
+        
+          git clone https://github.com/common-workflow-language/cwltest.git
+          cd cwltest && python setup.py install
+        
+        Run on the command line
+        -----------------------
+        
+        Simple command::
+        
+          cwltest --test test-descriptions.yml --tool cwl-runner
+        
+Platform: UNKNOWN
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..61baf34
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,39 @@
+==========================================
+Common workflow language testing framework
+==========================================
+
+This is a testing tool for checking the output of Tools and Workflows described
+with the Common Workflow Language.  Among other uses, it is used to run the CWL
+conformance tests.
+
+This is written and tested for Python 2.7.
+
+Install
+-------
+
+Installing the official package from PyPI::
+
+  pip install cwltest
+
+Or from source::
+
+  git clone https://github.com/common-workflow-language/cwltest.git
+  cd cwltest && python setup.py install
+
+Run on the command line
+-----------------------
+
+Simple command::
+
+  cwltest --test test-descriptions.yml --tool cwl-runner
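+
+The file passed to ``--test`` is a YAML list of test cases.  Each entry
+names the ``tool`` (or workflow) to run, an optional ``job`` input object,
+and the expected ``output`` to compare against the runner's JSON output.
+A minimal sketch (the file names and values here are illustrative)::
+
+  - doc: Echo a fixed string
+    tool: echo-tool.cwl
+    job: echo-job.yml
+    output:
+      out: "hello\n"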
diff --git a/cwltest.egg-info/PKG-INFO b/cwltest.egg-info/PKG-INFO
new file mode 100644
index 0000000..6f4259b
--- /dev/null
+++ b/cwltest.egg-info/PKG-INFO
@@ -0,0 +1,39 @@
+Metadata-Version: 1.1
+Name: cwltest
+Version: 1.0.20161124105442
+Summary: Common workflow language testing framework
+Home-page: https://github.com/common-workflow-language/cwltest
+Author: Common workflow language working group
+Author-email: common-workflow-language at googlegroups.com
+License: Apache 2.0
+Download-URL: https://github.com/common-workflow-language/cwltest
+Description: ==========================================
+        Common workflow language testing framework
+        ==========================================
+        
+        This is a testing tool for checking the output of Tools and Workflows described
+        with the Common Workflow Language.  Among other uses, it is used to run the CWL
+        conformance tests.
+        
+        This is written and tested for Python 2.7.
+        
+        Install
+        -------
+        
+        Installing the official package from PyPI::
+        
+          pip install cwltest
+        
+        Or from source::
+        
+          git clone https://github.com/common-workflow-language/cwltest.git
+          cd cwltest && python setup.py install
+        
+        Run on the command line
+        -----------------------
+        
+        Simple command::
+        
+          cwltest --test test-descriptions.yml --tool cwl-runner
+        
+Platform: UNKNOWN
diff --git a/cwltest.egg-info/SOURCES.txt b/cwltest.egg-info/SOURCES.txt
new file mode 100644
index 0000000..bfdcb95
--- /dev/null
+++ b/cwltest.egg-info/SOURCES.txt
@@ -0,0 +1,11 @@
+README.rst
+setup.cfg
+setup.py
+cwltest/__init__.py
+cwltest.egg-info/PKG-INFO
+cwltest.egg-info/SOURCES.txt
+cwltest.egg-info/dependency_links.txt
+cwltest.egg-info/entry_points.txt
+cwltest.egg-info/requires.txt
+cwltest.egg-info/top_level.txt
+cwltest.egg-info/zip-safe
\ No newline at end of file
diff --git a/cwltest.egg-info/dependency_links.txt b/cwltest.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/cwltest.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/cwltest.egg-info/entry_points.txt b/cwltest.egg-info/entry_points.txt
new file mode 100644
index 0000000..60dd27b
--- /dev/null
+++ b/cwltest.egg-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+cwltest = cwltest:main
+
diff --git a/cwltest.egg-info/requires.txt b/cwltest.egg-info/requires.txt
new file mode 100644
index 0000000..b06160e
--- /dev/null
+++ b/cwltest.egg-info/requires.txt
@@ -0,0 +1,6 @@
+schema-salad >= 1.14
+typing >= 3.5.2
+junit-xml >= 1.7
+
+[:python_version == "2.7"]
+futures >= 3.0.5
diff --git a/cwltest.egg-info/top_level.txt b/cwltest.egg-info/top_level.txt
new file mode 100644
index 0000000..5ec7944
--- /dev/null
+++ b/cwltest.egg-info/top_level.txt
@@ -0,0 +1 @@
+cwltest
diff --git a/cwltest.egg-info/zip-safe b/cwltest.egg-info/zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/cwltest.egg-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/cwltest/__init__.py b/cwltest/__init__.py
new file mode 100755
index 0000000..74e6891
--- /dev/null
+++ b/cwltest/__init__.py
@@ -0,0 +1,321 @@
+#!/usr/bin/env python
+
+import argparse
+import json
+import os
+import subprocess
+import sys
+import shutil
+import tempfile
+import junit_xml
+import ruamel.yaml as yaml
+import ruamel.yaml.scanner as yamlscanner
+import pipes
+import logging
+import schema_salad.ref_resolver
+from concurrent.futures import ThreadPoolExecutor
+from typing import Any, Dict, List, Text
+
+_logger = logging.getLogger("cwltest")
+_logger.addHandler(logging.StreamHandler())
+_logger.setLevel(logging.INFO)
+
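+# Exit code by which a CWL runner signals that a test exercises a feature it
+# does not support; such tests are reported as skipped rather than failed.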
+UNSUPPORTED_FEATURE = 33
+
+
+class CompareFail(Exception):
+
+    @classmethod
+    def format(cls, expected, actual, cause=None):
+        # type: (Any, Any, Any) -> CompareFail
+        message = u"expected: %s\ngot: %s" % (
+            json.dumps(expected, indent=4, sort_keys=True),
+            json.dumps(actual, indent=4, sort_keys=True))
+        if cause:
+            message += u"\ncaused by: %s" % cause
+        return cls(message)
+
+
+class TestResult(object):
+
+    """Encapsulate relevant test result data."""
+
+    def __init__(self, return_code, standard_output, error_output):
+        # type: (int, str, str) -> None
+        self.return_code = return_code
+        self.standard_output = standard_output
+        self.error_output = error_output
+
+    def create_test_case(self, test):
+        # type: (Dict[Text, Any]) -> junit_xml.TestCase
+        doc = test.get(u'doc', 'N/A').strip()
+        return junit_xml.TestCase(doc, stdout=self.standard_output, stderr=self.error_output)
+
+
+def compare_file(expected, actual):
+    # type: (Dict[str,Any], Dict[str,Any]) -> None
+    if "path" in expected:
+        comp = "path"
+        if "path" not in actual:
+            actual["path"] = actual["location"]
+    else:
+        comp = "location"
+    if expected[comp] != "Any" and (not (actual[comp].endswith("/" + expected[comp]) or
+                                    ("/" not in actual[comp] and expected[comp] == actual[comp]))):
+        raise CompareFail.format(expected, actual, u"%s does not end with %s" % (actual[comp], expected[comp]))
+
+    check_keys = set(expected.keys()) - {'path', 'location'}
+
+    for k in check_keys:
+        try:
+            compare(expected.get(k), actual.get(k))
+        except CompareFail as e:
+            raise CompareFail.format(expected, actual, u"field '%s' failed comparison: %s" %(
+                k, e.message
+            ))
+
+
+def compare_directory(expected, actual):
+    # type: (Dict[str,Any], Dict[str,Any]) -> None
+    if actual.get("class") != 'Directory':
+        raise CompareFail.format(expected, actual, u"expected object with a class 'Directory'")
+    if "listing" not in actual:
+        raise CompareFail.format(expected, actual, u"'listing' is mandatory field in Directory object")
+    for i in expected["listing"]:
+        found = False
+        for j in actual["listing"]:
+            try:
+                compare(i, j)
+                found = True
+                break
+            except CompareFail:
+                pass
+        if not found:
+            raise CompareFail.format(expected, actual, u"%s not found" % json.dumps(i, indent=4, sort_keys=True))
+
+
+def compare_dict(expected, actual):
+    # type: (Dict[str,Any], Dict[str,Any]) -> None
+    for c in expected:
+        try:
+            compare(expected[c], actual.get(c))
+        except CompareFail as e:
+            raise CompareFail.format(expected, actual, u"failed comparison for key '%s': %s" % (c, e))
+    extra_keys = set(actual.keys()).difference(expected.keys())
+    for k in extra_keys:
+        if actual[k] is not None:
+            raise CompareFail.format(expected, actual, u"unexpected key '%s'" % k)
+
+
+def compare(expected, actual):  # type: (Any, Any) -> None
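+    # Recursive structural comparison; the literal string "Any" in the
+    # expected document matches any actual value.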
+    if expected == "Any":
+        return
+    if expected is not None and actual is None:
+        raise CompareFail.format(expected, actual)
+
+    try:
+        if isinstance(expected, dict):
+            if not isinstance(actual, dict):
+                raise CompareFail.format(expected, actual)
+
+            if expected.get("class") == "File":
+                compare_file(expected, actual)
+            elif expected.get("class") == "Directory":
+                compare_directory(expected, actual)
+            else:
+                compare_dict(expected, actual)
+
+        elif isinstance(expected, list):
+            if not isinstance(actual, list):
+                raise CompareFail.format(expected, actual)
+
+            if len(expected) != len(actual):
+                raise CompareFail.format(expected, actual, u"lengths don't match")
+            for c in xrange(0, len(expected)):
+                try:
+                    compare(expected[c], actual[c])
+                except CompareFail as e:
+                    raise CompareFail.format(expected, actual, e)
+        else:
+            if expected != actual:
+                raise CompareFail.format(expected, actual)
+
+    except Exception as e:
+        raise CompareFail(str(e))
+
+
+def run_test(args, i, tests):  # type: (argparse.Namespace, int, List[Dict[str, str]]) -> TestResult
+    out = {}  # type: Dict[str,Any]
+    outdir = outstr = outerr = test_command = None
+    t = tests[i]
+    try:
+        test_command = [args.tool]
+        test_command.extend(args.args)
+        # Add prefixes if running on Mac OS X so that boot2docker writes to /Users
+        if 'darwin' in sys.platform:
+            outdir = tempfile.mkdtemp(prefix=os.path.abspath(os.path.curdir))
+            test_command.extend(["--tmp-outdir-prefix={}".format(outdir), "--tmpdir-prefix={}".format(outdir)])
+        else:
+            outdir = tempfile.mkdtemp()
+        test_command.extend(["--outdir={}".format(outdir),
+                             "--quiet",
+                             t["tool"]])
+        if t.get("job"):
+            test_command.append(t["job"])
+
+        sys.stderr.write("\rTest [%i/%i] " % (i + 1, len(tests)))
+        sys.stderr.flush()
+
+        process = subprocess.Popen(test_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        outstr, outerr = process.communicate()
+        return_code = process.poll()
+        if return_code:
+            raise subprocess.CalledProcessError(return_code, " ".join(test_command))
+
+        out = json.loads(outstr)
+    except ValueError as v:
+        _logger.error(str(v))
+        _logger.error(outstr)
+        _logger.error(outerr)
+    except subprocess.CalledProcessError as err:
+        if err.returncode == UNSUPPORTED_FEATURE:
+            return TestResult(UNSUPPORTED_FEATURE, outstr, outerr)
+        else:
+            _logger.error(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
+            _logger.error(t.get("doc"))
+            _logger.error("Returned non-zero")
+            _logger.error(outerr)
+            return TestResult(1, outstr, outerr)
+    except (yamlscanner.ScannerError, TypeError) as e:
+        _logger.error(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
+        _logger.error(outstr)
+        _logger.error(u"Parse error %s", str(e))
+        _logger.error(outerr)
+    except KeyboardInterrupt:
+        _logger.error(u"""Test interrupted: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
+        raise
+
+    failed = False
+
+    try:
+        compare(t.get("output"), out)
+    except CompareFail as ex:
+        _logger.warn(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
+        _logger.warn(t.get("doc"))
+        _logger.warn(u"Compare failure %s", ex)
+        failed = True
+
+    if outdir:
+        shutil.rmtree(outdir, True)
+
+    return TestResult((1 if failed else 0), outstr, outerr)
+
+
+def main():  # type: () -> int
+    parser = argparse.ArgumentParser(description='Compliance tests for cwltool')
+    parser.add_argument("--test", type=str, help="YAML file describing test cases", required=True)
+    parser.add_argument("--basedir", type=str, help="Basedir to use for tests", default=".")
+    parser.add_argument("-l", action="store_true", help="List tests then exit")
+    parser.add_argument("-n", type=str, default=None, help="Run a specific tests, format is 1,3-6,9")
+    parser.add_argument("--tool", type=str, default="cwl-runner",
+                        help="CWL runner executable to use (default 'cwl-runner'")
+    parser.add_argument("--only-tools", action="store_true", help="Only test tools")
+    parser.add_argument("--junit-xml", type=str, default=None, help="Path to JUnit xml file")
+    parser.add_argument("args", help="arguments to pass first to tool runner", nargs=argparse.REMAINDER)
+    parser.add_argument("-j", type=int, default=1, help="Specifies the number of tests to run simultaneously (defaults to one).")
+
+    args = parser.parse_args()
+    if '--' in args.args:
+        args.args.remove('--')
+
+    if not args.test:
+        parser.print_help()
+        return 1
+
+    with open(args.test) as f:
+        tests = yaml.load(f)
+
+    failures = 0
+    unsupported = 0
+    passed = 0
+    suite_name, _ = os.path.splitext(os.path.basename(args.test))
+    report = junit_xml.TestSuite(suite_name, [])
+
+    if args.only_tools:
+        alltests = tests
+        tests = []
+        for t in alltests:
+            loader = schema_salad.ref_resolver.Loader({"id": "@id"})
+            cwl = loader.resolve_ref(t["tool"])[0]
+            if isinstance(cwl, dict):
+                if cwl["class"] == "CommandLineTool":
+                    tests.append(t)
+            else:
+                raise Exception("Unexpected code path.")
+
+    if args.l:
+        for i, t in enumerate(tests):
+            print u"[%i] %s" % (i + 1, t["doc"].strip())
+        return 0
+
+    if args.n is not None:
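+        # Expand a selection like "1,3-6,9" into zero-based test indices.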
+        ntest = []
+        for s in args.n.split(","):
+            sp = s.split("-")
+            if len(sp) == 2:
+                ntest.extend(range(int(sp[0]) - 1, int(sp[1])))
+            else:
+                ntest.append(int(s) - 1)
+    else:
+        ntest = range(0, len(tests))
+
+    total = 0
+    with ThreadPoolExecutor(max_workers=args.j) as executor:
+        jobs = [executor.submit(run_test, args, i, tests)
+                for i in ntest]
+        try:
+            for i, job in zip(ntest, jobs):
+                test_result = job.result()
+                test_case = test_result.create_test_case(tests[i])
+                total += 1
+                if test_result.return_code == 1:
+                    failures += 1
+                    test_case.add_failure_info("N/A")
+                elif test_result.return_code == UNSUPPORTED_FEATURE:
+                    unsupported += 1
+                    test_case.add_skipped_info("Unsupported")
+                else:
+                    passed += 1
+                report.test_cases.append(test_case)
+        except KeyboardInterrupt:
+            for job in jobs:
+                job.cancel()
+            _logger.error("Tests interrupted")
+
+    if args.junit_xml:
+        with open(args.junit_xml, 'w') as fp:
+            junit_xml.TestSuite.to_file(fp, [report])
+
+    if failures == 0 and unsupported == 0:
+        _logger.info("All tests passed")
+        return 0
+    elif failures == 0 and unsupported > 0:
+        _logger.warn("%i tests passed, %i unsupported features", total - unsupported, unsupported)
+        return 0
+    else:
+        _logger.warn("%i tests passed, %i failures, %i unsupported features", total - (failures + unsupported), failures, unsupported)
+        return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..1e15f17
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,10 @@
+[flake8]
+ignore = E124,E128,E129,E201,E202,E225,E226,E231,E265,E271,E302,E303,F401,E402,E501,W503,E731,F811,F821,F841
+
+[easy_install]
+
+[egg_info]
+tag_build = .20161124105442
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..2540262
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+import os
+
+import setuptools.command.egg_info as egg_info_cmd
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__)
+README = os.path.join(SETUP_DIR, 'README.rst')
+
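+# Prefer git-derived version tagging when the optional 'gittaggers' helper is
+# installed; otherwise fall back to the stock setuptools egg_info command.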
+try:
+    import gittaggers
+    tagger = gittaggers.EggInfoFromGit
+except ImportError:
+    tagger = egg_info_cmd.egg_info
+
+setup(name='cwltest',
+      version='1.0',
+      description='Common workflow language testing framework',
+      long_description=open(README).read(),
+      author='Common workflow language working group',
+      author_email='common-workflow-language at googlegroups.com',
+      url="https://github.com/common-workflow-language/cwltest",
+      download_url="https://github.com/common-workflow-language/cwltest",
+      license='Apache 2.0',
+      packages=["cwltest"],
+      install_requires=[
+          'schema-salad >= 1.14',
+          'typing >= 3.5.2',
+          'junit-xml >= 1.7'
+      ],
+      extras_require={
+          ':python_version == "2.7"': [
+              'futures >= 3.0.5',
+          ],
+      },
+      tests_require=[],
+      entry_points={
+          'console_scripts': [ "cwltest=cwltest:main" ]
+      },
+      zip_safe=True,
+      cmdclass={'egg_info': tagger},
+)

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/cwltest.git


