[med-svn] [Git][med-team/cwltest][master] 4 commits: routine-update: New upstream version
Michael R. Crusoe (@crusoe)
gitlab at salsa.debian.org
Sat Sep 7 12:46:11 BST 2024
Michael R. Crusoe pushed to branch master at Debian Med / cwltest
Commits:
803e6644 by Michael R. Crusoe at 2024-09-07T13:38:36+02:00
routine-update: New upstream version
- - - - -
c19119d6 by Michael R. Crusoe at 2024-09-07T13:38:37+02:00
New upstream version 2.5.20240714110256
- - - - -
e8d604f4 by Michael R. Crusoe at 2024-09-07T13:38:37+02:00
Update upstream source from tag 'upstream/2.5.20240714110256'
Update to upstream version '2.5.20240714110256'
with Debian dir d586493a492fcf2295fc39560215c65e42918ce3
- - - - -
fc6a8f33 by Michael R. Crusoe at 2024-09-07T13:38:58+02:00
routine-update: Ready to upload to unstable
- - - - -
20 changed files:
- .flake8
- .gitignore
- Makefile
- PKG-INFO
- cwltest.egg-info/PKG-INFO
- cwltest.egg-info/SOURCES.txt
- cwltest/_version.py
- cwltest/argparser.py
- cwltest/main.py
- cwltest/plugin.py
- cwltest/utils.py
- debian/changelog
- mypy-requirements.txt
- + tests/test-data/badgedir.yaml
- tests/test-data/conformance_test_v1.2.cwltest.yaml
- tests/test-data/mock_cwl_runner.py
- + tests/test-data/v1.0/null-expression2-tool.cwl
- + tests/test_badgedir.py
- tests/test_plugin.py
- tox.ini
Changes:
=====================================
.flake8
=====================================
@@ -5,3 +5,5 @@ select = B,C,E,F,W,T4
extend-ignore = E501,B905
# when Python 3.10 is the minimum version, re-enable check B905 for zip + strict
extend-select = B9
+per-file-ignores=
+ ./tests/test_badgedir.py:B950
=====================================
.gitignore
=====================================
@@ -7,3 +7,12 @@ build
dist
cwltest/_version.py
+
+# Generated by tox
+coverage.xml
+.coverage
+.coverage.*
+pydocstyle_report.txt
+
+# Generated by `make release-test`
+testenv*
=====================================
Makefile
=====================================
@@ -183,7 +183,7 @@ release:
python -m build testenv2/src/${PACKAGE} && \
pip install twine && \
twine upload testenv2/src/${PACKAGE}/dist/* && \
- git tag ${VERSION} && git push --tags
+ git tag --no-sign ${VERSION} && git push --tags
flake8: FORCE
flake8 $(PYSOURCES)
=====================================
PKG-INFO
=====================================
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: cwltest
-Version: 2.5.20240425111257
+Version: 2.5.20240714110256
Summary: Common Workflow Language testing framework
Author-email: Common workflow language working group <common-workflow-language at googlegroups.com>
License: Apache 2.0
=====================================
cwltest.egg-info/PKG-INFO
=====================================
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: cwltest
-Version: 2.5.20240425111257
+Version: 2.5.20240714110256
Summary: Common Workflow Language testing framework
Author-email: Common workflow language working group <common-workflow-language at googlegroups.com>
License: Apache 2.0
=====================================
cwltest.egg-info/SOURCES.txt
=====================================
@@ -91,6 +91,7 @@ mypy-stubs/rdflib/plugins/parsers/notation3.pyi
mypy-stubs/ruamel/__init__.pyi
tests/__init__.py
tests/test_argparse.py
+tests/test_badgedir.py
tests/test_categories.py
tests/test_compare.py
tests/test_exclude_tags.py
@@ -103,6 +104,7 @@ tests/test_short_names.py
tests/test_string_id.py
tests/test_timeout.py
tests/util.py
+tests/test-data/badgedir.yaml
tests/test-data/conformance_test_v1.0.cwltest.yml
tests/test-data/conformance_test_v1.2.cwltest.yaml
tests/test-data/cores.txt
@@ -133,4 +135,5 @@ tests/test-data/v1.0/cat1-job.json
tests/test-data/v1.0/cat1-testcli.cwl
tests/test-data/v1.0/cat2-job.json
tests/test-data/v1.0/empty.json
-tests/test-data/v1.0/hello.txt
\ No newline at end of file
+tests/test-data/v1.0/hello.txt
+tests/test-data/v1.0/null-expression2-tool.cwl
\ No newline at end of file
=====================================
cwltest/_version.py
=====================================
@@ -12,5 +12,5 @@ __version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE
-__version__ = version = '2.5.20240425111257'
-__version_tuple__ = version_tuple = (2, 5, 20240425111257)
+__version__ = version = '2.5.20240714110256'
+__version_tuple__ = version_tuple = (2, 5, 20240714110256)
=====================================
cwltest/argparser.py
=====================================
@@ -18,6 +18,9 @@ def arg_parser() -> argparse.ArgumentParser:
parser.add_argument(
"--basedir", type=str, help="Basedir to use for tests", default="."
)
+ parser.add_argument(
+ "--baseuri", type=str, help="Base URI to use links in the report", default=None
+ )
parser.add_argument("-l", action="store_true", help="List tests then exit")
parser.add_argument(
"-n", type=str, default=None, help="Run specific tests, format is 1,3-6,9"
=====================================
cwltest/main.py
=====================================
@@ -13,7 +13,7 @@ import schema_salad.avro
import schema_salad.ref_resolver
import schema_salad.schema
from cwltest.argparser import arg_parser
-from cwltest.utils import CWLTestConfig, TestResult
+from cwltest.utils import CWLTestConfig, CWLTestReport, TestResult
from schema_salad.exceptions import ValidationException
from cwltest import logger, utils
@@ -58,7 +58,11 @@ def _run_test(
sys.stderr.flush()
config = CWLTestConfig(
basedir=args.basedir,
+ test_baseuri=args.baseuri,
+ test_basedir=args.test_basedir,
classname=args.classname,
+ entry=args.test,
+ entry_line=test["line"],
tool=args.tool,
args=args.args,
testargs=args.testargs,
@@ -96,6 +100,12 @@ def main() -> int:
arg_parser().print_help()
return 1
+ args.test_basedir = os.path.dirname(utils.absuri(args.test)) + "/"
+ if args.baseuri is None:
+ args.baseuri = "file://" + args.test_basedir
+ if not args.baseuri.endswith("/"):
+ args.baseuri = args.baseuri + "/"
+
try:
tests, metadata = utils.load_and_validate_tests(args.test)
except ValidationException:
@@ -106,8 +116,8 @@ def main() -> int:
suite_name, _ = os.path.splitext(os.path.basename(args.test))
report: Optional[junit_xml.TestSuite] = junit_xml.TestSuite(suite_name, [])
- ntotal = defaultdict(int) # type: Dict[str, int]
- npassed = defaultdict(int) # type: Dict[str, int]
+ ntotal: Dict[str, int] = defaultdict(int)
+ npassed: Dict[str, List[CWLTestReport]] = defaultdict(list)
if args.only_tools:
alltests = tests
@@ -233,7 +243,7 @@ def main() -> int:
junit_xml.to_xml_report_file(xml, [cast(junit_xml.TestSuite, report)])
if args.badgedir:
- utils.generate_badges(args.badgedir, ntotal, npassed)
+ utils.generate_badges(args.badgedir, ntotal, npassed, nfailures, nunsupported)
if failures == 0 and unsupported == 0:
logger.info("All tests passed")
=====================================
cwltest/plugin.py
=====================================
@@ -20,6 +20,7 @@ from typing import (
Union,
cast,
)
+from urllib.parse import urljoin
import pytest
from cwltest.compare import CompareFail, compare
@@ -64,6 +65,13 @@ def _run_test_hook_or_plain(
"""Run tests using a provided pytest_cwl_execute_test hook or the --cwl-runner."""
processfile, jobfile = utils.prepare_test_paths(test, config.basedir)
start_time = time.time()
+ reltool = os.path.relpath(test["tool"], start=config.test_basedir)
+ tooluri = urljoin(config.test_baseuri, reltool)
+ if test.get("job", None):
+ reljob = os.path.relpath(test["job"], start=config.test_basedir)
+ joburi = urljoin(config.test_baseuri, reljob)
+ else:
+ joburi = None
outerr = ""
hook_out = hook(config=config, processfile=processfile, jobfile=jobfile)
if not hook_out:
@@ -74,7 +82,14 @@ def _run_test_hook_or_plain(
if returncode == UNSUPPORTED_FEATURE:
if REQUIRED not in test.get("tags", ["required"]):
return utils.TestResult(
- UNSUPPORTED_FEATURE, outstr, "", duration, config.classname
+ UNSUPPORTED_FEATURE,
+ outstr,
+ "",
+ duration,
+ config.classname,
+ config.entry,
+ tooluri,
+ joburi,
)
elif returncode != 0:
if not bool(test.get("should_fail", False)):
@@ -82,9 +97,26 @@ def _run_test_hook_or_plain(
logger.warning(test.get("doc"))
message = "Returned non-zero but it should be zero"
return utils.TestResult(
- 1, outstr, outerr, duration, config.classname, message
+ 1,
+ outstr,
+ outerr,
+ duration,
+ config.classname,
+ config.entry,
+ tooluri,
+ joburi,
+ message,
)
- return utils.TestResult(0, outstr, outerr, duration, config.classname)
+ return utils.TestResult(
+ 0,
+ outstr,
+ outerr,
+ duration,
+ config.classname,
+ config.entry,
+ tooluri,
+ joburi,
+ )
if bool(test.get("should_fail", False)):
return utils.TestResult(
1,
@@ -92,6 +124,9 @@ def _run_test_hook_or_plain(
outerr,
duration,
config.classname,
+ config.entry,
+ tooluri,
+ joburi,
"Test should failed, but it did not.",
)
@@ -111,6 +146,9 @@ def _run_test_hook_or_plain(
outerr,
duration,
config.classname,
+ config.entry,
+ tooluri,
+ joburi,
fail_message,
)
@@ -137,6 +175,10 @@ class CWLItem(pytest.Item):
cwl_args = self.config.getoption("cwl_args")
config = utils.CWLTestConfig(
basedir=self.config.getoption("cwl_basedir"),
+ test_baseuri=self.config.getoption("cwl_basedir"),
+ test_basedir=self.config.getoption("cwl_basedir"),
+ entry="tests.yaml",
+ entry_line="0",
outdir=str(
self.config._tmp_path_factory.mktemp( # type: ignore[attr-defined]
self.spec.get("label", "unlabled_test")
@@ -382,7 +424,7 @@ def pytest_sessionfinish(session: pytest.Session, exitstatus: int) -> None:
_,
) = utils.parse_results(results, tests)
if cwl_badgedir := session.config.getoption("cwl_badgedir"):
- utils.generate_badges(cwl_badgedir, ntotal, npassed)
+ utils.generate_badges(cwl_badgedir, ntotal, npassed, nfailures, nunsupported)
def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None:
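
Under the pytest plugin there is no standalone test-suite file, so
cwl_basedir stands in for both test_baseuri and test_basedir and the entry
is a fixed "tests.yaml" placeholder; entry links there resolve to
something like (illustrative):

    <cwl_basedir>/tests.yaml#L0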
=====================================
cwltest/utils.py
=====================================
@@ -20,6 +20,7 @@ from typing import (
Union,
cast,
)
+from urllib.parse import urljoin
import junit_xml
import ruamel.yaml.scanner
@@ -46,7 +47,11 @@ class CWLTestConfig:
def __init__(
self,
+ entry: str,
+ entry_line: str,
basedir: Optional[str] = None,
+ test_baseuri: Optional[str] = None,
+ test_basedir: Optional[str] = None,
outdir: Optional[str] = None,
classname: Optional[str] = None,
tool: Optional[str] = None,
@@ -58,8 +63,13 @@ class CWLTestConfig:
) -> None:
"""Initialize test configuration."""
self.basedir: str = basedir or os.getcwd()
+ self.test_baseuri: str = test_baseuri or "file://" + self.basedir
+ self.test_basedir: str = test_basedir or self.basedir
self.outdir: Optional[str] = outdir
self.classname: str = classname or ""
+ self.entry = urljoin(
+ self.test_baseuri, os.path.basename(entry) + f"#L{entry_line}"
+ )
self.tool: str = tool or "cwl-runner"
self.args: List[str] = args or []
self.testargs: List[str] = testargs or []
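
self.entry combines the base URI, the suite file's basename, and the
entry's (zero-based) line in the YAML; e.g., with the values from the new
tests:

    urljoin("https://example.com/specified/uri/", "badgedir.yaml#L7")
    # -> "https://example.com/specified/uri/badgedir.yaml#L7"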
@@ -68,6 +78,20 @@ class CWLTestConfig:
self.runner_quiet: bool = runner_quiet or True
+class CWLTestReport:
+ """Encapsulate relevant test result data for a markdown report."""
+
+ def __init__(
+ self, id: str, category: List[str], entry: str, tool: str, job: Optional[str]
+ ) -> None:
+ """Initialize a CWLTestReport object."""
+ self.id = id
+ self.category = category
+ self.entry = entry
+ self.tool = tool
+ self.job = job
+
+
class TestResult:
"""Encapsulate relevant test result data."""
@@ -78,6 +102,9 @@ class TestResult:
error_output: str,
duration: float,
classname: str,
+ entry: str,
+ tool: str,
+ job: Optional[str],
message: str = "",
) -> None:
"""Initialize a TestResult object."""
@@ -87,6 +114,9 @@ class TestResult:
self.duration = duration
self.message = message
self.classname = classname
+ self.entry = entry
+ self.tool = tool
+ self.job = job
def create_test_case(self, test: Dict[str, Any]) -> junit_xml.TestCase:
"""Create a jUnit XML test case from this test result."""
@@ -108,6 +138,25 @@ class TestResult:
case.failure_message = self.message
return case
+ def create_report_entry(self, test: Dict[str, Any]) -> CWLTestReport:
+ return CWLTestReport(
+ test.get("id", "no-id"),
+ test.get("tags", ["required"]),
+ self.entry,
+ self.tool,
+ self.job,
+ )
+
+
+def _clean_ruamel_list(obj: List[Any]) -> Any:
+ """Entrypoint to transform roundtrip loaded ruamel.yaml to plain objects."""
+ new_list = []
+ for entry in obj:
+ e: Any = _clean_ruamel(entry)
+ e["line"] = str(entry.lc.line)
+ new_list.append(e)
+ return new_list
+
def _clean_ruamel(obj: Any) -> Any:
"""Transform roundtrip loaded ruamel.yaml to plain objects."""
@@ -132,13 +181,17 @@ def _clean_ruamel(obj: Any) -> Any:
def generate_badges(
- badgedir: str, ntotal: Dict[str, int], npassed: Dict[str, int]
+ badgedir: str,
+ ntotal: Dict[str, int],
+ npassed: Dict[str, List[CWLTestReport]],
+ nfailures: Dict[str, List[CWLTestReport]],
+ nunsupported: Dict[str, List[CWLTestReport]],
) -> None:
"""Generate badges with conformance levels."""
os.mkdir(badgedir)
for t, v in ntotal.items():
- percent = int((npassed[t] / float(v)) * 100)
- if npassed[t] == v:
+ percent = int((len(npassed[t]) / float(v)) * 100)
+ if len(npassed[t]) == v:
color = "green"
elif t == "required":
color = "red"
@@ -156,6 +209,42 @@ def generate_badges(
)
)
+ with open(f"{badgedir}/{t}.md", "w") as out:
+ print(f"# `{t}` tests", file=out)
+
+ print("## List of passed tests", file=out)
+ for e in npassed[t]:
+ base = f"[{shortname(e.id)}]({e.entry})"
+ tool = f"[tool]({e.tool})"
+ if e.job:
+ arr = [tool, f"[job]({e.job})"]
+ else:
+ arr = [tool]
+ args = ", ".join(arr)
+ print(f"- {base} ({args})", file=out)
+
+ print("## List of failed tests", file=out)
+ for e in nfailures[t]:
+ base = f"[{shortname(e.id)}]({e.entry})"
+ tool = f"[tool]({e.tool})"
+ if e.job:
+ arr = [tool, f"[job]({e.job})"]
+ else:
+ arr = [tool]
+ args = ", ".join(arr)
+ print(f"- {base} ({args})", file=out)
+
+ print("## List of unsupported tests", file=out)
+ for e in nunsupported[t]:
+ base = f"[{shortname(e.id)}]({e.entry})"
+ tool = f"[tool]({e.tool})"
+ if e.job:
+ arr = [tool, f"[job]({e.job})"]
+ else:
+ arr = [tool]
+ args = ", ".join(arr)
+ print(f"- {base} ({args})", file=out)
+
def get_test_number_by_key(
tests: List[Dict[str, str]], key: str, value: str
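
Each <badgedir>/<tag>.md thus carries one bullet per test; a
representative passed-test line, as asserted verbatim by the new
test_badgedir tests:

    - [success_w_job](https://example.com/specified/uri/badgedir.yaml#L0) ([tool](https://example.com/specified/uri/return-0.cwl), [job](https://example.com/specified/uri/empty.yml))

The pre-existing <badgedir>/<tag>.json badge files keep their
subject/status/color layout.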
@@ -194,7 +283,7 @@ def load_and_validate_tests(path: str) -> Tuple[Any, Dict[str, Any]]:
tests, metadata = schema_salad.schema.load_and_validate(
document_loader, avsc_names, path, True
)
- tests = cast(List[Dict[str, Any]], _clean_ruamel(tests))
+ tests = cast(List[Dict[str, Any]], _clean_ruamel_list(tests))
return tests, metadata
@@ -209,10 +298,10 @@ def parse_results(
int, # passed
int, # failures
int, # unsupported
- Dict[str, int],
- Dict[str, int],
- Dict[str, int],
- Dict[str, int],
+ Dict[str, int], # total for each tag
+ Dict[str, List[CWLTestReport]], # passed for each tag
+ Dict[str, List[CWLTestReport]], # failures for each tag
+ Dict[str, List[CWLTestReport]], # unsupported for each tag
Optional[junit_xml.TestSuite],
]:
"""
@@ -226,12 +315,13 @@ def parse_results(
failures = 0
unsupported = 0
ntotal: Dict[str, int] = defaultdict(int)
- nfailures: Dict[str, int] = defaultdict(int)
- nunsupported: Dict[str, int] = defaultdict(int)
- npassed: Dict[str, int] = defaultdict(int)
+ nfailures: Dict[str, List[CWLTestReport]] = defaultdict(list)
+ nunsupported: Dict[str, List[CWLTestReport]] = defaultdict(list)
+ npassed: Dict[str, List[CWLTestReport]] = defaultdict(list)
for i, test_result in enumerate(results):
test_case = test_result.create_test_case(tests[i])
+ test_report = test_result.create_report_entry(tests[i])
test_case.url = (
f"cwltest:{suite_name}#{i + 1}"
if suite_name is not None
@@ -247,21 +337,16 @@ def parse_results(
if return_code == 0:
passed += 1
for t in tags:
- npassed[t] += 1
+ npassed[t].append(test_report)
elif return_code != 0 and return_code != UNSUPPORTED_FEATURE:
failures += 1
for t in tags:
- nfailures[t] += 1
- test_case.add_failure_info(output=test_result.message)
- elif return_code == UNSUPPORTED_FEATURE and category == REQUIRED:
- failures += 1
- for t in tags:
- nfailures[t] += 1
+ nfailures[t].append(test_report)
test_case.add_failure_info(output=test_result.message)
elif category != REQUIRED and return_code == UNSUPPORTED_FEATURE:
unsupported += 1
for t in tags:
- nunsupported[t] += 1
+ nunsupported[t].append(test_report)
test_case.add_skipped_info("Unsupported")
else:
raise Exception(
@@ -352,6 +437,15 @@ def run_test_plain(
test_command: List[str] = []
duration = 0.0
number = "?"
+
+ reltool = os.path.relpath(test["tool"], start=config.test_basedir)
+ tooluri = urljoin(config.test_baseuri, reltool)
+ if test.get("job", None):
+ reljob = os.path.relpath(test["job"], start=config.test_basedir)
+ joburi = urljoin(config.test_baseuri, reljob)
+ else:
+ joburi = None
+
if test_number is not None:
number = str(test_number)
process: Optional[subprocess.Popen[str]] = None
@@ -385,10 +479,26 @@ def run_test_plain(
"tags", ["required"]
):
return TestResult(
- UNSUPPORTED_FEATURE, outstr, outerr, duration, config.classname
+ UNSUPPORTED_FEATURE,
+ outstr,
+ outerr,
+ duration,
+ config.classname,
+ config.entry,
+ tooluri,
+ joburi,
)
if test.get("should_fail", False):
- return TestResult(0, outstr, outerr, duration, config.classname)
+ return TestResult(
+ 0,
+ outstr,
+ outerr,
+ duration,
+ config.classname,
+ config.entry,
+ tooluri,
+ joburi,
+ )
if test_number:
logger.error(
"""Test %i failed: %s""",
@@ -405,7 +515,17 @@ def run_test_plain(
logger.error("Does not support required feature")
else:
logger.error("Returned non-zero")
- return TestResult(1, outstr, outerr, duration, config.classname, str(err))
+ return TestResult(
+ 1,
+ outstr,
+ outerr,
+ duration,
+ config.classname,
+ config.entry,
+ tooluri,
+ joburi,
+ str(err),
+ )
except (ruamel.yaml.scanner.ScannerError, TypeError) as err:
logger.error(
"""Test %s failed: %s""",
@@ -437,6 +557,9 @@ def run_test_plain(
outerr,
duration,
config.classname,
+ config.entry,
+ tooluri,
+ joburi,
invalid_json_msg,
)
except subprocess.TimeoutExpired:
@@ -457,6 +580,9 @@ def run_test_plain(
outerr,
float(cast(int, config.timeout)),
config.classname,
+ config.entry,
+ tooluri,
+ joburi,
"Test timed out",
)
finally:
@@ -480,7 +606,16 @@ def run_test_plain(
)
logger.warning(test.get("doc", "").replace("\n", " ").strip())
logger.warning("Returned zero but it should be non-zero")
- return TestResult(1, outstr, outerr, duration, config.classname)
+ return TestResult(
+ 1,
+ outstr,
+ outerr,
+ duration,
+ config.classname,
+ config.entry,
+ tooluri,
+ joburi,
+ )
try:
compare(test.get("output"), out)
@@ -503,6 +638,9 @@ def run_test_plain(
outerr,
duration,
config.classname,
+ config.entry,
+ tooluri,
+ joburi,
fail_message,
)
@@ -514,3 +652,10 @@ def shortname(name: str) -> str:
It is a workaround of https://github.com/common-workflow-language/schema_salad/issues/511.
"""
return [n for n in re.split("[/#]", name) if len(n)][-1]
+
+
+def absuri(path: str) -> str:
+ """Return an absolute URI."""
+ if "://" in path:
+ return path
+ return "file://" + os.path.abspath(path)
=====================================
debian/changelog
=====================================
@@ -1,3 +1,9 @@
+cwltest (2.5.20240714110256-1) unstable; urgency=medium
+
+ * New upstream version
+
+ -- Michael R. Crusoe <crusoe at debian.org> Sat, 07 Sep 2024 13:38:58 +0200
+
cwltest (2.5.20240425111257-1) unstable; urgency=medium
* d/control: build-dep on setuptools-scm to set the version correctly.
=====================================
mypy-requirements.txt
=====================================
@@ -1,4 +1,4 @@
-mypy==1.10.0
+mypy==1.10.1
types-setuptools
types-requests
types-PyYAML
=====================================
tests/test-data/badgedir.yaml
=====================================
@@ -0,0 +1,39 @@
+- output: {}
+ job: empty.yml
+ tool: return-0.cwl
+ id: success_w_job
+ doc: Successful test with a job file
+ tags: [ command_line_tool ]
+
+- output: {}
+ tool: return-0.cwl
+ id: success_wo_job
+ doc: Successful test without a job file
+ tags: [ command_line_tool ]
+
+- output: {}
+ job: empty.yml
+ tool: return-1.cwl
+ id: failure_w_job
+ doc: Failed test with a job file
+ tags: [ command_line_tool ]
+
+- output: {}
+ tool: return-1.cwl
+ id: failure_wo_job
+ doc: Failed test without a job file
+ tags: [ command_line_tool ]
+
+- output: {}
+ job: empty.yml
+ tool: return-unsupported.cwl
+ id: unsupported_w_job
+ doc: Unsupported test with a job file
+ tags: [ command_line_tool ]
+
+- output: {}
+ job: null
+ tool: return-unsupported.cwl
+ id: unsupported_wo_job
+ doc: Unsupported test without a job file
+ tags: [ command_line_tool ]
=====================================
tests/test-data/conformance_test_v1.2.cwltest.yaml
=====================================
@@ -12,4 +12,10 @@
tool: v1.0/cat1-testcli.cwl
id: cl_optional_bindings_provided
doc: Test command line with optional input (provided)
- tags: [ command_line_tool ]
\ No newline at end of file
+ tags: [ command_line_tool ]
+
+- tool: v1.0/null-expression2-tool.cwl
+ should_fail: true
+ id: expression_any_nodefaultany
+ doc: Test Any without defaults cannot be unspecified.
+ tags: [ required, inline_javascript, expression_tool ]
=====================================
tests/test-data/mock_cwl_runner.py
=====================================
@@ -15,7 +15,7 @@ TIMEOUT_TOOL = "timeout.cwl"
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument("processfile")
- parser.add_argument("jobfile")
+ parser.add_argument("jobfile", nargs='?', default=None)
parser.add_argument("--version", action="version", version="%(prog)s 1.0")
parser.add_argument("--outdir")
parser.add_argument("--quiet", action="store_true")
=====================================
tests/test-data/v1.0/null-expression2-tool.cwl
=====================================
@@ -0,0 +1,14 @@
+#!/usr/bin/env cwl-runner
+
+class: ExpressionTool
+requirements:
+ - class: InlineJavascriptRequirement
+cwlVersion: v1.2
+
+inputs:
+ i1: Any
+
+outputs:
+ output: int
+
+expression: "$({'output': (inputs.i1 == 'the-default' ? 1 : 2)})"
\ No newline at end of file
=====================================
tests/test_badgedir.py
=====================================
@@ -0,0 +1,98 @@
+import os
+import json
+from pathlib import Path
+from textwrap import dedent
+
+import schema_salad.ref_resolver
+
+from .util import get_data, run_with_mock_cwl_runner
+
+
+def test_badgedir(tmp_path: Path) -> None:
+ badgedir = tmp_path / "badgedir"
+
+ args = [
+ "--test",
+ schema_salad.ref_resolver.file_uri(
+ get_data("tests/test-data/conformance_test_v1.2.cwltest.yaml")
+ ),
+ "--badgedir",
+ str(badgedir),
+ ]
+ cwd = os.getcwd()
+ try:
+ os.chdir(get_data("tests/test-data/"))
+ error_code, stdout, stderr = run_with_mock_cwl_runner(args)
+ finally:
+ os.chdir(cwd)
+
+ assert error_code == 1
+ required_json = badgedir / "required.json"
+ assert required_json.exists()
+ with open(required_json) as file:
+ obj = json.load(file)
+ assert obj.get("subject", "") == "required"
+ assert obj.get("status", "") == "0%"
+ assert obj.get("color", "") == "red"
+
+ required_md = badgedir / "required.md"
+ assert required_md.exists()
+ with open(required_md) as file:
+ s = file.read()
+ assert "file://" in s
+ assert "tests/test-data/conformance_test_v1.2.cwltest.yaml" in s
+ assert "v1.0/cat-job.json" in s
+ assert "v1.0/cat1-testcli.cwl" in s
+
+ clt = badgedir / "command_line_tool.json"
+ assert clt.exists()
+ with open(clt) as file:
+ obj = json.load(file)
+ assert obj.get("subject", "") == "command_line_tool"
+ assert obj.get("status", "") == "0%"
+ assert obj.get("color", "") == "yellow"
+ assert (badgedir / "command_line_tool.md").exists()
+
+
+def test_badgedir_report_with_baseuri(tmp_path: Path) -> None:
+ badgedir = tmp_path / "badgedir"
+
+ baseuri = "https://example.com/specified/uri"
+
+ args = [
+ "--test",
+ schema_salad.ref_resolver.file_uri(get_data("tests/test-data/badgedir.yaml")),
+ "--badgedir",
+ str(badgedir),
+ "--baseuri",
+ baseuri,
+ ]
+ cwd = os.getcwd()
+ try:
+ os.chdir(get_data("tests/test-data/"))
+ error_code, stdout, stderr = run_with_mock_cwl_runner(args)
+ finally:
+ os.chdir(cwd)
+
+ clt_md = badgedir / "command_line_tool.md"
+ assert clt_md.exists()
+ with open(clt_md) as file:
+ contents = file.read()
+ assert contents == markdown_report_with(baseuri)
+
+
+def markdown_report_with(baseuri: str) -> str:
+ return dedent(
+ f"""
+ # `command_line_tool` tests
+ ## List of passed tests
+ - [success_w_job]({baseuri}/badgedir.yaml#L0) ([tool]({baseuri}/return-0.cwl), [job]({baseuri}/empty.yml))
+ - [success_wo_job]({baseuri}/badgedir.yaml#L7) ([tool]({baseuri}/return-0.cwl))
+ ## List of failed tests
+ - [failure_w_job]({baseuri}/badgedir.yaml#L13) ([tool]({baseuri}/return-1.cwl), [job]({baseuri}/empty.yml))
+ - [failure_wo_job]({baseuri}/badgedir.yaml#L20) ([tool]({baseuri}/return-1.cwl))
+ ## List of unsupported tests
+ - [unsupported_w_job]({baseuri}/badgedir.yaml#L26) ([tool]({baseuri}/return-unsupported.cwl), [job]({baseuri}/empty.yml))
+ - [unsupported_wo_job]({baseuri}/badgedir.yaml#L33) ([tool]({baseuri}/return-unsupported.cwl))
+ """
+ )[1:]
=====================================
tests/test_plugin.py
=====================================
@@ -13,6 +13,7 @@ def _load_v1_0_dir(path: Path) -> None:
inner_dir = os.path.join(path.parent, "v1.0")
os.mkdir(inner_dir)
shutil.copy(get_data("tests/test-data/v1.0/cat1-testcli.cwl"), inner_dir)
+ shutil.copy(get_data("tests/test-data/v1.0/null-expression2-tool.cwl"), inner_dir)
shutil.copy(get_data("tests/test-data/v1.0/cat-job.json"), inner_dir)
shutil.copy(get_data("tests/test-data/v1.0/cat-n-job.json"), inner_dir)
shutil.copy(get_data("tests/test-data/v1.0/hello.txt"), inner_dir)
@@ -104,7 +105,7 @@ def test_no_label(pytester: "Pytester") -> None:
result = pytester.runpytest(
"-k", "conformance_test_v1.2.cwltest.yaml", "--cwl-tags", "required"
)
- result.assert_outcomes(passed=1, skipped=1)
+ result.assert_outcomes(passed=2, skipped=1)
def test_cwltool_hook(pytester: "Pytester") -> None:
=====================================
tox.ini
=====================================
@@ -83,8 +83,8 @@ skip_install = true
[testenv:py312-lintreadme]
description = Lint the README.rst->.md conversion
commands =
- python -m build --outdir {distdir}
- twine check {distdir}/*
+ python -m build --outdir dist
+ twine check dist/*
deps =
twine
build
View it on GitLab: https://salsa.debian.org/med-team/cwltest/-/compare/9118bf2f28b610642ace5fddb14625fd92fbb322...fc6a8f332e80a9fa3501fe5836a981cefa5c5b91